Dataset schema (field, type, length/value range):

  query            string, length 9 to 3.4k
  document         string, length 9 to 87.4k
  metadata         dict
  negatives        sequence, length 4 to 101
  negative_scores  sequence, length 4 to 101
  document_score   string, length 3 to 10
  document_rank    string, 102 distinct values
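The schema above maps directly onto the Hugging Face `datasets` API. Below is a minimal loading sketch; the repository id "org/code-search-triplets" is a hypothetical placeholder, and the field comments reflect what the rows further down show.

```python
from datasets import load_dataset

# Hypothetical repository id -- substitute the actual dataset path.
ds = load_dataset("org/code-search-triplets", split="train")

row = ds[0]
print(row["query"])           # natural-language description of the code
print(row["document"])        # the matching (positive) code snippet
print(len(row["negatives"]))  # 4 to 101 hard-negative snippets per row
print(row["document_score"], row["document_rank"])
```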
Get the number of bits needed for an item.
def _get_nr_of_bits(self):
    return sum(self._size_var)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\r\n return numBits(self.n)", "def number_of_bits(self) -> int:\n raise NotImplementedError('To be Overidden by the derived class')", "def bitSizeOf(self) -> int:\n\n return self._numBits", "def bitSizeOf(self) -> int:\n\n return self._numBits", "def number_of_bits(self):\n return self.numbits", "def bitSizeOf() -> int:\n\n return 1", "def NumBits(self):\n num_bits = 8*len(self.output)\n if self.out_boff % 8:\n num_bits -= 8\n num_bits += self.out_boff\n if num_bits < 0:\n print \"What the...\"\n return num_bits", "def bitSizeOf() -> int:\n\n return 64", "def bits(self):\n return self._q.bit_length()", "def bit_length(self, ???):", "def bitSizeOf() -> int:\n\n return 32", "def n_bits(self):\n return self._n_bits", "def get_bitsize(self) -> int:\n return self._surface.get_bitsize()", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def getNbrOfBit(self):\n return DPxGetDinNumBits()", "def BitsRemaining(self):\n return self.NumBits() - (8*self.idx_byte + self.idx_boff) - 1", "def bitSizeOf() -> int:\n\n return 16", "def item_count(self):\n return self.items.shape[0]", "def count(bits: int) -> int:\n return len(to_list(bits)) # I'm lazy", "def size(self):\n return self.num_item", "def count(item):\n return len(item)", "def number_bits_in_cardinality(self,card):\n return 32 - self.count_lead_zs(card)", "def items_num(self):\n\t\treturn len(self.items)", "def items_num(self):\n\t\treturn len(self.items)", "def get_num_items(self):\r\n return self.num_items", "def _calculate_bit_size(self, history: sizing_executor.SizeAndDTypes) -> int:\n bit_size = 0\n for num_elements, dtype in history:\n bit_size += num_elements * self._bits_per_element(dtype)\n return bit_size", "def __len__(self):\n return sum(1 for item in self.wishlist.values())", "def __len__(self):\n return sum(1 for item in self.wishlist.values())", "def getSupport(self, item):\n return self.itemCountDict[item] / self.transLength", "def get_number_of_items(self):\n return len(self.__item_map)", "def getItemBitLength(self, partitionValue, partitionType):\n numbers = self.getNumbers(partitionType)\n for i in range(len(numbers)):\n if numbers[i][self.PartitionValue] == partitionValue:\n return numbers[i][self.ItemBits]", "def items_num(self):\n return len(self.items)", "def count_bits(x: int) -> int:\n 
num_bit: int = 0\n while x:\n # if odd, right most bit is 1\n num_bit += x & 1\n # shift to the right 1 bit\n x >>= 1\n\n return num_bit", "def number_bites_accessed(self) -> int:\r\n accessed_bites = {\r\n row['bite']\r\n for row in self.rows\r\n }\r\n\r\n return len(accessed_bites)", "def remaining_size_bits(self):\n return self.max_buffer_size_bits - self.curr_buffer_size_bits", "def _bit_storing_size(n):\n return -((-n) // 8)", "def bits(self):\n if not self.group_list:\n return 0.0\n # TODO: Is it worth to cache the overall result?\n return self.bits_except_last + self.group_list[-1].bits", "def bytelen(item):\n if isinstance(item, array.array):\n return item.itemsize * len(item)\n elif isinstance(item, (ctypes.Structure, _ctypes._SimpleCData)):\n return ctypes.sizeof(item)\n elif hasattr(item, 'nbytes'):\n # Duck-type as numpy array / memoryview.\n return item.nbytes\n elif isinstance(item, (bytes, bytearray)):\n return len(item)\n else:\n # We could just fall back on len() but I worry that someone will\n # unwittingly use this on a type that has a __len__ that is not its\n # bytelength and is not already caught above. Better to fail like this.\n raise CaprotoNotImplementedError(\"Not sure how to measure byte length \"\n \"of object of type {}\"\n \"\".format(type(item)))", "def count(self, item):\n if item in self: \n return self[item]\n else: \n return 0", "def _bit_length_actual(self, obj: int) -> int:\n # Zero always takes 1 bit\n if obj == 0:\n return 1\n\n if self._signed:\n if obj > 0:\n return obj.bit_length() + 1\n else:\n return (obj + 1).bit_length() + 1\n else:\n if obj > 0:\n return obj.bit_length()\n else:\n return -1", "def itemsize(self):\n return self.initial_value.itemsize", "def count(self, item):\n # type: (Any) -> int\n return list.count(self, self.ref(item))", "def count(self):\n return sum([self.bits[x][y] for x in range(self.n_rows)\n for y in range(self.n_columns)])", "def size_as_number_of_bits(size):\n\n if size == 0:\n return 0\n else:\n return len('{:b}'.format(size))", "def container_size(self):\n import cPickle\n import sys\n t = cPickle.dumps(self.filter_bitarray)\n return sys.getsizeof(t)", "def size(self) -> int:\n return self.num_items", "def n_items(self):\n if self._n_items is None:\n self._n_items = len(self.item_unique_vals)\n return self._n_items", "def data_size( self, item ):\n if type(item) == types.TupleType:\n return abs(item[0])\n try:\n return abs(item)\n except TypeError, te:\n return -1", "def python_int_bitwidth():\r\n # 'l' denotes a C long int, and the size is expressed in bytes.\r\n return struct.calcsize('l') * 8", "def bitsLeftToRead(self):\n return self.N - self.bitsRead", "def countBits(x):\n # from https://stackoverflow.com/questions/10874012/how-does-this-bit-manipulation-work-in-java/10874449#10874449\n # Used because of the O(log(n)) complexity\n\n x = x - ((x >> 1) & 0x55555555)\n x = (x & 0x33333333) + ((x >> 2) & 0x33333333)\n x = (x + (x >> 4)) & 0x0F0F0F0F\n x = x + (x >> 8)\n x = x + (x >> 16)\n return x & 0x0000003F", "def items_count(self):\n return len(self.items)", "def bitSizeOf(bitPosition: int, value: typing.Any) -> int:\n\n return value.bitSizeOf(bitPosition)", "def count_bits(dqxx, n):\n return sum([check_bit(x, n) for x in dqxx])", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def count_set_bits(bitmap):\n bmp = bitmap\n count = 
0\n n = 1\n while bmp > 0:\n if bmp & 1:\n count += 1\n bmp = bmp >> 1\n n = n + 1\n return count", "def count_bits(n):\n return sum(1 for x in bin(n) if x == \"1\")", "def count(self, item):\n return _(self._.count(item))", "def easy_count_set_bits(num):\n print('Counted {} set bits'.format(bin(num).count('1')))", "def get_num_values(self, item):\n\tnum_values = 1\n\t\n\t# Valor mas antiguo de la linked list\n\t# Siempre tiene valor, si no, no tenemos la entrada en el hashset\n\tvalue = item[\"tail\"][\"next\"]\n \twhile long(value) != 0:\n\t num_values += 1\n\t value = value[\"next\"]\n\n\treturn num_values", "def bitSizeOf(_bitPosition: int, value: BitBuffer) -> int:\n\n return getBitSizeOfBitBuffer(value)", "def countBits(n):\n binary = bin(n)[2:]\n counter = 0\n \n for i in binary:\n if i == '1':\n counter += 1\n \n return counter", "def _num_32_bit_words_for_bit_fields(bit_fields):\n num_buckets, cur_bucket = 0, 0\n for field in bit_fields:\n if field.size + cur_bucket > 32:\n num_buckets += 1\n cur_bucket = 0\n cur_bucket += field.size\n return num_buckets + (cur_bucket > 0)", "def BytesOfStorage(self):\n return (self.NumBits() + 7) / 8", "def getItemCount(self, ItemBase):\n Found = 0\n for CurrItem in self.List:\n if CurrItem.Base == ItemBase:\n Found = 1\n break\n\n if not Found: return 0\n else: return CurrItem.Count", "def bitSizeOf(_bitPosition: int, value: int) -> int:\n\n return getBitSizeOfVarInt32(value)", "def bitSizeOf(_bitPosition: int, value: int) -> int:\n\n return getBitSizeOfVarInt64(value)", "def get_nbits(self):\n if(not self._constructed): raise EGCSUnconstructedStateError()\n return self._nbits", "def _get_item_lengths(self) -> List[int]:\n return [len(x[0]) for x in self.data]", "def bitSizeOf(_bitPosition: int, value: int) -> int:\n\n return getBitSizeOfVarInt(value)", "def test_bit_count_random_bit_size(self):\n bit_size = random.randint(1, 40)\n ops = [bitwise_operations.bit_count(self.five_255_bin, 0, bit_size)]\n\n _, _, result = self.as_connection.operate(self.test_key, ops)\n assert result[\"255\"] == bit_size", "def size(self) -> int:", "def __len__(self):\n return sum(item['qty'] for item in self.basket.values()) # counts all the values of the key qty", "def bitsLeftToWrite(self):\n return self.N - self.bitsWritten", "def count(self, item: Any) -> int:\n curr = self._first\n count = 0\n\n while curr is not None:\n if curr.item == item:\n count += 1\n curr = curr.next\n\n return count", "def num_clbits(self):\n return 0", "def nitems_read(self, which_input):\n return _spacegrant_swig.invert_bit_sptr_nitems_read(self, which_input)", "def count(self, item):\n # TODO: complete this function!\n if item not in self:\n return 0\n else:\n num_occur = 0\n if self._first == item:\n num_occur += 1\n num_occur += self._rest.count(item)\n return num_occur", "def get_int_bits(self):\n return self.int_bits", "def __len__(self):\n return self._number_of_items", "def nbytes(self) -> int:\n return self._nbytes(False)", "def bitSizeOf(_bitPosition: int, value: int) -> int:\n\n return getBitSizeOfVarUInt32(value)" ]
[ "0.7077946", "0.70436716", "0.69864804", "0.69864804", "0.6966107", "0.68727595", "0.68669325", "0.68431276", "0.6806957", "0.67431825", "0.67405957", "0.6726725", "0.66758716", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6655241", "0.66417986", "0.65701747", "0.6569538", "0.65460414", "0.6534713", "0.6483083", "0.6446569", "0.64413875", "0.64413875", "0.6419937", "0.6402184", "0.63834906", "0.63834906", "0.6383391", "0.6359171", "0.6353695", "0.63375723", "0.6327543", "0.6326894", "0.6316434", "0.6300374", "0.6274582", "0.62597936", "0.62565535", "0.6208174", "0.61752874", "0.6145598", "0.6129768", "0.6128082", "0.6123824", "0.6119526", "0.6106803", "0.60981894", "0.6089323", "0.6083235", "0.6082787", "0.6075549", "0.6061384", "0.6059903", "0.60291886", "0.60291886", "0.60291886", "0.60291886", "0.6012544", "0.60041124", "0.6002464", "0.59849435", "0.5972714", "0.59726864", "0.5961244", "0.59342116", "0.5926412", "0.5925628", "0.5914466", "0.591138", "0.59058654", "0.5902278", "0.5883565", "0.5867465", "0.58614856", "0.58603984", "0.58597505", "0.5857699", "0.582954", "0.5822974", "0.5812363", "0.5806067", "0.5792651", "0.5786391", "0.57808685" ]
0.7367046
0
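Each row's metadata declares a single triplet objective over ("query", "document", "negatives"). A minimal sketch of how one such triplet could feed an InfoNCE-style contrastive loss follows; the embedding tensors and the temperature value are placeholder assumptions, not part of the dataset.

```python
import torch
import torch.nn.functional as F

def triplet_info_nce(q_emb: torch.Tensor,     # (d,) query embedding
                     doc_emb: torch.Tensor,   # (d,) positive document embedding
                     neg_embs: torch.Tensor,  # (n, d) hard-negative embeddings
                     temperature: float = 0.05) -> torch.Tensor:
    pos = F.cosine_similarity(q_emb, doc_emb, dim=-1).unsqueeze(0)    # (1,)
    negs = F.cosine_similarity(q_emb.unsqueeze(0), neg_embs, dim=-1)  # (n,)
    logits = torch.cat([pos, negs]) / temperature                     # (n + 1,)
    # Index 0 holds the positive document, so it is the target class.
    return F.cross_entropy(logits.unsqueeze(0), torch.zeros(1, dtype=torch.long))
```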
Get a random genom.
def get_random(self):
    base_genom = "1" * sum(self._size_var)
    return utils.randomise_a_string(base_genom)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_random_genome(self):\n return random.choice(self.genomes)", "def generate_random_individual():\n genotype = []\n ### Your code here\n return {'genotype': genotype, 'fitness': None }", "def random_gene(self):\n size = random.randint(1,50)\n gene = \"\"\n for i in range(0,size,1):\n gene+=random.choice(self.instructions)\n return gene", "def random():\r\n return R.NextDouble()", "def generate():\n s = random_data.random_bytes(100)\n return generate_from_string(s)", "def get_random_individual(self, generation):\n if len(self.generations) <= generation < 0:\n raise ValueError('Please enter a valid generation.')\n return self.get_individual(\n generation=generation,\n index=random.randint(0, len(self.generations[generation]) - 1))", "def random_gene(self):\n path_number = 6\n x = random.randint(0, path_number)\n return x", "def random(self) -> Gadza:\n return choice(self.gadzas)", "def get_random_object():\n\n return random.choice([\n get_random_alphabetic_string,\n get_random_alphanumeric_string,\n get_random_integer,\n get_random_real_number\n ])()", "def random_plush_gene(self):\n atom = random.choice(list(self.atom_generators))\n return self.atom_to_plush_gene(atom)", "def _get_gaussian_random(self):\n u1 = generateRandom()\n u2 = generateRandom()\n if u1 < 1e-6:\n u1 = 1e-6\n return sqrt(-2 * log(u1)) * cos(2 * pi * u2)", "def get_random(self):\n return self._get_random()", "def get_random_population():\r\n return [ get_random_individual() for _ in range(POPULATION_COUNT) ]", "def getRandom(self):\n return random.choice(self.vec)", "def __generate_random_gene_sequence(self):\n genes = []\n for j in range(self.chromosome_size):\n genes.append(random.choice(self.gene_pool))\n\n return genes", "def getRandom( self ):\n import random \n count = Mysql.ex( \"SELECT count(*) AS c FROM `%s`.`people`;\" % self.db_name )\n the_id = random.randint( 1, count[0]['c'] )\n people = self.getByID( the_id )\n return people", "def getRandom(self):\n return random.choice(self.data)", "def get_random(cls):\n\n\t\tnum = randint(0, 6)\n\n\t\treturn Tetromino(num)", "def random_chromosome(self):\n genes = []\n for i in range(self.chromosome_length):\n genes.append(self.random_gene())\n\n return genes", "def _gen_random_number() -> float:\n return uniform(0, 1000)", "def getRandom(self):\n return self.nums[randint(0, len(self.nums)-1)]", "def get_random(self):\n self.random_range = list(np.array(self.friendly_range) * self.conversion)\n return np.random.uniform(self.random_range[0], self.random_range[1], 1)[0]", "def get_random(self,num):\n return ''.join(sample('abcdefghijklmnopqrstuvwxyz1234567890!',8))", "def generate_random_gene_sequence(gene_pool):\n genes = []\n for j in range(DEFAULT_CHROMOSOME_SIZE):\n genes.append(random.choice(gene_pool))\n\n return genes", "def _make_random_genome(evo_config):\n\n # create random genome by creating chromosomes for box size and movement\n return _make_size_dict(evo_config), _make_move_pattern(_make_limb_dict(), evo_config)", "def random():\n return constant(1)", "def random_girl(self):\n return [result for result in self._db.girls.find().limit(1) \\\n .skip(random.randrange(self._db.girls.count()))][0]", "def getRandom(self) -> int:", "def getRandom(self) -> int:", "def getRandom(self):\n randomIndex = random.randrange(0, self.size)\n return self.nums[randomIndex]", "def get_mpg():\n return uniform(20.0, 50.0)", "def getRandom(self):\n # pick a random number from the list\n return random.choice(self.nums)", "def generate_RME():\n RME = [\"ogre\", \"goblin\", 
\"gnoll\", \"orc\", \"personal injury lawyer\"]\n monster = random.choice(RME)\n return monster", "def random(self):\r\n return random.randint(1, 4)", "def getRandom(self):\n return self.nums[random.randint(0, len(self.nums) - 1)]\n\n # Your RandomizedSet object will be instantiated and called as such:\n # obj = RandomizedSet()\n # param_1 = obj.insert(val)\n # param_2 = obj.remove(val)\n # param_3 = obj.getRandom()", "def rngnext():\n out = []\n # random\n state = random.getstate()\n out.append(f\"r={random.random():0.4f}\")\n random.setstate(state)\n\n # numpy\n state = np.random.get_state()\n out.append(f\"n={np.random.random():0.4f}\")\n np.random.set_state(state)\n\n # torch\n state = torch.random.get_rng_state()\n out.append(f\"t={torch.rand(1)[0]:0.4f}\")\n torch.random.set_rng_state(state)\n\n # cuda\n if torch.cuda.is_available():\n state = torch.cuda.get_rng_state()\n # note there is no function for generating a random in cuda but this may work?\n out.append(f\"c={state.float().std()%1:0.4f} {torch.backends.cudnn.deterministic}\")\n\n return out", "def get_random_uniform(m,n):\n\n return 2*np.random.random(size=(m,n)) - 1", "def getRandom(self):\r\n return self.data[rnd.randrange(self.len)]", "def getRandom(self):\n \n return self.data[random.randint(0, len(self.data) - 1)]", "def random(self):\n return self._randomize()", "def get_random_2(number):\n return ''.join(random.sample(field, number))", "def __generate_genotype(self):\n if len(self.genotype) < self.__individual_genotype_length:\n gene = ''\n \n while len(self.genotype) < self.__individual_genotype_length:\n gene = str(random.randint(0,1))\n \n self.genotype = self.genotype + gene", "def getRandom(self) -> int:\n return random.choice(list(self.set))", "def generate() -> int:\n return randint(0, 1000000000)", "def rand(self, x):\r\n return np.random.random(1)[0]", "def rand(self):\n raise NotImplementedError", "def random(self):\n return self._random", "def get_rng(obj=None):\n seed = (id(obj) + os.getpid() +\n int(datetime.now().strftime(\"%Y%m%d%H%M%S%f\"))) % 4294967295\n if _RNG_SEED is not None:\n seed = _RNG_SEED\n return random.Random(seed)", "def get_random_sequence(genome):\n \n chr_list = get_chromosome_length(genome)\n \n random_seq = {}\n chr = random.sample(chr_list.keys(),1) #select chromosome\n slen = random.randint(300,1000) #select sequence length\n if chr_list[chr[0]] - slen > 0:\n spos = random.randint(1,chr_list[chr[0]] - slen) #select start position\n \n seq = get_fragment(genome, chr[0], slen, spos)\n if seq.count(\"N\") > 0.1 * slen:\n seq = get_random_sequence(genome)\n else:\n seq = get_random_sequence(genome)\n \n return seq", "def pick_one(self):\n index = 0\n r = random.random()\n while r >= 0:\n r = r - self.normalised_fitness[index]\n index += 1\n index -= 1\n return self.population[index]", "def rand(self):\n q = pinocchio.randomConfiguration(self.model)\n v = np.random.rand(self.model.nv) * 2 - 1\n return np.concatenate([q.flat, v])", "def generate_random(self: object) -> None:\n self.random.set(Sequence.generate(length=50))", "def genMsg(self):\n return os.urandom(self.messageSize)", "def get_genre(self, gen: str) -> Genre:\n self.logging.log(15, f\"getting genre: {gen}\")\n return self.sess.query(Genre).filter(Genre.genre == gen).one()", "def getRandom(self) -> int:\n return random.choice(self.elements)", "def rng():\n return numpy.random.default_rng(564)", "def get_random(number):\n s = ''\n for i in xrange(number):\n s = s + str(random.choice(field))\n return s", "def rand(self):\n 
raise NotImplementedError(\"Not implemented yet.\")", "def get_random_generator(random_generator):\n # define random generator function\n if random_generator == 0:\n logger.info(\"Random generator: MersenneTwister\")\n return random_MersenneTwister\n\n elif random_generator == 1:\n logger.info(\"Random generator: Latin Hypercube\")\n return random_LatinHypercube\n\n else:\n raise ValueError(f\"No random generator exists for random_generator={random_generator}.\")", "def simulate_generations(gene_pool, environment, gen=DEFAULT_GENERATIONS):\n seq_to_fitness = multiprocessing.Manager().dict()\n chromosomes = []\n fittest_chromosome = []\n\n for i in range(DEFAULT_POPULATION_SIZE):\n chromosomes.append(generate_random_gene_sequence(gene_pool))\n\n for i in range(gen):\n chromosomes, fittest_chromosome = simulate_generation(chromosomes,\n gene_pool,\n environment,\n seq_to_fitness)\n\n if i < gen - 1:\n chromosomes = delete_duplicates(chromosomes, gene_pool)\n\n return fittest_chromosome", "def random(self, af=False):\n rank = randrange(self.order())\n return self.coset_unrank(rank, af)", "def generate_random_value(self, type):\n generators = {\n str: lambda: self.generate_random_string(20, uppercase=True, punctuations=True),\n int: lambda: random.randrange(100000),\n float: lambda: random.random() * 100000.0,\n bool: lambda: bool(random.getrandbits(1)),\n list: lambda: self.generate_random_list_or_string(),\n dict: lambda: self.generate_random_dict_or_string()\n }\n generator = generators[type]\n return generator()", "def rand(self):\n return self.State.rand()", "def getRandom(self) -> int:\n\n return random.choice(self.nodes).val", "def generate_random_gif():\n\n random_gif_url = giphy_random_generator()\n return jsonify(random_gif_url)", "def rand_ident():\n return random.randrange(MAX_IDENT)", "def random_glove_generator(emb_mean, emb_stddev):\n x = np.random.normal(loc=0.0, scale=1.0, size=len(emb_mean))\n x_rand = np.multiply(x, emb_stddev) + emb_mean\n return x_rand", "def acquisition_function_random(gp_reward_model: BasicGPRewardModel) -> int:\n return np.random.randint(0, len(gp_reward_model.candidate_queries))", "def urandom_rng(n):\r\n f = open('/dev/urandom', 'rb')\r\n try:\r\n return f.read(n)\r\n finally:\r\n f.close()", "def get_random_individual():\r\n return [ random.random() for _ in range(PARAMETERS_COUNT) ]", "def random() -> float:\n ...", "def getRandom(self):\n random_index = randint(0, len(self.list_val)-1)\n return self.list_val[random_index]", "def getRandom(self) -> int:\n return random.choice(list(self.d.keys()))", "def getRandom(self):\n return random.choice(self.ls)", "def simple_genotype_matrix(n, p):\n genotypes = np.zeros(shape=(n, p))\n for item in range(0, p):\n genotypes[:, item] = np.random.binomial(1, np.random.uniform(0.1, 0.5, 1), n)\n\n return genotypes", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]", "def get_random_user():\n return random.choice(User.query.all())", "def get_random(self, num_bytes: int) -> bytes:\n if num_bytes > 1024:\n logger.warning(\n \"Requesting a large number of bytes. 
This may take a while: {num_bytes}\"\n )\n data = ffi.new(\"uint8_t **\")\n ret = lib.Fapi_GetRandom(self._ctx, num_bytes, data)\n _chkrc(ret)\n return bytes(ffi.unpack(_get_dptr(data, lib.Fapi_Free), num_bytes))", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b / np.sum(b, 1)[:, None]", "def get_rng(environ, experiment, swabid):\n r = Random()\n r.seed(experiment.seed_strategy(environ, experiment, swabid))\n return r", "def base_pick():\n\n rnd = generate_random(2, 15)\n return rnd", "def getRandom(self) -> int:\n return random.choice(self.array)", "def random_number():\n random_num = random.choice(empty)\n return random_num", "def choose_random(N):\n db = pymongo.MongoClient('localhost',27020).chembldb\n # Get all CHEMBL IDs\n db.molecules.ensure_index('chembl_id')\n chembl_ids = [m['chembl_id'] for m in db.molecules.find().sort('chembl_id')]\n print len(chembl_ids)\n random.seed(201405291515)\n rands = random.sample(chembl_ids, N)\n return(rands)", "def getTmdbGenres():\n\n #If the file is not present in the resource, creates it \n if not isfile(GENRES_FILE):\n saveTmdbGenres()\n\n return np.load(GENRES_FILE)[0]", "def getRandom(self) -> int:\n import random\n return random.choice(self._array)", "def getRandom(self) -> int:\n return random.choice(self.store_list)", "def generate_random(limit_lo, limit_hi):\n\n return RAND.randint(limit_lo, limit_hi)", "def generate(self):\n return self.gen.generate()", "def build_random_population(n: int)->Population:\n DEF_COO = 2\n v = [make_random_automaton(DEF_COO) for i in range(n)]\n return Population(v)", "def set_random_genres(self, num:int):\n try:\n self.cursor.execute(\"insert into genres (name, example, year) \"\n \"select rand.name, rand.example, rand.year \"\n \"from (SELECT \"\n \"(md5(random()::text)) as name, \"\n \"(md5(random()::text)) as example, \"\n \"2020 - trunc(Random()*1000)::integer as year \"\n f\"from generate_series(1,{num})) as rand\")\n self.connection.commit()\n if self.cursor.rowcount:\n return \"generated genres\"\n else:\n return \"NULL\"\n except(Exception, psycopg2.Error) as error:\n self.connect.rollback()\n print(\"error in generate\", error)", "def random(self):\n\n return self._random", "def getRandom(self) -> int:\n return random.choice(tuple(self.l))", "def getRandom(self) -> int:\n size = len(self.value_set)\n if size > 0:\n from random import randint\n x = randint(1, size)\n return self.values[x - 1]", "def getRandom(self):\n return random.choice(self.table.keys())", "def generate_random_numbers(self):\r\n #random.seed(seed=self.seed)\r\n #err = random.random((3,1))\r\n #f = open('test_res', 'a')\r\n #f.write('probability - %s' %self.seed)\r\n #f.write(str(list(err[:3,:])))\r\n #f.write('\\n')\r\n #f.close()\r\n\r\n dist = RandomDistribution(self.seed)\r\n rand_numbers = dist.return_random_variables(self.num_agents)\r\n return rand_numbers", "def create_random_sample(random_population, r = 100):\n choose_sample = [choice(random_population) for _ in xrange(r)]\n return choose_sample", "def random_agent(self, state):\n\t\trndint = random.randint\n\t\treturn self.state[state][rndint(0, len(self.state[state]))]", "def _get_sample(self):\n mu = self._get_mean()\n sample = self.random.normal(mu)\n return sample" ]
[ "0.7129977", "0.68136334", "0.65213746", "0.64397925", "0.6434923", "0.6387772", "0.63817495", "0.6367737", "0.63332325", "0.63026583", "0.62996596", "0.6297405", "0.6288606", "0.62817526", "0.62786907", "0.6232036", "0.6221588", "0.6218463", "0.61816496", "0.6164786", "0.6125345", "0.6108744", "0.6062824", "0.6056672", "0.60522807", "0.6047211", "0.60292006", "0.6021488", "0.6021488", "0.60089886", "0.5970878", "0.596961", "0.59625965", "0.59615874", "0.59563833", "0.594761", "0.5932397", "0.5927468", "0.59133685", "0.5903598", "0.5903153", "0.5903011", "0.5891772", "0.586049", "0.5859235", "0.58519626", "0.58455426", "0.58355564", "0.58227134", "0.58183616", "0.5807132", "0.5801135", "0.580054", "0.57912314", "0.5778754", "0.57778805", "0.5762343", "0.5760827", "0.57543457", "0.5750739", "0.5744793", "0.5743823", "0.5733622", "0.57311386", "0.57290477", "0.57078165", "0.5707373", "0.5707026", "0.5702056", "0.5697558", "0.5692694", "0.56893617", "0.5684203", "0.5684183", "0.56809974", "0.5678506", "0.5678506", "0.56668454", "0.5658049", "0.5648216", "0.5648171", "0.5637111", "0.5626467", "0.5623892", "0.5623216", "0.56181425", "0.561465", "0.5613251", "0.56043684", "0.5597841", "0.55887806", "0.55793154", "0.5579257", "0.55791086", "0.55776364", "0.5574889", "0.55684537", "0.55673516", "0.55590487", "0.555184" ]
0.6945635
1
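Across the two complete rows above, `document_rank` matches the number of negatives whose score exceeds `document_score` (0 in the first row, 1 in the second, where one negative scores 0.7129977 against the document's 0.6945635). The sketch below encodes that relationship; it is an inference from these rows, not a documented rule.

```python
# Assumption inferred from the rows above (not a documented rule):
# document_rank counts the negatives that outscore the positive document.
def infer_document_rank(document_score: float, negative_scores: list[float]) -> int:
    return sum(1 for s in negative_scores if s > document_score)

# Row 1: the document (0.7367046) beats every negative (max 0.7077946) -> rank 0.
assert infer_document_rank(0.7367046, [0.7077946, 0.70436716]) == 0
# Row 2: exactly one negative (0.7129977) beats the document (0.6945635) -> rank 1.
assert infer_document_rank(0.6945635, [0.7129977, 0.68136334]) == 1
```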
Create a new block cipher, configured in CTR mode.
def __init__(self, block_cipher, initial_counter_block,
             prefix_len, counter_len, little_endian):

    if len(initial_counter_block) == prefix_len + counter_len:
        self.nonce = _copy_bytes(None, prefix_len, initial_counter_block)
        """Nonce; not available if there is a fixed suffix"""

    self._state = VoidPointer()
    result = raw_ctr_lib.CTR_start_operation(block_cipher.get(),
                                             c_uint8_ptr(initial_counter_block),
                                             c_size_t(len(initial_counter_block)),
                                             c_size_t(prefix_len),
                                             counter_len,
                                             little_endian,
                                             self._state.address_of())
    if result:
        raise ValueError("Error %X while instantiating the CTR mode"
                         % result)

    # Ensure that object disposal of this Python object will (eventually)
    # free the memory allocated by the raw library for the cipher mode
    self._state = SmartPointer(self._state.get(),
                               raw_ctr_lib.CTR_stop_operation)

    # Memory allocated for the underlying block cipher is now owed
    # by the cipher mode
    block_cipher.release()

    self.block_size = len(initial_counter_block)
    """The block size of the underlying cipher, in bytes."""

    self._next = [self.encrypt, self.decrypt]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_ctr_cipher(factory, **kwargs):\n\n cipher_state = factory._create_base_cipher(kwargs)\n\n counter = kwargs.pop(\"counter\", None)\n nonce = kwargs.pop(\"nonce\", None)\n initial_value = kwargs.pop(\"initial_value\", None)\n if kwargs:\n raise TypeError(\"Invalid parameters for CTR mode: %s\" % str(kwargs))\n\n if counter is not None and (nonce, initial_value) != (None, None):\n raise TypeError(\"'counter' and 'nonce'/'initial_value'\"\n \" are mutually exclusive\")\n\n if counter is None:\n # Crypto.Util.Counter is not used\n if nonce is None:\n if factory.block_size < 16:\n raise TypeError(\"Impossible to create a safe nonce for short\"\n \" block sizes\")\n nonce = get_random_bytes(factory.block_size // 2)\n else:\n if len(nonce) >= factory.block_size:\n raise ValueError(\"Nonce is too long\")\n \n # What is not nonce is counter\n counter_len = factory.block_size - len(nonce)\n\n if initial_value is None:\n initial_value = 0\n\n if is_native_int(initial_value):\n if (1 << (counter_len * 8)) - 1 < initial_value:\n raise ValueError(\"Initial counter value is too large\")\n initial_counter_block = nonce + long_to_bytes(initial_value, counter_len)\n else:\n if len(initial_value) != counter_len:\n raise ValueError(\"Incorrect length for counter byte string (%d bytes, expected %d)\" % (len(initial_value), counter_len))\n initial_counter_block = nonce + initial_value\n\n return CtrMode(cipher_state,\n initial_counter_block,\n len(nonce), # prefix\n counter_len,\n False) # little_endian\n\n # Crypto.Util.Counter is used\n\n # 'counter' used to be a callable object, but now it is\n # just a dictionary for backward compatibility.\n _counter = dict(counter)\n try:\n counter_len = _counter.pop(\"counter_len\")\n prefix = _counter.pop(\"prefix\")\n suffix = _counter.pop(\"suffix\")\n initial_value = _counter.pop(\"initial_value\")\n little_endian = _counter.pop(\"little_endian\")\n except KeyError:\n raise TypeError(\"Incorrect counter object\"\n \" (use Crypto.Util.Counter.new)\")\n\n # Compute initial counter block\n words = []\n while initial_value > 0:\n words.append(struct.pack('B', initial_value & 255))\n initial_value >>= 8\n words += [ b'\\x00' ] * max(0, counter_len - len(words))\n if not little_endian:\n words.reverse()\n initial_counter_block = prefix + b\"\".join(words) + suffix\n\n if len(initial_counter_block) != factory.block_size:\n raise ValueError(\"Size of the counter block (%d bytes) must match\"\n \" block size (%d)\" % (len(initial_counter_block),\n factory.block_size))\n\n return CtrMode(cipher_state, initial_counter_block,\n len(prefix), counter_len, little_endian)", "def __create_cipher(self, nonce=None, iv=None):\r\n cipher = None\r\n if self.__encryption_method == EncryptionMethod.AES:\r\n if nonce is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES3:\r\n if nonce is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES:\r\n if nonce is 
not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.SHIFT:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"Shift only supports ECB\")\r\n cipher = SimpleShiftCipher(self.__encryption_key)\r\n elif self.__encryption_method == EncryptionMethod.XOR:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"XOR only supports ECB\")\r\n cipher = SimpleXorCipher(self.__encryption_key)\r\n else:\r\n raise Exception(\"Unknown encryption method \" + str(self.__encryption_method))\r\n return cipher", "def new(key,mode=MODE_ECB,IV=None,counter=None,segment_size=None):\n return AES(key,mode,IV,counter,segment_size)", "def encrypt_ctr(self, plaintext, iv):\n assert len(iv) == 16\n\n plaintext = pad(plaintext)\n\n blocks = []\n nonce = iv\n for plaintext_block in split_blocks(plaintext):\n # CTR mode encrypt: plaintext_block XOR encrypt(nonce)\n block = xor_bytes(plaintext_block, self.encrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return b''.join(blocks)", "def aes_ctr(key, counter=None):\n return AES.new(key, AES.MODE_CTR, counter=(counter if counter is not None else Counter.new(128)))", "def __CreateCipher(self, key_bytes, iv_bytes, mode=AES.MODE_CBC):\n # can we use M2Crypto and was it requested?\n if ACTIVE_CRYPT_LIB.lower() == 'm2crypto' and EVP:\n # yes, so do so\n return self.EVPAdaptor(key_bytes, iv_bytes, mode)\n else:\n # default to PyCrypto\n return self.AESAdaptor(key_bytes, iv_bytes, mode)", "def encryptAESCTR(key, plaintext):\n # 128-bit iv, securely generated\n iv = os.urandom(16)\n cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend())\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n return (iv, ciphertext)", "def __init__(self, key):\n self.block_size = 16\n self.cipher = Cipher(algorithms.AES(key), modes.ECB(), default_backend())", "def aes_cbc(key, iv=None):\n return AES.new(key, AES.MODE_CBC, iv if iv is not None else get_zero_vector(16))", "def ctr(in_file, out_file, block_size, key, op):\n \n with open(in_file, 'rb') as input: # Open files\n with open(out_file, 'wb') as output:\n ctr_str = read_block(input, block_size)[0] # This is the initial ctr\n ctr = byte_str_to_int(ctr_str)\n i = 0 # This is the value which will be added to ctr as we loop\n size = 2**(block_size * 8) # This is the length of the block size in bits\n \n output.write(int_to_byte_str(ctr + i, block_size))\n i += 1\n\n block = [True, True] # This is just to get into the while loop\n while block[1]: # Iterate through the rest of the input\n block = read_block(input, block_size)\n if block [0] != -1:\n this_ctr = (ctr + i) % size\n i += 1\n funced_block = func(int_to_byte_str(this_ctr, block_size), key)\n block_xor = xor(block[0], funced_block)\n output.write(block_xor)", "def ecb_or_cbc_encrypt(plaintext, mode='random'):\n if mode == 'random':\n mode = 'ECB' if randint(0, 1) == 0 else 'CBC'\n\n key = randstr(AES_BSZ)\n plaintext = (\n ''.join([randstr(1) for _ in range(randint(5, 10))]) +\n plaintext +\n ''.join([randstr(1) for _ in range(randint(5, 10))])\n )\n plaintext = pad_to_blocksize(plaintext)\n\n if mode == 'ECB':\n ecb = AES.new(key, AES.MODE_ECB)\n ciphertext = 
ecb.encrypt(plaintext)\n elif mode == 'CBC':\n iv = randstr(AES_BSZ)\n cbc = AES.new(key, AES.MODE_CBC, iv)\n ciphertext = cbc.encrypt(plaintext)\n else:\n raise Exception(\"invalid mode\")\n\n return ciphertext", "def main():\n b64 = (b\"L77na/nrFsKvynd6HzOoG7GHTLXsTVu9qvY/2syLXzhPweyyMTJULu/6/kXX0KSvo\"\n b\"OLSFQ==\")\n binary = base64.b64decode(b64)\n\n key = b\"YELLOW SUBMARINE\"\n nonce = bytes(8)\n cipher = AES.new(key, AES.MODE_ECB)\n ctr = CTRMode(\n blksize=16,\n encrypt_blk=cipher.encrypt,\n decrypt_blk=cipher.decrypt,\n nonce=nonce,\n )\n\n decrypted = ctr.decrypt(binary)\n\n print(decrypted.decode())", "def encryptAESCTR(key, nonce, pt):\n\tct = b''\n\tcounter = 0\n\tfor ptBlock in chunks(pt, 16):\n\t\tblock = (int.from_bytes(nonce, byteorder='big') + counter).to_bytes(16, byteorder='big')\n\t\tencBlock = encryptAESBlock(key, block)\n\t\tct += xor(ptBlock, encBlock)\t\t\n\t\tcounter += 1\n\treturn ct", "def aes_ctr_encrypt(self, key: bytes, plain_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.encryptor()\n return enc.update(plain_data) + enc.finalize()", "def decrypt_ctr(self, ciphertext, iv):\n assert len(iv) == 16\n\n blocks = []\n nonce = iv\n for ciphertext_block in split_blocks(ciphertext):\n # CTR mode decrypt: ciphertext XOR decrypt(nonce)\n block = xor_bytes(ciphertext_block, self.decrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return unpad(b''.join(blocks))", "def operate_cipher(self):", "def encryptor(iv = os.urandom(16), key = os.urandom(32), bc = backend,key_type = 'AES128',mode='CBC'):\n\tif key_type == 'AES128':\n\t\talgo = algorithms.AES(key)\n\telif key_type == 'ChaCha20':\n\t\talgo = algorithms.ChaCha20(key,nonce=os.urandom(32))\n\telse:\n\t\traise('Error algorithm ' + key_type + ' not supported!')\n\tif mode == 'CBC':\n\t\tmode = modes.CBC(iv)\n\telif mode == 'GCM':\n\t\tmode = modes.GCM(iv)\n\telse :\n\t\traise('Error mode ' + mode + ' not supported!')\n\tcipher = Cipher(algo,mode,backend = bc)\n\treturn iv,key,cipher.encryptor()", "def __init__(self, block_cipher: BlockCipher, code_size: int):\n self.cipher = block_cipher\n self.code_size = code_size", "def cipher(input_bytes, expanded_key, n_r):\n\n state = generate_initial_state(input_bytes)\n state = add_round_key(state, expanded_key, 0)\n\n # Apply rounds of operations as stated in AES standard\n for round_no in range(1, n_r):\n state = sub_bytes(state)\n state = shift_rows(state)\n state = mix_columns(state)\n state = add_round_key(state, expanded_key, round_no * 4 * 4)\n\n state = sub_bytes(state)\n state = shift_rows(state)\n state = add_round_key(state, expanded_key, n_r * 4 * 4)\n\n return state", "def decryptAESCTR(key, nonce, ct):\n\tpt = b''\n\tcounter = 0\n\tfor ctBlock in chunks(ct, 16):\n\t\tblock = (int.from_bytes(nonce, byteorder='big') + counter).to_bytes(16, byteorder='big')\n\t\tencBlock = encryptAESBlock(key, block)\n\t\tpt += xor(ctBlock, encBlock)\t\t\n\t\tcounter += 1\n\treturn pt", "def aes128_ctr_cipher(string, nonce, key):\n cipher_string = b''\n # Divide input string in blocks of 16 bytes\n cipher_text_blocks = [string[i:i + 16] for i in range(0, len(string), 16)]\n for i in range(len(cipher_text_blocks)):\n # Calculate incremental nonce block for each input string block\n nonce_block = nonce + i.to_bytes(8, byteorder='little')\n nonce_matrix = string_to_matrix_states(nonce_block)[0]\n # Cipher nonce block with key\n nonce_matrix_cipher = aes128_RoundBlock(nonce_matrix, 
key)\n d = xor_states(nonce_matrix_cipher, string_to_matrix_states(cipher_text_blocks[i])[0])\n cipher_string += matrix_to_bytes(d)\n return cipher_string", "def __CreateCipher(self):\n is_data_avail = True\n if not self.__cipher:\n reqd_block_size = self.__key.block_size\n new_bytes_reqd = reqd_block_size - len(self.__encrypted_buffer)\n read_bytes, is_data_avail = self.__ReadBytes(new_bytes_reqd)\n if read_bytes:\n self.__encrypted_buffer += read_bytes\n if len(self.__encrypted_buffer) >= reqd_block_size:\n iv_bytes = self.__encrypted_buffer[:reqd_block_size]\n self.__encrypted_buffer = self.__encrypted_buffer[\n reqd_block_size:\n ]\n self.__hmac_stream.Update(iv_bytes)\n self.__cipher = AES.new(self.__key.key_bytes, AES.MODE_CBC,\n iv_bytes)\n return is_data_avail", "def encrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n ciphertext = aes.encrypt(text)\r\n return ciphertext", "def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in range(block_size)])\n return AES.new(second_key, AES.MODE_ECB).encrypt(pad((key.encode('ascii')), block_size))", "def decrypt_ctr(key, ciphertext):\n\tmessage = ''\n\tiv = ciphertext[0:16]\n\tfor i in range(16, len(ciphertext), 16):\n\t\tinputblock = ciphertext[i:i+16]\n\t\tcipher = AES.new(key, AES.MODE_ECB)\n\t\txorkey = cipher.encrypt(long_to_bytes(bytes_to_long(iv)+(i/16-1)))\n\t\tif len(inputblock) == 16:\n\t\t\tmessage += strxor(inputblock, xorkey)\n\t\telse:\n\t\t\tmessage += strxor(inputblock, xorkey[:len(inputblock)])\n\treturn message", "def cbc(in_file, out_file, block_size, key, op):\n with open(in_file, 'rb') as input: # Open files\n with open(out_file, 'wb') as output:\n block = [True, True] # This is just to get into the while loop\n prev_block = read_block(input, block_size)[0] # This is the IV\n output.write(prev_block)\n \n while block[1]: # Iterate through the rest of the input\n block = read_block(input, block_size)\n if block[0] != -1: # Make sure there is at least one char in there\n if (op == \"-e\"):\n block_xor = xor(block[0], prev_block) \n block_cipher = func(block_xor, key)\n output.write(block_cipher)\n prev_block = block_cipher\n elif (op == \"-d\"):\n block_xor = unfunc(block[0], key)\n block_message = xor(block_xor, prev_block)\n output.write(block_message)\n prev_block = block[0]", "def __init__(self, key, iv, do, ciphername='aes-256-cbc', tag_len=12, iv_len=7, tag=None):\n self.cipher = OpenSSL.get_cipher(ciphername)\n self.ctx = OpenSSL.EVP_CIPHER_CTX_new()\n if (do == 1 or do == 0):\n k = OpenSSL.malloc(key, len(key))\n IV = OpenSSL.malloc(iv, len(iv))\n if self.cipher == OpenSSL.get_cipher('aes-128-ccm') or \\\n self.cipher == OpenSSL.get_cipher('aes-128-gcm'):\n OpenSSL.EVP_CipherInit_ex(self.ctx, self.cipher.get_pointer(), 0, 0, 0, do)\n self.tag_len = tag_len\n self.iv_len = iv_len\n if do == 0:\n if tag is None or (tag is not None and len(tag) != tag_len):\n raise Exception(\"Invalid Tag Input...\")\n else:\n self.cipher_ctrl(tag_val=tag)\n else:\n self.cipher_ctrl()\n OpenSSL.EVP_CipherInit_ex(self.ctx, 0, 0, k, IV, do)\n else:\n OpenSSL.EVP_CipherInit_ex(\n self.ctx, self.cipher.get_pointer(), 0, k, IV, do)\n else:\n raise Exception(\"RTFM ...\")", "def encrypt(\r\n key: bytes,\r\n plain_text: str,\r\n) -> bytes:\r\n block_size = 16\r\n plain_text = _pad(plain_text, block_size)\r\n iv = os.urandom(block_size)\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n cipher_text = cipher.encrypt(plain_text.encode())\r\n return iv + cipher_text", "def gen_ciphertext(message: str) -> str:\r\n key 
= 1\r\n for i in range(26):\r\n ciphertext = cipher(key, message)\r\n yield f\"Key #{key}: {ciphertext}\"\r\n key += 1", "def __init__(self, block_cipher: BlockCipher, section_size: int, gamma_block_size: int):\n assert(block_cipher.key_size == 32)\n assert(block_cipher.block_size % 2 == 0)\n assert(block_cipher.key_size % block_cipher.block_size == 0)\n assert(section_size % block_cipher.block_size == 0)\n assert(block_cipher.block_size % gamma_block_size == 0)\n\n BlockCipherMode.__init__(self, block_cipher)\n self.section_size = section_size\n self.gamma_block_size = gamma_block_size", "def encrypt(self):\n # Generate a randomized initialization vector\n iv = Random.new().read(AES.block_size)\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n # Add a buffer so that the plaintext is a multiple of 16 characters in length\n pt_len = len(self.plaintext)\n buffer_size = AES.block_size - pt_len % AES.block_size\n strmsg = self.plaintext + \" \" * buffer_size\n return cipher.encrypt(str.encode(strmsg)), iv", "def poly1305_key_gen(key: bytes, nonce: bytes) -> bytes:\n\n poly = ChaCha(key, nonce)\n return poly.encrypt(bytes(32))", "def cbc_encrypt(pt, cipher, iv):\n\n ct = [iv]\n pt = chunks(pt, cipher.block_size)\n for i in range(len(pt)):\n ct += [cipher.encrypt(bytes(fixed_xor(pt[i], ct[i])))]\n return flatten(ct[1:])", "def __get_cipher(self):\n return Fernet(open(self.__key_file, 'rb').read())", "def __init__(self, key, msg0503):\n enkey1 = map(ord, AES.new(key).encrypt(msg0503[:16]))\n self.cipher = AES.new(\"\".join(\n map(chr, (enkey1[i] ^ ord(msg0503[i + 16]) for i in range(16)))))\n self.encrypt_seq = random.randint(0, 0xffff)", "def aes_cipher_from_key(key):\r\n return AES.new(key, AES.MODE_CBC, generate_aes_iv(key))", "def test_always_CBC(n):\n count = 0\n for _ in xrange(n):\n text = bytearray(['A'] * 100)\n key, iv = utils.gen_rand_key(), utils.gen_rand_key()\n code = utils.apply_CBC('encrypt', text, key, iv)\n blocks = utils.gen_blocks(code)\n count += 1 if utils.repeated_blocks(blocks, 3) else 0\n\n print 'Always CBC: % ECB in sample {:.0%}'.format(count / n)\n assert count == 0\n return True", "def __Cipher(self, selector):\n assert selector in self.OP_TYPES, 'Invalid selector :%s' % selector\n if selector == self.OP_ACTIVE and (len(self.ciphers.keys()) > 1 or\n not len(self.ciphers.keys())):\n assert 0, 'If both encryption and decryption used then selector must \\\n be OP_ENCRYPT or OP_DECRYPT and at least 1 must be active'\n\n cipher = None\n if selector == self.OP_ACTIVE:\n # should only be one cipher active\n cipher = self.ciphers.values()[0]\n else:\n cipher = self.ciphers.get(selector)\n # have we been created a cipher for this selector yet?\n if not cipher:\n # no, so set it up as requested\n\n # convert between AES and EVP modes\n # NOTE: AES auto-selects based on key size using the same mode, but\n # EVP requires different mode strings for each key size (in bits)\n mode = 'aes_%s_cbc' % (self.key_size * 8)\n cipher = EVP.Cipher(alg=mode,\n key=self.key_bytes,\n iv=self.IV,\n op=selector,\n padding=0)\n self.ciphers[selector] = cipher\n return cipher", "def encrypt_block(self, plaintext):\n assert len(plaintext) == 16\n plain_state = bytes2matrix(plaintext)\n\n add_round_key(plain_state, self._key_matrices[0])\n\n for i in range(1, self.n_rounds):\n sub_bytes(plain_state)\n shift_rows(plain_state)\n mix_columns(plain_state)\n add_round_key(plain_state, self._key_matrices[i])\n\n sub_bytes(plain_state)\n 
shift_rows(plain_state)\n add_round_key(plain_state, self._key_matrices[-1])\n\n return matrix2bytes(plain_state)", "def encrypt_block(input_bytes, key):\n return AES.new(key, AES.MODE_ECB).encrypt(input_bytes)", "def choose_cipher(cls):\n while True:\n\n crypt = input(\"Would you like to encrypt or decrypt?\").lower()\n print(crypt)\n if (crypt != \"encrypt\") and (crypt != \"decrypt\"):\n crypt = 0\n print(\"Invalid Selection\")\n else:\n break\n\n while True:\n\n cipher_choice = input(\"Select Cipher: \\n\"\n \"A) Affine\\n\"\n \"B) Atbash\\n\"\n \"C) Keyword\\n\"\n ).lower()\n\n if cipher_choice == (\"a\" or \"a)\" or \"affine\"):\n cipher_choice = \"affine\"\n break\n elif cipher_choice == (\"b\" or \"b)\" or \"atbash\"):\n cipher_choice = \"atbash\"\n break\n elif cipher_choice == (\"c\" or \"c)\" or \"keyword\"):\n cipher_choice = \"keyword\"\n break\n\n else:\n print(\"Invalid Selection\")\n while True:\n message = input(\"Input your message: \")\n if (len(message) >= 1):\n break\n else:\n print(\"Invalid Message\")\n while True:\n otp = input(\"Enter one time pad: \")\n if crypt == \"encrypt\" or crypt == \"e\":\n if (len(message) % 5):\n otp_length = (len(message) + (5 - (len(message) % 5)))\n else:\n otp_length = (len(message))\n if len(otp) >= otp_length:\n break\n else:\n print(\"otp for this message must be \"\n \"{} characters long\".format(otp_length))\n else:\n break\n return cls(crypt, cipher_choice, otp, message)", "def _derive_crypto(self, pad_string): # XXX consider secret_seed\n secret = self.mac(pad_string,\n self.initiator_seed + self.responder_seed,\n self.shared_secret)\n return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])", "def cbc_bit_flip(encryption_oracle):\n\n # Get the length of a block and the length of the prefix\n block_length = find_block_length(encryption_oracle.encrypt)\n prefix_length = find_prefix_length(encryption_oracle.encrypt, block_length)\n\n # Compute the number of bytes to add to the prefix to make its length a multiple of block_length\n additional_prefix_bytes = (block_length - (prefix_length % block_length)) % block_length\n total_prefix_length = prefix_length + additional_prefix_bytes\n\n # Compute the number of bytes to add to the plaintext to make its length a multiple of block length\n plaintext = \"?admin?true\"\n additional_plaintext_bytes = (block_length - (len(plaintext) % block_length)) % block_length\n\n # Make the plaintext long one block_length and encrypt it\n final_plaintext = additional_plaintext_bytes * '?' + plaintext\n ciphertext = encryption_oracle.encrypt(additional_prefix_bytes * '?' 
+ final_plaintext)\n\n # Because XORing a byte with itself produces zero, we can produce the byte that we want\n # by changing the bytes of the block before the plaintext\n semicolon = ciphertext[total_prefix_length - 11] ^ ord('?') ^ ord(';')\n equals = ciphertext[total_prefix_length - 5] ^ ord('?') ^ ord('=')\n\n # Put the pieces of our forged ciphertext together to generate the full ciphertext\n forced_ciphertext = ciphertext[:total_prefix_length - 11] + bytes([semicolon]) + \\\n ciphertext[total_prefix_length - 10: total_prefix_length - 5] + \\\n bytes([equals]) + ciphertext[total_prefix_length - 4:]\n\n return forced_ciphertext", "def ctr_process(msg, nonce, cnt, key, rounds):\n ivcount = nonce + bc.int_to_binary(cnt, 8)\n x = feistel_encrypt(ivcount,key,rounds)\n y = xor_compare(msg,x)\n return y", "def encryptAESCBCPKCS5(key, iv, pt):\n\tpt = padPKCS5(pt)\n\tct = b''\n\txorSource = iv\n\tfor ptBlock in chunks(pt, 16):\n\t\tb = xor(ptBlock, xorSource)\n\t\tctBlock = encryptAESBlock(key, b)\n\t\tct += ctBlock\n\t\txorSource = ctBlock\n\treturn ct", "def aes_ecb(key):\n return AES.new(key, AES.MODE_ECB)", "def _encrypt_block(self, v, k):\n DEBUG = GLOBAL_DEBUG and True\n if DEBUG: print \"_encrypt_block()\"\n\n v0 = struct.unpack(\"<L\", v[0:4])[0]\n v1 = struct.unpack(\"<L\", v[4:8])[0]\n delta = CRYPTO_DETLA; _sum = 0\n k0 = struct.unpack(\"<L\", k[ 0: 4])[0]\n k1 = struct.unpack(\"<L\", k[ 4: 8])[0]\n k2 = struct.unpack(\"<L\", k[ 8:12])[0]\n k3 = struct.unpack(\"<L\", k[12:16])[0]\n\n for i in range(CRYPTO_ROUNDS):\n _sum = uint32_t(_sum + delta)\n\n tmp0 = uint32_t(uint32_t(v1 << 6) + k0)\n tmp1 = uint32_t(v1 + _sum)\n tmp2 = uint32_t(uint32_t(v1 >> 7) + k1)\n v0 = uint32_t(v0 + uint32_t(tmp0 ^ tmp1 ^ tmp2))\n\n tmp0 = uint32_t(uint32_t(v0 << 6) + k2)\n tmp1 = uint32_t(v0 + _sum)\n tmp2 = uint32_t(uint32_t(v0 >> 7) + k3)\n v1 = uint32_t(v1 + uint32_t(tmp0 ^ tmp1 ^ tmp2))\n\n return struct.pack(\"<LL\", v0, v1)", "def encrypt(self, plaintext, output=None):\n\n if self.encrypt not in self._next:\n raise TypeError(\"encrypt() cannot be called after decrypt()\")\n self._next = [self.encrypt]\n \n if output is None:\n ciphertext = create_string_buffer(len(plaintext))\n else:\n ciphertext = output\n \n if not is_writeable_buffer(output):\n raise TypeError(\"output must be a bytearray or a writeable memoryview\")\n \n if len(plaintext) != len(output):\n raise ValueError(\"output must have the same length as the input\"\n \" (%d bytes)\" % len(plaintext))\n\n result = raw_ctr_lib.CTR_encrypt(self._state.get(),\n c_uint8_ptr(plaintext),\n c_uint8_ptr(ciphertext),\n c_size_t(len(plaintext)))\n if result:\n if result == 0x60002:\n raise OverflowError(\"The counter has wrapped around in\"\n \" CTR mode\")\n raise ValueError(\"Error %X while encrypting in CTR mode\" % result)\n \n if output is None:\n return get_raw_buffer(ciphertext)\n else:\n return None", "def aes_ctr_decrypt(self, key: bytes, encrypted_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.decryptor()\n return enc.update(encrypted_data) + enc.finalize()", "def encrypt_data_ctr(\n key: bytes,\n counter_0: bytes,\n mac_cbc: bytes,\n payload: bytes = b\"\",\n) -> tuple[bytes, bytes]:\n s_cipher = Cipher(algorithms.AES(key), modes.CTR(counter_0))\n s_encryptor = s_cipher.encryptor()\n mac = s_encryptor.update(mac_cbc)\n encrypted_data = s_encryptor.update(payload) + s_encryptor.finalize()\n return (encrypted_data, mac)", "def set_cipher(self, key_name, 
hint):\n message_key_types.set_cipher(self.shared_key, self.nonce, key_name, hint)", "def calculate_message_authentication_code_cbc(\n key: bytes,\n additional_data: bytes,\n payload: bytes = b\"\",\n block_0: bytes = bytes(16),\n) -> bytes:\n blocks = (\n block_0 + len(additional_data).to_bytes(2, \"big\") + additional_data + payload\n )\n y_cipher = Cipher(algorithms.AES(key), modes.CBC(bytes(16)))\n y_encryptor = y_cipher.encryptor()\n y_blocks = (\n y_encryptor.update(byte_pad(blocks, block_size=16)) + y_encryptor.finalize()\n )\n # only calculate, no ctr encryption\n return y_blocks[-16:]", "def createNewBlock(self, nonce, previousBlockHash, hash):\n newBlock = Block(len(self.chain), self.pendingTransactions, nonce, hash, previousBlockHash)\n self.pendingTransactions = []\n self.chain.append(newBlock)\n return newBlock", "def _generateblocks(self, n):\n if self.key is None:\n raise AssertionError('generator must be seeded before use')\n result = b''\n for i in range(n):\n result += self._cipher.encrypt(self.counter())\n return result", "def decryptAESCTR(key, iv, ciphertext):\n cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend())\n decryptor = cipher.decryptor()\n return decryptor.update(ciphertext) + decryptor.finalize()", "def reset_cbc(self):\n if not self.block_count:\n raise ValueError(\"cannot reset cbc until block_count is set\")\n cbc_len = np.prod(self.block_count)\n self.cbc = np.ones(cbc_len, dtype=np.bool)", "def update_cipher(self, input):\n\n if self.cipher is None:\n \"\"\" We directly put the input in the lsfr\n Generally, at the beginning, the input correspond\n to the xoring of the uid, key and nonce Nt \"\"\"\n self.cipher = input\n else:\n \"\"\" We update the state of the lfsr by\n xoring the lfsr with the input. For the moment,\n the feedback bits generated from g(x) are \n not taken in account. But it should be at the \n initialization step only ... 
\"\"\" \n self.cipher = self.cipher ^ input", "def encrypt( raw, key, iv ):\n result = ''\n tmp_iv = iv \n text = pad(raw)\n\n for i in xrange(0, len(text) / BS):\n lower_bound = i * 16\n upper_bound = (i+1) * 16\n \n tmp = AES.new(key, AES.MODE_OFB, tmp_iv).decrypt( text[lower_bound:upper_bound] )\n tmp_iv = tmp\n result += tmp\n\n return result", "def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def encrypt_ecb_or_cbc(bytes_str: bytes, key: bytes) -> bytes:\n prepend_bytes = os.urandom(5 + int(os.urandom(1)[0] / (2 ** 8 / 6)))\n append_bytes = os.urandom(5 + int(os.urandom(1)[0] / (2 ** 8 / 6)))\n encryptable_bytes = b''.join([prepend_bytes, bytes_str, append_bytes])\n if os.urandom(1)[0] > 127:\n return AesEcbCipher(key).encrypt(encryptable_bytes)\n else:\n return AesCbcCipher(key, os.urandom(len(key))).encrypt(encryptable_bytes)", "def ctr_encrypt(pt_bin_list, keys, rounds):\n msg = pt_bin_list\n nonce = generate_random_binary(len(pt_bin_list[0])-8) # Initialization Vector\n counter = range(0,len(msg))\n enc_result = \"\"\n\n with multiprocessing.Pool() as p:\n enc_result = p.starmap(ctr_process, zip(msg, repeat(nonce), counter, keys, repeat(rounds)))\n\n enc_result.insert(0,nonce+\"00000000\") # Store padded IV to the start of ciphertext\n return enc_result", "def new(cls, cipher_list_, name='NewOnionCipher'):\n if isinstance(cipher_list_, str):\n raise ValueError('cipher_list should be a list-like thing')\n try:\n for cipher in cipher_list_:\n if not issubclass(cipher, BaseCipher):\n raise ValueError('Cipher list should contain BaseCipher ' +\n 'subclasses.')\n except TypeError:\n raise ValueError('cipher_list should be a list-like thing')\n\n return type(name, (cls,), {'cipher_list': cipher_list_})", "def decryptAESCBCPKCS5(key, iv, ct):\n\tpt = b''\n\txorSource = iv\n\tfor ctBlock in chunks(ct, 16):\n\t\tdt = decryptAESBlock(key, ctBlock)\n\t\tpt += xor(dt, xorSource)\n\t\txorSource = ctBlock\n\treturn unpadPKCS5(pt)", "def encrypt(self, plainText):\n encryptor = RijndaelCbc(\n self.key,\n self.encryptIV,\n padding=ZeroPadding(BLOCK_SIZE),\n block_size=BLOCK_SIZE,\n )\n encText = encryptor.encrypt(plainText)\n self.encryptIV = encText[-BLOCK_SIZE:]\n return encText", "def cbc_encrypt(pt_bin_list, keys, rounds):\n bsize = len(pt_bin_list[0])\n ivector = generate_random_binary(bsize) # Initialization Vector\n enc_result = []\n msg = pt_bin_list\n\n enc_result.append(feistel_encrypt(xor_compare(msg[0],ivector),keys[0],rounds))\n if len(msg) > 1:\n for i in range(1,len(msg)):\n enc_result.append(feistel_encrypt(xor_compare(msg[i], enc_result[i-1]),keys[i],rounds))\n enc_result.insert(0,ivector) # Store IV to the start of ciphertext\n return enc_result", "def __init__(self,**kwargs):\n self.msg = kwargs.get('msg','')\n self.shift = kwargs.get('shift','')\n op = kwargs.get('op', False)\n if op:\n try:\n op = getattr(self,op)\n except AttributeError as e: \n raise CipherError(\"valid operations: (encode|decode).\")\n op()\n print \"cipher={c}|key={s}|{r}\".format(c=self.__module__.split('.')[2],\n s=self.shift,\n r=self.result)", "def encrypt(cls, plaintext, aad, key, iv):", "def __init__(self, encryption_method: str, encryption_key_size: int = 32, 
encryption_key: bytes = None,\r\n block_size: int = 32, block_mode: str = BlockMode.ECB):\r\n self.__encryption_method = encryption_method\r\n self.__encryption_key_size = encryption_key_size\r\n self.__encryption_key = encryption_key\r\n self.__block_size = block_size\r\n self.__block_mode = block_mode\r\n\r\n if self.__encryption_key is None:\r\n self.__randomize_key_on_every_encryption = True\r\n else:\r\n self.__randomize_key_on_every_encryption = False\r\n\r\n # Generate the next key to be used\r\n if self.__randomize_key_on_every_encryption:\r\n self.__encryption_key = get_random_bytes(self.__encryption_key_size)", "def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def aes_enc_dec(self, key, iv, inputVal):\n\n\t\taes = Cipher(\"AES-128-CTR\")\n\t\tenc = aes.enc(key, iv)\n\t\toutput = enc.update(inputVal)\n\t\toutput += enc.finalize()\n\t\treturn output", "def encipher(self):\n ciphertext = \"\"\n for pt, key_char in zip(self.text, self.key):\n char_index = self.char_block.alphabet.index(pt)\n ciphertext += self.char_block.rows[key_char][char_index]\n print(ciphertext)", "def repeating_key_xor(plaintext, key):\n ciphertext = ''\n i = 0\n\n for byte in plaintext:\n ciphertext += chr(byte ^ key[i])\n\n i = (i + 1) % len(key)\n return ciphertext", "def encryptAESBlock(key, pt):\n\tif len(pt) != 16 and len(pt) != 32:\n\t\traise Exception(\"Plaintext is not length 16 or 32\")\n\tcipher = AES.new(key, AES.MODE_ECB)\n\treturn cipher.encrypt(pt)", "def encrypt(inp):\n # prepare plaintext\n prefix = \"comment1=cooking%20MCs;userdata=\"\n suffix = \";comment2=%20like%20a%20pound%20of%20bacon\"\n pt = inp.replace(\";\", \"\").replace(\"=\", \"\") # remove invalid character\n pt = prefix + pt + suffix # add prefix and suffix\n pt_encoded = pt.encode(\"utf-8\")\n pt_padded = pkcs7.add(pt_encoded, aes.S_BLOCK)\n\n # encrypt\n ct = aes.cbc_encrypt(pt_padded, KEY, IV)\n\n return ct", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def encrypt(plaintext):\n # Pad plaintext\n plaintext = pad(plaintext)\n\n # AES encrypt\n iv = Random.new().read(BS)\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return iv + aes.encrypt(plaintext)", "def crypt(key, data, iv):\n return xtea.crypt(key, data, iv)", "def __init__(self, ciphertext):\n\n slice_index = None # Replace None\n self.preamble = ciphertext[:slice_index]\n self.ciphertext = ciphertext[slice_index:]", "def encrypt_aes(msg, key, iv):\r\n #start timer\r\n start = timeit.default_timer()\r\n\r\n #converting key to bytes from hex\r\n key = bytes.fromhex(key)\r\n msg = pad(msg)\r\n obj = AES.new(key, AES.MODE_CBC, iv)\r\n ciphertxt = obj.encrypt(msg)#ciphertxt will be in 'bytes'\r\n\r\n #converting ciphertxt into hexadecimal\r\n ciphertxt = ciphertxt.hex()\r\n\r\n print(\"Ciper is: \",ciphertxt)\r\n\r\n #stop timer\r\n stop = timeit.default_timer()\r\n print('Encryption Running Time: ', stop-start)\r\n \r\n return ciphertxt", "def seal(self, nonce: bytes, plaintext: bytes, data: bytes = b'') -> bytes:\n\n if len(nonce) != 12:\n raise ValueError('Nonce must be 12 bytes large')\n\n otk = self.poly1305_key_gen(key=self.key, nonce=nonce)\n\n ciphertext = ChaCha(key=self.key, 
nonce=nonce, counter=1).encrypt(plaintext)\n\n mac_data = b''.join((\n data, self.pad16(data),\n ciphertext, self.pad16(ciphertext),\n pack('<Q', len(data)),\n pack('<Q', len(ciphertext))\n ))\n\n tag = Poly1305(otk).create_tag(mac_data)\n\n return ciphertext + tag", "def CBCMACbasedOnAES(message, key):\n\n # Convert the message into bytes\n message1 = bytes(message)\n # Convert the key into bytes\n key1 = bytes(key)\n\n # Create the AES object\n aes_obj = AES.new(key1, AES.MODE_CBC, iv)\n # Encrypt the message\n MAC = aes_obj.encrypt(message1)\n # Return the MAC of the message\n return MAC", "def symcipher_from_secret(\n cls,\n secret,\n algorithm=TPM2_ALG.AES,\n mode=TPM2_ALG.CFB,\n nameAlg=TPM2_ALG.SHA256,\n objectAttributes=(\n TPMA_OBJECT.DECRYPT | TPMA_OBJECT.SIGN_ENCRYPT | TPMA_OBJECT.USERWITHAUTH\n ),\n seed=None,\n ):\n nbits = len(secret) * 8\n if algorithm == TPM2_ALG.SM4 and nbits != 128:\n raise ValueError(f\"invalid key size, expected 128, got {nbits}\")\n elif nbits not in (128, 192, 256):\n raise ValueError(\n f\"invalid key size, expected 128, 192 or 256 bits, got {nbits}\"\n )\n pub = TPMT_PUBLIC(\n type=TPM2_ALG.SYMCIPHER, nameAlg=nameAlg, objectAttributes=objectAttributes\n )\n pub.parameters.symDetail.sym.keyBits.sym = nbits\n pub.parameters.symDetail.sym.algorithm = algorithm\n pub.parameters.symDetail.sym.mode.sym = mode\n digsize = get_digest_size(nameAlg)\n if seed and len(seed) != digsize:\n raise ValueError(\n f\"invalid seed size, expected {digsize} but got {len(seed)}\"\n )\n elif not seed:\n seed = secrets.token_bytes(digsize)\n pub.unique.sym = calculate_sym_unique(nameAlg, secret, seed)\n priv = cls(sensitiveType=TPM2_ALG.SYMCIPHER)\n priv.sensitive.bits = secret\n priv.seedValue = seed\n return (priv, pub)", "def decryptAESBlock(key, ct):\n\tif len(ct) != 16 and len(ct) != 32:\n\t\traise Exception(\"Ciphertext is not length 16 or 32\")\n\tcipher = AES.new(key, AES.MODE_ECB)\n\treturn cipher.decrypt(ct)", "def cz(control: QubitInput, target: QubitInput) -> Instruction:\n return Instruction(CZ(), target=[control, target])", "def symcipher_from_secret(\n cls,\n secret,\n algorithm=TPM2_ALG.AES,\n mode=TPM2_ALG.CFB,\n nameAlg=TPM2_ALG.SHA256,\n objectAttributes=(\n TPMA_OBJECT.DECRYPT | TPMA_OBJECT.SIGN_ENCRYPT | TPMA_OBJECT.USERWITHAUTH\n ),\n seed=None,\n ):\n sa, pa = TPMT_SENSITIVE.symcipher_from_secret(\n secret, algorithm, mode, nameAlg, objectAttributes, seed\n )\n priv = TPM2B_SENSITIVE(sensitiveArea=sa)\n pub = TPM2B_PUBLIC(publicArea=pa)\n return (priv, pub)", "def cipher_feedback_mode_encode(msg, CEK, IV = int(0).to_bytes(8, 'big')):\n assert(len(CEK) == 32)\n assert(len(IV) == 8)\n last_block = IV\n res = b''\n for i in range(0, len(msg), 8):\n gamma = GOST2814789ECB_encode(last_block, CEK)\n block = msg[i: min(i + 8, len(msg))]\n encrypted_block = b''\n for j in range(len(block)):\n encrypted_block += int(block[j] ^ gamma[j]).to_bytes(1, 'big')\n res += encrypted_block\n last_block = encrypted_block\n return res", "def create_from_transaction(tx, prev_hash):\n\n tx_hash = HashAssist.hash_value(tx.to_string_for_hashing())\n\n print(\"Mining nonce....\")\n nonce = proof.mint(prev_hash + tx_hash, WORK_FACTOR)\n header_hash = HashAssist.hash_value(prev_hash + tx_hash + nonce)\n\n return Block(header_hash, prev_hash, nonce, tx_hash, tx)", "def encode(key, plain):\n print(\"ciphertext: \", end=\"\")\n\n # used variables\n pos = 0\n key_len = len(key)\n\n # loop over every character in the text\n for char in plain:\n key_pos = pos % key_len\n # leave 
non-alphabetical characters alone\n if not char.isalpha():\n print(char, end=\"\")\n # cipher characters\n elif char.isupper():\n cipher = chr((char_to_number(char) + char_to_number(key[key_pos])) \\\n % 26 + ord(\"A\"))\n \n print(cipher, end=\"\")\n pos += 1\n else:\n cipher = chr((char_to_number(char) + char_to_number(key[key_pos])) \\\n % 26 + ord(\"a\"))\n \n print(cipher, end=\"\")\n pos += 1\n\n print()", "def __init__(self, key):\n self._block_size = AES.block_size\n self._key = hashlib.sha256(get_as_bytes(key)).digest()", "def cipher_feedback(self):", "def main():\n\n f = sys.stdin\n\n # skip first message\n f.readline()\n\n m_frame = bitstring_to_bytes(f.readline()[-113:-17])\n c_frame = bitstring_to_bytes(f.readline()[-160:-16])\n\n tmp = xor(key, c_frame[11:17])\n aes = AES.new(tmp, AES.MODE_ECB)\n\n Pd_d = bytes.fromhex('00' * 6) + m_frame[1:11]\n Pd_ = aes.encrypt(Pd_d)\n\n parameters = m_frame[11:15]\n Pd = xor(Pd_, parameters)\n P = aes.encrypt(Pd)\n\n sys.stdout.write(bytes_to_bitstring(P))", "def __init__(self):\n self.key = b'FSMF73R873YM187R'\n self.signer = AES.new(self.key, AES.MODE_EAX)\n self.verifier = AES.new(self.key, AES.MODE_EAX, nonce=self.signer.nonce)", "def create_block(self, complete_hash, nonce):\n print(\"Creating block with hash: '%s'\" % complete_hash)\n block = Block(complete_hash, nonce)\n for transaction in self.transactions:\n block.add_transaction(transaction)\n return block", "def decrypt_cbc(key, ciphertext):\n\tmessage = ''\n\tfor i in range(0, len(ciphertext)/16 - 1):\n\t\tiv = ciphertext[i*16:(i+1)*16]\n\t\tinputblock = ciphertext[(i+1)*16:(i+2)*16]\n\t\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\t\tmessage +=cipher.decrypt(inputblock)\n\tif ord(message[-1]) <=16:\n\t\tmessage = message[:-ord(message[-1])]\n\treturn message", "def make_key(password, iterations=ITERATIONS):\n key = PBKDF2(password, SALT, dkLen=KEY_LENGTH_BYTES, count=iterations)\n return key", "def create_level1_cipher(self, m, pub):\n r = random.randrange(256, pub.n)\n c = CipherLevel1(1,1)\n c.prepare_message_given_random(pub, m, r)\n self.alpha = c\n self.beta = r", "def aes(encrypt, key, data):\n cipher = AES.new(key, AES.MODE_CBC, get_zero_vector(16))\n if encrypt:\n return cipher.encrypt(data)\n else:\n return cipher.decrypt(data)", "def iv():\n return chr(0) * 16", "def cbc_decrypt(ct, cipher, iv):\n\n pt = []\n ct = [iv] + chunks(ct, cipher.block_size)\n for i in range(1, len(ct)):\n pt += [fixed_xor(ct[i-1], cipher.decrypt(ct[i]))]\n return flatten(pt)", "def init(cls, key: bytes, passphrase: str, otp: YubikeyOTP, **kwargs):\n # Pick out the user key salt length\n user_salt_len = kwargs.pop('user_salt_len', 32)\n\n # Initialise with a dummy context for now\n context = cls(key=key, iv=b'', context=b'', **kwargs)\n\n # Derive the key used for the shared context\n context_key = context._derive_key(passphrase, otp)\n\n # Save the new context and return it.\n context._update_context(context_key, otp, token_bytes(user_salt_len))\n return context" ]
[ "0.7619631", "0.7126915", "0.67098886", "0.65550065", "0.6327081", "0.62329865", "0.6143115", "0.60997117", "0.60525525", "0.59431106", "0.59406626", "0.588603", "0.5855224", "0.5798523", "0.5777665", "0.573847", "0.5719348", "0.5686902", "0.5685684", "0.56328213", "0.55945", "0.5580485", "0.5573387", "0.555133", "0.550119", "0.54734033", "0.54612625", "0.5457154", "0.54376197", "0.5429617", "0.54231066", "0.53985167", "0.5332388", "0.52984935", "0.5281436", "0.52751523", "0.5269133", "0.5268279", "0.5252706", "0.5225085", "0.51711226", "0.51667166", "0.5145408", "0.51073486", "0.51033247", "0.50963396", "0.5082023", "0.50732064", "0.5032077", "0.5020628", "0.50087255", "0.5004025", "0.49845263", "0.4981747", "0.49764264", "0.49519092", "0.4951461", "0.49512458", "0.4937194", "0.49256587", "0.49189344", "0.49026078", "0.4896648", "0.4874892", "0.48703992", "0.48676428", "0.48652938", "0.48630482", "0.48586974", "0.48558953", "0.48544613", "0.48452085", "0.48365587", "0.48252788", "0.4823386", "0.48209602", "0.4814462", "0.48143122", "0.47959736", "0.4792302", "0.47836274", "0.477921", "0.47766858", "0.47703183", "0.47603247", "0.47582296", "0.4756124", "0.4752101", "0.47515738", "0.47507754", "0.47504056", "0.4749779", "0.4748194", "0.47449562", "0.47433957", "0.47418588", "0.47365636", "0.47352296", "0.47351387", "0.47339156" ]
0.714244
1
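The negatives in the record above cluster around AES cipher construction (key, IV/nonce, and mode selection). A minimal sketch of that initialization pattern, assuming PyCryptodome is available; the key size, IV handling, and the CBC mode choice are illustrative rather than taken from any single snippet:

    # Minimal AES-CBC initialization sketch (assumes PyCryptodome; values illustrative)
    from Crypto.Cipher import AES
    from Crypto.Random import get_random_bytes

    key = get_random_bytes(16)               # AES-128 key (illustrative size)
    iv = get_random_bytes(16)                # fresh IV for every message
    cipher = AES.new(key, AES.MODE_CBC, iv)  # one cipher object per message
    ciphertext = cipher.encrypt(b"sixteen byte msg")  # CBC input must be block-aligned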
Encrypt data with the key and the parameters set at initialization.
def encrypt(self, plaintext, output=None): if self.encrypt not in self._next: raise TypeError("encrypt() cannot be called after decrypt()") self._next = [self.encrypt] if output is None: ciphertext = create_string_buffer(len(plaintext)) else: ciphertext = output if not is_writeable_buffer(output): raise TypeError("output must be a bytearray or a writeable memoryview") if len(plaintext) != len(output): raise ValueError("output must have the same length as the input" " (%d bytes)" % len(plaintext)) result = raw_ctr_lib.CTR_encrypt(self._state.get(), c_uint8_ptr(plaintext), c_uint8_ptr(ciphertext), c_size_t(len(plaintext))) if result: if result == 0x60002: raise OverflowError("The counter has wrapped around in" " CTR mode") raise ValueError("Error %X while encrypting in CTR mode" % result) if output is None: return get_raw_buffer(ciphertext) else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encrypt_data(self, params):\n raise NotImplementedError", "def encrypt(self, sensor_data):\r\n \r\n # set encryption parameters\r\n encryption1 = aes(self.ivkey, 2, self.staticiv)\r\n encryption2 = aes(self.datakey, 2, self.iv)\r\n # encrypt data\r\n self.encrypted_data = encryption2.encrypt(sensor_data) \r\n self.encrypted_iv = encryption1.encrypt(self.iv)\r\n self.encrypted_nodeid = encryption2.encrypt(self.nodeid)\r\n \r\n self.iv = bytes(random.getrandbits(8) for _ in range(16)) # changes every time\r", "def aes_encrypt(data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = pad(data)\r\n return cipher.encrypt(padded_data)", "def encrypt_data ( aes_key, data ) :\n salt = Crypto.Random.new( ).read( Crypto.Cipher.AES.block_size )\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n encrypted_data = cipher.encrypt( data )\n\n return encode_data( salt + encrypted_data )", "def encryptData(self, key, iv, data, align = True):\r\n\t\tif((len(data) % self.align) != 0 and align):\r\n\t\t\treturn AES.new(key, AES.MODE_CBC, iv).encrypt(data + (\"\\x00\" * (self.align - (len(data) % self.align))))\r\n\t\telse:\r\n\t\t\treturn AES.new(key, AES.MODE_CBC, iv).encrypt(data)", "def encrypt(self, key, data, mode, padding):\n # pylint: disable=unused-argument,no-self-use\n if hasattr(key, \"private_bytes\"):\n _key = key.public_key()\n else:\n _key = key\n try:\n return _key.encrypt(data, padding.build())\n except Exception:\n error_message = \"Encryption failed\"\n _LOGGER.exception(error_message)\n raise EncryptionError(error_message)", "def Encrypt(self, data):\n data = self.__Pad(data)\n iv_bytes = util.RandBytes(self.block_size)\n ciph_bytes = AES.new(self.key_bytes, AES.MODE_CBC, iv_bytes).encrypt(data)\n msg_bytes = self.Header() + iv_bytes + ciph_bytes\n sig_bytes = self.hmac_key.Sign(msg_bytes) # Sign bytes\n return msg_bytes + sig_bytes", "def _encrypt(data):\n cipher = AES.new(bytes(_AES_KEY), AES.MODE_CBC, bytes(_AES_IV))\n\n # Pad to 16 bytes for AES CBC\n for i in range(16 - (len(data) % 16)):\n data += b'\\0'\n\n return cipher.encrypt(data)", "def encrypt_and_encode(data, key):\r\n return base64.urlsafe_b64encode(aes_encrypt(data, key))", "def encrypt(data, key):\n data = six.ensure_binary(data)\n data = privy.hide(secret=data, password=key)\n data = six.ensure_text(data)\n return data", "def encrypt_data(data, encryption_key):\n assert isinstance(data, str)\n obj = AES.new(encryption_key, AES.MODE_CBC, 'This is an IV456')\n padded = Pad.pad(data.encode())\n ciphertext = obj.encrypt(padded)\n return ciphertext.hex()", "def encryptByteArray(self, data, keyobj):\n raise NotImplementedError(\"Is abstract\")", "def _encrypt_data_key(self, data_key, algorithm, encryption_context):\n # Raw key string to EncryptedData\n encrypted_wrapped_key = self.config.wrapping_key.encrypt(\n plaintext_data_key=data_key.data_key, encryption_context=encryption_context\n )\n # EncryptedData to EncryptedDataKey\n return aws_encryption_sdk.internal.formatting.serialize.serialize_wrapped_key(\n key_provider=self.key_provider,\n wrapping_algorithm=self.config.wrapping_key.wrapping_algorithm,\n wrapping_key_id=self.key_id,\n encrypted_wrapped_key=encrypted_wrapped_key,\n )", "def Encrypt(self, data):\n\n if len(data) % 16 != 0:\n data += ' ' * (16 - len(data) % 16)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n data = es.encrypt(data)\n data = base64.b64encode(data)\n return data", "def encrypt(self, data):\n if not data:\n return ''\n data = 
self._pad_data(data)\n return self._crypt(data, self.ENCRYPT)", "def encrypt(self, data):\n cipher_rsa = PKCS1_OAEP.new(self.key)\n return cipher_rsa.encrypt(data)", "def encrypt(self, public_key, data):\n d_data = Data(data)\n out = Buffer(self.encrypted_len(public_key=public_key, data_len=len(data)))\n status = self._lib_vscf_ecc.vscf_ecc_encrypt(self.ctx, public_key.c_impl, d_data.data, out.c_buffer)\n VscfStatus.handle_status(status)\n return out.get_bytes()", "def encrypt_data(self, filename, data, master_pass, website): \n\n \"\"\"Concatenated extra characters in the case that the master password\n is less than 16 characters. However, this isn't a big safety trade off\n as the full length master password is hashed and checked for.\"\"\"\n concatenated_master = master_pass + \"================\"\n\n key = concatenated_master[:16].encode(\"utf-8\")\n\n cipher = AES.new(key, AES.MODE_EAX)\n\n \"\"\"A value that must never be reused for any other encryption done with\n this key saved alongside encrypted password. Converted to hexadecimal\n to be saved in DB. Later converted back to bytes to decode data\"\"\"\n nonce = cipher.nonce.hex()\n\n data_to_encrypt = data.encode(\"utf-8\")\n # again, bytes is invalid data for JSON so we convert it\n encrypted_data = cipher.encrypt(data_to_encrypt).hex()\n\n self.__save_password(filename, encrypted_data, nonce, website)", "def encrypt(self, data):\n\n key_public = RsaPublicKey.Read(self.crypt_public)\n return b64encode(key_public.Encrypt(data))", "def encrypt(data=None, pairing_group=None, pk=None, policy=None, debug=0):\n\n # Check if data is set\n if data is None:\n logging.error('encrypt_seed_key_len data exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in encrypt_seed_key_len data')\n raise Exception\n\n # Check if pk is set\n if pk is None:\n logging.error('encrypt_seed_key_len pk_file exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in encrypt_seed_key_len pk_file')\n raise Exception\n\n # Check if policy is set\n if policy is None:\n logging.error('encrypt_seed_key_len policy exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in encrypt_seed_key_len policy')\n raise Exception\n\n if debug: # ONLY USE FOR DEBUG\n print('DATA = (%s) %s' % (type(data), data))\n print('PK = (%s) %s' % (type(pk), pk))\n print('POLICY = (%s) %s' % (type(policy), policy))\n\n # Encrypt data with CP-ABE\n cpabe = CPabe_BSW07(pairing_group)\n enc_data = cpabe.encrypt(pk, data, policy)\n\n if debug: # ONLY USE FOR DEBUG\n print('ENC DATA WITH POLICY = (%d) %s' % (len(enc_data), enc_data))\n\n # Remove policy from encrypted data\n enc_data.pop('policy')\n\n if debug: # ONLY USE FOR DEBUG\n print('ENCRYPTED DATA = (%d) %s' % (len(enc_data), enc_data))\n\n return enc_data", "async def encrypt(self, data, sequence_no):\n\t\treturn self.SEAL(\n\t\t\t#self.SignKey_client, \n\t\t\tself.SignKey_client,\n\t\t\tself.SealKey_client, \n\t\t\tdata,\n\t\t\tdata,\n\t\t\tsequence_no, \n\t\t\tself.crypthandle_client.encrypt\n\t\t)", "def encrypt(self, data):\n\n if self.crypt_public == \"\":\n raise ValueError(\"Error encrypting: No public encryption key found for {}\".format(self))\n\n key_public = RsaPublicKey.Read(self.crypt_public)\n return key_public.Encrypt(data)", "def Encrypt(self, data):\n data = self.__Encode(data)\n ciph_bytes = self.key.encrypt(data, None)[0] # PyCrypto returns 1-tuple\n return self.Header() + ciph_bytes", "def encrypt_data(data, encryption_key, iv=None):\n # Generate a random iv\n if iv is None:\n iv = 
get_random_bytes(IV_SIZE)\n generate_iv = True\n iv_length = IV_SIZE\n else:\n generate_iv = False\n iv_length = len(iv)\n cipher = AES.new(encryption_key, AES.MODE_GCM, iv)\n ciphered_data, tag = cipher.encrypt_and_digest(bytes(data))\n if generate_iv:\n # if iv passed by user is None, random iv generated\n # above is prepended in encrypted data\n # iv + Cipher + Tag\n result = iv + ciphered_data + tag\n else:\n # Cipher + Tag\n result = ciphered_data + tag\n return result", "def encrypt(algorithm, key, plaintext, associated_data, iv):\n encryptor = Encryptor(algorithm, key, associated_data, iv)\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n return EncryptedData(encryptor.iv, ciphertext, encryptor.tag)", "def _EncryptData(self, data):\n if isinstance(data, str):\n data = data.encode('utf-8')\n encrypted_data = self._gpg.encrypt(\n data,\n self.args.target_key,\n sign=self._gpg.list_keys(True)[0]['fingerprint'],\n always_trust=False)\n if not encrypted_data.ok:\n raise Exception('Failed to encrypt data! Log: %s' % encrypted_data.stderr)\n return encrypted_data.data", "def encrypt_data_key(self, dataKey, token, userGroup):\n masterKey = self.retrieve_master_key(token=token, userGroup=userGroup)\n box = secret.SecretBox(masterKey)\n if isinstance(dataKey, str):\n dataKey = dataKey.encode('utf-8')\n cipherText= box.encrypt(dataKey).decode('cp855')\n return cipherText", "def encrypt(self, key, data, mode, padding):\n # this can be disabled by _disable_encryption, so pylint: disable=method-hidden\n try:\n block_size = self.cipher.block_size\n iv_len = block_size // 8\n iv = os.urandom(iv_len)\n\n encryptor = Cipher(self.cipher(key), mode.build(iv), backend=default_backend()).encryptor()\n padder = padding.build(block_size).padder()\n\n padded_data = padder.update(data) + padder.finalize()\n return iv + encryptor.update(padded_data) + encryptor.finalize()\n except Exception:\n error_message = \"Encryption failed\"\n _LOGGER.exception(error_message)\n raise EncryptionError(error_message)", "def encrypt(data, key, iv, save_path=None):\n if isinstance(data, str):\n with open(data, 'rb') as f:\n data = f.read()\n length = str(len(data))\n length = _pad16(length)\n\n key = _pad16(key)\n iv = _pad16(iv)\n data = _pad16(data)\n cipher = AES.new(key, AES.MODE_CBC, iv)\n data = cipher.encrypt(data)\n data = length + data\n if save_path:\n with open(save_path, 'wb') as f:\n f.write(data)\n return data", "def encrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_encrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def encrypt(self, data):\n data = data.replace(';', '').replace('=', '') # Remove special characters to avoid injection\n plaintext = (self._prefix + data + self._suffix).encode()\n return aes_cbc_encrypt(plaintext, self._key, self._iv)", "def encrypt(cls, plaintext, aad, key, iv):", "def encrypt_data(self, params):\n from django.core.signing import dumps\n return dumps(params, salt=self.salt_namespace)", "def encrypt(self, input, key, iv):\n pass", "def encrypt(self):\n # Generate a randomized initialization vector\n iv = Random.new().read(AES.block_size)\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n # Add a buffer so that the plaintext is a multiple of 16 characters in length\n pt_len = len(self.plaintext)\n buffer_size = AES.block_size - pt_len % AES.block_size\n strmsg = self.plaintext + \" \" * buffer_size\n return 
cipher.encrypt(str.encode(strmsg)), iv", "def aes(encrypt, key, data):\n cipher = AES.new(key, AES.MODE_CBC, get_zero_vector(16))\n if encrypt:\n return cipher.encrypt(data)\n else:\n return cipher.decrypt(data)", "def encrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n ciphertext = aes.encrypt(text)\r\n return ciphertext", "def __WriteEncrypted(self, data, pad=False):\n if pad:\n data = self.__key._Pad(data)\n\n encrypted_bytes = self.__cipher.encrypt(data)\n self.__output_stream.write(encrypted_bytes)\n self.__hmac_stream.Update(encrypted_bytes)", "def set_encryption(key):\n global_scope['enc'] = Encryption(key.encode())", "def encryptFromString(self, data, keyobj):\n return self.encryptByteArray(bytearray(data, 'utf-8'), keyobj)", "def crypt(key, data, iv):\n return xtea.crypt(key, data, iv)", "def encrypt(plaintext, key, associated_data=''):\n\n iv = os.urandom(12)\n\n encryptor = Cipher(\n algorithms.AES(key), modes.GCM(iv),\n backend=default_backend()).encryptor()\n\n encryptor.authenticate_additional_data(associated_data)\n\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n\n return (iv, ciphertext, encryptor.tag)", "def encrypt(self, plaintext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def __encrypt(self, plaintext):\n iv = get_random_bytes(16)\n try:\n encryption_envelope = {'ciphertext':'', \n 'keyid':esn_manifest + '_' + str(self.sequence_number), 'sha256':'AA==', \n 'iv':base64.standard_b64encode(iv).decode('utf-8')}\n except Exception:\n print('ESN is invalid.')\n sys.exit(0)\n\n plaintext = Padding.pad(plaintext.encode('utf-8'), 16)\n cipher = AES.new(self.encryption_key, AES.MODE_CBC, iv)\n ciphertext = cipher.encrypt(plaintext)\n encryption_envelope['ciphertext'] = base64.standard_b64encode(ciphertext).decode('utf-8')\n return json.dumps(encryption_envelope)", "def encrypt(self,password,indata):\n key = hashlib.sha256(password).digest()\n return encrypt_file(key,indata)", "def set_enc_params(self, enc_params):\n self.enc_params = enc_params", "def enc(self, data):\n return data", "def encrypt(self, message, key):\n message = self.pkcs7_pad(message)\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(AES.block_size))\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n return iv + cipher.encrypt(message)", "def __init__(self, key):\n if len(key) > KEY_SIZE:\n raise ParameterError(\"Key must be <%d bytes\" % (KEY_SIZE))\n\n self.key = key.ljust(KEY_SIZE, b\"\\xff\")\n self.encryptIV = b\"\\xff\" * BLOCK_SIZE\n self.decryptIV = b\"\\xff\" * BLOCK_SIZE\n self.remainingData = b\"\"\n self.oldDecrypt = b\"\"", "def get_encrypted_data_keys(self, data_key, encryption_context):\n encrypted_data_keys = [message.header.EncryptedDataKey(b'aws-kms',\n bytes(data_key['KeyId']),\n bytes(data_key['CiphertextBlob']))]\n\n for client in self.kms_clients[1:]:\n key = client.encrypt(KeyId=self.master_key_id,\n Plaintext=data_key['Plaintext'],\n EncryptionContext=encryption_context)\n encrypted_data_key = message.header.EncryptedDataKey(b'aws-kms',\n bytes(key['KeyId']),\n bytes(key['CiphertextBlob']))\n encrypted_data_keys.append(encrypted_data_key)\n\n return encrypted_data_keys", "def write(self, data):\n self.__CheckOpen('write')\n self.__data += data\n encrypt_buffer_size = self.__key._NoPadBufferSize(len(self.__data))\n\n if len(self.__data) >= encrypt_buffer_size:\n self.__WriteEncrypted(self.__data[:encrypt_buffer_size])\n else:\n encrypt_buffer_size = 0\n\n self.__data = self.__data[encrypt_buffer_size:]", "def 
aes_encrypt(mode, aes_key, aes_iv, *data):\n encryptor = Cipher(\n algorithms.AES(aes_key), mode(aes_iv), backend=default_backend()\n ).encryptor()\n\n result = None\n for value in data:\n result = encryptor.update(value)\n encryptor.finalize()\n\n return result, None if not hasattr(encryptor, \"tag\") else encryptor.tag", "def _send(self, data):\n self._sock.send(self._cipher_tx.crypt(data))", "def encrypt(content, key):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\t\tfrom Cryptodome import Random\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\t\tfrom Crypto import Random\n\n\tif not isPython2():\n\t\tif isString(content):\n\t\t\tcontent = content.encode(\"latin-1\")\n\t\tif isString(key):\n\t\t\tkey = key.encode(\"latin-1\")\n\n\tcontent = pad(content)\n\tiv = Random.new().read(AES.block_size)\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\tresult = iv + cipher.encrypt(content)\n\treturn result", "def encrypt(self, input, iv):\n pass", "def encrypt(key, plaintext, associated_data=None):\n\n cipher = AES.new(key, AES.MODE_GCM)\n if associated_data:\n cipher.update(associated_data)\n\n ciphertext, tag = cipher.encrypt_and_digest(plaintext)\n nonce = cipher.nonce\n\n return nonce, ciphertext, tag", "def encode(self, data):\n return self.__cipher.encrypt(data.encode('utf-8'))", "def encrypt():\n\tnull = 0", "def encrypt(self, key, value):\n\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n cipher = AES.new(key, AES.MODE_CBC, iv)\n crypted = cipher.encrypt(self.pkcs5_pad(value))\n return iv+crypted", "def encrypt_data():\n\tkey = generate_key()\n\tctr = AES_CTR(key, 0)\n\n\tdata_lines = open('20.txt').readlines()\n\n\tciphertexts = []\n\n\tfor line in data_lines:\n\t\tciphertexts.append( ctr.encrypt( b64decode( line ) ) )\n\n\treturn ciphertexts", "def encrypt(key, plaintext):\n data = fk(keyGen(key)[0], ip(plaintext))\n return fp(fk(keyGen(key)[1], swapNibbles(data)))", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def encrypt(self, message, key=None):\n if key is None:\n key = self.public_key\n encrypter = RSA.importKey(key)\n return encrypter.encrypt(message, 2048)", "def encrypt(\r\n key: bytes,\r\n plain_text: str,\r\n) -> bytes:\r\n block_size = 16\r\n plain_text = _pad(plain_text, block_size)\r\n iv = os.urandom(block_size)\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n cipher_text = cipher.encrypt(plain_text.encode())\r\n return iv + cipher_text", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def true_send(conn, data):\n encrypted_data = key.encrypt(pickle.dumps(data))\n length = str(len(encrypted_data)).zfill(LENGTH).encode()\n data = length + encrypted_data\n conn.send(data)", "def provider_encrypt(self, uid, input_vec) -> str:\n params = {\n 'input': input_vec,\n }\n return self.context.post(\"/ckks/provider/encrypt/%s\" % uid, params,\n \"CKKS:: failed encrypting data on provider side\"\n )", "def __init__(__self__, *,\n key_data: pulumi.Input[str]):\n pulumi.set(__self__, \"key_data\", key_data)", "def _encrypt_aes_key(aes_key: bytes, receiver_public_key: RsaKey) -> bytes:\n cipher_rsa = PKCS1_OAEP.new(receiver_public_key)\n return cipher_rsa.encrypt(aes_key)", "def wrap(self, key:bytes, credential:PublicKeyCredentialSource)->bytes:\n return keywrap.aes_key_wrap_with_padding(key,credential.get_bytes(True),default_backend())", "def Encrypt(key, value):\n key 
= key.zfill(32)[:32]\n value = Pad(value, 16)\n aes = AES.new(key, AES.MODE_ECB)\n encrypted = aes.encrypt(value)\n return base64.b64encode(encrypted)", "def __encrypt_text_aes__(self, text, password):\n BLOCK_SIZE = 32\n PADDING_CHAR = b'^'\n iv = Random.new().read(16)\n # key must be 32 bytes for AES-256, so the password is hashed with md5 first\n cipher = AES.new(self.__hash_md5__(password), AES.MODE_CBC, iv)\n plaintext = text.encode('utf-8')\n # plaintext must be padded to be a multiple of BLOCK_SIZE\n plaintext_padded = plaintext + (BLOCK_SIZE - len(plaintext) % BLOCK_SIZE) * PADDING_CHAR\n ciphertext = cipher.encrypt(plaintext_padded)\n return (\n base64.b64encode(iv),\n base64.b64encode(ciphertext),\n PADDING_CHAR\n )", "def encrypt(self):\n self.cipherText = self.cipherField.getText()\n # Set up the initial state of the encryption.\n if self.cipherText == \"\":\n self.matrixButton[\"state\"] = \"disabled\"\n self.plainText = self.plainField.getText()\n self.limit = len(self.plainText)\n if self.limit % 2 == 1:\n self.limit -= 1\n self.cursor = 0\n # Use the matrix to encrypt one pair of characters.\n if self.cursor < self.limit:\n self.cipherText += self.encryptPair()\n self.cipherField.setText(self.cipherText)\n self.cursor += 2\n # Add the last character if plaintext length was odd.\n elif self.limit < len(self.plainText):\n self.cipherText += self.plainText[self.limit]\n self.cipherField.setText(self.cipherText)\n # Clean up when done.\n if len(self.plainText) == len(self.cipherText):\n self.encryptButton[\"text\"] = \"Clear fields\"\n self.encryptButton[\"command\"] = self.clearFields", "def user_encrypt_password(data=None, **kw):\n if 'password' in data:\n data['password'] = encrypt_password(data['password'])", "def encrypt(pt, key, macKey):\n\tif not hasattr(pt, 'decode'):\n\t\tpt = bytes(pt, 'utf-8')\n\tpadder = padding.PKCS7(128).padder()\n\tpt = padder.update(pt) + padder.finalize()\n\tiv = os.urandom(16)\n\t#key = hashlib.sha256(key).digest()\n\tcipher = cryptography.hazmat.primitives.ciphers.Cipher(\n\t algorithms.AES(key), modes.CBC(iv), backend=default_backend())\n\tencryptor = cipher.encryptor()\n\tct = encryptor.update(pt) + encryptor.finalize()\n\tmac = hmac.new(macKey, iv + ct, 'sha256').digest()\n\treturn encodeCipherString(2, base64.b64encode(iv), base64.b64encode(ct),\n\t base64.b64encode(mac))", "def __init__(self, key):\n self._block_size = AES.block_size\n self._key = hashlib.sha256(get_as_bytes(key)).digest()", "def encryptor(text: bytes, IV: bytes, key: bytes) -> bytes:\n \n # Given\n prepend_string = \"comment1=cooking%20MCs;userdata=\"\n append_string = \";comment2=%20like%20a%20pound%20of%20bacon\"\n\t\n plaintext = text.replace(b';', b'\";\"').replace(b'=', b'\"=\"')\n ciphertext = AES_CBC_encrypt(PKCS7_pad(plaintext, len(key)), IV, key)\n return ciphertext", "def encrypt(self, message):\n message = self._padding(message, self._block_size)\n initialization_vector = Random.new().read(self._block_size)\n cipher = AES.new(self._key, AES.MODE_CBC, initialization_vector)\n return base64.b64encode(initialization_vector +\n cipher.encrypt(message))", "def encrypt(plaintext):\n # Pad plaintext\n plaintext = pad(plaintext)\n\n # AES encrypt\n iv = Random.new().read(BS)\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return iv + aes.encrypt(plaintext)", "def passwd_encryption(self):\n key = Fernet.generate_key()\n cipher_suite = Fernet(key)\n bin_passwd = bytes(self.password, 'utf-8')\n ciphered_text = cipher_suite.encrypt(bin_passwd)\n with open(self.pass_path, 
'wb') as pass_output:\n pass_output.write(ciphered_text)\n with open(self.key_path, 'wb') as key_output:\n key_output.write(key)", "def encrypt(self, message, key):\n return self.translateMessage(message, key, \"encrypt\")", "def setup(self):\n self.nonce = generate_nonce()\n # print('nonce: ' + str(self.nonce))\n n, ciphertext, tag = aes_encode(self.aes, self.nonce)\n print(Colors.BOLD + 'N --> S: {N_N}K' + Colors.ENDC)\n print('\\t' + Colors.BOLD + 'N_N: ' + Colors.ENDC + str(self.nonce))\n print('\\t' + Colors.BOLD + 'K: ' + Colors.ENDC + str(self.aes))\n print('\\t' + Colors.BOLD + '{N_N}K : (n, c, t)' + Colors.ENDC)\n # print('sending encrypted, (n, c, t) : (' + str(n) + ', ' + str(ciphertext) + ', ' + str(tag) + ')')\n to_send = {'dest': 'setup', 'n': n, 'c': ciphertext, 't': tag} # dictionary to send to the server\n self.nodesocket.sendall(pickle.dumps(to_send))\n data = pickle.loads(self.nodesocket.recv(MAX_SIZE))\n self.id = data['id'] # set the given id from the server\n return data", "def DHencrypt(plaintext, symmetricKey, p, gen):\r\n \"Method was updated to use AES symetric decryption that was\"\r\n \"provided in the starter code as option of symetric encrytion using shared secret keys is generated.\"\r\n simplified_AES.keyExp(symmetricKey) # Generating round keys for AES.\r\n ciphertext = simplified_AES.encrypt(plaintext) # Running simplified AES.\r\n return ciphertext", "def __output_encrypted(self, data, key_len, filename, iv):\n with open(filename, \"w\") as f:\n f.write(START_HEADER + \"\\n\")\n\n key = \"Description\"\n val = \"Crypted file\"\n f.write(self.gen_key_val(key, val))\n\n key = \"Method\"\n val = \"AES\"\n f.write(self.gen_key_val(key, val))\n\n key = \"File name\"\n val = filename\n f.write(self.gen_key_val(key, val))\n\n key = \"IV\"\n val = binascii.hexlify(iv)\n f.write(self.gen_key_val(key, val))\n\n key = \"Data\"\n val = base64.b64encode(data)\n # val = data\n f.write(self.gen_key_val(key, val))\n\n f.write(END_HEADER + \"\\n\")", "def encrypt(data, address, path, raw):\n client = ConfigClient(address=address, fail_fast=False)\n try:\n resp = client.encrypt(data, path=path)\n except Exception:\n raise click.ClickException(\"💥 Failed to contact server!\")\n\n if raw:\n resp = f\"{{cipher}}{resp}\"\n\n table = Table.grid(padding=(0, 1))\n table.add_column(style=\"cyan\", justify=\"right\")\n table.add_column(style=\"magenta\")\n\n table.add_row(\"encrypted data[yellow]:[/yellow] \", f\"'{resp}'\")\n console.print(Panel(table, border_style=\"yellow\", expand=True))", "def encrypt(key, plaintext, cipher):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n p = plaintext.read()\n c = rsa.encrypt(p, k)\n\n cipher.write(c)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except OverflowError:\n click.echo(\"ERROR: Message is to long for encryption with the given key.\")", "def main():\n key, plain = get_key_plain()\n encode(key, plain)", "def encryptByteArray(self, data, keyobj):\n\n nrOfBlocks = int(math.ceil(len(data)/self.blockLengthBytes))\n C = bytearray()\n\n for i in range(nrOfBlocks):\n m = bytearray(self.blockLengthBytes)\n for j in range(self.blockLengthBytes):\n index = i*self.blockLengthBytes+j\n if index < len(data):\n m[j] = data[index]\n c = self.encryptor.encrypt(m, keyobj)\n if len(c) != self.blockLengthBytes:\n raise AssertionError(\"Encryptor is not symmetric! 
\"+\n \"Block Cipher modes require encryptors \"+\n \"with equal length of input & output blocks.\")\n for cb in c:\n C.append(cb)\n\n return C", "def _encrypt(self, text, **options):\n\n raise CoreNotImplementedError()", "def aes_ctr_encrypt(self, key: bytes, plain_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.encryptor()\n return enc.update(plain_data) + enc.finalize()", "def encrypt(self, filename):\n\t f = Fernet(self.key)\n\t with open(filename, \"rb\") as file:\n\t # read all file data\n\t file_data = file.read()\n\t # encrypt data\n\t encrypted_data = f.encrypt(file_data)\n\t # write the encrypted file\n\t with open(filename+\".enc\", \"wb\") as file:\n\t file.write(encrypted_data)", "def encrypt(self, data, expires_in_s=None):\n assert isinstance(data, dict)\n\n now = time()\n dataserial = dumps(data)\n\n if expires_in_s is None:\n expires_in_s = self.expiration_s\n\n # Compress token\n cps = False\n if self.compress:\n uncompressed = dataserial.encode(encoding='utf-8')\n compressed = compress(uncompressed, Z_BEST_COMPRESSION)\n\n compresseddataserial = \\\n b64encode(compressed).decode(encoding='ascii')\n\n cprratio = ratio(dataserial, compresseddataserial)\n # Uncomment for debug. Do not leave commented on production as this\n # may leak user information in logs.\n #\n # print(\n # 'Compression ratio of {:.2f}\\n{}\\n{}\\n{}'.format(\n # cprratio, dataserial, '-' * 80, compresseddataserial,\n # )\n # )\n if cprratio > 0.0:\n dataserial = compresseddataserial\n cps = True\n\n # Build signed token\n signed = JWT(\n header={'alg': self.sign_alg},\n claims={\n # Custom claims\n 'cps': cps,\n 'dta': dataserial,\n # Standard claims\n # https://tools.ietf.org/html/rfc7519#section-4.1\n 'iss': self.issuer, # issuer name\n 'iat': now, # issued at\n 'nbf': now, # not before\n 'exp': now + expires_in_s, # expires at\n },\n )\n signed.make_signed_token(self.signkey)\n signedserial = signed.serialize()\n\n # Build encrypted token\n encrypted = JWT(\n header={'alg': self.enc_alg, 'enc': self.enc_enc},\n claims={\n # Custom claims\n 'dta': signedserial,\n # Standard claims\n # https://tools.ietf.org/html/rfc7519#section-4.1\n 'iss': self.issuer, # issuer name\n 'iat': now, # issued at\n 'nbf': now, # not before\n 'exp': now + expires_in_s, # expires at\n },\n )\n encrypted.make_encrypted_token(self.encryptkey)\n encryptedserial = encrypted.serialize()\n\n return encryptedserial", "def encrypt(self, bytes):\r\n paddedBytes = self._addPKCS1Padding(bytes, 2)\r\n m = bytesToNumber(paddedBytes)\r\n if m >= self.n:\r\n raise ValueError()\r\n c = self._rawPublicKeyOp(m)\r\n encBytes = numberToByteArray(c, numBytes(self.n))\r\n return encBytes", "def _encrypt(self, b):\n from cryptography.hazmat.primitives.ciphers \\\n import Cipher, algorithms, modes\n from cryptography.hazmat.backends import default_backend\n\n backend = default_backend()\n cypher = Cipher(\n algorithms.AES(self.__key), modes.CBC(self.__iv), backend=backend)\n encryptor = cypher.encryptor()\n pad_length = 16 - (len(b) % 16)\n b += bytes([pad_length]) * pad_length\n result = encryptor.update(b) + encryptor.finalize()\n return result", "def encryptAESBlock(key, pt):\n\tif len(pt) != 16 and len(pt) != 32:\n\t\traise Exception(\"Plaintext is not length 16 or 32\")\n\tcipher = AES.new(key, AES.MODE_ECB)\n\treturn cipher.encrypt(pt)", "def perform_aes_algorithm(plaintext, key):\n if len(key) == 32:\n print('C.1 AES-128 (Nk=4, Nr=10)\\n')\n elif len(key) == 
48:\n print('\\nC.2 AES-192 (Nk=6, Nr=12)\\n')\n else:\n print('\\nC.3 AES-256 (Nk=8, Nr=14)\\n')\n\n print('{:<19} {:}'.format('PLAINTEXT:', plaintext))\n print('{:<19} {:}\\n'.format('KEY:', key))\n\n print('CIPHER (ENCRYPT):')\n ciphertext = encrypt(plaintext, key, verbose=True)\n\n print('\\nINVERSE CIPHER (DECRYPT):')\n decrypt(ciphertext, key, verbose=True)", "def enable_encryption(self, output_key: bytes, input_key: bytes) -> None:\n self.chacha = chacha20.Chacha20Cipher(output_key, input_key)\n self.state.has_authenticated = True", "def encrypt(project_id, location_id, key_ring_id, crypto_key_id,\n plaintext_file_name, ciphertext_file_name):\n\n # Creates an API client for the KMS API.\n kms_client = googleapiclient.discovery.build('cloudkms', 'v1')\n\n # The resource name of the CryptoKey.\n name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(\n project_id, location_id, key_ring_id, crypto_key_id)\n\n # Read data from the input file.\n with io.open(plaintext_file_name, 'rb') as plaintext_file:\n plaintext = plaintext_file.read()\n\n # Use the KMS API to encrypt the data.\n crypto_keys = kms_client.projects().locations().keyRings().cryptoKeys()\n request = crypto_keys.encrypt(\n name=name,\n body={'plaintext': base64.b64encode(plaintext).decode('ascii')})\n response = request.execute()\n ciphertext = base64.b64decode(response['ciphertext'].encode('ascii'))\n\n # Write the encrypted data to a file.\n with io.open(ciphertext_file_name, 'wb') as ciphertext_file:\n ciphertext_file.write(ciphertext)\n\n print('Saved ciphertext to {}.'.format(ciphertext_file_name))", "def encrypt(inp):\n # prepare plaintext\n prefix = \"comment1=cooking%20MCs;userdata=\"\n suffix = \";comment2=%20like%20a%20pound%20of%20bacon\"\n pt = inp.replace(\";\", \"\").replace(\"=\", \"\") # remove invalid character\n pt = prefix + pt + suffix # add prefix and suffix\n pt_encoded = pt.encode(\"utf-8\")\n pt_padded = pkcs7.add(pt_encoded, aes.S_BLOCK)\n\n # encrypt\n ct = aes.cbc_encrypt(pt_padded, KEY, IV)\n\n return ct", "def _encrypt(self):\n self._outfile = os.path.join(self.dest, self.encrypted_file)\n self._infile = self.plain_file\n self._log.info(\"Encrypting '%s' to '%s'\", self.plain_file, self._outfile)\n with open(self.plain_file, \"rb\") as plain_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=plain_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' encrypted to '%s'\", self.plain_file, self._outfile)\n return True", "def encrypt(self, message):\n\n message = self._pad(message)\n iv = Random.new().read(AES.block_size)\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return base64.b64encode(iv + cipher.encrypt(message)).decode('utf-8')" ]
[ "0.7956872", "0.7449426", "0.73192656", "0.7263974", "0.7056065", "0.70533365", "0.69593483", "0.6946052", "0.68951267", "0.68717563", "0.68052113", "0.6775894", "0.67707175", "0.67657804", "0.67324567", "0.6702431", "0.66863525", "0.6679284", "0.66733557", "0.66417927", "0.66352636", "0.66305715", "0.66282", "0.6617607", "0.66141105", "0.6605749", "0.65967005", "0.65838", "0.65482515", "0.6547393", "0.6448795", "0.6446192", "0.642112", "0.6415748", "0.6369634", "0.6275997", "0.62326443", "0.62087685", "0.62004274", "0.6156047", "0.60822827", "0.6035577", "0.6024031", "0.59952426", "0.59632087", "0.59584874", "0.59545314", "0.5952868", "0.59235674", "0.59183675", "0.5918017", "0.5915304", "0.5870559", "0.5854212", "0.58488345", "0.5845403", "0.58181804", "0.58118236", "0.58051527", "0.5803942", "0.5788265", "0.57857144", "0.5742212", "0.5725923", "0.5708332", "0.5707221", "0.568746", "0.5684663", "0.5679669", "0.5677295", "0.5673973", "0.5670497", "0.5667182", "0.5666737", "0.5665069", "0.56589174", "0.56514484", "0.5650696", "0.5645146", "0.56314313", "0.5630846", "0.5624554", "0.56229204", "0.56141794", "0.5607981", "0.56077933", "0.5590777", "0.55870914", "0.55825174", "0.5573226", "0.55608195", "0.55585414", "0.5555028", "0.55540454", "0.5550538", "0.55368483", "0.55286604", "0.5510055", "0.5506245", "0.55018485", "0.54944706" ]
0.0
-1
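The encrypt record above and the decrypt record below document what appears to be PyCryptodome's internal CTR-mode implementation (the `raw_ctr_lib.CTR_encrypt` / `CTR_decrypt` calls and the counter-wraparound error code point to `_mode_ctr.py`). A minimal usage sketch through the public PyCryptodome interface, which drives those methods internally; the 8-byte nonce length is an assumption chosen for illustration:

    # Minimal AES-CTR round-trip sketch (assumes PyCryptodome; nonce length illustrative)
    from Crypto.Cipher import AES
    from Crypto.Random import get_random_bytes

    key = get_random_bytes(16)
    nonce = get_random_bytes(8)                    # per-message nonce; never reuse with this key
    enc = AES.new(key, AES.MODE_CTR, nonce=nonce)
    ct = enc.encrypt(b"attack at dawn")            # dispatches to the encrypt() documented above
    dec = AES.new(key, AES.MODE_CTR, nonce=nonce)  # fresh cipher object for decryption
    assert dec.decrypt(ct) == b"attack at dawn"    # CTR decryption mirrors encryption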
Decrypt data with the key and the parameters set at initialization.
def decrypt(self, ciphertext, output=None): if self.decrypt not in self._next: raise TypeError("decrypt() cannot be called after encrypt()") self._next = [self.decrypt] if output is None: plaintext = create_string_buffer(len(ciphertext)) else: plaintext = output if not is_writeable_buffer(output): raise TypeError("output must be a bytearray or a writeable memoryview") if len(ciphertext) != len(output): raise ValueError("output must have the same length as the input" " (%d bytes)" % len(plaintext)) result = raw_ctr_lib.CTR_decrypt(self._state.get(), c_uint8_ptr(ciphertext), c_uint8_ptr(plaintext), c_size_t(len(ciphertext))) if result: if result == 0x60002: raise OverflowError("The counter has wrapped around in" " CTR mode") raise ValueError("Error %X while decrypting in CTR mode" % result) if output is None: return get_raw_buffer(plaintext) else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt_data(self, encrypted_data):\n raise NotImplementedError", "def _decrypt(self, data, key):\n seed1 = key\n seed2 = 0xEEEEEEEE\n result = BytesIO()\n\n for i in range(len(data) // 4):\n seed2 += self.encryption_table[0x400 + (seed1 & 0xFF)]\n seed2 &= 0xFFFFFFFF\n value = struct.unpack(\"<I\", data[i*4:i*4+4])[0]\n value = (value ^ (seed1 + seed2)) & 0xFFFFFFFF\n\n seed1 = ((~seed1 << 0x15) + 0x11111111) | (seed1 >> 0x0B)\n seed1 &= 0xFFFFFFFF\n seed2 = value + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF\n\n result.write(struct.pack(\"<I\", value))\n\n return result.getvalue()", "def decrypt(self, data):", "def decrypt_data ( aes_key, data ) :\n decoded_data = decode_data( data )\n salt = decoded_data[ 0 : Crypto.Cipher.AES.block_size ]\n encrypted_data = decoded_data[ Crypto.Cipher.AES.block_size : ]\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n decrypted_data = cipher.decrypt( encrypted_data )\n\n return decrypted_data", "def _decrypt(data):\n cipher = AES.new(bytes(_AES_KEY), AES.MODE_CBC, bytes(_AES_IV))\n return cipher.decrypt(data)", "def decrypt_raw(self, key, data):\n iv = data[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv)\n data = cipher.decrypt(data[AES.block_size:])\n return self.__unpad(data)", "def decrypt(data, key):\n data = six.ensure_binary(data)\n try:\n data = privy.peek(hidden=data, password=key)\n except ValueError:\n error = \"Unable to decrypt {cnt} bytes of data using key {k}, invalid key!\"\n error = error.format(cnt=len(data), k=key)\n raise exceptions.ModuleError(error)\n return six.ensure_text(data)", "def decrypt(data, key, iv):\n decryptor = AES.new(key, AES.MODE_CBC, iv=iv)\n return decryptor.decrypt(data)", "def decrypt_data(data, encryption_key):\n assert isinstance(data, str)\n obj = AES.new(encryption_key, AES.MODE_CBC, 'This is an IV456')\n bytes_data = bytes.fromhex(data)\n return Pad.unpad(obj.decrypt(bytes_data)).decode()", "def aes_decrypt(encrypted_data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = cipher.decrypt(encrypted_data)\r\n return unpad(padded_data)", "def decrypt(self, key, data, mode, padding):\n # pylint: disable=unused-argument,no-self-use\n if hasattr(key, \"public_bytes\"):\n raise NotImplementedError('\"decrypt\" is not supported by public keys')\n try:\n return key.decrypt(data, padding.build())\n except Exception:\n error_message = \"Decryption failed\"\n _LOGGER.exception(error_message)\n raise DecryptionError(error_message)", "def decrypt(self, key, data, mode, padding):\n # this can be disabled by _disable_encryption, so pylint: disable=method-hidden\n try:\n block_size = self.cipher.block_size\n iv_len = block_size // 8\n iv = data[:iv_len]\n data = data[iv_len:]\n\n decryptor = Cipher(self.cipher(key), mode.build(iv), backend=default_backend()).decryptor()\n decrypted_data = decryptor.update(data) + decryptor.finalize()\n\n unpadder = padding.build(block_size).unpadder()\n return unpadder.update(decrypted_data) + unpadder.finalize()\n except Exception:\n error_message = \"Decryption failed\"\n _LOGGER.exception(error_message)\n raise DecryptionError(error_message)", "def decryptData(self, key, iv, data, align = True):\r\n\t\tif((len(data) % self.align) != 0 and align):\r\n\t\t\treturn AES.new(key, AES.MODE_CBC, iv).decrypt(data + (\"\\x00\" * (self.align - (len(data) % self.align))))\r\n\t\telse:\r\n\t\t\treturn AES.new(key, AES.MODE_CBC, iv).decrypt(data)", "def decrypt(data, private_key):\r\n\r\n # Retrieve session key, tag, ciphertext and nonce from 
file\r\n enc_session_key, nonce, tag, ciphertext = \\\r\n [ file_in.read(x) for x in (private_key.size_in_bytes(), 16, 16, -1) ]\r\n\r\n\r\n # Decrypt the session key\r\n session_key = cipher_rsa.decrypt(enc_session_key)\r\n\r\n # Decrypt the data with the AES session key\r\n cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\r\n data = cipher_aes.decrypt_and_verify(ciphertext, tag)\r\n\r\n return data", "def decrypt(algorithm, key, encrypted_data, associated_data):\n decryptor = Decryptor(algorithm, key, associated_data, encrypted_data.iv, encrypted_data.tag)\n return decryptor.update(encrypted_data.ciphertext) + decryptor.finalize()", "def decrypt(self, data):\n if not data:\n return ''\n data = self._crypt(data, self.DECRYPT)\n return self._unpad_data(data)", "def decryptByteArray(self, data, keyobj):\n raise NotImplementedError(\"Is abstract\")", "def decrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def base64_aes_decrypt(self,data,key):\r\n cipher = AES.new(key)\r\n return self._depkcs7padding(cipher.decrypt(base64.b64decode(data)))", "def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n plaintext = cipher.decrypt(ciphertext[AES.block_size:])\n return self.pkcs7_unpad(plaintext)", "def decode(self, data):\n return self.__cipher.decrypt(data)", "def _decrypt_data_key(self, encrypted_data_key, algorithm, encryption_context):\n # Wrapped EncryptedDataKey to deserialized EncryptedData\n encrypted_wrapped_key = aws_encryption_sdk.internal.formatting.deserialize.deserialize_wrapped_key(\n wrapping_algorithm=self.config.wrapping_key.wrapping_algorithm,\n wrapping_key_id=self.key_id,\n wrapped_encrypted_key=encrypted_data_key,\n )\n # EncryptedData to raw key string\n plaintext_data_key = self.config.wrapping_key.decrypt(\n encrypted_wrapped_data_key=encrypted_wrapped_key, encryption_context=encryption_context\n )\n # Raw key string to DataKey\n return DataKey(\n key_provider=encrypted_data_key.key_provider,\n data_key=plaintext_data_key,\n encrypted_data_key=encrypted_data_key.encrypted_data_key,\n )", "def decrypt_message(self, env_key, data):\n\n if not env_key or not data:\n raise Exception('Arguments missing.')\n\n key = RSA.importKey(self.private_key)\n try:\n env_key = unquote(env_key).decode('utf8')\n data = unquote(data).decode('utf8')\n except AttributeError:\n # Python 3 compatible\n env_key = unquote(env_key)\n data = unquote(data)\n\n try:\n env_key = base64.b64decode(env_key)\n data = base64.b64decode(data)\n \n cipher = PKCS1_v1_5.new(key)\n\n sentinel = []\n session_key = cipher.decrypt(env_key, sentinel)\n\n rc4_cipher = ARC4.new(session_key)\n\n xml_data = rc4_cipher.decrypt(data)\n\n # TODO: add xml validation\n # schema_root = etree.XML(xml_data)\n # schema = etree.XMLSchema(schema_root)\n # parser = etree.XMLParser(schema=schema)\n\n return xml_data\n except Exception as e:\n if self.developement:\n exception(e)\n\n raise Exception('Could not decrypt message.')", "def decode_and_decrypt(encoded_data, key):\r\n return aes_decrypt(base64.urlsafe_b64decode(encoded_data), key)", "def Decrypt(self, data):\n\n data = base64.b64decode(data)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n solved = \"\"\n try:\n solved = es.decrypt(data)\n except ValueError:\n stdout.write(\"Error, corrupted file.\\n\\n\")\n return 
\"%errorpass:1234123412341234%\"\n\n return solved", "def decrypt_data_key(self, dataKeyCypher, token, userGroup):\n masterKey = self.retrieve_master_key(token=token, userGroup=userGroup)\n box = secret.SecretBox(masterKey)\n if isinstance(dataKeyCypher, str):\n dataKeyCypher = dataKeyCypher.encode('cp855')\n try:\n plainText = box.decrypt(dataKeyCypher).decode('utf-8')\n except Exception:\n raise UnableToDecryptException(\"Unable to verify cyphertext/key pair\")\n return plainText", "def decrypt(cls, ciphertext_and_tag, aad, key, iv):", "def decrypt(data, key, iv, save_path=None):\n if isinstance(data, str):\n with open(data, 'rb') as f:\n data = f.read()\n pad_ch = '\\0'\n length = int(data[:16].rstrip(pad_ch.encode('utf-8')).decode('utf-8'))\n data = data[16:]\n key = _pad16(key)\n iv = _pad16(iv)\n cipher = AES.new(key, AES.MODE_CBC, iv)\n data = cipher.decrypt(data)\n data = data[:length]\n if save_path:\n with open(save_path, 'wb') as f:\n f.write(data)\n return data", "def decrypt(enc_data=None, pk=None, sk=None, pairing_group=None, debug=0):\n\n # Check if enc_data is set\n if enc_data is None:\n logging.error('decrypt_seed_key ciphertext exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in decrypt_seed_key ciphertext')\n raise Exception\n\n # Check if pk is set and it exists\n if pk is None:\n logging.error('[ERROR] decrypt_seed_key pk_file exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in decrypt_seed_key pk_file')\n raise Exception\n\n # Check if sk is set and it exists\n if sk is None:\n logging.error('decrypt_seed_key sk_file exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in decrypt_seed_key sk_file')\n raise Exception\n\n # Decrypt data with CP-ABE and return the result\n cpabe = CPabe_BSW07(pairing_group)\n return cpabe.decrypt(pk, sk, enc_data)", "def decrypt_aes256(data, key, iv):\n decryptor = AES.new(key, AES.MODE_CBC, iv)\n return decryptor.decrypt(data)", "def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)", "def decrypt(self, input, key, iv) :\n pass", "def base64_aes_decrypt(self,data,key):\n cipher = AES.new(key)\n try:\n return self._depkcs7padding(cipher.decrypt(base64.b64decode(data)))\n except Exception, ex:\n return ''", "def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))", "def rsa_pkcs1v15_decrypt(self, data):\n pass", "def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted", "def decrypt(self):\n # Grab the initialization vector from the front of the cipher-text\n iv = self.ciphertext[:AES.block_size]\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return cipher.decrypt(self.ciphertext)[AES.block_size:].rstrip().decode(\"utf-8\"), iv", "def decrypt(self, key_file, input_file, output_file=None):\n data = self.__input_encrypted(input_file)\n iv = data[:AES.block_size]\n key = self.import_key(key_file)\n cipher = AES.new(key, AES.MODE_CBC, iv)\n\n data = self.__unpad(cipher.decrypt(data[AES.block_size:]))\n if output_file != None:\n with open(output_file, \"w\") as f:\n f.write(data)\n return data", "def decrypt(self, encryptedserial):\n # Obtain data and metadata, but return only data\n data, _ = self.decrypt_with_metadata(encryptedserial)\n return data", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt 
decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt(priv_key, ciphertext):\n pk_encrypted_secret_key = ciphertext['pk_encrypted_secret_key']\n sym_encrypted_data = ciphertext['sym_encrypted_data']\n # TODO: secure delete\n secret_key = decrypt_pk(priv_key, pk_encrypted_secret_key)\n encoded_string = decrypt_symmetric(secret_key, sym_encrypted_data)\n return decode_data(encoded_string)", "def decrypt_kms_data(encrypted_data):\n if not AWS_REGION:\n return\n\n kms = boto3.client('kms', region_name=AWS_REGION)\n\n decrypted = kms.decrypt(CiphertextBlob=encrypted_data)\n\n if decrypted.get('KeyId'):\n # Decryption succeed\n decrypted_value = decrypted.get('Plaintext', '')\n if isinstance(decrypted_value, bytes):\n decrypted_value = decrypted_value.decode('utf-8')\n return decrypted_value", "def decrypt(self,message, key):\n return self.translateMessage(message, key, \"decrypt\")", "def decrypt(self, key, value):\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n iv = value[:16]\n crypted = value[16:]\n cipher = AES.new(key,AES.MODE_CBC,iv)\n return self.pkcs5_unpad(cipher.decrypt(crypted))", "def decrypt_data(self, encrypted_data):\n from django.core.signing import loads\n return loads(encrypted_data, salt=self.salt_namespace)", "def decrypt(self, key, device, private_key):\n device_key = base64.b64decode(self.keys[device.id.hex])\n\n master_key = private_key_decrypt(private_key, device_key)\n\n if master_key is None:\n return\n\n return fernet_decrypt(self.values[key], master_key, self.salt)", "def decrypt(key, cipher, use_custom=False):\n result = logic(key, cipher, use_custom)\n return array.array(\"B\", result)", "def decrypt_data(encryption_key, data, iv=None):\n if not data:\n logger.debug(\"Outdata is empty, nothing to decrypt\")\n return data\n # if iv is None the it's assumed that 12 bytes iv is\n # prepended in encrypted data\n data_byte = base64_to_byte_array(data)\n if iv is None:\n iv_length = IV_SIZE\n iv = data_byte[:iv_length]\n data_contains_iv = True\n else:\n iv_length = len(iv)\n data_contains_iv = False\n\n cipher = AES.new(encryption_key, AES.MODE_GCM, iv)\n # Split data into iv, tag and ciphered data\n if data_contains_iv:\n ciphertext_len = len(data_byte) - iv_length - TAG_SIZE\n ciphered_data = data_byte[iv_length: iv_length + ciphertext_len]\n tag = data_byte[-TAG_SIZE:]\n else:\n ciphertext_len = len(data_byte) - TAG_SIZE\n ciphered_data = data_byte[: ciphertext_len]\n tag = data_byte[-TAG_SIZE:]\n\n result = cipher.decrypt_and_verify(ciphered_data, tag).decode(\"utf-8\")\n logger.info(\"Decryption result at client - %s\", result)\n return result", "def _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password):\n\n decrypt_func = crypto_funcs[encryption_algorithm_info.encryption_cipher]\n\n # Modern, PKCS#5 PBES2-based encryption\n if encryption_algorithm_info.kdf == 'pbkdf2':\n\n if encryption_algorithm_info.encryption_cipher == 'rc5':\n raise ValueError(pretty_message(\n '''\n PBES2 encryption scheme utilizing RC5 encryption is not supported\n '''\n ))\n\n enc_key = pbkdf2(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n 
encryption_algorithm_info.key_length\n )\n enc_iv = encryption_algorithm_info.encryption_iv\n\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n elif encryption_algorithm_info.kdf == 'pbkdf1':\n derived_output = pbkdf1(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length + 8\n )\n enc_key = derived_output[0:8]\n enc_iv = derived_output[8:16]\n\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n elif encryption_algorithm_info.kdf == 'pkcs12_kdf':\n enc_key = pkcs12_kdf(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length,\n 1 # ID 1 is for generating a key\n )\n\n # Since RC4 is a stream cipher, we don't use an IV\n if encryption_algorithm_info.encryption_cipher == 'rc4':\n plaintext = decrypt_func(enc_key, encrypted_content)\n\n else:\n enc_iv = pkcs12_kdf(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.encryption_block_size,\n 2 # ID 2 is for generating an IV\n )\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n return plaintext", "def decrypt(self, ciphertext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def decrypt(self,password,indata):\n key = hashlib.sha256(password).digest()\n return decrypt_file(key,indata)", "def decrypt(ciphertext, key):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\n\tif not isPython2():\n\t\tif isString(ciphertext):\n\t\t\tciphertext = ciphertext.encode(\"latin-1\")\n\t\tif isString(key):\n\t\t\tkey = key.encode(\"latin-1\")\n\t\t\n\tiv = ciphertext[:AES.block_size]\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\tplaintext = cipher.decrypt(ciphertext[AES.block_size:])\n\treturn plaintext", "def decrypt(pwd, data):\n\n ct = b64decode(data['ct'])\n salt = b64decode(data['salt'])\n tag_start = len(ct) - data['ts'] // 8\n tag = ct[tag_start:]\n ciphertext = ct[:tag_start]\n\n mode_class = getattr(modes, data['mode'].upper())\n algo_class = getattr(algorithms, data['cipher'].upper())\n\n kdf = _kdf(data['ks'], iters=data['iter'], salt=salt)[0]\n key = kdf.derive(bytes(pwd, \"utf-8\"))\n cipher = Cipher(\n algo_class(key),\n mode_class(\n b64decode(data['iv']),\n tag,\n min_tag_length=data['ts'] // 8\n ),\n backend=_BACKEND\n )\n\n dec = cipher.decryptor()\n return dec.update(ciphertext) + dec.finalize()", "def rsa_decrypt(data, rsa_priv_key_str):\r\n key = RSA.importKey(rsa_priv_key_str)\r\n cipher = PKCS1_OAEP.new(key)\r\n return cipher.decrypt(data)", "def do_android_decryption(self):\r\n self.aes_decryption_key = self.extract_aes_key()\r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def decrypt(self, encrypted):\n\n encrypted = base64.b64decode(encrypted)\n IV = encrypted[:self.BLOCK_SIZE]\n aes = AES.new(self.key, AES.MODE_CBC, IV)\n return self._unpad(aes.decrypt(encrypted[self.BLOCK_SIZE:]))", "def decrypt(data):\n # Decrypt data if necessary\n result = None\n if str(data[:5]) == \"<?xml\":\n print(\" - Unprotected CETRAINER detected\")\n result = data\n else:\n print(\" - Protected CETRAINER detected. 
Decrypting...\")\n ckey = 0xCE\n for i in range(2, len(data)):\n data[i] = data[i] ^ data[i-2]\n for i in range(len(data)-2, -1, -1):\n data[i] = data[i] ^ data[i+1]\n for i in range(0, len(data)):\n data[i] = data[i] ^ ckey\n ckey = (ckey + 1) & 0xFF\n\n # Decompress if necessary and write data\n if data[:5] == b'CHEAT':\n result = zlib.decompress(data[5:], -15)\n result = result[4:]\n print(\" - Decompressed CETRAINER using new method\")\n else:\n result = zlib.decompress(data, -15)\n print(\" - Decompressed CETRAINER using old method\")\n return result", "def decrypt(project_id, location_id, key_ring_id, crypto_key_id,\n ciphertext_file_name, plaintext_file_name):\n\n # Creates an API client for the KMS API.\n kms_client = googleapiclient.discovery.build('cloudkms', 'v1')\n\n # The resource name of the CryptoKey.\n name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(\n project_id, location_id, key_ring_id, crypto_key_id)\n\n # Read encrypted data from the input file.\n with io.open(ciphertext_file_name, 'rb') as ciphertext_file:\n ciphertext = ciphertext_file.read()\n\n # Use the KMS API to decrypt the data.\n crypto_keys = kms_client.projects().locations().keyRings().cryptoKeys()\n request = crypto_keys.decrypt(\n name=name,\n body={'ciphertext': base64.b64encode(ciphertext).decode('ascii')})\n response = request.execute()\n plaintext = base64.b64decode(response['plaintext'].encode('ascii'))\n\n # Write the decrypted data to a file.\n with io.open(plaintext_file_name, 'wb') as plaintext_file:\n plaintext_file.write(plaintext)\n\n print('Saved plaintext to {}.'.format(plaintext_file_name))", "def decrypt(key, cipher, plaintext):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n c = cipher.read()\n p = rsa.decrypt(c, k)\n\n plaintext.write(p)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except DecryptError:\n click.echo(\"ERROR: Key is wrong or message was badly padded before encryption\")", "def decrypt(self, key, dir):\n self.encrypt(key, dir)", "def passwd_decryption(self):\n with open(self.key_path, 'rb') as input_key:\n for line in input_key:\n key = line\n with open(self.pass_path, 'rb') as input_password:\n for line in input_password:\n password = line\n cipher_suit = Fernet(key)\n plain_password = cipher_suit.decrypt(password)\n plain_password = bytes(plain_password).decode('utf-8')\n \n return plain_password", "def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. 
The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)", "def decrypt(self, cipher_text, iv=\"\", auth_data=None, tag=b\"\"):\n if not iv:\n raise ValueError(\"Missing Nonce\")\n\n return self.key.decrypt(iv, cipher_text + tag, auth_data)", "def decrypt(self, cypher):\n\n if self.crypt_private == \"\":\n raise ValueError(\"Error decrypting: No private encryption key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def decrypt(self, input, iv) :\n pass", "def decrypt_attr(data, key):\n data = MegaCrypto.base64_decode(data)\n k, iv, meta_mac = MegaCrypto.get_cipher_key(key)\n attr = MegaCrypto.cbc_decrypt(data, k)\n\n #: Data is padded, 0-bytes must be stripped\n return json.loads(\n re.search(r'{.+?}', attr).group(0)) if attr[:6] == 'MEGA{\"' else False", "def decrypt(self, filename):\n\t f = Fernet(self.key)\n\t with open(filename, \"rb\") as file:\n\t # read the encrypted data\n\t encrypted_data = file.read()\n\t # decrypt data\n\t decrypted_data = f.decrypt(encrypted_data)\n\t # write the original filename\n\t return decrypted_data", "def decrypt_symmetric(self, ciphertext):\n from google.cloud import kms_v1\n\n # Creates an API client for the KMS API.\n client = kms_v1.KeyManagementServiceClient()\n\n # The resource name of the CryptoKey.\n name = client.crypto_key_path_path(self.project_id, self.location_id, self.key_ring_id,\n self.crypto_key_id)\n # Use the KMS API to decrypt the data.\n response = client.decrypt(name, ciphertext)\n return response.plaintext", "def do_ios_decryption(self):\r\n try:\r\n self.aes_decryption_key = self.extract_aes_key()\r\n except DecryptionKeyInvalidError:\r\n self.aes_decryption_key = self.get_backup_encryption_key()\r\n self.used_ios_decryption_key_cache = True\r\n \r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def decrypt(self, cryptod, secret):\n try:\n # From json to python crypto dict\n data = base64.b64decode(\n bytes(cryptod['ciphervalue'], encoding=self.encoding))\n # Decrypt\n iv = base64.b64decode(bytes(cryptod['iv'], encoding=self.encoding))\n algorithm = self._algorithm(\n secret=secret, name=cryptod['algorithm'])\n cipher = Cipher(algorithm, modes.CBC(iv), backend=self.backend)\n decryptor = cipher.decryptor()\n data = decryptor.update(data) + decryptor.finalize()\n # Unpad\n unpadder = padding.PKCS7(cipher.algorithm.block_size).unpadder()\n data = unpadder.update(data) + unpadder.finalize()\n # Unzip\n data = str(gzip.decompress(data), encoding=self.encoding)\n cipher = None\n # json string\n except ValueError as ve:\n raise ValueError('Decrypt failure!') from ve\n try:\n data = json.loads(data)\n except ValueError as ve:\n raise ValueError('JSON formatting failure!') from ve\n return data", "def decrypt(private_key, ciphertext):\n if len(ciphertext) < 512 + 16:\n return None\n msg_header = ciphertext[:512]\n msg_iv = ciphertext[512:512+16]\n msg_body = ciphertext[512+16:]\n try:\n symmetric_key = PKCS1_OAEP.new(private_key).decrypt(msg_header)\n except ValueError:\n return None\n if len(symmetric_key) != 32:\n return None\n return AES.new(symmetric_key,\n mode=AES.MODE_CFB,\n IV=msg_iv).decrypt(msg_body)", "def _decrypt(self):\n self._outfile = os.path.join(self.dest, self.plain_file)\n 
self._infile = self.encrypted_file\n self._log.info(\"Decrypting file '%s' to '%s'\", self.encrypted_file, self._outfile)\n with open(self.encrypted_file, \"rb\") as enc_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-d\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=enc_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' decrypted to '%s'\", self.encrypted_file, self._outfile)\n return True", "def decrypt(self, ciphertext):\n return self._transform(ciphertext, self._backward)", "def decrypt(\r\n key: bytes,\r\n cipher_text: bytes,\r\n) -> str:\r\n block_size = 16\r\n iv = cipher_text[:block_size]\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n plain_text = cipher.decrypt(cipher_text[block_size:]).decode('utf-8')\r\n return _unpad(plain_text)", "def decrypt(self, encText, previouslyProcessedData=None):\n if previouslyProcessedData is None:\n length = len(self.oldDecrypt)\n if length % BLOCK_SIZE == 0:\n previouslyProcessedData = length\n else:\n previouslyProcessedData = int(\n BLOCK_SIZE * math.floor(length / BLOCK_SIZE)\n )\n\n # previouslyProcessedData was passed by the parent: it means that a frame was decoded and there was some data left. This does not include the padding zeros\n if previouslyProcessedData % BLOCK_SIZE != 0:\n previouslyProcessedData = int(\n BLOCK_SIZE * math.ceil(previouslyProcessedData / BLOCK_SIZE)\n )\n\n remainingData = self.oldDecrypt[previouslyProcessedData:]\n if self.oldDecrypt != b\"\":\n self.decryptIV = self.oldDecrypt[\n previouslyProcessedData - BLOCK_SIZE : previouslyProcessedData\n ]\n\n self.oldDecrypt = encText # save current block\n\n toDecrypt = truncate_multiple(remainingData + encText, BLOCK_SIZE)\n decryptor = RijndaelCbc(\n self.key,\n self.decryptIV,\n padding=ZeroPadding(BLOCK_SIZE),\n block_size=BLOCK_SIZE,\n )\n return decryptor.decrypt(toDecrypt)", "def decrypt(self, buffer):\n try:\n ct = base64.b64decode(buffer)\n except:\n print('f a i l')\n return bytes('fail')\n\n cipher = AES.new(self.psk, AES.MODE_GCM, FIXED_IV)\n pt = unpad(cipher.decrypt(ct), AES.block_size)\n return pt", "def ecb_decrypt(self, encrypted_data, color):\n msg = b''\n for d in encrypted_data:\n encoded_bytes = d[0] + d[1]\n encoded_int = self.bytes_to_int(encoded_bytes)\n decoded_int = self.power(encoded_int, self.private_key, self.N)\n decoded_byte = self.int_to_bytes(decoded_int, len(d[0]))\n msg += decoded_byte\n return msg", "def decryptor(file_name, key):\n\twith open(file_name, 'rb') as dfile:\n\t\tciphertext = dfile.read()\n\t\tdec = decrypt(key, ciphertext)\n\t\tdfile.close()\n\t\tdtext = \"The encrypted file was opened by macupdate.py by the user: \"\n\t\tcreateLog(dtext, 'logs/macupdate.log')\n\t\treturn dec", "def decryptByteArray(self, data, keyobj):\n\n nrOfBlocks = int(math.ceil(len(data)/self.blockLengthBytes))\n M = bytearray()\n\n for i in range(nrOfBlocks):\n c = bytearray(self.blockLengthBytes)\n for j in range(self.blockLengthBytes):\n index = i*self.blockLengthBytes+j\n if index < len(data):\n c[j] = data[index]\n m = self.encryptor.decrypt(c, keyobj)\n for mb in m:\n M.append(mb)\n\n return M", "def decrypt_message(encrypted_message):", "def __init__(self, key):\n if len(key) > KEY_SIZE:\n raise ParameterError(\"Key must be <%d bytes\" % (KEY_SIZE))\n\n self.key = key.ljust(KEY_SIZE, b\"\\xff\")\n self.encryptIV = b\"\\xff\" * BLOCK_SIZE\n self.decryptIV = b\"\\xff\" * BLOCK_SIZE\n self.remainingData = b\"\"\n self.oldDecrypt = b\"\"", "def decrypt(self, key, msg, b64decode=True):\n if b64decode:\n 
msg = base64.b64decode(msg)\n iv = msg[:self.cipher.block_size]\n cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)\n\n padded = cipher.decrypt(msg[self.cipher.block_size:])\n l = ord(padded[-1:]) + 1\n plain = padded[:-l]\n return plain", "def decrypt(ciphertext, key, iv):\n cipher = AES.new(key, AES.MODE_CFB, iv)\n msg = cipher.decrypt(ciphertext)\n return msg", "def AES_decrypt(ciphertext: bytes) -> Text:\n text = b64decode(ciphertext)\n cipher = AES.new(secret_key, mode, IV)\n return Padding.unpad(cipher.decrypt(text), bs).decode('utf-8')", "def heat_decrypt(value, encryption_key=None):\n encryption_key = get_valid_encryption_key(encryption_key)\n auth = base64.b64decode(value)\n iv = auth[:AES.block_size]\n cipher = AES.new(encryption_key, AES.MODE_CFB, iv)\n res = cipher.decrypt(auth[AES.block_size:])\n return res", "def decrypt(self, ciphertext):\n\n # Note that the state of the cipher is updated by each operation,\n # and the offset into the stream is implicit, which means that\n # it is almost always an error to use the encrypt and decrypt\n # methods of the same instance, so we do a simple check to ensure\n # that this isn't the case.\n #\n if self.prev_crypto_op and self.prev_crypto_op != self.decrypt:\n raise RuntimeError('Same instance used for encrypt/decrypt')\n self.prev_crypto_op = self.decrypt\n\n return self.rc4.update(ciphertext)", "def extract(self, data):\n return ujson.loads(self.cipher.decrypt(data))", "def decrypt(ciphertext, key, iv, tag, associated_data=''):\n\n decryptor = Cipher(\n algorithms.AES(key), modes.GCM(iv, tag),\n backend=default_backend()).decryptor()\n\n decryptor.authenticate_additional_data(associated_data)\n\n return decryptor.update(ciphertext) + decryptor.finalize()", "def decrypt_message(self):\r\n\r\n\t\t#Will not let user input useless messages that cannot be decrypted.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to decrypt. 
--> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\r\n\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\t\tprint(\"Your decrypted message is\")\r\n\t\tprint(self.my_code + \"|\")", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def decrypt_data(self, master_pass, website, filename): \n\n if os.path.isfile(filename):\n try:\n with open(filename, 'r') as jdata:\n jfile = json.load(jdata)\n nonce = bytes.fromhex(jfile[website][\"nonce\"])\n password = bytes.fromhex(jfile[website][\"password\"])\n except KeyError:\n raise PasswordNotFound\n else:\n raise PasswordFileDoesNotExist\n # add extra characters and take first 16 to make sure key is right.\n formatted_master_pass = master_pass + \"================\"\n master_pass_encoded = formatted_master_pass[:16].encode(\"utf-8\")\n cipher = AES.new(master_pass_encoded, AES.MODE_EAX, nonce = nonce)\n plaintext_password = cipher.decrypt(password).decode(\"utf-8\")\n\n return plaintext_password", "async def decrypt(self, data, sequence_no, direction='init', auth_data=None):\n\t\tedata = data[16:]\n\t\tsrv_sig = NTLMSSP_MESSAGE_SIGNATURE.from_bytes(data[:16])\n\t\tsealedMessage = self.crypthandle_server.encrypt(edata)\n\t\tsignature = self.MAC(self.crypthandle_server.encrypt, self.SignKey_server, srv_sig.SeqNum, sealedMessage)\n\t\t#print('seqno %s' % sequence_no)\n\t\t#print('Srv sig: %s' % data[:16])\n\t\t#print('Calc sig: %s' % signature)\n\n\t\treturn sealedMessage, None", "def decrypt(self, cypher):\n\n cypher = b64decode(cypher)\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def decrypt(path, key):\n key = load_key(key)\n\n if p.isdir(path):\n # encrypt a directory\n return decrypt_dir(path, key)\n # decrypt a file\n path = decrypt_file(path, key)\n # check if file contains suffix\n if \"-encrypted.zip\" in path:\n return decrypt_dir(path, key)\n return", "def decrypt_message(data,symetric_key,private_key):\n\tif type(data) == str or type(data) == bytes:\n\t\tdata = json.loads(data)\n\ttyp = data['type']\n\tnonce = data['nonce'].encode(\"iso-8859-1\")\n\tmessage = data['message'].encode(\"iso-8859-1\")\n\tnonce, *_ = decrypt(private_key,nonce)\n\tmessage = AESCCM(symetric_key).decrypt(nonce,message,None)\n\tmessage ={'type':typ,'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\treturn message", "def test_decryption_private_key_not_given(self) -> None:\n\n given = \"Hello, World!\"\n\n encryptor = DataEncryption()\n\n self.assertRaises(ValueError, lambda: encryptor.decrypt_data(given))", "def decrypt(ciphertext):\n # AES decrypt\n iv = ciphertext[:16]\n ciphertext = ciphertext[16:]\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(aes.decrypt(ciphertext))", "def decrypt(self, message):\n message = base64.b64decode(message)\n initialization_vector = message[:self._block_size]\n cipher = AES.new(self._key, AES.MODE_CBC, initialization_vector)\n raw_message = cipher.decrypt(message[self._block_size:])\n return self._remove_padding(raw_message).decode('utf-8')", "def 
decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)", "def decrypt(self, encrypted_token: bytes) -> bytes:\n return None" ]
[ "0.7820853", "0.7805203", "0.77293396", "0.76565194", "0.7467573", "0.74420536", "0.73967624", "0.72905445", "0.72817254", "0.7259105", "0.7258132", "0.71645397", "0.71422887", "0.70957357", "0.70479023", "0.69664323", "0.695035", "0.6944712", "0.68905735", "0.6879039", "0.6867665", "0.6866126", "0.6851958", "0.68455476", "0.68229485", "0.68013626", "0.6796837", "0.6728284", "0.6702605", "0.6688926", "0.6642425", "0.6641388", "0.6630929", "0.6619162", "0.66156554", "0.658224", "0.6580541", "0.6579804", "0.6567188", "0.6561632", "0.6561632", "0.65473205", "0.65334594", "0.6521848", "0.65103775", "0.6508089", "0.65055853", "0.64781326", "0.64607346", "0.64568925", "0.64070535", "0.6404971", "0.6367124", "0.63578016", "0.62638694", "0.6261817", "0.62542576", "0.6250245", "0.6204106", "0.6197818", "0.6191125", "0.6164145", "0.61540467", "0.61496305", "0.614954", "0.6137068", "0.6136492", "0.6123909", "0.61235833", "0.6120325", "0.61194354", "0.61174136", "0.6095821", "0.6093736", "0.6079204", "0.6073295", "0.6060587", "0.60586214", "0.60540366", "0.60397357", "0.6024572", "0.6024269", "0.6005834", "0.599269", "0.5989348", "0.59873515", "0.5984633", "0.59801185", "0.596783", "0.5967107", "0.59641325", "0.59615475", "0.5959323", "0.5956005", "0.59529984", "0.5942948", "0.5938525", "0.5937075", "0.5931648", "0.59309715", "0.59284544" ]
0.0
-1
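The record above (document_score 0.0, document_rank -1) ends here; its negatives are all symmetric-decryption helpers. Distilled below is the iv-prefixed AES-CBC pattern most of them share — a minimal sketch, not taken from any single entry, assuming PyCryptodome and a ciphertext laid out as `iv || body`:

```python
# Hypothetical helper illustrating the pattern shared by many negatives
# above; assumes PyCryptodome and an iv-prefixed, PKCS#7-padded body.
from Crypto.Cipher import AES
from Crypto.Util.Padding import unpad

def cbc_decrypt(key: bytes, blob: bytes) -> bytes:
    iv, body = blob[:AES.block_size], blob[AES.block_size:]
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return unpad(cipher.decrypt(body), AES.block_size)
```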
Instantiate a cipher object that performs CTR encryption/decryption.
def _create_ctr_cipher(factory, **kwargs): cipher_state = factory._create_base_cipher(kwargs) counter = kwargs.pop("counter", None) nonce = kwargs.pop("nonce", None) initial_value = kwargs.pop("initial_value", None) if kwargs: raise TypeError("Invalid parameters for CTR mode: %s" % str(kwargs)) if counter is not None and (nonce, initial_value) != (None, None): raise TypeError("'counter' and 'nonce'/'initial_value'" " are mutually exclusive") if counter is None: # Crypto.Util.Counter is not used if nonce is None: if factory.block_size < 16: raise TypeError("Impossible to create a safe nonce for short" " block sizes") nonce = get_random_bytes(factory.block_size // 2) else: if len(nonce) >= factory.block_size: raise ValueError("Nonce is too long") # What is not nonce is counter counter_len = factory.block_size - len(nonce) if initial_value is None: initial_value = 0 if is_native_int(initial_value): if (1 << (counter_len * 8)) - 1 < initial_value: raise ValueError("Initial counter value is too large") initial_counter_block = nonce + long_to_bytes(initial_value, counter_len) else: if len(initial_value) != counter_len: raise ValueError("Incorrect length for counter byte string (%d bytes, expected %d)" % (len(initial_value), counter_len)) initial_counter_block = nonce + initial_value return CtrMode(cipher_state, initial_counter_block, len(nonce), # prefix counter_len, False) # little_endian # Crypto.Util.Counter is used # 'counter' used to be a callable object, but now it is # just a dictionary for backward compatibility. _counter = dict(counter) try: counter_len = _counter.pop("counter_len") prefix = _counter.pop("prefix") suffix = _counter.pop("suffix") initial_value = _counter.pop("initial_value") little_endian = _counter.pop("little_endian") except KeyError: raise TypeError("Incorrect counter object" " (use Crypto.Util.Counter.new)") # Compute initial counter block words = [] while initial_value > 0: words.append(struct.pack('B', initial_value & 255)) initial_value >>= 8 words += [ b'\x00' ] * max(0, counter_len - len(words)) if not little_endian: words.reverse() initial_counter_block = prefix + b"".join(words) + suffix if len(initial_counter_block) != factory.block_size: raise ValueError("Size of the counter block (%d bytes) must match" " block size (%d)" % (len(initial_counter_block), factory.block_size)) return CtrMode(cipher_state, initial_counter_block, len(prefix), counter_len, little_endian)
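The document above is PyCryptodome's CTR-mode factory; callers reach it through the public `new()` entry point (e.g. `AES.new(key, AES.MODE_CTR, ...)`) rather than invoking it directly. A minimal round-trip sketch under that assumption:

```python
# Round trip through the public API that dispatches to the factory above.
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)
enc = AES.new(key, AES.MODE_CTR)        # factory picks a random 8-byte nonce
ciphertext = enc.encrypt(b"attack at dawn")
dec = AES.new(key, AES.MODE_CTR, nonce=enc.nonce)
assert dec.decrypt(ciphertext) == b"attack at dawn"
```

Note the two interfaces the factory reconciles: `nonce`/`initial_value` are the modern parameters, while a `counter` dict from `Crypto.Util.Counter.new` is kept for backward compatibility, and the code rejects mixing the two.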
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, block_cipher, initial_counter_block,\n prefix_len, counter_len, little_endian):\n\n if len(initial_counter_block) == prefix_len + counter_len:\n self.nonce = _copy_bytes(None, prefix_len, initial_counter_block)\n \"\"\"Nonce; not available if there is a fixed suffix\"\"\"\n\n self._state = VoidPointer()\n result = raw_ctr_lib.CTR_start_operation(block_cipher.get(),\n c_uint8_ptr(initial_counter_block),\n c_size_t(len(initial_counter_block)),\n c_size_t(prefix_len),\n counter_len,\n little_endian,\n self._state.address_of())\n if result:\n raise ValueError(\"Error %X while instantiating the CTR mode\"\n % result)\n\n # Ensure that object disposal of this Python object will (eventually)\n # free the memory allocated by the raw library for the cipher mode\n self._state = SmartPointer(self._state.get(),\n raw_ctr_lib.CTR_stop_operation)\n\n # Memory allocated for the underlying block cipher is now owed\n # by the cipher mode\n block_cipher.release()\n\n self.block_size = len(initial_counter_block)\n \"\"\"The block size of the underlying cipher, in bytes.\"\"\"\n\n self._next = [self.encrypt, self.decrypt]", "def __create_cipher(self, nonce=None, iv=None):\r\n cipher = None\r\n if self.__encryption_method == EncryptionMethod.AES:\r\n if nonce is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES3:\r\n if nonce is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES:\r\n if nonce is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.SHIFT:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"Shift only supports ECB\")\r\n cipher = SimpleShiftCipher(self.__encryption_key)\r\n elif self.__encryption_method == EncryptionMethod.XOR:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"XOR only supports ECB\")\r\n cipher = SimpleXorCipher(self.__encryption_key)\r\n else:\r\n raise Exception(\"Unknown encryption method \" + str(self.__encryption_method))\r\n return cipher", "def aes_ctr(key, counter=None):\n return AES.new(key, AES.MODE_CTR, counter=(counter if counter is not None else Counter.new(128)))", "def encryptAESCTR(key, plaintext):\n # 128-bit iv, securely generated\n iv = os.urandom(16)\n cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend())\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n return (iv, ciphertext)", "def new(key,mode=MODE_ECB,IV=None,counter=None,segment_size=None):\n return AES(key,mode,IV,counter,segment_size)", "def encrypt_ctr(self, plaintext, iv):\n assert len(iv) == 16\n\n plaintext = pad(plaintext)\n\n blocks = 
[]\n nonce = iv\n for plaintext_block in split_blocks(plaintext):\n # CTR mode encrypt: plaintext_block XOR encrypt(nonce)\n block = xor_bytes(plaintext_block, self.encrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return b''.join(blocks)", "def __CreateCipher(self, key_bytes, iv_bytes, mode=AES.MODE_CBC):\n # can we use M2Crypto and was it requested?\n if ACTIVE_CRYPT_LIB.lower() == 'm2crypto' and EVP:\n # yes, so do so\n return self.EVPAdaptor(key_bytes, iv_bytes, mode)\n else:\n # default to PyCrypto\n return self.AESAdaptor(key_bytes, iv_bytes, mode)", "def aes_ctr_encrypt(self, key: bytes, plain_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.encryptor()\n return enc.update(plain_data) + enc.finalize()", "def encrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n ciphertext = aes.encrypt(text)\r\n return ciphertext", "def __Cipher(self, selector):\n assert selector in self.OP_TYPES, 'Invalid selector :%s' % selector\n if selector == self.OP_ACTIVE and (len(self.ciphers.keys()) > 1 or\n not len(self.ciphers.keys())):\n assert 0, 'If both encryption and decryption used then selector must \\\n be OP_ENCRYPT or OP_DECRYPT and at least 1 must be active'\n\n cipher = None\n if selector == self.OP_ACTIVE:\n # should only be one cipher active\n cipher = self.ciphers.values()[0]\n else:\n cipher = self.ciphers.get(selector)\n # have we been created a cipher for this selector yet?\n if not cipher:\n # no, so set it up as requested\n\n # convert between AES and EVP modes\n # NOTE: AES auto-selects based on key size using the same mode, but\n # EVP requires different mode strings for each key size (in bits)\n mode = 'aes_%s_cbc' % (self.key_size * 8)\n cipher = EVP.Cipher(alg=mode,\n key=self.key_bytes,\n iv=self.IV,\n op=selector,\n padding=0)\n self.ciphers[selector] = cipher\n return cipher", "def __init__(self, key):\n self.block_size = 16\n self.cipher = Cipher(algorithms.AES(key), modes.ECB(), default_backend())", "def __init__(self, key, iv, do, ciphername='aes-256-cbc', tag_len=12, iv_len=7, tag=None):\n self.cipher = OpenSSL.get_cipher(ciphername)\n self.ctx = OpenSSL.EVP_CIPHER_CTX_new()\n if (do == 1 or do == 0):\n k = OpenSSL.malloc(key, len(key))\n IV = OpenSSL.malloc(iv, len(iv))\n if self.cipher == OpenSSL.get_cipher('aes-128-ccm') or \\\n self.cipher == OpenSSL.get_cipher('aes-128-gcm'):\n OpenSSL.EVP_CipherInit_ex(self.ctx, self.cipher.get_pointer(), 0, 0, 0, do)\n self.tag_len = tag_len\n self.iv_len = iv_len\n if do == 0:\n if tag is None or (tag is not None and len(tag) != tag_len):\n raise Exception(\"Invalid Tag Input...\")\n else:\n self.cipher_ctrl(tag_val=tag)\n else:\n self.cipher_ctrl()\n OpenSSL.EVP_CipherInit_ex(self.ctx, 0, 0, k, IV, do)\n else:\n OpenSSL.EVP_CipherInit_ex(\n self.ctx, self.cipher.get_pointer(), 0, k, IV, do)\n else:\n raise Exception(\"RTFM ...\")", "def decrypt_ctr(self, ciphertext, iv):\n assert len(iv) == 16\n\n blocks = []\n nonce = iv\n for ciphertext_block in split_blocks(ciphertext):\n # CTR mode decrypt: ciphertext XOR decrypt(nonce)\n block = xor_bytes(ciphertext_block, self.decrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return unpad(b''.join(blocks))", "def __init__(self, key, msg0503):\n enkey1 = map(ord, AES.new(key).encrypt(msg0503[:16]))\n self.cipher = AES.new(\"\".join(\n map(chr, (enkey1[i] ^ ord(msg0503[i + 16]) for i in range(16)))))\n self.encrypt_seq = random.randint(0, 0xffff)", "def 
operate_cipher(self):", "def aes_ctr_decrypt(self, key: bytes, encrypted_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.decryptor()\n return enc.update(encrypted_data) + enc.finalize()", "def __get_cipher(self):\n return Fernet(open(self.__key_file, 'rb').read())", "def decryptAESCTR(key, iv, ciphertext):\n cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend())\n decryptor = cipher.decryptor()\n return decryptor.update(ciphertext) + decryptor.finalize()", "def decrypt_ctr(key, ciphertext):\n\tmessage = ''\n\tiv = ciphertext[0:16]\n\tfor i in range(16, len(ciphertext), 16):\n\t\tinputblock = ciphertext[i:i+16]\n\t\tcipher = AES.new(key, AES.MODE_ECB)\n\t\txorkey = cipher.encrypt(long_to_bytes(bytes_to_long(iv)+(i/16-1)))\n\t\tif len(inputblock) == 16:\n\t\t\tmessage += strxor(inputblock, xorkey)\n\t\telse:\n\t\t\tmessage += strxor(inputblock, xorkey[:len(inputblock)])\n\treturn message", "def aes_cbc(key, iv=None):\n return AES.new(key, AES.MODE_CBC, iv if iv is not None else get_zero_vector(16))", "def encrypt(self, plaintext, output=None):\n\n if self.encrypt not in self._next:\n raise TypeError(\"encrypt() cannot be called after decrypt()\")\n self._next = [self.encrypt]\n \n if output is None:\n ciphertext = create_string_buffer(len(plaintext))\n else:\n ciphertext = output\n \n if not is_writeable_buffer(output):\n raise TypeError(\"output must be a bytearray or a writeable memoryview\")\n \n if len(plaintext) != len(output):\n raise ValueError(\"output must have the same length as the input\"\n \" (%d bytes)\" % len(plaintext))\n\n result = raw_ctr_lib.CTR_encrypt(self._state.get(),\n c_uint8_ptr(plaintext),\n c_uint8_ptr(ciphertext),\n c_size_t(len(plaintext)))\n if result:\n if result == 0x60002:\n raise OverflowError(\"The counter has wrapped around in\"\n \" CTR mode\")\n raise ValueError(\"Error %X while encrypting in CTR mode\" % result)\n \n if output is None:\n return get_raw_buffer(ciphertext)\n else:\n return None", "def main():\n b64 = (b\"L77na/nrFsKvynd6HzOoG7GHTLXsTVu9qvY/2syLXzhPweyyMTJULu/6/kXX0KSvo\"\n b\"OLSFQ==\")\n binary = base64.b64decode(b64)\n\n key = b\"YELLOW SUBMARINE\"\n nonce = bytes(8)\n cipher = AES.new(key, AES.MODE_ECB)\n ctr = CTRMode(\n blksize=16,\n encrypt_blk=cipher.encrypt,\n decrypt_blk=cipher.decrypt,\n nonce=nonce,\n )\n\n decrypted = ctr.decrypt(binary)\n\n print(decrypted.decode())", "def __init__(self,**kwargs):\n self.msg = kwargs.get('msg','')\n self.shift = kwargs.get('shift','')\n op = kwargs.get('op', False)\n if op:\n try:\n op = getattr(self,op)\n except AttributeError as e: \n raise CipherError(\"valid operations: (encode|decode).\")\n op()\n print \"cipher={c}|key={s}|{r}\".format(c=self.__module__.split('.')[2],\n s=self.shift,\n r=self.result)", "def aes_cipher_from_key(key):\r\n return AES.new(key, AES.MODE_CBC, generate_aes_iv(key))", "def __init__(self, key, plaintext=None, ciphertext=None):\n self.key = key\n # If plaintext is specified, generate its encrypted counterpart\n if plaintext:\n self.plaintext = plaintext\n self.ciphertext, self.iv = self.encrypt()\n # If instead cipher-text is specified, decrypt it\n elif ciphertext:\n self.ciphertext = ciphertext\n self.plaintext, self.iv = self.decrypt()\n # Otherwise declaration is invalid\n else:\n raise InvalidMessage(\"Either plaintext or cipher-text must be declared\")", "def encryptor(iv = os.urandom(16), key = os.urandom(32), bc = backend,key_type = 
'AES128',mode='CBC'):\n\tif key_type == 'AES128':\n\t\talgo = algorithms.AES(key)\n\telif key_type == 'ChaCha20':\n\t\talgo = algorithms.ChaCha20(key,nonce=os.urandom(32))\n\telse:\n\t\traise('Error algorithm ' + key_type + ' not supported!')\n\tif mode == 'CBC':\n\t\tmode = modes.CBC(iv)\n\telif mode == 'GCM':\n\t\tmode = modes.GCM(iv)\n\telse :\n\t\traise('Error mode ' + mode + ' not supported!')\n\tcipher = Cipher(algo,mode,backend = bc)\n\treturn iv,key,cipher.encryptor()", "def cipher(input_bytes, expanded_key, n_r):\n\n state = generate_initial_state(input_bytes)\n state = add_round_key(state, expanded_key, 0)\n\n # Apply rounds of operations as stated in AES standard\n for round_no in range(1, n_r):\n state = sub_bytes(state)\n state = shift_rows(state)\n state = mix_columns(state)\n state = add_round_key(state, expanded_key, round_no * 4 * 4)\n\n state = sub_bytes(state)\n state = shift_rows(state)\n state = add_round_key(state, expanded_key, n_r * 4 * 4)\n\n return state", "def aes_enc_dec(self, key, iv, inputVal):\n\n\t\taes = Cipher(\"AES-128-CTR\")\n\t\tenc = aes.enc(key, iv)\n\t\toutput = enc.update(inputVal)\n\t\toutput += enc.finalize()\n\t\treturn output", "def choose_cipher(cls):\n while True:\n\n crypt = input(\"Would you like to encrypt or decrypt?\").lower()\n print(crypt)\n if (crypt != \"encrypt\") and (crypt != \"decrypt\"):\n crypt = 0\n print(\"Invalid Selection\")\n else:\n break\n\n while True:\n\n cipher_choice = input(\"Select Cipher: \\n\"\n \"A) Affine\\n\"\n \"B) Atbash\\n\"\n \"C) Keyword\\n\"\n ).lower()\n\n if cipher_choice == (\"a\" or \"a)\" or \"affine\"):\n cipher_choice = \"affine\"\n break\n elif cipher_choice == (\"b\" or \"b)\" or \"atbash\"):\n cipher_choice = \"atbash\"\n break\n elif cipher_choice == (\"c\" or \"c)\" or \"keyword\"):\n cipher_choice = \"keyword\"\n break\n\n else:\n print(\"Invalid Selection\")\n while True:\n message = input(\"Input your message: \")\n if (len(message) >= 1):\n break\n else:\n print(\"Invalid Message\")\n while True:\n otp = input(\"Enter one time pad: \")\n if crypt == \"encrypt\" or crypt == \"e\":\n if (len(message) % 5):\n otp_length = (len(message) + (5 - (len(message) % 5)))\n else:\n otp_length = (len(message))\n if len(otp) >= otp_length:\n break\n else:\n print(\"otp for this message must be \"\n \"{} characters long\".format(otp_length))\n else:\n break\n return cls(crypt, cipher_choice, otp, message)", "def __init__(self, ciphertext):\n\n slice_index = None # Replace None\n self.preamble = ciphertext[:slice_index]\n self.ciphertext = ciphertext[slice_index:]", "def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted", "def init_alternate_cipher(self, alternate_encryption_key: bytes) -> None:\n #: Initialize AES ECB cipher with alternate_encryption_key\n cipher: Cipher = Cipher(algorithms.AES(alternate_encryption_key), modes.ECB(), backend=backend)\n #: Initialize AES ECB cipher context\n encryptor: CipherContext = cipher.encryptor()\n #: Add cipher to alternate_ciphers dict\n self.alternate_ciphers[alternate_encryption_key] = cipher\n #: Add encryptor to alternate_encryptors dict\n self.alternate_encryptors[alternate_encryption_key] = encryptor\n self.logger.debug(f'Alternate Cipher initialized for key {hexlify(alternate_encryption_key).decode(\"ASCII\")}')", "def encrypt(cls, plaintext, aad, key, iv):", "def ecb_or_cbc_encrypt(plaintext, mode='random'):\n if mode == 'random':\n mode = 'ECB' if randint(0, 1) == 0 else 'CBC'\n\n key = 
randstr(AES_BSZ)\n plaintext = (\n ''.join([randstr(1) for _ in range(randint(5, 10))]) +\n plaintext +\n ''.join([randstr(1) for _ in range(randint(5, 10))])\n )\n plaintext = pad_to_blocksize(plaintext)\n\n if mode == 'ECB':\n ecb = AES.new(key, AES.MODE_ECB)\n ciphertext = ecb.encrypt(plaintext)\n elif mode == 'CBC':\n iv = randstr(AES_BSZ)\n cbc = AES.new(key, AES.MODE_CBC, iv)\n ciphertext = cbc.encrypt(plaintext)\n else:\n raise Exception(\"invalid mode\")\n\n return ciphertext", "def encrypt(self):\n # Generate a randomized initialization vector\n iv = Random.new().read(AES.block_size)\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n # Add a buffer so that the plaintext is a multiple of 16 characters in length\n pt_len = len(self.plaintext)\n buffer_size = AES.block_size - pt_len % AES.block_size\n strmsg = self.plaintext + \" \" * buffer_size\n return cipher.encrypt(str.encode(strmsg)), iv", "def __init__(self):\n self.key = b'FSMF73R873YM187R'\n self.signer = AES.new(self.key, AES.MODE_EAX)\n self.verifier = AES.new(self.key, AES.MODE_EAX, nonce=self.signer.nonce)", "def aes128_ctr_cipher(string, nonce, key):\n cipher_string = b''\n # Divide input string in blocks of 16 bytes\n cipher_text_blocks = [string[i:i + 16] for i in range(0, len(string), 16)]\n for i in range(len(cipher_text_blocks)):\n # Calculate incremental nonce block for each input string block\n nonce_block = nonce + i.to_bytes(8, byteorder='little')\n nonce_matrix = string_to_matrix_states(nonce_block)[0]\n # Cipher nonce block with key\n nonce_matrix_cipher = aes128_RoundBlock(nonce_matrix, key)\n d = xor_states(nonce_matrix_cipher, string_to_matrix_states(cipher_text_blocks[i])[0])\n cipher_string += matrix_to_bytes(d)\n return cipher_string", "def __init__(self, crypt, cipher_choice, otp, message):\n self.crypt = crypt\n self.cipher_choice = cipher_choice\n self.otp = otp\n self.message = message", "def __CreateCipher(self):\n is_data_avail = True\n if not self.__cipher:\n reqd_block_size = self.__key.block_size\n new_bytes_reqd = reqd_block_size - len(self.__encrypted_buffer)\n read_bytes, is_data_avail = self.__ReadBytes(new_bytes_reqd)\n if read_bytes:\n self.__encrypted_buffer += read_bytes\n if len(self.__encrypted_buffer) >= reqd_block_size:\n iv_bytes = self.__encrypted_buffer[:reqd_block_size]\n self.__encrypted_buffer = self.__encrypted_buffer[\n reqd_block_size:\n ]\n self.__hmac_stream.Update(iv_bytes)\n self.__cipher = AES.new(self.__key.key_bytes, AES.MODE_CBC,\n iv_bytes)\n return is_data_avail", "def encryptAESCTR(key, nonce, pt):\n\tct = b''\n\tcounter = 0\n\tfor ptBlock in chunks(pt, 16):\n\t\tblock = (int.from_bytes(nonce, byteorder='big') + counter).to_bytes(16, byteorder='big')\n\t\tencBlock = encryptAESBlock(key, block)\n\t\tct += xor(ptBlock, encBlock)\t\t\n\t\tcounter += 1\n\treturn ct", "def decryptAESCTR(key, nonce, ct):\n\tpt = b''\n\tcounter = 0\n\tfor ctBlock in chunks(ct, 16):\n\t\tblock = (int.from_bytes(nonce, byteorder='big') + counter).to_bytes(16, byteorder='big')\n\t\tencBlock = encryptAESBlock(key, block)\n\t\tpt += xor(ctBlock, encBlock)\t\t\n\t\tcounter += 1\n\treturn pt", "def _derive_crypto(self, pad_string): # XXX consider secret_seed\n secret = self.mac(pad_string,\n self.initiator_seed + self.responder_seed,\n self.shared_secret)\n return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])", "def encrypt_data_ctr(\n key: bytes,\n counter_0: bytes,\n mac_cbc: bytes,\n payload: bytes = b\"\",\n) -> tuple[bytes, 
bytes]:\n s_cipher = Cipher(algorithms.AES(key), modes.CTR(counter_0))\n s_encryptor = s_cipher.encryptor()\n mac = s_encryptor.update(mac_cbc)\n encrypted_data = s_encryptor.update(payload) + s_encryptor.finalize()\n return (encrypted_data, mac)", "def __init__(self, block_cipher: BlockCipher, code_size: int):\n self.cipher = block_cipher\n self.code_size = code_size", "def encrypt(\r\n key: bytes,\r\n plain_text: str,\r\n) -> bytes:\r\n block_size = 16\r\n plain_text = _pad(plain_text, block_size)\r\n iv = os.urandom(block_size)\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n cipher_text = cipher.encrypt(plain_text.encode())\r\n return iv + cipher_text", "def __init__(self, key, initial_prng):\n self.cipher = key\n self.prng = initial_prng\n self.nonce = None", "def __init__(\n self,\n context: \"ts.Context\" = None,\n vector=None,\n scale: float = None,\n data: ts._ts_cpp.CKKSVector = None,\n ):\n # wrapping\n if data is not None:\n self.data = data\n # constructing a new object\n else:\n if not isinstance(context, ts.Context):\n raise TypeError(\"context must be a tenseal.Context\")\n if not isinstance(vector, ts.PlainTensor):\n vector = ts.plain_tensor(vector, dtype=\"float\")\n if len(vector.shape) != 1:\n raise ValueError(\"can only encrypt a vector\")\n vector = vector.raw\n\n if scale is None:\n self.data = ts._ts_cpp.CKKSVector(context.data, vector)\n else:\n self.data = ts._ts_cpp.CKKSVector(context.data, vector, scale)", "def aes_ecb(key):\n return AES.new(key, AES.MODE_ECB)", "def encrypt(self, plainText):\n encryptor = RijndaelCbc(\n self.key,\n self.encryptIV,\n padding=ZeroPadding(BLOCK_SIZE),\n block_size=BLOCK_SIZE,\n )\n encText = encryptor.encrypt(plainText)\n self.encryptIV = encText[-BLOCK_SIZE:]\n return encText", "def __init__(self, shift):\n\n # Initializing temporary arrays for encryption and decryption, respectively.\n encoder = [None] * 26\n decoder = [None] * 26\n\n # Constructing the arrays.\n for k in range(26):\n encoder[k] = chr((k + shift) % 26 + ord(\"A\"))\n decoder[k] = chr((k - shift) % 26 + ord(\"A\"))\n\n # Converting to string (to make immutable).\n self._forward = \"\".join(encoder)\n self._backward = \"\".join(decoder)", "def aes(encrypt, key, data):\n cipher = AES.new(key, AES.MODE_CBC, get_zero_vector(16))\n if encrypt:\n return cipher.encrypt(data)\n else:\n return cipher.decrypt(data)", "def __init__(self, alphabet, m, b):\n # We're cheating here by not actually having the decryption method use the \"inverse\" argument\n transformed = alphabet.affinal(m, b)\n super(AffineCipher, self).__init__(alphabet, transformed)", "def test_encryption_cycle_default_algorithm_non_framed_no_encryption_context(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"], key_provider=self.kms_master_key_provider, frame_length=0\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def __init__(self, encryption_method: str, encryption_key_size: int = 32, encryption_key: bytes = None,\r\n block_size: int = 32, block_mode: str = BlockMode.ECB):\r\n self.__encryption_method = encryption_method\r\n self.__encryption_key_size = encryption_key_size\r\n self.__encryption_key = encryption_key\r\n self.__block_size = block_size\r\n self.__block_mode = block_mode\r\n\r\n if self.__encryption_key is None:\r\n self.__randomize_key_on_every_encryption = True\r\n else:\r\n self.__randomize_key_on_every_encryption = False\r\n\r\n # Generate 
the next key to be used\r\n if self.__randomize_key_on_every_encryption:\r\n self.__encryption_key = get_random_bytes(self.__encryption_key_size)", "def new(cls, cipher_list_, name='NewOnionCipher'):\n if isinstance(cipher_list_, str):\n raise ValueError('cipher_list should be a list-like thing')\n try:\n for cipher in cipher_list_:\n if not issubclass(cipher, BaseCipher):\n raise ValueError('Cipher list should contain BaseCipher ' +\n 'subclasses.')\n except TypeError:\n raise ValueError('cipher_list should be a list-like thing')\n\n return type(name, (cls,), {'cipher_list': cipher_list_})", "def decrypt_ctr(\n key: bytes,\n counter_0: bytes,\n mac: bytes,\n payload: bytes = b\"\",\n) -> tuple[bytes, bytes]:\n cipher = Cipher(algorithms.AES(key), modes.CTR(counter_0))\n decryptor = cipher.decryptor()\n mac_tr = decryptor.update(mac) # MAC is encrypted with counter 0\n decrypted_data = decryptor.update(payload) + decryptor.finalize()\n\n return (decrypted_data, mac_tr)", "def encrypt_aes(msg, key, iv):\r\n #start timer\r\n start = timeit.default_timer()\r\n\r\n #converting key to bytes from hex\r\n key = bytes.fromhex(key)\r\n msg = pad(msg)\r\n obj = AES.new(key, AES.MODE_CBC, iv)\r\n ciphertxt = obj.encrypt(msg)#ciphertxt will be in 'bytes'\r\n\r\n #converting ciphertxt into hexadecimal\r\n ciphertxt = ciphertxt.hex()\r\n\r\n print(\"Ciper is: \",ciphertxt)\r\n\r\n #stop timer\r\n stop = timeit.default_timer()\r\n print('Encryption Running Time: ', stop-start)\r\n \r\n return ciphertxt", "def ciphers_obj(self):\n if self.esp_enc_alg == \"ENCR_AES_GCM_16_IIV\":\n ## BEGIN code to update\n \n return [ AES.new(self.esp_enc_key,AES.MODE_GCM, nonce=self.nonce)]\n ## END code to update\n raise UnsupportedEncAlgError(sa.esp_enc_alg, \"unsupported\")", "def get_otp(self, key):\n packed = self.pack()\n obj = AES.new(key, AES.MODE_ECB)\n ciphertext = obj.encrypt(packed)\n return ciphertext", "def ctr_process(msg, nonce, cnt, key, rounds):\n ivcount = nonce + bc.int_to_binary(cnt, 8)\n x = feistel_encrypt(ivcount,key,rounds)\n y = xor_compare(msg,x)\n return y", "def decrypt(self, ciphertext, output=None):\n\n if self.decrypt not in self._next:\n raise TypeError(\"decrypt() cannot be called after encrypt()\")\n self._next = [self.decrypt]\n \n if output is None:\n plaintext = create_string_buffer(len(ciphertext))\n else:\n plaintext = output\n\n if not is_writeable_buffer(output):\n raise TypeError(\"output must be a bytearray or a writeable memoryview\")\n \n if len(ciphertext) != len(output):\n raise ValueError(\"output must have the same length as the input\"\n \" (%d bytes)\" % len(plaintext))\n\n\n result = raw_ctr_lib.CTR_decrypt(self._state.get(),\n c_uint8_ptr(ciphertext),\n c_uint8_ptr(plaintext),\n c_size_t(len(ciphertext)))\n if result:\n if result == 0x60002:\n raise OverflowError(\"The counter has wrapped around in\"\n \" CTR mode\")\n raise ValueError(\"Error %X while decrypting in CTR mode\" % result)\n \n if output is None:\n return get_raw_buffer(plaintext)\n else:\n return None", "def test_encryption_cycle_default_algorithm_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def decryptor(iv = os.urandom(16), key = os.urandom(32), bc = backend):\n\tcipher = 
Cipher(algorithms.AES(key), modes.CBC(iv), backend = bc)\n\treturn iv, key, cipher.decryptor()", "def gen_ciphertext(message: str) -> str:\r\n key = 1\r\n for i in range(26):\r\n ciphertext = cipher(key, message)\r\n yield f\"Key #{key}: {ciphertext}\"\r\n key += 1", "def ctr(in_file, out_file, block_size, key, op):\n \n with open(in_file, 'rb') as input: # Open files\n with open(out_file, 'wb') as output:\n ctr_str = read_block(input, block_size)[0] # This is the initial ctr\n ctr = byte_str_to_int(ctr_str)\n i = 0 # This is the value which will be added to ctr as we loop\n size = 2**(block_size * 8) # This is the length of the block size in bits\n \n output.write(int_to_byte_str(ctr + i, block_size))\n i += 1\n\n block = [True, True] # This is just to get into the while loop\n while block[1]: # Iterate through the rest of the input\n block = read_block(input, block_size)\n if block [0] != -1:\n this_ctr = (ctr + i) % size\n i += 1\n funced_block = func(int_to_byte_str(this_ctr, block_size), key)\n block_xor = xor(block[0], funced_block)\n output.write(block_xor)", "def call_cipher(self):\n if self.cipher_choice == \"affine\":\n\n if self.crypt == \"encrypt\":\n encrypted_message = Affine().encrypt(self.message.upper())\n otp_encrypted = OneTimePad().encrypt(\n encrypted_message, self.otp.upper())\n return (otp_encrypted)\n\n elif self.crypt == \"decrypt\":\n otp_decrypted = OneTimePad().decrypt(\n self.message.upper(), self.otp.upper())\n decrypted_message = Affine().decrypt(otp_decrypted)\n return (decrypted_message.lower())\n\n elif self.cipher_choice == \"atbash\":\n\n if self.crypt == \"encrypt\":\n encrypted_message = Atbash().encrypt(self.message.upper())\n otp_encrypted = OneTimePad().encrypt(\n encrypted_message, self.otp.upper())\n return (otp_encrypted)\n\n elif self.crypt == \"decrypt\":\n otp_decrypted = OneTimePad().decrypt(\n self.message.upper(), self.otp.upper())\n decrypted_message = Atbash().decrypt(otp_decrypted)\n return (decrypted_message.lower())\n\n elif self.cipher_choice == \"keyword\":\n\n if self.crypt == \"encrypt\":\n encrypted_message = KeywordCipher().encrypt(self.message.upper())\n otp_encrypted = OneTimePad().encrypt(\n encrypted_message, self.otp.upper())\n return (otp_encrypted)\n\n elif self.crypt == \"decrypt\":\n otp_decrypted = OneTimePad().decrypt(\n self.message.upper(), self.otp.upper())\n decrypted_message = KeywordCipher().decrypt(otp_decrypted)\n return (decrypted_message.lower())", "def __init__(self, authBlob=None):\r\n if authBlob == None:\r\n randomData = os.urandom(Globals.SYMMETRIC_KEY_BYTES)\r\n iv = os.urandom(IV_LENGTH)\r\n hmacKey = os.urandom(HMAC_KEY_LENGTH)\r\n else:\r\n randomData, iv, hmacKey = self.unpack(authBlob)\r\n self.alg='aes_256_cfb'\r\n self.iv = iv\r\n self.randomData = randomData\r\n self.hmacKey = hmacKey\r\n self.value = randomData + iv\r\n self.reset()", "def __init__(self, mapping):\n if len(mapping) != 26:\n raise ValueError('SubstitutionCipher requires a 26-letter mapping.')\n self.charsets = [\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',\n 'abcdefghijklmnopqrstuvwxyz'\n ]\n self.mappings = [\n ''.join([l.upper() for l in mapping]),\n ''.join([l.lower() for l in mapping])\n ]\n self._original = ''.join(ch for charset in self.charsets for ch in charset)\n self._shifted = ''.join(ch for mapping in self.mappings for ch in mapping)\n self._encoder = str.maketrans(self._original, self._shifted)\n self._decoder = str.maketrans(self._shifted, self._original)", "def init(cls, key: bytes, passphrase: str, otp: YubikeyOTP, 
**kwargs):\n # Pick out the user key salt length\n user_salt_len = kwargs.pop('user_salt_len', 32)\n\n # Initialise with a dummy context for now\n context = cls(key=key, iv=b'', context=b'', **kwargs)\n\n # Derive the key used for the shared context\n context_key = context._derive_key(passphrase, otp)\n\n # Save the new context and return it.\n context._update_context(context_key, otp, token_bytes(user_salt_len))\n return context", "def crypt(key, data, iv):\n return xtea.crypt(key, data, iv)", "def cbc_encrypt(pt, cipher, iv):\n\n ct = [iv]\n pt = chunks(pt, cipher.block_size)\n for i in range(len(pt)):\n ct += [cipher.encrypt(bytes(fixed_xor(pt[i], ct[i])))]\n return flatten(ct[1:])", "def perform_aes_algorithm(plaintext, key):\n if len(key) == 32:\n print('C.1 AES-128 (Nk=4, Nr=10)\\n')\n elif len(key) == 48:\n print('\\nC.2 AES-192 (Nk=6, Nr=12)\\n')\n else:\n print('\\nC.3 AES-256 (Nk=8, Nr=14)\\n')\n\n print('{:<19} {:}'.format('PLAINTEXT:', plaintext))\n print('{:<19} {:}\\n'.format('KEY:', key))\n\n print('CIPHER (ENCRYPT):')\n ciphertext = encrypt(plaintext, key, verbose=True)\n\n print('\\nINVERSE CIPHER (DECRYPT):')\n decrypt(ciphertext, key, verbose=True)", "def encipher(self):\n ciphertext = \"\"\n for pt, key_char in zip(self.text, self.key):\n char_index = self.char_block.alphabet.index(pt)\n ciphertext += self.char_block.rows[key_char][char_index]\n print(ciphertext)", "def symcipher_from_secret(\n cls,\n secret,\n algorithm=TPM2_ALG.AES,\n mode=TPM2_ALG.CFB,\n nameAlg=TPM2_ALG.SHA256,\n objectAttributes=(\n TPMA_OBJECT.DECRYPT | TPMA_OBJECT.SIGN_ENCRYPT | TPMA_OBJECT.USERWITHAUTH\n ),\n seed=None,\n ):\n nbits = len(secret) * 8\n if algorithm == TPM2_ALG.SM4 and nbits != 128:\n raise ValueError(f\"invalid key size, expected 128, got {nbits}\")\n elif nbits not in (128, 192, 256):\n raise ValueError(\n f\"invalid key size, expected 128, 192 or 256 bits, got {nbits}\"\n )\n pub = TPMT_PUBLIC(\n type=TPM2_ALG.SYMCIPHER, nameAlg=nameAlg, objectAttributes=objectAttributes\n )\n pub.parameters.symDetail.sym.keyBits.sym = nbits\n pub.parameters.symDetail.sym.algorithm = algorithm\n pub.parameters.symDetail.sym.mode.sym = mode\n digsize = get_digest_size(nameAlg)\n if seed and len(seed) != digsize:\n raise ValueError(\n f\"invalid seed size, expected {digsize} but got {len(seed)}\"\n )\n elif not seed:\n seed = secrets.token_bytes(digsize)\n pub.unique.sym = calculate_sym_unique(nameAlg, secret, seed)\n priv = cls(sensitiveType=TPM2_ALG.SYMCIPHER)\n priv.sensitive.bits = secret\n priv.seedValue = seed\n return (priv, pub)", "def encode(key, plain):\n print(\"ciphertext: \", end=\"\")\n\n # used variables\n pos = 0\n key_len = len(key)\n\n # loop over every character in the text\n for char in plain:\n key_pos = pos % key_len\n # leave non-alphabetical characters alone\n if not char.isalpha():\n print(char, end=\"\")\n # cipher characters\n elif char.isupper():\n cipher = chr((char_to_number(char) + char_to_number(key[key_pos])) \\\n % 26 + ord(\"A\"))\n \n print(cipher, end=\"\")\n pos += 1\n else:\n cipher = chr((char_to_number(char) + char_to_number(key[key_pos])) \\\n % 26 + ord(\"a\"))\n \n print(cipher, end=\"\")\n pos += 1\n\n print()", "def decrypt(cls, ciphertext_and_tag, aad, key, iv):", "def _encrypt(self, b):\n from cryptography.hazmat.primitives.ciphers \\\n import Cipher, algorithms, modes\n from cryptography.hazmat.backends import default_backend\n\n backend = default_backend()\n cypher = Cipher(\n algorithms.AES(self.__key), modes.CBC(self.__iv), backend=backend)\n 
encryptor = cypher.encryptor()\n pad_length = 16 - (len(b) % 16)\n b += bytes([pad_length]) * pad_length\n result = encryptor.update(b) + encryptor.finalize()\n return result", "def make_test(test, key, plaintext_bytes, ciphertext, encrypt: True):\n aesCipher = Cipher(algorithms.AES(key),\n modes.ECB(),\n backend=default_backend())\n aesEncryptor = aesCipher.encryptor()\n aesDecryptor = aesCipher.decryptor()\n if encrypt:\n ciphertext_bytes = aesEncryptor.update(plaintext_bytes)\n got_ciphertext = ciphertext_bytes.hex()\n result = \"[PASS]\" if got_ciphertext == ciphertext else \"[FAIL]\"\n print(f\"Test {test}. Expected {ciphertext}, got {got_ciphertext}. Result {result}.\")\n else:\n got_plaintext_bytes = aesDecryptor.update(bytes.fromhex(ciphertext))\n got_plaintext = got_plaintext_bytes.hex()\n result = \"[PASS]\" if got_plaintext == plaintext_bytes.hex() else \"[FAIL]\"\n print(f\"Test {test}. Expected {plaintext_bytes.hex()}, got {got_plaintext}. Result {result}.\")", "def run():\n key = input(\"Enter a 26 letter key: \")\n if not isValidKey(key):\n print(\"Invalid key.\")\n return\n plainText = input(\"Plain Text: \")\n cipherText = substitution(plainText, key)\n print(f\"Cipher Text: {cipherText}\")\n return", "def decrypt(self):\n # Grab the initialization vector from the front of the cipher-text\n iv = self.ciphertext[:AES.block_size]\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return cipher.decrypt(self.ciphertext)[AES.block_size:].rstrip().decode(\"utf-8\"), iv", "def encrypt(plaintext):\n # Pad plaintext\n plaintext = pad(plaintext)\n\n # AES encrypt\n iv = Random.new().read(BS)\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return iv + aes.encrypt(plaintext)", "def symcipher_from_secret(\n cls,\n secret,\n algorithm=TPM2_ALG.AES,\n mode=TPM2_ALG.CFB,\n nameAlg=TPM2_ALG.SHA256,\n objectAttributes=(\n TPMA_OBJECT.DECRYPT | TPMA_OBJECT.SIGN_ENCRYPT | TPMA_OBJECT.USERWITHAUTH\n ),\n seed=None,\n ):\n sa, pa = TPMT_SENSITIVE.symcipher_from_secret(\n secret, algorithm, mode, nameAlg, objectAttributes, seed\n )\n priv = TPM2B_SENSITIVE(sensitiveArea=sa)\n pub = TPM2B_PUBLIC(publicArea=pa)\n return (priv, pub)", "def repeating_key_xor(plaintext, key):\n ciphertext = ''\n i = 0\n\n for byte in plaintext:\n ciphertext += chr(byte ^ key[i])\n\n i = (i + 1) % len(key)\n return ciphertext", "def decrypt(ciphertext, key, iv):\n cipher = AES.new(key, AES.MODE_CFB, iv)\n msg = cipher.decrypt(ciphertext)\n return msg", "def poly1305_key_gen(key: bytes, nonce: bytes) -> bytes:\n\n poly = ChaCha(key, nonce)\n return poly.encrypt(bytes(32))", "def __init__(self, key):\n self._block_size = AES.block_size\n self._key = hashlib.sha256(get_as_bytes(key)).digest()", "def encrypt(self, key, data, mode, padding):\n # this can be disabled by _disable_encryption, so pylint: disable=method-hidden\n try:\n block_size = self.cipher.block_size\n iv_len = block_size // 8\n iv = os.urandom(iv_len)\n\n encryptor = Cipher(self.cipher(key), mode.build(iv), backend=default_backend()).encryptor()\n padder = padding.build(block_size).padder()\n\n padded_data = padder.update(data) + padder.finalize()\n return iv + encryptor.update(padded_data) + encryptor.finalize()\n except Exception:\n error_message = \"Encryption failed\"\n _LOGGER.exception(error_message)\n raise EncryptionError(error_message)", "def ctr_encrypt(pt_bin_list, keys, rounds):\n msg = pt_bin_list\n nonce = generate_random_binary(len(pt_bin_list[0])-8) # Initialization Vector\n counter = 
range(0,len(msg))\n enc_result = \"\"\n\n with multiprocessing.Pool() as p:\n enc_result = p.starmap(ctr_process, zip(msg, repeat(nonce), counter, keys, repeat(rounds)))\n\n enc_result.insert(0,nonce+\"00000000\") # Store padded IV to the start of ciphertext\n return enc_result", "def encrypt(algorithm, key, plaintext, associated_data, iv):\n encryptor = Encryptor(algorithm, key, associated_data, iv)\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n return EncryptedData(encryptor.iv, ciphertext, encryptor.tag)", "def cipher_feedback(self):", "def _encrypt(data):\n cipher = AES.new(bytes(_AES_KEY), AES.MODE_CBC, bytes(_AES_IV))\n\n # Pad to 16 bytes for AES CBC\n for i in range(16 - (len(data) % 16)):\n data += b'\\0'\n\n return cipher.encrypt(data)", "def __init__(self, file_name, key):\n try:\n self._file_name = file_name\n self._encryptor = AES(key.encode())\n self._document = open(self._file_name, \"rb+\")\n except Exception as error:\n print(error)\n sys.exit(1)", "def encrypt(self, plain):\n plain = bytearray(plain)\n key_len = len(self.key)\n env = bytes(c ^ self.key[i % key_len] for i, c in enumerate(plain))\n return env", "def iv():\n return chr(0) * 16", "def __init__(self):\r\n\t\tself.introducer()\r\n\t\tif self.code_mode == \"1\":\r\n\t\t\tif self.input_mode == \"1\":\r\n\t\t\t\tself.encrypt_message()\r\n\t\t\telse:\r\n\t\t\t\tself.encrypt_text_file()\r\n\t\t\t\t#print(\"work in progress\")\r\n\t\telif self.code_mode == \"2\":\r\n\t\t\tif self.input_mode == \"1\":\r\n\t\t\t\tself.decrypt_message()\r\n\t\t\telse:\r\n\t\t\t\tself.decrypt_text_file()\r\n\t\telse:\r\n\t\t\tif self.input_mode == \"1\":\r\n\t\t\t\tself.hack_message()\r\n\t\t\telse:\r\n\t\t\t\tself.hack_text_file()", "def __init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. 
K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))", "def __init__(self, key=None):\n\n self.key = key\n self.cryptor = None\n self.file_ext_targets = ['txt']", "def from_ccw(cls, TP, FP, TN, FN):\n return cls(TP, FN, FP, TN)", "def test_encryption_cycle_aes_256_gcm_iv12_tag16_non_framed(self):\n ciphertext, _ = aws_encryption_sdk.encrypt(\n source=VALUES[\"plaintext_128\"],\n key_provider=self.kms_master_key_provider,\n encryption_context=VALUES[\"encryption_context\"],\n frame_length=0,\n algorithm=Algorithm.AES_256_GCM_IV12_TAG16,\n )\n plaintext, _ = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=self.kms_master_key_provider)\n assert plaintext == VALUES[\"plaintext_128\"]", "def encript(self): \n if (len(sys.argv) == Cconfiguration_caesar.DUAL_PARAMETER) and (int(sys.argv[Cconfiguration_caesar.INCREMENTAL_PARAMETER])>=Cconfiguration_caesar.INITIAL_INT_PARAMETER):\n result = \"\"\n k = int(sys.argv[Cconfiguration_caesar.INCREMENTAL_PARAMETER])\n plaintext = input(\"plaintext: \")\n for i in range(len(plaintext)):\n char = plaintext[i]\n if ((Cconfiguration_caesar.ALPHABET_LOWER_INDEX>ord(char)) or (Cconfiguration_caesar.ALPHABET_LOWER_LIMIT<ord(char))) and ((Cconfiguration_caesar.ALPHABET_UPPER_INDEX>ord(char)) or (Cconfiguration_caesar.ALPHABET_UPPER_LIMIT<ord(char))):\n result += char\n elif (char.isupper()):\n result += chr((ord(char) + k-Cconfiguration_caesar.ALPHABET_UPPER_INDEX) % Cconfiguration_caesar.ALPHABET_LIMIT + Cconfiguration_caesar.ALPHABET_UPPER_INDEX)\n else:\n result += chr((ord(char) + k - Cconfiguration_caesar.ALPHABET_LOWER_INDEX) % Cconfiguration_caesar.ALPHABET_LIMIT + Cconfiguration_caesar.ALPHABET_LOWER_INDEX)\n print(f\"ciphertext: {result}\")\n else:\n print(CextraStatusDefinition.COMMAND_LINE_EERROR)\n exit(Cconfiguration_caesar.INCREMENTAL_PARAMETER)" ]
[ "0.72857714", "0.7123023", "0.6978616", "0.6857513", "0.6587187", "0.6543356", "0.65047663", "0.645677", "0.64431256", "0.6244246", "0.6212461", "0.618751", "0.60572654", "0.6051523", "0.6047843", "0.6029323", "0.592563", "0.5921373", "0.5918702", "0.59144205", "0.5887465", "0.5854102", "0.58235747", "0.5823531", "0.58145875", "0.5801802", "0.5748123", "0.57185954", "0.56878304", "0.5654341", "0.56337583", "0.5608569", "0.5605825", "0.5604627", "0.5594264", "0.5591045", "0.55786836", "0.55704075", "0.55670124", "0.55478376", "0.55210143", "0.5500783", "0.54624915", "0.5460298", "0.5442874", "0.54293597", "0.54249996", "0.5423684", "0.53945667", "0.53860396", "0.53424907", "0.5328771", "0.52961475", "0.52921265", "0.52267003", "0.5226661", "0.5225535", "0.52137524", "0.5210514", "0.5190875", "0.51875114", "0.5142616", "0.5115935", "0.5115291", "0.5109889", "0.51092494", "0.5099088", "0.50725305", "0.50716126", "0.505189", "0.50508296", "0.5044336", "0.50116634", "0.50063604", "0.49989927", "0.49921182", "0.49882433", "0.49867144", "0.49850136", "0.49718034", "0.49560437", "0.495075", "0.49403986", "0.4931631", "0.4930986", "0.49251276", "0.4924235", "0.49167758", "0.49162808", "0.49006012", "0.48872083", "0.48792058", "0.484456", "0.483784", "0.48365033", "0.48330623", "0.48208523", "0.48130918", "0.48116556", "0.48116365" ]
0.7571308
0
Connect to the database
def attach(self): # if i have an existing connection to the back end, do nothing if self.connection is not None: return # otherwise, build the connection specification string spec = [ # the name of the database is required ['dbname', self.database] ] # the others are optional, depending on how the database is configured if self.username is not None: spec.append(['user', self.username]) if self.password is not None: spec.append(('password', self.password)) if self.application is not None: spec.append(('application_name', self.application)) # put it all together spec = ' '.join('='.join(entry) for entry in spec) # establish the connection self.connection = self.postgres.connect(spec) # if the user asked for {quiet} operation if self.quiet: # set the minimum diagnostic level to {warning} self.execute("SET client_min_messages = warning;") # all done return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect_db(self):\n try:\n self.connection = self.engine.connect()\n except Exception:\n self.print_std_error()", "def connect_to_db(self):\n self.read_config()\n print('Connecting to database...', end=\"\")\n self.db_conn = pymysql.connect(host=self.host, user=self.user, db=self.db)\n self.db_cur = self.db_conn.cursor()\n print('[DONE]')", "def connect(self):\n self.conn = psycopg2.connect(database=self.config['dbname'], user=self.config['dbuser'], password=self.config['dbpassword'], host=self.config['dbhost'])", "def connect_db(self) -> sqlite3.Connection:", "def _dbconnect(self):\n user = mm_cfg.STORM_MEMBER_DB_USER\n password = mm_cfg.STORM_MEMBER_DB_PASS\n host = mm_cfg.STORM_MEMBER_DB_HOST\n dbname = mm_cfg.STORM_MEMBER_DB_NAME\n\n\n db = 'postgres://'+user+':'+password+'@'+host+'/'+dbname\n return create_database(db)", "def connect_to_db(self):\n\t\t# connection = psycopg2.connect(database=config.database, user=config.user,password = config.password)\n\t\tconnection = psycopg2.connect(database=config.database, user=config.user)\n\t\treturn connection", "def connect(self):\n\t\t# PostgreSQL PyPgSQL\n\t#\tcp = adbapi.ConnectionPool(\"pyPgSQL.PgSQL\", database=\"test\")\n\t\t# MySQL\n\t\tself.dbpool = adbapi.ConnectionPool('MySQLdb',\n\t\t\thost = self.settings.get('hostname', 'localhost'),\n\t\t\tport = self.settings.get('port', 3306),\n\t\t\tdb = self.settings.get('database'),\n\t\t\tuser = self.settings.get('username'),\n\t\t\tpasswd = self.settings.get('password'),\n\t\t\tcursorclass = MySQLdb.cursors.DictCursor,\n\t\t\tcharset = 'utf8',\n\t\t\tuse_unicode = True,\n\t\t)", "def _connect_to_db(self) -> None:\n self._connection = psycopg2.connect(database=self.dbname,\n user=self.dbuser, password=self.dbpassword,\n host=self.dbhost, port=str(self.dbport))", "def connect_to_db(self):\n try:\n self.connection = pymysql.connect(host=self.host, user=self.user, password=self.password,\n database=self.database_name)\n except OperationalError as e:\n print(e)", "def connect_db(self):\n self.connection = MySQLdb.connect(\n host=MYSQL_HOST, passwd=MYSQL_PWD,\n user=MYSQL_USER, db=MYSQL_DB)", "def db_connection(self):\n try:\n self.connection = connect(host=self.host, user=self.user, password = self.password, db = self.db, cursorclass = self.cursor)\n except MySQLError:\n print(\"DB Error\")", "def connect(self):\n self.db = pymysql.connect(self.db_ip, self.uid, self.pwd, self.db_name)\n self.cursor = self.db.cursor()", "async def connect(self):\n self.logger.info(f'connecting to {self.dsn}')\n await self.dbase.connect()", "def connect_db(self) -> sqlite3.Connection:\n self.connection = sqlite3.connect(self.database)\n self.connection.row_factory = sqlite3.Row\n\n self.get_cursor()", "def connect(self):\n\n self._dbcon = psycopg2.connect(\n database=self._dbname, host=self._host, port=self._port, user=self._username, password=self._password)\n self.result_as_dict(self._result_as_dict)", "def _connect(self):\n database_file = get_database_filename()\n if not os.path.isfile(database_file):\n logger.error('No capabilities database found')\n logger.error(\n 'To import a database from xml: a2p2v --importdb <filename>.xml'\n )\n else:\n self.conn = sqlite3.connect(database_file)", "def connect(self):\n try:\n connect = psycopg2.connect(\"\"\"\n dbname=ovdata_db \n user=larsstegman \n host=localhost \n password=password\n \"\"\")\n return connect\n except Exception as e:\n return ConnectionError()", "def connect_db(self):\n # connect to database\n connection = psycopg2.connect(\n database = 
\"postgres\",\n user = \"postgres\",\n password = os.getenv('DB_PASSWORD'),\n host = \"groalives.cvslmiksgnix.us-east-1.rds.amazonaws.com\",\n port = '5432'\n )\n # create cursor that is used throughout\n\n try:\n self.cursor_dog = connection.cursor()\n print(\"Connected!\")\n except:\n print(\"Connection problem chief!\")", "def connect(self):\n \n # return if already connected\n if self._connected: \n return\n \n # preconditions\n if self._url is None: \n raise Exception(\"Need a connection url\")\n \n self._engine = sqlalchemy.create_engine(self._url)\n\n self._conn = self._engine.connect()\n \n self._metadata = sqlalchemy.MetaData(bind=self._engine)\n \n self._session_maker = sessionmaker(bind=self._engine)\n \n self._connected = True\n \n self._log.info(\"Connected to the database %s\"%(self._url))", "def connect(self):\n # use our connection values to establish a connection\n self.conn = psycopg2.connect(self.connectString)\n # create a psycopg2 cursor that can execute queries\n self.cursor = self.conn.cursor()\n connectionStringEnding = self.databaseName + ' at ' + self.databaseHost\n \"\"\"\n if (self.cursor):\n print('connection made to ' + connectionStringEnding)\n else:\n print('connection problem with ' + connectionStringEnding)\"\"\"", "def connect(self):\n try:\n connection = Connection(host=settings.DATABASE_HOST, port=settings.DATABASE_PORT)\n except ConnectionFailure, error:\n return \"Could not connect to database: %s\" % error\n print \"Could not connect to database: %s \\n\" % error\n if __name__ == \"spider\":\n sys.exit(1)\n self.dbconnection = connection[settings.DATABASE_NAME]", "def _connect(self):\n try:\n self.conn = psycopg2.connect(\n host=self.host,\n user=self.username,\n password=self.password,\n port=self.port,\n dbname=self.dbname\n )\n except psycopg2.DatabaseError as e:\n logger.error(e)\n raise e\n logger.info('Connection opened successfully.')", "def connect_db():\n debug(\"Connecting to DB.\")\n conn = sqlite3.connect(os.path.join(app.root_path, 'banweb.db'))\n conn.row_factory = sqlite3.Row\n return conn", "def connect_db():\n\treturn sqlite3.connect(app.config['DATABASE'])", "def connect_db():\n return sqlite3.connect(DATABASE)", "def connect_db():\n return sqlite3.connect(DATABASE)", "def connect_db():\n return sqlite3.connect(DATABASE)", "def __KConnectDB(self):\n try:\n conn= kdb.connect(dsn = self.__dsn,\n user = self.user_name.encode(),\n password= self.user_password.encode(),\n charset = self.character_set.encode(),\n dialect = int(self.sql_dialect))\n except:\n self.__result.NoteException(cause=\"Exception raised while connecting to database.\")\n else:\n return conn", "def connect_db():\r\n return sqlite3.connect(app.config['DATABASE'])", "def _connect(self):\n if self.settings.get('ENGINE', 'mysql') == 'mysql':\n self._db = self.dbdriver.connect(self.settings['HOST'],\n self.settings['USER'],\n self.settings['PASSWORD'],\n self.settings['NAME'],\n self.settings.get('PORT', 3306))\n elif self.settings['ENGINE'] == 'postgres':\n self._db = self.dbdriver.connect(database=self.settings['NAME'],\n user=self.settings['USER'],\n password=self.settings['PASSWORD'],\n host=self.settings['HOST'])\n elif self.settings['ENGINE'] == 'sqlite':\n self._db = self.dbdriver.connect(self.settings['NAME'])\n self.cursor = self._db.cursor()", "def create_connection(self):\r\n\r\n try:\r\n self.conn = sqlite3.connect(self.database_name)\r\n\r\n except sqlite3.Error:\r\n print('Error connecting to database')", "def connect():\n global db_name\n con = 
sqlite3.connect(db_name)\n return con", "def connect():\n try:\n return psycopg2.connect(\"dbname=tournament\")\n except psycopg2.Error:\n print \"Cannot connect to the database\"", "def connect(self):\r\n\r\n db_config = read_db_config()\r\n\r\n try:\r\n print('Connecting to MySQL database...')\r\n conn = MySQLConnection(**db_config)\r\n\r\n if conn.is_connected():\r\n print('connection established.')\r\n return conn\r\n else:\r\n print('connection failed.')\r\n\r\n except Error as e:\r\n print(e)", "def connect_db():\n return hc_db.HCDB(app.config[\"DATABASE\"])", "def connect(self):\n if self.db is not None:\n self.disconnect()\n\n self.db = MySQLdb.connect(host=self.conn.host, port=self.conn.port, db=self.conn.db, user=self.conn.user, passwd=self.conn.pwd, use_unicode=True, charset='utf8', cursorclass=MySQLdb.cursors.DictCursor)\n self.db.autocommit(self.conn.auto_commit)", "def create_connection(self):\n try:\n self.conn = psycopg2.connect(host=self.host, port=self.port, database=self.database, user=self.user, password=self.password)\n\n except:\n print(\"Unable to connect to the database. Please check your options and try again.\")\n exit()", "def connect(self):\n\n self.logger.debug(\"creating DB connection\")\n conn = sql.connect(**self.connection_arguments)\n self.logger.debug(\"DB connection ready: %r\", conn.get_host_info())\n return conn", "def connect():\n return psycopg2.connect(\"dbname=forum\")", "def connectDB(self): \n #connect to the database\n try:\n print(self.pg_dbname)\n self.conn = psycopg2.connect(\"dbname=%s user=%s password=%s host=%s port=%s\" % (self.pg_dbname, self.pg_username, self.pg_password, self.pg_host, self.pg_port))\n print(\"connected!\")\n except psycopg2.Error as e:\n print(\"I am unable to connect to the database\")\n print(e)\n\n #define cursor\n self.cur = self.conn.cursor()", "def connect():\n\n try:\n return psycopg2.connect(\n dbname = os.environ['DATABASE_NAME'],\n user = os.environ['DATABASE_USER'],\n password = os.environ['DATABASE_PASSWORD'],\n host = ['DATABASE_HOST'],\n port = ['DATABASE_PORT']\n )\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)", "def connect_db():\n logging.info('Connects to the specific database.')\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n g.db = rv\n logging.info(rv)\n return rv", "def obtainDatabaseConnection(self):\n\t\tself.databaseConnector = DatabaseConnector()", "def startConnection(self):\n try:\n self.conn = psycopg2.connect(\"dbname='library' user='postgres' host='localhost' password='Codechef'\")\n # self.conn = psycopg2.connect(\"dbname='library' user='postgres' host='localhost' password='Codechef'\")\n # self.conn = psycopg2.connect(\"dbname='db_b130974cs' user='postgres' host='localhost' password='Codechef'\")\n except:\n print \"I am unable to connect to the database\"\n print \"connected to database...\"\n self.schema = SchemaGraph(self.conn)", "def connect_db():\n return sqlite3.connect(config.db)", "def setup_db_conn():\n # TODO update so DB does not have to be hard coded\n # Currently DB is hardcoded", "def connect(self, databasepath):\n self.__databasepath = databasepath\n try:\n self.__conn = sql.connect(self.__databasepath)\n except Exception as e:\n raise e\n self.__datatbasename = databasepath.split('/')[-1]\n self.__metadata[\"dbname\"] = self.__datatbasename\n self.__metadata[\"dbpath\"] = self.__databasepath\n self.__conn.row_factory = sql.Row\n self.__cur = self.__conn.cursor()", "async def connect(self) -> None:\n if 
hasattr(self.db, \"connect\"):\n await self.db.connect()", "def db_connect():\n if 'db' not in g:\n g.db = sql.connect(current_app.config[\"DATABASE\"], detect_types=sql.PARSE_DECLTYPES)\n g.db.row_factory = sql.Row\n return g.db", "def __connect(self):\n self.conn = pymysql.connect(self.opts.DB_HOST, self.opts.DB_USER,\n self.opts.DB_PASSWORD, self.opts.DB_NAME)", "def connect_database(self, *args, **kwargs):\n return self._get_storage().connect_database(*args, **kwargs)", "def connect(self):\n try:\n self.conn = MySQLdb.connect(db=self.conf['db'], host=self.conf['host'],\n port=self.conf['port'], user=self.conf['user'],\n passwd=self.conf['passwd'],\n charset=self.conf['charset'])\n self.cur = self.conn.cursor()\n self.conn.autocommit(self.conf[\"autocommit\"])\n # print \"connected to \", self.conf['host'], self.conf['db']\n except:\n print (\"MySQL connection failed\")\n raise", "def connect():\n try:\n conn = psycopg2.connect(\"dbname=tournament\")\n return conn\n except:\n print \" I am unable to connect to the database\"", "def connect(self):\n self.connection = mysql.connector.connect(host=self.host, \n database=self.database, \n user=self.user, \n password=self.password, \n auth_plugin=self.auth_plugin)\n if self.connection.is_connected():\n print(\"Succesful connection to the database {} as {}\".format(self.database, self.user))\n self.cursor = self.connection.cursor()\n else:\n print(\"The connection to the database was not successful.\")", "def create_connection():\r\n try:\r\n conn = sq.connect(DBClass.db_name)\r\n except sq.Error as e:\r\n raise e\r\n \r\n return conn", "def connect_to_db(self):\n _config = self.config\n try:\n if self.logger is not None:\n self.logger.debug('Connecting to the database at {:s}:{:d}'.\n format(_config['database']['host'], _config['database']['port']))\n _client = pymongo.MongoClient(host=_config['database']['host'], port=_config['database']['port'])\n # grab main database:\n _db = _client[_config['database']['db']]\n\n except Exception as _e:\n if self.logger is not None:\n self.logger.error(_e)\n self.logger.error('Failed to connect to the database at {:s}:{:d}'.\n format(_config['database']['host'], _config['database']['port']))\n # raise error\n raise ConnectionRefusedError\n try:\n # authenticate\n _db.authenticate(_config['database']['user'], _config['database']['pwd'])\n if self.logger is not None:\n self.logger.debug('Successfully authenticated with the database at {:s}:{:d}'.\n format(_config['database']['host'], _config['database']['port']))\n except Exception as _e:\n if self.logger is not None:\n self.logger.error(_e)\n self.logger.error('Authentication failed for the database at {:s}:{:d}'.\n format(_config['database']['host'], _config['database']['port']))\n raise ConnectionRefusedError\n\n if self.logger is not None:\n self.logger.debug('Successfully connected to the database at {:s}:{:d}'.\n format(_config['database']['host'], _config['database']['port']))\n\n # (re)define self.db\n self.db = dict()\n self.db['client'] = _client\n self.db['db'] = _db", "def connect():\n \n db_config = read_db_config()\n \n try:\n print('Connecting to MySQL database...')\n conn = MySQLConnection(**db_config)\n \n if conn.is_connected():\n print('connection established.')\n else:\n print('connection failed.')\n \n except Error as error:\n print(error)\n \n finally:\n conn.close()\n print('Connection closed.')", "def connect_db():\n\trv = sqlite3.connect(app.config['DATABASE'])\n\trv.row_factory = sqlite3.Row\n\treturn rv", "async def conn(self) -> 
None:\n self.bot.db = await aiosqlite.connect('database.db')", "def connect():\n # returns the connection object to tournament\n # database from PostgreSQL\n return psycopg2.connect(\"dbname=tournament\")", "def do_connectdb(self, db_name):\n if not db_name:\n db_name = input(\"Enter database that you want to connect :\\n\")\n else:\n pass\n self.connection_obj = CRUDTable(db_name)\n print(\"Connection Successful\")", "def connect() -> None:\n # attempt to load a database extension\n global _db_ext\n _db_ext = extensions.database_extension()\n if _db_ext is None:\n # The fallback gets implemented via the default_cap\n # dict defined in _capability()\n log.debug(\"Using internal database module.\")\n _db_ext = dict()\n\n # Tell everyone whether we're threadsafe\n global threadsafe\n threadsafe = _capability(\"reentrant\")\n\n # If fetch the database config, if present\n if \"database\" in config.config:\n database_config = config.config[\"database\"]\n else:\n database_config = dict()\n\n # Call the connect function from the database extension (or fallback)\n func = _capability(\"connect\")\n if func is None:\n func = _connect\n db = func(config=database_config)\n\n database_proxy.initialize(db)\n\n if isinstance(db, (pw.MySQLDatabase, pw.PostgresqlDatabase)):\n db.field_types[\"enum\"] = \"enum\"\n EnumField.native = True\n else:\n EnumField.native = False", "def database_connect():\n # Read the config file\n config = configparser.ConfigParser()\n config.read('config.ini')\n if 'database' not in config['DATABASE']:\n config['DATABASE']['database'] = config['DATABASE']['user']\n\n # Create a connection to the database\n connection = None\n try:\n # Parses the config file and connects using the connect string\n connection = pg8000.connect(database=config['DATABASE']['database'],\n user=config['DATABASE']['user'],\n password=config['DATABASE']['password'],\n host=config['DATABASE']['host'])\n except pg8000.OperationalError as operation_error:\n print(\"\"\"Error, you haven't updated your config.ini or you have a bad\n connection, please try again. 
(Update your files first, then check\n internet connection)\n \"\"\")\n print(operation_error)\n return None\n\n # return the connection to use\n return connection", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect():\n return psycopg2.connect(\"dbname=tournament\")", "def connect(self):\n self.engine = create_engine(self.connection_string)\n self.conn = self.engine.connect()\n self.connected = True", "def connect_to_db(self):\n try:\n self.con = sqlite.connect(self.file)\n self.cur = self.con.cursor()\n print 'Connected to', self.file\n print\n\n except sqlite.Error, e:\n if self.con:\n self.con.rollback()\n\n print 'Error connecting to', self.file\n print 'Exception follows:'\n print e\n print 'Quitting...'\n sys.exit(1)", "def connect(self):\r\n self.con_origem = pyodbc.connect(\"Driver={SQL Server Native Client 11.0};\"\r\n \"Server=%s;\" \r\n \"Database=%s;\"\r\n \"UID=%s;\"\r\n \"PWD=%s;\" %(self.host_origem,self.database_origem,self.user_origem,self.senha_origem))\r\n\r\n\r\n self.con_destino = psycopg2.connect(\r\n \"user=%s password=%s host=%s port=%s dbname=%s\" % (self.user_destino, self.senha_destino, self.host_destino, self.porta_destino, self.database_destino))\r\n\r\n self.cur_origem = self.con_origem.cursor()\r\n self.cur_destino = self.con_destino.cursor()\r\n\r\n self.con_destino.autocommit = True", "def connect():\n try:\n return psycopg2.connect(\"dbname=tournament\")\n except:\n print(\"Connection failed\")", "def connect_to_db():\n return pg.connect(DB_CONN_STRING)", "def db_connect():\n DB_SETTINGS = app.config['DB_SETTINGS']\n engine = create_engine(URL(**DB_SETTINGS))\n connection = engine.connect()\n return connection", "def __connect(self):\n session, metadata, connection = db(dbhost=getattr(self, \"host\"),\n dbuser=getattr(self, \"user\"),\n dbpass=getattr(self, \"password\"),\n dbname=getattr(self, \"dbname\"))\n return session, metadata, connection", "def connect():\r\n params = config()\r\n print('Connecting to the PostgreSQL database...')\r\n global conn\r\n conn = psycopg2.connect(**params)", "def createConnection(self):\n conn = None\n try:\n db_file = Path(\"db/ransomSTATS.DB\")\n conn = sqlite3.connect(db_file)\n # if (conn != \"None\"):\n # # print(\"[+]Connected to database successfully!\")\n # return conn\n # else:\n return conn\n except Exception as e:\n print(f\"Error occured: {e}\")", "def connect(self):\r\n self.con_origem = fdb.connect(\r\n host=self.host_origem, database=self.database_origem,\r\n user=self.user_origem, password=self.senha_origem\r\n )\r\n\r\n self.con_destino = psycopg2.connect(\r\n \"user=%s password=%s host=%s port=%s dbname=%s\" % (self.user_destino, self.senha_destino, self.host_destino, self.porta_destino, self.database_destino))\r\n\r\n self.cur_origem = self.con_origem.cursor()\r\n self.cur_destino = self.con_destino.cursor()\r\n\r\n self.con_destino.autocommit = True", "def open_connection(self):\n 
self.conn = pymysql.connect(host=self.host, user=self.user, passwd=self.passwd, db=self.db)", "def connect(self):\n if self.connection is not None:\n logger.info(\" connection: %s \" % (self.connection is not None))\n return self.connection\n try:\n self.connection = DataPostgres.connect(**self.options)\n except Exception as e:\n logger.critical(\"Unable to connect to DB: {0}\".format(e.message))\n raise\n\n return self.connection", "def db_connect(self):\n \n _LOG.debug(\"Attempting to connect to publication database...\")\n path = self.repo.working_dir + \"/.rdm/publications.db\"\n try:\n self.connection = sqlite.connect(path)\n self.connection.row_factory = sqlite.Row\n _LOG.debug(\"Connected successfully!\")\n except sqlite.Error as e:\n _LOG.exception(\"Could not connect to the publication database. Check read permissions? Check that the .rdm directory exists? If it doesn't, run the 'git rdm init' command.\")\n sys.exit(1)\n return", "def connect(self):\r\n self.con_origem = cx_Oracle.connect(\r\n \"%s/%s@%s/%s\" % (self.user_origem, self.senha_origem, self.host_origem, self.database_origem))\r\n\r\n self.con_destino = psycopg2.connect(\r\n \"user=%s password=%s host=%s port=%s dbname=%s\" % (self.user_destino, self.senha_destino, self.host_destino, self.porta_destino, self.database_destino))\r\n\r\n self.cur_origem = self.con_origem.cursor()\r\n self.cur_destino = self.con_destino.cursor()\r\n\r\n self.con_destino.autocommit = True", "def connect_to_database(self, flavor, user, passwd, host, db_name):\r\n self.engine = create_engine('{}://{}:{}@{}/{}'.format(flavor, user, passwd, host, db_name))\r\n try:\r\n self.engine.connect()\r\n except InterfaceError:\r\n logging.warning('Could not connect to database. Is the host \"%s\" correct?', host)\r\n raise(NotedbUserError)\r\n except ProgrammingError:\r\n logging.warning('Could not connect to database. Check username, pass, and db_name.')\r\n raise(NotedbUserError)\r\n\r\n Session = sessionmaker()\r\n Session.configure(bind=self.engine)\r\n self.session = Session()\r\n self.connected = True\r\n logging.info('Successfully connected to the Database')", "def connect_db(__keyspace__):\n return connection.setup([DATABASE],__keyspace__, protocol_version=3)", "def connect_db():\n db = psycopg2.connect(\n dbname=app.config['DBNAME'],\n user=app.config['DBUSER'],\n password=app.config['DBPASSWORD'],\n host=app.config['DBHOST'],\n port=app.config['DBPORT']\n )\n return db", "def __connect_to_database(self, database_name: str) -> sqlite3.Connection:\n conn = sqlite3.connect(database_name)\n conn.row_factory = sqlite3.Row\n\n return conn", "def connect_db():\n \n con = None\n try:\n \n # connect to the PostgreSQL server\n \n con = psycopg2.connect(host=config_esql.server,\n database=config_esql.database,\n user=config_esql.database,\n password=config_esql.password)\t\n # create a cursor\n return (con)\n \n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n print ('Could not connect to DB. 
Exiting..')\n sys.exit(2)", "def connect():\n connection = psycopg2.connect(host='localhost', database='huwebshop', user='postgres', password='Xplod_555')\n return connection", "def connect(self):\n self.client = MongoClient(self.mongo_uri)\n self.db = self.client[self.db_name]", "def connect_db():\r\n rv = sqlite3.connect(app.config['DATABASE'])\r\n rv.row_factory = sqlite3.Row\r\n return rv", "def connect_to_db():\n with open(r'omppackage\\\\server_config.cfg', 'r') as f:\n conn_string = f.read()\n return pyodbc.connect(conn_string)", "def connect():\n\t\n\tglobal _conn\n\turlparse.uses_netloc.append('postgres')\n\turl = urlparse.urlparse(config.get_database())\n\ttry:\n\t\t_conn = psycopg2.connect(\n\t\tdatabase = url.path[1:],\n\t\tuser = url.username,\n\t\tpassword = url.password,\n\t\thost = url.hostname,\n\t\tport = url.port)\n\n\t\tprint('Connected to database!')\n\t\treturn True\n\texcept:\n\t\tprint('Connection to database failed.')\n\t\texit()", "def connectToDB():\r\n Base = declarative_base()\r\n engine = create_engine('sqlite:///DBLatency.db', echo=True)\r\n Base.metadata.bind = engine\r\n DBSession = sessionmaker(bind=engine)\r\n session = DBSession()\r\n return session", "def connect_db():\n conn = psycopg2.connect(\n database = app.config['DATABASE'],\n host = app.config['HOST'],\n port = app.config['PORT'])\n conn.autocommit = True\n return conn", "def __init__(self):\r\n self.conn = create_connection(DATABASE_PATH)" ]
[ "0.86798936", "0.8313488", "0.8166338", "0.8110618", "0.8100089", "0.8085869", "0.8058323", "0.80558985", "0.8038659", "0.7968234", "0.7959786", "0.7954142", "0.78795606", "0.7867048", "0.78208673", "0.7815891", "0.7813048", "0.7778143", "0.77519286", "0.775042", "0.7729294", "0.77102953", "0.7709704", "0.7709344", "0.7708997", "0.7708997", "0.7708997", "0.7694745", "0.76882946", "0.76811355", "0.7674106", "0.76547265", "0.76494", "0.7645267", "0.7622548", "0.76038253", "0.7597644", "0.75910866", "0.75871575", "0.7586645", "0.7585086", "0.75657433", "0.7564546", "0.75401187", "0.7538626", "0.7531457", "0.7529986", "0.7523922", "0.7522955", "0.75228953", "0.7520764", "0.75171244", "0.75132173", "0.7511621", "0.7507435", "0.7492179", "0.74909186", "0.7477735", "0.74777", "0.74722624", "0.7469394", "0.7465166", "0.74454397", "0.74441713", "0.74441713", "0.74441713", "0.74441713", "0.74441713", "0.74441713", "0.74441713", "0.74441713", "0.74441713", "0.74441713", "0.74441713", "0.7444115", "0.74399763", "0.74349874", "0.74315447", "0.74129647", "0.74061656", "0.7401024", "0.73929566", "0.73894817", "0.7387493", "0.7382627", "0.7381405", "0.73760176", "0.73747146", "0.7368721", "0.7366374", "0.73598063", "0.7354082", "0.7351959", "0.7344792", "0.73344696", "0.7328126", "0.73270774", "0.7324364", "0.7321364", "0.732007", "0.73185986" ]
0.0
-1
Close the connection to the database. Closing a connection makes it unsuitable for any further database access. This applies to all objects that may retain a reference to the connection being closed. Any uncommitted changes will be lost.
def detach(self): # if i don't have an existing connection to the back end, do nothing if self.connection is None: return # otherwise, close the connection status = self.postgres.disconnect(self.connection) # invalidate the member self.connection = None # and return the status return status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close_db_connection(cls):\n db.close()", "def close_database(self):\n if self._conn is not None:\n self._conn.close()\n self._conn = None", "def close_connection(self):\n if self.cursor is None and self.database is None:\n # if we don't have an open connection, do nothing\n return\n self.cursor.close()\n self.database.close()", "def close(self):\n Log.debug('DB -> close')\n # 关闭数据库连接\n self.conn.close()", "def close_connection(self):\n self.dbcon.commit()\n self.dbcon.close()", "def close(self):\n if self._conn is not None:\n self._close_database()", "def Close(self):\n l_logger.debug(\"Closing database connection\")\n self.db.close()", "def close_connection():\n if DbUtil.connection:\n DbUtil.connection.commit()\n DbUtil.connection.close()", "def close(self):\n self.conn.commit()\n self.cursor().close()\n self.conn.close()\n self._db = None", "def close(self):\n if self._dbopen:\n self._dbcon.close()", "def close(self):\n\t\tif self.connected:\n#\t\t\t\t self.__db.close()\n\t\t\tself.__engine.dispose()\n\t\t\tself.connected = False", "def close_connection(self):\n self.cursor.close()\n self.connection.close()", "def close(self):\n self.connection.commit()\n self.cursor.close()\n self.connected = False", "def close(self):\n if self._con is not None:\n self._commit()\n self._con.close()\n self._con = None", "def close(self):\n self.conn.commit()\n self.conn.close()\n lock.release(self.lock)", "def close(self):\n self.__database__.close()", "def _close_connection(self, connection):\n connection.commit()\n connection.close()", "def db_disconnect(self):\n \n if(self.connection):\n self.connection.close()\n return", "def close(self):\r\n debug.write(\"[SourceRPG] handling SQL close\", 1)\r\n self.cursor.close()\r\n self.connection.close()\r\n debug.write(\"[SourceRPG] SQL close handled\", 1)", "def close(self):\n self.db.commit()\n self.db.close()", "def close(self):\n\t\tself.db.close()", "def close(self):\n self.db.close()", "def disconnect(self):\n\n try:\n self.cursor.close()\n self.db.close()\n except cx_Oracle.DatabaseError:\n pass", "def close(self):\n if self.cursor:\n self.cursor.close()\n if self.conn:\n self.conn.close()", "def close_connection(self):\n self.conn.close()", "def disconnect_from_db(self):\n self.db_cur.close()\n self.db_conn.close()", "def close(self):\n self.clean()\n\n for conn in self.conn__.values():\n try:\n conn.commit()\n conn.close()\n except sqlite3.ProgrammingError:\n pass", "def close(self):\n # Instead of actually closing the connection,\n # return it to the pool so that it can be reused.\n if self._con is not None:\n self._pool.returnConnection(self._con)\n self._con = None", "def close(self):\r\n if self.connection._closed:\r\n raise Error('The connection to the database has been closed.')\r\n if self._closed:\r\n raise Error('The cursor has already been closed.')\r\n else:\r\n self._closed = True", "def disconnect(self):\n self.db.close()", "def disconnect(self):\n if self.db:\n try:\n self.db.close()\n finally:\n self.db = None", "def close(self):\n self.cursor.close()\n self.db.close()", "def close_db_connection(self, database_name):\n\n self.connections[database_name].close()\n del self.connections[database_name]", "def close(self):\n if getattr(self, \"_db\", None):\n self._db.close()\n self._db = None", "def close(self): # XXX This should stop everything else from working but currently doesn't!\r\n if self._closed:\r\n raise Error('The connection to the database has already been closed.')\r\n self.rollback()\r\n for table in 
self.tables.keys():\r\n if self.tables[table].open:\r\n self.tables[table]._close()\r\n self._closed = True", "def close(self):\n if getattr(self, \"_db\", None) is not None:\n self._db.close()\n self._db = None", "def close(self):\n if getattr(self, \"_db\", None) is not None:\n self._db.close()\n self._db = None", "def close(self):\n if getattr(self, \"_db\", None) is not None:\n self._db.close()\n self._db = None", "def close(self):\n if getattr(self, \"_db\", None) is not None:\n if not self._db_args['autocommit']:\n self._db.commit()\n self._db.close()\n self._db = None", "def close_connection(self):\n self._conn.close()", "def close(self):\n# self.cursor.close()\n\tself.db.close()", "def __close(self):\n\n self.__cursor.close()\n self.__connection.close()", "def close_connection(self):\r\n if self.conn:\r\n self.conn.close()", "def close(self):\n try:\n self.connection.Close()\n del self.connection\n except:\n pass", "def _close_database(self):\n assert self._conn is not None\n logging.info(\"Closing file {!r}.\".format(self._filename))\n self._cursor.close()\n self._cursor = None\n self._conn.close()\n self._conn = None\n self._filename = None\n self._sessionUID = None", "def close_connection(self) -> None:\n self.conn.close()", "def close(self):\n logger.debug('Function close start')\n\n try:\n self.conn.commit()\n\n # Close the connection\n self.conn.close()\n logger.info(\"DB conn closed\")\n return True\n except Error as e:\n logger.error('Could not close itself: {}'.format(e))\n return False\n\n logger.debug('Function close end')", "def close_connections(self):\n self.db_connection.close_connections()", "def close_connection(self):\n if self.connection is not None:\n self.connection.close()", "def close_connection(self):\n self.connection.close()", "def close_connection(self):\n self.connection.close()", "def close_connection(self):\n self.session.close()", "def close_connection(self):\n\t\tself.session.close()", "def __del__(self):\n\n try:\n self._conn.commit()\n self._conn.close()\n except:\n print(\"---- Error closing database\")\n\n return", "def close(self):\n self.conn.close()", "def close(self):\n self.conn.close()", "def close(self):\n self.conn.close()", "def close_db(connection: sqlite3.Connection):\n connection.commit()\n connection.close()", "def close(self):\n self._flush()\n self.database.close()", "def close(self):\n if self._conn:\n self._conn.close()", "def close(self):\n self._conn.close()", "def close(self):\n\n self.conn.close()", "def close(self):\n if type(self._dbcon).__name__.lower() == 'connection':\n self._dbcon.close()\n else:\n raise dbodriver.DBODriverException('Not connected')", "def close(self):\n with self.connlock:\n self.conn.do_close()", "def close(self):\n if self.authenticated:\n self.db.logout()\n if self.connection is not None:\n self.connection.close()", "def close_connection(exception):\n db = database()\n\n if db is not None:\n db.close()", "def close_db(self, commit=True):\n self.conn.commit()\n self.cur.close()\n self.conn.close()", "def _closeConnection(cursor, db):\n cursor.commit()\n cursor.close()\n db.close()", "def close_connection(self) -> None:\n self.connection.close()", "def close(self):\n if self.conn is not None:\n self.conn.close()\n self.conn = None", "def __close(self):\n with self.__db_lock:\n if self.__open:\n self.__connection.close()\n self.__open = False\n\n return self.__open", "def close(self):\n self.__connection.close()", "def _close_connection(self):\n if self.connection:\n self.connection.destroy()\n 
self.connection = None", "def close(self): \n self.connection.close()", "def close(self):\n if not self.connection:\n return\n\n self.connection.close()\n self.connection = None", "def close(self):\n self.conn.close()\n self.destroy()", "def close(self):\n\t\tif self._conn is not None:\n\t\t\tself._conn.close()", "def close(self):\n self.connection = None", "def closeConnection(self):\n self.engine.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def _close(self):\n if self.__session is not None:\n self._rollback()\n self.__session.close()", "def close_db_session(self, session):\r\n session.close()", "def close(self):\n self.db.remove()\n self.engine.dispose()", "def close_connection(self, connection):\n connection.close()", "def close():\n conn.commit()\n return conn.close()", "def close(self) -> None:\n self.real_conn.close()", "def close(self): \n\t\tself.connection = None", "def DBDisconnect(self):\n if self._dbh:\n self._dbh.close()\n self._dbh = None", "def close_connection_if_open():\n\ttry:\n\t\tconn = flask.g._database_connection\n\texcept AttributeError:\n\t\tpass\n\telse:\n\t\tconn.close()\n\t\tdel flask.g._database_connection", "async def close(self):\n print('Close {}'.format(str(self.__hash__)))\n await self.db.close()", "def __del__(self):\n self.db_conn.close()", "def __del__(self):\n self.db_conn.close()", "def close(self) -> None:\n self.connection.close()", "def close(self):\n if self.connection and not self.connection.closed:\n self.connection.fetch_all()\n if self.transaction:\n self.transaction.close()\n self.driver.recycle(self)", "def __del__(self):\n\t\t# self.close() \n\t\tself.connected = False\n\t\tprint('db close in destructor')" ]
[ "0.84439224", "0.83842707", "0.8336061", "0.8335705", "0.83256894", "0.8237229", "0.81866556", "0.8157642", "0.8143764", "0.8094697", "0.80411196", "0.7991679", "0.7938622", "0.7927069", "0.7838937", "0.7826264", "0.780774", "0.7779079", "0.77731264", "0.7767926", "0.77654034", "0.7758636", "0.77581954", "0.7714032", "0.7713748", "0.7709053", "0.77061516", "0.7702548", "0.77022135", "0.7700942", "0.7695629", "0.7690128", "0.76844335", "0.7671034", "0.7665468", "0.7665067", "0.7665067", "0.7665067", "0.76603156", "0.7656924", "0.7647671", "0.7630582", "0.7618329", "0.7614411", "0.7613382", "0.7607458", "0.7578612", "0.7568423", "0.756358", "0.7563022", "0.7563022", "0.75427675", "0.7537569", "0.7536454", "0.753409", "0.753409", "0.753409", "0.75337756", "0.752847", "0.7516185", "0.7488112", "0.74872094", "0.7474451", "0.7466605", "0.74660325", "0.74611056", "0.7458755", "0.74455243", "0.74422777", "0.7438716", "0.7414288", "0.7406926", "0.7390495", "0.7384398", "0.7384097", "0.73801374", "0.7361078", "0.73567474", "0.73508865", "0.73366183", "0.73366183", "0.73366183", "0.73366183", "0.73366183", "0.73366183", "0.73366183", "0.7329492", "0.73246044", "0.7322368", "0.73050666", "0.7294328", "0.728811", "0.72858876", "0.72843313", "0.72689366", "0.72651905", "0.7238516", "0.7238516", "0.72343", "0.7217476", "0.72037274" ]
0.0
-1
Execute the sequence of SQL statements in {sql} as a single command
def execute(self, *sql): # assemble the command and pass it on to the connection return self.postgres.execute(self.connection, "\n".join(sql))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(self,sql):\n # self.results = self.execute_silent(sql)\n # return self.results\n # sql = self.format_sql(sql, **kwargs)\n sql_list = sql.split(';')\n for stmt in sql_list:\n if stmt:\n stmt = stmt.strip()\n if len(stmt) < 10:\n break\n result = self.execute_silent(stmt)\n #if result is not None,It's select stmt.\n if result:\n return result", "def run_sql(self, sql):\n def mk_run_sql_q(sql):\n return {\n 'type' : 'run_sql',\n 'args': {\n 'sql' : sql\n }\n }\n return self.v1q(mk_run_sql_q(sql))", "def execute_sql_cmds(cursor, cmds, args):\n\tfor cmd in cmds:\n\t\tcursor.execute(cmd, args)\n\t\tif len(args) == 3:\n\t\t\tprint(\"{} rows updated on {} table for {}\".format(cursor.rowcount, str.split(cmd)[1], args[2]))\n\t\telse:\n\t\t\tprint(\"{} rows updated on {} table\".format(cursor.rowcount, str.split(cmd)[1]))", "def runSql(self, sql):\r\n cursor = self.c.cursor()\r\n cursor.execute(sql)\r\n self.c.commit()\r\n cursor.close()\r\n return True", "def execute(self, sql, *args, **kwgs):\n curr = self.conn.cursor()\n curr.execute(sql, *args, **kwgs)\n self.conn.commit()\n curr.close()", "def pg_execute(pg_conn, sql):\n print sql\n # XXX execute command", "def execute_sql_files(connection, sql_files):\n for filename in sql_files:\n statement = resource_text(filename)\n for sub_statement in statement.split(\";\"):\n if sub_statement.strip():\n connection.execute(text(sub_statement))", "def sql_query(sql):\n cur = c.cursor()\n cur.execute(sql)\n c.commit()", "def batched_query(self, sql):\r\n\r\n result_sets = []\r\n messages = \"\"\r\n query = []\r\n last_query=\"\"\r\n\r\n batches = re.split(\"^\\s*(GO(?:\\s+[0-9]+)?)\\s*(?:--.*)?$\",sql,flags=re.M|re.I)\r\n # print(batches)\r\n for b in batches:\r\n if b.upper() == \"GO\":\r\n # execute one\r\n query.append(last_query)\r\n continue\r\n else:\r\n match = re.match(\"^GO\\s+([0-9]+)$\",b,re.I)\r\n if match is not None:\r\n #execute many\r\n for i in range(0,int(match.group(1))):\r\n query.append(last_query)\r\n else:\r\n # not a Go statment\r\n last_query = b\r\n query.append(last_query)\r\n\r\n # print(query)\r\n for q in query:\r\n r = self.query(q)\r\n if r is not None:\r\n result_sets.extend(r)\r\n messages += self.messages\r\n\r\n self.messages = messages\r\n return result_sets", "def execute_query_sequence(db_cursor, all_queries):\n\n for query in all_queries:\n db_cursor.execute(query)", "def run_sql_file(self, sqlfile):\n try:\n queries = self.get_queries_from(sqlfile)\n queries_executed = 0\n for query in queries:\n if self._execute_query(query, values=None): # execute each query\n queries_executed += 1\n print(\"{} Executed queries from {}\".format(queries_executed, sqlfile))\n except pymysql.InternalError as error:\n print(error.args[1])", "def execute_sql(self, a_sql):\n \n sql = sqlalchemy.text(a_sql)\n \n if self._activate_timer:\n result = []\n func = self._conn.execute\n the_timer = ftimer(func, [sql], {}, result, number = 1)\n self._log.debug(\"\\nTime: %s secs \\nDatabase: %s\\nRequest: %s\\n\"%(the_timer, self._url, a_sql))\n return result[0]\n else:\n result = self._conn.execute(sql)\n return result", "def execute(self):\n if self.sql is None:\n self.sql = self.construct_query()\n # Only SQL strings can be split, not (e.g.) 
SQLAlchemy statements.\n if self.multiple_statements and isinstance(self.sql, str):\n statements = self._split_sql()\n else:\n statements = [self.sql]\n single_statement = True if len(statements) == 1 and self.filename else False\n try:\n for statement in statements:\n result_proxy = self.cm.conn.execute(statement)\n log_string = self.filename if single_statement else str(statement)[:25]\n self.logger.info(\"Executed {} against {}\".format(log_string, self.cm.db))\n if result_proxy.cursor:\n return self.fetch_results(result_proxy)\n except Exception as e:\n self.logger.exception(e)\n raise", "def batch_execute(self, sql_list):\n with self.connection.cursor() as dbc:\n responses = []\n for sql in sql_list:\n dbc.execute(sql)\n responses.append(dbc.fetchall())\n return responses", "def run_multiple_sql_statements(statements, fetch=True, cur=None, conn=None, commit=True):\n\n try:\n if conn is None:\n logger.error(\"Connection cannot be None.\")\n raise ValueError(\"Connection cannot be None.\")\n\n if cur is None:\n cur = conn.cursor()\n\n if statements is None:\n logger.error(\"Sql statement list is empty\")\n raise ValueError(\"Sql statement list is empty\")\n\n for _, statement in enumerate(statements):\n logger.debug(\"Executing SQL = \" + statement)\n res = cur.execute(statement)\n if fetch:\n data = cur.fetchall()\n else:\n data = None\n if commit:\n conn.commit()\n except Exception as exception:\n logger.error(exception)\n raise exception\n\n return (res, data)", "def execute_on_each_row(self, a_sql, a_treatment):\n \n sql = sqlalchemy.text(a_sql)\n \n result = self._conn.execute(sql)\n \n row = result.fetchone()\n \n while row:\n a_treatment.executeOnRow(row)\n row = result.fetchone()\n \n result.close()", "def sql_scripts_execute(self, sql_scripts, params={}):\n ps = self.parameter_handler(params)\n log.debug('Got parameters: %s', ps)\n cursor = self._get_cursor()\n for q in sql_scripts:\n with open(q, 'r') as s:\n sql_string_formatted = s.read().format(**ps)\n cursor.execute(sql.SQL(sql_string_formatted), ps)\n self.connection.commit()\n self.connection.close()", "def query(self, *sql):\n self.cursor.execute(*sql)\n self.conn.commit()", "def execute_and_commit_sql(db, sql):\n conn_string = return_connection(db)\n with pg2.connect(conn_string) as conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n conn.commit()", "def __execsql(self, sql, seq):\n return self.sqldb.executemany(sql, [x._asdict() for x in seq])", "def insert_many_execute(self) -> None:\n self.connection.isolation_level = None\n self.cursor.execute('BEGIN TRANSACTION')\n for i in self.__sql_buffer.split(';'):\n self.cursor.execute(i)\n self.__sql_buffer = \"\"\n self.cursor.execute('COMMIT')", "def execute_sql(conn, sql):\n try:\n c = conn.cursor()\n if __debug__:\n print(\"Executing SQL: %s\" % sql)\n c.execute(sql)\n except Error as e:\n print(e)\n conn.commit()", "def run(self):\n rows = None\n if self.sql.startswith('select'):\n conn = self.table.connect()\n with conn.cursor() as curs:\n try:\n curs.execute(self.sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {self.sql}:\n {error.code}\"\"\")\n self.excep = exc\n raise exc\n else:\n rows = curs.fetchall()\n # logging.critical(f\"\"\"executed {self.sql}\"\"\")\n self.result_exec = rows", "def execute(self, sql):\n return self.db.execute(sql)", "def run_new_sql(self):\n\n pass", "def run_db_query(sql):\n with connect_sqlalchemy() as conn:\n return conn.execute(sql)", "def run(self, sql, *args):\n return 
self.database.execute(sql, args)", "def _runsql(self):\n self.logger.info(\"Running SQL where sequence > %s\" % self.seq)\n try:\n results = self.engine.execute(self.sql,\n (self.max_rows, self.seq)).fetchall()\n except sqlalchemy.exc.ProgrammingError, err:\n self.logger.critical(\"Error connecting to DB : %s\" % err)\n return None\n self.logger.info('Fetched %d rows from DB' % len(results))\n if not len(results):\n self.logger.info(\"No rows returned from DB. Finished loading\")\n return False\n return results", "def execute(sql, args=()):\n res = con.execute(sql, args)\n con.commit()\n return res.fetchall()", "def execute(self, sqlcmd):\n self._c.execute(sqlcmd)\n return", "def execute_sql(sql_stmt, host_in='client'):\n #db = create_engine(host_in,'')\n #sql = sqltext(sql_stmt) \n #return db.execute(sql)\n with open('temp.sql','w') as sql:\n sql.write(sql_stmt)\n\n proc=sp.Popen(\"mysql < temp.sql\",stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n out,err = proc.communicate()\n sp.Popen(\"rm temp.sql\",stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n return out.strip(),err.strip()", "def execute(self, sql):\n\n res = self.cur.execute(sql)\n self.cxn.commit()\n\n return res", "def DBExecute( DB, sql, *args ):\n DB.execute( sql, args )\n DB.commit()", "def run(self, statement):\n\n # Remove spaces and EOL\n statement = statement.strip()\n if not statement: # Empty string\n yield (None, None, None, None)\n\n # Split the sql into separate queries and run each one.\n # Unless it's saving a favorite query, in which case we\n # want to save them all together.\n if statement.startswith('\\\\fs'):\n components = [statement]\n\n else:\n components = sqlparse.split(statement)\n\n for sql in components:\n # Remove spaces, eol and semi-colons.\n sql = sql.rstrip(';')\n\n # \\G is treated specially since we have to set the expanded output.\n if sql.endswith('\\\\G'):\n special.set_expanded_output(True)\n sql = sql[:-2].strip()\n try: # Special command\n _logger.debug('Trying a dbspecial command. 
sql: %r', sql)\n cur = self.conn.cursor()\n for result in special.execute(cur, sql):\n yield result\n except special.CommandNotFound: # Regular SQL\n yield self.execute_normal_sql(sql)", "def runSqlNoTransaction(self, sql):\r\n self.c.autocommit = True\r\n cursor = self.c.cursor()\r\n cursor.execute(sql)\r\n self.c.commit()\r\n cursor.close()\r\n self.c.autocommit = False\r\n return True", "def execute_query_list(cur, conn, query_list):\n try:\n for query in query_list:\n cur.execute(query)\n conn.commit()\n except psycopg2.Error as e:\n print(\"Error executing query list\")\n print(e)", "def exec_sys_sql(self, sql):\n conn = self.__get_open_connection(self.sys_conn_hash)\n conn.autocommit(True)\n\n # MySQLdb prints warnings when using \"drop database if exists\",\n # these warnings add no value and can be ignored.\n # http://www.nomadjourney.com/2010/04/suppressing-mysqlmysqldb-warning-messages-from-python/\n filterwarnings('ignore', category = MySQLdb.Warning)\n return self.exec_sql_on_conn(conn, sql)\n resetwarnings()", "def execute(self, sql, params=None):\n if params is None:\n for statement in sql.split(';'):\n self.cursor.execute(statement)\n return\n print('string parameters get escaped to guard against sql injection')\n print(\"resulting sql is \" + \\\n sql.replace(\"?\", \"'\" + params[0].replace(\"'\", \"''\") + \"'\"))\n self.cursor.execute(sql, params)", "def execute(self, sql):\n with self.connection.cursor() as dbc:\n if sql[-1] != ';':\n sql += ';'\n dbc.execute(sql)\n self.last_row = dbc.lastrowid\n try:\n return dbc.fetchall()\n except:\n return", "def run_multiple(self, sql, it):\n self.database.executemany(sql, it)", "def exec_sql(connection, sql):\n cursor = None\n try:\n cursor = connection.cursor()\n cursor.execute(sql)\n except mysql.connector.Error as err:\n raise MySqlError(message=err.msg,\n args=err.args)\n return cursor.fetchall() # return a list of tuples", "def execute_sql_script(conn, script_filename):\n file_contents = open_sql_script(script_filename)\n cursor = conn.cursor()\n cursor.execute(file_contents)\n conn.commit()", "def commit(cls, sql, **kwargs):\n conn = kwargs['conn']\n\n cursor = conn.cursor(dictionary=True, buffered=False)\n if CHECKS_OFF:\n sql = TURN_CHECKS_OFF + sql\n\n for _ in cursor.execute(sql, kwargs.get('args'), multi=True):\n pass\n\n cls.close(conn, cursor)", "def mysql(cursor, query, print_query=False):\n if isinstance(query, basestring):\n query = [query]\n for q in query:\n cursor.execute(q)\n q.replace('\\n', ' ')\n if print_query:\n print('Executed: {}'.format(q))", "def _multi_query_execution(self):\n multi_query_staging = self.query.split(';')\n for query in multi_query_staging:\n self.query = query\n self._execute_command()", "def execute_sql(db_name, sql):\n db_path = 'db/' + db_name + '.db'\n db_file = Path(db_path)\n if not db_file.is_file():\n log(\"execute_sql: Calling %s, but doesn't exist\", db_path, 'warning')\n create_db(db_name)\n db_con = sqlite3.connect(db_path)\n c = db_con.cursor()\n c.execute(sql)\n # output = c.fetchone()\n output = c.fetchall()\n logging.getLogger('sql').info(output)\n db_con.commit()\n db_con.close()\n return output", "def execute(cls, sql):\n cursor = cls.get_conn().cursor()\n cursor.execute(sql)\n return cursor", "async def execute(self, stmt, *args):\n with (await self.application.db.cursor()) as cur:\n await cur.execute(stmt, args)", "def sql(self, db, sql, args=()):\n assert db in ('source', 'target'), u\"First arg of sql() should be 'source' or 'target'\"\n connection = 
self.target_connection if db == 'target' else self.source_connection\n with connection.cursor() as cursor:\n cursor.execute(sql, args)\n return cursor.fetchall() if 'select ' in sql.lower() else ()", "def DBExecuteScript( DB: sqlite3.Connection, sql:str, *args ):\n assert isinstance( DB, sqlite3.Connection )\n DB.executescript( sql )\n DB.commit()", "def make_sql_call(self):\n c_data = {'db_host': self.server,\n 'db_user': self.user,\n 'db_password': self.password,\n 'db_database': self.database}\n db_conn = self.SH.sql.helper.sql_conn_obj(c_data)\n result, detail = db_conn.connect()\n self.print_to_log(detail)\n result, detail = db_conn.execute(self.sql)\n db_conn.shutdown()\n self.print_to_log(detail)", "def run_query(query, args=[], conn=None):\n if conn == None:\n conn = create_connection()\n\n with conn.cursor() as cursor:\n if query.lower().startswith(\"select\"):\n cursor.execute(query=query, args=args)\n return cursor.fetchall()\n else:\n cursor.execute(query=query, args=args)\n try:\n conn.commit()\n except Exception as e:\n print(\"ERROR OCCURED WHILE DB COMMIT --- DB_UTILS\", e)", "def execute(self, sql, vars=None, log_data=True):\n if log_data:\n strip_vars = vars\n if type(log_data) is not bool:\n if type(vars) is dict:\n strip_vars = {key: value if key in log_data else None\n for key, value in vars.items()}\n else:\n strip_vars = [vars[i] if i in log_data else None\n for i in range(len(vars))]\n text = self.mogrify(sql.strip(), strip_vars).decode(\"utf\")\n else:\n text = sql.strip()\n self.logger.info(\"Execute {}: {}\".format(id(self.connection), text))\n super().execute(sql, vars)", "def __call__(self, dbio, *args, **kwargs):\n sql = self.decorated(dbio, *args, **kwargs)\n if not dbio.testing:\n logger.debug(\"'execute' will run\\n{}\".format(sql))\n cur = dbio.conn.cursor()\n cur.execute(sql)\n cur.close()\n dbio.conn.commit()\n else:\n logger.info(\"'execute' will run\\n{}\".format(sql))", "def _db_execute(self, cur, sql_query):\n self.ctx.dbq_count += 1\n \n try:\n a = time.time()\n query, params = self._process_query(sql_query)\n out = cur.execute(query, params)\n b = time.time()\n except:\n if self.printing:\n print >> debug, 'ERR:', str(sql_query)\n if self.ctx.transactions:\n self.ctx.transactions[-1].rollback()\n else:\n self.ctx.rollback()\n raise\n\n if self.printing:\n print >> debug, '%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query))\n return out", "def sql_statement(self, operation, sql):\n if type(sql) != str:\n raise Exception(\n \"Invalid argument: sql of type {} should be: <class 'str'>\".format(type(sql)))\n if operation == 0 or operation == 2 or operation == 3:\n self.__database__.execute(sql)\n elif operation == 1:\n return self.__database__.execute(sql)\n else:\n raise Exception(\"Invalid SQL operation code\")", "def run_sql_from_string(conn, statement):\n statement = sqlalchemy.text(statement)\n conn.execute(statement)", "def build_sql_cmds(sql):\n\tsql_cmds = []\n\n\t# Sql for path table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"path\", column=\"strPath\"))\n\t# SQL for movie table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"movie\", column=\"c22\"))\n\t# SQL for episode table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"episode\", column=\"c18\"))\n\t# SQL for art table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"art\", column=\"url\"))\n\t# SQL for tvshow table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"tvshow\", column=\"c16\"))\n\n\treturn sql_cmds", "def 
run_query(sql_query):\n\n with db_connection.cursor(pymysql.cursors.DictCursor) as cur:\n try:\n items = cur.execute(sql_query)\n db_connection.commit()\n\n return items\n\n except Exception as query_error:\n db_connection.rollback()\n raise query_error", "def execute(self, context):\n logging.info(f\"Running SQL :{self.sql}\")\n self.hook = TrinoHook()\n query = self.hook.run(self.sql, autocommit=self.autocommit, parameters=self.parameters)\n if self.xcom_push:\n return query", "def mogrify_sql_statement(self, content):\n sql = content[0]\n args = content[1]\n\n if self.dbmi.__name__ == \"psycopg2\":\n if len(args) == 0:\n return sql\n else:\n if self.connected:\n try:\n return self.cursor.mogrify(sql, args)\n except Exception as exc:\n print(sql, args)\n raise exc\n else:\n self.connect()\n statement = self.cursor.mogrify(sql, args)\n self.close()\n return statement\n\n elif self.dbmi.__name__ == \"sqlite3\":\n if len(args) == 0:\n return sql\n else:\n # Unfortunately as sqlite does not support\n # the transformation of sql strings and qmarked or\n # named arguments we must make our hands dirty\n # and do it by ourself. :(\n # Doors are open for SQL injection because of the\n # limited python sqlite3 implementation!!!\n pos = 0\n count = 0\n maxcount = 100\n statement = sql\n\n while count < maxcount:\n pos = statement.find(\"?\", pos + 1)\n if pos == -1:\n break\n\n if args[count] is None:\n statement = \"%sNULL%s\" % (statement[0:pos],\n statement[pos + 1:])\n elif isinstance(args[count], (int, long)):\n statement = \"%s%d%s\" % (statement[0:pos], args[count],\n statement[pos + 1:])\n elif isinstance(args[count], float):\n statement = \"%s%f%s\" % (statement[0:pos], args[count],\n statement[pos + 1:])\n elif isinstance(args[count], datetime):\n statement = \"%s\\'%s\\'%s\" % (statement[0:pos], str(args[count]),\n statement[pos + 1:])\n else:\n # Default is a string, this works for datetime\n # objects too\n statement = \"%s\\'%s\\'%s\" % (statement[0:pos],\n str(args[count]),\n statement[pos + 1:])\n count += 1\n\n return statement", "def main():\n args = sys.argv[1:]\n if len(args) != 2:\n print('Usage: sqlite.py db SQL', file=sys.stderr)\n return\n\n db, sql = args\n\n print('Opening {}'.format(args[0]))\n conn = connect(db)\n\n for q in sql.split(';'):\n print('Executing {}'.format(q))\n with conn:\n conn.execute(q)", "def _execute_query(db_path, query, *args):\n\n con = sql.connect(db_path)\n cur = con.cursor()\n cur.execute(query, args)\n con.commit()\n con.close()", "def execute(self, a_sql_obj):\n \n # if sql_obj is a string wrap it in a sqlalchemy.text\n \n if type(a_sql_obj) == type(''):\n sql = sqlalchemy.text(a_sql_obj)\n else:\n sql = a_sql_obj\n \n if self._activate_timer:\n result = []\n func = self._conn.execute\n the_timer = ftimer(func, [sql], {}, result, number = 1)\n self._log.debug(\"\\nTime: %s secs \\nDatabase: %s\\nRequest: %s\\n\"%(the_timer, self._url, sql))\n return result[0]\n else:\n result = self._conn.execute(sql)\n return result", "def executemany(self, sql, sql_args_list):\n # Check that sql arguments have the correct type\n for sql_args in sql_args_list:\n self._check_sql_args(sql_args)\n # Execute the query\n try:\n pgcursor = self.get_postgres_cursor()\n pgcursor.executemany(sql, sql_args_list)\n self._connection.commit()\n except PostgresError, e:\n self._connection.rollback()\n raise RuntimeError(\"Error running SQL query: %s\", str(e))\n finally:\n pgcursor.close()", "def sql_execute2(sql,value):\n cur = c.cursor()\n cur.execute(sql,value)", "def 
execute(query):\n print query\n cursor.execute(query)", "def execute(self, sql, sql_args=None):\n # Check that sql arguments have the correct type\n self._check_sql_args(sql_args)\n # Execute the query\n try:\n pgcursor = self.get_postgres_cursor()\n pgcursor.execute(sql, sql_args)\n self._connection.commit()\n except PostgresError, e:\n self._connection.rollback()\n raise RuntimeError(\"Error running SQL query: %s\", str(e))\n finally:\n pgcursor.close()", "def execute(self, sql, values=None):\n c = self.conn.cursor()\n self.lock.acquire()\n hasReturn = sql.lstrip().upper().startswith(\"SELECT\")\n\n result = []\n try:\n if values:\n c.executemany(sql, values)\n else:\n c.execute(sql)\n\n if hasReturn:\n result = c.fetchall()\n\n except Exception, e:\n Log.error(traceback.format_exc())\n self.conn.rollback()\n finally:\n self.lock.release()\n\n if hasReturn:\n return result", "def _doing(self, data):\n curr = self.conn.cursor()\n curr.executemany(self.sql, data)\n self.conn.commit()\n curr.close()", "def insert_sql(command):\n logging.debug(\"Running insert sql \"+str(command))\n try:\n## host, userid, password, database instance\n con = mdb.connect(serverip, username, userpass, schema);\n cursor = con.cursor()\n \n sql = command\n cursor.execute(sql)\n sql = \" commit;\"\n cursor.execute(sql)\n \n con.close()\n\n except mdb.Error, e:\n logger.error(e)", "def execute(self, sql):\r\n try:\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(sql)\r\n self.connection.commit()\r\n try:\r\n count = cursor.rowcount\r\n res = cursor.fetchall()\r\n except psycopg2.ProgrammingError:\r\n res = count\r\n return res\r\n except psycopg2.Error as e:\r\n # print(sql)\r\n raise e", "def query(self, sql_query, values=None):\n # TODO check sql_query and values to see if they are lists\n # if sql_query is a string\n if isinstance(sql_query, basestring): \n self.cur.execute(sql_query, values)\n self.con.commit()\n # otherwise sql_query should be a list of strings\n else:\n # execute each query with relative values\n for query, sub_values in zip(sql_query, values):\n self.cur.execute(query, sub_values)\n # commit all these queries\n self.con.commit()\n return self.cur.fetchall()", "def execute():", "def do(self, executor):\n sql, kw = self._assemble()\n return executor.execute(\n sql, kw\n )", "def batch(self, sql):\n return _Batch(self.conn, sql)", "def execute_queries(self, queries, **format_args):\n query_len = len(queries)\n commit = False\n\n for i,query in enumerate(queries):\n q = query.format(**format_args)\n\n if i >= query_len - 1:\n commit = True\n\n self._submit_single_q(q, commit=commit)", "def execute_command(self, sql_command, args):\n cursor, connection = self.execute_command_get_connection(sql_command, args)\n cursor.close()\n connection.close()", "def executeAll(lines):", "def execute_sql(sql_string):\n\tcon = mdb.connect(DB_HOST, DB_USER, DB_PASS, DB_NAME)\n\twith con:\n\t\tcur = con.cursor()\n\t\tcur.execute(sql_string)\n\t\treturn cur", "def test_transaction_management_statements(self):\n for script_pattern in (\n \"BEGIN TRANSACTION; %s; COMMIT;\",\n \"BEGIN; %s; END TRANSACTION;\",\n \"/* comment */BEGIN TRANSACTION; %s; /* comment */COMMIT;\",\n \"/* comment */ BEGIN TRANSACTION; %s; /* comment */ COMMIT;\",\n \"\"\"\n-- comment\nBEGIN TRANSACTION;\n\n%s;\n\n-- comment\nCOMMIT;\"\"\",\n ):\n\n test_statement = (\"CREATE TABLE TEST1 (field1 int); \"\n \"DROP TABLE TEST1\")\n script = script_pattern % test_statement\n src = self.tmp()\n\n with open(src, 'wt') as f:\n 
f.write(script)\n\n sqls = SqlScript(src)\n sqls.run(self.engine)", "def executescript(c, of, debug = False):\n\tquery_list = []\n\tquery_list_candidates = of.readlines()\n\tfor line in query_list_candidates:\n\t\t# process out comment lines\n\t\tif line.startswith(\"--\"):\n\t\t\tpass\n\t\telse:\n\t\t\tif line.strip() != \"\":\n\t\t\t\tquery_list.append(line.strip())\n\tquery_list = \" \".join(query_list).split(';')\n\tfor query in query_list:\n\t\tif query.strip():\n\t\t\tif debug:\n\t\t\t\tprint \"executescript [status] : executing query:\\n\\t%s\\n\" % (query.strip())\n\t\t\tc.execute(query.strip())", "def _exec1(self, sql):\n result = self._exec(sql)\n return [row[0] for row in result]", "def execute_many(self, sql, args=None):\r\n args = args or None\r\n with Executer(self) as cursor:\r\n rows = cursor.executemany(sql, args)\r\n return rows", "def run_sql_transformations(self): \n conn = pg2.connect(user='postgres', dbname='penny', host='localhost', port='5432', password='password')\n for d in self.get_list_of_dates():\n print(d) \n df = pd.read_sql(\"Select count(*) as acount from auctions where auctiontime < '\" + d + \"' and qauctionID not in (SELECT DISTINCT AuctionID from bid_transform)\", conn)\n print (df.acount[0])\n if (df.acount[0] > 0):\n bashCommand = \"sudo -u postgres psql -d penny -f new_transformations.sql -v auction_date='\" + d + \"'\"\n process = subprocess.Popen(bashCommand.split())\n output, error = process.communicate()\n conn.close", "def dbexecute(crx, cmd, args=None):\n debug(\"executing {}\".format(cmd))\n crx.execute(cmd, args)", "def main(argv, out=print):\n opts = parser.parse_args(argv[1:])\n out(generate_sql(vars(opts)))", "def sql(self, sql, *args):\n cursor = self.connection.cursor()\n try:\n num_records = cursor.execute(sql, args)\n \n #Restart mysql if it's died for whatever reason, seems quite common\n except (pymysql.err.InterfaceError, pymysql.err.OperationalError):\n self.refresh()\n num_records = cursor.execute(sql, args)\n \n self.connection.commit()\n \n try:\n if sql.startswith('SELECT count(*) FROM'):\n return cursor.fetchall()[0][0]\n \n elif sql.startswith('SELECT'):\n return cursor.fetchall()\n \n elif sql.startswith('UPDATE'):\n return num_records\n \n elif sql.startswith('INSERT'):\n return cursor.lastrowid\n \n elif sql.startswith('DELETE'):\n return num_records\n finally:\n cursor.close()", "def _execute(self, *args):\n if self.db_type == \"postgres\":\n self.cursor.execute(args[0].replace(\"?\", \"%s\"), *args[1:])\n else:\n self.cursor.execute(*args)", "def execute_ddl(self, ddl):\n with get_sink_connection_string(self) as conn:\n with conn.cursor() as cursor:\n cursor.execute(ddl)", "def execute(self, sql, val=()):\n cursor = self.__db.cursor()\n try:\n cursor.execute(sql, val)\n self.__db.commit()\n except Exception as e:\n self.__db.rollback()\n raise e", "def execute_sql(self, sql_statement, to_write=False):\n cur = self.connection.cursor()\n cur.execute(sql_statement)\n if to_write:\n self.connection.commit()\n cur.close()\n else:\n return cur", "def _execute_command(self):\n try:\n self.dw.execute_sql_command(self.query)\n except Exception as e:\n raise e", "def test_execute_multiple(self):\n pg_conn = PostgreSQL(*self.conn_params)\n sql = f\"\"\"CREATE TABLE table1 (id integer, column1 varchar(100),\n column2 float);\n INSERT INTO table1 (id, column1, column2)\n VALUES (1, 'Varchar; text; (100 char)',\n 123456789.012787648484859);\n INSERT INTO table1 (id, column1, column2)\n VALUES (2, 'Varchar; text; (100 char)',\n 
-789.0127876);\n SELECT id, column2 FROM table1;\"\"\"\n result = pg_conn.execute(sql)\n self.assertEqual(len(result), 4)\n self.assertEqual(len(result[3].index), 2)\n pg_conn.execute('DROP TABLE table1')", "def execute_script_from_file(self, filename):\n filename = os.path.join(self.curr_dir, filename)\n # Connect to db\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with open(filename, \"r\", encoding=\"utf-8\") as sql_file:\n sql_script = sql_file.read()\n\n # all SQL commands (split on ';')\n sql_commands = filter(None, sql_script.split(\";\"))\n # Execute every command from the input file\n for command in sql_commands:\n # This will skip and report errors\n # For example, if the tables do not yet exist, this will skip over\n # the DROP TABLE commands\n try:\n cursor.execute(command)\n except OperationalError as msg:\n print(\"Command skipped: \", msg)\n conn.commit()\n conn.close()", "def execute_sql(conn, query):\n\ttry:\n\t\t\tc = conn.cursor()\n\t\t\tc.execute(query)\n\texcept Error as e:\n\t\t\tprint(f\"SQL error :{e}\")\n\t\t\tprint(f\"Attempted to run: {query}\")\n\t\t\t\n\treturn c", "def sql_execute(sql,value):\n cur = c.cursor()\n cur.execute(sql,value)\n results = cur.fetchall()\n return results", "def execSqlWithNoParams(self, sql_str):\n try:\n conn = MySQLdb.connect(**self.connect)\n cursor = conn.cursor()\n result = cursor.execute(sql_str)\n conn.commit()\n conn.close()\n return result\n except Exception, e:\n logger.error(\"mysql util error:\" + str(e))", "def executeQuery(self, segments=None,):\n if not segments: segments = self.segments\n query =''.join(['INSERT INTO ', self.tableName, ' (',\n ', '.join(self.firstLine), ') VALUES ',\n ', '.join([segment for segment in segments]), ';'])\n try:\n self.cursorExecute(query)\n except:\n print 'Bulk query rejected, splitting into smaller queries.'\n self.handleSQLRejection(0, len(self.segments))", "def dbExecute(con, statement, args=[], skipTrace=False):\n cursor = con.cursor()\n stmt = cursor.mogrify(statement, args);\n if not skipTrace:\n trace(\"executing:\" + str(stmt))\n cursor.execute(stmt)\n global quiet\n if not skipTrace:\n trace(\"statusmessage=\" + cursor.statusmessage + \", rowcount=\" + str(cursor.rowcount))\n return cursor" ]
[ "0.7174249", "0.7070815", "0.70517427", "0.7040094", "0.7025959", "0.6985139", "0.6980579", "0.6964437", "0.69108903", "0.6902813", "0.6886875", "0.6844708", "0.6828679", "0.6801381", "0.67388976", "0.67349917", "0.67162806", "0.66934794", "0.66897595", "0.66596186", "0.66156036", "0.6584134", "0.65690196", "0.6567074", "0.65546435", "0.6548934", "0.65483195", "0.65304446", "0.6498274", "0.6480291", "0.64499164", "0.6411375", "0.64072955", "0.6384274", "0.6366235", "0.6354167", "0.63419294", "0.6340998", "0.63359404", "0.63337517", "0.62886363", "0.62780666", "0.6265355", "0.62633663", "0.6235214", "0.62271005", "0.6210583", "0.62018126", "0.62012863", "0.61804056", "0.6170203", "0.61619014", "0.6155839", "0.61449045", "0.61437947", "0.6138311", "0.61308885", "0.61240387", "0.60779744", "0.6073487", "0.6071847", "0.6067789", "0.60539573", "0.6045348", "0.6019019", "0.60180026", "0.60178167", "0.6016901", "0.6015585", "0.60121864", "0.60040224", "0.5999598", "0.5986728", "0.59720415", "0.59679645", "0.59525573", "0.5941064", "0.5940224", "0.59264225", "0.5923069", "0.5921563", "0.5911582", "0.590578", "0.5905266", "0.58815706", "0.58788025", "0.58776355", "0.5873712", "0.5867889", "0.58503574", "0.5842099", "0.58328354", "0.58204556", "0.5813315", "0.580912", "0.58067894", "0.57998323", "0.5796572", "0.5789051", "0.5781086" ]
0.7186094
0
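
The negatives in the row above all orbit one pattern: execute a SQL statement through a cursor, commit on success, roll back on failure. Below is a minimal, runnable rendering of that pattern against Python's stdlib sqlite3 — the function name, schema, and in-memory database are illustrative assumptions, not taken from any snippet in the row:

import sqlite3

def run_query(conn, sql, args=()):
    """Execute one statement: commit on success, roll back on error."""
    cur = conn.cursor()
    try:
        cur.execute(sql, args)               # parameterized; never interpolate strings
        if sql.lstrip().upper().startswith("SELECT"):
            return cur.fetchall()            # reads don't need a commit
        conn.commit()                        # persist the write
        return cur.rowcount
    except sqlite3.Error:
        conn.rollback()                      # undo the partial statement
        raise
    finally:
        cur.close()

conn = sqlite3.connect(":memory:")
run_query(conn, "CREATE TABLE t (id INTEGER)")
run_query(conn, "INSERT INTO t VALUES (?)", (1,))
print(run_query(conn, "SELECT id FROM t"))   # [(1,)]

Passing args as a sequence instead of formatting values into the SQL string is the injection-safe practice several of the snippets above warn about.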
Hook invoked when the context manager is entered
def __enter__(self): # mark the beginning of a transaction self.execute(*self.sql.transaction()) # and hand me back to the caller return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_context(self):\n pass", "def context_started(self, cls, example):", "def handle_context_missing(self):", "def on_start(self, ctx):\n pass", "def __enter__(self):\n self._logger.debug(\"__enter__()\")\n self.install(\"PRE\")", "def on_hook(self) -> None:", "def context_ended(self, cls, example):", "def _hook(self):", "def post_setup(self, context):\n pass", "def post_start_hook(self):\n\n LOG.debug(_('XManager post_start_hook...'))\n\n pass", "def activate(cls, ctx):\r\n if hasattr(ctx, '_on_context_exit'):\r\n raise cls.ContextError('Context actions registered outside this parse context arg active')\r\n\r\n try:\r\n cls._active.append(ctx)\r\n ctx._on_context_exit = []\r\n yield\r\n finally:\r\n for func, args, kwargs in ctx._on_context_exit:\r\n func(*args, **kwargs)\r\n del ctx._on_context_exit\r\n cls._active.pop()", "def post_start(self):", "def on_before_execution(self):\n pass", "def cooked_mode(self) -> ContextManager[None]:", "def main_thread_enter(self):\n ...", "def on_exit(self):\n pass", "def push_context(self):\n raise NotImplementedError()", "def run401_02():\n\n class Context:\n def __init__(self):\n print('__init__()')\n\n def __enter__(self):\n print('__enter__()')\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # print(exc_type, exc_val, exc_tb)\n print('__exit__()')\n\n with Context():\n print('do something')", "def _context(self, context):\n self.context = context\n # if there have been changed lines encountered that haven't yet\n # been add to a hunk.\n if self.changedlines:\n self.add_new_hunk()", "def switch_context(self, context):\r\n self.context_stack.append(self.current_context)\r\n self.current_context = context", "def pre_start_hook(self):\n\n LOG.debug(_('XManager pre_start_hook...'))\n\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_end(self, ctx):\n pass", "def _set_context(self, ctx):\n try:\n current_engine_name = self.parent.engine.name \n if sgtk.platform.current_engine(): \n sgtk.platform.current_engine().destroy()\n sgtk.platform.start_engine(current_engine_name, ctx.tank, ctx)\n except Exception, e:\n QtGui.QMessageBox.critical(self, \n \"Could not Switch!\", \n \"Could not change work area and start a new \" \n \"engine. This can be because the task doesn't \"\n \"have a step. 
Details: %s\" % e)\n return", "def on_run(self):\r\n\r\n\t\tpass", "def before(self, context):\n raise NotImplementedError", "def on_enter(self, userdata):\n pass", "def context(self) -> Any:\n ...", "def start():\n log(\"=========== hook: start ===========\")", "def context(self) -> CONTEXT:", "def hook (self, *args, **kwargs):\n self.launch([\"--fastexit\"])", "def state_processing_enter(cfg, app, win):", "def ctx():\n return None", "def on_pre_enter(self):\n self.setup()\n self.start()", "def on_startup(self) -> None:\n ...", "def __init__(self):\n self._context = {}", "def _post_chroot_block(self):\n pass", "def startTestHook(self):", "def after_get_hook(self):\n pass", "def before_scenario(context, scenario):\n context.resource_manager = contextlib.ExitStack()", "def context(self, context):\n self._context = context", "def before_foreground(self):\n pass", "def setup_with_context_manager(testcase, cm):\n val = cm.__enter__()\n testcase.addCleanup(cm.__exit__, None, None, None)\n return val", "def _post_hooks(self):", "def on_start(self, session):\n pass", "def pre_execute(self):", "def hook(self) -> None:\n self._orig_exit = sys.exit\n sys.exit = self.exit\n sys.excepthook = self.exc_handler", "def on_start(self):", "def on_start(self):", "def on_run(self):\n pass", "def on_exit(self, userdata):\n pass", "def on_context_exit(self, func, *args, **kwargs):\r\n if not hasattr(self, '_on_context_exit'):\r\n raise self.ContextError('Can only register context exit actions when a parse context '\r\n 'is active')\r\n\r\n if not callable(func):\r\n raise TypeError('func must be a callable object')\r\n\r\n self._on_context_exit.append((func, args, kwargs))", "def postRun(self):\n pass", "def on_activate(self) -> None:", "def _start(self):", "def on_pre_enter(self):\n Logger.info('Application: Changed to the Return screen.')", "def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]:", "def _post_construct(self, func, *args, **kwargs):\r\n ParseContext.locate().on_context_exit(func, *args, **kwargs)", "def context(self, context):\n\n self._context = context", "def pre_start(self) -> None:\n pass", "def idle(self):\n return", "def preloop(self):\n super(CoreCommand, self).preloop() # sets up command completion", "def __enter__(self):\n self.__within_context = True\n if not self.__initialized:\n self.__initialization__()\n return self", "def hook_file_opened(self):", "def at_pre_cmd(self):\n pass", "def _pre_chroot_block(self):\n pass", "def setCurrent(ctx):\n THREAD_CONTEXT.current = ctx", "def state_failsafe_enter(cfg, app, win):", "def idle():", "def started(self):", "def after(self, context):\n raise NotImplementedError", "def on_context_menu(self, event):\n self.declaration.context_menu_event()", "def cli(ctx):\n ctx.obj = Context()", "def __enter__(self):\r\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_process(self):\n pass", "def post_execute(self):", "def handle(self, context: Context):\n raise NotImplementedError()", "def __enter__(self):\n pass", "def __enter__(self):\n pass", "def _extra_context(self):\r\n return {}", "def switch(self, context):\n return", "def push_context(self, ctx):\n self._tpl_context = ctx", "def post_exec(self):\n raise NotImplementedError(\"Must implement in frontend subclass.\")", "def enter_context(self, cm):\n # We look up the special methods on the type to match the with\n # statement\n _cm_type = type(cm)\n _exit = 
_cm_type.__exit__\n result = _cm_type.__enter__(cm)\n self._push_cm_exit(cm, _exit)\n return result", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}" ]
[ "0.7259149", "0.72568685", "0.6937752", "0.69183266", "0.6744398", "0.6600733", "0.65994066", "0.65975237", "0.6585466", "0.6491403", "0.64669836", "0.64669746", "0.6438095", "0.64119595", "0.63947785", "0.637796", "0.6347448", "0.6327832", "0.6199883", "0.6191347", "0.6180872", "0.6166143", "0.6166143", "0.6166143", "0.6166143", "0.6166143", "0.6166143", "0.6166143", "0.6166143", "0.6162799", "0.6159081", "0.6140747", "0.6132489", "0.6114094", "0.61115086", "0.61052537", "0.60930604", "0.6080689", "0.6080588", "0.60782874", "0.60486186", "0.6027628", "0.6021808", "0.6020055", "0.60163575", "0.6013771", "0.600788", "0.59995043", "0.5992238", "0.59823817", "0.59769094", "0.59710264", "0.5968781", "0.595877", "0.5956848", "0.5956848", "0.5951233", "0.5945903", "0.5945872", "0.593555", "0.59255874", "0.5921726", "0.59137005", "0.5892156", "0.58915174", "0.58798474", "0.5879638", "0.5874338", "0.5873852", "0.58602446", "0.58510447", "0.5848956", "0.58330566", "0.58294046", "0.5827092", "0.58136195", "0.5810158", "0.58066475", "0.5800107", "0.57916754", "0.57856", "0.57855815", "0.57855815", "0.57855815", "0.57855815", "0.57855815", "0.5784858", "0.57821816", "0.578169", "0.578169", "0.57781476", "0.57640374", "0.57618976", "0.57556474", "0.5755472", "0.5748105", "0.5748105", "0.5748105", "0.5748105", "0.5748105", "0.5748105" ]
0.0
-1
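
The `__enter__` document above starts a transaction and returns `self` so the `with` statement can bind the object to an `as` target. A self-contained sketch of the full protocol — the class name `Tracer` is illustrative, not from the dataset — shows exactly when each hook fires:

class Tracer:
    """Minimal context manager that logs each protocol hook."""

    def __enter__(self):
        # runs when the `with` block is entered
        print("__enter__")
        return self                    # becomes the `as` target

    def __exit__(self, exc_type, exc_val, exc_tb):
        # runs when the block exits, normally or via an exception
        print("__exit__", exc_type)
        return False                   # False = do not suppress exceptions

with Tracer() as t:
    print("body runs between the two hooks")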
Hook invoked when the context manager's block exits
def __exit__(self, exc_type, exc_instance, exc_traceback): # if there were no errors detected if exc_type is None: # commit the transaction to the datastore self.execute(*self.sql.commit()) # otherwise else: # roll back self.execute(*self.sql.rollback()) # indicate that we want to re-raise any exceptions that occurred while executing the # body of the {with} statement return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_exit(self):\n pass", "def __exit__(self, *args, **kwargs):\n\n pass", "def on_exit(self, userdata):\n pass", "def on_end(self, ctx):\n pass", "def __exit__(self, *args):\n pass", "def __exit__(self, *args):\n if self.teardown:\n super().__exit__(*args)", "def main_thread_exit(self):\n ...", "def end():\n return EndBlock()", "def _stopcontextmanager(self):\n yield\n self.stop()", "def _stopcontextmanager(self):\n yield\n self.stop()", "def __exit__(self, exc_type, exc_val, exc_tb):\n pass", "def context_ended(self, cls, example):", "def __exit__(self, exc_type, exc_value, traceback): \n self.shutdown()", "def __exit__(self, exc_type, exc_value, traceback):\n pass", "def __exit__(self, exc_type, exc_val, exc_tb):\r\n pass", "def __exit__(self, exec_type, value, traceback): # suppress(no-self-use)\n IndentedLogger.__exit__(exec_type, value, traceback)\n Task.nest_level -= 1", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def run401_02():\n\n class Context:\n def __init__(self):\n print('__init__()')\n\n def __enter__(self):\n print('__enter__()')\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # print(exc_type, exc_val, exc_tb)\n print('__exit__()')\n\n with Context():\n print('do something')", "def block(self):\n pass", "def __exit__(self, *args):\n self.stop()", "def exit(self) -> None:\n self.on_exit(None)", "def __exit__(self, type, value, traceback):\n context = self._contexts.pop()\n context.reset()", "def exit(self):\n pass", "def _closecontextmanager(self):\n yield\n self.close()", "def _closecontextmanager(self):\n yield\n self.close()", "async def __aexit__(self, exc_type, exc_val, exc_tb):\n pass", "def on_context_exit(self, func, *args, **kwargs):\r\n if not hasattr(self, '_on_context_exit'):\r\n raise self.ContextError('Can only register context exit actions when a parse context '\r\n 'is active')\r\n\r\n if not callable(func):\r\n raise TypeError('func must be a callable object')\r\n\r\n self._on_context_exit.append((func, args, kwargs))", "def __exit__(self, exc_type, exc_value, traceback):\n return None", "def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n self.stop()", "def terminate_context(self):\n\n # break pylint duplicate-code\n self.original_signal_handlers[signal.SIGTERM] = signal.signal(signal.SIGTERM, self.terminate)\n self.original_signal_handlers[signal.SIGINT] = signal.signal(signal.SIGINT, self.terminate)\n try:\n # break pylint duplicate-code\n yield\n finally:\n signal.signal(signal.SIGINT, self.original_signal_handlers[signal.SIGINT])\n signal.signal(signal.SIGTERM, self.original_signal_handlers[signal.SIGTERM])", "def __exit__(self, exc_type, exc_val, exc_tb):\n if self.event_loop:\n self.event_loop.stop()", "def __exit__(self, exc_type, exc_val, exc_tb):\n\n self.quit()", "def __exit__(self, exc_type, exc_val, exc_tb) -> None: # type: ignore\n self.shutdown()", "def finish(self) -> None:\n self.__exit__(None, None, None)", "def exit_handler():\n logger.debug(\"Application exit caught.\")\n save_state()", "def hook(self) -> None:\n self._orig_exit = sys.exit\n sys.exit = self.exit\n sys.excepthook = self.exc_handler", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n 
self.close()", "def exit(self):\n logger.debug(\"EXIT\")", "async def __aexit__(self, exc_type, exc_val, exc_tb):\n self._entered -= 1\n if not self._awaited and not self._entered:\n await self._teardown()\n self._active = False", "def end(self):\n ...", "def _post_construct(self, func, *args, **kwargs):\r\n ParseContext.locate().on_context_exit(func, *args, **kwargs)", "def __exit__(self, *ex_info):\n if self.device:\n self._device_ctx.__exit__(*ex_info)\n\n stdout('')\n stdout('Finished {0} in {1:0.1f}s'.format(self.name, self.timer_elapsed('script')))", "def __exit__(self, exc_type, exc_value, traceback):\n if self._close_on_exit:\n self.close()", "def analyze_on_exit(self):\n atexit.register(exit_handler, self)", "def __exit__(self, *excinfo):\n pass", "def __exit(self, *args):\n sys.exit(0)", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()", "def on_closing(self, *args):\n pass", "def __exit__(self, exc_type, exc_value, exc_traceback):\n self.end = self()\n print(str(self))", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n return False # any exception is raised by the with statement.", "def __exit__(self, exc_type, exc_value, traceback):\n return self.close()", "def __exit__(self, exc_type, exc_value, exc_traceback):\n\n self.close()", "def __exit_handler(signum, frame):\n #print \"EH START\"\n with this.lock:\n exit_barrier = this.exit_barrier\n\n if exit_barrier is not None:\n # Meet up with the worker\n this.exit_barrier.wait()\n #print \"EH FIRST BARRIER\"\n # Wait for the worker to be done\n this.finish_barrier.wait()\n #print \"EH HANDLER FINISHED\"\n #print \"EH DONE\"\n sys.exit(0)", "def _handler_direct_access_exit(self, *args, **kwargs):", "def __exit__(self, type=None, value=None, traceback=None):\n self.stop()", "def __exit__(self, exc_type, exc_value, exc_tb) -> None:\n self.destroy()", "def __exit__(self, exc_type, exc_value, exc_tb) -> None:\n self.destroy()", "def __exit__(self, *args, **kwargs):\n\t\tfcntl.flock(self.file, fcntl.LOCK_UN)\n\t\tself.file.close()\n\t\tfor function in self.exit_functions:\n\t\t\tfunction()", "def __exit__(self, *exc):\n return self._lock.__exit__(*exc)", "def __exit__(self, type_, value, traceback):\n self.close()", "def _on_finalize(self):\n pass", "def __exit__(self, exception_type, exception, traceback):\n self.close()", "def __exit__(self, exception_type, exception, traceback):\n self.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self._unlock()\n # Let all exceptions through by not returning True.", "def exit(context):\n return _nfc.exit(context)", "def __exit__(self, exc_type, exc_val, exc_tb):\n self._finalize()\n # Do not return anything to pass on all other exceptions.", "def __exit__(self, type, value, traceback):\n\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self._cleanup()", "def _on_exit(cls, error=None):\n # type: (Exception) -> None\n pass", "def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):\r\n self.close()", "def shutdown(self):\n yield self.cxn.manager.expire_context(self.server.ID,\n context=self.ctx)", "def __exit__(self, exc_type, exc_value, traceback):\n self._reader.__exit__(exc_type, exc_value, 
traceback)", "def test_compiler_finally_block(patch, compiler, lines, tree):\n patch.object(Compiler, 'subtree')\n compiler.finally_block(tree, '1')\n lines.set_exit.assert_called_with(tree.line())\n lines.set_scope.assert_called_with(tree.line(), '1')\n kwargs = {'enter': tree.nested_block.line(), 'parent': '1'}\n lines.append.assert_called_with('finally', tree.line(), **kwargs)\n Compiler.subtree.assert_called_with(tree.nested_block, parent=tree.line())", "def on_closing_event(self):\n self.exit_event(None)", "def on_exit(self, function):\n\t\tself.exit_functions += [function]", "async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self._close()", "def handle_termination(self):\n pass", "def __exit__(self, type, value, traceback) :\n if self.spec :\n self.handle.close()\n self.handle = None", "def __exit__(self, *_exc):\r\n self.release()", "def __exit__(self, exception, value, trace):\n self.manual_exit()", "def __exit__(self, exception_type, exception_value, traceback):\n QueuingContext._active_contexts.remove(self)", "def hook (self, *args, **kwargs):\n self.launch([\"--fastexit\"])", "def exit(self):\n self.current.exit()", "def __exit__(self):\n self._stop_all()", "def handler(self):\n\t\tself.exitClient()", "def __exit__(self, exc_type, exc_value, traceback):\n nvmlShutdown()", "def do_exit(self, _):\n return True", "def finalize(self, interrupted=False):\n pass", "def finalize():\n pass", "def finalize():\n pass", "def __exit__(self, exc_type, exc_value, traceback):\n if self.returncode is None and self.proc.poll() is None:\n self.proc.terminate()" ]
[ "0.6937684", "0.67767626", "0.6692932", "0.66752976", "0.666442", "0.6626548", "0.66104794", "0.64507276", "0.63927466", "0.63927466", "0.6357777", "0.6347324", "0.6330202", "0.6329335", "0.6317543", "0.6316851", "0.6314924", "0.6314924", "0.6314924", "0.62950003", "0.6293853", "0.6290503", "0.62649584", "0.62514913", "0.624936", "0.62470806", "0.62470806", "0.62331426", "0.62115633", "0.620844", "0.6194623", "0.61833984", "0.61705494", "0.6162008", "0.61555046", "0.61296386", "0.6126591", "0.61251086", "0.6116879", "0.6116879", "0.6116879", "0.6116879", "0.60994434", "0.609601", "0.60887283", "0.60751563", "0.60646784", "0.6063113", "0.60541433", "0.6052187", "0.6042117", "0.6041025", "0.6041025", "0.6041025", "0.6041025", "0.6041025", "0.6037963", "0.6021354", "0.60016376", "0.5989195", "0.598327", "0.59754026", "0.5964012", "0.595749", "0.5955189", "0.5953267", "0.5953267", "0.59529185", "0.5951325", "0.5948407", "0.59478456", "0.5946881", "0.5946881", "0.59456426", "0.59448445", "0.5938896", "0.5929999", "0.59215355", "0.5911778", "0.5911566", "0.59097904", "0.5908327", "0.5901192", "0.59007734", "0.5886469", "0.58762586", "0.58752793", "0.5874631", "0.5872231", "0.5857813", "0.58528495", "0.5845789", "0.5843164", "0.58431137", "0.58392817", "0.5838866", "0.58307856", "0.5830251", "0.5825436", "0.5825436", "0.5824108" ]
0.0
-1
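
Taken together, the `__enter__`/`__exit__` pair in the last two rows implements commit-on-success, rollback-on-error. The `self.sql` helper that generates the statements is not shown anywhere above, so the sketch below substitutes literal BEGIN/COMMIT/ROLLBACK against stdlib sqlite3 — that substitution is an assumption:

import sqlite3

class Transaction:
    """Commit on clean exit, roll back if the with-body raised."""

    def __init__(self, conn):
        self.conn = conn

    def __enter__(self):
        self.conn.execute("BEGIN")     # mark the beginning of the transaction
        return self.conn

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            self.conn.execute("COMMIT")
        else:
            self.conn.execute("ROLLBACK")
        return False                   # re-raise whatever the body raised

conn = sqlite3.connect(":memory:", isolation_level=None)   # manual transactions
conn.execute("CREATE TABLE t (id INTEGER)")
with Transaction(conn) as c:
    c.execute("INSERT INTO t VALUES (1)")
print(conn.execute("SELECT COUNT(*) FROM t").fetchone())   # (1,)

Returning False from `__exit__` matches the original document: the transaction is rolled back, but the exception still propagates to the caller.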
Returns the model properties as a dict
def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n for prop in self.__properties__\n if prop in vars(self)\n }\n rv.update(self._props)\n return rv", "def get_properties(self):\n return self.properties", "def get_properties():", "def getProperties():", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def getProperties(self):\n return self.properties", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})", "def get_model_properties(self):\n properties = {}\n\n filename = self._get_data_filename(\"modelargs.json\")\n with open(filename, \"r\") as f:\n results = json.loads(f.read())\n properties[\"image_size\"] = results.get(\"image_size\")\n properties[\"num_classes\"] = results.get(\"num_classes\")\n properties[\"model\"] = results.get(\"model\")\n properties[\"name\"] = results.get(\"name\")\n properties[\"filter_size\"] = results.get(\"filter_size\", 3)\n properties[\"increase_factor\"] = results.get(\"increase_factor\", 0)\n self.model = properties[\"name\"] # regardless of the name of the folder, this will get the proper model name (i.e. 
<modelname>.cntk)\n\n # optional property\n properties[\"trainer\"] = results.get(\"trainer\", \"CNTK 2.2\")\n\n self._ensure_model_file()\n properties[\"size_mb\"] = round(os.path.getsize(self.model_file) / (1000 * 1000))\n\n return properties", "def as_dict(self):\n result = {}\n for attr in self.__attr:\n result[attr] = getattr(self, attr)\n return result", "def to_dict_model(self) -> dict:\n return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def get_modelDict(self):\n return self.__modelDict", "def attributes(self):\n return dict(self.__attributes)", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def to_dict(self):\n result = {}\n for p in self.json_properties():\n value = getattr(self, p)\n if isinstance(value, datetime.datetime):\n value = value.strftime('%s%f')[:-3]\n result[Jsonifiable.transform_to_camelcase(p)] = value\n return result", "def properties(self):\n return self._props", "def properties(self):\n pass", "def to_dict(self):\n d = {}\n for attr in self.__class__.attributes:\n d[attr] = getattr(self, attr)\n return d", "def properties_get(self):\n return self._get('properties')", "def _collect_properties(self):\n properties = {\n 'userid': self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties", "def getPropertyDict(self):\n \n d = self.getChild('__properties')\n if d:\n return d.getDict()\n else:\n return {}", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def to_dict(self, include=None):\n _MODEL = type(self)\n repr_dict = {}\n if include is None:\n include = []\n for name, prop in _MODEL._properties.iteritems():\n if hasattr(prop, 'public') and getattr(prop, 'public', False):\n include.append(name)\n\n for name in include:\n # check if this property is even allowed to be public\n # or has a value set\n if not hasattr(self, name):\n continue\n\n value = getattr(self, name)\n if type(getattr(_MODEL, name)) == ndb.StructuredProperty:\n if isinstance(value, list):\n items = []\n for item in value:\n items.append(item.to_dict(include=None))\n repr_dict[name] = items\n else:\n repr_dict[name] = value.to_dict(include=None)\n elif isinstance(value, date):\n repr_dict[name] = value.isoformat()\n elif isinstance(value, ndb.Key):\n repr_dict[name] = value.urlsafe()\n else:\n repr_dict[name] = value\n\n if self._key:\n repr_dict['key'] = self.get_key_urlsafe()\n return repr_dict", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k == 'POSSIBLE_METHODS':\n continue\n if k == 'keysamplers':\n properties[k] = [i.to_dict() for i in self.__dict__[k] if hasattr(i,'to_dict')]\n elif k in {'pooler'}:\n properties[k] = self.__dict__[k].to_dict()\n 
else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def properties(self):\n\n return self._properties", "def ToDict(self):\n atributes_dictionary = {}\n for key, value in self.__dict__.iteritems():\n atributes_dictionary[key] = value\n return atributes_dictionary", "def properties(self):", "def properties(self):", "def properties(self):", "def modelPropertiesDictionary(sql_row_list):\n \n properties_dictionary = \\\n {\n \"id\": sql_row_list[0],\n \"name\": sql_row_list[1],\n \"last_deploy_timestamp\": sql_row_list[2],\n \"active_version\": sql_row_list[3],\n \"build_id\": sql_row_list[4]\n };\n\n return properties_dictionary;", "def as_dict(self):\n data = dict()\n for name in self.fields:\n val = getattr(self, name)\n if isinstance(val, Model):\n val = val.as_dict()\n elif isinstance(val, list) and val and isinstance(val[0], Model):\n val = [sub.as_dict() for sub in val]\n data[name] = val\n return data", "def to_dict(self):\n if self._dict is not None:\n return self._dict\n\n result = {}\n for key in self.ATTRIBUTES:\n value = getattr(self, key)\n if value:\n result[key] = value\n self._dict = result\n return result", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def to_dict(self):\n _dict = {}\n for f in self._meta.fields:\n if f.name == 'created':\n _dict[f.name] = str(f.value_from_object(self))\n else:\n _dict[f.name] = f.value_from_object(self)\n\n return _dict", "def to_dict(self):\r\n return self.__dict__", "def properties(self):\n return None", "def properties(self):\n return None", "def to_dict(self):\n return attr.asdict(self)", "def as_dict(self):\n return self.__dict__", "def _get_model_state(self) -> dict:\n return dict(model=self.model, kwargs=self._model_kwargs)", "def dictify(self):\n return {\n \"name\" : self.name,\n \"lastname\" : self.lastname,\n \"phone\" : self.phone,\n \"email\" : self.email\n }", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n # \"created_by\": self.created_by,\n # \"created_on\": self.created_on,\n # \"modified_by\": self.modified_by,\n # \"modified_on\": self.modified_on\n }", "def properties(self):\r\n return resources.Properties(self)", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def properties(self, pk):\n return JsonResponse(self._get_properties(pk))", "def to_dict(self):\n return vars(self)", "def to_dict(self):\n\n # Check if is the right instance.\n if isinstance(self, db.Model):\n # construct a dictionary from column names and values.\n dict_representation = {c.name: getattr(self, c.name) for c in self.__table__.columns}\n return dict_representation\n else:\n raise AttributeError(type(self).__name__ + \" is not instance of \" + db.Model.__name__)", "def bson_properties(self):\n return []", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name\n }", "def get_dict(self):\n return", "def to_dict(self):\n return to_dict(self.__dict__)", "def to_json(self):\n properties = self.to_dict()\n if isinstance(self, db.Model):\n properties['id'] = unicode(self.key().id())\n return json.dumps(properties)", "def to_dict(self):", "def to_dict(self):\n return 
self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def get_attributes(self) -> Dict[str, str]:\n pass", "def config(self) -> ModelConfigDict:\n return self.config_obj.to_dict()", "def properties(self):\n return self.properties_with_uid[1:]", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def serialise(self):\n return {\n 'id': self.id,\n 'category_id': self.category_id,\n 'name': self.name,\n 'description': self.description,\n 'quantity': self.quantity,\n 'price': self.price,\n 'user_id': self.user_id\n }", "def getPropertiesAll():", "def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def model_info(self):\n if not self._model_info:\n self._load_model_info()\n try:\n data = json.loads(self._model_info)\n except (TypeError, ValueError):\n data = {}\n return data", "def to_dict(self):\n return {\n 'name': self.get_name(),\n 'description': self.get_description()\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def as_dict(self):\n return self.__dict__", "def to_dict(self):\r\n\r\n return {\r\n 'product_id': self.product_id,\r\n 'product_name': self.product_name\r\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }", "def asdict(self):\n return attr.asdict(self)", "def to_dict(self) -> dict:", "def getDict(self):\n res = {}\n for attr, value in self.__dict__.iteritems():\n if type(attr) is IntType or type(attr) is StringType or type(attr) is LongType or type(attr) is UnicodeType:\n res[attr] = value\n elif isinstance(attr, datetime.datetime):\n res[attr] = value.isoformat('-')\n \n return res", "def attributes(self):\n return self.__dict.keys()", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}" ]
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.690053", "0.6881568", "0.6881568", "0.6857664", "0.68415916", "0.68122137", "0.680096", "0.67914945", "0.6757063", "0.6753585", "0.6741746", "0.6741746", "0.6741746", "0.6735291", "0.67126125", "0.6697801", "0.6695801", "0.6689893", "0.6680752", "0.66802895", "0.66802895", "0.66802895", "0.66547817", "0.66495687", "0.6633999", "0.6619567", "0.6619567", "0.66156983", "0.66049474", "0.6590706", "0.6590706", "0.6590206", "0.6587873", "0.65861845", "0.65822417", "0.65794736", "0.65792733", "0.657747", "0.6571183", "0.65662557", "0.65637356", "0.6539919", "0.65396816", "0.65283066", "0.65252614", "0.6513477", "0.65098846", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.6507418", "0.6505772", "0.65015876", "0.64951885", "0.64951885", "0.64951885", "0.64857763", "0.6474329", "0.6469453", "0.64684683", "0.6453606", "0.6453024", "0.6453024", "0.6430734", "0.6429058", "0.6426903", "0.64215595", "0.64201874", "0.6417152", "0.6414739", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.64035517" ]
0.0
-1
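
The `to_dict` document above recurses through lists, nested models, and dict values before falling back to the raw attribute. Below is a trimmed sketch of the same recursion, with the generated-client machinery (`openapi_types`, `sensitive_list`) assumed away and the `Item` class invented purely for the demo:

def to_dict(obj):
    """Recursively convert an object graph into plain dicts and lists."""
    if isinstance(obj, list):
        return [to_dict(v) for v in obj]
    if isinstance(obj, dict):
        return {k: to_dict(v) for k, v in obj.items()}
    if hasattr(obj, "__dict__"):       # a model instance: walk its attributes
        return {k: to_dict(v) for k, v in vars(obj).items()}
    return obj                         # plain value (str, int, ...): keep as-is

class Item:
    def __init__(self, name, tags):
        self.name, self.tags = name, tags

print(to_dict(Item("widget", [Item("tag", [])])))
# {'name': 'widget', 'tags': [{'name': 'tag', 'tags': []}]}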
Returns the string representation of the model
def to_str(self): import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding("utf-8") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n model_fields = get_model_fields(\n opts.model,\n foreign=False,\n m2m=False,\n exclude=self.exclude_from_str\n )\n # TODO: replace the above with the below to remove the get_model_fields call:\n # model_fields = [\n # f for f in opts.get_fields()\n # if f.concrete\n # and not (f.primary_key or f.is_relation or f.name in self.exclude_from_str)\n # ]\n result = \" \".join(\n [\n str(fld.value_from_object(self))\n for fld in model_fields\n if fld.value_from_object(self)\n ]\n )\n return result.strip() or super().__str__()", "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __repr__(self):\n\n mod = f\"{self.__class__.__name__} Model\"\n try:\n mod += f': {self.filename}'\n except AttributeError:\n pass\n s = [mod]\n for name, v in self.metadata.items():\n s += [f\"{name:16} : {v}\"]\n return '\\n'.join(s)", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )", "def model_info(self) -> str:\n return self._model_info(self.model).decode(\"utf-8\")", "def __str__(self):\n return str(self.serialize())", "def __str__ (self) :\n\n return self.as_string()", "def __str__(self):\n\n return self.toString()", "def __str__(self):\n msg = [\n f'{self.model=}',\n f'{self.field=}',\n f'{self.fxx=}',\n f'{self.date=}',\n f'{self.priority=}',\n ]\n return '\\n'.join(msg)", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.extended_object.get_title()\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return grid_search_to_str(self.model)", "def __str__(self):\n return self.toString()", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return str(self.__dict__)", "def to_representation(self) -> str:\n raise NotImplementedError()", "def __str__(self):\n return str(self.obj)", "def __str__(self):\n return self.make_flat()", "def dump_model(self):", "def __str__(self):\n return str(self.__dict__['_obj'])", "def __str__(self) -> str:\n model_str = [\"\\nModel info:\\n\", \" Unimodal encoder:\\n\"]\n\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_encoder[modality]}\")\n\n model_str.append(\"\\n\\n Unimodal decoder:\\n\")\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_decoder[modality]}\")\n\n if self.multimodal_decoder is not None:\n model_str.append(\"\\n\\n Multimodal decoder:\\n\")\n model_str.append(f\" {self.multimodal_decoder}\")\n\n return \"\".join(model_str)", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence 
lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' most common words: ' + str(self.common_word) + '\\n'\n\n return s", "def to_string(self):\r\n return self.__str__()", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __str__(self):\n return str(self.get_data())", "def __str__(self):\n return f\"model {self._name}\"", "def __str__(self):\n\n return self.raw_field", "def __repr__(self):\n \n s = 'text model name: ' + self.name + '\\n' \n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' number of word stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of commas counts: ' + str(len(self.commas_per_sentence)) + '\\n'\n return s", "def serialize(self):\n\n\t\treturn str(self)", "def __str__(self):\n return self.get_str()", "def serialize(self):\n\n return str(self)", "def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)", "def __str__(self):\n return self.s", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths))\\\n + '\\n'\n s += ' number of punctuation types: ' + str(len(self.punctuation))\n return s", "def dumps(self, indent=0):\n outstr = \" \"*indent + \"MewloDbModel object '{0}' attribute values:\\n\".format(self.__class__.__name__)\n public_props = (name for name in dir(object) if not name.startswith('_'))\n for name in public_props:\n outstr += \" \"*indent + \"{0}: {1}\\n\".format(name, str(getattr(self,name)))\n return outstr", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.title or str(_(\"Empty 
title\"))\n\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442643", "0.74416703", "0.7433768", "0.7411771", "0.7405439", "0.7379557", "0.7361716", "0.7361716", "0.732774", "0.7325511", "0.732528", "0.73097324", "0.73078936", "0.73001266", "0.7296789", "0.7292791", "0.7289445", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7279803", "0.7261615", "0.7250399", "0.7244789", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068" ]
0.0
-1
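The negatives in the record above converge on one idiom: `__str__`/`__repr__` delegating to a single serializer such as `to_str`. A minimal sketch of that idiom, with all class and attribute names hypothetical:

import pprint

class TextModel:
    # Hypothetical class; illustrates the to_str/__repr__ delegation seen in the negatives.
    def __init__(self, name, words):
        self.name = name
        self.words = words

    def to_str(self):
        # Serialize the instance's public attributes for display.
        return pprint.pformat(self.__dict__)

    def __repr__(self):
        # Both repr() and str() route through the same serializer.
        return self.to_str()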
Returns true if both objects are equal
def __eq__(self, other):
    if not isinstance(other, ShowProjectWorkHoursResponseBodyWorkHours):
        return False

    return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if isinstance(self, other.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def 
__eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n # Ensure same class and values match\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", "def is_equal(self, a, b):\n return a is b", "def is_equal(self, a, b):\n return a == b", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\r\n if isinstance(other, self.__class__):\r\n return self.__dict__ == other.__dict__\r\n else:\r\n return False", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def __eq__(self,other):\n return self is other", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def __eq__(self, other):\n return type(self) == type(other) and self.id == other.id", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Single2HaObject):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__hash__() == other.__hash__()\n return False", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if self.primary != other.primary:\n return False\n return True", "def __eq__(self, other) -> bool:\n if other is None:\n return False\n return self.__hash__() == other.__hash__()", "def __eq__(self, other):\n if not isinstance(other, ObjectInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __eq__(self, other):\n return id(self) == id(other)", "def __eq__(self, other) -> bool:\n return type(self) == type(other) and \\\n self._id == other.id and \\\n self.code == other.code and \\\n self.name == other.name and \\\n self.gender == other.gender and \\\n self.date_of_birth == other.date_of_birth", "def equals(self, other): # -> bool:\n ...", "def equals(self, obj: object) -> bool:\n ...", "def __eq__(self, other):\n for attr in self._attrs_to_save:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n return True", "def __eq__(self, other):\n if type(other) is type(self):\n return (self.x == other.x and self.y == other.y and self.z == other.z)\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, self.__class__):\n return NotImplemented\n\n return (\n self.name,\n self.submit_at,\n self.subreddit,\n self.title,\n self.body_template,\n ) == (\n other.name,\n other.submit_at,\n other.subreddit,\n other.title,\n other.body_template,\n )", "def __eq__(self, other):\n # Check that we share 
the same class as this object\n if not isinstance(other, type(self)):\n return False\n\n return hash(self) == hash(other)", "def __eq__(self, other):\n if not isinstance(other, PreviewObjectAutofill):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return equal(self, other)", "def __eq__(self, other: Any) -> bool:\n return self.__class__ is other.__class__ and self.identifier == other.identifier", "def __eq__(self, other):\n return self.__id == other.get_id()", "def __eq__ (self, other):\n if type(self) == type(other):\n return self._m == other._m\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Referent):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.properties == other.properties", "def __eq__(self, other):\n return self.items() == other.items()", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, other):\n\n if self is other:\n return True\n return hash(self) == hash(other)", "def __eq__(self, other):\n if other._field1 == self._field1:\n return True\n return False", "def same_as(self, other):\n return super().__eq__(other)", "def __eq__(self, other):\n try:\n return other and \\\n self.id == other.id\n\n except AttributeError:\n return False", "def __eq__(self, other):\r\n\t\treturn self._to_pylist() == other._to_pylist()", "def __eq__(self, other):\n if not isinstance(other, Fiddle):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.7961088", "0.7961088", "0.79433626", "0.79303336", "0.7926563", "0.7897525", "0.78826123", "0.78826123", "0.78806067", "0.7872423", "0.7868354", "0.78668815", "0.7825702", "0.7819993", "0.78162885", "0.78078854", "0.78068274", "0.7796298", "0.7794721", "0.7784825", "0.77790844", "0.7769397", "0.77534705", "0.7746211", "0.7741107", "0.77282816", "0.7725766", "0.7719537", "0.770273", "0.7685999", "0.7677552", "0.76739407", "0.7664857", "0.76557016", "0.7655046", "0.76282835", "0.7625795", "0.76242626", "0.76237214", "0.76237214", "0.76237214", "0.7617347", "0.7600536", "0.7599156", "0.7595863", "0.75945824", "0.7594092", "0.75899327" ]
0.0
-1
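The positive document above guards with an `isinstance` check and then compares `__dict__`. A self-contained sketch of the same pattern under a hypothetical class name, with a quick usage check:

class WorkHours:
    # Hypothetical value object using the dict-comparison equality pattern.
    def __init__(self, project_id, hours):
        self.project_id = project_id
        self.hours = hours

    def __eq__(self, other):
        # Different types are never equal; same type compares all attributes.
        if not isinstance(other, WorkHours):
            return False
        return self.__dict__ == other.__dict__

# Instances with identical attributes compare equal; any differing attribute breaks equality.
assert WorkHours(1, 8.0) == WorkHours(1, 8.0)
assert WorkHours(1, 8.0) != WorkHours(2, 8.0)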
Returns true if both objects are not equal
def __ne__(self, other):
    return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n return not self.__ne__(other)", "def __ne__(self, other):\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__ (self, other):\n return not self == other" ]
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
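Every variant in this record reduces `__ne__` to the negation of `__eq__`; in Python 3 that delegation is normally redundant, because `__ne__` is derived from `__eq__` automatically. A sketch contrasting the two (class names hypothetical):

class Explicit:
    def __init__(self, x):
        self.x = x

    def __eq__(self, other):
        return isinstance(other, Explicit) and self.x == other.x

    def __ne__(self, other):
        # Explicit delegation, matching the documents above.
        return not self == other

class Implicit:
    def __init__(self, x):
        self.x = x

    def __eq__(self, other):
        return isinstance(other, Implicit) and self.x == other.x
    # No __ne__: Python 3 derives it as the negation of __eq__.

assert Explicit(1) != Explicit(2)
assert Implicit(1) != Implicit(2)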
This is the view handler for the "/" url.
async def index(request):
    # Note: we return a dict not a response because of the @template decorator
    return {
        'title': request.app['name'],
        'intro': "Success! you've setup a basic aiohttp app.",
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def root(self, request):\n return ''", "def index(_):\n template = loader.get_template('route/home.html')\n return HttpResponse(template.render(Context({})))", "def root(self):\n return self.app.get('/',headers=self.headers)", "def get(self):\n\n self.response.out.write(template.render('templates/home.html', None))", "def get(self):\n self.render(\"index.html\")", "def get(self):\n self.render('index.html')\n return", "def index(request, path=''):\n return render(request, 'index.html')", "def index(request, path=''):\n return render(request, 'index.html')", "def root():\n if request.headers['Accept'] == 'application/json':\n return \"Welcome\\n\\n\", 200\n else:\n return redirect(url_for('index'))", "def get(self):\n self.render(\n \"index.html\",\n )", "def index(request):\r\n badRequest(\"Url not found\")", "def get(self, request):\n return render(request, 'start/Index.html', {})", "def root():\n return render_template('index.html')", "def root():\n return render_template('index.html')", "def root():\n return render_template('index.html')", "def get(self):\n return render_template(\"index.html\")", "def get_root():\r\n return render_template(\"index.html\"), 200", "def home(request):\n return render_to_response('index.html')", "def home():\n logging.info('Entering route: HOME')\n\n logging.info('Rendering template: main.html')\n return render_template('main.html')", "def index_route(self, request, *args, **kwargs):\n raise Http404()", "def index():\n return \"This is root!!!!\"", "def home():\n payload = manager.get_payload()\n return render_template('index.html', payload=payload)", "def Home():\n resp = render_template('index.html')\n return resp", "def root(request):\n\n return render(request, 'users/index.html')", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def home():\n return render_template(\"index.html\")", "def home():\n return render_template(\"index.html\")", "def home():\n return render_template(\"index.html\")", "def home():\n return render_template(\"index.html\")", "def home():\n #mongoconn()\n return render_template(\n 'index.html'\n )", "def index():\r\n return render_template('home.html')", "def home():\n return render_template('index.html')", "def home():\n return render_template('index.html')", "def home():\n return render_template('index.html')", "def home():\n return render_template('index.html')", "def home():\n return render_template('index.html')", "def home():\n return render_template('index.html')", "def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))", "def render_home():\r\n\treturn render_template(\"index.html\")", "async def root(request: Request):\n return templates.TemplateResponse(\"index.html\", {\"request\": request})", "def home():\n\n return render_template('index.html')", "def root(request):\n\ttemplate = 'bfbot/main'\n\treturn redirect(template)", "def index():\n response.view_title = myconf.get('app.name') + ' Home Page'\n return dict(message='')", "def index(self):\n raise cherrypy.HTTPRedirect('/user')", "def view(self, url):\n abort(404)", "def index_file():\n return redirect(\"/\")", "def view(self, url):\r\n abort(404)", "def index():\n return render_template(\"home.html\")", "def index(self):\n\t\treturn render_template('index.html')", "def index():\n return render_template('home.html')", "def home():\n return make_response(open('app/templates/index.html').read())", "def home():\n 
return render_template(\n 'index.html'\n )", "def catch_all(path=''):\n return render_template('index.html')", "def home(request): \n return render_to_response('index.html', locals(), context_instance = RequestContext(request))", "def get(self):\n self.response.write(view_utils.render('base.html', {}))", "def home_page():\n\n return render_template('index.html')", "def index():\n return 'There is nothing here.'", "def index_view(request):\n return render(request, 'index.html')", "def root():\n return render_template('root.html')", "def home(request):\n return render_to_response('home.html', {}, RequestContext(request))", "def index():\r\n return render_template('index.html')", "def index(request):\n return render(request, \"dbkeeper/index.html\")", "def get(self):\n self.render('view.html')", "def index():\n\n return render_template('home.html')", "def index(request):\n return render(request, 'home.html')", "def do_GET(self):\n if not self.path or self.path == \"/\":\n self.redirect()\n elif self.is_viewvc():\n try:\n self.run_viewvc()\n except IOError:\n # ignore IOError: [Errno 32] Broken pipe\n pass\n else:\n self.send_error(404)", "def index(request):\n \n return render(request, 'home/index.html')", "def index():\n return render_template('index.html', name=urlpath)", "def index():\n return render_template('index.html'), 200", "def homepage():\n return redirect(\"/posts\")", "def root():\n return flask.render_template('index.html')", "def home(request):\n # Test Comment\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'index.html',\n context_instance=RequestContext(request,\n {\n 'title': 'Home Page',\n 'year': datetime.now().year,\n })\n )", "def index():\n return render_template('0-index.html')", "def home(request):\r\n return render(request, 'home.html')", "def home_view(request):\n if request.authenticated_userid:\n return HTTPFound(location=request.route_url('app_view')) # pragma no cover\n return {} # pragma no cover", "def index():\n return redirect(url_for(\"home\"))", "def root_redirect():\r\n return redirect(url_for(\"display_top\"))", "def home(request):\n\n return render(request,'index.html',{})", "def index(request):\n \n \n return render(request, 'index.html')", "def root1(request):\n\ttemplate = 'main'\n\treturn redirect(template)", "def index(request):\n\treturn render(request, 'Toeic/index.html')", "def index():\n return render_template('index.html')", "async def home_page(request: Request):\r\n return templates.TemplateResponse('index.html', {'request': request})", "def index() -> Any:\n return render_template(\"index.html\")", "def home(request):\n\n context = {\n\n }\n\n return render(request, 'hydraviewer/home.html', context)", "def index():\r\n return render_template('index.html')", "def index():\r\n return render_template('index.html')", "def index(request):\r\n\treturn render(request, 'templates/index.html')", "def start_page():\n if not _home:\n abort(404)\n return redirect(_home)", "def start_page():\n if not _home:\n abort(404)\n return redirect(_home)", "def index(self):\n return render_template('main/index.html')", "def home():\n\n\treturn render_template('solai.html')", "def index():\n pass", "def home_view(request: HttpRequest) -> HttpResponse:\n return render(request=request, template_name='todo/home.html')", "def home():\n return render_template('main.html')", "def index(request):\n\n return render(request, 'home/index.html')", "def index(request):\n\n return render(request, 'home/index.html')", "def index(request):\n return 
render(request, \"index.html\")", "def indexView(request):\n return render(request, 'auvsi_suas/index.html')" ]
[ "0.6833893", "0.68323773", "0.68276465", "0.6732128", "0.6723868", "0.67115694", "0.670275", "0.670275", "0.6678917", "0.6669672", "0.66280764", "0.6614279", "0.6596165", "0.6596165", "0.6596165", "0.6584849", "0.6568513", "0.6565733", "0.6544685", "0.6540552", "0.6508354", "0.6476614", "0.6467354", "0.64662075", "0.643906", "0.643906", "0.643906", "0.64294124", "0.64294124", "0.64294124", "0.64294124", "0.642587", "0.642067", "0.64171404", "0.64171404", "0.64171404", "0.64171404", "0.64171404", "0.64171404", "0.64056563", "0.6398662", "0.6384231", "0.6381002", "0.6363994", "0.6350501", "0.6349542", "0.6342888", "0.6330088", "0.6327266", "0.63272136", "0.6322987", "0.63204616", "0.63150364", "0.63064015", "0.6298898", "0.6283587", "0.6278979", "0.6274541", "0.62720615", "0.62594485", "0.6252391", "0.6250629", "0.6250103", "0.624787", "0.62420285", "0.6239579", "0.623748", "0.6235711", "0.6232037", "0.62108606", "0.62101096", "0.6204614", "0.6201879", "0.62015843", "0.6199717", "0.6195559", "0.6193508", "0.6192912", "0.61912847", "0.6184218", "0.61836237", "0.617635", "0.61692953", "0.6168258", "0.61634636", "0.61594594", "0.61590475", "0.61570764", "0.61570764", "0.6156601", "0.6156351", "0.6156351", "0.61558175", "0.6154189", "0.6153408", "0.61524814", "0.6150242", "0.6145629", "0.6145629", "0.6143394", "0.61408406" ]
0.0
-1
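The positive document for this record returns a dict that an `@template` decorator renders; that matches the aiohttp-jinja2 pattern (an assumption — the decorator's origin is not shown in the record). A minimal runnable sketch with a hypothetical inline template:

import jinja2
import aiohttp_jinja2
from aiohttp import web

@aiohttp_jinja2.template('index.html')  # renders the returned dict into the template
async def index(request):
    return {'title': request.app['name']}

app = web.Application()
app['name'] = 'demo'
aiohttp_jinja2.setup(app, loader=jinja2.DictLoader({'index.html': '<h1>{{ title }}</h1>'}))
app.router.add_get('/', index)
# web.run_app(app)  # serves "/" with the rendered template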
Return RGB image representing fractal.
def render(self, width=300, height=300, zoom=None, itermax=50,
           colors=5, color_offset=0.5, **kwargs):
    if zoom is None:
        zoom = self._defaultZoom()

    complex_plane = self._complexPlane(width, height, *zoom)
    fractal = self._computeFractal(complex_plane, itermax, **kwargs)
    rgb_image = self._toRgbImage(fractal, colors, color_offset)

    # Display fractal on screen.
    self._show(rgb_image)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _toRgbImage(self, fractal, colors, color_offset):\n soln_real = adjustRange(fractal[0], 0, 127)\n soln_imag = adjustRange(fractal[1], 0, 127)\n iters = adjustRange(fractal[2], 0, 128)\n\n rgb_image = np.array([\n soln_real + iters,\n soln_imag + iters,\n iters\n ]\n ).astype(dtype=np.uint8)\n\n return rgb_image.T", "def _toRgbImage(self, fractal, colors, color_offset):\n hsv_img = np.array(\n [\n # Cycle through color wheel.\n (fractal * colors + color_offset) % 1,\n\n # Saturation = fractal value.\n fractal,\n\n # Maximum value.\n np.ones(fractal.shape)\n ]\n ).astype(dtype=float).T\n\n rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)\n return rgb_img", "def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c", "def _toRgbImage(self, fractal, colors, color_offset):\n hsv_img = np.array(\n [\n # Cycle through color wheel.\n (fractal * colors + color_offset) % 1,\n\n # Saturation = 1 where fractal values > 0,\n # Saturation = 0 otherwise.\n fractal.astype(dtype=bool).astype(dtype=float),\n\n # Invert colours\n 1 - fractal\n ]\n ).astype(dtype=float).T\n\n rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)\n return rgb_img", "def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img", "def create_colorful_test_image(self):\n ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)\n ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)\n ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)\n imr = np.concatenate((ch255, ch128, ch128), axis=2)\n img = np.concatenate((ch255, ch255, ch0), axis=2)\n imb = np.concatenate((ch255, ch0, ch255), axis=2)\n imw = np.concatenate((ch128, ch128, ch128), axis=2)\n imu = np.concatenate((imr, img), axis=1)\n imd = np.concatenate((imb, imw), axis=1)\n image = np.concatenate((imu, imd), axis=0)\n return image", "def to_color(self):\n if self.channels == 4:\n color = opencv.cvtColor(self.img, opencv.COLOR_BGRA2BGR)\n return Image(color)\n elif self.channels == 1:\n color = opencv.cvtColor(self.img, opencv.COLOR_GRAY2BGR)\n return Image(color)\n else:\n return Image(self.img)", "def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg", "def reveal_RGB_image(filename):\n\tnew_array = [[], [], []]\n\tim = Image.open(filename)\n\tpixels = convert_image_to_pixels(filename) # get RGB array\n\tfor pixel in pixels: # get tuple of RGB\n\t\tfor x in range(3): # get R, G, B lists\n\t\t\tnew_array[x].append(85 * (pixel[x] & 3)) # change 0-3 to 0-255\n\t\t# get hidden 2 least significant bits\n\tfinal_array = list(zip(new_array[0], new_array[1], new_array[2]))\n\t# create a new image container in RGB mode,\n\t# and import array pixels data into the container\n\treturn convert_pixels_to_image(final_array, im.size)", "def get_image(self):\n image = Image.new('1', (8, 16))\n draw = ImageDraw.Draw(image)\n for x in xrange(8):\n for y in xrange(16):\n draw.point((x,y),self.get_pixel(x, y))\n return image", "def generate_lut(self):\n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # 
these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r,g,b", "def grey_to_rgb_imitation(img):\n return np.repeat(img[...,np.newaxis], 3, -1)", "def get_image():\n image_response = client.simGetImages([airsim.ImageRequest(\"0\", airsim.ImageType.Scene, False, False)])[0]\n image1d = np.fromstring(image_response.image_data_uint8, dtype=np.uint8)\n image_rgba = image1d.reshape(image_response.height, image_response.width, 4)\n return image_rgba[78:144,1:255,0:3].astype(float)\n # return image_rgba[78:144,76:255,0:3].astype(float)", "def getcolorim(ra, dec, size=240, output_size=None, filters=\"grizy\", format=\"jpg\"):\n\n if format not in (\"jpg\", \"png\"):\n raise ValueError(\"format must be jpg or png\")\n url = geturl(ra, dec, size=size, filters=filters, output_size=output_size, format=format, color=True)\n r = requests.get(url)\n im = Image.open(BytesIO(r.content))\n return im", "def get_BGR_img(self):\n img = self.img.copy()\n # Convert BGR to HSV\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # define range of BGR color in HSV\n threshold_blue = np.array([[100,43,46], [124,255,255]])\n threshold_green = np.array([[35,43,46], [77,255,255]])\n threshold_red1 = np.array([[0,43,46], [10,255,255]])\n threshold_red2 = np.array([[156,43,46], [180,255,255]])\n # Threshold the HSV image to get only BGR colors\n mask_blue = cv2.inRange(hsv, threshold_blue[0], threshold_blue[1])\n mask_green = cv2.inRange(hsv, threshold_green[0], threshold_green[1])\n mask_red1 = cv2.inRange(hsv, threshold_red1[0], threshold_red1[1])\n mask_red2 = cv2.inRange(hsv, threshold_red2[0], threshold_red2[1])\n mask_red = mask_red1 | mask_red2\n # Bitwise-AND mask and original image\n self.blue = cv2.bitwise_and(img, img, mask=mask_blue)\n self.green = cv2.bitwise_and(img, img, mask=mask_green)\n self.red = cv2.bitwise_and(img, img, mask=mask_red)\n # 返回 bgr 三通道的分量合成的图片\n return np.stack((self.blue[:, :, 0], self.green[:, :, 1], self.red[:, :, 2]), axis=2)", "def createRGBImage(self, filepath, width=None, outdir=None):\n print('[createRGBImage] filepath, outdir', filepath, outdir)\n\n index = 0\n rgb_data = []\n\n # Read binary file\n binary_data = self.getBinaryData(filepath)\n\n # Create R,G,B pixels\n while (index + 3) < len(binary_data):\n R = binary_data[index]\n G = binary_data[index+1]\n B = binary_data[index+2]\n index += 3\n rgb_data.append((R, G, B))\n\n size = self.get_size(len(rgb_data), width)\n image = Image.new('RGB', size)\n image.putdata(rgb_data)\n if width > 0:\n image = image.resize((width, width))\n if outdir is not None:\n self.save_file(filepath, image, size, 'RGB', width, outdir)\n # print('np.array(image)', np.array(image).shape)\n return np.array(image)/255.0", "def generate_image(height=512, \n width=512, \n color=(255, 255, 255)):\n if type(color) == tuple:\n b = np.full((height, width, 1), color[0], dtype=np.uint8)\n g = np.full((height, width, 1), color[1], dtype=np.uint8)\n r = np.full((height, width, 1), color[2], dtype=np.uint8)\n img = np.concatenate((b, g, r), axis=2)\n else:\n gray = np.full((height, width), color, dtype=np.uint8)\n img = gray\n\n return img", "def _fractalize(self, f, compMap):\n\n from PIL import Image\n\n def toImage(cmObject):\n \"\"\"cmObject is the ComplexMap instance\"\"\"\n size = self.gridsize, self.gridsize\n cm = cmObject()\n master = []\n for item in cm:\n master.extend(item)\n\n #Apply default Mandelbrot Set Function\n master = map(f, master)\n\n col1 = (0,0,102,0)\n col2 = (255,204,51,0)\n\n def select_color(x):\n if x == 1: 
return col1\n else: return col2\n\n master = map(select_color, master)\n \n image = Image.new(\"RGBA\", size, (0,0,0,0))\n image.putdata(master)\n return image\n\n image_width = 0\n image_height = 0\n image_list = []\n #Unpack row\n for (y, row) in enumerate(compMap):\n image_row = []\n\n #Unpack columns\n for item in row:\n #Unpack the individual\n image_row.append(toImage(item))\n\n width = len(image_row) * self.gridsize\n height = self.gridsize\n row_holder_image = Image.new(\"RGBA\", (width, height), (0,0,0,0)) \n\n for (n, image) in enumerate(image_row):\n row_holder_image.paste(image, ((n*self.gridsize),0))\n\n image_list.append(row_holder_image)\n \n image_width = width\n image_height = len(image_list) * self.gridsize\n\n image_whole = Image.new(\"RGBA\", (image_width, image_height), (0,0,0,0))\n for (n, image) in enumerate(image_list):\n image_whole.paste(image, (0, (n*self.gridsize)))\n image_whole.save(\"fractal.jpg\", \"JPEG\")\n\n return", "def img_to_rgb(img):\r\n if len(img.shape) < 3 or img.shape[2] == 1:\r\n return np.repeat(img, 3).reshape(img.shape[0], img.shape[1], 3)\r\n else:\r\n return img", "def color(image, magnitude, name=None):\n _check_image_dtype(image)\n\n with tf.name_scope(name or \"color\"):\n tiled_gray_image = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))\n colored_image = blend(tiled_gray_image, image, magnitude)\n return colored_image", "def get_image():\n bgr = np.frombuffer(\n stream.read_frame().get_buffer_as_uint8(), dtype=np.uint8\n ).reshape(RESOLUTIONY, RESOLUTIONX, 3)\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n return rgb", "def read_color_image(path):\n with open(path, 'rb') as f:\n img = Image.fromarray(read_ppm(f), mode='RGB')\n img = tf.keras.preprocessing.image.img_to_array(img, dtype=int)\n img = tf.convert_to_tensor(img)\n return img", "def luminance(self):\n \n return (self.r + self.g + self.b) // 3", "def img_recolor(self, args, input_image_path):\n \n ec = encoder.Encoder(output_path=args.intermediate_representation, method=args.method,\n size=args.size, p=args.p, grid_size=args.grid_size, plot=args.plot, quantize=args.quantize)\n dc = decoder.Decoder(output_path=args.output_path, method=args.method, size=args.size, p=args.p, gpu_id=args.gpu_id, plot=args.plot)\n\n ec.encode(input_image_path)\n img_gray_name = ar_utils.gen_new_gray_filename(input_image_path)\n img_gray_path = os.path.join(args.intermediate_representation, img_gray_name)\n dc.decode(img_gray_path)\n\n if args.delete_gray and os.path.exists(img_gray_path):\n os.remove(img_gray_path)", "def get_color(im_obj):\n #im = Image.open(path, 'r')\n x, y = im_obj.size\n\n r, g, b = 0, 0, 0\n for i in xrange(x):\n for j in xrange(y):\n color_px = im_obj.getpixel((i, j))\n #print color_px\n r += color_px[0]\n g += color_px[1]\n b += color_px[2]\n\n r = r / (x * y)\n g = g / (x * y)\n b = b / (x * y)\n return (r, g, b)", "def Color(img: Image, magnitude: float) -> Image:\n return PIL.ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1]))", "def make_grayscale(self):\n \n lum = self.luminance()\n# self.r = lum\n# self.g = lum\n# self.b = lum\n\n # Instead, we can call them method we already defined for setting RGB\n self.set_rgb(lum, lum, lum)", "def recompute_final_image(self):\n if self._image is None:\n self.final_image = None\n return\n if isinstance(self._image, np.ndarray):\n if self._image.dtype == np.float and np.any(self._image > 1):\n im = self._image / 255\n else:\n im = self._image\n if self.cmap is not None:\n im = 
cm.get_cmap(self.cmap)(im)\n im = PIL.Image.fromarray((im * 255).astype(np.uint8))\n else: # we hope it is a PIL image or equivalent\n im = self._image\n im = im.convert('RGBA')\n if self.make_square:\n new_size = max(im.width, im.height)\n im = im.resize((new_size, new_size), PIL.Image.NEAREST)\n if self.resolution is not None:\n if self.resolution.size == 1:\n im = im.resize((self.resolution, self.resolution),\n PIL.Image.NEAREST)\n else:\n im = im.resize(self.resolution,\n PIL.Image.NEAREST)\n if self.circ_cut is not None:\n middle = np.array(im.size) / 2\n x = np.arange(im.size[0]) - middle[0] + 0.5\n x = x / np.max(np.abs(x))\n y = np.arange(im.size[1]) - middle[1] + 0.5\n y = y / np.max(np.abs(y))\n yy, xx = np.meshgrid(y, x)\n r = np.sqrt(xx ** 2 + yy ** 2)\n alpha = np.empty(r.shape)\n alpha[r > 1] = 0\n alpha[r <= self.circ_cut] = 1\n val = (r > self.circ_cut) & (r <= 1)\n alpha[val] = (\n 0.5 + 0.5 * np.cos(\n np.pi * (r[val] - self.circ_cut)\n / (1 - self.circ_cut)))\n alpha = alpha.T * np.array(im.getchannel('A'))\n alpha = PIL.Image.fromarray(np.uint8(alpha))\n im.putalpha(alpha)\n if self.col is not None:\n if self.border_type is None:\n pass\n elif self.border_type == 'alpha':\n bg_alpha = np.array(im.getchannel('A'))\n bg_alpha = bg_alpha > 0\n bg_alpha = PIL.Image.fromarray(255 * np.uint8(bg_alpha))\n bg = PIL.Image.new('RGBA', im.size, color=self.col)\n bg.putalpha(bg_alpha)\n im = PIL.Image.alpha_composite(bg, im)\n elif self.border_type == 'pad':\n im = PIL.ImageOps.expand(\n im,\n border=self.border_width,\n fill=self.col)\n elif self.border_type == 'conv':\n im = PIL.ImageOps.expand(\n im,\n border=self.border_width,\n fill=(0, 0, 0, 0))\n bg_alpha = im.getchannel('A')\n bg_alpha = bg_alpha.filter(PIL.ImageFilter.BoxBlur(\n self.border_width))\n bg_alpha = np.array(bg_alpha)\n bg_alpha = 255 * np.uint8(bg_alpha > 0)\n bg_alpha = PIL.Image.fromarray(bg_alpha)\n bg = PIL.Image.new('RGBA', im.size, color=self.col)\n bg.putalpha(bg_alpha)\n im = PIL.Image.alpha_composite(bg, im)\n self.final_image = im", "def red_channel(img):\n\n red = np.zeros(img.shape,dtype=float)\n\n red[:,:,2] = np.copy(img[:,:,2])\n\n return red", "def greyScale(img, shape):\n s, v = shape\n greyPicture = [sum(img[i]) / 3 for i in range(v * s)]\n\n return greyPicture", "def generate_channels(path):\n # Abrir imagen y transformar a array\n image = Image.open(path)\n img_array = np.array(image)\n \n # Sacar RGB\n R = img_array[..., 0]\n G = img_array[..., 1]\n B = img_array[..., 2]\n \n return (R, G, B)", "def rgb_processing(rgb_img, center, scale, rot=0):\n rgb_img = crop(rgb_img, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=rot)\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img", "def create_image(self):\n # how many categories?\n aspect_ratio = float(4) / 3\n self.width = int(math.sqrt(aspect_ratio * self.total))\n self.height = int(self.width / aspect_ratio)\n\n img = Image.new(\"RGB\", (self.width, self.height))\n return img", "def generate_normalized_rgb(self):\n \n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r/256.0,g/256.0,b/256.0", "def get_coloured_grid(self, r1, r2, r3, b1=4, b2=2.5, b3=1):\n r, g, b = np.frompyfunc(self.get_colour(r1, r2, r3, b1, b2, b3), 2, 3)(self.end_z, self.end_step)\n img_array = np.dstack((r, g, b))\n return 
Image.fromarray(np.uint8(img_array * 255))", "def generate_image(self):\n\t\tcenters = self.generate_centers()\n\t\timg = Image.new('RGB', (self.config.image_size, self.config.image_size), color=(0,0,0))\n\t\tshapes = np.random.randint(2, size=len(centers))\n\t\tdrawer = ImageDraw.Draw(img)\n\t\tr = int(0.05 * self.config.image_size)\n\t\tR = []\n\t\tfor i in range(len(centers)):\n\t\t\tcoor = (centers[i][0] - r , centers[i][1] - r, centers[i][0] + r, centers[i][1] + r)\n\t\t\tif shapes[i] < 0.5:\n\t\t\t\tdrawer.rectangle(coor, fill=COLOR[i])\n\t\t\telse:\n\t\t\t\tdrawer.ellipse(coor, fill=COLOR[i])\n\t\t\tR.append([centers[i], i, shapes[i]])\n\t\treturn np.array(img), R", "def rgb(r, g, b):\n return (r/255, g/255, b/255)", "def red_fish(target_size: Optional[Tuple[int, int]] = None, rgb: bool = True) -> Tensor:\n return imread(HERE+'red_fish.jpg', target_size=target_size, rgb=rgb)", "def create_image(self):\n\n self._image = 255 * np.ones((self._height, self._width, 3), np.uint8)", "def orb():\n image = pygame.Surface([16, 16], pygame.SRCALPHA)\n pygame.draw.circle(image, colour.WHITE, (8, 8), 8, 0)\n return image", "def get_image(self):\n image = np.frombuffer(self.image, dtype=np.uint8)\n return image.reshape(*self.size, self.channels)", "def get_dress(self,stack=False):\r\n \"\"\"takes input rgb----> return PNG\"\"\"\r\n name = self.imageid\r\n file = cv2.imread(name)\r\n file = tf.image.resize_with_pad(file,target_height=512,target_width=512)\r\n rgb = file.numpy()\r\n file = np.expand_dims(file,axis=0)/ 255.\r\n seq = self.model.predict(file)\r\n seq = seq[3][0,:,:,0]\r\n seq = np.expand_dims(seq,axis=-1)\r\n c1x = rgb*seq\r\n c2x = rgb*(1-seq)\r\n cfx = c1x+c2x\r\n dummy = np.ones((rgb.shape[0],rgb.shape[1],1))\r\n rgbx = np.concatenate((rgb,dummy*255),axis=-1)\r\n rgbs = np.concatenate((cfx,seq*255.),axis=-1)\r\n if stack:\r\n stacked = np.hstack((rgbx,rgbs))\r\n return stacked\r\n else:\r\n return rgbs", "def generate_image(color):\n color_tuple = int_rgb_tuple(color)\n return Image.new('RGB', (500, 500), color=color_tuple)", "def rgb(self):\n return (self.r, self.g, self.b)", "def flow_to_image(flow):\n out = []\n maxu = -999.\n maxv = -999.\n minu = 999.\n minv = 999.\n maxrad = -1\n for i in range(flow.shape[0]):\n u = flow[i, :, :, 0]\n v = flow[i, :, :, 1]\n idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)\n u[idxunknow] = 0\n v[idxunknow] = 0\n maxu = max(maxu, np.max(u))\n minu = min(minu, np.min(u))\n maxv = max(maxv, np.max(v))\n minv = min(minv, np.min(v))\n rad = np.sqrt(u ** 2 + v ** 2)\n maxrad = max(maxrad, np.max(rad))\n u = u / (maxrad + np.finfo(float).eps)\n v = v / (maxrad + np.finfo(float).eps)\n img = compute_color(u, v)\n out.append(img)\n return np.float32(np.uint8(out))", "def displayImage():\n f = open(os.path.join(\"outputColor\",\"epoch_total.txt\"),\"r\")\n epochNum = int(f.readline())\n f.close()\n return Image.open(\"outputPhotosColor/image_at_epoch_{:04d}.png\".format(epochNum))", "def greyscale(c):\n return desaturate(c, 1)", "def generate_image(size, bitdepth, pattern):\n\n width, height = size\n\n maxval = 2**bitdepth-1\n if maxval > 255:\n a = array('H')\n else:\n a = array('B')\n fw = float(width)\n fh = float(height)\n pfun = PATTERN[pattern]\n for y in range(height):\n fy = float(y)/fh\n for x in range(width):\n a.append(int(round(pfun(float(x)/fw, fy) * maxval)))\n return a", "def generate_image(self):\n\n if not has_pillow:\n raise RuntimeError(\"requires https://pypi.org/project/pillow/\")\n\n background = self.get_background()\n foreground = 
self.get_foreground()\n\n matrix = self.generate_matrix()\n\n image = Image.new(\"RGB\", (420, 420), background)\n draw = ImageDraw.Draw(image)\n\n for (i, row) in enumerate(matrix):\n for (j, bit) in enumerate(row):\n x = 35 + j * 70\n y = 35 + i * 70\n\n if bit:\n draw.rectangle((x, y, x + 70, y + 70), foreground)\n\n return image", "def to_image(x):\n x = denorm(x.data.cpu())\n ndarr = x.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = ndarr\n return im", "def grey(self):\n return sum((self.value(0), self.value(1), self.value(2)))/3", "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def DisplayFractal(a, colorConsts, saveIndex, outputNumber=1, fmt='jpeg'):\n a_cyclic = (6.28*a/20.0).reshape(list(a.shape)+[1])\n img = np.concatenate([colorConsts[0]+20*np.cos(a_cyclic),\n colorConsts[1]+50*np.sin(a_cyclic),\n colorConsts[2]-80*np.cos(a_cyclic)], 2)\n img[a==a.max()] = [150, 200, 155]\n a = img\n a = abs(a)\n a %= 255\n a = np.uint8(np.clip(a, 0, 255))\n # PIL.Image.fromarray(a).save(, fmt)\n f = BytesIO()\n PIL.Image.fromarray(a).save(f, fmt)\n saver.imsave(\"%s/output%s_%s.jpeg\" % (path, outputNumber, saveIndex), a)\n # display(Image(data=f.getvalue()))", "def get_image(self):\n glReadBuffer(GL_FRONT)\n glReadPixels(0, 0, self.__width, self.__height, GL_RGBA, GL_FLOAT,\n self.__output_image)\n return self.__output_image", "def testImageProcessing():\n Im_pix = getRGB( 'in.png' ) # read in the in.png image\n print \"The first two pixels of the first row are\",\n print Im_pix[0][0:2]\n # remember that Im_pix is a list (the image)\n # of lists (each row) of lists (each pixel is [R,G,B])\n New_pix = [ [ [255 - num for num in p] for p in row ] for row in Im_pix ]\n # now, save to the file 'out.png'\n saveRGB( New_pix, 'out.png' )", "def retrieveColor(image):\n w, h, dim = image.shape\n ret = np.zeros((w, h, dim), dtype=np.uint8)\n for i in range(w):\n for j in range(h):\n ret[i][j] = fakingColors(image[i][j])\n return np.clip(ret, 0, 255)", "def get_rendered_image(self) -> np.ndarray:\n return np.transpose(self.state['observation'], [1, 2, 0])", "def export_image(self, name):\n\t\tred = Color(\"red\")\n\t\tblue = Color(\"blue\")\n\t\twhite = Color(\"white\")\n\t\tblack = Color(\"black\")\n\t\tgold = Color(\"gold\")\n\t\trgb_gold = []\n\t\tfor part in gold.rgb:\n\t\t\tpart = part * 255\n\t\t\trgb_gold.append(part)\n\t\trgb_black = []\n\t\tfor part in black.rgb:\n\t\t\tpart = part * 255\n\t\t\trgb_black.append(part)\n\t\trgb_white = []\n\t\tfor part in white.rgb:\n\t\t\tpart = part * 255\n\t\t\trgb_white.append(part)\n\t\tcolours = list(red.range_to(blue, int(self.grains)))\n\t\timage = np.zeros([self.space.shape[1],self.space.shape[0], 3], dtype=np.uint(8))\n\t\tfor grain in range(self.grains+1):\n\t\t\trgb = []\n\t\t\tfor part in colours[grain-1].rgb:\n\t\t\t\tpart = part * 255\n\t\t\t\trgb.append(part)\n\t\t\tfor cell in self.space.flat:\n\t\t\t\tif cell.state == grain:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb\n\t\t\t\tif cell.state == 999:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb_black\n\t\t\t\tif cell.state == 500:\n\t\t\t\t\tx,y = cell.find_id()\n\t\t\t\t\timage[x,y] = rgb_gold\n\t\timg = Image.fromarray(image.astype('uint8'))\n\t\timg = img.resize((self.space.shape[1]*3,self.space.shape[0]*3))\n\t\timg.save('./static/temp/'+str(name)+'.png')", "def matplotlib_image(image):\n 
if image.ndim == 2:\n rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n else:\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return rgb", "def flow_to_rgb(flow):\n im1 = flow[:, :, 0]\n im2 = flow[:, :, 1]\n\n h, w = flow.shape[:2]\n\n # Use Hue, Saturation, Value colour model\n hsv = np.zeros((h, w, 3), dtype=np.float32)\n hsv[..., 1] = 1\n\n mag, ang = cv2.cartToPolar(im1, im2)\n hsv[..., 0] = ang * 180 / np.pi\n hsv[..., 2] = cv2.normalize(mag, None, 0, 1, cv2.NORM_MINMAX)\n\n return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)", "def blank_image(height, width):\n all_green = create_uniform_image(height, width, [0, 255, 0])\n return all_green", "def _dwd_create_RGB_image(self, channels, cranges):\n if not isinstance(channels, (list, tuple, set)) and \\\n not isinstance(cranges, (tuple, list, set)) and \\\n not len(channels) == len(cranges) and \\\n not (len(channels) == 3 or len(channels == 4)):\n raise ValueError(\"Channels and color ranges must be list/tuple/set \\\n and they must have the same length of 3 or 4 elements\")\n\n if len(channels) == 3:\n return geo_image.GeoImage(channels,\n self.area,\n get_first(self.time_slot),\n fill_value=(0, 0, 0),\n mode=\"RGB\",\n crange=cranges)\n if len(channels) == 4:\n return geo_image.GeoImage(channels,\n self.area,\n get_first(self.time_slot),\n fill_value=(0, 0, 0, 0),\n mode=\"RGBA\",\n crange=cranges)", "def create_tourism_raster(self):\n self.create_raster('flickr',\n pixeltype='32BF', noData=0,\n value_col='pictures')", "def polarTransform(scale, img):\n from matplotlib.colors import hsv_to_rgb\n \n \n img = np.asarray(img)\n dims = img.shape\n\n phi = ((np.arctan2(-img[0], -img[1]) + np.pi/2) % (np.pi*2)) / (2 * np.pi)\n rho = np.sqrt(img[0]**2 + img[1]**2)\n saturation = np.ones((dims[1], dims[2]))\n\n out = hsv_to_rgb(np.dstack((phi, saturation, scale * rho)))\n\n return np.clip(out * scale, 0, 1)", "def get_image(image_path):\n image = Image.open(image_path, \"r\")\n width, height = image.size\n pixel_values = list(image.getdata())\n\n if (image.mode != \"RGBA\"):\n image = image.convert(\"RGB\")\n pixel_values = list(image.getdata())\n for idx, px in enumerate(pixel_values):\n pixel_values[idx] = [px[0], px[1], px[2], 255]\n\n return (list(chunks(pixel_values, width)), width, height)", "def _tile_image(self, data):\n image = Image.open(StringIO(data))\n return image.convert('RGBA')", "def get_result_image(self):\n\n im_result = cv2.cvtColor(self.im_t, cv2.COLOR_GRAY2BGR)\n pcd_final = copy.deepcopy(self.pcd_s)\n\n pcd_final.transform(self.trans_final)\n np_final = np.asarray(pcd_final.points, np.int)\n\n for i in range(np_final.shape[0]):\n im_result = cv2.circle(\n im_result,\n (np_final[i, 1], np_final[i, 0]),\n 2,\n (0, 255, 0),\n -1,\n cv2.LINE_AA,\n )\n\n im_result = cv2.rectangle(im_result, (5, 12), (170, 38), (0, 0, 0), -1)\n # Draw rotation in image\n _, _, rotate = mat2rpy(self.trans_final)\n d_rotate = np.degrees(rotate)\n str_rotate = format(d_rotate, \".2f\") + \"[deg](CW)\"\n im_result = cv2.putText(\n im_result, str_rotate, (5, 30), 1, 1.25, (255, 255, 255), 2, cv2.LINE_AA\n )\n im_result = cv2.putText(\n im_result, str_rotate, (5, 30), 1, 1.25, (0, 255, 255), 1, cv2.LINE_AA\n )\n return im_result", "def get_result_image(self):\n\n im_result = cv2.cvtColor(self.im_t, cv2.COLOR_GRAY2BGR)\n pcd_final = copy.deepcopy(self.pcd_s[self.result_id])\n\n pcd_final.transform(self.trans_final)\n np_final = np.asarray(pcd_final.points, np.int)\n for i in range(np_final.shape[0]):\n im_result = cv2.circle(\n im_result,\n (np_final[i, 1], 
np_final[i, 0]),\n 1,\n (0, 255, 0),\n -1,\n cv2.LINE_AA,\n )\n\n im_result = cv2.rectangle(im_result, (5, 12), (170, 38), (0, 0, 0), -1)\n # Draw rotation in image\n d_rotate = np.degrees(self.rotate)\n str_rotate = format(d_rotate, \".2f\") + \"[deg](CCW)\"\n im_result = cv2.putText(\n im_result, str_rotate, (5, 30), 1, 1.25, (255, 255, 255), 2, cv2.LINE_AA\n )\n im_result = cv2.putText(\n im_result, str_rotate, (5, 30), 1, 1.25, (0, 255, 255), 1, cv2.LINE_AA\n )\n return im_result", "def fom_rgb(direction, inclination, mask=None):\n if mask is None:\n mask = np.ones_like(direction, dtype=np.bool)\n\n rgb = np.zeros((mask.shape[0], mask.shape[1], 3), np.uint8)\n for x in range(mask.shape[0]):\n for y in range(mask.shape[1]):\n if not mask[x, y]:\n continue\n\n rgb[x,\n y, :] = _vec_to_rgb(\n np.sin(0.5 * np.pi - inclination[x, y]) *\n np.cos(direction[x, y]),\n np.sin(0.5 * np.pi - inclination[x, y]) *\n np.sin(direction[x, y]),\n np.cos(0.5 * np.pi - inclination[x, y]))\n return rgb", "def render_image(camera, scene, lights, nx, ny):\n # TODO A5 copy implementation from A4\n img = np.zeros((ny, nx, 3), np.float32)\n\n for x in range(0, nx):\n for y in range(0, ny):\n u = (x + 0.5) / nx\n v = (y + 0.5) / ny\n ray = camera.generate_ray((u, v))\n hit = scene.intersect(ray)\n img[y][x] = shade(ray, hit, scene, lights)\n\n return img", "def recreate_image(x):\n reverse_mean = [-0.485, -0.456, -0.406]\n reverse_std = [1/0.229, 1/0.224, 1/0.225]\n in_channel = x.shape[-1]\n recreated_im = copy.copy(x) # C, H, W\n if in_channel == 3:\n for c in range(in_channel):\n recreated_im[:, :, c] /= reverse_std[c]\n recreated_im[:, :, c] -= reverse_mean[c]\n elif in_channel == 1:\n recreated_im[:, :, 0] /= reverse_std[1]\n recreated_im[:, :, 0] -= reverse_mean[1]\n recreated_im[recreated_im > 1] = 1\n recreated_im[recreated_im < 0] = 0\n recreated_im = np.round(recreated_im * 255)\n\n recreated_im = np.uint8(recreated_im) # H, W, C\n return recreated_im", "def to_color(self):\n return (int(self.r * 255), int(self.g * 255), int(self.b * 255))", "def get_image(image_path):\r\n image = Image.open(image_path, 'r')\r\n width, height = image.size\r\n pixel_values = list(image.getdata())\r\n if image.mode == 'RGB':\r\n channels = 3\r\n elif image.mode == 'L':\r\n channels = 1\r\n else:\r\n print(\"Unknown mode: %s\" % image.mode)\r\n return None\r\n pixel_values = np.array(pixel_values).reshape((1,width, height, channels))\r\n # print(pixel_values.shape)\r\n return pixel_values", "def show_rgb_img(img):\n return plt.imshow(cv2.cvtColor(img, cv2.CV_32S))", "def plot_rgb(r_fits, g_fits, b_fits, object_getter=asteroid):\n data = [None, None, None]\n for ii, fits_file in enumerate([r_fits, g_fits, b_fits]):\n data[ii] = data_from_fits(fits_file)\n data[ii] = object_getter(data[ii])\n plt.figure()\n plt.imshow(data[ii])\n\n data = np.dstack(data)\n plt.figure()\n plt.imshow(data)", "def get_image(self) -> Image.Image:\n raw_buffer_data = self.get_raw_frame_buffer_object_data()\n image = Image.frombytes(\n \"RGBA\",\n self.get_pixel_shape(),\n raw_buffer_data,\n \"raw\",\n \"RGBA\",\n 0,\n -1,\n )\n return image", "def to_image(self, width=800, height=600, outline=20, background=\"white\", **kwargs):\n img = _rodframe_image(\n parameters=self.parameters,\n width=width,\n height=height,\n outline=outline,\n background=background,\n **kwargs\n )\n return img", "def _solid_image(color, size):\n from PIL import Image\n # convert to RGB uint8\n color = g.np.array(color, dtype=g.np.uint8)[:3]\n\n # create a one pixel RGB 
image\n image = Image.fromarray(\n g.np.tile(color, (g.np.prod(size), 1)).reshape(\n (size[0], size[1], 3)))\n assert image.size == tuple(size[::-1])\n\n return image", "def hsl2rgb_img(hsl):\r\n\r\n def core(_hsl, _frgb):\r\n\r\n h, s, l = _hsl[:, :, 0], _hsl[:, :, 1], _hsl[:, :, 2]\r\n fr, fg, fb = _frgb[:, :, 0], _frgb[:, :, 1], _frgb[:, :, 2]\r\n\r\n q = np.zeros(l.shape, dtype=np.float)\r\n\r\n lbot = l < 0.5\r\n q[lbot] = l[lbot] * (1 + s[lbot])\r\n\r\n ltop = lbot == False\r\n l_ltop, s_ltop = l[ltop], s[ltop]\r\n q[ltop] = (l_ltop + s_ltop) - (l_ltop * s_ltop)\r\n\r\n p = 2 * l - q\r\n q_sub_p = q - p\r\n\r\n is_s_zero = s == 0\r\n l_is_s_zero = l[is_s_zero]\r\n per_3 = 1./3\r\n per_6 = 1./6\r\n two_per_3 = 2./3\r\n\r\n def calc_channel(channel, t):\r\n\r\n t[t < 0] += 1\r\n t[t > 1] -= 1\r\n t_lt_per_6 = t < per_6\r\n t_lt_half = (t_lt_per_6 == False) * (t < 0.5)\r\n t_lt_two_per_3 = (t_lt_half == False) * (t < two_per_3)\r\n t_mul_6 = t * 6\r\n\r\n channel[:] = p.copy()\r\n channel[t_lt_two_per_3] = p[t_lt_two_per_3] + q_sub_p[t_lt_two_per_3] * (4 - t_mul_6[t_lt_two_per_3])\r\n channel[t_lt_half] = q[t_lt_half].copy()\r\n channel[t_lt_per_6] = p[t_lt_per_6] + q_sub_p[t_lt_per_6] * t_mul_6[t_lt_per_6]\r\n channel[is_s_zero] = l_is_s_zero.copy()\r\n\r\n calc_channel(fr, h + per_3)\r\n calc_channel(fg, h.copy())\r\n calc_channel(fb, h - per_3)\r\n\r\n frgb = np.zeros(hsl.shape, dtype=np.float)\r\n cpus = multiprocessing.cpu_count()\r\n length = int(math.ceil(float(hsl.shape[0]) / cpus))\r\n line = 0\r\n threads = []\r\n while line < hsl.shape[0]:\r\n line_next = line + length\r\n thread = threading.Thread(target=core, args=(hsl[line:line_next], frgb[line:line_next]))\r\n thread.start()\r\n threads.append(thread)\r\n line = line_next\r\n\r\n for thread in threads:\r\n thread.join()\r\n\r\n return (frgb*255).round().astype(np.uint8)", "def exercise2a(self):\n colors = [\n [253, 253, 150],\n [255, 125, 125],\n [150, 111, 214],\n [119, 158, 203],\n [255, 105, 97],\n [ 3, 192, 60]\n ]\n # This takes some time …\n #self.b1 = mandelbrot(1200, 600, colors, [0, 0, 0], 0.02, 0.73, 0.21)\n # Load the pregenerated image ;)\n self.b1 = misc.imread(\"B1.png\")\n plt.axis('off')\n plt.imshow(self.b1)\n plt.show()\n misc.imsave(\"B1.png\", self.b1)\n misc.imsave(\"B1_Brightness.png\", print_brightness(self.b1))", "def imageprepare():\r\n file_name = '9-test.png'\r\n im = Image.open(file_name).convert('L')\r\n\r\n im.save(\"9-t.png\")\r\n plt.imshow(im)\r\n plt.show()\r\n tv = list(im.getdata())\r\n\r\n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n return tva", "def image(self):\n if self.roi and len(self.roi) == 4:\n return self.calibrated_image[self.roi[0]:self.roi[1],self.roi[2]:self.roi[3]]\n else:\n return self.calibrated_image", "def rgbcolor(h, f):\n # q = 1 - f\n # t = f\n if h == 0:\n return v, f, p\n elif h == 1:\n return 1 - f, v, p\n elif h == 2:\n return p, v, f\n elif h == 3:\n return p, 1 - f, v\n elif h == 4:\n return f, p, v\n elif h == 5:\n return v, p, 1 - f", "def greyscale(img):\n grey_img = np.zeros([210, 160])\n for i in range(3):\n grey_img =np.sum([grey_img, img[:, :, i]], 0)\n grey_img /= 3\n grey_img = grey_img.astype(np.uint8)\n return grey_img", "def genrandimg(args) -> None:\n\n size = (int(args.x), int(args.y))\n fp = Image.new(\"RGB\", size)\n data = []\n\n if not args.c: # If color\n for i in range(size[0]*size[1]):\n r = random.choice([0x00, 0xff])\n data.append((r, r, r)) # Each RGB value is the same random value\n else: # Else black-and-white\n for i in range(size[0]*size[1]):\n r = [random.choice(range(0, 256)) for _ in range(0, 3)]\n r = (r[0], r[1], r[2]) # Choose 3 random numbers for different RGB values\n data.append(r)\n\n fp.putdata(data)\n print(\"Saving to %s...\" % args.o)\n fp.save(args.o)\n fp.close()", "def grey_to_rgb(im):\n assert im.n_channels in [1, 3]\n\n if im.n_channels == 3:\n return im\n\n im.pixels = np.vstack([im.pixels] * 3)\n return im", "def lab_to_rgb(image: tf.Tensor) -> tf.Tensor:\n xyz = lab_to_xyz(image)\n rgb_image = xyz_to_rgb(xyz)\n return rgb_image", "def GetRGB(*args):\n return _gdi_.Colour_GetRGB(*args)", "def make_image(self, mode=\"L\") -> Image:\r\n return Image.fromarray(self.fb, mode=\"L\")", "def rgb_565(self):\n return (\n (int(self.red * 0xF800) & 0xF800) |\n (int(self.green * 0x07E0) & 0x07E0) |\n (int(self.blue * 0x001F) & 0x001F))", "def get_rgb_frame(self) -> np.array:\n return self.rstate.render_frame_rgb(self.rsimulator)", "def prediction_to_pic(prediction, mode=\"RBYG\"):\n prediction_shape = prediction.shape\n layer_num = 5\n color_list = np.array([\n [255, 0, 0], # Red:Main vessel\n [0, 0, 255], # Blue:Catheter\n [255, 255, 0], # Yellow:SubVessel\n [0, 255, 0], # Green:outer\n [0, 0, 0] # Black:Tissue\n ])\n\n if mode == \"RB\":\n layer_num = 3\n color_list = np.array([\n [255, 0, 0], # Red: main\n [0, 0, 255], # Blue: Catheter\n [0, 0, 0] # Black:NOT Tissue and not main\n ])\n if mode == \"old\":\n layer_num = 5\n color_list = np.array([\n [0, 0, 0], # Black:Tissue\n [0, 0, 255], # Blue: Catheter\n [255, 0, 0], # Red:Main vessel\n [0, 255, 0], # Green:outer\n [255, 255, 0] # Yellow:SubVessel\n ])\n _, max_color = prediction.cpu().max(1)\n max_color.numpy()\n #print(max_color)\n image = np.zeros((prediction_shape[2], prediction_shape[3], 3), dtype=np.uint8)\n #print(image.shape)\n for i in range(layer_num):\n image[max_color[0] == i] = color_list[i]\n return image", "def cs4243_rgb2grey(image):\n if len(image.shape) != 3:\n print('RGB Image should have 3 channels')\n return\n ###Your code here####\n # construct weights numpy\n weights = np.array([0.299, 0.587, 0.114])\n \n # multiply pixel RGB with weights and sum up the RGB axis\n image = np.dot(image, weights)\n \n ###\n\n return image/255.", "def green_channel(input_image):\n return input_image[:, :, 1]", "def getImage(file):\n image = imread(file)\n image = crop(image, 64, 64)\n image = color.rgb2gray(image)\n\n return image", "def _raw_to_gray(self):\n img_rgb = np.zeros((self.y_res, self.x_res, 3), 
dtype=np.uint8)\n img_rgb = np.array(self.img_raw)\n img_gray = np.zeros((self.y_res, self.x_res))\n img_gray[:, :] = img_rgb[:, :, 2]\n\n return img_gray", "def generate_image(filename, x_size=350, y_size=350):\n global timeflag\n timeflag = 0\n\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(13, 15)\n green_function = build_random_function(13, 15)\n blue_function = build_random_function(13,15)\n print \"red_function:\\t\" + str(red_function)+\"\\n\"\n print \"green_function:\\t\" + str(green_function)+\"\\n\"\n print \"blue_function:\\t\" + str(blue_function)+\"\\n\"\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y)),\n color_map(evaluate_random_function(green_function, x, y)),\n color_map(evaluate_random_function(blue_function, x, y))\n )\n\n im.save(filename)", "def xyz_to_rgb(image: tf.Tensor) -> tf.Tensor:\n x, y, z = tf.unstack(image, axis=-1)\n var_x = x / 100\n var_y = y / 100\n var_z = z / 100\n\n var_r = var_x * 3.2406 + var_y * -1.5372 + var_z * -0.4986\n var_g = var_x * -0.9689 + var_y * 1.8758 + var_z * 0.0415\n var_b = var_x * 0.0557 + var_y * -0.2040 + var_z * 1.0570\n\n var_r = tf.where(var_r > 0.0031308,\n 1.055 * tf.pow(var_r, (1 / 2.4)) - 0.055,\n 12.92 * var_r)\n var_g = tf.where(var_g > 0.0031308,\n 1.055 * tf.pow(var_g, (1 / 2.4)) - 0.055,\n 12.92 * var_g)\n var_b = tf.where(var_b > 0.0031308,\n 1.055 * tf.pow(var_b, (1 / 2.4)) - 0.055,\n 12.92 * var_b)\n r = var_r * 255\n g = var_g * 255\n b = var_b * 255\n rgb_image = tf.cast(tf.stack([r, g, b], axis=-1), tf.uint8)\n return rgb_image", "def imageprepare():\r\n file_name = 'temp_image.png'\r\n im = Image.open(file_name).convert('L')\r\n im = im.resize((20, 20))\r\n p = Image.new('L', (28,28), (255))\r\n p.paste(im,(4,4,24,24))\r\n p.save(\"last_image.png\")\r\n\r\n tv = list(p.getdata()) # get pixel values\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n tva = np.reshape(tva, (28, 28))\r\n\r\n return tva", "def greyscale(im):\n\t\n # YOUR CODE HERE\n rgb_weights= np.array([0.212, 0.7152, 0.0722])\n x,y,_ = np.shape(im)\n img = np.zeros([x,y])\n\t\n for i in range(x):\n for j in range(y):\n img[i,j] = im[i,j].dot(rgb_weights)\n\t\t\t\n return img", "def image_rgba(self) -> np.ndarray | None:\n return self._image.make_image() if self._image is not None else None" ]
[ "0.72595185", "0.70602125", "0.70376635", "0.6999839", "0.6523746", "0.6408264", "0.6224894", "0.6194157", "0.61483246", "0.6030305", "0.601784", "0.59868425", "0.5976731", "0.59150046", "0.5913448", "0.58958936", "0.5891541", "0.58815885", "0.5854497", "0.5818988", "0.5805886", "0.5805573", "0.5787849", "0.5786787", "0.57822114", "0.57504964", "0.57480985", "0.57475966", "0.57414675", "0.57360595", "0.5717602", "0.5707746", "0.56986177", "0.56895816", "0.56722695", "0.5663059", "0.56602484", "0.56323415", "0.56318474", "0.56241137", "0.56147593", "0.56038445", "0.5598898", "0.55939984", "0.55737126", "0.55664015", "0.5548155", "0.55474794", "0.5545764", "0.55406386", "0.55403143", "0.5535185", "0.55260956", "0.5522182", "0.55129945", "0.55070156", "0.5502306", "0.5497129", "0.5496986", "0.54899365", "0.5487879", "0.5476553", "0.54548955", "0.5454397", "0.54538107", "0.5447382", "0.543502", "0.5434391", "0.54322624", "0.54308665", "0.5426027", "0.54258865", "0.5424015", "0.54190415", "0.541841", "0.54122454", "0.54074925", "0.54013014", "0.5392697", "0.53908837", "0.53889215", "0.53856456", "0.5380341", "0.5376153", "0.537253", "0.53692186", "0.5368641", "0.5368621", "0.53683776", "0.53666645", "0.53588706", "0.53580445", "0.53576255", "0.5357403", "0.5356652", "0.53443056", "0.5333767", "0.5332755", "0.5332733", "0.53316766", "0.532652" ]
0.0
-1
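Several negatives in the record above fake an RGB image by stacking or repeating a single grayscale channel (e.g. grey_to_rgb, to_rgb, convert_grayscale_to_rgb). A minimal self-contained sketch of that channel-stacking idiom follows; the function name and the sample gradient array are illustrative, not taken from the dataset:

import numpy as np

def grey_to_rgb(grey):
    # Replicate a 2-D grayscale array into a 3-channel RGB array,
    # mirroring the np.stack / np.repeat idiom used in the snippets above.
    if grey.ndim != 2:
        raise ValueError("expected a 2-D grayscale array")
    # Repeat the single channel three times along a new trailing axis.
    return np.stack((grey,) * 3, axis=-1)

# Example: a 4x4 gradient becomes a (4, 4, 3) RGB array.
grey = np.linspace(0, 255, 16, dtype=np.uint8).reshape(4, 4)
rgb = grey_to_rgb(grey)
assert rgb.shape == (4, 4, 3)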
Display given RGB array.
def _show(self, a):
    fig = plt.figure()
    fig.set_size_inches((2, 2))
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    plt.set_cmap('hot')
    ax.imshow(a, aspect='equal')
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display(self, colorArray):\n pass", "def _show_rgb(self):\n R, G, B = self._rgb_frames()\n image = numpy.dstack((R, G, B))\n imageItem = self.parent.image.getImageItem()\n imageItem.updateImage(image)", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def display(array):\n if isinstance(array, np.ndarray):\n plt.imshow(array)\n plt.show()\n else:\n raise TypeError(\"display() needs a numpy ndarray as parameter, \"\n f\"got {type(array)}\")", "def display_image(np_rgb, text=None, scale_up=False):\n if scale_up:\n np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=1)\n np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=0)\n\n img_r, img_c, img_ch = np_rgb.shape\n if text is not None:\n np_t = np_text(text)\n t_r, t_c, _ = np_t.shape\n t_i_c = max(t_c, img_c)\n t_i_r = t_r + img_r\n t_i = np.zeros([t_i_r, t_i_c, img_ch], dtype=np.uint8)\n t_i.fill(255)\n t_i[0:t_r, 0:t_c] = np_t\n t_i[t_r:t_r + img_r, 0:img_c] = np_rgb\n np_rgb = t_i\n\n pil_img = util.np_to_pil(np_rgb)\n pil_img.show()", "def show_rgb_viewer():\n if not IS_INITIALIZED:\n print \"Device not initialized\"\n return\n\n device = openni2.Device.open_any()\n\n rgb_stream = _rgb_stream_from_device(device)\n rgb_stream.start()\n\n done = False\n while not done:\n key = cv2.waitKey(1) & 255\n if key == 27:\n print \"ESC pressed\"\n done = True\n\n bgr = np.fromstring(\n rgb_stream.read_frame().get_buffer_as_uint8(),\n dtype=np.uint8\n ).reshape(240, 320, 3)\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n\n cv2.imshow(\"rgb\", rgb)\n\n cv2.destroyAllWindows()\n rgb_stream.stop()\n openni2.unload()\n print \"Terminated\"", "def pg_print_state(env, screen):\n\n rgb_state = env.render(mode='rgb_array')\n # rgb_state = rgb_state[:,:,1].T one color\n rgb_state_transp = np.transpose(rgb_state,(1,0,2))\n screen.blit(pg.surfarray.make_surface(rgb_state_transp), (0,0))\n pg.display.flip()", "def display_rgb(rgb_files,\n min=0, max=3000,\n zoom=0.4, width=800, height=800):\n from os import system\n command = \"ds9 -zoom {0:f} -width {1:d} -height {2:d} -rgb \".format(zoom, width, height)\n for i, j in zip(rgb_files, ['red', 'green', 'blue']):\n command += \"-{0:s} {1:s} -log -z1 {2:f} -z2 {3:f} \".format(j, i, min, max)\n command += \"-colorbar no &\"\n print command\n #system(command)", "def show_rgb_img(img):\n return plt.imshow(cv2.cvtColor(img, cv2.CV_32S))", "def show(self):\n import Helpers\n for p in self.parts:\n color = (p[1][0]*255, p[1][1]*255, p[1][2]*255, 0)\n Helpers.show(p[0], color)", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def plot_pixel_array(arr, figsize=(10, 10)):\n arr = arr.squeeze()\n plt.figure(figsize=figsize)\n plt.imshow(arr, cmap=plt.cm.bone)\n plt.show()", "def display_image(mat):\n\timg = Image.fromarray(mat)\n\timg.show()", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def show_display(flag=\"rainbow\", brightness=1, gamma=1):\n colors = ledfx.col_cor(flags[flag], brightness, gamma)\n colors = expand(colors, 160)\n with display.open() as disp:\n disp.clear()\n for line, color in enumerate(colors):\n disp.line(line, 0, line, 80, col=color)\n disp.update()\n disp.close()", "def show_digit( Pixels ):\n from matplotlib import pyplot as plt\n print(Pixels.shape)\n Patch = Pixels.reshape((8,8))\n plt.figure(1, figsize=(4,4))\n plt.imshow(Patch, cmap=plt.cm.gray_r, 
interpolation='nearest') # plt.cm.gray_r # plt.cm.hot\n plt.show()", "def plot_rgb(r_fits, g_fits, b_fits, object_getter=asteroid):\n data = [None, None, None]\n for ii, fits_file in enumerate([r_fits, g_fits, b_fits]):\n data[ii] = data_from_fits(fits_file)\n data[ii] = object_getter(data[ii])\n plt.figure()\n plt.imshow(data[ii])\n\n data = np.dstack(data)\n plt.figure()\n plt.imshow(data)", "def show_digit( Pixels ):\r\n print(Pixels.shape)\r\n Patch = Pixels.reshape((8,8))\r\n plt.figure(1, figsize=(4,4))\r\n plt.imshow(Patch, cmap=plt.cm.gray_r, interpolation='nearest') # cm.gray_r # cm.hot\r\n plt.show()", "def displayRGBColor(*args, create: bool=True, hueSaturationValue: bool=True, list: bool=True,\n resetToFactory: bool=True, resetToSaved: bool=True, q=True, query=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def showAverageRGB(self):\n \n # Affichage dans les boxes\n self.red_label.setText(\"Mean RED: \"+str(int(round(self.R.mean()))))\n self.red_label.adjustSize()\n self.green_label.setText(\"Mean GREEN: \"+str(int(round(self.G.mean()))))\n self.green_label.adjustSize()\n self.blue_label.setText(\"Mean BLUE: \"+str(int(round(self.B.mean()))))\n self.blue_label.adjustSize()", "def display(self):\n for row in range(self.height):\n for col in range(self.width):\n char = '#' if self.pixels[row * self.width + col] else '.'\n print(char, end='')\n print()\n print()", "def display(self, color = (190,205,205), add = False):\r\n\t\tpass", "def display(tab):\n screen = [None] * 64\n for i in range(8):\n for j in range(8):\n if tab[i][j] == 1:\n sense.set_pixel(i, j, 255, 0, 0) if STATES[index_state] == \"temp\" else sense.set_pixel(i, j, 0, 0, 255)\n else:\n sense.set_pixel(i, j, 0, 0, 0)", "def display_image ( X ):\r\n\t# on teste que le tableau contient bien 256 valeurs\r\n\tif X.size != 256:\r\n\t\traise ValueError ( \"Les images doivent etre de 16x16 pixels\" )\r\n\r\n\t# on cree une image pour imshow: chaque pixel est un tableau a 3 valeurs\r\n\t# (1 pour chaque canal R,G,B). 
Ces valeurs sont entre 0 et 1\r\n\tY = X / X.max ()\r\n\timg = np.zeros ( ( Y.size, 3 ) )\r\n\tfor i in range ( 3 ):\r\n\t\timg[:,i] = X\r\n\r\n\t# on indique que toutes les images sont de 16x16 pixels\r\n\timg.shape = (16,16,3)\r\n\r\n\t# affichage de l'image\r\n\tplt.imshow( img )\r\n\tplt.show ()", "def showColors(self):\n\t\tcolors = ['white', 'red', 'green', 'orange', 'blue', 'purple', 'cyan', 'lightgrey',\n\t\t\t\t 'darkgrey', 'light red', 'light green', 'yellow', 'light blue', 'purple', 'cyan', 'dark white']\n\t\tmax = curses.COLORS if curses.COLORS <= 16 else 16\n\t\tself.screen.clear()\n\t\tfor c in range(0, max):\n\t\t\tself.wts(c + 2, 1, \"color \" + str(c) + ' : ' + colors[c], c)\n\t\tself.wts(18, 1, \"color 16 : red on white\", 16)\n\t\tself.wts(20, 1, 'Color demo, displaying ' + str(max) + ' colors + 1 special')\n\t\tself.screen.refresh()\n\t\tch = False\n\t\twhile not ch:\n\t\t\tch = self.screen.getch()\n\t\tself.exit('Color demo complete')", "def view(self):\n plt.imshow(self.texture_array, vmin = 0, vmax = 255)\n if self.texture_array.ndim == 2:\n plt.set_cmap('gray')\n \n plt.title(self.texture_name)\n plt.show()", "def show_tiff_image_data(bgrn_image):\n\ttif_rgb = get_rgb(bgrn_image, [2, 1, 0]) # RGB\n\t# rescaling to 0-255 range - uint8 for display\n\trescaleIMG = np.reshape(tif_rgb, (-1, 1))\n\tscaler = MinMaxScaler(feature_range=(0, 255))\n\trescaleIMG = scaler.fit_transform(rescaleIMG)\n\timg_scaled = (np.reshape(rescaleIMG, tif_rgb.shape)).astype(np.uint8)\n\tnew_style = {'grid': False}\n\tplt.imshow(img_scaled)\n\tplt.title('RGB')\n\tplt.colorbar()", "def print_colors() -> None:\n color_pivot = [0]\n color_pivot += [e * 6 + 16 for e in range(37)]\n color_pivot.append(256)\n color_pivot_start = color_pivot[:-1]\n color_pivot_end = color_pivot[1:]\n color_table_list = [range(cs, ce) for cs, ce in zip(color_pivot_start, color_pivot_end)]\n\n for color_table in color_table_list:\n text = \"\"\n for color in color_table:\n color_string = str(color)\n padding = \"\".join([\" \" for e in range(3 - len(color_string))])\n text += colorize(f\" {padding}{color_string} \", background_256=color, with_end=False)\n print(text + colorize(\"\", background=DEFAULT))", "def display_output(inFrame, outRed, outGreen, outBlue):\r\n \r\n outFrame = np.zeros(p.frame_size)\r\n \r\n # Display the frame\r\n temp1 = upscale_frame(inFrame)\r\n \r\n outFrame[:,:,0] = outBlue.reshape(32,32)\r\n outFrame[:,:,1] = outGreen.reshape(32,32)\r\n outFrame[:,:,2] = outRed.reshape(32,32)\r\n temp2 = upscale_frame(outFrame)\r\n \r\n merged = np.concatenate((temp1, temp2), axis = 1)\r\n cv2.imshow('frame', merged)", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def show_rgbd(self):\n while self.depth_data is None:\n pass\n\n while not rospy.is_shutdown():\n rospy.Rate(10).sleep()\n d = self.depth_data * self.cam.DEPTH_UNIT\n d = cv2.applyColorMap(d.astype(np.uint8), cv2.COLORMAP_RAINBOW)\n dc = np.concatenate((d, self.rgb_data), axis=1)\n cv2.imshow('RGB & Depth Image', dc)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cv2.destroyAllWindows()", "def display(self):\n for r in range(len(self.grid)):\n for c in range(len(self.grid[r])):\n if (r, c) == self.location:\n print('\\033[96m*\\x1b[0m', end=' ') # print a blue *\n else:\n print(self.grid[r][c], end=' ') # prints a space or wall\n print()\n print()", "def show(arr2d):\n print (\"\\n\".join(\"\\t\".join(row) for row in arr2d))", "def reveal_RGB_image(filename):\n\tnew_array = [[], [], 
[]]\n\tim = Image.open(filename)\n\tpixels = convert_image_to_pixels(filename) # get RGB array\n\tfor pixel in pixels: # get tuple of RGB\n\t\tfor x in range(3): # get R, G, B lists\n\t\t\tnew_array[x].append(85 * (pixel[x] & 3)) # change 0-3 to 0-255\n\t\t# get hidden 2 least significant bits\n\tfinal_array = list(zip(new_array[0], new_array[1], new_array[2]))\n\t# create a new image container in RGB mode,\n\t# and import array pixels data into the container\n\treturn convert_pixels_to_image(final_array, im.size)", "def show(self):\n\t\tself.processQueue()\n\t\tself.flattenLayers()\n\t\tcount = 0\n\t\tfor v in self.ledsColorBuffer:\n\t\t\tself.strip.setPixelColor(count, v)\n\t\t\tcount += 1\n\t\tself.strip.show()", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name + \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def show_np(mat):\n for x in range(15):\n for y in range(15):\n if (x == 7) and (y == 7):\n print(\"\\033[%d;%d;%dm**\\033[0m\" % (0, 33, 41), end='')\n elif mat[x, y, 0] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 31, 41), end='')\n elif mat[x, y, 1] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 32, 42), end='')\n else:\n print(\" \", end='')\n print(\"\")", "def GetRGBArray(self, p_int):\n ...", "def show_digit(self):\n x_train, _, _, _ = self._load_data()\n plt.imshow(x_train[0], cmap=plt.cm.binary)\n plt.show()", "def display(self):\n for row0 in range(self.y):\n print()\n for row in range(self.height):\n for column0 in range(self.x):\n print(\" \", end=\"\")\n for column in range(self.width):\n print(\"#\", end=\"\")\n print()", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def display(self):\n [print() for i in range(self.__y)]\n for i in range(self.__height):\n [print(\" \", end=\"\") for i in range(self.__x)]\n for j in range(self.__width):\n print(\"#\", end=\"\")\n print()", "def display(self):\n for i in range(self.y):\n print()\n for i in range(self.height):\n for k in range(self.x):\n print(' ', end='')\n for j in range(self.width):\n print('#', end='')\n print()", "def display_board():\n print(board[0], '|', board[1], '|', board[2])\n print(board[3], '|', board[4], '|', board[5])\n print(board[6], '|', board[7], '|', board[8])", "def display(face, min_=0, max_=255):\n image = face.reshape(IMG_HEIGHT, IMG_WIDTH)\n plt.matshow(image, cmap='gray', vmin=min_, vmax=max_)\n plt.show()", "def __unicode__(self):\n return \"{},{},{}\".format(*self.rgb)", "def glblshow(X, border=0.0):\n from numpy import take, resize, shape\n from numpy.random import rand\n\n mmin = X.min()\n mmax = X.max()\n ncolors = mmax - mmin + 1\n R = to_int32(rand(ncolors)*255)\n G = to_int32(rand(ncolors)*255)\n B = to_int32(rand(ncolors)*255)\n if mmin == 0:\n R[0],G[0],B[0] = 0,0,0\n r=resize(take(R, X.ravel() - mmin),X.shape)\n g=resize(take(G, X.ravel() - mmin),X.shape)\n b=resize(take(B, X.ravel() - mmin),X.shape)\n Y=concat('d',r,g,b)\n return Y", "def display(board, leds, delay=0.05, flashdelay=0.05):\n global i\n delay = float(delay)\n flashdelay = float(flashdelay)\n img = np.tile([i, 255, 255], board.shape).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n leds.draw(img, delay=delay)\n img = np.tile([0, 0, 0], board.shape).astype(np.uint8)\n if flashdelay > 0:\n leds.draw(img, delay=flashdelay)\n i 
+= 5\n if i > 255:\n i = 0", "def display(self):\n lines = []\n for y in range(1, self.height+1):\n line = [\".\"] * self.width\n for x in range(1, self.width+1):\n if self.array[y][x]:\n line[x-1] = \"#\"\n lines.append(\"\".join(line))\n return \"\\n\".join(lines)", "def _rgb2plot(self, data):\n\n mindata, maxdata = np.percentile(data[np.isfinite(data)], (0.01, 99))\n return np.clip((data - mindata) / (maxdata-mindata) * 255, 0, 255).astype(np.uint8)", "def display(self):\n for row in self.tile_rows:\n print(row)", "def show(self, color = Color.GREEN):\n self.draw(color)\n self.image.show()", "def imshow(self):\n axes([0, 0, 1, 1], xticks=[], yticks=[])\n imshow(self.rgb_image())", "def _show_numpy(tensor: ndarray, zoom: float = 1.) -> None:\n from PIL import Image\n shape = tuple(map(lambda s: round(s * zoom), tensor.shape))\n Image.fromarray(tensor).resize((shape[1], shape[0])).show()", "def display_board(self):\n print('*' + '*'.join(['**']*len(self.board[0])) + '*')\n for row in self.board:\n print('|' + ' '.join([('%s' % square) for square in row]) + '|')\n print('*' + '*'.join(['**']*len(self.board[0])) + '*')", "def render_board(self):\n print \"\"\n for row in self._board:\n print row", "def display(self):\n print (\"+\" + \"-\"*self.size + \"+\")\n for i in range(self.size):\n terrain_strs = [Terrain.display_string(self.array[j, i]) for j in range(self.size)]\n print(\"|\" + \"\".join(terrain_strs) + \"|\")\n print (\"+\" + \"-\"*self.size + \"+\")", "def display(self):\n for b in range(self.y):\n print()\n for i in range(self.height):\n print(\" \" * self.x + \"#\" * self.width)", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def show(black, white):\n for x in X:\n for y in Y:\n if (x == 7) and (y == 7):\n print(\"\\033[%d;%d;%dm**\\033[0m\" % (0, 33, 41), end='')\n elif black & gobit[(x, y)]:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 31, 41), end='')\n elif white & gobit[(x, y)]:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 32, 42), end='')\n else:\n print(\" \", end='')\n print(\"\")", "def print_board(board):\n\n colors = {\n '*': None,\n '2': 'red',\n '4': 'green',\n '8': 'yellow',\n '16': 'blue',\n '32': 'magenta',\n '64': 'cyan',\n '128': 'grey',\n '256': 'white',\n '512': 'green',\n '1024': 'red',\n '2048': 'blue',\n '4096': 'magenta'\n };\n header = \"Use the arrows keys to play 2048! 
Press q to quit\";\n print(header);\n N = len(board);\n vertical_edge = \"\";\n for i in range(N + 2):\n vertical_edge += \"-\\t\";\n print(vertical_edge);\n for y in range(N):\n row = \"\";\n for x in board[y]:\n\n # Handling installation fail (no colors printed)\n if termcolor is not None:\n row += termcolor.colored(x, colors[x]);\n else:\n row += x\n\n row += \"\\t\";\n print(\"|\\t\" + row + \"|\");\n if y is not N - 1: print(\"\")\n print(vertical_edge);\n\n if GUI_runnable:\n gui.update_grid(board)\n gui.update()", "def render_frame_rgb(self, sim: Simulator) -> np.array:\n rgba_frame = self.render_frame_color(sim)\n return rgba_frame[:, :, :3]", "def _color(self, args):", "def test_fromarray_grey():\n arr = numpy.zeros((20, 10), dtype='float')\n arr[10, 5] = 249.34\n\n parameters = {'data': [arr]}\n\n img = images.fromarray(parameters).convert('L')\n stats = ImageStat.Stat(img)\n\n assert_equal(img.size, (10, 20))\n assert_equal(img.getpixel((5, 10)), round(arr[10, 5]))\n assert_equal(stats.sum[0], round(arr.sum()))", "def print_tile(tile: Image.Image):\n width, height = tile.size\n\n pixels = tile.getcolors(width * height)\n\n most_frequent_pixel = pixels[0]\n\n for count, color in pixels:\n if count > most_frequent_pixel[0]:\n most_frequent_pixel = (count, color)\n\n r, g, b = most_frequent_pixel[1]\n\n light = r * 299/1000 + g * 587/1000 + b * 114/1000\n\n char = get_char_from_light(light)\n\n color = get_xterm_color(r, g, b)\n\n print(\"\\u001b[38;5;\" + str(color) + \"m\" + char, end=\"\\033[0m\")", "def display_board(board):\n print(board[7] + '|' + board[8] + '|' + board[9])\n print(board[4] + '|' + board[5] + '|' + board[6])\n print(board[1] + '|' + board[2] + '|' + board[3])\n pass", "def display_layers(layers, wide, tall):\n\n colours = {\n \"0\": \" \",\n \"1\": \" # \",\n }\n\n for row in range(tall):\n for col in range(wide):\n pixels = [layer[row][col] for layer in layers]\n line = next(colours[p] for p in pixels if p in colours)\n print(line, end=\"\")\n print()", "def rgb2lab(r, g, b):\n r, g, b = r / 255.0, g / 255.0, b / 255.0\n\n # http://www.brucelindbloom.com/index.html?Math.html\n # Inverse sRGB Companding\n r = r / 12.92 if r <= 0.04045 else ((r + 0.055) / 1.055) ** 2.4\n g = g / 12.92 if g <= 0.04045 else ((g + 0.055) / 1.055) ** 2.4\n b = b / 12.92 if b <= 0.04045 else ((b + 0.055) / 1.055) ** 2.4\n\n # http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html\n # sRGB, D65\n x = r * 0.4124564 + g * 0.3575761 + b * 0.1804375\n y = r * 0.2126729 + g * 0.7151522 + b * 0.0721750\n z = r * 0.0193339 + g * 0.1191920 + b * 0.9503041\n\n # http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_Lab.html\n kappa, epsilon = 903.3, 0.008856\n\n # http://brucelindbloom.com/index.html?Eqn_ChromAdapt.html\n # White point for D65\n xr, yr, zr = x / 0.95047, y / 1.00000, z / 1.08883\n\n fx = xr ** (1 / 3.0) if xr > epsilon else (kappa * xr + 16) / 116.0\n fy = yr ** (1 / 3.0) if yr > epsilon else (kappa * yr + 16) / 116.0\n fz = zr ** (1 / 3.0) if zr > epsilon else (kappa * zr + 16) / 116.0\n\n l = 166.0 * fy - 16.0\n a = 500.0 * (fx - fy)\n b = 200.0 * (fy - fz)\n\n return l, a, b", "def displayBoard(board):\n\n # Background color.\n WINDOW.fill(BORDER)\n\n # Draws out each row.\n for i in range(0, 4):\n displayRow(board[i], 8, i)\n\n pygame.display.update()", "def get_displayable_heatmap(array, # type: thelper.typedefs.ArrayType\n convert_rgb=True, # type: Optional[bool]\n ): # type: (...) 
-> thelper.typedefs.ArrayType\n if array.ndim != 2:\n array = np.squeeze(array)\n if array.ndim != 2:\n raise AssertionError(\"indexing should return a pre-squeezed array\")\n array_normalized = np.empty_like(array, dtype=np.uint8).copy() # copy needed here due to ocv 3.3 bug\n cv.normalize(array, array_normalized, 0, 255, cv.NORM_MINMAX, dtype=cv.CV_8U)\n heatmap = cv.applyColorMap(array_normalized, cv.COLORMAP_JET)\n if convert_rgb:\n heatmap = cv.cvtColor(heatmap, cv.COLOR_BGR2RGB)\n return heatmap", "def display(self):\n print(\"\\n\" * self.y, end=\"\")\n for i in range(self.height):\n print(\" \" * self.x, end=\"\")\n for j in range(self.width):\n print(\"#\", end=\"\")\n print()", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def img_disp(name,img):\n cv2.imshow(name,img.astype(int)/255.0)\n cv2.waitKey()", "def RGBToHTMLColor(rgb_tuple):\n\thexcolor = '#%02x%02x%02x' % rgb_tuple\n\t# that's it! '%02x' means zero-padded, 2-digit hex values", "def show(self, exec_rasterize = False):\n\n if (exec_rasterize):\n self.rasterize()\n\n Image.fromarray(self._image).show()", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def display_network(A, cols):\n\n # rescale\n A = A - A.mean()\n\n # compute rows, cols\n (M, L)= A.shape\n sz = np.sqrt(L)\n buf=1\n n = cols\n m = np.floor(M/n).astype(int)\n\n arr = -np.ones((buf+m*(sz+buf),buf+n*(sz+buf)))\n\n k=0;\n for i in range(m):\n for j in range(n):\n clim=max(abs(A[k]));\n start0 = buf + i*(sz+buf)\n start1 = buf + j*(sz+buf)\n arr[start0:start0+sz,start1:start1+sz] = A[k].reshape((sz,sz))/clim\n k=k+1\n\n plt.imshow(arr,cmap='gray', vmin=-1, vmax=1)\n plt.show()\n return arr", "def display(self):\n print(\"\\n\" * self.__y, end='')\n for row in range(self.__height):\n if self.__x:\n print(\" \" * (self.__x), end='')\n if self.__width:\n print(\"#\" * self.__width)", "def show(raster):\n gk.read(raster).show()", "def display(self):\n for space in range(self.y):\n print('')\n for row in range(self.height):\n for x in range(self.x):\n print(' ', end='')\n for col in range(self.width):\n print('#', end='')\n print('')", "def display_board(self):\n \n oled.fill(0)\n for r in range(_HEIGHT):\n for c in range(_WIDTH):\n sprite = _SPRITES[self.board[r * _WIDTH + c]]\n oled.blit(sprite, c * 9, (_HEIGHT - r - 1) * 8 + 16)\n \n r, c = self.last_play_rc\n if r != None:\n oled.hline(c * 9, (_HEIGHT - r - 1) * 8 + 16 + 6, 5, 1) \n self.draw_header()\n oled.show()", "def display(self):\n for i in range(self.__y):\n print()\n for i in range(self.__height):\n print(\" \" * self.__x + \"#\" * self.__width)", "def display_number(number1, number2):\n top = TOP_DISPLAY[STATES[index_state]]\n ret_tab = [O]*64\n for i in range(8):\n for j in range(3):\n ret_tab[8 * j + i] = top[j][i]\n for i in range(3):\n for j in range(5):\n ret_tab[8 * (j+3) + i+1] = [255, 255, 255] if number1[j][i] == 1 else [0, 0, 0]\n ret_tab[8 * (j+3) + i+5] = [255, 255, 255] if number2[j][i] == 1 else [0, 0, 0]\n sense.set_pixels(ret_tab)", "def display(self):\n for row in self._board_area:\n print(row, end=\"\\n\")", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def display(self):\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n 
for column in range(self.height):\n for coordLR in range(self.x):\n print(\" \", end=\"\")\n for row in range(self.width):\n print(\"#\", end=\"\")\n print()", "def _colorstr(self, args):", "def render_frame_color(self, sim: Simulator) -> np.array:\n (w, h) = sim.get_frame_size()\n rgba = 4\n size = h * w * rgba\n frame = bytearray(size)\n self.get_state().render_into_buffer(frame, True)\n return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba)", "def test_fromarray_rgb_fail():\n arr = numpy.zeros((20, 10, 3), dtype='float')\n\n parameters = {'data': [arr]}\n\n images.fromarray(parameters).convert('RGB')", "def display(self):\n width = self.width\n height = self.height\n x = self.x\n y = self.y\n for d_y in range(y):\n print()\n for h in range(height):\n if x != 0:\n print(\" \" * x, end=\"\")\n print(\"#\" * width)", "def imshow(self, depth):\n layer = self.cube[depth]\n img = []\n for i in range(self.height):\n img.append([layer[i][j].value for j in range(self.width)])\n plt.imshow(img, cmap='gray')\n plt.show()", "def display(self):\n print(\"\\n\" * self.__y, end=\"\")\n for i in range(self.__height):\n print(\" \" * self.__x, end=\"\")\n print(\"#\" * self.__width, end=\"\")\n print()", "def display_board(self):\n print(self.game_board)", "def show(type,img):\n # print(img)\n cv2.imshow(type, img)\n cv2.waitKey()", "def display(self):\n print(\"\\n\" * self.y, end='')\n for i in range(self.height):\n for j in range(self.width + self.x):\n if j < self.x:\n print(' ', end='')\n else:\n print('#', end='')\n print('')", "def save_array_as_rgb_image(data, image_name):\n data_dim = len(data.shape)\n if(data_dim == 3):\n assert(data.shape[0] == 3 or data.shape[2] == 3)\n if(data.shape[0] == 3):\n data = np.transpose(data, [1, 2, 0])\n img = Image.fromarray(data)\n img.save(image_name)", "def render(self) -> Any:\n if self.render_mode == \"rgb_array\":\n return self.ale.getScreenRGB()\n elif self.render_mode == \"human\":\n pass\n else:\n raise Error(\n f\"Invalid render mode `{self.render_mode}`. Supported modes: `human`, `rgb_array`.\"\n )", "def draw(self, canvas_arr: np.ndarray, color: Tuple[int, int, int] = DEFAULT_COLOR):\n pass", "def gammaDisplay(img_path: str, rep: int):\n if rep == 2:\n # reading as BGR\n image = cv2.imread(img_path)/255\n elif rep == 1:\n image=cv2.imread(img_path,0)/255\n else:\n raise ValueError('Only RGB or GRAY_SCALE ')\n\n # the callback function, to find the gamma -divide by 100 to get values between 0.01 to 2\n def on_trackbar(val):\n gamma = val / 100\n corrected_image = np.power(image, gamma)\n cv2.imshow('Gamma display', corrected_image)\n\n cv2.namedWindow('Gamma display')\n trackbar_name = 'Gamma'\n cv2.createTrackbar(trackbar_name, 'Gamma display', 1, 200, on_trackbar)\n # Show some stuff\n on_trackbar(1)\n # Wait until user press some key\n cv2.waitKey()\n pass", "def lab_to_rgb(img_l, img_ab):\n lab = np.empty([*img_l.shape[0:2], 3])\n lab[:, :, 0] = np.squeeze(((img_l + 1) * 50))\n lab[:, :, 1:] = img_ab * 127\n return color.lab2rgb(lab)", "def print_board(self):\n\n print\n\n for row in xrange(8):\n for column in xrange(8):\n if self.squares[row][column]:\n print self.squares[row][column],; sys.stdout.write(u'')\n else:\n if self.dark_square((row, column)):\n print u' __ ',; sys.stdout.write(u'')\n else:\n print u' . ',; sys.stdout.write(u'')\n print\n print" ]
[ "0.8111257", "0.7347514", "0.72989017", "0.6935118", "0.67059577", "0.6704922", "0.663102", "0.6547356", "0.63414645", "0.6225402", "0.6129868", "0.61273956", "0.6029412", "0.6021858", "0.5998369", "0.59782404", "0.59257555", "0.59240973", "0.59080243", "0.59073865", "0.58943903", "0.58841413", "0.58437127", "0.57673067", "0.57032025", "0.56775755", "0.566519", "0.56189543", "0.56163865", "0.5608966", "0.5592681", "0.5568946", "0.5562551", "0.5558037", "0.5550702", "0.5547276", "0.5544975", "0.5536588", "0.5533278", "0.5531753", "0.552701", "0.5509594", "0.55087227", "0.550164", "0.54996675", "0.5495558", "0.5484067", "0.54838914", "0.5474596", "0.5443995", "0.5443749", "0.54432434", "0.5441307", "0.54271775", "0.541225", "0.5405705", "0.54045725", "0.53915733", "0.53752065", "0.5370201", "0.5367519", "0.53605926", "0.5357629", "0.5354096", "0.53510994", "0.5350764", "0.5349534", "0.5345368", "0.5342289", "0.532234", "0.53147817", "0.53066903", "0.52978474", "0.52969825", "0.52966136", "0.52794904", "0.527249", "0.5266628", "0.5263599", "0.5239578", "0.52344006", "0.52341443", "0.5230519", "0.5224174", "0.522006", "0.5216627", "0.5213075", "0.52038133", "0.5201223", "0.519735", "0.51971203", "0.51951927", "0.51936746", "0.5189845", "0.5189776", "0.5189089", "0.5188862", "0.5180294", "0.517593", "0.5169705", "0.51691556" ]
0.0
-1
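The positive document in the record above renders an array through a borderless 2x2-inch matplotlib figure. Below is a standalone sketch of the same technique, lifted out of its class so it runs on its own; the function name and the random sample input are illustrative assumptions:

import matplotlib.pyplot as plt
import numpy as np

def show(a):
    # Display an array in a small figure with no axes or margins.
    fig = plt.figure()
    fig.set_size_inches((2, 2))
    # An Axes spanning the full figure: [left, bottom, width, height].
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    plt.set_cmap('hot')
    ax.imshow(a, aspect='equal')
    plt.show()

show(np.random.rand(64, 64))  # illustrative input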
Converts the generated fractal into an RGB image array.
def _toRgbImage(self, fractal, colors, color_offset):
    hsv_img = np.array(
        [
            # Cycle through color wheel.
            (fractal * colors + color_offset) % 1,

            # Saturation = 1 where fractal values > 0,
            # Saturation = 0 otherwise.
            fractal.astype(dtype=bool).astype(dtype=float),

            # Invert colours
            1 - fractal
        ]
    ).astype(dtype=float).T

    rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)
    return rgb_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def _toRgbImage(self, fractal, colors, color_offset):\n soln_real = adjustRange(fractal[0], 0, 127)\n soln_imag = adjustRange(fractal[1], 0, 127)\n iters = adjustRange(fractal[2], 0, 128)\n\n rgb_image = np.array([\n soln_real + iters,\n soln_imag + iters,\n iters\n ]\n ).astype(dtype=np.uint8)\n\n return rgb_image.T", "def _toRgbImage(self, fractal, colors, color_offset):\n hsv_img = np.array(\n [\n # Cycle through color wheel.\n (fractal * colors + color_offset) % 1,\n\n # Saturation = fractal value.\n fractal,\n\n # Maximum value.\n np.ones(fractal.shape)\n ]\n ).astype(dtype=float).T\n\n rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)\n return rgb_img", "def carla_rgb_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.Raw) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array", "def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c", "def GetRGBArray(self, p_int):\n ...", "def img_to_rgb(img):\r\n if len(img.shape) < 3 or img.shape[2] == 1:\r\n return np.repeat(img, 3).reshape(img.shape[0], img.shape[1], 3)\r\n else:\r\n return img", "def grey_to_rgb_imitation(img):\n return np.repeat(img[...,np.newaxis], 3, -1)", "def reconstructImage(self,arr):\n\t\tarr = arr * 256\n\t\tarr = np.array(np.round(arr),dtype=np.uint8)\n\t\t#arr = np.array(arr,dtype=np.uint8)\n\n\t\t# We need to transpose the array because we flatten X by columns\n\t\t#arr = arr.T\n\t\t#a = arr.reshape((self.width, self.height,3))\n\t\t\n\t\tif self.mode == 'L':\n\t\t\ta = arr.reshape((self.width, self.height))\n\t\telse:\n\t\t\ta = arr.reshape((self.width, self.height,3))\n\n\t\t#a = arr.reshape((3,self.width, self.height))\t\t\n\t\t#a = arr.transpose(0, 3, 1, 2)\n\n\t\tim = Image.fromarray(a,mode=self.mode)\n\n\t\treturn im", "def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img", "def img_to_array(img, path=True):\n global width, height\n\n if path:\n img = Image.open(img)\n img_arr = np.array(img) / 255.0\n img_arr = img_arr.reshape(width, height, channels)\n \n return img_arr", "def get_image(self):\n image = np.frombuffer(self.image, dtype=np.uint8)\n return image.reshape(*self.size, self.channels)", "def reveal_RGB_image(filename):\n\tnew_array = [[], [], []]\n\tim = Image.open(filename)\n\tpixels = convert_image_to_pixels(filename) # get RGB array\n\tfor pixel in pixels: # get tuple of RGB\n\t\tfor x in range(3): # get R, G, B lists\n\t\t\tnew_array[x].append(85 * (pixel[x] & 3)) # change 0-3 to 0-255\n\t\t# get hidden 2 least significant bits\n\tfinal_array = list(zip(new_array[0], new_array[1], new_array[2]))\n\t# create a new image container in RGB mode,\n\t# and import array pixels data into the container\n\treturn convert_pixels_to_image(final_array, im.size)", "def generate_channels(path):\n # Abrir imagen y transformar a array\n image = 
Image.open(path)\n img_array = np.array(image)\n \n # Sacar RGB\n R = img_array[..., 0]\n G = img_array[..., 1]\n B = img_array[..., 2]\n \n return (R, G, B)", "def load_image_as_rgb(image_path):\n im = imageio.imread(image_path)\n y_size = im.shape[0]\n x_size = im.shape[1]\n logging.info(\"Image has dimensions X:%d Y:%d\" % (x_size, y_size))\n arr = np.zeros((im.shape[0],im.shape[1]), dtype=int)\n i = 0\n for im_row in im:\n j = 0\n for vec in im_row:\n arr[i,j] = rgb_vec_to_num(vec)\n j = j + 1\n i = i + 1\n return arr", "def generate_array_image(R, G, B, height, width):\n R = R.reshape((height, width))\n G = G.reshape((height, width))\n B = B.reshape((height, width))\n \n return np.moveaxis(np.array([R, G, B]), 0, -1)", "def bgr_to_rgb(ims):\n out = []\n for im in ims:\n out.append(im[:,:,::-1])\n return out", "def get_img_array(myzipfile, imgid, shape=(299,299)):\n img_arr = np.zeros(shape=(512, 512, 3), dtype=np.float32)\n img_green = Image.open(myzipfile.open(f'{imgid}_green.png'))\n img_blue = Image.open(myzipfile.open(f'{imgid}_blue.png'))\n img_red = Image.open(myzipfile.open(f'{imgid}_red.png'))\n img_yellow = Image.open(myzipfile.open(f'{imgid}_yellow.png'))\n img_arr[:,:,0] = np.divide(np.array(img_green), 255)\n img_arr[:,:,1] = np.divide(np.array(img_blue), 255)/2 + np.divide(np.array(img_yellow), 255)/2\n img_arr[:,:,2] = np.divide(np.array(img_red), 255)/2 + np.divide(np.array(img_red), 255)/2\n img_arr = cv2.resize(img_arr, shape)\n return img_arr", "def image2array(filename, shape=None):\n # Open the image and change it to black and white\n im = Image.open(filename).convert('1', dither=Image.NONE)\n\n im = im.resize(shape, Image.ANTIALIAS)\n pattern = np.array(im)\n \n return pattern", "def get_image():\n image_response = client.simGetImages([airsim.ImageRequest(\"0\", airsim.ImageType.Scene, False, False)])[0]\n image1d = np.fromstring(image_response.image_data_uint8, dtype=np.uint8)\n image_rgba = image1d.reshape(image_response.height, image_response.width, 4)\n return image_rgba[78:144,1:255,0:3].astype(float)\n # return image_rgba[78:144,76:255,0:3].astype(float)", "def get_rgbColorArray(self, ledIndex, count):\n # buff\n res = []\n # idx\n # r\n # g\n # b\n\n buff = self._download(\"rgb.bin?typ=0&pos=\" + str(int(3*ledIndex)) + \"&len=\" + str(int(3*count)))\n del res[:]\n\n idx = 0\n while idx < count:\n r = YGetByte(buff, 3*idx)\n g = YGetByte(buff, 3*idx+1)\n b = YGetByte(buff, 3*idx+2)\n res.append(r*65536+g*256+b)\n idx = idx + 1\n\n return res", "def data_to_bytescale_rgb(data): # used to create the SOURCE PNGs (MRI, FA, MD)\n im = bytescale(data)\n w, h = im.shape\n ret = np.empty((w,h,3), dtype=np.uint8)\n ret[:,:,0] = im\n ret[:,:,1] = im\n ret[:,:,2] = im\n return ret", "def to_array(self):\n return np.array(self.to_image())", "def imageToArray(i):\r\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\r\n a.shape=i.im.size[1], i.im.size[0]\r\n return a", "def get_image_array(self):\n with picamera.array.PiRGBArray(self.camera) as output:\n self.camera.resolution = (640, 480)\n self.camera.capture(output, 'rgb')\n logging.info(\"Captured image of size {0}x{1}x{2}\".format(\n output.array.shape[0], output.array.shape[1], output.array.shape[2]))\n output.truncate(0)\n return output.array\n # self.camera.capture_continuous(self.stream, format='jpeg', use_video_port=True)\n # self.stream.seek(0)\n # image = Image.open(self.stream).convert('RGB').resize((self._input_width, self._input_height), Image.ANTIALIAS)\n # self.stream.seek(0)\n # self.stream.truncate()\n 
# self.camera.close()", "def _grey_img_to_arr(image, verbose=False):\n try:\n w, h = image.size\n arr = np.array(image.getdata())\n arr = _rgb_to_grey(arr, (h, w), verbose=verbose)\n if verbose:\n print(\"Converted from RGB to grayscale\")\n except:\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(height, width)\n return arr", "def rgb(self):\n return [self.__r, self.__g, self.__b]", "def _preprocess(self, image):\n\n # Scale from [0, 255] to [0, 1] and BGR to RGB \n return (image / 255.0)[:, :, ::-1]", "def jpg2rgb(image_data: bytes) -> np.ndarray:\n\n im = Image.open(io.BytesIO(image_data))\n im = im.convert(\"RGB\")\n im = im.resize((96, 96))\n data = np.array(im)\n\n data = rgb2gray(data)\n\n return data", "def get_rendered_image(self) -> np.ndarray:\n return np.transpose(self.state['observation'], [1, 2, 0])", "def makearray(self, *args, **kwargs):\n return _image.image_makearray(self, *args, **kwargs)", "def read_color_image(path):\n with open(path, 'rb') as f:\n img = Image.fromarray(read_ppm(f), mode='RGB')\n img = tf.keras.preprocessing.image.img_to_array(img, dtype=int)\n img = tf.convert_to_tensor(img)\n return img", "def _load(self) -> np.ndarray:\n with self._fs.open(self._filepath, mode=\"r\") as f:\n image = Image.open(f).convert(\"RGBA\")\n return np.asarray(image)", "def get_np_image(self, save_image=False, filename=\"curr_image.png\"):\n responses = client.simGetImages([airsim.ImageRequest(\"front_left\", airsim.ImageType.Scene, False, False)])\n response = responses[0]\n\n # get numpy array\n img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8)\n\n # reshape array to 4 channel image array H X W X 4\n img_rgb = img1d.reshape(response.height, response.width, 3)\n\n # # original image is fliped vertically\n # img_rgb = np.flipud(img_rgb)\n\n if save_image:\n cv2.imwrite(filename, img_rgb)\n\n return img_rgb", "def carla_cityscapes_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.CityScapesPalette) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array", "def _pillow2array(img, flag='color', channel_order='bgr'):\n channel_order = channel_order.lower()\n if channel_order not in ['rgb', 'bgr']:\n raise ValueError('channel order must be either \"rgb\" or \"bgr\"')\n\n if flag == 'unchanged':\n array = np.array(img)\n if array.ndim >= 3 and array.shape[2] >= 3: # color image\n array[:, :, :3] = array[:, :, (2, 1, 0)] # RGB to BGR\n else:\n # If the image mode is not 'RGB', convert it to 'RGB' first.\n if img.mode != 'RGB':\n if img.mode != 'LA':\n # Most formats except 'LA' can be directly converted to RGB\n img = img.convert('RGB')\n else:\n # When the mode is 'LA', the default conversion will fill in\n # the canvas with black, which sometimes shadows black objects\n # in the foreground.\n #\n # Therefore, a random color (124, 117, 104) is used for canvas\n img_rgba = img.convert('RGBA')\n img = Image.new('RGB', img_rgba.size, (124, 117, 104))\n img.paste(img_rgba, mask=img_rgba.split()[3]) # 3 is alpha\n if flag == 'color':\n array = np.array(img)\n if channel_order != 'rgb':\n array = array[:, :, ::-1] # RGB to BGR\n elif flag == 'grayscale':\n img = img.convert('L')\n array = np.array(img)\n else:\n raise ValueError(\n 'flag must be \"color\", 
\"grayscale\" or \"unchanged\", '\n f'but got {flag}')\n return array", "def process_image(self, image_path):\n\n img = load_img(image_path, target_size=IMAGE_SIZE)\n img_array = img_to_array(img)\n # Create a batch by increase dimensions\n img_array = expand_dims(img_array, 0)\n print(img_array.shape)\n return img_array", "def to_rgb(im):\n w, h = im.shape\n ret = np.empty((w, h, 3), dtype=np.uint8)\n ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = im\n return ret", "def get_image(image_path):\r\n image = Image.open(image_path, 'r')\r\n width, height = image.size\r\n pixel_values = list(image.getdata())\r\n if image.mode == 'RGB':\r\n channels = 3\r\n elif image.mode == 'L':\r\n channels = 1\r\n else:\r\n print(\"Unknown mode: %s\" % image.mode)\r\n return None\r\n pixel_values = np.array(pixel_values).reshape((1,width, height, channels))\r\n # print(pixel_values.shape)\r\n return pixel_values", "def convert_image_np(inp):\n inp = inp.numpy().transpose((1, 2, 0))\n inp = (inp*255).astype(np.uint8)\n return inp", "def render(self):\n\n pixels = [\n [Color() for _ in range(self.width)] for _ in range(self.height)]\n\n for y in range(self.height):\n for x in range(self.width):\n ray_direction = Point(x, y) - self.camera\n ray = Ray(self.camera, ray_direction)\n pixels[y][x] = self._trace_ray(ray)\n\n return pixels", "def _arr_to_img(arr, verbose=False):\n return Image.fromarray(arr)", "def save_array_as_rgb_image(data, image_name):\n data_dim = len(data.shape)\n if(data_dim == 3):\n assert(data.shape[0] == 3 or data.shape[2] == 3)\n if(data.shape[0] == 3):\n data = np.transpose(data, [1, 2, 0])\n img = Image.fromarray(data)\n img.save(image_name)", "def to_image(x):\n x = denorm(x.data.cpu())\n ndarr = x.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = ndarr\n return im", "def convert_grayscale_to_rgb(x: np.ndarray) -> np.ndarray:\n return np.stack((x, ) * 3, axis=-1)", "def get_rgb_frame(self) -> np.array:\n return self.rstate.render_frame_rgb(self.rsimulator)", "def red_channel(img):\n\n red = np.zeros(img.shape,dtype=float)\n\n red[:,:,2] = np.copy(img[:,:,2])\n\n return red", "def image2array(im):\n\n arr = numpy.zeros(im.size)\n\n for x in xrange(im.size[0]):\n for y in xrange(im.size[1]):\n arr[x,y] = im.getpixel((x,y))\n\n return arr", "def get_raw(self) -> bytearray:\n img_bytes = bytearray()\n for i in range(self.grid_size[0]):\n if self.grid[i] is not None:\n for j in range(self.grid_size[1]):\n if self.grid[i][j] is not None:\n color = self.grid[i][j]\n color = color.get_byte_representation()\n for k in range(len(color)):\n img_bytes.append(color[k])\n return img_bytes", "def _images(path):\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255", "def generate_lut(self):\n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r,g,b", "def get_BGR_img(self):\n img = self.img.copy()\n # Convert BGR to HSV\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # define range of BGR color in HSV\n threshold_blue = np.array([[100,43,46], [124,255,255]])\n threshold_green = np.array([[35,43,46], [77,255,255]])\n threshold_red1 = np.array([[0,43,46], [10,255,255]])\n threshold_red2 = np.array([[156,43,46], [180,255,255]])\n # Threshold the HSV image to get only BGR colors\n 
mask_blue = cv2.inRange(hsv, threshold_blue[0], threshold_blue[1])\n mask_green = cv2.inRange(hsv, threshold_green[0], threshold_green[1])\n mask_red1 = cv2.inRange(hsv, threshold_red1[0], threshold_red1[1])\n mask_red2 = cv2.inRange(hsv, threshold_red2[0], threshold_red2[1])\n mask_red = mask_red1 | mask_red2\n # Bitwise-AND mask and original image\n self.blue = cv2.bitwise_and(img, img, mask=mask_blue)\n self.green = cv2.bitwise_and(img, img, mask=mask_green)\n self.red = cv2.bitwise_and(img, img, mask=mask_red)\n # 返回 bgr 三通道的分量合成的图片\n return np.stack((self.blue[:, :, 0], self.green[:, :, 1], self.red[:, :, 2]), axis=2)", "def generate_normalized_rgb(self):\n \n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r/256.0,g/256.0,b/256.0", "def render_array(self, resolution=300, channel=\"GRAYSCALE\"):\n # Method below returns a cairocffi.ImageSurface object\n # https://cairocffi.readthedocs.io/en/latest/api.html#cairocffi.ImageSurface\n surface, width, height = self._document.write_image_surface(\n resolution=resolution\n )\n img_format = surface.get_format()\n\n # This is BGRA channel in little endian (reverse)\n if img_format != FORMAT_ARGB32:\n raise RuntimeError(\n f\"Expect surface format to be 'cairocffi.FORMAT_ARGB32', but got {img_format}.\" +\n \"Please check the underlining implementation of 'weasyprint.document.Document.write_image_surface()'\"\n )\n\n img_buffer = surface.get_data()\n # Returns image array in \"BGRA\" channel\n img_array = np.ndarray(\n shape=(height, width, 4), dtype=np.uint8, buffer=img_buffer\n )\n if channel == \"GRAYSCALE\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2GRAY)\n elif channel == \"RGBA\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2RGBA)\n elif channel == \"RGB\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2RGB)\n elif channel == \"BGRA\":\n return np.copy(img_array)\n elif channel == \"BGR\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2BGR)\n else:\n valid_channels = [\"GRAYSCALE\", \"RGB\", \"RGBA\", \"BGR\", \"BGRA\"]\n raise ValueError(\n f\"Invalid channel code {channel}. 
Valid values are: {valid_channels}.\"\n )", "def read_image(image_path):\n return np.array(load_img(image_path, color_mode='grayscale')) / 255", "def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg", "def figure_to_RGB_array(fig):\r\n figure_buffer = io.BytesIO()\r\n\r\n # Save the figure:\r\n fig.savefig(figure_buffer, format='png')\r\n\r\n figure_buffer.seek(0)\r\n\r\n figure_string = figure_buffer.getvalue()\r\n\r\n return figure_string", "def array_from_img(image):\n return np.array(image)", "def test_fromarray_rgb_fail():\n arr = numpy.zeros((20, 10, 3), dtype='float')\n\n parameters = {'data': [arr]}\n\n images.fromarray(parameters).convert('RGB')", "def _raw_to_gray(self):\n img_rgb = np.zeros((self.y_res, self.x_res, 3), dtype=np.uint8)\n img_rgb = np.array(self.img_raw)\n img_gray = np.zeros((self.y_res, self.x_res))\n img_gray[:, :] = img_rgb[:, :, 2]\n\n return img_gray", "def img_from_array(array):\n return Image.fromarray(array)", "def read_image_greyscale(path: str) -> np.ndarray:\n img = imread(path)\n if len(img.shape) > 2:\n img = np.dot(img[..., :3], [0.299, 0.587, 0.114])\n return img", "def _iter_images(self):\n for image in self._images:\n yield np.array(image.convert('RGB'))", "def get_array(self) -> numpy.array:\r\n \r\n return self.pic_array", "def createRGBImage(self, filepath, width=None, outdir=None):\n print('[createRGBImage] filepath, outdir', filepath, outdir)\n\n index = 0\n rgb_data = []\n\n # Read binary file\n binary_data = self.getBinaryData(filepath)\n\n # Create R,G,B pixels\n while (index + 3) < len(binary_data):\n R = binary_data[index]\n G = binary_data[index+1]\n B = binary_data[index+2]\n index += 3\n rgb_data.append((R, G, B))\n\n size = self.get_size(len(rgb_data), width)\n image = Image.new('RGB', size)\n image.putdata(rgb_data)\n if width > 0:\n image = image.resize((width, width))\n if outdir is not None:\n self.save_file(filepath, image, size, 'RGB', width, outdir)\n # print('np.array(image)', np.array(image).shape)\n return np.array(image)/255.0", "def get_image():\n bgr = np.frombuffer(\n stream.read_frame().get_buffer_as_uint8(), dtype=np.uint8\n ).reshape(RESOLUTIONY, RESOLUTIONX, 3)\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n return rgb", "def to_color(self):\n if self.channels == 4:\n color = opencv.cvtColor(self.img, opencv.COLOR_BGRA2BGR)\n return Image(color)\n elif self.channels == 1:\n color = opencv.cvtColor(self.img, opencv.COLOR_GRAY2BGR)\n return Image(color)\n else:\n return Image(self.img)", "def _convert_images(raw):\n # Convert the raw images from the data-files to floating-points.\n #raw_float = np.array(raw, dtype=float) / 255.0\n\n # Reshape the array to 4-dimensions.\n images = raw.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images.transpose([0, 2, 3, 1])\n\n return images", "def get_image_and_prep(self,file_path):\r\n img = np.array(Image.open(file_path).convert('1'))\r\n img = img.reshape(28,28,1)\r\n return img", "def image_to_array(self, img):\n x = np.asarray(img, dtype=self.dtype)\n if len(x.shape) == 3:\n if self.channels_first:\n x = x.transpose(2, 0, 1)\n elif len(x.shape) == 2:\n if self.channels_first:\n x = x.reshape((1, x.shape[0], x.shape[1]))\n else:\n x = x.reshape((x.shape[0], x.shape[1], 1))\n else:\n raise 
ValueError('Unsupported image shape: ', x.shape)\n return x", "def get_image(filepath,size):\n image = Image.open(filepath)\n newimage = image.resize((size,size)).convert('LA')\n pixels = np.asarray(newimage,dtype = np.float32)[:,:,0]\n return pixels", "def convert_img(self):\r\n self.img = self.img.convert('RGB')", "def format_data(img_path, size):\n img_color = cv2.imread(img_path)\n img_color = img_color[:, :, ::-1]\n img_color = cv2.resize(img_color, (size, size), interpolation=cv2.INTER_AREA)\n img_color = img_color.reshape((1, size, size, 3))\\\n #.transpose(0, 3, 1, 2)\n\n return img_color", "def Array2PIL(a,lut=None,minvalue=None,maxvalue=None,width=None,height=None,\n flip=None):\n import Image # we only need it here ...\n\n if flip==\"ud\": #up-down exchange\n a=a[::-1,:]\n h,w=Numeric.shape(a)\n## a_min=Numeric.minimum.reduce((Numeric.ravel(a)))\n## a_max=Numeric.maximum.reduce((Numeric.ravel(a)))\n a_min=min(Numeric.ravel(a))\n a_max=max(Numeric.ravel(a))\n\n # allow for an user-specified maximal value:\n if maxvalue!=None and maxvalue>a_max:\n a_max=maxvalue\n # allows for an user-specified minimal value:\n if minvalue!=None and minvalue<a_min:\n a_min=minvalue\n\n if lut is not None:\n if len(lut[0]) == 256:\n \n a=(Numeric.ravel(255.0*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt8)\n\n rgb=Numeric.zeros( (len(a),3),typecode=Numeric.UInt8)\n\n\n lut_=Numeric.zeros( (3,len(lut[0])),Numeric.UInt8)\n lut_[0]=lut[0].astype(Numeric.UInt8)\n lut_[1]=lut[1].astype(Numeric.UInt8)\n lut_[2]=lut[2].astype(Numeric.UInt8)\n\n # This is much faster than the original zip/ravel variant ...\n rgb[:,0]=Numeric.take(lut_[0],a)\n #print \"rtake\"\n rgb[:,1]=Numeric.take(lut_[1],a)\n #print \"gtake\"\n rgb[:,2]=Numeric.take(lut_[2],a)\n #print \"btake\"\n #rgb=Numeric.ravel(((Numeric.array(zip(r,g,b),\n # typecode=Numeric.UInt8))))\n\n #print \"rgb done\"\n else:\n N = len(lut[0])\n print \"LUT with N=%d entries\" % N\n if N>=256*256:\n print \"UUPS, more than uint16 colors??\", N\n raise ValueError(\"N too large\")\n \n a = (Numeric.ravel((N-1)*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt16)\n\n rgb = Numeric.zeros( (len(a), 3), typecode=Numeric.UInt16)\n\n lut_ = Numeric.zeros( (3,len(lut[0])), Numeric.UInt16)\n lut_[0] = lut[0].astype(Numeric.UInt16)\n lut_[1] = lut[1].astype(Numeric.UInt16)\n lut_[2] = lut[2].astype(Numeric.UInt16)\n\n # This is much faster than the original zip/ravel variant ...\n rgb[:,0] = Numeric.take(lut_[0],a)\n rgb[:,1] = Numeric.take(lut_[1],a)\n rgb[:,2] = Numeric.take(lut_[2],a)\n\n rgb = (rgb*256.0/N).astype(Numeric.UInt8)\n\n else: # simple grey scale ramp...\n a=(Numeric.ravel(255.0*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt8)\n # convert to (r_0,g_0,b_0,r_1,g_1,b_1,....)\n rgb=Numeric.ravel(Numeric.array(zip(a,a,a)))\n\n # create a PIL RGB image\n #print \"w/h\",w,h\n im=Image.new(\"RGB\",(w,h))\n #print \"imfromstring:\"\n im.fromstring(rgb.tostring())\n #print \"done ...\"\n \n # scale image ?\n if height!=None and width==None:\n im=im.resize(w/h*height,height)\n elif height==None and width!=None:\n im=im.resize(width,h/w*width)\n elif height!=None and width!=None:\n im=im.resize(width,height)\n\n return(im)", "def get_image(image_path):\n image = Image.open(image_path, \"r\")\n width, height = image.size\n pixel_values = list(image.getdata())\n\n if (image.mode != \"RGBA\"):\n image = image.convert(\"RGB\")\n pixel_values = list(image.getdata())\n for idx, px in enumerate(pixel_values):\n pixel_values[idx] = [px[0], px[1], px[2], 255]\n\n return 
(list(chunks(pixel_values, width)), width, height)", "def load_ipl_as_array(path): \n img = PIL.Image.open(path).convert('RGBA')\n img = np.array(img)\n return img", "def imageprepare():\r\n file_name = '9-test.png'\r\n im = Image.open(file_name).convert('L')\r\n\r\n im.save(\"9-t.png\")\r\n plt.imshow(im)\r\n plt.show()\r\n tv = list(im.getdata())\r\n\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n return tva", "def get_array(self, scale=1):\n array = cv2.imread(str(self.path), self.read_type)\n\n # resize original image so it can be be scaled without fractions\n x_extra = array.shape[0] % self.scaling\n y_extra = array.shape[1] % self.scaling\n\n x_extra = self.scaling - x_extra if x_extra != 0 else x_extra\n y_extra = self.scaling - y_extra if y_extra != 0 else y_extra\n\n padded_array = cv2.resize(\n array, (int(array.shape[1] + y_extra), int(array.shape[0] + x_extra))\n )\n\n # scale image\n resized_array = cv2.resize(\n padded_array,\n (int(padded_array.shape[1] * scale), int(padded_array.shape[0] * scale)),\n )\n\n # cv2 reads in array as BGR, tensorboard shows as RGB\n if not self.greyscale:\n x = np.copy(resized_array)\n resized_array[:, :, 0] = x[:, :, 2]\n resized_array[:, :, 2] = x[:, :, 0]\n\n # cv2.imshow('image',array)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n if self.greyscale:\n resized_array = np.expand_dims(resized_array, 2)\n return resized_array", "def __call__(self, results):\n # Image is bgr\n img = results['img'][..., ::-1]\n img = Image.fromarray(img)\n img = self.transform(img)\n img = np.asarray(img)\n img = img[..., ::-1]\n results['img'] = img\n return results", "def _to_rgb(thing, name=\"input\"):\n thing = np.array(thing)\n assert thing.shape == (3, ), (\n \"Expected %r to be a length-3 array-like object, but found shape %s\" %\n (name, thing.shape))\n return thing", "def create_array_from_rgb_layers(vk4_container, layer_list):\n log.debug(\"Entering create_array_from_rgb_layers\")\n width = vk4_container.image_width\n height = vk4_container.image_height\n\n new_array = np.zeros(((width * height), 3), dtype=np.uint8)\n for layer in layer_list:\n i = 0\n for rgb in layer:\n new_array[i][0] += rgb[0]\n new_array[i][1] += rgb[1]\n new_array[i][2] += rgb[2]\n i = i + 1\n log.debug(\"In create_array_from_rgb_layers()\\n\\tArray for rgb image \" \\\n \"output:\\n{}\".format(new_array))\n log.debug(\"Exiting create_array_from_rgb_layers\")\n\n return new_array", "def image(self):\n return self.pixels.get_array()", "def to_numpy(self) -> Tuple[np.ndarray, np.ndarray]:\n state = self._zero_rgb_image(round(self.height), round(self.width))\n rendering = self._zero_rgb_image(round(self.height), round(self.width))\n\n for sprite in self.sprites[1:]: # skip self\n sprite_state, sprite_rendering = sprite.to_numpy(self.height, self.width)\n state[sprite_state != 0] = sprite_state[sprite_state != 0]\n rendering[sprite_rendering != 0] = sprite_rendering[sprite_rendering != 0]\n return state, rendering", "def newimagefromarray(self, *args, **kwargs):\n return _image.image_newimagefromarray(self, *args, **kwargs)", "def read_img(path):\n img = Image.open(path)\n img_arr = np.array(img, dtype='int32')\n img.close()\n return img_arr", "def array2img(array):\n if len(array.shape) == 2:\n return Image.fromarray(np.clip(array, 0, 255).astype('uint8'), mode='L')\n elif len(array.shape) == 3:\n return Image.fromarray(np.clip(array, 0, 255).astype('uint8'), mode='RGB')\n else:\n print('Income array is not 
at appropriate shape!')", "def testImageProcessing():\n Im_pix = getRGB( 'in.png' ) # read in the in.png image\n print \"The first two pixels of the first row are\",\n print Im_pix[0][0:2]\n # remember that Im_pix is a list (the image)\n # of lists (each row) of lists (each pixel is [R,G,B])\n New_pix = [ [ [255 - num for num in p] for p in row ] for row in Im_pix ]\n # now, save to the file 'out.png'\n saveRGB( New_pix, 'out.png' )", "def matplotlib_image(image):\n if image.ndim == 2:\n rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n else:\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return rgb", "def process_screen(screen):\n\n # Indexing convention varies between PIL and numpy\n screen = np.swapaxes(screen, 0, 1)\n # Load the array in PIL\n im = Image.fromarray(screen, 'RGB')\n # Convert to grayscale\n im = im.convert(mode='L')\n # Crop\n im = im.crop((0, 0, 288, 405))\n # Downscale and resize\n im = im.resize((84, 84))\n # Normalise\n im = np.array(im) / 255\n\n return im", "def preprocess_image(image: Image) -> np.ndarray:\n return np.array(image.convert('L'))", "def rgb_processing(rgb_img, center, scale, rot=0):\n rgb_img = crop(rgb_img, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=rot)\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img", "def imagefile_to_array(imagefname):\n with Image.open(imagefname) as image: \n im_arr = np.fromstring(image.tobytes(), dtype=np.uint8)\n rows = image.size[1]\n cols = image.size[0]\n no_channels = int(len(im_arr)/rows/cols)\n im_arr = im_arr.reshape((rows, cols, no_channels))\n im_arr = np.rollaxis(im_arr,-1)\n return im_arr", "def greyScale(img, shape):\n s, v = shape\n greyPicture = [sum(img[i]) / 3 for i in range(v * s)]\n\n return greyPicture", "def transform_image(self):\n im = cv2.imread(\"result.png\", 0)\n im2 = cv2.resize(im, (28, 28))\n im = im2.reshape(28, 28, -1)\n im = im.reshape(1, 1, 28, 28)\n im = cv2.bitwise_not(im)\n im = im.reshape(28,28)\n \n with out:\n clear_output()\n \n # resize\n img = np.array(im)\n img = img.reshape(28*28,)\n \n #img = img/255.0\n \n return img", "def create_colorful_test_image(self):\n ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)\n ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)\n ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)\n imr = np.concatenate((ch255, ch128, ch128), axis=2)\n img = np.concatenate((ch255, ch255, ch0), axis=2)\n imb = np.concatenate((ch255, ch0, ch255), axis=2)\n imw = np.concatenate((ch128, ch128, ch128), axis=2)\n imu = np.concatenate((imr, img), axis=1)\n imd = np.concatenate((imb, imw), axis=1)\n image = np.concatenate((imu, imd), axis=0)\n return image", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def load_rgb(path):\n bands = band_list['rgb']\n img = None\n fmt = \"_{}.tif\"\n for b in bands:\n band_ds = rasterio.open(path + fmt.format(b))\n aux = band_ds.read(1)\n aux = norm_band(aux)\n aux = np.expand_dims(aux, axis=-1)\n if img is None:\n img = aux\n else:\n img = np.concatenate((img, aux), axis=-1)\n return img", "def render(self):\n np_img = np.array(self.prev_img, dtype=np.uint8)\n np_img = np.swapaxes(np_img, 0, 2)\n return np_img", "def _process_img_rgb(self, sensor_data):\n img = 
np.array(sensor_data.raw_data).reshape((self.img_y, self.img_x, 4))\n img = img[:, :, :3] # sensor is actually rgba, we don't need alpha values\n self.rgb = img # need to scale rgb values to be {0,1}", "def imageprepare():\r\n file_name = 'temp_image.png'\r\n im = Image.open(file_name).convert('L')\r\n im = im.resize((20, 20))\r\n p = Image.new('L', (28,28), (255))\r\n p.paste(im,(4,4,24,24))\r\n p.save(\"last_image.png\")\r\n\r\n tv = list(p.getdata()) # get pixel values\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n tva = np.reshape(tva, (28, 28))\r\n\r\n return tva" ]
[ "0.7121028", "0.70841926", "0.6746689", "0.66450286", "0.6585525", "0.6470386", "0.64537305", "0.6411404", "0.63537574", "0.63390535", "0.62979555", "0.62929916", "0.6276879", "0.62607044", "0.62399733", "0.6200515", "0.61336935", "0.6093553", "0.6084001", "0.608396", "0.60822624", "0.60705554", "0.6052285", "0.60361946", "0.6034702", "0.60346353", "0.6008246", "0.60037637", "0.5983197", "0.59723526", "0.5969241", "0.5960871", "0.5951258", "0.5940149", "0.5918388", "0.59145933", "0.58856773", "0.5879849", "0.58720726", "0.58689934", "0.5857958", "0.5841806", "0.5824331", "0.5822387", "0.5810616", "0.5806708", "0.58008724", "0.5792252", "0.5790427", "0.5787448", "0.576509", "0.57471895", "0.5737841", "0.5728063", "0.5725756", "0.5715845", "0.57139206", "0.57006884", "0.5697174", "0.5694381", "0.5693297", "0.5684736", "0.5681925", "0.56817126", "0.567767", "0.56771773", "0.5674219", "0.5663981", "0.5662222", "0.56585085", "0.5655876", "0.565498", "0.565186", "0.56511307", "0.56466925", "0.56464016", "0.56435406", "0.5641105", "0.56278545", "0.5626459", "0.562479", "0.5602585", "0.5602049", "0.5598622", "0.5596453", "0.55951524", "0.55905557", "0.5586758", "0.55863327", "0.55842924", "0.5583416", "0.5579959", "0.55669254", "0.55664885", "0.5563222", "0.55578756", "0.55560726", "0.55514324", "0.55485815", "0.5546809" ]
0.67405456
3
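Several of the negatives in the row above (`load_ipl_as_array`, `imageprepare`, `imagefile_to_array`) circle the same PIL-to-NumPy round trip; a minimal standalone sketch of that pattern, assuming Pillow and NumPy are available (the helper names and path handling are illustrative, not taken from the row):

import numpy as np
from PIL import Image

def image_to_array(path):
    # Load as RGBA so the channel count is predictable, then drop alpha.
    img = Image.open(path).convert('RGBA')
    arr = np.asarray(img)        # shape (H, W, 4), dtype uint8
    return arr[:, :, :3]         # keep the RGB channels only

def array_to_image(arr):
    # Clip to the valid byte range before handing back to Pillow.
    return Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8), mode='RGB')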
Return matrix representing the complex plane.
def _complexPlane(self, n, m, xmin, xmax, ymin, ymax):\n    # Create two matrices of size n x m\n    ix, iy = np.mgrid[0:n, 0:m]\n    # Create range of values in the x- and y-axis\n    real_part = np.linspace(xmin, xmax, n)[ix]\n    imag_part = np.linspace(ymin, ymax, m)[iy] * complex(0, 1)\n    complex_plane = real_part + imag_part\n    return complex_plane
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_matrix(self):\n return numpy.array([[1, 0],\n [0, 1]], dtype=complex)", "def to_matrix(self):\n return numpy.array([[1, 1],\n [1, -1]], dtype=complex) / numpy.sqrt(2)", "def __complex__(self):\n return complex(self._reNum, self._imNum)", "def to_matrix(self):\n return numpy.array([[1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]], dtype=complex)", "def complex_matrix(n):\n\n matrix = np.zeros(shape=(n, n), dtype=complex)\n\n for row in np.arange(n):\n if row == 0:\n for col in np.arange(n):\n matrix[row, col] = 1\n else:\n for col in np.arange(n):\n if col == 0:\n matrix[row, col] = 1\n else:\n matrix[row, col] = roots(n, col*row)\n\n return matrix", "def __complex__(self):\n return complex(self.q[0], self.q[1])", "def real_matrix_to_complex(m):\n n = int(np.shape(m)[0]/2)\n a = m[0:n, 0:n]\n b = m[0:n, n:2*n]\n return a + 1.0j*b", "def real_of_complex(z):\n return np.vstack((np.real(z[:,0]),np.imag(z[:,0]),np.real(z[:,1]),np.imag(z[:,1]))).T", "def matrix(self):\n\t\treturn Matrix((\n\t\t\t( math.cos(self.val), -math.sin(self.val) ),\n\t\t\t( math.sin(self.val), math.cos(self.val) )\n\t\t))", "def matrix(self):\n return self._matrix(*self.parameters)", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def getMatrix(self) -> CMatrix4:\n ...", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. 
Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complexify(self,z):\r\n \r\n import numpy as np\r\n \r\n if not np.iscomplex(z).any():\r\n if len(z.shape) != 2 or z.shape[1] != 2:\r\n raise Exception('Shape format not understood. Provide shape vertices either as a complex vector, or as a N-by-2 real numpy array.')\r\n else:\r\n z = z[:,0] + 1j*z[:,1]\r\n \r\n return z", "def complex(real, imag):", "def compute_matrix(self):\n\n fac = self.a / self.dx ** 2\n\n diagonal = np.ones(self.nx) * 2 * fac\n lower = np.ones(self.nx - 1) * -fac\n upper = np.ones(self.nx - 1) * -fac\n\n matrix = sp.diags(\n diagonals=[diagonal, lower, upper],\n offsets=[0, -1, 1], shape=(self.nx, self.nx),\n format='csr')\n\n return matrix", "def get_complex_representation(points: ArrayLike) -> np.ndarray:\n len_n = len(points)\n\n y_s = np.array(points, dtype=np.complex_)\n\n w_comp = e_complex_pow(2 * np.pi / len_n)\n # Fourier-Matrix\n f_matrix = np.array(\n [\n [w_comp ** (j * k) for j in range(len_n)]\n for k in range(-(len_n - 1) // 2, (len_n - 1) // 2 + 1)\n ]\n )\n\n ck_s = (1 / len_n) * (np.conj(f_matrix) @ y_s) # Fourier-Coefficients\n return ck_s", "def m_c(self) -> np.ndarray:\n assert self._k is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k)", "def getMatrixRepresentation(self):\n \n S = self.spinSystem\n dim = S.dimension\n matRep = np.zeros([dim, dim], dtype = complex)\n\n for i, b1 in enumerate(S.basis.Bras):\n for j, b2 in enumerate(S.basis.Kets):\n ket = self.__mul__(b2)\n\n matRep[i,j] += b1*ket\n\n return matRep", "def complex_matrix_to_real(m):\n a, b = np.real(m), np.imag(m)\n return np.vstack([np.hstack([a,b]), np.hstack([-b, a])])", "def get_transformation_matrix(self):\n\n s = self.sin()\n c = self.cos()\n return np.array(\n [\n c ** 2,\n c * s,\n -(c ** 2),\n -c * s,\n c * s,\n s ** 2,\n -c * s,\n -(s ** 2),\n -(c ** 2),\n -c * s,\n c ** 2,\n c * s,\n -c * s,\n -(s ** 2),\n c * s,\n s ** 2,\n ],\n dtype=np.float64,\n ).reshape(4, 4)", "def as_matrix(self) -> types.Matrix:", "def complex_inverse(c1,cr):", "def Matrix(arg0: List[List[complex]]) -> ngsolve.bla.MatrixC:", "def __complex__(self) -> complex:\n return self._translate_in_type(complex, self.integer, self.float_num)", "def complexTransform(self,z):\n N=self.coefficients_number\n T=self.period\n #wo=2*math.pi/N\n #an=[1/N*sum([z[t]*cmath.exp(complex(0,-n*k)) for t in range(T)]) for n in range(-(N-1)//2,(N-1)//2+1)]\n a=lambda 
n:1/T*sum([z[t]*cmath.exp(-1j*n*t) for t in range(T)])\n #a=lambda n:1/T*scipy.integrate.quad(z[t]*cmath.exp(-1j*n*t),0,T)\n c=[a(n) for n in range(-N,N+1)]\n return c", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[0, 0]=c\n self.matrix[0, 1]=s\n self.matrix[1, 0]=-s\n self.matrix[1, 1]=c\n return self.matrix", "def toComplex(self):\n return (self._points[0].value(),\n self._points[1].value(),\n self._points[2].value())", "def matrix(self):\n return np.matrix(list(self._columns.values()))", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[1, 1]=c\n self.matrix[1, 2]=s\n self.matrix[2, 1]=-s\n self.matrix[2, 2]=c\n return self.matrix", "def matrix(self, modulus=None):\n basis = self.domain.basis_elements()\n cols = [self.codomain.represent(self.mapping(elt)) for elt in basis]\n if not cols:\n return DomainMatrix.zeros((self.codomain.n, 0), ZZ).to_dense()\n M = cols[0].hstack(*cols[1:])\n if modulus:\n M = M.convert_to(FF(modulus))\n return M", "def cofactor_matrix(self):\n resp = []\n len_b = len(self.take_vec())\n for i in range(self.order):\n _matrix = aux.cofactor(self.take_matrix(),\n (i, self.order-1)\n )\n _resp = math.pow(-1, len_b-1)\n _resp = _resp * np.linalg.det(_matrix)\n _resp = _resp * math.pow(-1, i * (self.order-1))\n resp.append(int(round(_resp)))\n\n return resp", "def _xyz_matrix():\n fx = 583.0\n fy = 583.0\n cx = 321\n cy = 249\n a = -0.0028300396\n b = 3.1006268\n mat = np.array([[1/fx, 0, 0, -cx/fx],\n [0, -1/fy, 0, cy/fy],\n [0, 0, 0, -1],\n [0, 0, a, b]])\n return mat", "def getMatrix(self, frame):\n radian=np.radians(self.getValue(frame))\n c=np.cos(radian)\n s=np.sin(radian)\n self.matrix[2, 2]=c\n self.matrix[2, 0]=s\n self.matrix[0, 2]=-s\n self.matrix[0, 0]=c\n return self.matrix", "def dcomplex(self):\n return self._dcomplex", "def _complex(real, imag):\n real = np.asarray(real)\n imag = np.asarray(imag)\n cplx = 1j * imag \n return cplx + real", "def generate_random_matrix(dim):\n\n A = np.complex128(np.random.random([dim, dim]))\n A_adjoint = A.conj().T\n\n P = A @ A_adjoint\n P += np.identity(len(P))\n\n P_inverse = np.linalg.inv(P)\n\n return P_inverse", "def matrix(self) -> sympy.Matrix:\n return self.matrix_factory(*self.params)", "def makedense(self):\n M = np.zeros((self.Dimension, self.Dimension), dtype= complex)\n for me in self.Elements:\n M[me.i][me.j] = me.val\n return M", "def create_A_matrix(J,sigma):\n A = np.zeros((3,J),dtype=complex)\n A[0,1] = 0\n A[0,2:] = -sigma\n A[1,:] = 1+2*sigma\n A[1,0] = 1\n A[1,-1] = 1\n A[2,:-2] = -sigma\n A[2,-2] = 0\n return A", "def get_matrix(self):\n return self._matrix[:3, :]", "def cartan_matrix(self):\n # as soon as CartanMatrix is implemented we should use it here:\n # from sage.combinat.root_system.cartan_matrix import CartanMatrix\n cmat = copy(self.b_matrix())\n for i,j in cmat.nonzero_positions():\n a = cmat[i,j]\n if a > 0: cmat[i,j] = -a\n for i in range(self._rank):\n cmat[i,i] = 2\n # return CartanMatrix(cmat)\n return cmat", "def c_matrix(x1,x2,x3):\n\tC = np.array([\t[\t2*(x2-x1), \t\t(x2-x1), \t\t\t0\t\t\t], \\\n\t\t\t\t\t[\t(x2-x1), \t\t2*(x3-x1), \t\t(x3-x2)\t\t], \\\n\t\t\t\t\t[\t0,\t\t\t\t(x3-x2),\t\t2*(x3-x2)\t] \t], \\\n\t\t\t\t\tfloat)\n\treturn(C)", "def complex_magnitude(c):\n return (c * c.conjugate()) ** 0.5", "def _q_matrix(self):\n return np.array([\n [self.q[0], -self.q[1], -self.q[2], -self.q[3]],\n [self.q[1], self.q[0], 
-self.q[3], self.q[2]],\n [self.q[2], self.q[3], self.q[0], -self.q[1]],\n [self.q[3], -self.q[2], self.q[1], self.q[0]]])", "def matrix(self) -> sympy.Matrix:\n raise NotImplementedError()", "def _pmatrix(kn_u, kn_d, thickness):\n p = np.zeros((kn_u.size, 4, 4), np.complex128)\n\n p0 = np.exp(complex(0, 1) * kn_u * thickness)\n p1 = np.exp(complex(0, 1) * kn_d * thickness)\n\n p[:, 0, 0] = 1 / p0\n p[:, 1, 1] = p0\n p[:, 2, 2] = 1 / p1\n p[:, 3, 3] = p1\n\n return p", "def matrix(self):\n m = Matrix.identity(4, 4)\n\n m[:3, :3] = self.rotation.matrix.data\n m[:3, 3:4] = self.translation.matrix.data\n\n return m", "def _r_matrix_xxz(self, root):\n r_matrix = np.eye(4, dtype=np.complex128)\n if self.delta == 1:\n b = (root - 1j) / (root + 1j)\n c = 2j / (root + 1j)\n\n elif self.delta > 1:\n gamma = np.arccosh(self.delta)\n b = np.sin(gamma / 2 * (root - 1j)) / np.sin(gamma / 2 * (root + 1j))\n c = 1j * np.sinh(gamma) / np.sin(gamma / 2 * (root + 1j))\n else:\n gamma = np.arccos(self.delta)\n b = np.sinh(gamma / 2 * (root - 1j)) / np.sinh(gamma / 2 * (root + 1j))\n c = 1j * np.sin(gamma) / np.sinh(gamma / 2 * (root + 1j))\n r_matrix[1, 1] = r_matrix[2, 2] = c\n r_matrix[1, 2] = r_matrix[2, 1] = b\n return r_matrix", "def Omat(self):\n if self.standard:\n return np.matrix(((0, -1, 0), (0, 0, 1), (-1, 0, 0)))\n else:\n return np.matrix(((0, 0, 1), (0, 1, 0), (-1, 0, 0)))", "def matrix(self):\n return self._matrix", "def matrix(self):\n return self._matrix", "def contours_to_matrix(self):\n return np.vstack([c.to_matrix() for c in self.contours])", "def mat(self) -> np.ndarray:\n Tp = ToeplitzificationOperator(P=self.P, M=self.M, dtype=self.x.dtype)\n return Tp.matvec(self.x)", "def coord_to_complex(i, j):\n re = float(4*i)/float(W-1) - 2.0 # real component\n im = float(2.2*j)/float(H-1) - 1.1 # imaginary component\n # return complex number as order pair of reals\n return (re,im)", "def take_matrix(self):\n matrix = aux.matrix(self.take_vec(), self.order)\n\n return matrix", "def to_coo_matrix(self):\n row_indices, column_indices, nonzero_elements = self.to_ijv()\n return coo_matrix((nonzero_elements, (row_indices, column_indices)),\n shape=(self.size, self.size))", "def getTransposeMatrix(self) -> CMatrix4:\n ...", "def build_stoichiometric_matrix(incidence_matrix, complexes_matrix):\n\n #@ is matrix multiplication\n\n #print(\"complexes matrix\")\n\n #This is matrix N in toric paper\n return complexes_matrix.transpose() @ incidence_matrix", "def tensor_to_complex_np(data):\n data = data.numpy()\n return data[..., 0] + 1j * data[..., 1]", "def _init_transformation_matrix(self):\n # Set up basic transformation matrix\n c_transform = np.zeros((self.n_beads, self.n_beads))\n\n # Get auxiliary array with bead indices\n n = np.arange(1, self.n_beads + 1)\n\n # for k = 0\n c_transform[0, :] = 1.0\n\n for k in range(1, self.n_beads // 2 + 1):\n c_transform[k, :] = np.sqrt(2) * np.cos(2 * np.pi * k * n / self.n_beads)\n\n for k in range(self.n_beads // 2 + 1, self.n_beads):\n c_transform[k, :] = np.sqrt(2) * np.sin(2 * np.pi * k * n / self.n_beads)\n\n if self.n_beads % 2 == 0:\n c_transform[self.n_beads // 2, :] = (-1) ** n\n\n # Since matrix is initialized as C(k,n) does not need to be transposed\n c_transform /= np.sqrt(self.n_beads)\n c_transform = torch.from_numpy(c_transform)\n\n return c_transform", "def to_coo_matrix(self):\n if self.E > 0:\n i, j = self.edges.T\n sm = coo_matrix((np.ones(self.E), (i, j)),\n shape=(self.V, self.V))\n else:\n sm = coo_matrix((self.V, self.V))\n return sm", 
"def matrix_rotate_3d_z(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_z = -deg * pi/180\n c_z = cos(rad_z)\n s_z = sin(rad_z)\n return np.matrix([[c_z, -s_z, 0], [s_z, c_z, 0], [0, 0, 1]])", "def matrixRepresentation(self,decimals=8):\n temp = self.circuit.copy()\n temp.remove_final_measurements()\n \n simulator = Aer.get_backend('unitary_simulator')\n result = execute(temp, backend=simulator).result()\n unitary = result.get_unitary(decimals=decimals).tolist()\n for i in range(len(unitary)):\n for j in range(len(unitary[i])):\n if unitary[i][j]==0:\n unitary[i][j]=\"0\"\n else:\n string=str(unitary[i][j].real).replace(\".0\", \"\")\n string=\"\" if unitary[i][j].real==0 else string\n string+=self.numberFormat(unitary[i][j].imag,True)\n unitary[i][j]=string.lstrip(\"+\")\n return unitary", "def to_mat(self) -> np.matrix:\n raise NotImplementedError", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def _get_unitary_matrix(self, unitary): # pylint: disable=no-self-use\n if unitary in diagonal_in_z_basis:\n return unitary.eigvals()\n\n return unitary.matrix()", "def get_matrix(self, transform):\r\n\r\n rotation = transform.rotation\r\n location = transform.location\r\n c_y = np.cos(np.radians(rotation.yaw))\r\n s_y = np.sin(np.radians(rotation.yaw))\r\n c_r = np.cos(np.radians(rotation.roll))\r\n s_r = np.sin(np.radians(rotation.roll))\r\n c_p = np.cos(np.radians(rotation.pitch))\r\n s_p = np.sin(np.radians(rotation.pitch))\r\n matrix = np.matrix(np.identity(4))\r\n matrix[0, 3] = location.x\r\n matrix[1, 3] = location.y\r\n matrix[2, 3] = location.z\r\n matrix[0, 0] = c_p * c_y\r\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\r\n matrix[1, 0] = s_y * c_p\r\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\r\n matrix[2, 0] = s_p\r\n matrix[2, 1] = -c_p * s_r\r\n matrix[2, 2] = c_p * c_r\r\n return matrix", "def dct_matrix(rows, cols, unitary=True):\r\n rval = numpy.zeros((rows, cols))\r\n col_range = numpy.arange(cols)\r\n scale = numpy.sqrt(2.0/cols)\r\n for i in xrange(rows):\r\n rval[i] = numpy.cos(i * (col_range*2+1)/(2.0 * cols) * numpy.pi) * scale\r\n\r\n if unitary:\r\n rval[0] *= numpy.sqrt(0.5)\r\n return rval", "def matrix(self):\n return self._rotation", "def _rmatrix(theta):\n r = np.zeros((4, 4), np.complex128)\n\n cos_term = np.cos(theta / 2.0) * complex(1, 0)\n sin_term = np.sin(theta / 2.0) * complex(1, 0)\n\n r[0, 0] = cos_term\n r[1, 1] = cos_term\n\n r[0, 2] = sin_term\n r[1, 3] = sin_term\n\n r[2, 0] = -sin_term\n r[3, 1] = -sin_term\n\n r[2, 2] = cos_term\n r[3, 3] = cos_term\n\n return r", "def complex(self, real=0.0, imag=0.0):\n if imag == 0.0 and real == 0.0:\n return complex_zero", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def CreateMatrix(self) -> BaseMatrix:", "def CreateMatrix(self) -> BaseMatrix:", "def conjugate(self):\n return Complex(self._reNum, -self._imNum)", "def matrix(self, full=False, keeppads=True):\n\n v = np.fft.hfft(self._u, n=self.N) / self.N\n idx = sum(np.ogrid[0:self.N, -self.N:0])\n C = v[idx] # follow scipy.linalg.{circulant,toeplitz,hankel}\n\n if keeppads:\n a = self._yfac_.copy()\n b = self._xfac_.copy()\n else:\n a = self.yfac.copy()\n b = self.xfac.copy()\n C = self._unpad(C, 0, True)\n C = self._unpad(C, 1, False)\n a = a.reshape(-1, 1)\n\n if not full:\n return a, b, C\n else:\n return a * C * b", "def 
to_matrix(self):\n return self.to_operator().data", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def to_matrix(self):\n\n return self._tensor_to_matrix(self._t)", "def T(self):\n # TODO - your code here\n matrix_transpose = [];\n \n for j in range(self.w):\n matrix_transpose.append(self.get_column(j));\n \n return Matrix(matrix_transpose);", "def _cmplx_to_complex_ ( s ) :\n return complex ( s.real , s.imag )", "def matrix(self):\n return self.composition(self._parent.edge_to_matrix)", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def information_matrix(self):\n return self._cov.inv()", "def theta_phi_of_complex(z):\n return np.stack([theta_of_complex(z), phi_of_complex(z)], axis=1)", "def to_matrix(self): \n warnings.warn(f'{self} is being reconstructed into a matrix, consider operating on the decomposed form.')\n\n full = self.to_tensor()\n if self.n_matrices == ():\n return full.reshape(self.shape)\n else:\n return full.reshape(self.n_matrices + self.shape)", "def get_complex_type(self):\n import numpy\n return numpy.complex128", "def z_operator_matrix(self):\n n, r_1, r_2, k = self.n, self.r_1, self.r_2, self.k\n\n # Use the row vector [ A2^T 0 I ], which commutes with the check matrix.\n check_mat = np.zeros((k, n), dtype='int')\n check_mat[:, 0:r_1] = np.transpose(self.parity_check_c1[:, (r_1 + r_2):n])\n check_mat[:, (r_1 + r_2):n] = np.identity(k)\n return check_mat", "def complex_array(real, imag):\n if real.size > 50000:\n return _nb_complex_par(real, imag)\n return _nb_complex_seq(real, imag)", "def getRotationMatrix(x, y, z, angle):\n # impossible to have a rotational matrix around (0, 0 ,0)\n if x == 0 and y == 0 and z == 0:\n raise Exception(\"Cannot have a rotation matrix around (0, 0, 0)\")\n\n # normalize vector\n vec = MatrixExtended([x, y, z])\n length = np.linalg.norm(vec)\n x /= length\n y /= 
length\n z /= length\n\n # some shortcuts for readability\n xx = x * x\n yy = y * y\n zz = z * z\n C = math.cos\n S = math.sin\n\n # calculate matrix elements\n e11 = xx + (1 - xx) * C(angle)\n e12 = x * y * (1 - C(angle)) - z * S(angle)\n e13 = x * z * (1 - C(angle)) + y * S(angle)\n e21 = x * y * (1 - C(angle)) + z * S(angle)\n e22 = yy + (1 - yy) * C(angle)\n e23 = y * z * (1 - C(angle)) - x * S(angle)\n e31 = x * z * (1 - C(angle)) - y * S(angle)\n e32 = y * z * (1 - C(angle)) + x * S(angle)\n e33 = zz + (1 - zz) * C(angle)\n\n return MatrixExtended([\n [e11, e12, e13, 0],\n [e21, e22, e23, 0],\n [e31, e32, e33, 0],\n [0, 0, 0, 1]])" ]
[ "0.7560941", "0.7344105", "0.71097344", "0.69919634", "0.6817131", "0.6790059", "0.66661674", "0.66255957", "0.65777934", "0.65038025", "0.6480605", "0.64471483", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.6355539", "0.6345991", "0.6324258", "0.6311948", "0.630553", "0.6298554", "0.62793154", "0.62792474", "0.62621725", "0.6254704", "0.62544924", "0.62341386", "0.6217704", "0.6205966", "0.61968917", "0.6174655", "0.61687803", "0.6167607", "0.6157696", "0.6130524", "0.6127452", "0.6114291", "0.6108214", "0.60898596", "0.6085947", "0.6084371", "0.60714465", "0.60669667", "0.6043775", "0.6034315", "0.6029588", "0.6010704", "0.5994032", "0.59923625", "0.59873927", "0.5942475", "0.5934934", "0.5934934", "0.5925636", "0.59120613", "0.5906896", "0.59066", "0.5905317", "0.5903777", "0.5885552", "0.58847326", "0.5881123", "0.58707905", "0.5853421", "0.58524626", "0.5842843", "0.58367854", "0.5834474", "0.5827039", "0.5823623", "0.5820061", "0.58033574", "0.5798398", "0.57727224", "0.57713836", "0.57713836", "0.57670087", "0.57567585", "0.57535213", "0.5748776", "0.5748776", "0.5742288", "0.5738255", "0.5736249", "0.5735964", "0.5733798", "0.5729445", "0.5727329", "0.5721304", "0.5720656", "0.571083", "0.5706708", "0.5706101" ]
0.6266454
30
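The `_complexPlane` document above is the stock `np.mgrid` + `np.linspace` recipe for sampling a rectangle of the complex plane; a minimal sketch of how such a grid feeds an escape-time iteration, assuming NumPy (the iteration limit and escape radius are illustrative):

import numpy as np

def complex_plane(n, m, xmin, xmax, ymin, ymax):
    ix, iy = np.mgrid[0:n, 0:m]
    return np.linspace(xmin, xmax, n)[ix] + 1j * np.linspace(ymin, ymax, m)[iy]

def escape_counts(c, max_iter=100):
    # Count iterations of z -> z**2 + c before |z| exceeds the escape radius.
    z = np.zeros_like(c)
    counts = np.zeros(c.shape, dtype=int)
    for _ in range(max_iter):
        alive = np.abs(z) <= 2.0
        z[alive] = z[alive] ** 2 + c[alive]
        counts[alive] += 1
    return counts

grid = complex_plane(400, 300, -2.0, 1.0, -1.0, 1.0)
fractal = escape_counts(grid) / 100.0   # normalized to [0, 1] for coloring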
Return default zoom setting.
def _defaultZoom(self):\n    return (-1.0, 1.0, -1.0, 1.0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zoom(self):\n return self['zoom']", "def zoom(self):\n return self.container['zoom']", "def _get_zoom(self) :\n \n # TODO : make it absolute zoom value : a zoom of 1 displays one data\n # pixel in one viewport pixel.\n \n return self._zoom", "def zoom(self) -> Optional[int]:\n return self.get(\"/Zoom\", None)", "def zoom(self) -> float:\n return self._zoom", "def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res", "def get_zoom(self) -> float:\n transform = self.transform()\n cur_scale = (transform.m11(), transform.m22())\n return float(f\"{cur_scale[0] - 1.0:0.2f}\")", "def get_zoom_transform(self):\n return self.zoom_levels[self.cur_zoom][1]", "def _getDefaultSettings(cls):\n return {'minimumROIDimensions': 1,\n 'minimumROISize': None, # Skip testing the ROI size by default\n 'normalize': False,\n 'normalizeScale': 1,\n 'removeOutliers': None,\n 'resampledPixelSpacing': None, # No resampling by default\n 'interpolator': 'sitkBSpline', # Alternative: sitk.sitkBSpline,\n 'padDistance': 5,\n 'distances': [1],\n 'force2D': False,\n 'force2Ddimension': 0,\n 'label': 1,\n 'enableCExtensions': True,\n 'additionalInfo': True}", "def action_set_zoom(self, value):\n if value >= 0 and value < len(self.zoom_levels) and value != self.cur_zoom:\n self.cur_zoom = value\n self.apply_zoom()", "def getDefaultSettings(self) -> ghidra.docking.settings.Settings:\n ...", "def __zoomReset(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").zoomTo(0)\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomTo(0)\n self.sbZoom.setValue(aw.getZoom())", "def __zoom(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n aw = e5App().getObject(\"Shell\")\n else:\n aw = self.activeWindow()\n if aw:\n from QScintilla.ZoomDialog import ZoomDialog\n dlg = ZoomDialog(aw.getZoom(), self.ui, None, True)\n if dlg.exec_() == QDialog.Accepted:\n value = dlg.getZoomSize()\n self.__zoomTo(value)", "def setZoom(self, zoom):\r\n self._viewZoom = zoom", "def _autozoom(self):\n bounds = self._autobounds()\n attrs = {}\n\n midpoint = lambda a, b: (a + b)/2\n attrs['location'] = (\n midpoint(bounds['min_lat'], bounds['max_lat']),\n midpoint(bounds['min_lon'], bounds['max_lon'])\n )\n\n # remove the following with new Folium release\n # rough approximation, assuming max_zoom is 18\n import math\n try:\n lat_diff = bounds['max_lat'] - bounds['min_lat']\n lon_diff = bounds['max_lon'] - bounds['min_lon']\n area, max_area = lat_diff*lon_diff, 180*360\n if area:\n factor = 1 + max(0, 1 - self._width/1000)/2 + max(0, 1-area**0.5)/2\n zoom = math.log(area/max_area)/-factor\n else:\n zoom = self._default_zoom\n zoom = max(1, min(18, round(zoom)))\n attrs['zoom_start'] = zoom\n except ValueError as e:\n raise Exception('Check that your locations are lat-lon pairs', e)\n\n return attrs", "def test_map_settings_default():\n m = view(world)\n assert m.location == [\n pytest.approx(-3.1774349999999956, rel=1e-6),\n pytest.approx(2.842170943040401e-14, rel=1e-6),\n ]\n assert m.options[\"zoom\"] == 10\n assert m.options[\"zoomControl\"] == True\n assert m.position == \"relative\"\n assert m.height == (100.0, \"%\")\n assert m.width == (100.0, \"%\")\n assert m.left == (0, \"%\")\n assert m.top == (0, \"%\")\n assert m.global_switches.no_touch is False\n assert m.global_switches.disable_3d is 
False\n assert \"openstreetmap\" in m.to_dict()[\"children\"].keys()", "def zoom(self, zoom):\n\n self.container['zoom'] = zoom", "def scale_settings(self) -> Optional[pulumi.Input['ScaleSettingsArgs']]:\n return pulumi.get(self, \"scale_settings\")", "def max_zoom(self) -> float:\n return math.log(np.min(self.canvas_size) / REGION_DIM)", "def normalize_zoomlvl(lvl):\n if lvl < gMinZoomLevel:\n return gMinZoomLevel\n elif lvl > gMaxZoomLevel:\n return gMaxZoomLevel\n else:\n return lvl - gMinZoomLevel", "def get_scale():\r\n\r\n \r\n return 0.5", "def __zoomTo(self, value):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n aw = e5App().getObject(\"Shell\")\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomTo(value)\n self.sbZoom.setValue(aw.getZoom())", "def __zoomIn(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").zoomIn()\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomIn()\n self.sbZoom.setValue(aw.getZoom())", "def getorelse(self, name, default=None):\n try:\n return self._defaults[name]\n except KeyError:\n return default", "def min_zoom(self) -> float:\n return math.log(np.min(self.canvas_size) / np.max(self._m.world.t_size))", "def on_zoom_change(self, event) -> None:\r\n\r\n zoom_level = int(self.zoom_scale.get())\r\n self.painter.zoom = zoom_level\r\n self.painter.draw_board()", "def at_zoom(self, zoom):\n params = {}\n for name, element in self.config.iteritems():\n out_element = _element_at_zoom(name, element, zoom)\n if out_element != None:\n params[name] = out_element\n\n return params", "def set_zooming_keyboard(self):\n # Zooming: ALT + key arrows\n self.set('KeyPress', 'Zoom',\n key='Left', key_modifier='Control', \n param_getter=lambda p: (-.25, 0, 0, 0))\n self.set('KeyPress', 'Zoom',\n key='Right', key_modifier='Control', \n param_getter=lambda p: (.25, 0, 0, 0))\n self.set('KeyPress', 'Zoom',\n key='Up', key_modifier='Control', \n param_getter=lambda p: (0, 0, .25, 0))\n self.set('KeyPress', 'Zoom',\n key='Down', key_modifier='Control', \n param_getter=lambda p: (0, 0, -.25, 0))", "def getDefaultLevel():\n return _libsbml.LayoutExtension_getDefaultLevel()", "def DoZoom(self, mode):\n id_type = mode\n zoomlevel = self.GetZoom()\n if id_type == ed_glob.ID_ZOOM_OUT:\n if zoomlevel > -9:\n self.ZoomOut()\n elif id_type == ed_glob.ID_ZOOM_IN:\n if zoomlevel < 19:\n self.ZoomIn()\n else:\n self.SetZoom(0)\n return self.GetZoom()", "def get_setting_default(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n return setting.get('default', '')", "def _get_scaling(root):\n dpi = root.winfo_fpixels(\"1i\")\n scaling = dpi / 72.0\n logger.debug(\"dpi: %s, scaling: %s'\", dpi, scaling)\n return scaling", "def showscale(self):\n return self[\"showscale\"]", "def get_default_geometry(self):", "def scale(self):\n return self._scale", "def GetScale(self):\n ...", "def _get_antialiasing_sigma(self, zoom):\n k = 1 / zoom\n variance = (k ** 2 - 1 ** 2) * (2 * np.sqrt(2 * np.log(2))) ** (-2)\n sigma = np.sqrt(variance)\n return sigma", "def default():\n return DefaultGeothermal.default()", "def auto_scale_factor(self):\r\n return self.gref.auto_scale_factor", "def showscale(self):\n return self['showscale']", "def get_inverted_zoom_transform(self):\n return self.zoom_levels[self.cur_zoom][2]", "def __getZoomScaler(self,zoomLevel):\n\n if (zoomLevel == 0):\n zoomLevel = 0\n elif (zoomLevel > 0):\n if (self.zoomLevel + zoomLevel <= self.maxZoom):\n # we're not at maximum\n self.zoomLevel = 
self.zoomLevel + zoomLevel\n elif (self.zoomLevel < self.maxZoom):\n # we'll exceed max zoom - clamp to max\n zoomLevel = self.maxZoom - self.zoomLevel\n self.zoomLevel = self.maxZoom\n else:\n # we're at maximum zoom\n zoomLevel = 0\n else:\n if (self.zoomLevel + zoomLevel >= self.minZoom):\n # we're not at min\n self.zoomLevel = self.zoomLevel + zoomLevel\n elif (self.zoomLevel > self.minZoom):\n # we'll exceed min zoom - clamp to min\n zoomLevel = self.minZoom - self.zoomLevel\n self.zoomLevel = self.minZoom\n else:\n # we're at minimum zoom\n zoomLevel = 0\n\n # calculate the scaling factor\n if (zoomLevel != 0):\n zoomLevel = self.zoomFactor ** zoomLevel\n\n return zoomLevel", "def set_ui_scale():\n # TODO test on other OS and resolutions\n moniter_h = QtWidgets.QDesktopWidget().screenGeometry(-1).height()\n if sys.platform == 'win32':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.0\n else:\n scale = 1.0\n elif sys.platform == 'linux':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.23\n else:\n scale = 1.4\n elif sys.platform == 'darwin':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.25\n else:\n scale = 1.55\n return scale", "def get_default_settings(cfg):\n cfg = deepcopy(cfg)\n cfg.setdefault('metric', 'regression_slope')\n cfg.setdefault('n_jobs', 1)\n cfg.setdefault('savefig_kwargs', {\n 'bbox_inches': 'tight',\n 'dpi': 600,\n 'orientation': 'landscape',\n })\n logger.info(\"Using at most %i processes\", cfg['n_jobs'])\n return cfg", "def zoom_markers(self, zoom=0.0, marker=-1, **kwargs):\n self._p('*{:.6f} {}'.format(zoom, marker),\n *[kwargs.get(k, -1) for k in 'abcdefghijklmnopqrstuvwxyz'])", "def is_zoom_mode(self):\n return self._myMode == MyNavigationToolbar.NAVIGATION_MODE_ZOOM", "def default_config(self):\n return {\n \"xtype\": \"scalar\",\n \"ytype\": \"scalar\",\n \"xtick-delta\": 50, \n \"ytick-delta\": 20, \n \"xtick-format\": '{:g}', \n \"ytick-format\": '{:g}',\n }", "def getDefaultLevel():\n return _libsbml.MultiExtension_getDefaultLevel()", "def zoom(self, amount):\n pass", "def initial(self):\n from setman import settings\n return getattr(settings, self.name, self.default)", "def GetDefaultLayerProperties():\r\n pass", "def default_value(self):\n return self.__class__.get_setting_default(self.key, **self.get_kwargs())", "def default_rotor_setting(self):\n self._rot_offset = 0", "def get_default_plot(self):\n return self.default_plot", "def scale_settings(self) -> pulumi.Output[Optional['outputs.ScaleSettingsResponse']]:\n return pulumi.get(self, \"scale_settings\")", "def set_zoom_on_edit(self, should_zoom):\n self._should_zoom = should_zoom", "def getDefaultSettings():\n return {}", "def get_default(self):\n\n\t\treturn self.__default", "def get_default(cls, opt):\n try:\n return cls._OPTS[opt].default\n except KeyError:\n raise ValueError('unknown option name %r' % (opt,))", "def LayoutExtension_getDefaultLevel():\n return _libsbml.LayoutExtension_getDefaultLevel()", "def default_rounding_mode(self) -> str:\n return pulumi.get(self, \"default_rounding_mode\")", "def zoomReset(self):\n self.viewNP.setScale(0.5)\n self.nodeMgr.updateConnections()", "def default_scale(scale):\n return sequence_scale(scale, (1, 1.25, 1.5, 1.75, 2,\n 2.5, 3, 4, 5, 6, 7.5, 8, 9, 10))", "def test_map_settings_custom():\n m = view(nybb, zoom_control=False, width=200, height=200, tiles=\"CartoDB positron\")\n assert m.location == [\n pytest.approx(40.70582377450201, rel=1e-6),\n 
pytest.approx(-73.9778006856748, rel=1e-6),\n ]\n assert m.options[\"zoom\"] == 10\n assert m.options[\"zoomControl\"] == False\n assert m.height == (200.0, \"px\")\n assert m.width == (200.0, \"px\")\n assert \"cartodbpositron\" in m.to_dict()[\"children\"].keys()\n\n # custom XYZ tiles\n m = view(\n nybb,\n zoom_control=False,\n width=200,\n height=200,\n tiles=\"https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}\",\n attr=\"Google\",\n )\n\n out_str = _fetch_map_string(m)\n assert (\n 'tileLayer(\"https://mt1.google.com/vt/lyrs=m\\\\u0026x={x}\\\\u0026y={y}\\\\u0026z={z}\",{\"attribution\":\"Google\"'\n in out_str\n )\n\n m = view(nybb, location=(40, 5))\n assert m.location == [40, 5]\n assert m.options[\"zoom\"] == 10\n\n m = view(nybb, zoom_start=8)\n assert m.location == [\n pytest.approx(40.70582377450201, rel=1e-6),\n pytest.approx(-73.9778006856748, rel=1e-6),\n ]\n assert m.options[\"zoom\"] == 8\n\n m = view(nybb, location=(40, 5), zoom_start=8)\n assert m.location == [40, 5]\n assert m.options[\"zoom\"] == 8", "def initDefaults(self):\n return _libsbml.Dimensions_initDefaults(self)", "def zoom(self, step):\n current_zoom = self.scale_factor\n self.scale_factor = current_zoom + step / 100 * current_zoom\n # self.scale_factor = max(min(self.scale_factor + step, 1000000), 5)", "def zoom_to(self):\n # Will seek user feedback. QGIS will\n # Pan to first layer loaded", "def default_setting(self):\n\t\tunknown_as_zero = False\n\t\tbinary_rele = False # using the original values\n\t\tpresort = False # a default setting\n\n\t\tscale_data, scaler_id, scaler_level = get_default_scaler_setting(data_id=self.data_id)\n\n\t\t# more data settings that are rarely changed\n\t\tself.data_dict = dict(data_id=self.data_id, dir_data=self.dir_data, min_docs=10, min_rele=1,\n\t\t\t\t\t\t sample_rankings_per_q=1, unknown_as_zero=unknown_as_zero, binary_rele=binary_rele,\n\t\t\t\t\t\t presort=presort, scale_data=scale_data, scaler_id=scaler_id, scaler_level=scaler_level)\n\n\t\tdata_meta = get_data_meta(data_id=self.data_id) # add meta-information\n\t\tself.data_dict.update(data_meta)\n\n\t\treturn self.data_dict", "def get_zoom(input_box, z=18):\n box_tile = smopy.get_tile_box(input_box, z)\n box = smopy.correct_box(box_tile, z)\n sx, sy = smopy.get_box_size(box)\n if sx * sy >= MAXTILES:\n z = get_zoom(input_box, z - 1)\n return z", "def setMplDefaults():\n\n rcParams['figure.dpi'] = 300\n rcParams['figure.figsize'] = (4.5, 3)\n rcParams['savefig.dpi'] = 300\n rcParams['axes.grid'] = True\n rcParams['grid.linewidth'] = 0.5\n rcParams['grid.linestyle'] = ':'\n rcParams['font.family'] = 'Arial', 'Helvetica', 'DejaVu Sans'\n rcParams['font.size'] = 6\n rcParams['lines.markersize'] = 4\n rcParams['lines.linestyle'] = '-'\n rcParams['savefig.transparent'] = False\n rcParams['figure.subplot.bottom'] = 0.15\n rcParams['figure.subplot.top'] = 0.85\n rcParams['figure.subplot.left'] = 0.15\n rcParams['figure.subplot.right'] = 0.9", "def resetMinZoomVisibility(self):\n self._min_zoom = None", "def autocolorscale(self):\n return self[\"autocolorscale\"]", "def _get_depth_map_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Depth/0.125/\"\n else:\n return \"Depth/0.25/\"\n else: \n return \"Depth/\"", "def getDefault():", "def zoom_to_span(zoom):\n assert(zoom >=0 and zoom <= MAX_ZOOM)\n return MAX_FREQ/2**zoom", "def autocolorscale(self):\n return self['autocolorscale']", "def _GetDefaultConfig(self) -> str:\n try:\n region = util.GetRegionFromZone(\n FLAGS.zones[0] if 
FLAGS.zones else FLAGS.zone[0])\n except IndexError:\n region = _DEFAULT_REGION\n return f'regional-{region}'", "def zoom(self, zoomIn):\n zoomFactor = 0.05\n maxZoomIn = 2\n maxZoomOut = 0.1\n if zoomIn:\n s = self.viewNP.getScale()\n if s.getX()-zoomFactor < maxZoomIn and s.getY()-zoomFactor < maxZoomIn and s.getZ()-zoomFactor < maxZoomIn:\n self.viewNP.setScale(s.getX()+zoomFactor,s.getY()+zoomFactor,s.getZ()+zoomFactor)\n else:\n s = self.viewNP.getScale()\n if s.getX()-zoomFactor > maxZoomOut and s.getY()-zoomFactor > maxZoomOut and s.getZ()-zoomFactor > maxZoomOut:\n self.viewNP.setScale(s.getX()-zoomFactor,s.getY()-zoomFactor,s.getZ()-zoomFactor)\n self.nodeMgr.updateConnections()", "def get_setting(self, name, default=None):\n w = self.choices['which']\n if w == 'global_default':\n return self.settings.get_global_default(name, default)\n elif w == 'project_default':\n return self.settings.get_project_default(name, default)\n elif w == 'global_variant':\n return self.settings.get_global_variant(self.choices['variant'],\n name, default)\n elif w == 'project_variant':\n return self.settings.get_project_variant(self.choices['variant'],\n name, default)\n elif w == 'project_package_default':\n return self.settings.get_project_package_default(\n self.choices['package'], name, default)\n elif w == 'project_package_variant':\n return self.settings.get_project_package_variant(\n self.choices['package'], self.choices['variant'], name, default)\n elif w == 'project_package_target':\n return self.settings.get_project_package_target(\n self.choices['package'], self.choices['target'], name, default)\n else:\n raise AssertionError(w)", "def setDefaultSettings():\n if PLATFORM == 'Windows':\n font = 'Consolas'\n else:\n font = 'Monospace'\n\n preferenceNode = nuke.toNode('preferences')\n # viewer settings\n preferenceNode['maxPanels'].setValue(5)\n preferenceNode['TextureSize'].setValue('2048x2048')\n preferenceNode['viewer_bg_color_3D'].setValue(1280068863)\n preferenceNode['viewer_fg_color_3D'].setValue(4294967295L)\n preferenceNode['Viewer3DControlEmulation'].setValue('Maya')\n preferenceNode['middleButtonPans'].setValue(False)\n preferenceNode['dot_node_scale'].setValue(1.5)\n\n # script editor settings\n preferenceNode['clearOnSuccess'].setValue(False)\n preferenceNode['echoAllCommands'].setValue(True)\n preferenceNode['ScriptEditorFont'].setValue(font)\n preferenceNode['ScriptEditorFontSize'].setValue(12.0)\n preferenceNode['kwdsFgColour'].setValue(2629566719L)\n preferenceNode['stringLiteralsFgColourDQ'].setValue(10354943)\n preferenceNode['stringLiteralsFgColourSQ'].setValue(10354943)\n preferenceNode['commentsFgColour'].setValue(2442236415L)", "def default_metric_value(self) -> float:", "def get_defaults(cls, mode):\n mode_defaults = getattr(cls, f\"{mode.upper()}_DEFAULTS\")\n defaults = PlotConfig({**cls.COMMON_DEFAULTS, **mode_defaults})\n return defaults", "def get_defaults(cls, mode):\n mode_defaults = getattr(cls, f\"{mode.upper()}_DEFAULTS\")\n defaults = PlotConfig({**cls.COMMON_DEFAULTS, **mode_defaults})\n return defaults", "def get_scale_parameter(self):\r\n \r\n if self.scale_parameter == 0.0: \r\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\r\n gamma_func = special.gamma(shape_in_gamma_func)\r\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\r\n return self.scale_parameter\r\n else:\r\n return self.scale_parameter", "def set_zooming_wheel(self):\n # Zooming: wheel\n self.set('Wheel', 'Zoom',\n param_getter=lambda p: (\n p[\"wheel\"]*.002, \n 
p[\"mouse_position\"][0],\n p[\"wheel\"]*.002, \n p[\"mouse_position\"][1]))", "def zoomValueChanged(self, value, zoomingWidget):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n aw = e5App().getObject(\"Shell\")\n else:\n aw = self.activeWindow()\n if aw and aw == zoomingWidget:\n self.sbZoom.setValue(value)", "def default_config():\n return {'grid': {'regular': {'width': 0.05,\n 'wake': {'width': 0.1, 'progression': None},\n 'layers': 50,\n 'thickness': 5,\n 'boundary_layer': { 'initial_thickness': 4.2e-5 }}}}", "def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages", "def get_zoom(df: pd.DataFrame) -> int:\n if 0 <= df['radius'][0] <= 0.2:\n zoom = 22\n elif 0.2 < df['radius'][0] <= 0.22:\n zoom = 18\n elif 0.22 < df['radius'][0] <= 0.5:\n zoom = 16\n elif 0.5 < df['radius'][0] <= 1.5:\n zoom = 15\n elif 1.5 < df['radius'][0] <= 3.3:\n zoom = 14\n elif 3.3 < df['radius'][0] <= 7:\n zoom = 13\n elif 7 < df['radius'][0] <= 10:\n zoom = 12\n elif 10 < df['radius'][0] <= 25:\n zoom = 11\n elif 25 < df['radius'][0] <= 50:\n zoom = 10\n elif 50 < df['radius'][0] <= 90:\n zoom = 9\n elif 90 < df['radius'][0] <= 170:\n zoom = 8\n elif 170 < df['radius'][0] <= 340:\n zoom = 7\n else:\n zoom = 6\n return zoom", "def getInitDefault(self):\n return self.initDefault", "def default(self):\n return self.__default", "def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages", "def getDefaultLevel():\n return _libsbml.SBMLDocument_getDefaultLevel()", "def scale_smaller(self):\n new_factor = self._zoom_factor - 0.1\n if 0 < float(new_factor) < self._MAX_ZOOM:\n self._zoom_factor = new_factor", "def default_space_settings(self) -> Optional[pulumi.Input['DomainDefaultSpaceSettingsArgs']]:\n return pulumi.get(self, \"default_space_settings\")", "def update_zoom_plot(self):\n self.plot_zoom.setXRange(*self.linear_region.getRegion(), padding=0)", "def zoom_to_size(self, *p):\n\t\tif self.image is None or self.allocation is None:\n\t\t\treturn\n\t\tif __debug__: print self.allocation.width, self.image.get_width()\n\t\tif __debug__: print self.allocation.width, self.image.get_width(), self.allocation.width/self.image.get_width()\n\t\tz = min(\n\t\t\tself.allocation.width/self.image.get_width(),\n\t\t\tself.allocation.height/self.image.get_height()\n\t\t\t)\n\t\tif __debug__: print \"zoom_to_size\", \"z=\", z\n\t\tself.zoom = z", "def defaultWindowSize(self):\n self.resize(self.defaultWindowWidth, self.defaultWindowHeight)", "def zoom(self, *args):\n NavigationToolbar2.zoom(self, args)\n\n if self._myMode == MyNavigationToolbar.NAVIGATION_MODE_ZOOM:\n # out of zoom mode\n self._myMode = MyNavigationToolbar.NAVIGATION_MODE_NONE\n else:\n # into zoom mode\n self._myMode = MyNavigationToolbar.NAVIGATION_MODE_ZOOM\n\n return", "def default_space_settings(self) -> pulumi.Output[Optional['outputs.DomainDefaultSpaceSettings']]:\n return pulumi.get(self, \"default_space_settings\")" ]
[ "0.7723424", "0.7299348", "0.72973317", "0.71936", "0.70050627", "0.68496126", "0.65712726", "0.625384", "0.6145273", "0.6142065", "0.61153233", "0.6006282", "0.5985987", "0.5982547", "0.59785664", "0.59773666", "0.5952824", "0.58633906", "0.5851663", "0.58423215", "0.57971275", "0.5784571", "0.57064813", "0.5677304", "0.56635326", "0.5645525", "0.5631767", "0.5624415", "0.56236774", "0.5616822", "0.5583164", "0.55795175", "0.5559482", "0.5553522", "0.5552875", "0.5547535", "0.55400443", "0.5534302", "0.55340713", "0.5517684", "0.5495409", "0.54939026", "0.54923123", "0.5457685", "0.54472345", "0.54460686", "0.54425627", "0.54394406", "0.54329383", "0.5432671", "0.5428147", "0.54265165", "0.5421405", "0.54205453", "0.5419559", "0.5414858", "0.5411569", "0.54053754", "0.5395546", "0.53869426", "0.5383226", "0.5383008", "0.53792465", "0.5376375", "0.5370349", "0.5352113", "0.5343745", "0.5343549", "0.53367203", "0.5336056", "0.5334981", "0.53304386", "0.5329237", "0.53247905", "0.5321442", "0.53138757", "0.5309591", "0.5302675", "0.5301", "0.5288804", "0.52862346", "0.5285257", "0.5285257", "0.5267605", "0.5262782", "0.52599066", "0.52517605", "0.5244297", "0.52399355", "0.5229177", "0.5226403", "0.52240574", "0.52177256", "0.52175385", "0.520162", "0.520053", "0.52001554", "0.51968193", "0.51922417", "0.51917565" ]
0.8090395
0
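`_defaultZoom` above simply fixes the default viewing window as an `(xmin, xmax, ymin, ymax)` tuple; a short sketch of how such a tuple typically plugs into the grid builder sketched after the previous row (the `zoom_window` helper is illustrative, not from the row):

xmin, xmax, ymin, ymax = (-1.0, 1.0, -1.0, 1.0)   # the default zoom tuple
grid = complex_plane(400, 400, xmin, xmax, ymin, ymax)

def zoom_window(cx, cy, half_width, half_height):
    # Zooming in is just shrinking the window around a center point.
    return (cx - half_width, cx + half_width, cy - half_height, cy + half_height)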
Converts the generated fractal into an RGB image array
def _toRgbImage(self, fractal, colors, color_offset):\n    hsv_img = np.array(\n        [\n            # Cycle through color wheel.\n            (fractal * colors + color_offset) % 1,\n\n            # Saturation = fractal value.\n            fractal,\n\n            # Maximum value.\n            np.ones(fractal.shape)\n        ]\n    ).astype(dtype=float).T\n\n    rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)\n    return rgb_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _toRgbImage(self, fractal, colors, color_offset):\n soln_real = adjustRange(fractal[0], 0, 127)\n soln_imag = adjustRange(fractal[1], 0, 127)\n iters = adjustRange(fractal[2], 0, 128)\n\n rgb_image = np.array([\n soln_real + iters,\n soln_imag + iters,\n iters\n ]\n ).astype(dtype=np.uint8)\n\n return rgb_image.T", "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def _toRgbImage(self, fractal, colors, color_offset):\n hsv_img = np.array(\n [\n # Cycle through color wheel.\n (fractal * colors + color_offset) % 1,\n\n # Saturation = 1 where fractal values > 0,\n # Saturation = 0 otherwise.\n fractal.astype(dtype=bool).astype(dtype=float),\n\n # Invert colours\n 1 - fractal\n ]\n ).astype(dtype=float).T\n\n rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)\n return rgb_img", "def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c", "def reconstructImage(self,arr):\n\t\tarr = arr * 256\n\t\tarr = np.array(np.round(arr),dtype=np.uint8)\n\t\t#arr = np.array(arr,dtype=np.uint8)\n\n\t\t# We need to transpose the array because we flatten X by columns\n\t\t#arr = arr.T\n\t\t#a = arr.reshape((self.width, self.height,3))\n\t\t\n\t\tif self.mode == 'L':\n\t\t\ta = arr.reshape((self.width, self.height))\n\t\telse:\n\t\t\ta = arr.reshape((self.width, self.height,3))\n\n\t\t#a = arr.reshape((3,self.width, self.height))\t\t\n\t\t#a = arr.transpose(0, 3, 1, 2)\n\n\t\tim = Image.fromarray(a,mode=self.mode)\n\n\t\treturn im", "def carla_rgb_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.Raw) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array", "def generate_channels(path):\n # Abrir imagen y transformar a array\n image = Image.open(path)\n img_array = np.array(image)\n \n # Sacar RGB\n R = img_array[..., 0]\n G = img_array[..., 1]\n B = img_array[..., 2]\n \n return (R, G, B)", "def grey_to_rgb_imitation(img):\n return np.repeat(img[...,np.newaxis], 3, -1)", "def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img", "def img_to_rgb(img):\r\n if len(img.shape) < 3 or img.shape[2] == 1:\r\n return np.repeat(img, 3).reshape(img.shape[0], img.shape[1], 3)\r\n else:\r\n return img", "def GetRGBArray(self, p_int):\n ...", "def reveal_RGB_image(filename):\n\tnew_array = [[], [], []]\n\tim = Image.open(filename)\n\tpixels = convert_image_to_pixels(filename) # get RGB array\n\tfor pixel in pixels: # get tuple of RGB\n\t\tfor x in range(3): # get R, G, B lists\n\t\t\tnew_array[x].append(85 * (pixel[x] & 3)) # change 0-3 to 0-255\n\t\t# get hidden 2 least significant bits\n\tfinal_array = list(zip(new_array[0], new_array[1], new_array[2]))\n\t# create a new image container in RGB mode,\n\t# and import array pixels data into the container\n\treturn convert_pixels_to_image(final_array, im.size)", "def get_image():\n image_response = client.simGetImages([airsim.ImageRequest(\"0\", airsim.ImageType.Scene, 
False, False)])[0]\n image1d = np.fromstring(image_response.image_data_uint8, dtype=np.uint8)\n image_rgba = image1d.reshape(image_response.height, image_response.width, 4)\n return image_rgba[78:144,1:255,0:3].astype(float)\n # return image_rgba[78:144,76:255,0:3].astype(float)", "def get_image(self):\n image = np.frombuffer(self.image, dtype=np.uint8)\n return image.reshape(*self.size, self.channels)", "def generate_array_image(R, G, B, height, width):\n R = R.reshape((height, width))\n G = G.reshape((height, width))\n B = B.reshape((height, width))\n \n return np.moveaxis(np.array([R, G, B]), 0, -1)", "def img_to_array(img, path=True):\n global width, height\n\n if path:\n img = Image.open(img)\n img_arr = np.array(img) / 255.0\n img_arr = img_arr.reshape(width, height, channels)\n \n return img_arr", "def data_to_bytescale_rgb(data): # used to create the SOURCE PNGs (MRI, FA, MD)\n im = bytescale(data)\n w, h = im.shape\n ret = np.empty((w,h,3), dtype=np.uint8)\n ret[:,:,0] = im\n ret[:,:,1] = im\n ret[:,:,2] = im\n return ret", "def get_img_array(myzipfile, imgid, shape=(299,299)):\n img_arr = np.zeros(shape=(512, 512, 3), dtype=np.float32)\n img_green = Image.open(myzipfile.open(f'{imgid}_green.png'))\n img_blue = Image.open(myzipfile.open(f'{imgid}_blue.png'))\n img_red = Image.open(myzipfile.open(f'{imgid}_red.png'))\n img_yellow = Image.open(myzipfile.open(f'{imgid}_yellow.png'))\n img_arr[:,:,0] = np.divide(np.array(img_green), 255)\n img_arr[:,:,1] = np.divide(np.array(img_blue), 255)/2 + np.divide(np.array(img_yellow), 255)/2\n img_arr[:,:,2] = np.divide(np.array(img_red), 255)/2 + np.divide(np.array(img_red), 255)/2\n img_arr = cv2.resize(img_arr, shape)\n return img_arr", "def imageToArray(i):\r\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\r\n a.shape=i.im.size[1], i.im.size[0]\r\n return a", "def bgr_to_rgb(ims):\n out = []\n for im in ims:\n out.append(im[:,:,::-1])\n return out", "def load_image_as_rgb(image_path):\n im = imageio.imread(image_path)\n y_size = im.shape[0]\n x_size = im.shape[1]\n logging.info(\"Image has dimensions X:%d Y:%d\" % (x_size, y_size))\n arr = np.zeros((im.shape[0],im.shape[1]), dtype=int)\n i = 0\n for im_row in im:\n j = 0\n for vec in im_row:\n arr[i,j] = rgb_vec_to_num(vec)\n j = j + 1\n i = i + 1\n return arr", "def _preprocess(self, image):\n\n # Scale from [0, 255] to [0, 1] and BGR to RGB \n return (image / 255.0)[:, :, ::-1]", "def to_image(x):\n x = denorm(x.data.cpu())\n ndarr = x.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = ndarr\n return im", "def generate_lut(self):\n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r,g,b", "def read_color_image(path):\n with open(path, 'rb') as f:\n img = Image.fromarray(read_ppm(f), mode='RGB')\n img = tf.keras.preprocessing.image.img_to_array(img, dtype=int)\n img = tf.convert_to_tensor(img)\n return img", "def get_rendered_image(self) -> np.ndarray:\n return np.transpose(self.state['observation'], [1, 2, 0])", "def image2array(filename, shape=None):\n # Open the image and change it to black and white\n im = Image.open(filename).convert('1', dither=Image.NONE)\n\n im = im.resize(shape, Image.ANTIALIAS)\n pattern = np.array(im)\n \n return pattern", "def imageprepare():\r\n file_name = '9-test.png'\r\n im = Image.open(file_name).convert('L')\r\n\r\n im.save(\"9-t.png\")\r\n plt.imshow(im)\r\n plt.show()\r\n 
tv = list(im.getdata())\r\n\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n return tva", "def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg", "def get_image(image_path):\r\n image = Image.open(image_path, 'r')\r\n width, height = image.size\r\n pixel_values = list(image.getdata())\r\n if image.mode == 'RGB':\r\n channels = 3\r\n elif image.mode == 'L':\r\n channels = 1\r\n else:\r\n print(\"Unknown mode: %s\" % image.mode)\r\n return None\r\n pixel_values = np.array(pixel_values).reshape((1,width, height, channels))\r\n # print(pixel_values.shape)\r\n return pixel_values", "def red_channel(img):\n\n red = np.zeros(img.shape,dtype=float)\n\n red[:,:,2] = np.copy(img[:,:,2])\n\n return red", "def Array2PIL(a,lut=None,minvalue=None,maxvalue=None,width=None,height=None,\n flip=None):\n import Image # we only need it here ...\n\n if flip==\"ud\": #up-down exchange\n a=a[::-1,:]\n h,w=Numeric.shape(a)\n## a_min=Numeric.minimum.reduce((Numeric.ravel(a)))\n## a_max=Numeric.maximum.reduce((Numeric.ravel(a)))\n a_min=min(Numeric.ravel(a))\n a_max=max(Numeric.ravel(a))\n\n # allow for an user-specified maximal value:\n if maxvalue!=None and maxvalue>a_max:\n a_max=maxvalue\n # allows for an user-specified minimal value:\n if minvalue!=None and minvalue<a_min:\n a_min=minvalue\n\n if lut is not None:\n if len(lut[0]) == 256:\n \n a=(Numeric.ravel(255.0*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt8)\n\n rgb=Numeric.zeros( (len(a),3),typecode=Numeric.UInt8)\n\n\n lut_=Numeric.zeros( (3,len(lut[0])),Numeric.UInt8)\n lut_[0]=lut[0].astype(Numeric.UInt8)\n lut_[1]=lut[1].astype(Numeric.UInt8)\n lut_[2]=lut[2].astype(Numeric.UInt8)\n\n # This is much faster than the original zip/ravel variant ...\n rgb[:,0]=Numeric.take(lut_[0],a)\n #print \"rtake\"\n rgb[:,1]=Numeric.take(lut_[1],a)\n #print \"gtake\"\n rgb[:,2]=Numeric.take(lut_[2],a)\n #print \"btake\"\n #rgb=Numeric.ravel(((Numeric.array(zip(r,g,b),\n # typecode=Numeric.UInt8))))\n\n #print \"rgb done\"\n else:\n N = len(lut[0])\n print \"LUT with N=%d entries\" % N\n if N>=256*256:\n print \"UUPS, more than uint16 colors??\", N\n raise ValueError(\"N too large\")\n \n a = (Numeric.ravel((N-1)*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt16)\n\n rgb = Numeric.zeros( (len(a), 3), typecode=Numeric.UInt16)\n\n lut_ = Numeric.zeros( (3,len(lut[0])), Numeric.UInt16)\n lut_[0] = lut[0].astype(Numeric.UInt16)\n lut_[1] = lut[1].astype(Numeric.UInt16)\n lut_[2] = lut[2].astype(Numeric.UInt16)\n\n # This is much faster than the original zip/ravel variant ...\n rgb[:,0] = Numeric.take(lut_[0],a)\n rgb[:,1] = Numeric.take(lut_[1],a)\n rgb[:,2] = Numeric.take(lut_[2],a)\n\n rgb = (rgb*256.0/N).astype(Numeric.UInt8)\n\n else: # simple grey scale ramp...\n a=(Numeric.ravel(255.0*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt8)\n # convert to (r_0,g_0,b_0,r_1,g_1,b_1,....)\n rgb=Numeric.ravel(Numeric.array(zip(a,a,a)))\n\n # create a PIL RGB image\n #print \"w/h\",w,h\n im=Image.new(\"RGB\",(w,h))\n #print \"imfromstring:\"\n im.fromstring(rgb.tostring())\n #print \"done ...\"\n \n # scale image ?\n if height!=None and width==None:\n im=im.resize(w/h*height,height)\n elif height==None and width!=None:\n im=im.resize(width,h/w*width)\n elif height!=None and 
width!=None:\n im=im.resize(width,height)\n\n return(im)", "def carla_cityscapes_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.CityScapesPalette) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array", "def _images(path):\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def jpg2rgb(image_data: bytes) -> np.ndarray:\n\n im = Image.open(io.BytesIO(image_data))\n im = im.convert(\"RGB\")\n im = im.resize((96, 96))\n data = np.array(im)\n\n data = rgb2gray(data)\n\n return data", "def convert_image_np(inp):\n inp = inp.numpy().transpose((1, 2, 0))\n inp = (inp*255).astype(np.uint8)\n return inp", "def get_BGR_img(self):\n img = self.img.copy()\n # Convert BGR to HSV\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # define range of BGR color in HSV\n threshold_blue = np.array([[100,43,46], [124,255,255]])\n threshold_green = np.array([[35,43,46], [77,255,255]])\n threshold_red1 = np.array([[0,43,46], [10,255,255]])\n threshold_red2 = np.array([[156,43,46], [180,255,255]])\n # Threshold the HSV image to get only BGR colors\n mask_blue = cv2.inRange(hsv, threshold_blue[0], threshold_blue[1])\n mask_green = cv2.inRange(hsv, threshold_green[0], threshold_green[1])\n mask_red1 = cv2.inRange(hsv, threshold_red1[0], threshold_red1[1])\n mask_red2 = cv2.inRange(hsv, threshold_red2[0], threshold_red2[1])\n mask_red = mask_red1 | mask_red2\n # Bitwise-AND mask and original image\n self.blue = cv2.bitwise_and(img, img, mask=mask_blue)\n self.green = cv2.bitwise_and(img, img, mask=mask_green)\n self.red = cv2.bitwise_and(img, img, mask=mask_red)\n # 返回 bgr 三通道的分量合成的图片\n return np.stack((self.blue[:, :, 0], self.green[:, :, 1], self.red[:, :, 2]), axis=2)", "def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))", "def imageprepare():\r\n file_name = 'temp_image.png'\r\n im = Image.open(file_name).convert('L')\r\n im = im.resize((20, 20))\r\n p = Image.new('L', (28,28), (255))\r\n p.paste(im,(4,4,24,24))\r\n p.save(\"last_image.png\")\r\n\r\n tv = list(p.getdata()) # get pixel values\r\n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n tva = np.reshape(tva, (28, 28))\r\n\r\n return tva", "def get_np_image(self, save_image=False, filename=\"curr_image.png\"):\n responses = client.simGetImages([airsim.ImageRequest(\"front_left\", airsim.ImageType.Scene, False, False)])\n response = responses[0]\n\n # get numpy array\n img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8)\n\n # reshape array to 4 channel image array H X W X 4\n img_rgb = img1d.reshape(response.height, response.width, 3)\n\n # # original image is fliped vertically\n # img_rgb = np.flipud(img_rgb)\n\n if save_image:\n cv2.imwrite(filename, img_rgb)\n\n return img_rgb", "def _arr_to_img(arr, verbose=False):\n return Image.fromarray(arr)", "def _pillow2array(img, flag='color', channel_order='bgr'):\n channel_order = channel_order.lower()\n if channel_order not in ['rgb', 'bgr']:\n raise ValueError('channel order must be either \"rgb\" or \"bgr\"')\n\n if flag == 'unchanged':\n array = np.array(img)\n if array.ndim >= 3 and array.shape[2] >= 3: # color image\n array[:, :, :3] = array[:, :, (2, 1, 0)] # RGB to BGR\n else:\n # If the image mode is not 'RGB', convert it to 'RGB' first.\n if img.mode != 'RGB':\n if img.mode != 'LA':\n # Most formats except 'LA' can be directly converted to RGB\n img = img.convert('RGB')\n else:\n # When the mode is 'LA', the default conversion will fill in\n # the canvas with black, which sometimes shadows black objects\n # in the foreground.\n #\n # Therefore, a random color (124, 117, 104) is used for canvas\n img_rgba = img.convert('RGBA')\n img = Image.new('RGB', img_rgba.size, (124, 117, 104))\n img.paste(img_rgba, mask=img_rgba.split()[3]) # 3 is alpha\n if flag == 'color':\n array = np.array(img)\n if channel_order != 'rgb':\n array = array[:, :, ::-1] # RGB to BGR\n elif flag == 'grayscale':\n img = img.convert('L')\n array = np.array(img)\n else:\n raise ValueError(\n 'flag must be \"color\", \"grayscale\" or \"unchanged\", '\n f'but got {flag}')\n return array", "def get_image_array(self):\n with picamera.array.PiRGBArray(self.camera) as output:\n self.camera.resolution = (640, 480)\n self.camera.capture(output, 'rgb')\n logging.info(\"Captured image of size {0}x{1}x{2}\".format(\n output.array.shape[0], output.array.shape[1], output.array.shape[2]))\n output.truncate(0)\n return output.array\n # self.camera.capture_continuous(self.stream, format='jpeg', use_video_port=True)\n # self.stream.seek(0)\n # image = Image.open(self.stream).convert('RGB').resize((self._input_width, self._input_height), Image.ANTIALIAS)\n # self.stream.seek(0)\n # self.stream.truncate()\n # self.camera.close()", "def _load(self) -> np.ndarray:\n with self._fs.open(self._filepath, mode=\"r\") as f:\n image = Image.open(f).convert(\"RGBA\")\n return np.asarray(image)", "def _grey_img_to_arr(image, verbose=False):\n try:\n w, h = image.size\n arr = np.array(image.getdata())\n arr = _rgb_to_grey(arr, (h, w), verbose=verbose)\n if verbose:\n print(\"Converted from RGB to grayscale\")\n except:\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(height, width)\n return arr", "def create_colorful_test_image(self):\n ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)\n ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)\n ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)\n imr = np.concatenate((ch255, ch128, ch128), axis=2)\n img = np.concatenate((ch255, ch255, ch0), axis=2)\n imb = np.concatenate((ch255, ch0, ch255), axis=2)\n 
imw = np.concatenate((ch128, ch128, ch128), axis=2)\n imu = np.concatenate((imr, img), axis=1)\n imd = np.concatenate((imb, imw), axis=1)\n image = np.concatenate((imu, imd), axis=0)\n return image", "def format_data(img_path, size):\n img_color = cv2.imread(img_path)\n img_color = img_color[:, :, ::-1]\n img_color = cv2.resize(img_color, (size, size), interpolation=cv2.INTER_AREA)\n img_color = img_color.reshape((1, size, size, 3))\\\n #.transpose(0, 3, 1, 2)\n\n return img_color", "def makearray(self, *args, **kwargs):\n return _image.image_makearray(self, *args, **kwargs)", "def get_rgbColorArray(self, ledIndex, count):\n # buff\n res = []\n # idx\n # r\n # g\n # b\n\n buff = self._download(\"rgb.bin?typ=0&pos=\" + str(int(3*ledIndex)) + \"&len=\" + str(int(3*count)))\n del res[:]\n\n idx = 0\n while idx < count:\n r = YGetByte(buff, 3*idx)\n g = YGetByte(buff, 3*idx+1)\n b = YGetByte(buff, 3*idx+2)\n res.append(r*65536+g*256+b)\n idx = idx + 1\n\n return res", "def to_array(self):\n return np.array(self.to_image())", "def flow_to_image(flow):\n out = []\n maxu = -999.\n maxv = -999.\n minu = 999.\n minv = 999.\n maxrad = -1\n for i in range(flow.shape[0]):\n u = flow[i, :, :, 0]\n v = flow[i, :, :, 1]\n idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)\n u[idxunknow] = 0\n v[idxunknow] = 0\n maxu = max(maxu, np.max(u))\n minu = min(minu, np.min(u))\n maxv = max(maxv, np.max(v))\n minv = min(minv, np.min(v))\n rad = np.sqrt(u ** 2 + v ** 2)\n maxrad = max(maxrad, np.max(rad))\n u = u / (maxrad + np.finfo(float).eps)\n v = v / (maxrad + np.finfo(float).eps)\n img = compute_color(u, v)\n out.append(img)\n return np.float32(np.uint8(out))", "def get_raw(self) -> bytearray:\n img_bytes = bytearray()\n for i in range(self.grid_size[0]):\n if self.grid[i] is not None:\n for j in range(self.grid_size[1]):\n if self.grid[i][j] is not None:\n color = self.grid[i][j]\n color = color.get_byte_representation()\n for k in range(len(color)):\n img_bytes.append(color[k])\n return img_bytes", "def save_array_as_rgb_image(data, image_name):\n data_dim = len(data.shape)\n if(data_dim == 3):\n assert(data.shape[0] == 3 or data.shape[2] == 3)\n if(data.shape[0] == 3):\n data = np.transpose(data, [1, 2, 0])\n img = Image.fromarray(data)\n img.save(image_name)", "def get_image_and_prep(self,file_path):\r\n img = np.array(Image.open(file_path).convert('1'))\r\n img = img.reshape(28,28,1)\r\n return img", "def process_image(self, image_path):\n\n img = load_img(image_path, target_size=IMAGE_SIZE)\n img_array = img_to_array(img)\n # Create a batch by increase dimensions\n img_array = expand_dims(img_array, 0)\n print(img_array.shape)\n return img_array", "def transform_image(self):\n im = cv2.imread(\"result.png\", 0)\n im2 = cv2.resize(im, (28, 28))\n im = im2.reshape(28, 28, -1)\n im = im.reshape(1, 1, 28, 28)\n im = cv2.bitwise_not(im)\n im = im.reshape(28,28)\n \n with out:\n clear_output()\n \n # resize\n img = np.array(im)\n img = img.reshape(28*28,)\n \n #img = img/255.0\n \n return img", "def testImageProcessing():\n Im_pix = getRGB( 'in.png' ) # read in the in.png image\n print \"The first two pixels of the first row are\",\n print Im_pix[0][0:2]\n # remember that Im_pix is a list (the image)\n # of lists (each row) of lists (each pixel is [R,G,B])\n New_pix = [ [ [255 - num for num in p] for p in row ] for row in Im_pix ]\n # now, save to the file 'out.png'\n saveRGB( New_pix, 'out.png' )", "def __call__(self, results):\n # Image is bgr\n img = results['img'][..., ::-1]\n img = Image.fromarray(img)\n img = 
self.transform(img)\n img = np.asarray(img)\n img = img[..., ::-1]\n results['img'] = img\n return results", "def generate_normalized_rgb(self):\n \n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r/256.0,g/256.0,b/256.0", "def read_image(image_path):\n return np.array(load_img(image_path, color_mode='grayscale')) / 255", "def img_from_array(array):\n return Image.fromarray(array)", "def get_image(filepath,size):\n image = Image.open(filepath)\n newimage = image.resize((size,size)).convert('LA')\n pixels = np.asarray(newimage,dtype = np.float32)[:,:,0]\n return pixels", "def read_image_greyscale(path: str) -> np.ndarray:\n img = imread(path)\n if len(img.shape) > 2:\n img = np.dot(img[..., :3], [0.299, 0.587, 0.114])\n return img", "def _convert_images(raw):\n # Convert the raw images from the data-files to floating-points.\n #raw_float = np.array(raw, dtype=float) / 255.0\n\n # Reshape the array to 4-dimensions.\n images = raw.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images.transpose([0, 2, 3, 1])\n\n return images", "def to_rgb(im):\n w, h = im.shape\n ret = np.empty((w, h, 3), dtype=np.uint8)\n ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = im\n return ret", "def render(self):\n\n pixels = [\n [Color() for _ in range(self.width)] for _ in range(self.height)]\n\n for y in range(self.height):\n for x in range(self.width):\n ray_direction = Point(x, y) - self.camera\n ray = Ray(self.camera, ray_direction)\n pixels[y][x] = self._trace_ray(ray)\n\n return pixels", "def rgb(self):\n return [self.__r, self.__g, self.__b]", "def q_1(input_file, output_file):\n img = cv2.imread(input_file, cv2.IMREAD_COLOR)\n\n # Convert image to gray channel\n np_img = np.array(img)\n b = np_img[:,:,0]\n g = np_img[:,:,1]\n r = np_img[:,:,2]\n img_gray = 0.21 * b + 0.72 * g + 0.07 * r\n img_gray = np.array(img_gray, dtype='uint8')\n cv2.imwrite(output_file, img_gray)\n print(np_img)", "def _raw_to_gray(self):\n img_rgb = np.zeros((self.y_res, self.x_res, 3), dtype=np.uint8)\n img_rgb = np.array(self.img_raw)\n img_gray = np.zeros((self.y_res, self.x_res))\n img_gray[:, :] = img_rgb[:, :, 2]\n\n return img_gray", "def get_image():\n bgr = np.frombuffer(\n stream.read_frame().get_buffer_as_uint8(), dtype=np.uint8\n ).reshape(RESOLUTIONY, RESOLUTIONX, 3)\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n return rgb", "def img_recolor(self, args, input_image_path):\n \n ec = encoder.Encoder(output_path=args.intermediate_representation, method=args.method,\n size=args.size, p=args.p, grid_size=args.grid_size, plot=args.plot, quantize=args.quantize)\n dc = decoder.Decoder(output_path=args.output_path, method=args.method, size=args.size, p=args.p, gpu_id=args.gpu_id, plot=args.plot)\n\n ec.encode(input_image_path)\n img_gray_name = ar_utils.gen_new_gray_filename(input_image_path)\n img_gray_path = os.path.join(args.intermediate_representation, img_gray_name)\n dc.decode(img_gray_path)\n\n if args.delete_gray and os.path.exists(img_gray_path):\n os.remove(img_gray_path)", "def image2array(im):\n\n arr = numpy.zeros(im.size)\n\n for x in xrange(im.size[0]):\n for y in xrange(im.size[1]):\n arr[x,y] = im.getpixel((x,y))\n\n return arr", "def load_color_image_features(img_path):\n ac = scipy.misc.imread(img_path, mode='RGB')\n ac = ac / (255.0 / 2) - 1.0\n return np.array(ac)", "def 
_convert_to_yolo_img(self, img):\n\n img = img / 255.0\n h, w, c = img.shape\n img = img.transpose(2, 0, 1)\n outimg = make_image(w, h, c)\n img = img.reshape((w*h*c))\n data = c_array(c_float, img)\n outimg.data = data\n rgbgr_image(outimg)\n return outimg", "def get_image(image_path):\n image = Image.open(image_path, \"r\")\n width, height = image.size\n pixel_values = list(image.getdata())\n\n if (image.mode != \"RGBA\"):\n image = image.convert(\"RGB\")\n pixel_values = list(image.getdata())\n for idx, px in enumerate(pixel_values):\n pixel_values[idx] = [px[0], px[1], px[2], 255]\n\n return (list(chunks(pixel_values, width)), width, height)", "def rgb_processing(rgb_img, center, scale, rot=0):\n rgb_img = crop(rgb_img, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=rot)\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img", "def preprocess_image(image: Image) -> np.ndarray:\n return np.array(image.convert('L'))", "def greyScale(img, shape):\n s, v = shape\n greyPicture = [sum(img[i]) / 3 for i in range(v * s)]\n\n return greyPicture", "def test_fromarray_rgb_fail():\n arr = numpy.zeros((20, 10, 3), dtype='float')\n\n parameters = {'data': [arr]}\n\n images.fromarray(parameters).convert('RGB')", "def convert_img(self):\r\n self.img = self.img.convert('RGB')", "def convert_grayscale_to_rgb(x: np.ndarray) -> np.ndarray:\n return np.stack((x, ) * 3, axis=-1)", "def generate_image(size, bitdepth, pattern):\n\n width, height = size\n\n maxval = 2**bitdepth-1\n if maxval > 255:\n a = array('H')\n else:\n a = array('B')\n fw = float(width)\n fh = float(height)\n pfun = PATTERN[pattern]\n for y in range(height):\n fy = float(y)/fh\n for x in range(width):\n a.append(int(round(pfun(float(x)/fw, fy) * maxval)))\n return a", "def process_screen(screen):\n\n # Indexing convention varies between PIL and numpy\n screen = np.swapaxes(screen, 0, 1)\n # Load the array in PIL\n im = Image.fromarray(screen, 'RGB')\n # Convert to grayscale\n im = im.convert(mode='L')\n # Crop\n im = im.crop((0, 0, 288, 405))\n # Downscale and resize\n im = im.resize((84, 84))\n # Normalise\n im = np.array(im) / 255\n\n return im", "def recreate_image(x):\n reverse_mean = [-0.485, -0.456, -0.406]\n reverse_std = [1/0.229, 1/0.224, 1/0.225]\n in_channel = x.shape[-1]\n recreated_im = copy.copy(x) # C, H, W\n if in_channel == 3:\n for c in range(in_channel):\n recreated_im[:, :, c] /= reverse_std[c]\n recreated_im[:, :, c] -= reverse_mean[c]\n elif in_channel == 1:\n recreated_im[:, :, 0] /= reverse_std[1]\n recreated_im[:, :, 0] -= reverse_mean[1]\n recreated_im[recreated_im > 1] = 1\n recreated_im[recreated_im < 0] = 0\n recreated_im = np.round(recreated_im * 255)\n\n recreated_im = np.uint8(recreated_im) # H, W, C\n return recreated_im", "def main():\n import numpy as np\n from numpy import int32, uint\n\n pg.init()\n\n print(\"Using Numpy\")\n print(\"Press the left mouse button to advance image.\")\n print('Press the \"s\" key to save the current image.')\n\n # allblack\n allblack = np.zeros((128, 128), int32)\n surfdemo_show(allblack, \"allblack\")\n\n # striped\n # the element type is required for np.zeros in numpy else\n # an array of float is returned.\n striped = np.zeros((128, 128, 3), int32)\n striped[:] = (255, 0, 0)\n striped[:, ::3] = (0, 255, 255)\n surfdemo_show(striped, \"striped\")\n\n # rgbarray\n imagename = os.path.join(main_dir, \"data\", \"arraydemo.bmp\")\n imgsurface = pg.image.load(imagename)\n rgbarray = 
surfarray.array3d(imgsurface)\n surfdemo_show(rgbarray, \"rgbarray\")\n\n # flipped\n flipped = rgbarray[:, ::-1]\n surfdemo_show(flipped, \"flipped\")\n\n # scaledown\n scaledown = rgbarray[::2, ::2]\n surfdemo_show(scaledown, \"scaledown\")\n\n # scaleup\n # the element type is required for np.zeros in numpy else\n # an #array of floats is returned.\n shape = rgbarray.shape\n scaleup = np.zeros((shape[0] * 2, shape[1] * 2, shape[2]), int32)\n scaleup[::2, ::2, :] = rgbarray\n scaleup[1::2, ::2, :] = rgbarray\n scaleup[:, 1::2] = scaleup[:, ::2]\n surfdemo_show(scaleup, \"scaleup\")\n\n # redimg\n redimg = np.array(rgbarray)\n redimg[:, :, 1:] = 0\n surfdemo_show(redimg, \"redimg\")\n\n # soften\n # having factor as an array forces integer upgrade during multiplication\n # of rgbarray, even for numpy.\n factor = np.array((8,), int32)\n soften = np.array(rgbarray, int32)\n soften[1:, :] += rgbarray[:-1, :] * factor\n soften[:-1, :] += rgbarray[1:, :] * factor\n soften[:, 1:] += rgbarray[:, :-1] * factor\n soften[:, :-1] += rgbarray[:, 1:] * factor\n soften //= 33\n surfdemo_show(soften, \"soften\")\n\n # crossfade (50%)\n src = np.array(rgbarray)\n dest = np.zeros(rgbarray.shape) # dest is float64 by default.\n dest[:] = 20, 50, 100\n diff = (dest - src) * 0.50\n xfade = src + diff.astype(uint)\n surfdemo_show(xfade, \"xfade\")\n\n # all done\n pg.quit()", "def to_color(self):\n if self.channels == 4:\n color = opencv.cvtColor(self.img, opencv.COLOR_BGRA2BGR)\n return Image(color)\n elif self.channels == 1:\n color = opencv.cvtColor(self.img, opencv.COLOR_GRAY2BGR)\n return Image(color)\n else:\n return Image(self.img)", "def array_from_img(image):\n return np.array(image)", "def array2img(array):\n if len(array.shape) == 2:\n return Image.fromarray(np.clip(array, 0, 255).astype('uint8'), mode='L')\n elif len(array.shape) == 3:\n return Image.fromarray(np.clip(array, 0, 255).astype('uint8'), mode='RGB')\n else:\n print('Income array is not at appropriate shape!')", "def _rgb2y(self, im):\n if len(im.shape) < 3:\n return im\n return np.sum(im * [0.299, 0.587, 0.114], axis=2)", "def matplotlib_image(image):\n if image.ndim == 2:\n rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n else:\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return rgb", "def _reshape(self, data):\n\n\t\t\td = np.zeros((32,32,3))\n\t\t\td_r = data[0:1024].reshape(32,32)\n\t\t\td_g = data[1024:2048].reshape(32,32)\n\t\t\td_b = data[2048:].reshape(32,32)\n\n\t\t\tfor h in range(32):\n\t\t\t for w in range(32):\n\t\t\t for c in range(3):\n\n\t\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\t\tarray = np.array(d, dtype=np.uint8)\n\t\t\timg = Image.fromarray(array)\n\t\t\ttemp = img.resize(size = (64,64))\n\t\t\td = image.img_to_array(temp)\n\n\t\t\t#plt.imshow(d)\n\t\t\t#plt.show()\n\t\t\treturn d", "def image_to_data(image):\n pixels = image.convert('RGB').load()\n width, height = image.size\n for y in range(height):\n for x in range(width):\n r,g,b = pixels[(x,y)]\n color = rgb(r, g, b)\n yield (color >> 8) & 0xFF\n yield color & 0xFF", "def load_rgb(path):\n bands = band_list['rgb']\n img = None\n fmt = \"_{}.tif\"\n for b in bands:\n band_ds = rasterio.open(path + fmt.format(b))\n aux = band_ds.read(1)\n aux = norm_band(aux)\n aux = np.expand_dims(aux, axis=-1)\n if img is None:\n img = aux\n else:\n img = np.concatenate((img, aux), axis=-1)\n return img", "def _reshape(self, data):\n\n\t\td = np.zeros((32,32,3))\n\t\td_r = 
data[0:1024].reshape(32,32)\n\t\td_g = data[1024:2048].reshape(32,32)\n\t\td_b = data[2048:].reshape(32,32)\n\n\t\tfor h in range(32):\n\t\t for w in range(32):\n\t\t for c in range(3):\n\n\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\tarray = np.array(d, dtype=np.uint8)\n\t\timg = Image.fromarray(array)\n\t\ttemp = img.resize(size = (64,64))\n\t\td = image.img_to_array(temp)\n\n\t\t#plt.imshow(d)\n\t\t#plt.show()\n\t\treturn d", "def slice_array():\n img = Image.open(\"flamingo.jpg\")\n image_as_array = np.array(img)\n width, height, depth = image_as_array.shape\n\n red_channel = image_as_array[:, :, 0]\n green_channel = image_as_array[:, :, 1]\n blue_channel = image_as_array[:, :, 2]\n\n top_left_corner = image_as_array[:height // 2, :width // 2, :]\n top_right_corner = image_as_array[:height // 2, width // 2:, :]\n random_middle_pixels = image_as_array[11:29, 101:400, :]", "def render_array(self, resolution=300, channel=\"GRAYSCALE\"):\n # Method below returns a cairocffi.ImageSurface object\n # https://cairocffi.readthedocs.io/en/latest/api.html#cairocffi.ImageSurface\n surface, width, height = self._document.write_image_surface(\n resolution=resolution\n )\n img_format = surface.get_format()\n\n # This is BGRA channel in little endian (reverse)\n if img_format != FORMAT_ARGB32:\n raise RuntimeError(\n f\"Expect surface format to be 'cairocffi.FORMAT_ARGB32', but got {img_format}.\" +\n \"Please check the underlining implementation of 'weasyprint.document.Document.write_image_surface()'\"\n )\n\n img_buffer = surface.get_data()\n # Returns image array in \"BGRA\" channel\n img_array = np.ndarray(\n shape=(height, width, 4), dtype=np.uint8, buffer=img_buffer\n )\n if channel == \"GRAYSCALE\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2GRAY)\n elif channel == \"RGBA\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2RGBA)\n elif channel == \"RGB\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2RGB)\n elif channel == \"BGRA\":\n return np.copy(img_array)\n elif channel == \"BGR\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2BGR)\n else:\n valid_channels = [\"GRAYSCALE\", \"RGB\", \"RGBA\", \"BGR\", \"BGRA\"]\n raise ValueError(\n f\"Invalid channel code {channel}. Valid values are: {valid_channels}.\"\n )", "def render(self):\n np_img = np.array(self.prev_img, dtype=np.uint8)\n np_img = np.swapaxes(np_img, 0, 2)\n return np_img", "def yiq2rgb(imYIQ):\n return np.dot(imYIQ, np.linalg.inv(np.array(MATRIX).T))", "def preprocess(self, data):\n data_unnorm = data / 2.0 + 0.5\n \n if self.permute == 1:\n permute = [2, 1, 0]\n data_rgb_unnorm = data_unnorm[:, permute]\n elif self.permute == 0:\n data_rgb_unnorm = data_unnorm\n \n data_rgb_unnorm = F.upsample(data_rgb_unnorm, size=self.size, mode='bilinear')\n data_rgb = (data_rgb_unnorm - self.normalize_mean) / self.normalize_std\n return data_rgb" ]
[ "0.70360917", "0.7032956", "0.6748998", "0.66770595", "0.64644593", "0.6463771", "0.64612466", "0.6436402", "0.6378915", "0.6336183", "0.6302488", "0.6263761", "0.624275", "0.6217485", "0.62155837", "0.61859244", "0.61853856", "0.61381644", "0.61203206", "0.60929006", "0.6056744", "0.6051514", "0.6051217", "0.6048501", "0.60263324", "0.6018934", "0.5997953", "0.5955463", "0.5953483", "0.5934768", "0.5922572", "0.5919215", "0.5914749", "0.59146744", "0.5900208", "0.58949894", "0.5891846", "0.58684546", "0.5863706", "0.5857556", "0.58479416", "0.5846311", "0.5844837", "0.58416307", "0.5839048", "0.58335084", "0.5829988", "0.5828206", "0.5827445", "0.5817116", "0.5814034", "0.58062905", "0.58015674", "0.5777475", "0.5776714", "0.5775356", "0.57723266", "0.57723194", "0.5765495", "0.5754229", "0.5746401", "0.5746149", "0.5743001", "0.5741567", "0.5740249", "0.5737142", "0.57360756", "0.5734477", "0.5729071", "0.5719495", "0.571884", "0.57145816", "0.5702659", "0.57005656", "0.5692819", "0.56894594", "0.56890243", "0.56868684", "0.5679998", "0.5679032", "0.56786776", "0.5678284", "0.5677728", "0.5675702", "0.567371", "0.56719375", "0.5668369", "0.56682366", "0.5667854", "0.5659597", "0.56579345", "0.5653183", "0.5652828", "0.5643126", "0.56411356", "0.56327164", "0.56263506", "0.5600548", "0.5600352", "0.559847" ]
0.6756124
2
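For reference, a minimal runnable sketch (an assumption, not part of the dataset) of the flat-buffer to H x W x C reshape pattern that recurs in the negatives above (e.g., the AirSim and CARLA snippets); the names height, width, and the synthetic raw buffer are illustrative:

    # Illustrative sketch, not from the dataset: flat uint8 buffer -> H x W x C.
    import numpy as np

    height, width = 4, 6
    raw = bytes(range(height * width * 4))      # stand-in for image_data_uint8
    img1d = np.frombuffer(raw, dtype=np.uint8)  # frombuffer replaces the deprecated fromstring
    img_rgba = img1d.reshape(height, width, 4)  # H x W x 4 (e.g., BGRA)
    img_rgb = img_rgba[:, :, :3][:, :, ::-1]    # drop alpha, swap BGR -> RGB
    assert img_rgb.shape == (height, width, 3)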
Approximates a root of this function using a single iteration of Newton's method.
def newtonsMethod(self, x, a): return x - a * (self._f(x) / self._df(x))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newton(f, xinit, tol, N):\n if f(xinit) < tol:\n return xinit\n else:\n n = 1\n while n < N:\n xnew = xinit - (f(xinit) / derivative(f, xinit))\n if abs(f(xnew)) < tol:\n print('Root found. Number of iterations: ', n)\n return xnew\n break\n else:\n xinit = xnew\n n = n + 1\n else:\n return 'Max iterations reached. No root found within chosen tolerance.'", "def newton_method(f, x_init = 0, epsilon = 1e-10):\n prev_value = x_init + 2 * epsilon\n value = x_init\n\n iterations = 0\n while abs(prev_value - value) > epsilon:\n prev_value = value\n\n f_dash = derivative(f, value)\n value = value - f(value) / f_dash\n\n iterations += 1\n\n print(f\"Newton Method converged in {iterations} iterations\")\n\n return value", "def newton(f, x0, Df, tol=1e-5, maxiter=15, alpha=1.):\n raise NotImplementedError(\"Problem 1 Incomplete\")", "def square_root_with_newton_method(number, iterations):\n # Inital value of g.\n # Cycle based on the iterations number.\n # Formula in the cycle.\n # Return the rounded final result.\n\n if number <= 0 or iterations < 0:\n return None\n g = number * 0.5\n for i in range(int(iterations)):\n g2 = (g + number / g) * 0.5\n g = g2\n return round(g, 3)", "def rootfind_newton(func, x0, a, b, maxiter=50, tol=1.0e-11):\n\n for iter in xrange(maxiter):\n\n fval, fpval, args = func(x0)\n # print \"x0=%.4f fval=%.2e fpval=%.2e [%.4f, %.4f]\" % (x0, fval, fpval, a, b)\n\n if fval < 0:\n a = x0\n else:\n b = x0\n\n x = x0 - fval/fpval\n if not (a < x < b):\n # Once we have bracketed the root, we don't allow the\n # iterations to leave the bracket.\n x = 0.5*(a+b)\n\n if np.abs(x-x0) < tol or np.abs(fval) < tol:\n break\n\n x0 = x\n\n return x, fval, iter, args", "def test_newton():\n\n f = lambda x: x**2 + np.sin(5*x)\n df = lambda x: 2*x + 5*np.cos(5*x)\n ddf = lambda x: 2 + 0,-25*np.sin(5*x)\n\n\n print newtonsMethod(f,df,ddf, 0, niter = 100)", "def newtons_method(f, initial_guess, max_iter = 1000, method = 'exact', tol =1e-12):\n\n if method not in ['inverse', 'exact', 'gmres', 'gmres_action']:\n raise Exception(\"Not a valid method.\")\n if len(f(initial_guess)) != len(initial_guess):\n raise Exception('Output dimension of f should be the same as the input dimension of f.')\n if method == 'gmres_action':\n return _newtons_method_gmres_action(f, initial_guess, max_iter, tol)\n x0 = ad.create_vector('x0', initial_guess)\n for iter_num in range(max_iter):\n fn = np.array(f(x0)); #need convert the list/array that is passed back from function, so downstream autodiff functions for vectors work properly\n jacob = ad.get_jacobian(fn, ['x0{}'.format(i) for i in range(1, len(fn) + 1)])\n if method == 'inverse':\n step = np.linalg.inv(-jacob).dot(ad.get_value(fn))\n if method == 'exact':\n step = np.linalg.solve(-jacob, ad.get_value(fn))\n elif method == 'gmres':\n step, _ = gmres(jacob, -ad.get_value(fn), tol = tol, atol = 'legacy')\n xnext = x0 + step\n \n #check if we have converged\n if np.all(np.abs(ad.get_value(xnext) - ad.get_value(x0)) < tol):\n return (ad.get_value(xnext), iter_num + 1);\n \n #update x0 because we have not converged yet\n x0 = xnext\n \n raise RuntimeError(\"Failed to converge after {0} iterations, value is {1}\".format(max_iter, ad.get_value(x0)) );", "def my_Newton( fct, df_dx, x0):\r\n xn = float(x0)\r\n eps = 1e-5\r\n N = 20\r\n i = 0\r\n while abs( fct( xn**(i + 1)) - fct( xn**i)) > eps and i < N:\r\n x_next = xn - fct(xn)/df_dx(xn)\r\n print( i, 'fct value', abs( fct(xn)), x_next)\r\n xn = x_next\r\n i += 1\r\n if abs( fct( xn)) < eps:\r\n return 
x_next\r\n else: #solution did not converge\r\n return np.nan", "def newton_method(f, x, Ep, step):\n\n while True:\n step = step + 1\n # print(\"bnd1:=\",bnd1)\n h = f(x) / derivative(f, x)\n x = x - h\n if (decide(abs(h) <= Ep)):\n break\n # print(\"Root in Approximation: \",bnd1)\n return step", "def newton(f, x0, dx, eps=1e-10):\n # Initialization\n globvar.ncalls = 0\n x = np.copy(x0)\n n = len(x)\n J = np.zeros((n, n), dtype='float64')\n fx = f(x)\n\n # Begin root search\n while True:\n globvar.ncalls += 1\n\n # Fill the Jacobian matrix\n for j in range(n):\n x[j] += dx[j]\n df = f(x) - fx\n\n for i in range(n):\n J[i, j] = df[i] / dx[j]\n\n x[j] -= dx[j]\n\n # Decompose and solve using Given's rotations\n decomp(J)\n Dx = -fx\n solve(J, Dx)\n\n # Begin backtracking linesearch\n lamb = 2.0\n while True: \n lamb /= 2\n y = x + Dx * lamb\n fy = f(y)\n\n fynorm = np.linalg.norm(fy)\n fxnorm = np.linalg.norm(fx)\n\n if (fynorm < (1 - lamb / 2) * fxnorm) or (lamb < (1 / 128.0)):\n break\n\n # Save latest approximation\n x = y\n fx = fy\n\n Dxnorm = np.linalg.norm(Dx)\n fxnorm = np.linalg.norm(fx)\n dxnorm = np.linalg.norm(dx)\n if Dxnorm < dxnorm or fxnorm < eps:\n break\n\n return x", "def newton_iteration(f: Callable, df: Callable, eps: float, x0: float = None, a: float = None, b: float = None,\n weight: float = 0.9, display: bool = False, max_iterations: int = 100) -> float:\n x = np.inf\n if x0 is None:\n x0 = (a + b) / 2\n if a is not None and b is not None and a == b:\n return a\n x_next = x0\n iterations = 0\n while abs(x - x_next) > eps and iterations < max_iterations:\n iterations += 1\n x = x_next\n\n if display:\n import matplotlib.pyplot as plt\n xx0 = a or x-1\n xx1 = b or x+1\n xx = np.linspace(xx0, xx1, 100)\n yy = np.array(list(map(f, xx)))\n plt.plot(xx, yy)\n plt.axvline(x=x)\n plt.show()\n\n f_x = f(x)\n try:\n df_x = df(x)\n except ZeroDivisionError:\n df_x = (f_x - f(x-eps))/eps\n if df_x != 0:\n x_next = x - f_x / df_x\n\n if a is not None and x_next < a:\n x_next = weight * a + (1 - weight) * x\n elif b is not None and x_next > b:\n x_next = weight * b + (1 - weight) * x\n\n if a is not None and x_next < a:\n x_next = a\n if b is not None and x_next > b:\n x_next = b\n\n return x_next", "def my_Newton(fct, df_dx, x0):\r\n xn = float(x0)\r\n eps = 1e-5\r\n N = 20\r\n i = 0\r\n while abs(fct (xn)) > eps and i < N:\r\n x_next = xn - fct(xn)/df_dx(xn)\r\n print(i , 'fct_value', abs(fct(xn)), x_next)\r\n xn = x_next\r\n i += 1\r\n if abs(fct(xn)) < eps:\r\n return x_next\r\n else: #solution did not converge\r\n return np.nan", "def newton(f, x0, Df, tol=1e-5, maxiter=15, alpha=1.):\r\n #initialize variables\r\n iter = 0\r\n xk = x0\r\n change = tol+1\r\n #perform newton's method until maxiter hit or under tolerance\r\n #if x is in R\r\n if np.isscalar(x0):\r\n while iter < maxiter and change > tol:\r\n iter += 1\r\n xk1 = xk\r\n xk = xk - alpha*f(xk)/Df(xk)\r\n change = abs(xk-xk1)\r\n #if x is in Rn\r\n else:\r\n while iter < maxiter and change > tol:\r\n iter += 1\r\n xk1 = xk\r\n D = Df(xk)\r\n #make sure the matrix isn't singular\r\n if la.det(D)==0:\r\n break\r\n yk = la.solve(D,f(xk))\r\n xk = xk - alpha*yk\r\n change = la.norm(xk-xk1)\r\n #check if method converged\r\n if change > tol:\r\n conv = False\r\n else:\r\n conv = True\r\n return xk,conv,iter", "def root_finding_newton_previously(fun, J, x, eps, max_iter, args):\n F_value = fun(x, args)\n F_value_ = F_value.reshape((-1,1))\n F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector\n iteration_counter = 0\n 
while abs(F_norm) > eps and iteration_counter < max_iter:\n delta = np.linalg.solve(J(x, args), -F_value)\n x = x + delta\n F_value = fun(x, args)\n F_value_ = F_value.reshape((-1,1))\n F_norm = np.linalg.norm(F_value, 2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n raise ValueError('Maximum iteration reached in newton root finding!')\n return x, iteration_counter", "def root_finding_newton_previously(fun, J, x, eps, max_iter, args):\n F_value = fun(x, args)\n # F_value_ = F_value.reshape((-1, 1))\n F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector\n iteration_counter = 0\n while abs(F_norm) > eps and iteration_counter < max_iter:\n delta = np.linalg.solve(J(x, args), -F_value)\n x = x + delta\n F_value = fun(x, args)\n # F_value_ = F_value.reshape((-1, 1))\n F_norm = np.linalg.norm(F_value, 2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n raise ValueError(\"Maximum iteration reached in newton root finding!\")\n return x, iteration_counter", "def root_finding_newton(fun, J, x, eps, max_iter, args):\n F_value = fun(x, *args)\n F_value_ = F_value.reshape((-1,1))\n F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector\n iteration_counter = 0\n while abs(F_norm) > eps and iteration_counter < max_iter:\n delta = np.linalg.solve(J(x, args), -F_value_)\n\n for i in range(x.size): #wtf numba!?!?!\n x[i] += delta[i,0]\n\n F_value = fun(x, *args)\n F_value_ = F_value.reshape((-1,1))\n F_norm = np.linalg.norm(F_value, 2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n raise ValueError('Maximum iteration reached in newton root finding!')\n return x, iteration_counter", "def newton1d(f, df, ddf, x, niter=10):\n for i in xrange(niter):\n x_new = x - df(x)/ddf(x)\n x = x_new\n return x", "def newton(f, f_prime, x0, n):\n approximations = [x0]\n\n xnm1 = x0\n for i in range(1, n):\n xn = xnm1 - f(xnm1) / f_prime(xnm1)\n approximations.append(xn)\n xnm1 = xn\n\n for n in range(len(approximations)):\n print(f'Approximation {n + 1}: {approximations[n]:<17.13f} Exact: {pi:<17.13f} Difference: {abs(pi - approximations[n]):.13e}')", "def newtons_method_1d(f, df_dx, x0, tol):\n # begin solution\n x = x0\n while abs(f(x)) > tol:\n x -= f(x) / df_dx(x)\n return x\n # end solution", "def newton_method_vector(f, x_init, epsilon = 1e-10):\n prev_value = x_init + 2 * epsilon\n value = x_init\n\n iterations = 0\n while np.all(np.abs(prev_value - value)) > epsilon:\n prev_value = value\n\n j = jacobian(f, value)\n value = value - np.dot(np.linalg.pinv(j), f(value))\n\n iterations += 1\n\n print(f\"Newton Method converged in {iterations} iterations\")\n\n return value", "def quasi_newtons_method(f, initial_guess, max_iter = 10000, method = 'BFGS', tol = 1e-12):\n \n if method not in ['BFGS', 'DFP', 'Broyden']:\n raise Exception(\"Not a valid method.\")\n x = initial_guess\n H = np.identity(len(x))\n for i in range(max_iter):\n x_vector = ad.create_vector('x', x)\n fn_at_x = f(x_vector)\n gradient = fn_at_x.getGradient(['x{}'.format(i) for i in range(1, len(x) + 1)])\n\n p = -H @ gradient\n \n alpha = line_search(f, x, p)\n delta_x = alpha * p\n\n x = x + delta_x\n x_vector2 = ad.create_vector('x', x)\n fn_at_x2 = f(x_vector2)\n gradient2 = fn_at_x2.getGradient(['x{}'.format(i) for i in range(1, len(x) + 1)])\n if 
np.sqrt(np.abs(gradient2).sum()) < tol:\n break\n y = (gradient2 - gradient).reshape(-1, 1)\n delta_x = delta_x.reshape(-1, 1)\n if method == 'BFGS':\n H = (np.identity(len(H)) - (delta_x @ y.T) / (y.T @ delta_x)) @ H \\\n @ (np.identity(len(H)) - (y @ delta_x.T) / (y.T @ delta_x)) + (delta_x @ delta_x.T) / (y.T @ delta_x)\n elif method == 'DFP':\n H = H + (delta_x @ delta_x.T) / (delta_x.T @ y) - (H @ y @ y.T @ H) / (y.T @ H @ y)\n elif method == 'Broyden':\n H = H + ((delta_x - H @ y) @ delta_x.T @ H) / (delta_x.T @ H @ y)\n\n return (x, i + 1)", "def newton_raphson(f,x0,iterations): \n current = x0\n fdash = differentiate_polynomial(f)\n print(fdash)\n for i in range(iterations): \n current = current - evaluate_polynomial(f,current)/evaluate_polynomial(fdash,current)\n return current", "def NewtonMethod(f, df, x=0.75, tol=1e-10):\n\tstart = time()\n\terror = tol + 1\n\t\n\ti = 0\n\terrs = []\n\n\twhile error > tol:\n\t\terrs.append(error)\n\n\t\tx_temp = x\n\t\tx = x - f(x) / df(x)\n\t\terror = np.abs(x-x_temp)\n\t\ti = i+1\n\tend = time()\n\treturn x, (end-start), i", "def sqrt_newton(a):\n\tdef sqrt_update(x):\n\t\treturn 0.5 * (x + a / x)\n\tdef sqrt_close(x):\n\t\treturn approx_eq(x * x, a)\n\treturn improve(sqrt_update, sqrt_close)", "def newton1d(f, df, ddf, x, niter=10):\n\n x_0 = x\n x_k = x\n\n for i in xrange(niter):\n x_k1 = x_k - df(x_k)/ddf(x_k)\n x_k = x_k1\n\n return x_k", "def newton_update(f, df):\n def update(x):\n return x - f(x) / df(x)\n return update", "def newton(n):\n x = n\n y = (x + 1) // 2\n while y < x:\n x = y\n y = (x + n // x) // 2\n return x", "def test_newton_root_finder(self):\n\n # Set up the problem of finding the square roots of three numbers.\n constants = np.array([4.0, 9.0, 16.0])\n initial_values = np.ones(len(constants))\n\n def objective_and_gradient(values):\n objective = values**2 - constants\n gradient = 2.0 * values\n return objective, gradient\n\n # Obtain and evaluate a tensor containing the roots.\n roots = newton_root_finder(objective_and_gradient, initial_values)\n root_values, converged, failed = self.evaluate(roots)\n\n # Reference values.\n roots_bench = np.array([2.0, 3.0, 4.0])\n converged_bench = np.array([True, True, True])\n failed_bench = np.array([False, False, False])\n\n # Assert that the values we obtained are close to the true values.\n np.testing.assert_array_equal(converged, converged_bench)\n np.testing.assert_array_equal(failed, failed_bench)\n np.testing.assert_almost_equal(root_values, roots_bench, decimal=7)", "def root_finding_newton(fun, J, x, eps, max_iter, args):\n F_value = fun(x, args)\n F_value_ = F_value.reshape((-1, 1))\n F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector\n iteration_counter = 0\n while abs(F_norm) > eps and iteration_counter < max_iter:\n delta = np.linalg.solve(J(x, args), -F_value_)\n\n for i in range(x.size): # wtf numba!?!?!\n x[i] += delta[i, 0]\n\n F_value = fun(x, args)\n F_value_ = F_value.reshape((-1, 1))\n F_norm = np.linalg.norm(F_value, 2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n raise ValueError(\"Maximum iteration reached in newton root finding!\")\n return x, iteration_counter", "def _newton_update(func):\n return lambda x: x - func[0](x) / func[1](x)", "def test_find_root_multi(self):\n\n print(\"\\nNewton-Raphson Method: Multivariate\")\n\n # input\n xi = 1 # initial value of first variable\n yi = 1 # initial value of second variable\n em = 1e-6 # error margin\n imax = 
1e3 # maximum number of iterations to consider\n\n # function\n root, ic, msg = NewtonRaphson.find_root_multi([self.f, self.g], [[self.dfdx, self.dfdy],[self.dgdx, self.dgdy]], [xi, yi], em, imax)\n\n # output\n if root != None:\n print(\"\\tInitial value: {xi}\\n\\tRoot: {X}\\n\\tFirst function Value: {f}\\n\\tSecond function Value: {g}\\n\\tIterations: {ic}\".format(xi=xi, X=root, f=self.f(root), g=self.g(root), ic=ic))\n else:\n print(\"\\t{msg}.\".format(msg=msg))", "def newtons_method(fn, grad_fn, max_iters=20):\n\n x_0 = torch.normal(mean=torch.tensor(0.), std=10)\n x = newton_restart(fn, grad_fn, x_0, max_iters=20)\n\n while x is None:\n x_0 = torch.normal(mean=torch.tensor(0.), std=10)\n x = newton_restart(fn, grad_fn, x_0, max_iters=20)\n\n return x", "def newtonsMethod(f, df, ddf, x, niter=10):\n\n points = []\n\n for i in xrange(niter):\n point = np.dot(-la.inv(ddf(x)), (df(x)))\n\n slope = np.dot(df(x), point)\n\n a = backtracking(f, slope, x, point)\n \n #update point\n x_k = x + a*point\n points.append(x_k)\n x = x_k\n\n return points", "def newton_quad(f, x0, dx, eps=1e-10):\n # Initialization\n globvar.ncalls = 0\n x = np.copy(x0)\n n = len(x)\n J = np.zeros((n, n), dtype='float64')\n fx = f(x)\n\n # Begin root search\n while True:\n globvar.ncalls += 1\n\n # Fill the Jacobian matrix\n for j in range(n):\n x[j] += dx[j]\n df = f(x) - fx\n\n for i in range(n):\n J[i, j] = df[i] / dx[j]\n\n x[j] -= dx[j]\n\n # Decompose and solve using Given's rotations\n decomp(J)\n Dx = -fx\n solve(J, Dx)\n\n # Begin quadratic linesearch \n lamb = 1.0\n y = x + Dx * lamb\n fy = f(y)\n\n fxnorm = np.linalg.norm(fx)\n fynorm = np.linalg.norm(fy)\n\n # Define the known values of the minimization function (Eq. 9)\n g0 = 0.5 * fxnorm ** 2\n dg0 = - fxnorm ** 2\n\n while (fynorm > (1 - lamb / 2) * fxnorm) and (lamb > (1 / 128.0)):\n glamb = 0.5 * fynorm ** 2\n c = (glamb - g0 - dg0 * lamb) / (lamb ** 2)\n\n # Update step\n lamb = - dg0 / (2 * c)\n y = x + Dx * lamb\n fy = f(y)\n fynorm = np.linalg.norm(fy)\n\n # Save latest approximation\n x = y\n fx = fy\n\n Dxnorm = np.linalg.norm(Dx)\n dxnorm = np.linalg.norm(dx)\n if Dxnorm < dxnorm or fxnorm < eps:\n break\n\n return x", "def Newton(f, x0, tol, kmax, c=0.5, tao=1e-6, reg_const=1e-7):\r\n\r\n k = 1\r\n\r\n xk = x0.copy() # We don't want to change x0 from the outside scope\r\n\r\n # Cache for f(xk) in case the calculation is expensive\r\n # - Should save 2-3 function evaluations per outside loop iteration\r\n f_xk = f(xk)\r\n\r\n s = LA.norm(f_xk)**2\r\n\r\n while np.sqrt(s) > tol and k < kmax:\r\n # Build the Jacobian matrix approximation using Central Differences\r\n H = _CentralDifferencesJacobian(f, xk, tao)\r\n\r\n try:\r\n # Uses LAPACK _gesv to solve H*dk = -f(xk)\r\n dk = LA.solve(H, -f_xk)\r\n except LA.LinAlgError:\r\n # Most likely here because the above H is singular\r\n # Therefore we regularize by adding a small (1e-7) multiple of I:\r\n dk = LA.solve(H + reg_const*np.eye(xk.shape[0]), -f_xk)\r\n\r\n z = c*LA.norm(np.matmul(f_xk.T, H))*LA.norm(dk)\r\n\r\n # Solve the step size using the method by Stoer and Bulirsch\r\n j = 0\r\n L = LA.norm(f(xk + dk))**2\r\n R = s-z\r\n Lmin = L\r\n index = 0\r\n while L > R:\r\n j += 1\r\n L = LA.norm(f(xk + 2**(-j)*dk))**2\r\n R = s - 2**(-j)*z\r\n if L < Lmin:\r\n Lmin = L\r\n index = j\r\n\r\n # Update xk\r\n xk = xk + 2**(-index)*dk\r\n\r\n k += 1\r\n\r\n # Cache the new f(xk)\r\n f_xk = f(xk)\r\n\r\n s = LA.norm(f_xk)**2\r\n\r\n # If kmax gets exceeded, we can't trust the answer so issue a 
warning\r\n if k >= kmax:\r\n # For debugging purposes\r\n # print(\"k exceeded kmax, can't trust answer\")\r\n # return xk\r\n warnings.warn(\"kmax exceeded, consider raising it\", NonConvergenceWarning)\r\n\r\n # Otherwise, we stopped the above loop because we're within tolerance so the answer is good\r\n return xk, k", "def test_newton_rhapson(testFunctions, tol, printFlag): \n pass", "def NewtonRaphson(z_ini, f, fprime, tol):\r\n z_current = z_ini\r\n while np.linalg.norm(f(z_current)) > tol:\r\n hess = fprime(z_current)\r\n inv_hess = np.linalg.solve(hess, np.eye(hess.shape[0]))\r\n z_new = z_current - inv_hess.dot(f(z_current))\r\n z_current = z_new\r\n return z_current", "def newton_1d(f, df, ddf, x, n=10):\n ret = [x]\n xk = x\n for i in range(n):\n if(ddf(xk) == 0):\n break\n xk1 = xk - df(xk) / ddf(xk)\n xk = xk1\n ret.append(xk)\n return ret", "def test_newton_rhapson_system(testFunctions, tol, printFlag): \n pass", "def newton_sqrt(n, threshold):\n\tx = n\n\twhile True:\n\t\troot = 0.5 * (x + n/x)\n\n\t\tif abs(root - x) < threshold:\n\t\t\treturn root\n\n\t\tx = root", "def _compute_newton_step(lambdas, p_norm, w_norm):\n return lambdas.candidate + (p_norm / w_norm) ** 2 * (p_norm - 1)", "def gaussNewton(f, df, jac, r, x, niter=10):\n\n for i in xrange(niter):\n #check if it's close enough\n if np.allclose(np.dot(jac(x).T, r(x)), 0):\n return x\n\n else:\n p = la.solve(np.dot(jac(x).T, jac(x)), -np.dot(jac(x).T, r(x)))\n\n a = line_search(f, df, x, p)[0]\n if a is None:\n return x\n else:\n x_k = x + a*p\n\n return x_k", "def _newtons_method_gmres_action(f, initial_guess, max_iter=50, tol=1e-12):\n\n output_dim = len(f(initial_guess))\n \n @np.vectorize\n def sum_values(dictionary):\n return sum(dictionary.values())\n \n def create_action(x0):\n \n def L_fun(x):\n \"\"\"\n Action\n Returns J_f(x0)*x by setting the values of 'x' as the initial derivatives for the variables in x0.\n \"\"\"\n \n f_x0 = f(ad.create_vector('x0', x0, seed_vector=x));\n f_x0 = np.array(f_x0) #ensure that f_x0 is np.array\n action = sum_values(ad.get_deriv(f_x0))\n return action\n \n L = LinearOperator(shape=(output_dim, len(x0)), matvec=L_fun)\n \n return L\n \n x0 = initial_guess\n for iter_num in range(max_iter):\n L = create_action(x0)\n b = -1 * np.array(f(x0))\n if len(x0) == 1:\n b = np.array([b])\n step, _ = gmres(L, b, tol = tol, atol = 'legacy')\n xnext = x0 + step \n if np.all(np.abs(xnext - x0) < tol):\n return (xnext, iter_num + 1);\n x0 = xnext\n \n raise RuntimeError(\"Failed to converge after {0} iterations, value is {1}\".format(max_iter, x0) );", "def halley_newton ( fun , ## the function \n x , ## x \n deriv1 , ## the first derivative \n deriv2 = None , ## the second derivative\n fx = None , ## value of fun(x)\n args = () ) : ## additional arguments for function calls\n \n ## newton corrections\n d1 = float ( deriv1 ( x , *args ) ) \n fx = float ( fun ( x , *args ) ) if fx is None else fx \n \n if d1 : rn = fx / d1\n else : return None ## error here! 
\n \n ## make corrections: Halley's steps\n if deriv2 : \n d2 = float ( deriv2 ( x , *args ) )\n if d2 : rn /= ( 1.0 - 0.5 * rn * d2 / d1 ) ## Halley's correction \n \n return x - rn ## Newton/Halley's iteration", "def Newton_system(F, J, cst, x, max_iter=100, eps=1e-4):\n F_value = F(cst, x)\n F_norm = np.linalg.norm(F_value, ord=2) # l2 norm of vector\n iteration_counter = 0\n while abs(F_norm) > eps and iteration_counter < max_iter:\n try:\n delta = np.linalg.solve(J(cst, x), -F_value)\n except LinAlgError:\n print(\"Singular matrix in np.linalg.solve, after \", iteration_counter, \" iterations.\")\n return x, -1\n else:\n x = x + delta\n F_value = F(cst, x)\n F_norm = np.linalg.norm(F_value, ord=2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n return x, iteration_counter", "def Stochastic_RootFinder(F, x0, a=.5, alpha=0.5, max_iter=20, xtol=.01, ValidInterval=[0.00001,100]):\n\n xp,n= ValidInterval[1]*2,1\n n0 = int(max_iter/20)+1\n xn = x0\n displacement = 10 * xtol\n\n while displacement > xtol and n<max_iter:\n an = a/ ( (n+n0)** alpha)\n Fn=F(xn)\n print('SRF step '+ str(n))\n print('--------------------------')\n print('current value of x is '+ str(xn))\n print('current value of F is '+ str(Fn))\n xnext = xn-an*np.asscalar(Fn)\n \n if xnext>ValidInterval[1]:\n xnext = ValidInterval[1]\n elif xnext<ValidInterval[0]:\n xnext = ValidInterval[0]\n \n print('new x is ' + str(xnext))\n\n\n\n # shift and compute total displacement in the last 2 steps\n displacement = np.ptp(np.array([xp,xn,xnext]))\n print('displacement in the last 3 steps is ' + str(displacement))\n xp=xn\n xn=xnext\n \n\n n=n+1\n\n return xnext,Fn", "def newtons_method(expr, wrt, atol=1e-12, delta=None, debug=False,\n itermax=None, counter=None):\n\n if delta is None:\n delta = Dummy()\n Wrapper = Scope\n name_d = 'delta'\n else:\n Wrapper = lambda x: x\n name_d = delta.name\n\n delta_expr = -expr/expr.diff(wrt)\n whl_bdy = [Assignment(delta, delta_expr), AddAugmentedAssignment(wrt, delta)]\n if debug:\n prnt = Print([wrt, delta], r\"{0}=%12.5g {1}=%12.5g\\n\".format(wrt.name, name_d))\n whl_bdy = [whl_bdy[0], prnt] + whl_bdy[1:]\n req = Gt(Abs(delta), atol)\n declars = [Declaration(Variable(delta, type=real, value=oo))]\n if itermax is not None:\n counter = counter or Dummy(integer=True)\n v_counter = Variable.deduced(counter, 0)\n declars.append(Declaration(v_counter))\n whl_bdy.append(AddAugmentedAssignment(counter, 1))\n req = And(req, Lt(counter, itermax))\n whl = While(req, CodeBlock(*whl_bdy))\n blck = declars + [whl]\n return Wrapper(CodeBlock(*blck))", "def newtona(f, Df, x_0, M, epsilon):\n xn = []\n v = f(x_0)\n k = 1\n\n while k<M:\n xn.append(x_0)\n if abs(v) < epsilon: #warunek pierwszy\n break\n\n x_1 = x_0 - v/Df(x_0) #wzor metody iteracyjnej newtona\n v = f(x_1)\n\n if abs(x_1 - x_0) < epsilon: #warunek drugi\n break\n x_0 = x_1\n k=k+1\n\n return(x_0, xn)", "def newtonraphson(self,g_temp,var_init):\n n_step=0\n error=np.linalg.norm(self.evaluate(var_init,g_temp))\n\n while (error > 1e-12 and n_step < 50):\n #Improve solution while error is too large and the number of steps does not exceed a limit\n J_inv=np.linalg.pinv(self.jacobian(var_init,g_temp))\n var_new=var_init-np.dot(J_inv,self.evaluate(var_init,g_temp))\n error=np.linalg.norm(self.evaluate(var_new,g_temp))\n var_init=var_new\n n_step+=1\n\n return var_init", "def newton(n, k):\n if k == 0 or k == n:\n return 1\n else:\n return newton(n - 1, k - 1) + 
newton(n - 1, k)", "def Newton(self, newton=True, eps=0.0000001):\n x_new=np.zeros(2)\n x_old=np.array([self.x_init,self.y_init])\n #eps=10.0**(-5)\n delta=10\n count=0\n if(newton):\n while(True):\n count+=1\n invJacobi=self.secJacobian(x_old)\n if(type(invJacobi)==type(0)):##check that the jacobian is not singular\n return x_new.tolist()\n part2=np.dot(invJacobi,np.array(self.derFunc(x_old)))\n\n x_new[0]=x_old[0]-part2[0]\n x_new[1]=x_old[1]-part2[1]\n if(abs(x_new[0]-x_old[0])>abs(x_new[1]-x_old[1])):\n delta=abs(x_new[0]-x_old[0])\n else:\n delta=abs(x_new[1]-x_old[1])\n\n x_old[0]=x_new[0]\n x_old[1]=x_new[1]\n print \"printing x\",x_new\n if(count>=self.step or eps>=delta):\n return x_new.tolist()\n \n \n else:\n while(True):\n count+=1\n invJacobi=self.secJacobian(x_old)\n if(type(invJacobi)==type(0)):##check that the jacobian is not singular\n return x_new.tolist()\n part2=np.dot(invJacobi,np.array(self.derFunc(x_old)))\n \n x_new[0]=x_old[0]-part2[0]\n x_new[1]=x_old[1]-part2[1]\n \n if(abs(x_new[0]-x_old[0])>abs(x_new[1]-x_old[1])):\n delta=abs(x_new[0]-x_old[0])\n else:\n delta=abs(x_new[1]-x_old[1])\n \n x_old[0]=x_new[0]\n x_old[1]=x_new[1]\n print \"printing x\", x_new\n if(count>=self.step or eps>=delta):\n return x_new.tolist()", "def smale_newton(f, x0, df=(), args=(), tol=1.48e-8, maxiter=50):\n # skip using smale alpha if not enough derivatives are provided\n if len(df) < 2:\n warnings.warn('not enough derivatives provided for smale',\n RuntimeWarning)\n else:\n # raise a warning if x0 is not an approximate solution\n alpha = smale_alpha(f, x0, df, args=args)\n if alpha > 0.15767078078675478:\n warnings.warn('the estimate may not be an approximate solution to the '\n 'function', RuntimeWarning)\n\n # newton iterate (omit use of fprime2 due to unknown issues)\n fprime = df[0] if len(df) > 0 else None\n zero = newton(f, x0, fprime=fprime, args=args, tol=tol, maxiter=maxiter)\n return zero", "def newton_jacobian(f, x0, Jf, eps=1e-10):\n # Initialization\n globvar.ncalls = 0\n x = np.copy(x0)\n n = len(x)\n J = np.zeros((n, n), dtype='float64')\n fx = f(x)\n\n # Begin root search\n while True:\n globvar.ncalls += 1\n\n # Calculate Jacobian\n J = Jf(x)\n\n # Decompose and solve using Given's rotations\n decomp(J)\n Dx = -fx\n solve(J, Dx)\n\n # Begin backtracking linesearch\n lamb = 2.0\n while True: \n lamb /= 2\n y = x + Dx * lamb\n fy = f(y)\n\n fynorm = np.linalg.norm(fy)\n fxnorm = np.linalg.norm(fx)\n\n if (fynorm < (1 - lamb / 2) * fxnorm) or (lamb < (1 / 128.0)):\n break\n\n # Save latest approximation\n x = y\n fx = fy\n\n fxnorm = np.linalg.norm(fx)\n if fxnorm < eps:\n break\n\n return x", "def sqrt(x):\n return 0.0", "def newton_restart(fn, grad_fn, x_0, max_iters=20):\n y_i = fn(x_0)\n x_i = x_0\n i = 0\n while torch.abs(y_i) > 1e-6:\n i = i + 1\n grad_i = grad_fn(x_i)\n x_i = x_i - y_i / grad_i\n y_i = fn(x_i)\n\n if i == max_iters:\n return None\n\n return x_i", "def newton(backward_differences, max_num_iters, newton_coefficient, ode_fn_vec,\n order, step_size, time, tol, unitary, upper):\n initial_guess = tf.reduce_sum(\n tf1.where(\n tf.range(MAX_ORDER + 1) <= order,\n backward_differences[:MAX_ORDER + 1],\n tf.zeros_like(backward_differences)[:MAX_ORDER + 1]),\n axis=0)\n\n np_dtype = np_dtype = dtype_util.as_numpy_dtype(backward_differences.dtype)\n\n rhs_constant_term = newton_coefficient * tf.reduce_sum(\n tf1.where(\n tf.range(1, MAX_ORDER + 1) <= order,\n RECIPROCAL_SUMS[1:, np.newaxis].astype(np_dtype) *\n backward_differences[1:MAX_ORDER + 1],\n 
tf.zeros_like(backward_differences)[1:MAX_ORDER + 1]),\n axis=0)\n\n next_time = time + step_size\n step_size_cast = tf.cast(step_size, backward_differences.dtype)\n real_dtype = tf.abs(backward_differences).dtype\n\n def newton_body(iterand):\n \"\"\"Performs one iteration of Newton's method.\"\"\"\n next_backward_difference = iterand.next_backward_difference\n next_state_vec = iterand.next_state_vec\n\n rhs = newton_coefficient * step_size_cast * ode_fn_vec(\n next_time,\n next_state_vec) - rhs_constant_term - next_backward_difference\n delta = tf.squeeze(\n tf.linalg.triangular_solve(\n upper,\n tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]),\n lower=False))\n num_iters = iterand.num_iters + 1\n\n next_backward_difference += delta\n next_state_vec += delta\n\n delta_norm = tf.cast(tf.norm(delta), real_dtype)\n lipschitz_const = delta_norm / iterand.prev_delta_norm\n\n # Stop if method has converged.\n approx_dist_to_sol = lipschitz_const / (1. - lipschitz_const) * delta_norm\n close_to_sol = approx_dist_to_sol < tol\n delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype))\n converged = close_to_sol | delta_norm_is_zero\n finished = converged\n\n # Stop if any of the following conditions are met:\n # (A) We have hit the maximum number of iterations.\n # (B) The method is converging too slowly.\n # (C) The method is not expected to converge.\n too_slow = lipschitz_const > 1.\n finished = finished | too_slow\n if max_num_iters is not None:\n too_many_iters = tf.equal(num_iters, max_num_iters)\n num_iters_left = max_num_iters - num_iters\n num_iters_left_cast = tf.cast(num_iters_left, real_dtype)\n wont_converge = (\n approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol)\n finished = finished | too_many_iters | wont_converge\n\n return [\n _NewtonIterand(\n converged=converged,\n finished=finished,\n next_backward_difference=next_backward_difference,\n next_state_vec=next_state_vec,\n num_iters=num_iters,\n prev_delta_norm=delta_norm)\n ]\n\n iterand = _NewtonIterand(\n converged=False,\n finished=False,\n next_backward_difference=tf.zeros_like(initial_guess),\n next_state_vec=tf.identity(initial_guess),\n num_iters=0,\n prev_delta_norm=tf.constant(np.array(-0.), dtype=real_dtype))\n [iterand] = tf.while_loop(lambda iterand: tf.logical_not(iterand.finished),\n newton_body, [iterand])\n return (iterand.converged, iterand.next_backward_difference,\n iterand.next_state_vec, iterand.num_iters)", "def newton_method_bidirectional(f, bnd1, bnd2, Ep, step):\n\n while True:\n step = step + 1\n\n # print(\"bnd1=\",bnd1,\" and bnd2=\",bnd2)\n\n h_bnd1 = f(bnd1) / derivative(f, bnd1)\n bnd1 = bnd1 - h_bnd1\n if (decide(abs(h_bnd1) <= Ep)):\n # print(\"Root in Approximation: \",bnd1)\n return step\n\n h_bnd2 = f(bnd2) / derivative(f, bnd2)\n bnd2 = bnd2 - h_bnd2\n if (decide(abs(h_bnd2) <= Ep)):\n # print(\"Root in Approximation: \",bnd2)\n return step", "def test_find_root_uni(self):\n\n print(\"\\nNewton-Raphson Method: Univariate\")\n\n # input\n xi = 1 # initial value\n et = 1e-6 # relative error threshold\n imax = 1e6 # maximum number of iterations to consider\n\n # function\n root, ic, msg = NewtonRaphson.find_root_uni(self.fn, self.df, xi, et, imax)\n\n # output\n if root != None:\n print(\"\\tInitial value: {xi}\\n\\tRoot: {x}\\n\\tFunction Value: {fx}\\n\\tIterations: {ic}\".format(xi=xi, x=root, fx=self.fn(root), ic=ic))\n else:\n print(\"\\t{msg}.\".format(msg=msg))", "def find_root_1(x, n, p=0.001):\n step = p / 10\n guess = step\n while abs(guess ** n 
- x) > p:\n guess += step\n return round(guess, 3)", "def newtons_method(function, start, epsilon_rounding=6):\n point = start\n\n f = get_gradient(function)\n jacobian_matrix = get_jacobian(f)\n inverse_jacobian = jacobian_matrix.inv()\n\n f_subs = gradient_subs(f, point)\n\n temp = [0, 0]\n\n points = [point]\n while temp != point:\n jacobian_subs_matrix = matrix_subs(jacobian_matrix, point)\n inverse_subs_jacobian = matrix_subs(inverse_jacobian, point)\n negative_gradient = Matrix([-x for x in f_subs])\n solution = Ax_b(jacobian_subs_matrix, negative_gradient)\n temp = [round(float(x), epsilon_rounding) for x in point]\n point = [a + b for a, b in zip(solution, point)]\n point = [round(float(x), epsilon_rounding) for x in point]\n points.append(point)\n f_subs = gradient_subs(f, point)\n new_minimum = [float(x) for x in point]\n\n return new_minimum, points, f\"The minimum is {new_minimum}, with a starting point of {start}\"", "def test_basic_newton_finder(self):\n forwards = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n strikes = np.array([1.0, 2.0, 1.0, 0.5, 1.0, 1.0])\n expiries = np.array([1.0, 1.0, 1.0, 1.0, 0.5, 2.0])\n discounts = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n init_vols = np.array([2.0, 0.5, 2.0, 0.5, 1.5, 1.5])\n option_signs = np.array([1.0, 1.0, -1.0, -1.0, 1.0, 1.0])\n volatilities = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])\n prices = np.array([\n 0.38292492, 0.19061012, 0.38292492, 0.09530506, 0.27632639, 0.52049988\n ])\n results = implied_vol(\n forwards,\n strikes,\n expiries,\n discounts,\n prices,\n option_signs,\n initial_volatilities=init_vols,\n max_iterations=100)\n implied_vols, converged, failed = self.evaluate(results)\n num_volatilities = len(volatilities)\n self.assertAllEqual(np.ones(num_volatilities, dtype=np.bool), converged)\n self.assertAllEqual(np.zeros(num_volatilities, dtype=np.bool), failed)\n self.assertArrayNear(volatilities, implied_vols, 1e-7)", "def optimal_alpha(f, x0, Df, tol=1e-5, maxiter=15):\r\n #initialize alphas to check\r\n alphas = np.linspace(.001,1,100, endpoint=True)\r\n results = []\r\n for a in alphas:\r\n #run newton's method for all alphas\r\n new = newton(f,x0,Df,tol,maxiter,a)\r\n results.append(list(new))\r\n #look at just the iterations\r\n iters = np.array(results)[:,2]\r\n #plot graph\r\n plt.plot(alphas,iters)\r\n plt.xlabel('alpha')\r\n plt.ylabel('iterations')\r\n plt.title('Newton\\'s Method Comparisons')\r\n plt.show()\r\n #find index of least iterations\r\n smallest = np.argmin(iters)\r\n return alphas[smallest]", "def find_root(f, df, ddf, initial_guess = 0.0, limit = 0.00001, max_iterations = 1000):\n xn_1 = initial_guess\n i = 0\n while i < max_iterations:\n fx = f(xn_1)\n dfx = df(xn_1)\n ddfx = ddf(xn_1)\n xn = xn_1 - 2 * fx * dfx / (2 * dfx ** 2 - fx * ddfx)\n if abs(xn - xn_1) < limit:\n return xn\n xn_1 = xn\n i += 1\n return None", "def actual_root(x):\n root = x ** (1/n)\n\tprint(x)\n return root", "def find_zero(f, df):\n def near_zero(x):\n return approx_eq(f(x), 0)\n return improve(newton_update(f, df), near_zero)", "def newtons_method(expr, wrt, atol=1e-12, delta=None, *, rtol=4e-16, debug=False,\n itermax=None, counter=None, delta_fn=lambda e, x: -e/e.diff(x),\n cse=False, handle_nan=None,\n bounds=None):\n\n if delta is None:\n delta = Dummy()\n Wrapper = Scope\n name_d = 'delta'\n else:\n Wrapper = lambda x: x\n name_d = delta.name\n\n delta_expr = delta_fn(expr, wrt)\n if cse:\n from sympy.simplify.cse_main import cse\n cses, (red,) = cse([delta_expr.factor()])\n whl_bdy = [Assignment(dum, 
sub_e) for dum, sub_e in cses]\n whl_bdy += [Assignment(delta, red)]\n else:\n whl_bdy = [Assignment(delta, delta_expr)]\n if handle_nan is not None:\n whl_bdy += [While(isnan(delta), CodeBlock(handle_nan, break_))]\n whl_bdy += [AddAugmentedAssignment(wrt, delta)]\n if bounds is not None:\n whl_bdy += [Assignment(wrt, Min(Max(wrt, bounds[0]), bounds[1]))]\n if debug:\n prnt = Print([wrt, delta], r\"{}=%12.5g {}=%12.5g\\n\".format(wrt.name, name_d))\n whl_bdy += [prnt]\n req = Gt(Abs(delta), atol + rtol*Abs(wrt))\n declars = [Declaration(Variable(delta, type=real, value=oo))]\n if itermax is not None:\n counter = counter or Dummy(integer=True)\n v_counter = Variable.deduced(counter, 0)\n declars.append(Declaration(v_counter))\n whl_bdy.append(AddAugmentedAssignment(counter, 1))\n req = And(req, Lt(counter, itermax))\n whl = While(req, CodeBlock(*whl_bdy))\n blck = declars\n if debug:\n blck.append(Print([wrt], r\"{}=%12.5g\\n\".format(wrt.name)))\n blck += [whl]\n return Wrapper(CodeBlock(*blck))", "def nthRoot(x,n):\n return op.pow(x,1/n)", "def brents(f, x0, x1, max_iter=50, tolerance=1e-5):\n \n fx0 = f(x0)\n fx1 = f(x1)\n \n assert (fx0 * fx1) <= 0, \"Root not bracketed\" \n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n fx0, fx1 = fx1, fx0\n \n x2, fx2 = x0, fx0\n \n d = np.nan\n mflag = True\n steps_taken = 0\n \n while steps_taken < max_iter and abs(x1-x0) > tolerance:\n fx0 = f(x0)\n fx1 = f(x1)\n fx2 = f(x2)\n \n if fx0 != fx2 and fx1 != fx2:\n L0 = (x0 * fx1 * fx2) / ((fx0 - fx1) * (fx0 - fx2))\n L1 = (x1 * fx0 * fx2) / ((fx1 - fx0) * (fx1 - fx2))\n L2 = (x2 * fx1 * fx0) / ((fx2 - fx0) * (fx2 - fx1))\n new = L0 + L1 + L2\n \n else:\n new = x1 - ( (fx1 * (x1 - x0)) / (fx1 - fx0) )\n \n tt1 = (new < ((3 * x0 + x1) / 4) or new > x1)\n tt2 = (mflag == True and (abs(new - x1)) >= (abs(x1 - x2) / 2))\n tt3 = (mflag == False and (abs(new - x1)) >= (abs(x2 - d) / 2))\n tt4 = (mflag == True and (abs(x1 - x2)) < tolerance)\n tt5 = (mflag == False and (abs(x2 - d)) < tolerance)\n if (tt1 or\n tt2 or\n tt3 or\n tt4 or\n tt5):\n new = (x0 + x1) / 2\n mflag = True\n \n else:\n mflag = False\n \n fnew = f(new)\n d, x2 = x2, x1\n \n if (fx0 * fnew) < 0:\n x1 = new\n else:\n x0 = new\n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n \n steps_taken += 1\n \n return x1, steps_taken", "def newton_direction(\n x,\n problem: MinimizationProblem,\n prev_state: IterationState):\n\n hessian = problem.calc_hessian_at(x)\n grad = problem.calc_gradient_at(x)\n\n p = -solve(hessian, grad,\n problem.settings.cholesky_linear_systems_solver_enabled,\n problem.settings.gaussian_elimination_linear_systems_solver_enabled)\n\n return IterationState(x, p, grad)", "def newton(*args, attenuation: Union[float, bool]=0.0, magnitude: Union[float, bool]=0.0,\n maxDistance: Union[float, bool]=0.0, minDistance: Union[float, bool]=0.0, name:\n Union[AnyStr, bool]=\"\", perVertex: bool=True, position: Union[List[float, float,\n float], List[List[float, float, float]], bool]=None, torusSectionRadius: Union[float,\n bool]=0.0, volumeExclusion: bool=True, volumeOffset: Union[List[float, float, float],\n bool]=None, volumeShape: Union[AnyStr, bool]=\"\", volumeSweep: Union[float, bool]=0.0,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def gauss_newton(sys, x0, tol = 1e-10, maxits = 256):\n\n\n dx = np.ones(len(x0)) # Correction vector\n xn = np.array(x0) # Approximation of solution\n\n i = 0\n while (i < maxits) and (dx[dx > tol].size > 0):\n # correction = pinv(jacobian) . 
residual vector\n dx = np.dot(np.linalg.pinv(sys.jacobian(xn)), -sys.residuals(xn))\n xn += dx # x_{n + 1} = x_n + dx_n\n i += 1\n\n return xn, i", "def sqrtm_newton_schulz(A, numIters, reg=None, return_error=False, return_inverse=False):\n if A.ndim <= 2: # Non-batched mode\n A = A.unsqueeze(0)\n batched = False\n else:\n batched = True\n batchSize = A.shape[0]\n dim = A.shape[1]\n normA = (A**2).sum((-2,-1)).sqrt() # Slightly faster than : A.mul(A).sum((-2,-1)).sqrt()\n\n if reg:\n ## Renormalize so that the each matrix has a norm lesser than 1/reg,\n ## but only normalize when necessary\n normA *= reg\n renorm = torch.ones_like(normA)\n renorm[torch.where(normA > 1.0)] = normA[cp.where(normA > 1.0)]\n else:\n renorm = normA\n\n Y = A.div(renorm.view(batchSize, 1, 1).expand_as(A))\n I = torch.eye(dim,dim).view(1, dim, dim).repeat(batchSize,1,1).to(A.device)#.type(dtype)\n Z = torch.eye(dim,dim).view(1, dim, dim).repeat(batchSize,1,1).to(A.device)#.type(dtype)\n for i in range(numIters):\n T = 0.5*(3.0*I - Z.bmm(Y))\n Y = Y.bmm(T)\n Z = T.bmm(Z)\n sA = Y*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(A)\n sAinv = Z/torch.sqrt(normA).view(batchSize, 1, 1).expand_as(A)\n if not batched:\n sA = sA[0,:,:]\n sAinv = sAinv[0,:,:]\n\n if not return_inverse and not return_error:\n return sA\n elif not return_inverse and return_error:\n return sA, compute_error(A, sA)\n elif return_inverse and not return_error:\n return sA,sAinv\n else:\n return sA, sAinv, compute_error(A, sA)", "def sqrt(x):\n # lets check that x is positive\n if x < 0:\n print(\"Error: negative value was supplied\")\n return -1\n\n\n # Initial guess for the square root \n z = x / 2.0 \n \n # Continuously improve the guess.\n while abs(x - (z*z)) > 0.01: \n z = z - (((z*z) - x) / (2*z))\n \n return z", "def newton_body(iterand):\n next_backward_difference = iterand.next_backward_difference\n next_state_vec = iterand.next_state_vec\n\n rhs = newton_coefficient * step_size_cast * ode_fn_vec(\n next_time,\n next_state_vec) - rhs_constant_term - next_backward_difference\n delta = tf.squeeze(\n tf.linalg.triangular_solve(\n upper,\n tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]),\n lower=False))\n num_iters = iterand.num_iters + 1\n\n next_backward_difference += delta\n next_state_vec += delta\n\n delta_norm = tf.cast(tf.norm(delta), real_dtype)\n lipschitz_const = delta_norm / iterand.prev_delta_norm\n\n # Stop if method has converged.\n approx_dist_to_sol = lipschitz_const / (1. 
- lipschitz_const) * delta_norm\n close_to_sol = approx_dist_to_sol < tol\n delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype))\n converged = close_to_sol | delta_norm_is_zero\n finished = converged\n\n # Stop if any of the following conditions are met:\n # (A) We have hit the maximum number of iterations.\n # (B) The method is converging too slowly.\n # (C) The method is not expected to converge.\n too_slow = lipschitz_const > 1.\n finished = finished | too_slow\n if max_num_iters is not None:\n too_many_iters = tf.equal(num_iters, max_num_iters)\n num_iters_left = max_num_iters - num_iters\n num_iters_left_cast = tf.cast(num_iters_left, real_dtype)\n wont_converge = (\n approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol)\n finished = finished | too_many_iters | wont_converge\n\n return [\n _NewtonIterand(\n converged=converged,\n finished=finished,\n next_backward_difference=next_backward_difference,\n next_state_vec=next_state_vec,\n num_iters=num_iters,\n prev_delta_norm=delta_norm)\n ]", "def test_kempton_taylor_q(self):\n c = array([2,3,3,3,3,3,4,4,4,6,6,7,7,9,9,11,14,15,15,20,29,33,34,\n 36,37,53,57,138,146,170])\n self.assertFloatEqual(kempton_taylor_q(c), 14/log(34/4))", "def sqrt(x):\n def good_enough(guess):\n precision = 0.001\n f = abs(guess ** 2 - x)\n return (f < precision)\n \n def improve(guess):\n return (guess + x/guess) / 2.0\n \n counter = 1\n guess = 1\n while not good_enough(guess) and counter <= 100:\n guess = improve(guess)\n counter += 1\n assert counter <= 100,'100 iterations done and no good answer' \n return int(guess)", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)\n\n return solver", "def compute_root(poly, x_0, epsilon):\n # TO DO ...\n diff = evaluate_poly(poly, x_0)\n count = 0\n\n if abs(diff) > epsilon:\n #Newton's Method Formula\n x_1 = x_0 - (evaluate_poly(poly, x_0) / evaluate_poly(compute_deriv(poly), x_0))\n #Recursion!\n x_0 , count = compute_root(poly, x_1, epsilon)\n else:\n pass\n\n return x_0, count + 1", "def sqrt(self, a):\n raise NotImplementedError", "def test_too_low_max_iterations(self):\n\n # Set up the problem of finding the square roots of three numbers.\n constants = np.array([4.0, 9.0, 16.0])\n initial_values = np.ones(len(constants))\n\n def objective_and_gradient(values):\n objective = values**2 - constants\n gradient = 2.0 * values\n return objective, gradient\n\n # Obtain and evaluate a tensor containing the roots.\n roots = newton_root_finder(\n objective_and_gradient, initial_values, max_iterations=1)\n _, converged, failed = self.evaluate(roots)\n\n # Reference values - we should neither have converged nor 
failed.\n converged_bench = np.array([False, False, False])\n failed_bench = np.array([False, False, False])\n\n # Assert that the values we obtained are close to the true values.\n np.testing.assert_array_equal(converged, converged_bench)\n np.testing.assert_array_equal(failed, failed_bench)", "def sqrt(a):", "def mySqrt(self, x: int) -> int:\n if x == 0:\n return 0\n d = 0.1\n y = x / 2\n z = (y + x/y) / 2\n e = abs(z-y)\n while e > d:\n y = z\n z = (y + x/y) / 2\n e = abs(z - y)\n return int(z)", "def F(x):\n soln = x - (1.0/5.0)*math.cos(10.0*x+1.0) \n return soln", "def n_root_of_x(n, x):\n if n==0:\n return 1\n \n return 1 if n==0 else x**(1.0/n)", "def sqrt(x):\n if x < 0:\n raise ValueError(f\"Cannot compute sqrt of negative number {x}\")\n guess = x\n i = 0\n while guess * guess !=x and i < 20:\n guess = (guess + x / guess) / 2.0\n i += 1\n return guess", "def newtonJacobian(self,r):\n #x_vec=np.array(r)\n x=r[0]\n y=r[1]\n jacobi=np.zeros([2,2], float)\n \n \n jacobi[0][0]=(4.0*(self.x_0-x)**2.0-2.0)*self.sfunc(x,y)\n jacobi[1][1]=(4.0*(self.y_0-y)**2.0-2.0)*self.sfunc(x,y)\n jacobi[1][0]=4.0*(self.x_0-x)*(self.y_0-y)*self.sfunc(x,y)\n jacobi[0][1]=jacobi[1][0]\n #print \"newton jacobian is \",jacobi\n try:\n return mat.inv(jacobi)\n except:\n print \"singular jacobi not invertable\"\n return 0", "def sqrt(self):\n\n\t\t# Maintain state of self and create new trace variable new_var\n\t\tnew_var = Var(self.val, self.der)\n\t\treturn new_var.__pow__(0.5)", "def sqrt(number):\n\n # if n is negative then bail out\n try:\n if number < 0:\n msg = \"\\nSquare root of a negative number is undefined.\\n\"\n raise ValueError(msg)\n except ValueError as err_msg:\n print(err_msg)\n return False\n\n # take care of the easy cases\n if number == 0 or number == 1:\n return number\n\n # num is initial value to begin with and divided by a factor\n # some guess work/experimenting here, if you divide the number in half\n # this can fail on square root of 9 and other small numbers.\n #\n # *** otherwise helps reduce the number of iterations ***\n # *** performs slightly better than binary method on large numbers ***\n if number < 9:\n num = number // 1.5\n if 9 <= number <= 15:\n num = number\n else:\n num = number // 2\n\n # initially this is 1\n # as num decreases this increases until the difference\n # between the two is less then the estimate_error\n incremental = 1\n\n # making this number too small increases iterations\n estimate_error = 1 # 0.01\n\n # not needed: used to see the effect of tweaking above variables\n iterations = 0\n\n while (num - incremental > estimate_error):\n num = (num + incremental) / 2\n incremental = number / num\n iterations += 1\n # here for testing, can watch the function converge to answer\n # print(\"num={} one={}\".format(num, incremental))\n\n #print(\"sqrt({})={}. 
Number of iterations is {}\".format(number, num//1, iterations))\n\n # floor: integer division gets rid of digits to the right of decimal pt\n return num // 1", "def newton(flogl, start, fgrad, fhess, maxiter):\r\n warn = 0\r\n iteration = 0\r\n par_hat0 = start\r\n m = 1\r\n while (iteration < maxiter and m >= 1e-04):\r\n H = -la.inv(fhess(par_hat0))\r\n g = fgrad(par_hat0).reshape(start.shape)\r\n Hg = np.dot(H, g)\r\n par_hat0 = par_hat0 + Hg\r\n iteration += 1\r\n m = np.dot(g.T, Hg)\r\n if iteration == maxiter:\r\n warn = 1\r\n logl = flogl(par_hat0)\r\n return (par_hat0, logl, warn)", "def nth_root(n):\n def actual_root(x):\n \"\"\"Returns the nth root of x\"\"\"\n root = x ** (1/n)\n\tprint(x)\n return root\n return actual_root", "def my_sqrt(x):\n square_root = x**(0.5)\n return square_root", "def line_search(solve_eq, find_jac, x, alpha=1e-4, NRtol=1e-6):\n\n print \"\\n\\tBegin Newton line search method\"\n print \"\\t------------------------------------\"\n finCond = False\n while not finCond:\n # Calculate the newton step, dx\n F_x0 = solve_eq(x)\n j_x0 = find_jac(x)\n dx = linalg.solve(j_x0, -F_x0)\n\n # Define the master function\n f_x0 = real(0.5*dot(conj(F_x0), F_x0))\n\n slope_x0dx = real(-2*f_x0) #-dot(conj(F_x0), F_x0)\n\n # Decide whether to take the Newton Step by Armijo line search method\n # First initialise variables so that first iteration happens \n lam = 1 \n lamPrev = 1 \n f_xn = f_x0 + alpha*lam*slope_x0dx + 1\n f_lam2Prev = 0 # Doesn't matter, will be set before it is used\n f_lamPrev = 0 \n counter = 0\n\n # Now choose a lambda and see if it is good.\n while f_xn >= f_x0 + alpha*lam*slope_x0dx:\n\n if counter == 1:\n # set lambda by a quadratic model for the residual master function f\n lam = - slope_x0dx / 2*(f_xn - f_x0 - slope_x0dx)\n #print \"square model lambda =\", lam\n \n # impose upper and lower bounds on lambda \n if lam > 0.5:\n lam = 0.5\n if lam < 0.1:\n lam = 0.1\n\n elif counter > 1:\n # set lambda by a cubic model for the residual master function f\n abmat = zeros((2,2))\n abmat[0,0] = 1/(lamPrev*lamPrev)\n abmat[0,1] = -1/(lam2Prev*lam2Prev)\n abmat[1,0] = -lam2Prev/(lamPrev*lamPrev)\n abmat[1,1] = lamPrev/(lam2Prev*lam2Prev)\n\n f3vec = zeros(2)\n f3vec[0] = f_lamPrev - f_x0 - slope_x0dx*lamPrev\n f3vec[1] = f_lam2Prev - f_x0 - slope_x0dx*lam2Prev\n\n abvec = (1./(lamPrev-lam2Prev)) * dot(abmat, f3vec)\n aaa = abvec[0]\n bbb = abvec[1]\n lam = (- bbb + sqrt(bbb**2 - 3*aaa*slope_x0dx)) / 3*aaa\n\n # impose upper and lower bounds on lambda \n if lam > 0.5*lamPrev:\n lam = 0.5*lamPrev\n if lam < 0.1*lamPrev:\n lam = 0.1*lamPrev\n\n #print \"cubic model lambda\", lam\n\n if lam < 1e-6:\n print \" loop counter of last step = \", counter-1\n print \"step too small, take full Newton step and hope for the best.\"\n lam = 1\n break\n\n # calculate the residual and master function so we can see if the\n # step was a good one.\n F_xn = solve_eq(x + lam*dx)\n f_xn = real(0.5*dot(conj(F_xn), F_xn))\n #print \"\"\" |F_xn| = \"\"\", linalg.norm(F_xn) \n\n # update old values for cubic method\n lam2Prev = lamPrev\n lamPrev = lam\n f_lam2Prev = f_lamPrev\n f_lamPrev = f_xn\n\n counter += 1\n \n\n # change x to the value at the step we just took\n x = x + lam*dx\n\n # Extra symmerterisation step\n x[0:vecLen] = symmetrise(x[0:vecLen])\n x[vecLen:2*vecLen] = symmetrise(x[vecLen:2*vecLen])\n x[2*vecLen:3*vecLen] = symmetrise(x[2*vecLen:3*vecLen])\n x[3*vecLen:4*vecLen] = symmetrise(x[3*vecLen:4*vecLen])\n \n # Print norm and check if we can exit yet.\n L2 = 
linalg.norm(F_xn)\n print \"\"\"|F_xn| = {0:10.5g}, |dx| = {1:10.5g}, lambda = {2}\"\"\".format(\n L2, linalg.norm(dx), lam)\n\n # Quit if L2 norm is getting huge\n if L2 > 1e50:\n print \"Error: Shooting off to infinity!\"\n exit(1)\n\n if L2 < NRtol:\n print \"Solution found!\"\n finCond = True\n\n\n PSI = xVec[0:vecLen] \n Cxx = xVec[1*vecLen:2*vecLen] \n Cyy = xVec[2*vecLen:3*vecLen] \n Cxy = xVec[3*vecLen:4*vecLen]\n pickle.dump((PSI,Cxx,Cyy,Cxy), open(outFileName, 'w'))\n\n return x", "def _nth_root(value, n_root) -> float:\r\n\r\n root_value = 1 / float(n_root)\r\n return Decimal(value) ** Decimal(root_value)", "def quadratic_root_goes_to_infinity():\n for dt in 1E-7, 1E-12, 1E-16:\n a = dt\n b = 1 - dt\n c = -0.1\n print((dt, quadratic_roots(a, b, c)))", "def integrator_system(self):\n\n xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()\n VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])\n solver = rootfinder('solver', 'newton', VV)#, {'error_on_fail':False})\n\n return solver", "def test_3(self):\n # Run Newton's method. Last starting point is wildly high. How will Newton perform??\n starting_points = (0.1, 4.0, -0.2, -0.1)\n assert len(starting_points) == 4\n\n logging.info(\"\\nRUNNING EXERCISE 1.3C\")\n newton_roots = [undertest.newton(self.func, self.derivative, x0, 50)\n for x0 in starting_points]\n\n # Run secant-based methods. Last interval has a high right endpoint. How will the algos do?\n secant_intervals = [(0.9, 10.0), (-0.2, 3.0), (0.1, 6.0), (1.9, 20.0), (20.0, 1.9)]\n assert len(secant_intervals) == 5\n logging.info(\"\\nRUNNING EXERCISE 1.3D\")\n secant_results = [undertest.secant(self.func, prev, current, self.maxit)\n for (prev, current) in secant_intervals]\n logging.info(\"\\nRUNNING EXERCISE 1.3E\")\n regula_falsi_results = [undertest.regula_falsi(self.func, prev, current, 100)\n for (prev, current) in secant_intervals]\n logging.info(\"\\nRUNNING EXERCISE 1.3F\")\n wheeler_results = [undertest.wheeler(self.func, prev, current, 20)\n for (prev, current) in secant_intervals]", "def sqrt(n):\n pass" ]
[ "0.7563373", "0.75321746", "0.7448436", "0.74475104", "0.7388106", "0.7297471", "0.7297028", "0.72617483", "0.72546536", "0.7249506", "0.72134876", "0.7199049", "0.7197292", "0.71775806", "0.71667355", "0.71341896", "0.71326596", "0.7126753", "0.7094095", "0.7091968", "0.70573956", "0.7051226", "0.7014604", "0.7011255", "0.6941722", "0.69401205", "0.6873508", "0.68681645", "0.68659025", "0.67397463", "0.66978675", "0.66938925", "0.66864926", "0.6671256", "0.6641514", "0.6587841", "0.65537745", "0.6532033", "0.65261024", "0.6465178", "0.6453985", "0.6430531", "0.6404766", "0.63604355", "0.6355746", "0.63328177", "0.62917316", "0.62810004", "0.6264913", "0.6249715", "0.6227931", "0.62038916", "0.6179996", "0.61768115", "0.617198", "0.6135978", "0.6075703", "0.6068956", "0.60588413", "0.60328114", "0.6021551", "0.59984195", "0.59904397", "0.5929856", "0.586898", "0.5858099", "0.5842043", "0.5793119", "0.57536566", "0.5745201", "0.57203686", "0.5718023", "0.5703053", "0.5681906", "0.56367147", "0.5614586", "0.55996203", "0.55996203", "0.55996203", "0.55996203", "0.5580063", "0.5580047", "0.55707914", "0.5570543", "0.5568014", "0.55665255", "0.5565699", "0.55615395", "0.55556315", "0.5542454", "0.5538477", "0.553622", "0.5525222", "0.5506521", "0.55056286", "0.54985994", "0.5496036", "0.5491466", "0.5466144", "0.5455003" ]
0.76958627
0
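The negatives above all orbit the same basic Newton update x_{n+1} = x_n - f(x_n)/f'(x_n); for reference, a minimal self-contained sketch of that iteration (the test function, tolerance, and iteration cap are illustrative, not taken from any snippet above):

def newton(f, df, x0, tol=1e-10, maxiter=50):
    """Minimal Newton iteration: x_{n+1} = x_n - f(x_n) / f'(x_n)."""
    x = x0
    for i in range(maxiter):
        step = f(x) / df(x)
        x -= step
        # The last applied step bounds the distance to the root.
        if abs(step) < tol:
            return x, i + 1
    return x, maxiter

# Example: root of x^2 - 2, starting from x0 = 1.
root, iters = newton(lambda x: x * x - 2.0, lambda x: 2.0 * x, 1.0)
print(root, iters)  # ~1.4142135623730951 in a handful of iterations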
Converts the generated fractal into an RGB image array
def _toRgbImage(self, fractal, colors, color_offset):
    """Converts the generated fractal into an RGB image array.

    `fractal` is expected to carry three planes: the real and imaginary
    parts of the converged solutions and the per-pixel iteration counts.
    (`colors` and `color_offset` are unused by this variant.)
    """
    # Scale solutions to 0..127 and iterations to 0..128 so each summed
    # channel stays within uint8 range (127 + 128 = 255).
    soln_real = adjustRange(fractal[0], 0, 127)
    soln_imag = adjustRange(fractal[1], 0, 127)
    iters = adjustRange(fractal[2], 0, 128)

    rgb_image = np.array([
        soln_real + iters,
        soln_imag + iters,
        iters
    ]).astype(dtype=np.uint8)

    # Reverse the axis order so the colour channel comes last,
    # (3, H, W) -> (W, H, 3), as image consumers expect.
    return rgb_image.T
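A minimal usage sketch for the document above, assuming `fractal` is a 3-plane float array (solution real part, solution imaginary part, iteration counts) and inlining the `adjustRange` helper that appears as the next record's document; the random input and 64x64 shape are illustrative only:

import numpy as np

def adjustRange(a, vmin=0, vmax=255):
    # Same max-normalising helper used by _toRgbImage (see the next record).
    return (a.astype(float) / np.nanmax(a) * (vmax - vmin) + vmin).astype(np.uint8)

# Hypothetical fractal buffer: plane 0/1 hold the real/imaginary parts of
# the converged roots, plane 2 the per-pixel iteration counts.
fractal = np.random.rand(3, 64, 64)

soln_real = adjustRange(fractal[0], 0, 127)
soln_imag = adjustRange(fractal[1], 0, 127)
iters = adjustRange(fractal[2], 0, 128)

# Summing a 0..127 plane with a 0..128 plane stays within uint8 (max 255).
rgb_image = np.array([soln_real + iters, soln_imag + iters, iters]).astype(np.uint8)
print(rgb_image.T.shape)  # (64, 64, 3): rows, columns, RGB channels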
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def _toRgbImage(self, fractal, colors, color_offset):\n hsv_img = np.array(\n [\n # Cycle through color wheel.\n (fractal * colors + color_offset) % 1,\n\n # Saturation = fractal value.\n fractal,\n\n # Maximum value.\n np.ones(fractal.shape)\n ]\n ).astype(dtype=float).T\n\n rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)\n return rgb_img", "def _toRgbImage(self, fractal, colors, color_offset):\n hsv_img = np.array(\n [\n # Cycle through color wheel.\n (fractal * colors + color_offset) % 1,\n\n # Saturation = 1 where fractal values > 0,\n # Saturation = 0 otherwise.\n fractal.astype(dtype=bool).astype(dtype=float),\n\n # Invert colours\n 1 - fractal\n ]\n ).astype(dtype=float).T\n\n rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)\n return rgb_img", "def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c", "def reconstructImage(self,arr):\n\t\tarr = arr * 256\n\t\tarr = np.array(np.round(arr),dtype=np.uint8)\n\t\t#arr = np.array(arr,dtype=np.uint8)\n\n\t\t# We need to transpose the array because we flatten X by columns\n\t\t#arr = arr.T\n\t\t#a = arr.reshape((self.width, self.height,3))\n\t\t\n\t\tif self.mode == 'L':\n\t\t\ta = arr.reshape((self.width, self.height))\n\t\telse:\n\t\t\ta = arr.reshape((self.width, self.height,3))\n\n\t\t#a = arr.reshape((3,self.width, self.height))\t\t\n\t\t#a = arr.transpose(0, 3, 1, 2)\n\n\t\tim = Image.fromarray(a,mode=self.mode)\n\n\t\treturn im", "def carla_rgb_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.Raw) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array", "def generate_channels(path):\n # Abrir imagen y transformar a array\n image = Image.open(path)\n img_array = np.array(image)\n \n # Sacar RGB\n R = img_array[..., 0]\n G = img_array[..., 1]\n B = img_array[..., 2]\n \n return (R, G, B)", "def grey_to_rgb_imitation(img):\n return np.repeat(img[...,np.newaxis], 3, -1)", "def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img", "def img_to_rgb(img):\r\n if len(img.shape) < 3 or img.shape[2] == 1:\r\n return np.repeat(img, 3).reshape(img.shape[0], img.shape[1], 3)\r\n else:\r\n return img", "def GetRGBArray(self, p_int):\n ...", "def reveal_RGB_image(filename):\n\tnew_array = [[], [], []]\n\tim = Image.open(filename)\n\tpixels = convert_image_to_pixels(filename) # get RGB array\n\tfor pixel in pixels: # get tuple of RGB\n\t\tfor x in range(3): # get R, G, B lists\n\t\t\tnew_array[x].append(85 * (pixel[x] & 3)) # change 0-3 to 0-255\n\t\t# get hidden 2 least significant bits\n\tfinal_array = list(zip(new_array[0], new_array[1], new_array[2]))\n\t# create a new image container in RGB mode,\n\t# and import array pixels data into the container\n\treturn convert_pixels_to_image(final_array, im.size)", "def get_image():\n image_response = 
client.simGetImages([airsim.ImageRequest(\"0\", airsim.ImageType.Scene, False, False)])[0]\n image1d = np.fromstring(image_response.image_data_uint8, dtype=np.uint8)\n image_rgba = image1d.reshape(image_response.height, image_response.width, 4)\n return image_rgba[78:144,1:255,0:3].astype(float)\n # return image_rgba[78:144,76:255,0:3].astype(float)", "def get_image(self):\n image = np.frombuffer(self.image, dtype=np.uint8)\n return image.reshape(*self.size, self.channels)", "def generate_array_image(R, G, B, height, width):\n R = R.reshape((height, width))\n G = G.reshape((height, width))\n B = B.reshape((height, width))\n \n return np.moveaxis(np.array([R, G, B]), 0, -1)", "def img_to_array(img, path=True):\n global width, height\n\n if path:\n img = Image.open(img)\n img_arr = np.array(img) / 255.0\n img_arr = img_arr.reshape(width, height, channels)\n \n return img_arr", "def data_to_bytescale_rgb(data): # used to create the SOURCE PNGs (MRI, FA, MD)\n im = bytescale(data)\n w, h = im.shape\n ret = np.empty((w,h,3), dtype=np.uint8)\n ret[:,:,0] = im\n ret[:,:,1] = im\n ret[:,:,2] = im\n return ret", "def get_img_array(myzipfile, imgid, shape=(299,299)):\n img_arr = np.zeros(shape=(512, 512, 3), dtype=np.float32)\n img_green = Image.open(myzipfile.open(f'{imgid}_green.png'))\n img_blue = Image.open(myzipfile.open(f'{imgid}_blue.png'))\n img_red = Image.open(myzipfile.open(f'{imgid}_red.png'))\n img_yellow = Image.open(myzipfile.open(f'{imgid}_yellow.png'))\n img_arr[:,:,0] = np.divide(np.array(img_green), 255)\n img_arr[:,:,1] = np.divide(np.array(img_blue), 255)/2 + np.divide(np.array(img_yellow), 255)/2\n img_arr[:,:,2] = np.divide(np.array(img_red), 255)/2 + np.divide(np.array(img_red), 255)/2\n img_arr = cv2.resize(img_arr, shape)\n return img_arr", "def imageToArray(i):\r\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\r\n a.shape=i.im.size[1], i.im.size[0]\r\n return a", "def bgr_to_rgb(ims):\n out = []\n for im in ims:\n out.append(im[:,:,::-1])\n return out", "def load_image_as_rgb(image_path):\n im = imageio.imread(image_path)\n y_size = im.shape[0]\n x_size = im.shape[1]\n logging.info(\"Image has dimensions X:%d Y:%d\" % (x_size, y_size))\n arr = np.zeros((im.shape[0],im.shape[1]), dtype=int)\n i = 0\n for im_row in im:\n j = 0\n for vec in im_row:\n arr[i,j] = rgb_vec_to_num(vec)\n j = j + 1\n i = i + 1\n return arr", "def _preprocess(self, image):\n\n # Scale from [0, 255] to [0, 1] and BGR to RGB \n return (image / 255.0)[:, :, ::-1]", "def to_image(x):\n x = denorm(x.data.cpu())\n ndarr = x.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = ndarr\n return im", "def generate_lut(self):\n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r,g,b", "def read_color_image(path):\n with open(path, 'rb') as f:\n img = Image.fromarray(read_ppm(f), mode='RGB')\n img = tf.keras.preprocessing.image.img_to_array(img, dtype=int)\n img = tf.convert_to_tensor(img)\n return img", "def get_rendered_image(self) -> np.ndarray:\n return np.transpose(self.state['observation'], [1, 2, 0])", "def image2array(filename, shape=None):\n # Open the image and change it to black and white\n im = Image.open(filename).convert('1', dither=Image.NONE)\n\n im = im.resize(shape, Image.ANTIALIAS)\n pattern = np.array(im)\n \n return pattern", "def imageprepare():\r\n file_name = '9-test.png'\r\n im = 
Image.open(file_name).convert('L')\r\n\r\n im.save(\"9-t.png\")\r\n plt.imshow(im)\r\n plt.show()\r\n tv = list(im.getdata())\r\n\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n return tva", "def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg", "def get_image(image_path):\r\n image = Image.open(image_path, 'r')\r\n width, height = image.size\r\n pixel_values = list(image.getdata())\r\n if image.mode == 'RGB':\r\n channels = 3\r\n elif image.mode == 'L':\r\n channels = 1\r\n else:\r\n print(\"Unknown mode: %s\" % image.mode)\r\n return None\r\n pixel_values = np.array(pixel_values).reshape((1,width, height, channels))\r\n # print(pixel_values.shape)\r\n return pixel_values", "def red_channel(img):\n\n red = np.zeros(img.shape,dtype=float)\n\n red[:,:,2] = np.copy(img[:,:,2])\n\n return red", "def Array2PIL(a,lut=None,minvalue=None,maxvalue=None,width=None,height=None,\n flip=None):\n import Image # we only need it here ...\n\n if flip==\"ud\": #up-down exchange\n a=a[::-1,:]\n h,w=Numeric.shape(a)\n## a_min=Numeric.minimum.reduce((Numeric.ravel(a)))\n## a_max=Numeric.maximum.reduce((Numeric.ravel(a)))\n a_min=min(Numeric.ravel(a))\n a_max=max(Numeric.ravel(a))\n\n # allow for an user-specified maximal value:\n if maxvalue!=None and maxvalue>a_max:\n a_max=maxvalue\n # allows for an user-specified minimal value:\n if minvalue!=None and minvalue<a_min:\n a_min=minvalue\n\n if lut is not None:\n if len(lut[0]) == 256:\n \n a=(Numeric.ravel(255.0*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt8)\n\n rgb=Numeric.zeros( (len(a),3),typecode=Numeric.UInt8)\n\n\n lut_=Numeric.zeros( (3,len(lut[0])),Numeric.UInt8)\n lut_[0]=lut[0].astype(Numeric.UInt8)\n lut_[1]=lut[1].astype(Numeric.UInt8)\n lut_[2]=lut[2].astype(Numeric.UInt8)\n\n # This is much faster than the original zip/ravel variant ...\n rgb[:,0]=Numeric.take(lut_[0],a)\n #print \"rtake\"\n rgb[:,1]=Numeric.take(lut_[1],a)\n #print \"gtake\"\n rgb[:,2]=Numeric.take(lut_[2],a)\n #print \"btake\"\n #rgb=Numeric.ravel(((Numeric.array(zip(r,g,b),\n # typecode=Numeric.UInt8))))\n\n #print \"rgb done\"\n else:\n N = len(lut[0])\n print \"LUT with N=%d entries\" % N\n if N>=256*256:\n print \"UUPS, more than uint16 colors??\", N\n raise ValueError(\"N too large\")\n \n a = (Numeric.ravel((N-1)*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt16)\n\n rgb = Numeric.zeros( (len(a), 3), typecode=Numeric.UInt16)\n\n lut_ = Numeric.zeros( (3,len(lut[0])), Numeric.UInt16)\n lut_[0] = lut[0].astype(Numeric.UInt16)\n lut_[1] = lut[1].astype(Numeric.UInt16)\n lut_[2] = lut[2].astype(Numeric.UInt16)\n\n # This is much faster than the original zip/ravel variant ...\n rgb[:,0] = Numeric.take(lut_[0],a)\n rgb[:,1] = Numeric.take(lut_[1],a)\n rgb[:,2] = Numeric.take(lut_[2],a)\n\n rgb = (rgb*256.0/N).astype(Numeric.UInt8)\n\n else: # simple grey scale ramp...\n a=(Numeric.ravel(255.0*(a-a_min)/\n (a_max-a_min))).astype(Numeric.UInt8)\n # convert to (r_0,g_0,b_0,r_1,g_1,b_1,....)\n rgb=Numeric.ravel(Numeric.array(zip(a,a,a)))\n\n # create a PIL RGB image\n #print \"w/h\",w,h\n im=Image.new(\"RGB\",(w,h))\n #print \"imfromstring:\"\n im.fromstring(rgb.tostring())\n #print \"done ...\"\n \n # scale image ?\n if height!=None and width==None:\n 
im=im.resize(w/h*height,height)\n elif height==None and width!=None:\n im=im.resize(width,h/w*width)\n elif height!=None and width!=None:\n im=im.resize(width,height)\n\n return(im)", "def carla_cityscapes_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.CityScapesPalette) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array", "def _images(path):\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def jpg2rgb(image_data: bytes) -> np.ndarray:\n\n im = Image.open(io.BytesIO(image_data))\n im = im.convert(\"RGB\")\n im = im.resize((96, 96))\n data = np.array(im)\n\n data = rgb2gray(data)\n\n return data", "def convert_image_np(inp):\n inp = inp.numpy().transpose((1, 2, 0))\n inp = (inp*255).astype(np.uint8)\n return inp", "def get_BGR_img(self):\n img = self.img.copy()\n # Convert BGR to HSV\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # define range of BGR color in HSV\n threshold_blue = np.array([[100,43,46], [124,255,255]])\n threshold_green = np.array([[35,43,46], [77,255,255]])\n threshold_red1 = np.array([[0,43,46], [10,255,255]])\n threshold_red2 = np.array([[156,43,46], [180,255,255]])\n # Threshold the HSV image to get only BGR colors\n mask_blue = cv2.inRange(hsv, threshold_blue[0], threshold_blue[1])\n mask_green = cv2.inRange(hsv, threshold_green[0], threshold_green[1])\n mask_red1 = cv2.inRange(hsv, threshold_red1[0], threshold_red1[1])\n mask_red2 = cv2.inRange(hsv, threshold_red2[0], threshold_red2[1])\n mask_red = mask_red1 | mask_red2\n # Bitwise-AND mask and original image\n self.blue = cv2.bitwise_and(img, img, mask=mask_blue)\n self.green = cv2.bitwise_and(img, img, mask=mask_green)\n self.red = cv2.bitwise_and(img, img, mask=mask_red)\n # 返回 bgr 三通道的分量合成的图片\n return np.stack((self.blue[:, :, 0], self.green[:, :, 1], self.red[:, :, 2]), axis=2)", "def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))", "def imageprepare():\r\n file_name = 'temp_image.png'\r\n im = Image.open(file_name).convert('L')\r\n im = im.resize((20, 20))\r\n p = Image.new('L', (28,28), (255))\r\n p.paste(im,(4,4,24,24))\r\n p.save(\"last_image.png\")\r\n\r\n tv = list(p.getdata()) # get pixel values\r\n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n tva = np.reshape(tva, (28, 28))\r\n\r\n return tva", "def get_np_image(self, save_image=False, filename=\"curr_image.png\"):\n responses = client.simGetImages([airsim.ImageRequest(\"front_left\", airsim.ImageType.Scene, False, False)])\n response = responses[0]\n\n # get numpy array\n img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8)\n\n # reshape array to 4 channel image array H X W X 4\n img_rgb = img1d.reshape(response.height, response.width, 3)\n\n # # original image is fliped vertically\n # img_rgb = np.flipud(img_rgb)\n\n if save_image:\n cv2.imwrite(filename, img_rgb)\n\n return img_rgb", "def _arr_to_img(arr, verbose=False):\n return Image.fromarray(arr)", "def _pillow2array(img, flag='color', channel_order='bgr'):\n channel_order = channel_order.lower()\n if channel_order not in ['rgb', 'bgr']:\n raise ValueError('channel order must be either \"rgb\" or \"bgr\"')\n\n if flag == 'unchanged':\n array = np.array(img)\n if array.ndim >= 3 and array.shape[2] >= 3: # color image\n array[:, :, :3] = array[:, :, (2, 1, 0)] # RGB to BGR\n else:\n # If the image mode is not 'RGB', convert it to 'RGB' first.\n if img.mode != 'RGB':\n if img.mode != 'LA':\n # Most formats except 'LA' can be directly converted to RGB\n img = img.convert('RGB')\n else:\n # When the mode is 'LA', the default conversion will fill in\n # the canvas with black, which sometimes shadows black objects\n # in the foreground.\n #\n # Therefore, a random color (124, 117, 104) is used for canvas\n img_rgba = img.convert('RGBA')\n img = Image.new('RGB', img_rgba.size, (124, 117, 104))\n img.paste(img_rgba, mask=img_rgba.split()[3]) # 3 is alpha\n if flag == 'color':\n array = np.array(img)\n if channel_order != 'rgb':\n array = array[:, :, ::-1] # RGB to BGR\n elif flag == 'grayscale':\n img = img.convert('L')\n array = np.array(img)\n else:\n raise ValueError(\n 'flag must be \"color\", \"grayscale\" or \"unchanged\", '\n f'but got {flag}')\n return array", "def get_image_array(self):\n with picamera.array.PiRGBArray(self.camera) as output:\n self.camera.resolution = (640, 480)\n self.camera.capture(output, 'rgb')\n logging.info(\"Captured image of size {0}x{1}x{2}\".format(\n output.array.shape[0], output.array.shape[1], output.array.shape[2]))\n output.truncate(0)\n return output.array\n # self.camera.capture_continuous(self.stream, format='jpeg', use_video_port=True)\n # self.stream.seek(0)\n # image = Image.open(self.stream).convert('RGB').resize((self._input_width, self._input_height), Image.ANTIALIAS)\n # self.stream.seek(0)\n # self.stream.truncate()\n # self.camera.close()", "def _load(self) -> np.ndarray:\n with self._fs.open(self._filepath, mode=\"r\") as f:\n image = Image.open(f).convert(\"RGBA\")\n return np.asarray(image)", "def _grey_img_to_arr(image, verbose=False):\n try:\n w, h = image.size\n arr = np.array(image.getdata())\n arr = _rgb_to_grey(arr, (h, w), verbose=verbose)\n if verbose:\n print(\"Converted from RGB to grayscale\")\n except:\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(height, width)\n return arr", "def create_colorful_test_image(self):\n ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)\n ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)\n ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)\n imr = np.concatenate((ch255, ch128, ch128), axis=2)\n img = np.concatenate((ch255, ch255, ch0), axis=2)\n imb = np.concatenate((ch255, ch0, ch255), axis=2)\n 
imw = np.concatenate((ch128, ch128, ch128), axis=2)\n imu = np.concatenate((imr, img), axis=1)\n imd = np.concatenate((imb, imw), axis=1)\n image = np.concatenate((imu, imd), axis=0)\n return image", "def format_data(img_path, size):\n img_color = cv2.imread(img_path)\n img_color = img_color[:, :, ::-1]\n img_color = cv2.resize(img_color, (size, size), interpolation=cv2.INTER_AREA)\n img_color = img_color.reshape((1, size, size, 3))\\\n #.transpose(0, 3, 1, 2)\n\n return img_color", "def makearray(self, *args, **kwargs):\n return _image.image_makearray(self, *args, **kwargs)", "def get_rgbColorArray(self, ledIndex, count):\n # buff\n res = []\n # idx\n # r\n # g\n # b\n\n buff = self._download(\"rgb.bin?typ=0&pos=\" + str(int(3*ledIndex)) + \"&len=\" + str(int(3*count)))\n del res[:]\n\n idx = 0\n while idx < count:\n r = YGetByte(buff, 3*idx)\n g = YGetByte(buff, 3*idx+1)\n b = YGetByte(buff, 3*idx+2)\n res.append(r*65536+g*256+b)\n idx = idx + 1\n\n return res", "def to_array(self):\n return np.array(self.to_image())", "def flow_to_image(flow):\n out = []\n maxu = -999.\n maxv = -999.\n minu = 999.\n minv = 999.\n maxrad = -1\n for i in range(flow.shape[0]):\n u = flow[i, :, :, 0]\n v = flow[i, :, :, 1]\n idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)\n u[idxunknow] = 0\n v[idxunknow] = 0\n maxu = max(maxu, np.max(u))\n minu = min(minu, np.min(u))\n maxv = max(maxv, np.max(v))\n minv = min(minv, np.min(v))\n rad = np.sqrt(u ** 2 + v ** 2)\n maxrad = max(maxrad, np.max(rad))\n u = u / (maxrad + np.finfo(float).eps)\n v = v / (maxrad + np.finfo(float).eps)\n img = compute_color(u, v)\n out.append(img)\n return np.float32(np.uint8(out))", "def get_raw(self) -> bytearray:\n img_bytes = bytearray()\n for i in range(self.grid_size[0]):\n if self.grid[i] is not None:\n for j in range(self.grid_size[1]):\n if self.grid[i][j] is not None:\n color = self.grid[i][j]\n color = color.get_byte_representation()\n for k in range(len(color)):\n img_bytes.append(color[k])\n return img_bytes", "def save_array_as_rgb_image(data, image_name):\n data_dim = len(data.shape)\n if(data_dim == 3):\n assert(data.shape[0] == 3 or data.shape[2] == 3)\n if(data.shape[0] == 3):\n data = np.transpose(data, [1, 2, 0])\n img = Image.fromarray(data)\n img.save(image_name)", "def get_image_and_prep(self,file_path):\r\n img = np.array(Image.open(file_path).convert('1'))\r\n img = img.reshape(28,28,1)\r\n return img", "def process_image(self, image_path):\n\n img = load_img(image_path, target_size=IMAGE_SIZE)\n img_array = img_to_array(img)\n # Create a batch by increase dimensions\n img_array = expand_dims(img_array, 0)\n print(img_array.shape)\n return img_array", "def transform_image(self):\n im = cv2.imread(\"result.png\", 0)\n im2 = cv2.resize(im, (28, 28))\n im = im2.reshape(28, 28, -1)\n im = im.reshape(1, 1, 28, 28)\n im = cv2.bitwise_not(im)\n im = im.reshape(28,28)\n \n with out:\n clear_output()\n \n # resize\n img = np.array(im)\n img = img.reshape(28*28,)\n \n #img = img/255.0\n \n return img", "def testImageProcessing():\n Im_pix = getRGB( 'in.png' ) # read in the in.png image\n print \"The first two pixels of the first row are\",\n print Im_pix[0][0:2]\n # remember that Im_pix is a list (the image)\n # of lists (each row) of lists (each pixel is [R,G,B])\n New_pix = [ [ [255 - num for num in p] for p in row ] for row in Im_pix ]\n # now, save to the file 'out.png'\n saveRGB( New_pix, 'out.png' )", "def __call__(self, results):\n # Image is bgr\n img = results['img'][..., ::-1]\n img = Image.fromarray(img)\n img = 
self.transform(img)\n img = np.asarray(img)\n img = img[..., ::-1]\n results['img'] = img\n return results", "def generate_normalized_rgb(self):\n \n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r/256.0,g/256.0,b/256.0", "def read_image(image_path):\n return np.array(load_img(image_path, color_mode='grayscale')) / 255", "def img_from_array(array):\n return Image.fromarray(array)", "def get_image(filepath,size):\n image = Image.open(filepath)\n newimage = image.resize((size,size)).convert('LA')\n pixels = np.asarray(newimage,dtype = np.float32)[:,:,0]\n return pixels", "def read_image_greyscale(path: str) -> np.ndarray:\n img = imread(path)\n if len(img.shape) > 2:\n img = np.dot(img[..., :3], [0.299, 0.587, 0.114])\n return img", "def _convert_images(raw):\n # Convert the raw images from the data-files to floating-points.\n #raw_float = np.array(raw, dtype=float) / 255.0\n\n # Reshape the array to 4-dimensions.\n images = raw.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images.transpose([0, 2, 3, 1])\n\n return images", "def to_rgb(im):\n w, h = im.shape\n ret = np.empty((w, h, 3), dtype=np.uint8)\n ret[:, :, 2] = ret[:, :, 1] = ret[:, :, 0] = im\n return ret", "def render(self):\n\n pixels = [\n [Color() for _ in range(self.width)] for _ in range(self.height)]\n\n for y in range(self.height):\n for x in range(self.width):\n ray_direction = Point(x, y) - self.camera\n ray = Ray(self.camera, ray_direction)\n pixels[y][x] = self._trace_ray(ray)\n\n return pixels", "def rgb(self):\n return [self.__r, self.__g, self.__b]", "def q_1(input_file, output_file):\n img = cv2.imread(input_file, cv2.IMREAD_COLOR)\n\n # Convert image to gray channel\n np_img = np.array(img)\n b = np_img[:,:,0]\n g = np_img[:,:,1]\n r = np_img[:,:,2]\n img_gray = 0.21 * b + 0.72 * g + 0.07 * r\n img_gray = np.array(img_gray, dtype='uint8')\n cv2.imwrite(output_file, img_gray)\n print(np_img)", "def _raw_to_gray(self):\n img_rgb = np.zeros((self.y_res, self.x_res, 3), dtype=np.uint8)\n img_rgb = np.array(self.img_raw)\n img_gray = np.zeros((self.y_res, self.x_res))\n img_gray[:, :] = img_rgb[:, :, 2]\n\n return img_gray", "def get_image():\n bgr = np.frombuffer(\n stream.read_frame().get_buffer_as_uint8(), dtype=np.uint8\n ).reshape(RESOLUTIONY, RESOLUTIONX, 3)\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n return rgb", "def img_recolor(self, args, input_image_path):\n \n ec = encoder.Encoder(output_path=args.intermediate_representation, method=args.method,\n size=args.size, p=args.p, grid_size=args.grid_size, plot=args.plot, quantize=args.quantize)\n dc = decoder.Decoder(output_path=args.output_path, method=args.method, size=args.size, p=args.p, gpu_id=args.gpu_id, plot=args.plot)\n\n ec.encode(input_image_path)\n img_gray_name = ar_utils.gen_new_gray_filename(input_image_path)\n img_gray_path = os.path.join(args.intermediate_representation, img_gray_name)\n dc.decode(img_gray_path)\n\n if args.delete_gray and os.path.exists(img_gray_path):\n os.remove(img_gray_path)", "def image2array(im):\n\n arr = numpy.zeros(im.size)\n\n for x in xrange(im.size[0]):\n for y in xrange(im.size[1]):\n arr[x,y] = im.getpixel((x,y))\n\n return arr", "def load_color_image_features(img_path):\n ac = scipy.misc.imread(img_path, mode='RGB')\n ac = ac / (255.0 / 2) - 1.0\n return np.array(ac)", "def 
_convert_to_yolo_img(self, img):\n\n img = img / 255.0\n h, w, c = img.shape\n img = img.transpose(2, 0, 1)\n outimg = make_image(w, h, c)\n img = img.reshape((w*h*c))\n data = c_array(c_float, img)\n outimg.data = data\n rgbgr_image(outimg)\n return outimg", "def get_image(image_path):\n image = Image.open(image_path, \"r\")\n width, height = image.size\n pixel_values = list(image.getdata())\n\n if (image.mode != \"RGBA\"):\n image = image.convert(\"RGB\")\n pixel_values = list(image.getdata())\n for idx, px in enumerate(pixel_values):\n pixel_values[idx] = [px[0], px[1], px[2], 255]\n\n return (list(chunks(pixel_values, width)), width, height)", "def rgb_processing(rgb_img, center, scale, rot=0):\n rgb_img = crop(rgb_img, center, scale, \n [constants.IMG_RES, constants.IMG_RES], rot=rot)\n # (3,224,224),float,[0,1]\n rgb_img = np.transpose(rgb_img.astype('float32'),(2,0,1))/255.0\n return rgb_img", "def preprocess_image(image: Image) -> np.ndarray:\n return np.array(image.convert('L'))", "def greyScale(img, shape):\n s, v = shape\n greyPicture = [sum(img[i]) / 3 for i in range(v * s)]\n\n return greyPicture", "def test_fromarray_rgb_fail():\n arr = numpy.zeros((20, 10, 3), dtype='float')\n\n parameters = {'data': [arr]}\n\n images.fromarray(parameters).convert('RGB')", "def convert_img(self):\r\n self.img = self.img.convert('RGB')", "def convert_grayscale_to_rgb(x: np.ndarray) -> np.ndarray:\n return np.stack((x, ) * 3, axis=-1)", "def generate_image(size, bitdepth, pattern):\n\n width, height = size\n\n maxval = 2**bitdepth-1\n if maxval > 255:\n a = array('H')\n else:\n a = array('B')\n fw = float(width)\n fh = float(height)\n pfun = PATTERN[pattern]\n for y in range(height):\n fy = float(y)/fh\n for x in range(width):\n a.append(int(round(pfun(float(x)/fw, fy) * maxval)))\n return a", "def process_screen(screen):\n\n # Indexing convention varies between PIL and numpy\n screen = np.swapaxes(screen, 0, 1)\n # Load the array in PIL\n im = Image.fromarray(screen, 'RGB')\n # Convert to grayscale\n im = im.convert(mode='L')\n # Crop\n im = im.crop((0, 0, 288, 405))\n # Downscale and resize\n im = im.resize((84, 84))\n # Normalise\n im = np.array(im) / 255\n\n return im", "def recreate_image(x):\n reverse_mean = [-0.485, -0.456, -0.406]\n reverse_std = [1/0.229, 1/0.224, 1/0.225]\n in_channel = x.shape[-1]\n recreated_im = copy.copy(x) # C, H, W\n if in_channel == 3:\n for c in range(in_channel):\n recreated_im[:, :, c] /= reverse_std[c]\n recreated_im[:, :, c] -= reverse_mean[c]\n elif in_channel == 1:\n recreated_im[:, :, 0] /= reverse_std[1]\n recreated_im[:, :, 0] -= reverse_mean[1]\n recreated_im[recreated_im > 1] = 1\n recreated_im[recreated_im < 0] = 0\n recreated_im = np.round(recreated_im * 255)\n\n recreated_im = np.uint8(recreated_im) # H, W, C\n return recreated_im", "def main():\n import numpy as np\n from numpy import int32, uint\n\n pg.init()\n\n print(\"Using Numpy\")\n print(\"Press the left mouse button to advance image.\")\n print('Press the \"s\" key to save the current image.')\n\n # allblack\n allblack = np.zeros((128, 128), int32)\n surfdemo_show(allblack, \"allblack\")\n\n # striped\n # the element type is required for np.zeros in numpy else\n # an array of float is returned.\n striped = np.zeros((128, 128, 3), int32)\n striped[:] = (255, 0, 0)\n striped[:, ::3] = (0, 255, 255)\n surfdemo_show(striped, \"striped\")\n\n # rgbarray\n imagename = os.path.join(main_dir, \"data\", \"arraydemo.bmp\")\n imgsurface = pg.image.load(imagename)\n rgbarray = 
surfarray.array3d(imgsurface)\n surfdemo_show(rgbarray, \"rgbarray\")\n\n # flipped\n flipped = rgbarray[:, ::-1]\n surfdemo_show(flipped, \"flipped\")\n\n # scaledown\n scaledown = rgbarray[::2, ::2]\n surfdemo_show(scaledown, \"scaledown\")\n\n # scaleup\n # the element type is required for np.zeros in numpy else\n # an #array of floats is returned.\n shape = rgbarray.shape\n scaleup = np.zeros((shape[0] * 2, shape[1] * 2, shape[2]), int32)\n scaleup[::2, ::2, :] = rgbarray\n scaleup[1::2, ::2, :] = rgbarray\n scaleup[:, 1::2] = scaleup[:, ::2]\n surfdemo_show(scaleup, \"scaleup\")\n\n # redimg\n redimg = np.array(rgbarray)\n redimg[:, :, 1:] = 0\n surfdemo_show(redimg, \"redimg\")\n\n # soften\n # having factor as an array forces integer upgrade during multiplication\n # of rgbarray, even for numpy.\n factor = np.array((8,), int32)\n soften = np.array(rgbarray, int32)\n soften[1:, :] += rgbarray[:-1, :] * factor\n soften[:-1, :] += rgbarray[1:, :] * factor\n soften[:, 1:] += rgbarray[:, :-1] * factor\n soften[:, :-1] += rgbarray[:, 1:] * factor\n soften //= 33\n surfdemo_show(soften, \"soften\")\n\n # crossfade (50%)\n src = np.array(rgbarray)\n dest = np.zeros(rgbarray.shape) # dest is float64 by default.\n dest[:] = 20, 50, 100\n diff = (dest - src) * 0.50\n xfade = src + diff.astype(uint)\n surfdemo_show(xfade, \"xfade\")\n\n # all done\n pg.quit()", "def to_color(self):\n if self.channels == 4:\n color = opencv.cvtColor(self.img, opencv.COLOR_BGRA2BGR)\n return Image(color)\n elif self.channels == 1:\n color = opencv.cvtColor(self.img, opencv.COLOR_GRAY2BGR)\n return Image(color)\n else:\n return Image(self.img)", "def array_from_img(image):\n return np.array(image)", "def array2img(array):\n if len(array.shape) == 2:\n return Image.fromarray(np.clip(array, 0, 255).astype('uint8'), mode='L')\n elif len(array.shape) == 3:\n return Image.fromarray(np.clip(array, 0, 255).astype('uint8'), mode='RGB')\n else:\n print('Income array is not at appropriate shape!')", "def _rgb2y(self, im):\n if len(im.shape) < 3:\n return im\n return np.sum(im * [0.299, 0.587, 0.114], axis=2)", "def matplotlib_image(image):\n if image.ndim == 2:\n rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n else:\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return rgb", "def _reshape(self, data):\n\n\t\t\td = np.zeros((32,32,3))\n\t\t\td_r = data[0:1024].reshape(32,32)\n\t\t\td_g = data[1024:2048].reshape(32,32)\n\t\t\td_b = data[2048:].reshape(32,32)\n\n\t\t\tfor h in range(32):\n\t\t\t for w in range(32):\n\t\t\t for c in range(3):\n\n\t\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\t\tarray = np.array(d, dtype=np.uint8)\n\t\t\timg = Image.fromarray(array)\n\t\t\ttemp = img.resize(size = (64,64))\n\t\t\td = image.img_to_array(temp)\n\n\t\t\t#plt.imshow(d)\n\t\t\t#plt.show()\n\t\t\treturn d", "def image_to_data(image):\n pixels = image.convert('RGB').load()\n width, height = image.size\n for y in range(height):\n for x in range(width):\n r,g,b = pixels[(x,y)]\n color = rgb(r, g, b)\n yield (color >> 8) & 0xFF\n yield color & 0xFF", "def load_rgb(path):\n bands = band_list['rgb']\n img = None\n fmt = \"_{}.tif\"\n for b in bands:\n band_ds = rasterio.open(path + fmt.format(b))\n aux = band_ds.read(1)\n aux = norm_band(aux)\n aux = np.expand_dims(aux, axis=-1)\n if img is None:\n img = aux\n else:\n img = np.concatenate((img, aux), axis=-1)\n return img", "def _reshape(self, data):\n\n\t\td = np.zeros((32,32,3))\n\t\td_r = 
data[0:1024].reshape(32,32)\n\t\td_g = data[1024:2048].reshape(32,32)\n\t\td_b = data[2048:].reshape(32,32)\n\n\t\tfor h in range(32):\n\t\t for w in range(32):\n\t\t for c in range(3):\n\n\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\tarray = np.array(d, dtype=np.uint8)\n\t\timg = Image.fromarray(array)\n\t\ttemp = img.resize(size = (64,64))\n\t\td = image.img_to_array(temp)\n\n\t\t#plt.imshow(d)\n\t\t#plt.show()\n\t\treturn d", "def slice_array():\n img = Image.open(\"flamingo.jpg\")\n image_as_array = np.array(img)\n width, height, depth = image_as_array.shape\n\n red_channel = image_as_array[:, :, 0]\n green_channel = image_as_array[:, :, 1]\n blue_channel = image_as_array[:, :, 2]\n\n top_left_corner = image_as_array[:height // 2, :width // 2, :]\n top_right_corner = image_as_array[:height // 2, width // 2:, :]\n random_middle_pixels = image_as_array[11:29, 101:400, :]", "def render_array(self, resolution=300, channel=\"GRAYSCALE\"):\n # Method below returns a cairocffi.ImageSurface object\n # https://cairocffi.readthedocs.io/en/latest/api.html#cairocffi.ImageSurface\n surface, width, height = self._document.write_image_surface(\n resolution=resolution\n )\n img_format = surface.get_format()\n\n # This is BGRA channel in little endian (reverse)\n if img_format != FORMAT_ARGB32:\n raise RuntimeError(\n f\"Expect surface format to be 'cairocffi.FORMAT_ARGB32', but got {img_format}.\" +\n \"Please check the underlining implementation of 'weasyprint.document.Document.write_image_surface()'\"\n )\n\n img_buffer = surface.get_data()\n # Returns image array in \"BGRA\" channel\n img_array = np.ndarray(\n shape=(height, width, 4), dtype=np.uint8, buffer=img_buffer\n )\n if channel == \"GRAYSCALE\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2GRAY)\n elif channel == \"RGBA\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2RGBA)\n elif channel == \"RGB\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2RGB)\n elif channel == \"BGRA\":\n return np.copy(img_array)\n elif channel == \"BGR\":\n return cv2.cvtColor(img_array, cv2.COLOR_BGRA2BGR)\n else:\n valid_channels = [\"GRAYSCALE\", \"RGB\", \"RGBA\", \"BGR\", \"BGRA\"]\n raise ValueError(\n f\"Invalid channel code {channel}. Valid values are: {valid_channels}.\"\n )", "def render(self):\n np_img = np.array(self.prev_img, dtype=np.uint8)\n np_img = np.swapaxes(np_img, 0, 2)\n return np_img", "def yiq2rgb(imYIQ):\n return np.dot(imYIQ, np.linalg.inv(np.array(MATRIX).T))", "def preprocess(self, data):\n data_unnorm = data / 2.0 + 0.5\n \n if self.permute == 1:\n permute = [2, 1, 0]\n data_rgb_unnorm = data_unnorm[:, permute]\n elif self.permute == 0:\n data_rgb_unnorm = data_unnorm\n \n data_rgb_unnorm = F.upsample(data_rgb_unnorm, size=self.size, mode='bilinear')\n data_rgb = (data_rgb_unnorm - self.normalize_mean) / self.normalize_std\n return data_rgb" ]
[ "0.7032956", "0.6756124", "0.6748998", "0.66770595", "0.64644593", "0.6463771", "0.64612466", "0.6436402", "0.6378915", "0.6336183", "0.6302488", "0.6263761", "0.624275", "0.6217485", "0.62155837", "0.61859244", "0.61853856", "0.61381644", "0.61203206", "0.60929006", "0.6056744", "0.6051514", "0.6051217", "0.6048501", "0.60263324", "0.6018934", "0.5997953", "0.5955463", "0.5953483", "0.5934768", "0.5922572", "0.5919215", "0.5914749", "0.59146744", "0.5900208", "0.58949894", "0.5891846", "0.58684546", "0.5863706", "0.5857556", "0.58479416", "0.5846311", "0.5844837", "0.58416307", "0.5839048", "0.58335084", "0.5829988", "0.5828206", "0.5827445", "0.5817116", "0.5814034", "0.58062905", "0.58015674", "0.5777475", "0.5776714", "0.5775356", "0.57723266", "0.57723194", "0.5765495", "0.5754229", "0.5746401", "0.5746149", "0.5743001", "0.5741567", "0.5740249", "0.5737142", "0.57360756", "0.5734477", "0.5729071", "0.5719495", "0.571884", "0.57145816", "0.5702659", "0.57005656", "0.5692819", "0.56894594", "0.56890243", "0.56868684", "0.5679998", "0.5679032", "0.56786776", "0.5678284", "0.5677728", "0.5675702", "0.567371", "0.56719375", "0.5668369", "0.56682366", "0.5667854", "0.5659597", "0.56579345", "0.5653183", "0.5652828", "0.5643126", "0.56411356", "0.56327164", "0.56263506", "0.5600548", "0.5600352", "0.559847" ]
0.70360917
0
Return array with values compressed into given range.
def adjustRange(a, vmin=0, vmax=255):
    new_a = (
        (
            # Represent array as floats ranging between 0 and 1.
            a.astype(dtype=float) / np.nanmax(a)
            # Fill given range.
            * (vmax - vmin) + vmin
        )
        # Convert back to regular array.
        .astype(dtype=np.uint8)
    )
    return new_a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inflate_lateral(source,inflate_factor):\n\treturn source[np.meshgrid(*[np.arange(-inflate_factor,i+inflate_factor+1)%i for i in source.shape])]", "def rangeArray(first, last):\n \n return np.arange(first, last+1)", "def crange(*args):\r\n result = [[]]\r\n for arg in args:\r\n result = [x + [y] for x in result for y in range(arg)]\r\n return result", "def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))", "def arange(start, stop=None, step=1) -> NDArrayNumericExpression:\n return array(hl.range(start, stop, step))", "def create_bin_values(self):\n values = [-float(\"inf\"), self.offset, float(\"inf\")]\n value = self.start\n while self.offset + value <= self.stop:\n values.insert(1, self.offset - value)\n values.insert(-1, self.offset + value)\n value *= self.step\n return values", "def binrange(_min, _max, stepsize, include_upper=False):\n _min = _min - _min % stepsize\n _max = _max - _max % stepsize + stepsize * (1 + include_upper)\n return np.arange(_min, _max, stepsize)", "def range(self):\n return self.range_array", "def get_range(value):\n return list(range(value))", "def slice_data(xdata, ydata, x_range):\n\tdata = zip(xdata, ydata)\n\tsliced_data = [d for d in data if d[0] >= x_range[0] and d[0] <= x_range[1]]\n\treturn array(zip(*sliced_data))", "def range_maker(low, hi, step, lst=None):\n return numpy.arange(low, hi, step)", "def sa_range(start: int, end: int) -> StaticArray:\n forward = True # Declares variable for direction\n # Sets the number of elements to create\n if end > start:\n length = abs((end - start) + 1)\n else:\n length = abs((start - end) + 1)\n forward = False\n arr = StaticArray(length) # Creates a length n array\n\n # Fills array with consecutive integers\n for index in range(length):\n arr.set(index, start)\n if forward:\n start += 1\n else:\n start -= 1\n\n return arr", "def get_range( value ):\n return list(range(value))", "def open_range(start, stop, step):\n return np.arange(start, stop+step/2, step)", "def expand_ranges(ranges):\n for low, high in low_high_pairs:\n for j in range(low, high+1):\n yield j", "def recursive_index_decode(int_array, max=32767, min=-32768):\n out_arr = []\n decoded_val = 0\n for item in int_array.tolist():\n if item==max or item==min:\n decoded_val += item\n else:\n decoded_val += item\n out_arr.append(decoded_val)\n decoded_val = 0\n return numpy.asarray(out_arr,dtype=numpy.int32)", "def fill_between(initial,final):\n return np.arange(initial + 1, final)", "def returnArray(self, dataSheet, lowRange, highRange):\n cells = dataSheet[lowRange:highRange]\n cells = np.transpose(cells)\n cells = np.reshape(cells, cells.size)\n values = [cell.value for cell in cells]\n return values", "def compress(self, *args):\n return _osgAnimation.Vec3ArrayPacked_compress(self, *args)", "def arange_sequence(ranges: Tensor) -> Tensor:\n maxcnt = torch.max(ranges).item()\n numuni = ranges.shape[0]\n complete_ranges = torch.arange(maxcnt, device=ranges.device).unsqueeze(0).expand(numuni, -1)\n\n return complete_ranges[complete_ranges < ranges.unsqueeze(-1)]", "def range_(headers, data):\n\tcolumn_matrix=data.get_data(headers).getT() # get columns as rows, as this makes analysis much easier by just perfoming operations on column list directly\n\tif column_matrix==[]:\n\t\tprint \"wrong headers, not present in data Object\"\n\t\treturn []\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\tfinal=np.concatenate((column_min, column_max), 
axis=1)\n\t\n\trng=final.tolist()\n\treturn rng", "def scale_range(data, minTo, maxTo):\n minFrom = np.min(data)\n maxFrom = np.max(data)\n \n scaled_data = []\n \n for point in data:\n new_point = minTo + (maxTo - minTo) * ((point - minFrom)/(maxFrom - minFrom))\n scaled_data.append(new_point)\n \n return scaled_data", "def getZRange(self):\n return self.z_array", "def compress(ts, tol=0.5, max_len=np.inf):\n\n start = 0\n end = 1\n pieces = list() # np.empty([0, 3])\n x = np.arange(0, len(ts))\n epsilon = np.finfo(float).eps\n\n while end < len(ts):\n inc = ts[end] - ts[start]\n err = ts[start] + (inc/(end-start))*x[0:end-start+1] - ts[start:end+1]\n err = np.inner(err, err)\n\n if (err <= tol*(end-start-1) + epsilon) and (end-start-1 < max_len):\n (lastinc, lasterr) = (inc, err) \n end += 1\n else:\n # pieces = np.vstack([pieces, np.array([end-start-1, lastinc, lasterr])])\n pieces.append([end-start-1, lastinc, lasterr])\n start = end - 1\n\n # pieces = np.vstack([pieces, np.array([end-start-1, lastinc, lasterr])])\n pieces.append([end-start-1, lastinc, lasterr])\n\n return pieces", "def ex_pingpong(data):\n a, b, step = _cleanse_range_args(data)\n rv = list(range(a, b+sign(step), step))\n if rv:\n rv += list(range(rv[-1]-step, a, -step))\n return rv", "def buildIntervalSegs(array, interval: int):\n interSegs = []\n for i in range(interval):\n interSegs.append(array[i::interval])\n return np.array(interSegs)", "def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2", "def concat_ranges_1d_nb(a, start_idxs, end_idxs):\n out = np.empty((end_idxs[0] - start_idxs[0], start_idxs.shape[0]), dtype=a.dtype)\n for idx in range(start_idxs.shape[0]):\n out[:, idx] = a[start_idxs[idx]:end_idxs[idx]]\n return out", "def concat_ranges_nb(a, start_idxs, end_idxs):\n out = np.empty((end_idxs[0] - start_idxs[0], start_idxs.shape[0] * a.shape[1]), dtype=a.dtype)\n for col in range(a.shape[1]):\n out[:, col * start_idxs.shape[0]:(col + 1) * start_idxs.shape[0]] = \\\n concat_ranges_1d_nb(a[:, col], start_idxs, end_idxs)\n return out", "def getRange (start, stop, step=1):\r\n result = [n for n in range(start, stop, step)]\r\n return result", "def genvals():\n vals = np.empty(200)\n vals[:50] = np.arange(50) / 50\n vals[50:100] = (50 - np.arange(50)) / 50\n vals[100:] = -vals[:100]\n return vals", "def range() -> List[int]:\n pass", "def convert_range(g, op, block):\n\n start = g.get_node(op.input(\"Start\")[0])\n stop = g.get_node(op.input(\"End\")[0])\n step = g.get_node(op.input(\"Step\")[0])\n dtype = infer_type(start).checked_type.dtype\n\n params = []\n for param in (start, stop, step):\n param, infered = try_infer_value(param, g.get_params())\n if infered:\n param = param.tolist()\n if isinstance(param, list):\n param = param[0]\n if isinstance(param, _expr.Expr):\n param = _op.squeeze(param)\n else:\n param = _op.const(param, dtype=dtype)\n params.append(param)\n\n out = _op.transform.arange(params[0], params[1], params[2], dtype=dtype)\n g.add_node(op.output(\"Out\")[0], out)", "def arange(start, stop=None, dtype=None):\n if stop is None:\n start, stop = 0, start\n if K.backend() == 'theano':\n from theano import tensor as T\n range_ = T.arange(start, stop)\n else:\n assert K.backend() == 'tensorflow'\n import tensorflow as tf\n range_ = tf.range(start, stop)\n if 
dtype is not None:\n range_ = K.cast(range_, dtype=dtype)\n return range_", "def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)", "def data_range(x):\n return max(x)-min(x)", "def new_ranges(rs):\n return tuple(chain(*[new_range(r) for r in rs]))", "def value_range(self, rng):\n start, end = rng.split(':')\n (row_offset, column_offset) = a1_to_rowcol(start)\n (last_row, last_column) = a1_to_rowcol(end)\n\n out = []\n for col in self.values[row_offset - 1:last_row]:\n out.extend(col[column_offset - 1:last_column])\n return out", "def get_compressed(self, value):\r\n output = []\r\n lz_data = (value >> 8) & 0xFF\r\n lz_counter = value & 0xFF\r\n # Define the relative offset on LZ Window\r\n lz_offset = ((lz_counter & 0xF0) << 4) | lz_data\r\n # Define the LZ Counter for repeat data N times\r\n lz_counter = (lz_counter & 0xF) + 0x2\r\n # Start Repeat Loop\r\n while (lz_counter >= 0):\r\n # Seek the window on LZ Offset and get the LZ Data\r\n self.__lzwindow__.seek(lz_offset, FROM_START)\r\n lz_data = (lz_data & 0xFF00) + \\\r\n int.from_bytes(self.__lzwindow__.read(1), byteorder='big')\r\n # Write the LZ data to the output\r\n output.append((lz_data & 0xFF).to_bytes(1, byteorder='big'))\r\n # Seek the LZ Window on current LZ Window Counter value and write the current LZ Data (LZBuffer)\r\n self.__lzwindow__.seek(self.__lzwindowcounter__, FROM_START)\r\n self.__lzwindow__.write((lz_data & 0xFF).to_bytes(1, byteorder='big'))\r\n # Increment LZ Window Counter\r\n self.__lzwindowcounter__ = (\r\n self.__lzwindowcounter__ + 0x1) & self.__lzwindowmax__\r\n # Increment LZ Offset\r\n lz_offset = (lz_offset + 0x1) & self.__lzwindowmax__\r\n # Decrement number of data to decompress\r\n self.__maxlen__ -= 0x1\r\n # Decrement LZ Loop counter\r\n lz_counter -= 0x1\r\n return output", "def from_range(data, max_samples, start, end, lowpass):\n\n # handle the case of no data\n if data.shape[0] == 0:\n return data\n\n if start is None:\n start = 0\n else:\n try:\n start = data.index.get_loc(start, method=\"nearest\")\n except InvalidIndexError:\n # handle non-ordered/non-unique index\n start = np.argmin(np.abs(data.index - start))\n\n if end is None:\n end = data.shape[0]\n else:\n try:\n end = data.index.get_loc(end, method=\"nearest\") + 1\n except InvalidIndexError:\n # handle non-ordered/non-unique index\n end = np.argmin(np.abs(data.index - end)) + 1\n\n step = int(np.ceil((end - start) / max_samples))\n\n # TODO: handle NaNs at start/end\n if step == 0:\n # hacky solution for range reset\n data_new = pd.concat((data.iloc[:1], data.iloc[-1:]))\n else:\n data_new = data.iloc[start:end]\n if step > 1 and lowpass:\n # TODO make this work\n from scipy.signal import butter, filtfilt\n\n for c in data_new.columns:\n if c != \"selected\":\n coefs = butter(3, 1 / step)\n data_new[c] = filtfilt(\n coefs[0], coefs[1], data_new.loc[:, c]\n )\n data_new = data_new.iloc[::step]\n # hacky solution for range reset\n if start > 0:\n data_new = pd.concat((data.iloc[:1], data_new))\n if end < data.shape[0] - 1:\n data_new = data_new.append(data.iloc[-1])\n\n return data_new", "def _make_bins(start, stop, step):\n bin_edges = np.arange(start, stop + step, step)\n\n return bin_edges", "def range(series):\n return min(series), max(series)", "def _range_to_list(cls, rng):\n ends = rng.split(\"-\")\n if len(ends) != 2:\n return []\n\n return list(range(int(ends[0]), int(ends[1]) + 1))", "def expand_ip_range(logger, ip_range):\n logger.debug(f\"Expanding IP range: {ip_range} to individual IPs\")\n r = 
ipaddress.IPv4Network(ip_range)\n return [str(ip) for ip in r]", "def new_range(r):\n if isinstance(r, list) or isinstance(r, tuple) and len(r) == 2:\n lower = r[0]\n upper = r[1]\n else:\n lower = r\n upper = r\n lower = int(lower)\n upper = int(upper)\n return range(lower, upper + 1)", "def get_range(start, stop):\n \n nums = []\n\n for num in range(start, stop):\n nums.append(num)\n\n return nums", "def autorange(minmax: Tuple[int, int]) -> List[int]:\n min, max = minmax\n if min > max:\n return list(reversed(range(max + 1, min + 1)))\n return list(range(min, max))", "def domain_range(domain, _range=[0, 1], return_transform=False):\n\n if not return_transform:\n return interp1d([min(domain), max(domain)], [min(_range), max(_range)], bounds_error=False)\n else:\n m = interp1d([min(domain), max(domain)], [min(_range), max(_range)])\n return [float(m(v)) for v in domain] # Take float, else returns weird numpy.ndarray element", "def label_to_range(label):\r\n C = int(label.max())\r\n arange = np.zeros((C+1,), dtype=np.int)\r\n cumsum = 0\r\n for i in xrange(C):\r\n cumsum += np.where(label == (i+1))[0].size\r\n arange[i+1] = cumsum\r\n return arange", "def to_bins(arr):\n result = np.zeros(len(arr)+1)\n result[1:-1] = 0.5 * (arr[1:] + arr[:-1])\n result[0] = arr[0] - 0.5*(arr[1] - arr[0])\n result[-1] = arr[-1] + 0.5*(arr[-1] - arr[-2])\n return result", "def to_slice(self):\n return np.index_exp[self.start[2]:self.end[2], #\n self.start[1]:self.end[1], #\n self.start[0]:self.end[0]]", "def arange(self, start: float, stop: float, step: float = 1.0) -> None:\n self.values = []\n assert step != 0.0\n while abs(start) < abs(stop):\n self.values.append(start)\n start += step", "def expand_number_range(range_list):\n if '-' in range_list:\n range_list = range_list.split('-')\n assert len(range_list) == 2\n range_list = list(range(int(range_list[0]), int(range_list[1])+1))\n else: # Not a range, just return the number as a list.\n range_list = [int(range_list),]\n return range_list", "def normalize_range(array, floor=0, ceil=1):\n scaler = MinMaxScaler(feature_range=(floor, ceil), copy=True)\n return scaler.fit_transform(array)", "def test_raster_x_descending_y_ascending_partial_range():\n xs = np.arange(10)[::-1]\n ys = np.arange(5)\n arr = xs*ys[np.newaxis].T\n xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])\n cvs = ds.Canvas(7, 2, x_range=(.5, 7.5), y_range=(1.5, 3.5))\n agg = cvs.raster(xarr)\n\n assert np.allclose(agg.data, xarr.sel(X=slice(7, 1), Y=slice(2, 3)).data)\n assert np.allclose(agg.X.values, xs[2:9])\n assert np.allclose(agg.Y.values, ys[2:4])", "def ex_crange(data):\n center = minv = maxv = spread = 0\n step = 1\n try:\n center = int(data[0])\n spread = int(data[1])\n if len(data) > 2:\n step = int(data[2])\n minv = center - spread/2\n maxv = center + spread/2\n except ValueError:\n pass\n if step == 0:\n step = 1\n if minv > maxv:\n minv, maxv = maxv, minv\n rv = [center]\n v = center - step\n while minv <= v <= maxv:\n rv.insert(0, v)\n v -= step\n v = center + step\n while minv <= v <= maxv:\n rv.append(v)\n v += step\n return rv", "def _get_shear_vals(lower_bound: float,\n upper_bound: float,\n step: float) -> Tuple[float]:\n return tuple(np.arange(lower_bound, upper_bound + step, step))", "def to_range(images, min_value=0.0, max_value=1.0, dtype=None):\n assert \\\n np.min(images) >= -1.0 - 1e-5 and np.max(images) <= 1.0 + 1e-5 \\\n and (images.dtype == np.float32 or images.dtype == np.float64), \\\n 'The input images should be float64(32) and in the 
range of [-1.0, 1.0]!'\n if dtype is None:\n dtype = images.dtype\n return ((images + 1.) / 2. * (max_value - min_value) + min_value).astype(dtype)", "def collect(self, start=None, stop=None, step=None):\n counts_compressed = self.counts_compressed()\n if start is None:\n if len(counts_compressed) > 0:\n start = self.values[counts_compressed[0][0]]\n else:\n start = -1.0\n if stop is None:\n if len(counts_compressed) > 1:\n stop = self.values[counts_compressed[-1][0]]\n else:\n stop = 1.0\n if step is None:\n step = (stop - start) / 10.0\n\n counts = self.get_counts(start, stop + step, step)\n current = start\n bins = []\n next_one = current + step\n i = 0\n while next_one <= stop + (step) and i < len(counts):\n start_bin = self.get_bin_index(current)\n stop_bin = self.get_bin_index(next_one)\n bin = {\n \"value_start\": current,\n \"value_stop\": next_one,\n \"bin_index_start\": start_bin,\n \"bin_index_stop\": stop_bin,\n \"count\": counts[i],\n }\n bins.append(bin)\n current = next_one\n next_one = current + step\n i += 1\n return bins", "def construct_b(self, start, end):\n return np.concatenate((start, end), axis = 0)", "def arange(start=0, stop=None, step=None):\n raise NotImplementedError", "def array_range(a, low, high, ref=None):\n if ref is None:\n ref = a\n return a[np.logical_and(ref >= low, ref < high)]", "def test_raster_x_ascending_y_descending_partial_range():\n xs = np.arange(10)\n ys = np.arange(5)[::-1]\n arr = xs*ys[np.newaxis].T\n xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])\n cvs = ds.Canvas(7, 2, x_range=(0.5, 7.5), y_range=(1.5, 3.5))\n agg = cvs.raster(xarr)\n\n assert np.allclose(agg.data, xarr.sel(X=slice(1, 7), Y=slice(3, 2)).data)\n assert np.allclose(agg.X.values, xs[1:8])\n assert np.allclose(agg.Y.values, ys[1:3])", "def slice_zvals(self):\n return np.sort([z.val for z in self.zvals])", "def regular_to_ragged(array):\n array_list = []\n for i in range(array.shape[0]):\n array_list.append(trim_zeros(array[i]))\n return array_list", "def range(self) -> NDArrayFloat: # noqa: A003\n\n return ndarray_copy(self._range)", "def encode(cmp):\n \n low = np.float128(0)\n high = np.float128(1)\n range = high - low\n for s in cmp:\n if not s in interval:\t\t\t\t\t# Round value in range [-32, 31]\n if s > 31:\n s = 31\n if s < -32:\n s = -32\n high = low + range * interval[s][1]\t\t# Update high value\n low = low + range * interval[s][0]\t\t# Update low value\n range = high - low\t\t\t\t\t\t# Update range\n return low + (high - low) / 2", "def slice(A,rowrange,colrange):\n\n\treturn [[get_elem(A,j,i) for j in rowrange] for i in colrange]", "def to_arrays(self, xmin=None, xmax=None):\n sidx = 0 if xmin is None else np.searchsorted(self.xvec, [xmin])[0]\n eidx = len(self.xvec) if xmax is None else np.searchsorted(self.xvec, [xmax])[0]\n\n if eidx < len(self.xvec) and self.xvec[eidx] == xmax:\n eidx += 1\n\n xtemp = self.xvec[sidx:eidx]\n if xmin is not None and (len(xtemp) == 0 or xtemp[0] != xmin):\n np.insert(xtemp, 0, [xmin])\n if xmax is not None and (len(xtemp) == 0 or xtemp[-1] != xmax):\n np.append(xtemp, [xmax])\n return xtemp, self(xtemp)", "def generate_possible_coords(starting,a_range,min_cell_distance): \n a_raw= np.arange(a_range[0]+starting,a_range[1]-starting+1,min_cell_distance)\n \n if len(a_raw) == 0:\n return a_raw\n \n if not check_if_range_filled(a_range,a_raw[-1], min_cell_distance):\n # put one more number on the end if the range is not filled\n a_raw= np.arange(a_range[0]+starting,a_range[1],min_cell_distance) \n\n return 
a_raw", "def transform_basis(self, values):\n block_len = len(values)/self.base\n blocks = [values[i*block_len:(i+1)*block_len] for i in range(self.base)]\n return blocks", "def transform_basis(self, values):\n block_len = len(values)/self.base\n blocks = [values[i*block_len:(i+1)*block_len] for i in range(self.base)]\n return blocks", "def test_raster_both_descending_partial_range():\n xs = np.arange(10)[::-1]\n ys = np.arange(5)[::-1]\n arr = xs*ys[np.newaxis].T\n xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])\n cvs = ds.Canvas(7, 3, x_range=(.5, 7.5), y_range=(.5, 3.5))\n agg = cvs.raster(xarr)\n\n assert np.allclose(agg.data, xarr.sel(Y=slice(3,1), X=slice(7, 1)).data)\n assert np.allclose(agg.X.values, xs[2:9])\n assert np.allclose(agg.Y.values, ys[1:4])", "def create_range_map(points_xyz: NDArrayFloat) -> NDArrayByte:\n range = points_xyz[..., 2]\n range = np.round(range).astype(int)\n color = plt.get_cmap(\"turbo\")(np.arange(0, range.max() + 1))\n color = color[range]\n range_cmap: NDArrayByte = (color * 255.0).astype(np.uint8)\n return range_cmap", "def m_to_range(self, data):\n return (data - self._min_range_m) / self._total_range", "def transform(self, j_object):\n return Range(java_ref=j_object)", "def vrange(starts, stops):\n stops = np.asarray(stops)\n l = stops - starts # Lengths of each range.\n return np.repeat(stops - l.cumsum(), l) + np.arange(l.sum()), l.cumsum()", "def normalizeToRange(data,max=255,min=0):\n if min: return (max-min)*normalize(data)+min\n else: return max*normalize2(data) # speeds up operation", "def slice0(A,rowrange,colrange):\n\treturn [[A[i][j] for j in range(rowrange[0],rowrange[1])] for i in range(colrange[0],colrange[1])]", "def split_range(valsize, step, start, end):\n \n shift = 0\n while True:\n diff = 1 << (shift + step)\n mask = ((1 << step) - 1) << shift\n setbits = lambda x: x | ((1 << shift) - 1)\n \n haslower = (start & mask) != 0\n hasupper = (end & mask) != mask\n \n not_mask = ~mask & ((1 << valsize + 1) - 1)\n nextstart = (start + diff if haslower else start) & not_mask\n nextend = (end - diff if hasupper else end) & not_mask\n \n if shift + step >= valsize or nextstart > nextend:\n yield (start, setbits(end), shift)\n break\n \n if haslower:\n yield (start, setbits(start | mask), shift)\n if hasupper:\n yield (end & not_mask, setbits(end), shift)\n \n start = nextstart\n end = nextend\n shift += step", "def Range(self, from: int, to: int) -> BaseVector:", "def merge_ranges():", "def get_rangelist(start, end, count):\n if start is not None and end is not None:\n if count != 0 and not (start == 0 and count < end):\n start = int(start)\n end = int(end)\n cnt = end - start\n rangelist = []\n div = int(start) / count + 1\n multiple = round(div, 0)\n start_range = int(count * multiple)\n n = 1\n for itr in range(0, start_range + count, (end - start)):\n if itr < count:\n rangelist.append([itr, itr + cnt, n])\n n += 1\n return rangelist\n return []", "def equipartition(ar, nbins, vmin=None, vmax=None):\n\n a_s = np.sort(ar)\n\n if vmax is not None:\n a_s = a_s[a_s <= vmax]\n if vmin is not None:\n a_s = a_s[a_s > vmin]\n\n return a_s[np.array(np.linspace(0, len(a_s) - 1, nbins + 1), dtype='int')]", "def rasterize(x):\n for i, y in enumerate(x):\n for v in y:\n yield i, v", "def remap_interval(val, input_interval_start, input_interval_end, output_interval_start, output_interval_end):\n\n\t# shift the zero\n\tzero = (output_interval_start - input_interval_start)\n\t\n\t# stretch/compress after shifting\n\tchange_factor 
= float(output_interval_end)/ (input_interval_end + zero)\n\n\t# apply stretching/compression\n\tnew_value = (val+zero)*change_factor\n\n\treturn new_value", "def rangeLin(min, max, n):\n\n return np.arange( min, max, (max-min)/n )", "def rescale_to_range(\n array: vtk.vtkDoubleArray,\n to_range: typing.Tuple[float, float],\n rel_tol: float = sys.float_info.epsilon,\n abs_tol: float = sys.float_info.epsilon,\n) -> vtk.vtkDoubleArray:\n to_span = to_range[1] - to_range[0]\n assert to_span >= 0\n\n # The values need to span a positive range to be able to scale to `to_range`.\n # We use at least a small span derived from the tolerances.\n array_range = array.GetValueRange()\n array_span = array_range[1] - array_range[0]\n array_center = array_range[0] + array_span / 2\n from_range = (\n array_range\n if not math.isclose(array_span, 0.0, rel_tol=rel_tol, abs_tol=abs_tol)\n else (\n array_center - max(rel_tol * abs(array_center), abs_tol),\n array_center + max(rel_tol * abs(array_center), abs_tol),\n )\n )\n from_span = from_range[1] - from_range[0]\n\n assert not math.isclose(from_span, 0.0, rel_tol=rel_tol, abs_tol=abs_tol)\n factor = to_span / from_span\n\n result = vtk.vtkDoubleArray()\n result.SetNumberOfValues(array.GetNumberOfValues())\n for id in range(array.GetNumberOfValues()):\n result.InsertValue(\n id, to_range[0] + (array.GetValue(id) - from_range[0]) * factor\n )\n\n return result", "def _conv_slice_to_list(slice_obj, start_def=0, stop_def=100, step_def=1):\n if slice_obj.start is None:\n start = start_def\n else:\n start = slice_obj.start\n if slice_obj.stop is None:\n stop = stop_def\n else:\n stop = slice_obj.stop\n if slice_obj.step is None:\n step = step_def\n else:\n step = slice_obj.step\n return list(range(start, stop, step))", "def create_repeated_indexes(data):\n from numpy import arange\n\n index_range = arange(0, len(data))\n return (index_range for i in index_range)", "def from_inclusive(a, b):\n c = int(b > a)*2-1\n return range(a, b+c, c)", "def test_raster_both_ascending_partial_range():\n xs = np.arange(10)\n ys = np.arange(5)\n arr = xs*ys[np.newaxis].T\n xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])\n cvs = ds.Canvas(7, 3, x_range=(.5, 7.5), y_range=(.5, 3.5))\n agg = cvs.raster(xarr)\n\n assert np.allclose(agg.data, xarr.sel(X=slice(1, 7), Y=slice(1, 3)))\n assert np.allclose(agg.X.values, xs[1:8])\n assert np.allclose(agg.Y.values, ys[1:4])", "def linspace(start, stop, n, istart=True, istop=True):\r\n n = n-1\r\n arr = [start + ((stop-start)/n) * i for i in range(n+1)]\r\n return arr", "def create_ip_range(start_ip, end_ip):\n start = list(map(int, start_ip.split(\".\")))\n end = list(map(int, end_ip.split(\".\")))\n temp = start\n ip_range = []\n\n ip_range.append(start_ip)\n while temp != end:\n start[3] += 1\n for i in (3, 2, 1):\n if temp[i] == 256:\n temp[i] = 0\n temp[i - 1] += 1\n ip_range.append(\".\".join(map(str, temp)))\n\n return ip_range", "def get_redshifts_with_interval(zmin, zmax, interval):\n\n # If interval is not a quantity convert it\n if not isinstance(interval, u.Quantity):\n interval = interval * u.Mpc\n\n num_intervals = num_intervals_between_redshifts(zmin, zmax, interval)\n dist_min = z_to_mpc(zmin)\n redshifts = np.empty(num_intervals)\n\n for i in range(num_intervals):\n total_dist = dist_min + i * interval\n redshifts[i] = mpc_to_z(total_dist)\n\n return redshifts", "def createBins():\n theBins = []\n startFreq = 60\n for a in range(32):\n endFreq = int(startFreq*1.12+12)\n theRange = (startFreq, endFreq)\n 
startFreq = endFreq\n theBins.append(theRange)\n return(theBins)", "def get_contiguous_inds(inds, min_contig_len=5, trim_left=0, trim_right=0, **kwargs):\r\n start_inds, stop_inds = get_inds_start_stop(inds, **kwargs)\r\n cont_inds = [np.arange(start+trim_left, stop-trim_right) for start, stop in zip(start_inds, stop_inds) if stop-start > min_contig_len]\r\n return cont_inds", "def amp_bin(raw, depth, low, high):\n\n max_in_depth = 2 ** depth\n bin_range = np.linspace(low, high, max_in_depth)\n data = []\n for b in raw:\n i = 0\n while i <= (max_in_depth - 2):\n if (bin_range[i] <= b < bin_range[i+1]):\n data.append(i)\n i += 1\n break\n elif (b <= low):\n data.append(0)\n break\n elif (b >= high):\n data.append(max_in_depth - 1)\n break\n else:\n i += 1\n return np.array(data)", "def make_b_array(n):\n array = np.linspace(-3, 3, n)\n for i, x in enumerate(array[1:-1], start=1):\n if abs(x) < 1:\n array[i] = 2\n else:\n array[i] = 0\n array[0] = 0\n array[n-1] = 0\n\n return array", "def convert_to_compact_array(data, label):\n indices = np.arange(230 * 230).reshape(230, 230)\n t1, t2, pd, dn = label\n m = np.ma.masked_equal(pd, 0)\n t1_masked, t2_masked, pd_masked, indices_masked, dn_masked = \\\n np.ma.masked_array(t1, m.mask), np.ma.masked_array(t2, m.mask), \\\n np.ma.masked_array(pd, m.mask), np.ma.masked_array(indices, m.mask), \\\n np.ma.masked_array(dn, m.mask)\n t1_compressed, t2_compressed, pd_compressed, indices_compressed, dn_compressed = \\\n np.ma.compressed(t1_masked), np.ma.compressed(t2_masked), \\\n np.ma.compressed(pd_masked), np.ma.compressed(indices_masked), \\\n np.ma.compressed(dn_masked)\n\n fp_compressed = []\n for index in indices_compressed:\n x = int(index // 230)\n y = int(index % 230)\n fp_compressed.append(data[x][y])\n fp_compressed = np.asarray(fp_compressed)\n # recon = np.zeros((230, 230))\n # x = indices_compressed // 230\n # y = indices_compressed % 230\n # recon[x, y] = t1_compressed[np.arange(len(indices_compressed))]\n\n label = np.asarray([t1_compressed, t2_compressed, pd_compressed, indices_compressed, dn_compressed])\n label = np.transpose(label)\n data = fp_compressed\n return data, label" ]
[ "0.6024952", "0.5948004", "0.5860119", "0.5817936", "0.575059", "0.5741007", "0.5694915", "0.56648386", "0.56388867", "0.5611858", "0.5607776", "0.5596188", "0.55553037", "0.55267346", "0.5508638", "0.5502483", "0.54900694", "0.5425912", "0.54251516", "0.53815985", "0.53721654", "0.53647536", "0.53642946", "0.536071", "0.535719", "0.53394836", "0.533426", "0.53167415", "0.5283016", "0.5274369", "0.5244095", "0.5222189", "0.52040225", "0.5201586", "0.5192321", "0.51896626", "0.51886034", "0.51883733", "0.51732206", "0.51730525", "0.51677823", "0.5154155", "0.51489955", "0.51397395", "0.5137929", "0.5135478", "0.5126223", "0.5124758", "0.51220715", "0.5108758", "0.5103724", "0.50745773", "0.507352", "0.5072466", "0.506195", "0.50581044", "0.5051686", "0.50473684", "0.5041626", "0.5030203", "0.502818", "0.5022417", "0.50175726", "0.50118667", "0.50110006", "0.5002047", "0.49956313", "0.49904042", "0.49902323", "0.49888977", "0.4969671", "0.4969671", "0.49628112", "0.49589187", "0.4932903", "0.49289072", "0.49264282", "0.49242556", "0.49220246", "0.49187574", "0.49180186", "0.4906466", "0.49017963", "0.49008173", "0.489822", "0.48976505", "0.48942742", "0.48935992", "0.48930413", "0.4884087", "0.48769012", "0.48714775", "0.48666185", "0.48642367", "0.48615062", "0.48600742", "0.4848801", "0.48456913", "0.48296177", "0.48284382" ]
0.49630272
72
Return a bind with the given name.
def __getitem__(self, name):
    try:
        field = self.fields[name]
    except KeyError:
        raise KeyError(
            "Key '%s' not found in '%s'. Choices are: %s." % (
                name,
                self.__class__.__name__,
                ', '.join(sorted(f for f in self.fields)),
            )
        )
    return self._fields[name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_named_bind_string(self, name):\n\n return \":\" + name", "def bound(name):", "def __new__(cls, loc=None, name=None):\n assert ((loc is None and isinstance(name, str)) or\n (name is None and 0 <= loc))\n return super(Bind, cls).__new__(cls, loc, name)", "def reg_binding(self, gate_reg_name):\n return self.reg_bind.get(gate_reg_name)", "def bound_for(self, name):\n if '.' in name:\n module, name = name.split('.', 1)\n if module in self._modules:\n return self.__getattr__(module).bound_for(name)\n else:\n raise AttributeError('Invalid bound name %s. '\n '%s has no module %s' % (name, type(self).__name__, module))\n else:\n if name in self._parameters:\n return self._bounds[name]\n else:\n raise AttributeError('Invalid bound name %s. '\n '%s has no parameter %s' % (name, type(self).__name__, module))", "def bind_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bind_name\")", "def get_socket_by_name(self, name):\n with self.register_lock:\n return self.name_socket[name]", "def bind_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bind_name\")", "def param_binding(self, gate_param_name):\n return self.param_bind.get(gate_param_name)", "def getBinding(o, name):\n raise RuntimeError()", "def __getattr__(self,name):\r\n w=self.mapping.get(name,None)\r\n if w is not None:\r\n w.set_name(name)\r\n w.bind(self)\r\n return w\r\n else:\r\n raise AttributeError('{} not found in {}'.format(name,self.name))", "def bind_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"bind_name\")", "def declare(self, name):\n return named_ports(**{name: self})", "def bind(self, *args):\r\n return self._fd.bind(*args)", "def find_bindings(self, bindName):\n try:\n return self.bind2index[bindName]\n except:\n raise KeyError(\n f\"The binding {bindName} is not in the general list... 
check your input file!\")", "def get_binding(self, orgname):\n pass", "def toBind(node, fail_display=None):\n bind_name = node.name().replace(pcfg.skeleton_namespace, pcfg.bind_namespace)\n\n if not pm.objExists(bind_name) and fail_display:\n fail_display(bind_name + ' does not exist!')\n return\n\n return pm.PyNode(bind_name)", "def by_name(cls, name):\n if name in cls._registry:\n result = cls._registry[name]\n else:\n result = cls._registry[name] = cls(bind=Session._datastores.get(name))\n return result", "def use_named_expression(self, name):\n return self.named_expression[name]", "def bind(self, args=(), kwargs=None):\n kwargs = kwargs or {}\n if self.loc is not None:\n v = args[self.loc:self.loc+1]\n return self if not v else v[0]\n else:\n return kwargs.get(self.name, self)", "def get_by_name(self, name: str) -> BoundLoadBalancerType | None:\n return self._get_first_by(name=name)", "def get_pool(name):\n if name not in _CONNECTIONS:\n add_pool(name)\n return _CONNECTIONS[name]", "def with_name(self, name):\n return self.func(name, self.expr)", "def get_by_name(self, name):\n # type: (str) -> BoundLoadBalancer\n return super(LoadBalancersClient, self).get_by_name(name)", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):\n project_id = project_id if project_id != '' else self.__project_id\n if ip_address == '':\n headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response\n ip = requests.get('http://ifconfig.co', headers)\n ip_address = ip.text.rstrip()\n logger.info(f'bind: looked up ip address: {ip_address}')\n #key = self.create_programatic_apikey(description=description,project_id=project_id)\n db_user = { 'username' : 'foo'\n ,'password' : 'changeme'\n ,'databaseName' : 'admin'\n ,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ] \n }\n user = self.create_database_user(db_user,project_id=project_id) \n cluster = self.get_cluster(cluster_name)\n cs = cluster['mongoURIWithOptions'].split('/',1)\n #conn_str = f'{cs[0]//{key['publicKey']}:{key['privateKey']}@{cs[1]}'\n return conn_str", "def bind(self, bindname, sqltype, value=None):\n datatype = _TYPES[sqltype.upper()]\n var = self.cursor.var(datatype)\n\n if value is not None:\n var.setvalue(0,value)\n\n self.bindparams[bindname.upper()] = var", "def get_bound_state(self, name=None):\n if name is None:\n name = self.__state\n return None if name is None else getattr(self, name, None)", "def __getitem__(self, name):\n return self.connection(name)", "def _get_one_bound(self, param_name):\n return getattr(self, '__' + param_name + '_bounds')", "def test_named_bind_string(self):\n binds = ['v1', 'v2', 'v3']\n\n qry = self.dbh.get_expr_exec_format() % ','.join(\n [self.dbh.get_named_bind_string(v) for v in binds])\n\n cursor = self.dbh.cursor()\n try:\n cursor.execute(qry, dict([(v, 3) for v in binds]))\n self.assertEqual(cursor.fetchone(), tuple([3 for v in binds]))\n finally:\n cursor.close()\n self.dbh.rollback()", "def lookup(self, name):\n if not self.running:\n return succeed(None)\n\n return self.resolv.lookup(name)", "def get(name: str) -> RWLock:\n lock = RwLocks.by_name.get(name)\n if lock is None:\n lock = RwLocks.by_name[name] = RWLock()\n return lock", "def bind( self, mode ):\n vbo = 
self.vbo(mode)\n vbo.bind()\n return vbo", "def make_rng(self, name: str) -> PRNGKey:\n if self.scope is None:\n raise ValueError(\"Can't use RNGs on unbound modules\")\n return self.scope.make_rng(name)", "def get_mongo_db(host, port, name):\n client = MongoClient(host, port)\n db = client[name]\n return db", "def resolve(self, binding_key: Hashable) -> Any:\n binding = self._bindings[binding_key](self)\n return binding", "def bind(self, address: Tuple[str, int]) -> None:\n ...", "def lookup(self, label):\n if label in self.bindings:\n return self.bindings[label]\n else:\n if self.parent:\n return self.parent.lookup(label)\n else:\n raise SnekNameError(\"name '{}' is not defined\".format(label))", "def maybe_bind(value, args, kwargs):\n return value.bind(args, kwargs) if isinstance(value, Bind) else value", "def new(cls, name):\n vim.command(\"silent! badd {:s}\".format(name))\n\n # Now that the buffer has been added, we can try and fetch it by name\n return cls.of(name)", "def _get_pool_by_name(self, pool_name):\n pool_manager = PoolManager(organization_name=self._organization_name,\n project_name=self._project_name, creds=self._creds)\n pools = pool_manager.list_pools()\n return next((pool for pool in pools.value if pool.name == pool_name), None)", "def __getattr__(self, name):\n return self.connection(name)", "def lookup_by_name(cls, name):\n return cls.__by_name[name]", "def connectionFromName(self, name):\n for item in self.items():\n if isinstance(item, ConnectionItem):\n if item.name() == name:\n return item\n return None", "def get_database(conn, name):\n\n if conn.hasDatabase(name) is False:\n return conn.createDatabase(name)\n\n return conn[name]", "def bind(self):\n self.session.configure(bind=self.sa_engine)\n return self.session", "def get_dbapi_module(name):\n return import_module(name)", "def remove_binding(ctx, binding_name):\n\n entryFound = False\n table = 'NAT_BINDINGS'\n key = binding_name\n\n if len(binding_name) > 32:\n ctx.fail(\"Invalid binding name. 
Maximum allowed binding name is 32 characters !!\")\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n data = config_db.get_entry(table, key)\n if not data:\n click.echo(\"Trying to delete binding, which is not present.\")\n entryFound = True\n\n if entryFound == False:\n config_db.set_entry(table, key, None)", "def select_server_by_name(self, name):\n return self._select_server(name=name)", "def connection(self, name=None):\n if not name:\n name = threading.currentThread().getName()\n if name in self:\n return self[name]\n self[name] = self.database.connection()\n return self[name]", "def find_module(self, name):\n if name in self.pool:\n return self.pool[name]\n else:\n return None", "def bind(self, bindee):\n def state_binder(state, runner=self, maker=bindee):\n value, runner_state = runner(state)\n return maker(value)(runner_state)\n return State(state_binder)", "def bind(port, socket_type, socket_proto):\n return _bind(port, socket_type, socket_proto)", "def bind(self, addr):\n self._close_socket()\n self._open_socket()\n self._socket.bind(addr)", "def _bind(self, command_name, **kwargs):\n\n if command_name in ('bind_receiver', 'bind_transceiver'):\n logging.debug('Receiver mode')\n self.receiver_mode = True\n\n p = smpp.make_pdu(command_name, client=self, **kwargs)\n self.send_pdu(p)\n\n try:\n resp = self.read_pdu()\n except socket.timeout:\n raise exceptions.ConnectionError()\n if resp.is_error():\n raise exceptions.PDUError(\n '({status}) {command}: {error}'.format(status=resp.status,\n command=resp.command,\n error=consts.DESCRIPTIONS.get(resp.status, 'unknown code')),\n int(resp.status))\n return resp", "def storage_backend_get_by_name(context, name, inactive=False):\n return _find_storage_backend(context, dict(name = name), True, None, inactive=inactive)", "def name(self, name):\n return self[self.name_cache[name]]", "def get_engine(self, app=None, bind=None):\n # dynamic bind database\n # 如果model中指定了bind_key则,永远是指定的bind_key,即便g.bind_key指定了也是使用的model中的bind_key\n bind = g.bind_key if bind is None and self.is_binds and getattr(g, \"bind_key\", None) else bind\n return super().get_engine(app=app, bind=bind)", "def get_by_name(cls, context, name, eager=False):\n\n db_strategy = cls.dbapi.get_strategy_by_name(\n context, name, eager=eager)\n strategy = cls._from_db_object(cls(context), db_strategy, eager=eager)\n return strategy", "def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).first()", "def __getitem__(self, dbname):\n return Database(dbname=dbname, connection=self)", "def layer_by_name(net, name):\n for l in net.named_modules():\n if l[0] == name:\n return l[1]", "def _get_pool(name=None, session=None):\n if session is None:\n session = _get_session()\n pools = session.xenapi.pool.get_all()\n for pool in pools:\n pool_record = session.xenapi.pool.get_record(pool)\n if name in pool_record.get(\"name_label\"):\n return pool\n return None", "def get_binds(self, app=None):\n bind_name = g.bind_key if self.is_binds and getattr(g, \"bind_key\", None) else None\n\n if not _lru_cache.get(bind_name):\n _lru_cache[bind_name] = super().get_binds(app)\n return _lru_cache[bind_name]", "def get_window_by_name(self, name):\n for window in self.windows:\n if window.name == name:\n return window\n else:\n raise ValueError(\"No source with that name.\")", "def findHost(name):\n return Host(Cuebot.getStub('host').FindHost(\n host_pb2.HostFindHostRequest(name=name), timeout=Cuebot.Timeout).host)", "def _get_binding_record(self):\n binding = 
self.binder.get_bindings(domain=[('id', '=', self.binding_id)])\n assert len(binding) <= 1, \"More than one binding record returned!\"\n if binding:\n assert binding.id == self.binding_id, \"Id of returned binding does not match self.binding_id!\"\n assert binding._name == self.model._name, \"Model of binding record does not match self.model\"\n return binding", "def get_ip_freebind(self):\n if hasattr(socket, \"IP_FREEBIND\"):\n # Valid distribution\n return socket.IP_FREEBIND\n if sys.platform == \"linux2\":\n return 15\n return None", "def create_or_get_buffer(self, name):\n for b in self.nvim.buffers:\n bname = path.basename(b.name)\n if bname == name:\n return b\n\n # Create new buffer\n self.nvim.command('set splitbelow')\n self.nvim.command('new')\n self.nvim.command('setlocal buftype=nofile noswapfile ro')\n self.nvim.command('res 2')\n\n b = self.nvim.current.buffer\n b.name = name\n\n return b", "def get_binding(self, v: str) -> Optional[str]:\n assert is_var(v)\n t = self\n ret = t.binding.get(v)\n while not ret and t.parent:\n t = t.parent\n ret = t.binding.get(v)\n return ret", "def get_network_with_name(self, name):\n for network in self.networks:\n if network.name == name:\n return network\n return None", "def bind(self, server_name: str, port: int) -> None:\n self.socket.bind((server_name, port))", "def bind(self, name, property_type, module):\n return BoundProperty(\n self._default, self.help, self.kind, name, property_type, module,\n self.param_name)", "def get_db(request, name=None):\n\n dbname = name\n registry = request.registry\n\n if name is None:\n dbname = registry.settings.get(DBNAME)\n\n if dbname is None:\n raise ConfigurationError('There is no defined database name')\n\n mongodbs = getattr(request, '_mongo_dbs', dict())\n\n db = mongodbs.get(dbname)\n\n if db is None:\n conn = getattr(registry, '_mongo_conn', None)\n\n if conn is None:\n raise ConfigurationError(\n 'There is no database connection available')\n\n db = conn[dbname]\n\n mongodbs[dbname] = db\n request._mongo_dbs = mongodbs\n\n username = registry.settings.get(USERNAME + '.' + dbname)\n password = registry.settings.get(PASSWORD + '.' + dbname)\n\n if not username is None and not password is None:\n db.authenticate(username, password)\n\n def end_request(request):\n db.logout()\n db.connection.end_request() \n\n request.add_finished_callback(end_request)\n\n return db", "def get_server(name):\n if name in SERVERZ:\n return SERVERZ[name]\n\n server = server_from_config(name)\n return start_server_thread(server)", "def get_bound_adapter_name(self):\n\t\treturn call_sdk_function('PrlVmDevNet_GetBoundAdapterName', self.handle)", "def find_by_name(self, name):\n return self.get(name)", "def lookup(name):", "def lookup(name):", "def lookup(self, name):\n try:\n return self._baseLookup(name)\n except ImportError:\n raise ImportError(\"No module named %r in mapper %r\" % (name, self))", "def get_server_by_name(name):\n servers = get_servers()\n\n name = name.lower()\n\n for server in servers:\n if name in server.name.lower():\n return server\n\n return None", "def nameToAddress(self, name):\n pass", "def add_binding(ctx, binding_name, pool_name, acl_name, nat_type, twice_nat_id):\n\n entryFound = False\n table = 'NAT_BINDINGS'\n key = binding_name\n dataKey1 = 'access_list'\n dataKey2 = 'nat_pool'\n dataKey3 = 'nat_type'\n dataKey4 = 'twice_nat_id'\n\n if acl_name is None:\n acl_name = \"\"\n\n if len(binding_name) > 32:\n ctx.fail(\"Invalid binding name. 
Maximum allowed binding name is 32 characters !!\")\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n data = config_db.get_entry(table, key)\n if data:\n if data[dataKey1] == acl_name and data[dataKey2] == pool_name:\n click.echo(\"Trying to add binding, which is already present.\")\n entryFound = True\n\n binding_dict = config_db.get_table(table)\n if len(binding_dict) == 16:\n click.echo(\"Failed to add binding, as already reached maximum binding limit 16.\")\n entryFound = True\n\n if nat_type is not None:\n if nat_type == \"dnat\":\n click.echo(\"Ignored, DNAT is not yet suported for Binding \")\n entryFound = True\n else:\n nat_type = \"snat\"\n\n if twice_nat_id is None:\n twice_nat_id = \"NULL\"\n\n if entryFound is False:\n count = 0\n if twice_nat_id is not None:\n count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, 'STATIC_NAT', count)\n count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, 'STATIC_NAPT', count)\n count = getTwiceNatIdCountWithDynamicBinding(twice_nat_id, count, key)\n if count > 1:\n ctx.fail(\"Same Twice nat id is not allowed for more than 2 entries!!\")\n\n config_db.set_entry(table, key, {dataKey1: acl_name, dataKey2: pool_name, dataKey3: nat_type, dataKey4: twice_nat_id})", "def get_module(self, name: str) -> ModuleInstance:\n return self.modules[name]", "def reifyBinding(slot):\n return Binding(slot)", "def get_network_by_name(self, name: str) -> Network:\n for network in self._networks:\n if network.name == name:\n return network\n raise errors.NotFoundError(f\"there exists no network named {name!r}\")", "def bind(expr, mfunc):\n return mfunc(expr.node)", "def get_by_name(self, name):\n return self.by_name.get(name.upper())", "def lookup_pattern(name):\n\treturn _registered_patterns[name]", "def load_room(name):\n return globals().get(name)", "def load_room(name):\n return globals().get(name)", "def get_named_lock(self, name):\r\n # Global critical section\r\n self._named_locks_lock.acquire()\r\n if not name in self._named_locks:\r\n self._named_locks[name] = BoundedSemaphore()\r\n self._named_locks_lock.release()\r\n # End global critical section\r\n\r\n self.log.debug(\"Grabbing named lock (%s)\" % name)\r\n self._named_locks[name].acquire()\r\n self.log.debug(\"Got named lock (%s)\" % name)", "def bind_address(self):\n result = c_char_p(self.lib.iperf_get_test_bind_address(self._test)).value\n if result:\n self._bind_address = result.decode('utf-8')\n else:\n self._bind_address = '*'\n\n return self._bind_address", "def get_func_by_name(self, name):\n if(name == self.name):\n res = self\n else:\n res = None\n return res", "def get(cls, context, host_name, expected_attrs=None):\n db_host = cls.dbapi.host_get_by_name(context, host_name)\n host = Host._from_db_object(\n context, cls(context), db_host, expected_attrs)\n return host", "def get(cls, ns, name):\n key_name = '%s:%s' % (ns, name)\n return cls.get_by_key_name(key_name)", "def fromName(name):\n matches = [nn for nn in instances if nn.name == name]\n if len(matches) != 1:\n raise Exception(\n \"Too many or too few ({}) matches for {}\" \"\".format(len(matches), name)\n )\n return matches[0]", "def get_database(self, name):\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)" ]
[ "0.7514782", "0.6765802", "0.6392351", "0.63033557", "0.62660104", "0.6254184", "0.6237753", "0.60957813", "0.6089807", "0.60743713", "0.6036943", "0.60255086", "0.6018643", "0.6014774", "0.59750015", "0.5960484", "0.5835848", "0.57503366", "0.56634986", "0.5630385", "0.56151366", "0.5603026", "0.5388879", "0.5386477", "0.53679335", "0.53679335", "0.53679335", "0.53400207", "0.53335655", "0.5308809", "0.5276524", "0.52759755", "0.527317", "0.52626187", "0.52615106", "0.5260148", "0.52454555", "0.52410924", "0.52185917", "0.5210944", "0.52099067", "0.51937413", "0.51792026", "0.5173576", "0.51725864", "0.51569474", "0.51299876", "0.5112417", "0.5110237", "0.5105142", "0.5077962", "0.5068313", "0.50649065", "0.50636077", "0.50390905", "0.5034031", "0.502886", "0.50281996", "0.50225466", "0.5008291", "0.50076616", "0.5003849", "0.5001391", "0.49935868", "0.49760628", "0.4971748", "0.496701", "0.49630573", "0.49628657", "0.4961637", "0.49590167", "0.4946479", "0.49374658", "0.49372253", "0.49318498", "0.4928342", "0.4920891", "0.4914913", "0.4899832", "0.4899718", "0.48972967", "0.48972967", "0.48953366", "0.48879755", "0.48669454", "0.4863677", "0.48611185", "0.48575282", "0.4852914", "0.4845692", "0.4830069", "0.48219094", "0.48059803", "0.48059803", "0.48032704", "0.47996017", "0.4797795", "0.47927985", "0.47864798", "0.47849998", "0.47763824" ]
0.0
-1
solver that searches in the dictionary
def solver2_init(self):
    result = []
    colors = ["white" for e in self.lettersList]
    for letter in self.dico:
        #print(letter)
        self.solver2_rec([], letter, result, colors, self.dico)
    res = set(result)
    res = self.decreasingList(res)
    return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve(self):", "def solve(self):\n ...", "def get_sol(self):", "def solve(self):\n pass", "def solve(self):\n pass", "def solve(ctx):\n my_solver(ctx.obj['filename'])", "def solve(self, solver):\n solver.solve()", "def satisfied_constraints(self,word_id, possible_word):\r\n constraints = self.words[word_id].constraints\r\n results = {}\r\n # print(\"word_id: {}, possible_word: {}, visited: {}, num_of_satisfied: {}\".format(word_id, possible_word, visited, num_of_satisfied)) \r\n for constraint in constraints:\r\n possibilities = []\r\n if word_id == constraint.word1:\r\n for possible_word2 in self.words[constraint.word2].domain:\r\n check = constraint.check_constraint(possible_word,possible_word2)\r\n if check:\r\n possibilities.append(possible_word2)\r\n if len(possibilities) != 0:\r\n results[constraint.word2] = possibilities\r\n elif word_id == constraint.word2:\r\n for possible_word2 in self.words[constraint.word1].domain:\r\n check = constraint.check_constraint(possible_word2,possible_word)\r\n if check:\r\n possibilities.append(possible_word2)\r\n if len(possibilities) != 0:\r\n results[constraint.word1] = possibilities\r\n return results", "def solve(self, current_state: dict) -> dict:", "def add_searcher_constraints(md, g, my_vars: dict, start: list, vertices_t: dict, deadline: int):\n # get variables\n X = get_var(my_vars, 'x')\n Y = get_var(my_vars, 'y')\n\n S, m = ext.get_set_searchers(start)\n Tau_ext = ext.get_set_time_u_0(deadline)\n\n # legality of the paths, for all s = {1,...m}\n for s in S:\n # 0, 1, 2... T\n for t in Tau_ext:\n v_t = vertices_t.get((s, t))\n # each searcher can only be at one place at each time (including the start vertex), Eq. (1, 7)\n if t == 0:\n md.addConstr(X[s, v_t[0], 0] == 1)\n\n for u in v_t:\n my_next_v = cm.get_next_vertices(g, s, u, t, vertices_t, Tau_ext)\n my_previous_v = cm.get_previous_vertices(g, s, u, t, vertices_t)\n if my_next_v is not None:\n # (Eq. 9) searcher can only move to: i in delta_prime(v) AND V^tau(t+1)\n # sum == 1 if searcher is at u, sum == zero if searcher is not at u (depends on X[s, u, t])\n md.addConstr(quicksum(Y[s, u, i, t] for i in my_next_v) == X[s, u, t])\n\n if my_previous_v is not None:\n # (Eq. 
8) searcher can only move to v from j in delta_prime(v) AND V^tau(t-1)\n md.addConstr(quicksum(Y[s, i, u, t - 1] for i in my_previous_v) == X[s, u, t])", "def run(self, problem):\n\n self.pyopt_solution = None\n rel = problem.root._probdata.relevance\n\n # Metadata Setup\n self.metadata = create_local_meta(None, self.options['optimizer'])\n self.iter_count = 0\n update_local_meta(self.metadata, (self.iter_count,))\n\n # Initial Run\n with problem.root._dircontext:\n problem.root.solve_nonlinear(metadata=self.metadata)\n\n opt_prob = Optimization(self.options['title'], self._objfunc)\n\n # Add all parameters\n param_meta = self.get_desvar_metadata()\n self.indep_list = indep_list = list(param_meta)\n param_vals = self.get_desvars()\n\n for name, meta in iteritems(param_meta):\n opt_prob.addVarGroup(name, meta['size'], type='c',\n value=param_vals[name],\n lower=meta['lower'], upper=meta['upper'])\n\n opt_prob.finalizeDesignVariables()\n\n # Figure out parameter subsparsity for paramcomp index connections.\n # sub_param_conns is empty unless there are some index conns.\n # full_param_conns gets filled with the connections to the entire\n # parameter so that those params can be filtered out of the sparse\n # set if the full path is also relevant\n sub_param_conns = {}\n full_param_conns = {}\n for name in indep_list:\n pathname = problem.root.unknowns.metadata(name)['pathname']\n sub_param_conns[name] = {}\n full_param_conns[name] = set()\n for target, info in iteritems(problem.root.connections):\n src, indices = info\n if src == pathname:\n if indices is not None:\n # Need to map the connection indices onto the desvar\n # indices if both are declared.\n dv_idx = param_meta[name].get('indices')\n indices = set(indices)\n if dv_idx is not None:\n indices.intersection_update(dv_idx)\n ldv_idx = list(dv_idx)\n mapped_idx = [ldv_idx.index(item) for item in indices]\n sub_param_conns[name][target] = mapped_idx\n else:\n sub_param_conns[name][target] = indices\n else:\n full_param_conns[name].add(target)\n\n # Add all objectives\n objs = self.get_objectives()\n self.quantities = list(objs)\n self.sparsity = OrderedDict()\n self.sub_sparsity = OrderedDict()\n for name in objs:\n opt_prob.addObj(name)\n self.sparsity[name] = self.indep_list\n\n # Calculate and save gradient for any linear constraints.\n lcons = self.get_constraints(lintype='linear').keys()\n if len(lcons) > 0:\n self.lin_jacs = problem.calc_gradient(indep_list, lcons,\n return_format='dict')\n #print(\"Linear Gradient\")\n #print(self.lin_jacs)\n\n # Add all equality constraints\n econs = self.get_constraints(ctype='eq', lintype='nonlinear')\n con_meta = self.get_constraint_metadata()\n self.quantities += list(econs)\n\n self.active_tols = {}\n for name in self.get_constraints(ctype='eq'):\n meta = con_meta[name]\n size = meta['size']\n lower = upper = meta['equals']\n\n # Sparsify Jacobian via relevance\n rels = rel.relevant[name]\n wrt = rels.intersection(indep_list)\n self.sparsity[name] = wrt\n\n if meta['linear']:\n opt_prob.addConGroup(name, size, lower=lower, upper=upper,\n linear=True, wrt=wrt,\n jac=self.lin_jacs[name])\n else:\n\n jac = self._build_sparse(name, wrt, size, param_vals,\n sub_param_conns, full_param_conns, rels)\n opt_prob.addConGroup(name, size, lower=lower, upper=upper,\n wrt=wrt, jac=jac)\n\n active_tol = meta.get('active_tol')\n if active_tol:\n self.active_tols[name] = active_tol\n\n # Add all inequality constraints\n incons = self.get_constraints(ctype='ineq', lintype='nonlinear')\n self.quantities += 
list(incons)\n\n for name in self.get_constraints(ctype='ineq'):\n meta = con_meta[name]\n size = meta['size']\n\n # Bounds - double sided is supported\n lower = meta['lower']\n upper = meta['upper']\n\n # Sparsify Jacobian via relevance\n rels = rel.relevant[name]\n wrt = rels.intersection(indep_list)\n self.sparsity[name] = wrt\n\n if meta['linear']:\n opt_prob.addConGroup(name, size, upper=upper, lower=lower,\n linear=True, wrt=wrt,\n jac=self.lin_jacs[name])\n else:\n\n jac = self._build_sparse(name, wrt, size, param_vals,\n sub_param_conns, full_param_conns, rels)\n opt_prob.addConGroup(name, size, upper=upper, lower=lower,\n wrt=wrt, jac=jac)\n\n active_tol = meta.get('active_tol')\n if active_tol is not None:\n self.active_tols[name] = active_tol\n\n # Instantiate the requested optimizer\n optimizer = self.options['optimizer']\n try:\n _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0)\n opt = getattr(_tmp, optimizer)()\n except ImportError:\n msg = \"Optimizer %s is not available in this installation.\" % \\\n optimizer\n raise ImportError(msg)\n\n #Set optimization options\n for option, value in self.opt_settings.items():\n opt.setOption(option, value)\n\n self._problem = problem\n self.opt_prob = opt_prob\n\n # Execute the optimization problem\n if self.options['gradient method'] == 'pyopt_fd':\n\n # Use pyOpt's internal finite difference\n fd_step = problem.root.deriv_options['step_size']\n sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file)\n\n elif self.options['gradient method'] == 'snopt_fd':\n if self.options['optimizer']=='SNOPT':\n\n # Use SNOPT's internal finite difference\n fd_step = problem.root.deriv_options['step_size']\n sol = opt(opt_prob, sens=None, sensStep=fd_step, storeHistory=self.hist_file)\n\n else:\n msg = \"SNOPT's internal finite difference can only be used with SNOPT\"\n raise Exception(msg)\n else:\n\n # Use OpenMDAO's differentiator for the gradient\n sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file)\n\n self._problem = None\n\n # Print results\n if self.options['print_results']:\n print(sol)\n\n # Pull optimal parameters back into framework and re-run, so that\n # framework is left in the right final state\n dv_dict = sol.getDVs()\n for name in indep_list:\n val = dv_dict[name]\n self.set_desvar(name, val)\n\n with self.root._dircontext:\n self.root.solve_nonlinear(metadata=self.metadata)\n\n # Save the most recent solution.\n self.pyopt_solution = sol\n try:\n exit_status = sol.optInform['value']\n self.exit_flag = 1\n if exit_status > 2: # bad\n self.exit_flag = 0\n except KeyError: #nothing is here, so something bad happened!\n self.exit_flag = 0", "def __defineVariablesAndConstraints(self):\n # VM usage vector vm in {0, 1}, k = 1..M; vm_k = 1 if at least one component is assigned to vm_k.\n #self.vm = {}\n # Assignment matrix a_{alpha,k}: 1 if component alpha is on machine k, 0 otherwise\n self.a = {}\n # VMType - type of a leased VM\n self.VMType = {}\n\n # values from availableConfigurations\n self.ProcProv = [Real('ProcProv%i' % j) for j in range(1, self.nrVM + 1)]\n self.MemProv = [Real('MemProv%i' % j) for j in range(1, self.nrVM + 1)]\n self.StorageProv = [Real('StorageProv%i' % j) for j in range(1, self.nrVM + 1)]\n self.PriceProv = [Real('PriceProv%i' % j) for j in range(1, self.nrVM + 1)]\n\n self.a = [Bool('C%i_VM%i' % (i + 1, j + 1)) for i in range(self.nrComp) for j in range(self.nrVM)]\n self.vmType = [Real('VM%iType' % j) for j in range(1, self.nrVM + 1)]\n # vmType is 
one of the types from availableConfigurations\n for i in range(len(self.vmType)):\n lst = [self.vmType[i] == t for t in range(1, len(self.availableConfigurations) + 1)]\n self.solver.add(Or(lst))\n\n #If a machine is not leased then its price is 0\n for j in range(self.nrVM):\n bvars = [self.a[i + j] for i in range(0, len(self.a), self.nrVM)]\n self.solver.add(Implies(PbEq([(x, 1) for x in bvars], 0), self.PriceProv[j] == 0))\n\n for j in range(self.nrComp):\n lst = [self.a[i+j] for i in range(0, len(self.a), self.nrVM)]\n print(\"lst \", lst)\n for l in range(len(lst)-1):\n print(\"???\", lst[l] == lst[l+1])\n self.solver.add(lst[l] == lst[l+1])\n\n\n # encode offers\n for t in range(len(self.availableConfigurations)):\n for j in range(self.nrVM):\n if self.solverTypeOptimize:\n bvars = [self.a[i+j] for i in range(0, len(self.a), self.nrVM)]\n\n self.solver.add(Implies(And(PbGe([(x, 1) for x in bvars], 1), self.vmType[j] == t+1),\n And(self.PriceProv[j] == (self.availableConfigurations[t][len(self.availableConfigurations[0]) - 1]),\n self.ProcProv[j] == self.availableConfigurations[t][1],\n self.MemProv[j] == (self.availableConfigurations[t][2]),\n self.StorageProv[j] == (self.availableConfigurations[t][3])\n )\n ))\n else:\n self.solver.assert_and_track(Implies(And(PbGe([(x, 1) for x in bvars], 1), self.vmType[j] == t+1),\n And(self.PriceProv[j] == (self.availableConfigurations[t][len(self.availableConfigurations[0]) - 1]),\n self.ProcProv[j] == self.availableConfigurations[t][1],\n self.MemProv[j] == (self.availableConfigurations[t][2]),\n self.StorageProv[j] == (self.availableConfigurations[t][3])\n )\n ), \"LabelOffer\" + str(self.labelIdx_offer))\n self.labelIdx_offer += 1", "def lookup():", "def solve(grid):\n #translate from string representation to dict to solve it further\n values = grid_values(grid)\n return search(values)", "def __init__(self, database):\n self.max_axiom_arity = max([p.arity() for p in database.non_entails_axioms.itervalues()]) + 1 # one more than the max\n\n self.database = database\n\n self.max_unconstrained_arity = 10000000\n self.searcher = SearchProblem(database, max_unconstrained_arity = self.max_unconstrained_arity)\n\n self.tautologies = set()\n for p in self.database.propositions.itervalues():\n e_hyps = [h for h in p.hyps if h.type == 'e']\n if p.vclass=='|-' and len(e_hyps) == 0:\n self.tautologies.add(p.label)\n print 'tautologies:', len(self.tautologies)\n \n # the propositions with trivial unconstrained arity. 
That is, the ones\n # that are really easy to apply.\n self.constrained_propositions = set(\n p.label for p in self.database.propositions.itervalues()\n if p.vclass == '|-' and p.unconstrained_arity() == 0\n )\n \n # figure out the names of the read variables\n # self.real_wff_names = set()\n # self.real_set_names = set()\n # self.real_class_names = set()\n # real_name_dict = {'wff': self.real_wff_names, 'set': self.real_set_names, 'class': self.real_class_names}\n #\n # for p in self.database.propositions.itervalues():\n # for label in p.f:\n # vclass = p.f[label].vclass\n # real_name_dict[vclass].add(label)\n # print real_name_dict\n\n self.constructor_dictionary = [{} for _ in range(self.max_axiom_arity)]\n\n # we need to define some extra variables, which we'll randomly assign when we read in a statement\n # this is a reasonable amount of data augmentation.\n self.extra_wffs = language_model_extra_variables_of_each_type+max(len([f for f in p.f.itervalues() if f.vclass=='wff']) for p in database.propositions.itervalues() )\n self.extra_classes = language_model_extra_variables_of_each_type+max(len([f for f in p.f.itervalues() if f.vclass=='class']) for p in database.propositions.itervalues() )\n self.extra_sets = language_model_extra_variables_of_each_type+max(len([f for f in p.f.itervalues() if f.vclass=='set']) for p in database.propositions.itervalues() )\n\n # hand code these in.\n self.extra_sets = 20\n self.extra_wffs = 18\n self.extra_classes = 27\n\n self.wff_names = ['WFFVar'+str(i) for i in range(self.extra_wffs)]\n self.set_names = ['SetVar'+str(i) for i in range(self.extra_sets)]\n self.class_names = ['ClassVar'+str(i) for i in range(self.extra_classes)]\n\n self.num_extra_variable_names = len(self.wff_names)+len(self.set_names)+len(self.class_names)\n self.extra_variable_dict = {}\n\n # the names for the unconstrained variables\n #self.ua_names = ['UA'+str(i) for i in range(self.max_unconstrained_arity)]\n\n # add them to the dictionary\n arityzerodict = self.constructor_dictionary[0]\n for i in range(self.extra_wffs):\n arityzerodict['WFFVar'+str(i)]=len(arityzerodict)\n self.extra_variable_dict['WFFVar'+str(i)]=len(self.extra_variable_dict)\n for i in range(self.extra_classes):\n arityzerodict['ClassVar'+str(i)]=len(arityzerodict)\n self.extra_variable_dict['ClassVar'+str(i)]=len(self.extra_variable_dict)\n for i in range(self.extra_sets):\n arityzerodict['SetVar'+str(i)]=len(arityzerodict)\n self.extra_variable_dict['SetVar'+str(i)]=len(self.extra_variable_dict)\n # for i in range(len(self.ua_names)):\n # arityzerodict['UA'+str(i)]=len(arityzerodict)\n # self.extra_variable_dict['UA'+str(i)]=len(self.extra_variable_dict)\n\n # a block to create a dictionary that takes a symbol to its vclass\n self.symbol_to_vclass = {label:database.propositions[label].vclass for label in database.non_entails_axioms}\n for symbol in self.wff_names:\n self.symbol_to_vclass[symbol] = 'wff'\n for symbol in self.set_names:\n self.symbol_to_vclass[symbol] = 'set'\n for symbol in self.class_names:\n self.symbol_to_vclass[symbol] = 'class'\n\n # a list of all of the extra variables, for use later\n self.new_names = self.wff_names+self.set_names+self.class_names\n\n # describe the number of variables we've used\n print 'wff variables:',self.extra_wffs\n print 'class variables:',self.extra_classes\n print 'set variables:',self.extra_sets\n #print 'ua variables:', self.ua_names\n\n # now add the actual constructor axioms to our dictionary\n for p in database.non_entails_axioms.itervalues():\n c_dict 
= self.constructor_dictionary[p.arity()]\n            c_dict[p.label] = len(c_dict)\n\n        for i in range(self.max_axiom_arity):\n            print len(self.constructor_dictionary[i]),'constructor axioms with arity',i\n\n        # build a pair of dictionaries that convert (arity,num) to total_num\n        # and vice versa. This is ugly. Whatever\n        self.arity_num_to_global_index = {}\n        self.global_index_to_arity_num=[]\n        global_index = 0\n        for arity in range(self.max_axiom_arity):\n            for num in range(len(self.constructor_dictionary[arity])):\n                self.global_index_to_arity_num.append((arity,num))\n                self.arity_num_to_global_index[(arity,num)]=global_index\n                global_index+=1\n\n        \"\"\"sets up the data sets. We divide the propositions into training/validation/test and\n        then compile the corresponding list of statements\"\"\"\n        list_of_propositions = self.database.propositions_list[:] # database.propositions.values()\n        np.random.seed(seed=121451345)\n        list_of_propositions = np.random.permutation(list_of_propositions)\n\n        num_validation = len(list_of_propositions)/10\n        num_test = num_validation\n        num_training = len(list_of_propositions)-num_test-num_validation\n        self.training_propositions = list_of_propositions[:num_training]\n        self.training_propositions = [_ for _ in self.training_propositions if _.type=='p']\n        self.validation_propositions = list_of_propositions[num_training:num_training+num_validation]\n        self.validation_propositions = [_ for _ in self.validation_propositions if _.type=='p']\n        self.test_propositions = list_of_propositions[num_training+num_validation:]\n        self.test_propositions = [_ for _ in self.test_propositions if _.type=='p']\n\n        if self.database.remember_proof_steps:\n            self.all_proof_steps = [] # except those that refer to e or f-type hypotheses\n            for p in self.database.propositions.itervalues():\n                self.all_proof_steps += [step for step in p.entails_proof_steps if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n\n            self.training_proof_steps = []\n            for p in self.training_propositions:\n                self.training_proof_steps += [step for step in p.entails_proof_steps\n                                              if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n            self.validation_proof_steps = []\n            for p in self.validation_propositions:\n                self.validation_proof_steps += [step for step in p.entails_proof_steps\n                                                if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n            self.test_proof_steps = []\n            for p in self.test_propositions:\n                self.test_proof_steps += [step for step in p.entails_proof_steps\n                                          if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n            print\n            print 'training steps:', len(self.training_proof_steps)\n            print 'validation steps:', len(self.validation_proof_steps)\n            print 'test steps:', len(self.test_proof_steps)\n\n\n            # figure out how frequently each proposition is used\n            self.prop_usage = [0 for p in self.database.propositions]\n            for s in self.all_proof_steps:\n                self.prop_usage[s.prop.number]+=1\n\n            # figure out what the most difficult proof step is\n            self.max_depth = max([s.height for s in self.all_proof_steps]) + 1\n            print 'max proof step depth:', self.max_depth-1\n\n\n            # figure out the number of times each proposition is used.\n            # self.prop_uses = [0.1] * len(self.database.propositions) # for numerical stability\n            # for step in self.all_proof_steps:\n            #     self.prop_uses[step.prop.number] += 1\n            # self.initial_b = np.log(1.0*np.array(self.prop_uses)/sum(self.prop_uses))\n\n\n        # build up a database of propositions by unconstrained arity\n        # that is, total_unconstrained_arity is the total\n        # of all of the unconstrained arities of all of the 
propositions.\n # and unconstrained_arity_indices is a list of p.unconstrained_arity()\n # unique indices for each proposition p.\n self.total_unconstrained_arity = 0\n self.unconstrained_arity_indices = {}\n self.unconstrained_label_to_number = {}\n for p in self.database.propositions_list: # in order of proposition number\n u_arity = p.unconstrained_arity()\n self.unconstrained_arity_indices[p.label]=range(self.total_unconstrained_arity, self.total_unconstrained_arity + u_arity)\n self.total_unconstrained_arity += u_arity\n self.unconstrained_label_to_number[p.label]=len(self.unconstrained_label_to_number)\n #self.max_unconstrained_arity = max([p.unconstrained_arity() for p in self.database.propositions.itervalues()])\n\n self.total_constructor_arity = 0\n self.constructor_arity_indices = {}\n self.constructor_label_to_number = {}\n self.constructor_labels = []\n for p in database.non_entails_axioms.itervalues():\n u_arity = p.arity()\n self.constructor_arity_indices[p.label]=range(self.total_constructor_arity, self.total_constructor_arity + u_arity)\n self.total_constructor_arity += u_arity\n self.constructor_label_to_number[p.label]=len(self.constructor_label_to_number)\n self.constructor_labels.append(p.label)\n for name in self.wff_names+self.set_names+self.class_names: #+self.ua_names:\n self.constructor_arity_indices[name] = [] # the extra arity 0 constructors\n self.constructor_label_to_number[name]=len(self.constructor_label_to_number)\n self.constructor_labels.append(name)\n\n # a lookup table for the index into all the propositions of the label\n self.label_to_number = {x.label:x.number for x in self.database.propositions.itervalues()}\n for x in self.new_names:\n self.label_to_number[x] = -1 # all variables should always be included", "def solve(grid):\n\n return search(grid_values(grid))", "def solve(grid):\n\n return search(grid_values(grid))", "def _solve(self, solver):\n self.prob.solve(solver)\n if self.prob.status <= 0:\n raise Exception(\"Infeasible Solution.\")\n return {pid for pid, variable \n in self.player_vars.iteritems()\n if variable.varValue}", "def lookup(self, key):", "def solve(grid):\n return search(grid_values(grid))", "def solve(grid):\n return search(grid_values(grid))", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def solve(grid):\n #setting the units\n values = grid_values(grid)\n #display(values)\n sol = search(values)\n return sol", "def search_method(self): \n self.setup_problem()\n self.problem_results = minimize(self._problem, \n self.algorithm, \n self.termination,\n seed=1,\n verbose=False)\n \n self.log_debug('Core design variables determined: {}'.format(self.current_design_variables))", "def solve(grid):\n puzzle_dict = grid_values(grid)\n return search(puzzle_dict)", "def __init__(self):\n self.lookup = {}", "def solve(self):\n solved = self.formula.satisfy_one() or {}\n sol = [self.fid_to_var(str(var)) for var in list(solved.keys()) if solved[var] == 1]\n sol.sort(key = lambda var: var.split('_')[-1])\n count = self.formula.satisfy_count()\n\n return (sol, count)", "def solve(self):\r\n words = list(self.words.keys())\r\n words.sort(key= self.get_length,reverse = True)\r\n self.satisfiers = {}\r\n print(\"\\nTrying to populate the grid...\")\r\n for word_id in words:\r\n self.satisfiers[word_id] = {}\r\n for possible_word 
in self.words[word_id].domain:\r\n result = self.satisfied_constraints(word_id,possible_word)\r\n self.satisfiers[word_id][possible_word] = result\r\n # print(\"\\nword_id: {}, possible_word: {}, result: {}\".format(word_id,possible_word, result))\r\n \r\n final_answers = {}\r\n highest_conf = 0\r\n for word_id in words:\r\n found_words,score = self.evaluate_score(word_id)\r\n # print(\"\\nword_id: {}, found: {}, score: {}\".format(word_id,found_words,score))\r\n for el in found_words.keys():\r\n if el in final_answers.keys():\r\n if found_words[el][1] > final_answers[el][0]:\r\n final_answers[el] = [found_words[el][1],found_words[el][0]]\r\n elif found_words[el][1] == final_answers[el][0] and found_words[el][0] not in final_answers[el]:\r\n final_answers[el].append(found_words[el][0])\r\n else:\r\n final_answers[el] = [found_words[el][1],found_words[el][0]]\r\n if final_answers[el][0] > highest_conf:\r\n highest_conf = final_answers[el][0] \r\n print()\r\n print(final_answers) \r\n \r\n #sort the elements of dictionary so that highest confidence comes first in for loop\r\n final_answers = {k: v for k, v in sorted(final_answers.items(), key=lambda item: item[1][0],reverse=True)}\r\n secondary = dict(final_answers)\r\n #first run that we restrict the confidence to be minimum 50%\r\n for key in final_answers.keys():\r\n if final_answers[key][0] >= self.words[key].length/2:\r\n high_conf = final_answers[key][0] == highest_conf\r\n check, word = self.check_grid(key,final_answers[key][1:],high_conf)\r\n if check:\r\n if word != None:\r\n self.words[key].assign_word(word,self.cells)\r\n print(\"Assigned word for {}: {}\".format(key,word))\r\n secondary.pop(key)\r\n \r\n #secondary run that any confidence value can be assigned \r\n for key in secondary.keys():\r\n if secondary[key][0] > 0:\r\n check, word = self.check_grid(key,secondary[key][1:],False)\r\n if check:\r\n if word != None:\r\n self.words[key].assign_word(word,self.cells)\r\n print(\"Assigned word for {}: {}\".format(key,word))", "def uniformCostSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\r\n\r\n\tutil.raiseNotDefined()", "def solve(self):\n \n raise NotImplementedError(\"not implemented!\")", "def solve(num_wizards, num_constraints, wizards, constraints): \n global wiz_const\n wiz_const = mapConstraints(wizards, constraints)\n partial_soltns = []\n\n # counter for priority queue since it doesn't allow \n # identical priorities\n k = 0\n\n # list of wizards sorted by lowest to highest degree\n sorted_wiz = sortWizByConsts(wiz_const)\n wiz_rankings = {wiz: i for i, wiz in enumerate(sorted_wiz)}\n\n const_set = set(map(tuple, constraints))\n for i in range(4) : \n heapq.heappush(partial_soltns, (0, k, nx.DiGraph(), const_set.copy()))\n k += 1\n\n print(\"setup done, commencing solving\")\n\n while len(partial_soltns) : \n\n # for partial_soltn, const_set in partial_soltns : \n# partial_soltns.remove(partial_soltn)\n num_seen, _, partial_soltn, const_set = heapq.heappop(partial_soltns)\n const = findNextConst(partial_soltn, const_set, wiz_rankings)\n print(\"seen \" + str(len(partial_soltn)) + \"\\t num partial_solutions\\t\" + str(len(partial_soltns)))\n try : \n const_set.remove(const)\n except KeyError : \n print(\"BAD SHIT\")\n pass\n possible_arrangements = [(const[0], const[1], const[2]),\n (const[2], const[0], const[1]), \n (const[2], const[1], const[0]),\n (const[1], const[0], const[2])]\n for arr in possible_arrangements:\n soltn = partial_soltn.copy()\n a, b, c = arr\n if not (soltn.has_node(a) and soltn.has_node(b) 
and nx.has_path(soltn, a, b)) : \n soltn.add_edge(a, b)\n if not (soltn.has_node(b) and soltn.has_node(c) and nx.has_path(soltn, b, c)) : \n soltn.add_edge(b, c)\n # see if we violated any other constraints (seen or not seen)\n is_valid, num_wiz = validNumWiz(soltn, const_set)\n\n if is_valid and len(list(nx.simple_cycles(soltn))) == 0 :\n heapq.heappush(partial_soltns, (-len(soltn), k, soltn, const_set.copy()))\n k += 1\n # are we done?\n if num_wiz == num_wizards :\n print(\"FINAL SOLUTION (found without processing all constraints but validating against them)\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n if foundCompleteOrdering(heapq.heappop(partial_soltns)) : \n print(\"FINAL SOLUTION\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n print(\"NO SOLUTION FOUND\")\n return \"\"", "def __init__(self, equation_dict):\n self.equation = equation_dict['equation']\n self.variables = equation_dict['variables']\n self.dict = equation_dict\n self.x = list(self.variables)[-1]['variable'] # The variable to solve for", "def _extract_solution(self, manager: RoutingIndexManager, routing: RoutingModel, assignment: Assignment, indices_to_visit: List[int]) -> Dict[str, Any]:\n sln = {\"objective\": assignment.ObjectiveValue()}\n \n stop_indices = []\n index = routing.Start(0)\n while not routing.IsEnd(index):\n relative_index = manager.IndexToNode(index)\n stop_indices.append(indices_to_visit[relative_index])\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n relative_index = manager.IndexToNode(index)\n stop_indices.append(indices_to_visit[relative_index])\n sln[\"order\"] = stop_indices\n return sln", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if 
ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def solve(self, **kwargs):\n return self.system.solve(**kwargs)", "def solve(puzzle_input):\r\n return {'a': part_a(puzzle_input), 'b': part_b(puzzle_input)}", "def main():\n # Instantiate a mixed-integer solver.\n solver = pywraplp.Solver('SolveAssignmentProblemMIP',\n pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n\n # Number of teams (h and i)\n n = 9\n # Number of rooms (j)\n r = 3\n # Number of timeslots (k)\n t = 4\n # Number of matches\n m = 4\n\n # List of teams\n teams = [i for i in range(9)]\n\n x = {}\n\n for h in range(n):\n for i in range(n):\n for j in range(r):\n for k in range(t):\n if (h == i):\n x[h, i, j, k] = solver.IntVar(0, 0, 'x[%i,%i,%i,%i]' % (h, i, j, k))\n else:\n x[h, i, j, k] = solver.IntVar(0, 1, 'x[%i,%i,%i,%i]' % (h, i, j, k))\n\n # # Objective\n # solver.Minimize(solver.Sum([cost[i][j] * x[i,j] for i in range(num_workers)\n # for j in range(num_tasks)]))\n\n # Constraints\n\n # 2 Ensures that the matrix is the same across the diagonal\n for h in range(n):\n for j in range(r):\n for k in range(t):\n solver.Add((x[h, i, j, k] == x[i, h, j, k]))\n\n # 3 No pair plays each other more than once\n for h in range(n - 1):\n for i in range(h + 1, n):\n solver.Add(solver.Sum([x[h, i, j, k] for j in range(r) for k in range(t)]) <= 1)\n\n # 4 No team can be in more than one place at a time\n for h in range(n):\n for k in range(t):\n solver.Add(solver.Sum([x[h, i, j, k] for i in range(n) for j in range(r)]) <= 2)\n\n # 5 Each team plays exactly m matches\n for i in range(n):\n solver.Add(solver.Sum([x[h, i, j, k] for j in range(r) for k in range(t) for h in range(n)]) == 2 * m)\n\n # 6 Need 3 teams in a room at each timeslot\n for j in range(r):\n for k in range(t - 1):\n solver.Add(solver.Sum([x[h, i, j, k] for i in range(n - 1) for h in range(i + 1, n)]) == 3)\n\n # Need 3 teams in a room at each timeslot\n for g in range(n - 2):\n for h in range(g + 1, n - 1):\n for i in range(h + 1, n):\n solver.Add(solver.Sum(\n [x[g, h, j, k] + x[h, i, j, k] + x[g, i, j, k] for j in range(r) for k in range(t)]) != 2)\n\n sol = solver.Solve()\n\n print('Total cost = ', solver.Objective().Value())\n print()\n for h in range(n):\n for i in range(n):\n for j in range(r):\n for k in range(t):\n if x[h, i, j, k].solution_value() > 0:\n print('teams %i,%i assigned to room %i at time %i.' 
% (h, i, j, k))\n\n print()\n print(\"Time = \", solver.WallTime(), \" milliseconds\")", "def solve(d):\n # Initialize concrete model.\n model = ConcreteModel()\n # We'll have variables g1 and g2\n model.g = Var([1, 2])\n model.obj = Objective(expr=25*model.g[1] + 35*model.g[2])\n model.constraint1 = Constraint(expr=model.g[1] + model.g[2] == d)\n model.constraint2 = Constraint(rule=gen1_constraint)\n model.constraint3 = Constraint(rule=gen2_constraint)\n # Alternatively to specifying a rule which returns a 3 element\n # tuple, the generator constraints can be added individually.\n # model.constraint4 = Constraint(expr=model.g[2] >= 20)\n # model.constraint5 = Constraint(expr=model.g[2] <= 100)\n return model", "def solve(self):\n for step in self.run.values():\n step.solve()", "def solve(self):\n raise NotImplementedError(\"This method needs to be implemented.\")", "def solve(self, grid):\n return self.search(self.parse_grid(grid))", "def search(self):\n\n term = self.substitute()\n ##print (\"searching:\",term)\n ##print (\"in facts\",self.facts)\n ##input()\n bindings = deepcopy(self.bindings)\n found = False\n for fact in self.facts:\n found = self.unify(term,fact,bindings)\n if found:\n bound_vars = list(bindings.keys())\n n_bound_vars = len(bound_vars)\n for i in range(n_bound_vars):\n for j in range(i+1,n_bound_vars):\n if bindings[bound_vars[i]] == bindings[bound_vars[j]]:\n return False\n self.facts.remove(self.substitute_with_bindings(bindings)) #THINK ABOUT THIS\n break\n return found", "def search(d,key):\n\treturn dfs(d,key)", "def __init__(self, variables, domains, neighbors, constraints, C):\r\n super().__init__(())\r\n variables = variables or list(domains.keys())\r\n self.variables = variables\r\n self.domains = domains\r\n self.neighbors = neighbors\r\n self.constraints = constraints\r\n self.curr_domains = None\r\n # visited nodes\r\n self.nassigns = 0\r\n self.conflict_set = {} #dictionary which stores the conflict set of each variable for fc - cbj\r\n self.prev_conflict_set = [] # we store the conflict set from the variable that causes dead-end\r\n self.deadend = None # we save the dead end variable in fc - cbj\r\n # initializating the conflict set array\r\n for x in self.variables:\r\n self.conflict_set[x]=[]\r\n # --------------------------\r\n # keep track of total checks for each algo\r\n self.totchecks=0\r\n # dict for later use in dom / wdeg heuristic\r\n # we initializating weights from constraints to 1\r\n self.weight = {}\r\n for each in C.keys():\r\n self.weight[(each[0],each[1])] = 1", "def check_sol(filepath, value_dict, eps=1e-8, print_values=False):\n\n graph = nx.DiGraph()\n relu_nodes = set()\n max_pool_nodes = set()\n linear_nodes = set()\n relu_in_nodes = set()\n mip = MIPwithBounds(filepath, 1e-7)\n model, vars = mip.read_file_into_graph()\n # vars is a dict of the input nodes\n\n output_cons = []\n input_cons = []\n\n input_bounds = {}\n with open(filepath, \"r\") as f:\n for line in f:\n if line.startswith(\"#\"):\n continue\n elements = line.split()\n\n if elements[0] == \"Input\":\n input_bounds[elements[1]] = {\"lb\": None, \"ub\": None}\n graph.add_node(elements[1], node_type=\"input\")\n\n if elements[0] == \"ReLU\":\n bias = float(elements[2])\n variables, coeffs = get_vars_and_coefficients(elements)\n\n relu_nodes.add(elements[1])\n graph.add_node(elements[1] + \"_in\", bias=bias)\n graph.add_edge(elements[1] + \"_in\", elements[1])\n relu_in_nodes.add(elements[1] + \"_in\")\n for v, w in zip(variables, coeffs):\n graph.add_edge(v, 
elements[1] + \"_in\", weight=w)\n\n if elements[0] == \"Linear\":\n linear_nodes.add(elements[1])\n bias = float(elements[2])\n variables, coeffs = get_vars_and_coefficients(elements)\n\n graph.add_node(elements[1], bias=bias)\n for v, w in zip(variables, coeffs):\n graph.add_edge(v, elements[1], weight=w)\n\n if elements[0] == \"MaxPool\":\n max_pool_nodes.add(elements[1])\n graph.add_node(elements[1], node_type=\"max_pool\")\n graph.add_edges_from(((v, elements[1]) for v in elements[2:]), weight=1)\n\n if elements[0] == \"AssertOut\":\n output_cons.append((float(elements[2]), elements[1], get_vars_and_coefficients(elements)))\n\n if elements[0] == \"Assert\":\n input_cons.append((float(elements[2]), elements[1], get_vars_and_coefficients(elements)))\n \"\"\"if len(elements) == 5 and elements[-1] in input_bounds:\n if elements[1] == \"<=\":\n new_lb = float(elements[2]) / float(elements[3])\n if input_bounds[elements[-1]][\"lb\"] is None or input_bounds[elements[-1]][\"lb\"] < new_lb:\n input_bounds[elements[-1]][\"lb\"] = new_lb\n\n elif elements[1] == \">=\":\n new_ub = float(elements[2]) / float(elements[3])\n if input_bounds[elements[-1]][\"ub\"] is None or input_bounds[elements[-1]][\"ub\"] > new_ub:\n input_bounds[elements[-1]][\"ub\"] = new_ub\"\"\"\n\n\n\n\n val = True\n for lhs, direction, (variables, coeffs) in input_cons:\n if direction == \"<=\":\n if lhs > sum(c * value_dict[v] for v, c in zip(variables, coeffs)) + eps:\n val = False\n print(lhs, direction, variables, coeffs)\n break\n elif direction == \">=\":\n if lhs < sum(c * value_dict[v] for v, c in zip(variables, coeffs)) - eps:\n val = False\n print(lhs, direction, variables, coeffs)\n\n break\n else:\n raise NotImplementedError\n\n if not val: # input constraints do not hold\n print(\"input constraints not fulfilled\")\n return False\n else:\n if print_values:\n print(\"input constraints hold\")\n\n\n\n nodes_sorted = list(nx.topological_sort(graph))\n relu_phases = {x: -1 for x in relu_nodes}\n relu_phases_all = {x: 0 for x in relu_nodes}\n\n\n for node in nodes_sorted:\n if node in vars:\n continue # skip the input nodes\n\n new_value = 0\n\n if node in linear_nodes or node in relu_in_nodes:\n for n in graph.predecessors(node):\n new_value += graph.edges[n, node][\"weight\"] * value_dict[n]\n\n new_value += graph.node[node][\"bias\"]\n\n elif node in max_pool_nodes:\n new_value = max(value_dict[n] for n in graph.predecessors(node))\n\n\n elif node in relu_nodes:\n pred = list(graph.predecessors(node))\n assert len(pred) == 1\n\n if value_dict[pred[0]] > 0: # apply ReLU here\n new_value = value_dict[pred[0]]\n relu_phases[node] = 1\n else:\n relu_phases[node] = 0\n\n value_dict[node] = new_value\n\n\n for relu, phase in relu_phases.items():\n assert phase >= 0\n\n relu_phases_all[relu] += phase\n\n if print_values:\n for s in value_dict.items():\n print(s)\n\n val = True\n # check the ouput constraints\n #print(output_cons)\n for lhs, direction, (variables, coeffs) in output_cons:\n if direction == \"<=\":\n if lhs > sum(c * value_dict[v] for v, c in zip(variables, coeffs)) + eps:\n val = False\n break\n elif direction == \">=\":\n if lhs < sum(c * value_dict[v] for v, c in zip(variables, coeffs)) - eps:\n val = False\n break\n else:\n raise NotImplementedError\n\n return val", "def solve(team, chal, request):\n provided_key = request.form['key'].strip()\n solve = Solves(teamid=team.id, chalid=chal.id, ip=utils.get_ip(req=request), flag=provided_key)\n db.session.add(solve)\n db.session.commit()\n 
db.session.close()", "def solver(formula):\n # dictionary initializing output solution\n assignments={}\n\n # check and simplify unit clauses\n for clause in formula:\n # if clause is a unit clause\n if len(clause)==1:\n # extract random literal from clause\n var,val=get_from_set(clause)\n # make assignment such that unit clause is true\n assignments[var] = val\n # update rest of the formula with such assignment\n formula = expand(formula,var,val)\n\n # RECURSION BASE CASE 1: found one of possible solutions\n # NOTE: since I eliminate clauses once satisfied, list is \n # empty when all clauses are satisfied. \n if not formula:\n return assignments\n\n # RECURSION BASE CASE 2: impossible due to contradiction\n # NOTE: if any of the clauses is false, then no solution\n if not all(formula):\n return None\n\n # CORE OF RECURSION: recursive simplification of CNF formula\n var, val = get_from_set(formula[0])\n for attempt in (val, not val): # e.g try True, if no success try False \n assignments[var] = attempt\n new_assignments = solver(expand(formula,var,attempt))\n if new_assignments is not None:\n assignments.update(new_assignments)\n return assignments\n\n # if we get to this line, neither attempt yields a solution\n return None", "def setup_solver(self):\n option = Options()\n if logger.getEffectiveLevel() == logging.DEBUG:\n # option.printLevel = PrintLevel.HIGH\n option.printLevel = PrintLevel.NONE\n else:\n option.printLevel = PrintLevel.NONE\n self.solver_minimizing = SQProblem(self.nV, self.nC)\n self.solver_minimizing.setOptions(option)\n self.solver_maximizing = SQProblem(self.nV, self.nC)\n self.solver_maximizing.setOptions(option)\n\n self.solver_minimizing_recent_index = -2\n self.solver_maximizing_recent_index = -2", "def sat_solve(self):\n # YOUR CODE HERE\n o = frozenset()\n if self.isfalse:\n return False\n elif self.istrue:\n return set()\n l = self.generate_candidate_assignments()\n print(\"assignments,\", l)\n for i in l:\n st = sat_apply_assignment(self, i)\n print(\"i:\", i, \"new set\", st)\n\n if st.istrue:\n return {i}\n elif not st.isfalse:\n sat_solve(st)\n\n return {i}", "def solve(grid):\n\tvalues = grid2values(grid)\n\tvalues = search(values)\n\treturn values", "def prime_cache(cfg):\n from dictionaria.util import add_links2\n\n labels = {}\n for type_, cls in [('source', common.Source), ('unit', common.Unit)]:\n labels[type_] = defaultdict(set)\n for r in DBSession.query(cls.id):\n sid, _, lid = r[0].partition('-')\n labels[type_][sid].add(lid)\n\n for d in DBSession.query(Dictionary):\n for type_ in ['source', 'unit']:\n d.description = add_links2(d.id, labels[type_][d.id], d.description, type_)\n\n for meaning in DBSession.query(ComparisonMeaning).options(\n joinedload_all(common.Parameter.valuesets, common.ValueSet.values)\n ):\n meaning.representation = sum([len(vs.values) for vs in meaning.valuesets])\n if meaning.representation == 0:\n meaning.active = False\n\n def joined(iterable):\n return ' / '.join(sorted(nfilter(set(iterable))))\n\n q = DBSession.query(Word)\\\n .order_by(Word.dictionary_pk, common.Unit.name, common.Unit.pk)\\\n .options(joinedload(Word.meanings), joinedload(Word.dictionary))\n for _, words in groupby(q, lambda u: u.name):\n words = list(words)\n for i, word in enumerate(words):\n word.description = ' / '.join(m.name for m in word.meanings)\n word.comparison_meanings = joined(m.reverse for m in word.meanings)\n word.semantic_domain = joined(m.semantic_domain for m in word.meanings)\n word.number = i + 1 if len(words) > 1 else 0\n\n for 
suffix in ['1', '2']:\n alt_t, alt_l = [], []\n for m in word.meanings:\n if getattr(m, 'alt_translation' + suffix):\n alt_l.append(getattr(m, 'alt_translation_language' + suffix))\n alt_t.append(getattr(m, 'alt_translation' + suffix))\n if alt_t and len(set(alt_l)) == 1:\n DBSession.add(common.Unit_data(\n object_pk=word.pk, key='lang-' + alt_l.pop(), value=join(alt_t)))\n\n def count_unit_media_files(contrib, mtype):\n return DBSession.query(common.Unit_files)\\\n .join(Word, common.Unit_files.object_pk == Word.pk)\\\n .filter(Word.dictionary_pk == contrib.pk)\\\n .filter(common.Unit_files.mime_type.ilike(mtype + '/%'))\\\n .count() + \\\n DBSession.query(Meaning_files)\\\n .join(Meaning, Meaning_files.object_pk == Meaning.pk)\\\n .join(Word, Meaning.word_pk == Word.pk)\\\n .filter(Word.dictionary_pk == contrib.pk)\\\n .filter(Meaning_files.mime_type.ilike(mtype + '/%'))\\\n .count()\n\n for d in DBSession.query(Dictionary).options(joinedload(Dictionary.words)):\n d.count_words = len(d.words)\n sds = set(chain(*[w.semantic_domain_list for w in d.words]))\n d.semantic_domains = join(sorted(sds))\n d.count_audio = count_unit_media_files(d, 'audio')\n d.count_image = count_unit_media_files(d, 'image')\n\n word_pks = [w.pk for w in d.words]\n choices = {}\n for col in d.jsondata.get('custom_fields', []):\n values = [\n r[0] for r in DBSession.query(common.Unit_data.value)\n .filter(common.Unit_data.object_pk.in_(word_pks))\n .filter(common.Unit_data.key == col)\n .distinct()]\n if len(values) < 40:\n choices[col] = sorted(values)\n d.update_jsondata(choices=choices)\n\n DBSession.execute(\"\"\"\n UPDATE word\n SET example_count = s.c \n FROM (\n SELECT m.word_pk AS wpk, count(ms.sentence_pk) AS c\n FROM meaning AS m, meaningsentence AS ms\n WHERE m.pk = ms.meaning_pk\n GROUP BY m.word_pk\n ) AS s\n WHERE word.pk = s.wpk\n \"\"\")", "def npa_constraints(\n assemblage: dict[tuple[int, int], cvxpy.Variable], k: int | str = 1, referee_dim: int = 1\n) -> list[cvxpy.constraints.constraint.Constraint]:\n a_out, a_in, b_out, b_in = _get_nonlocal_game_params(assemblage, referee_dim)\n\n words = _gen_words(k, a_out, a_in, b_out, b_in)\n dim = len(words)\n\n r_var = cvxpy.Variable((referee_dim * dim, referee_dim * dim), PSD=True, name=\"R\")\n # Normalization.\n norm = sum(r_var[i * dim, i * dim] for i in range(referee_dim))\n constraints = [norm == 1]\n\n seen = {}\n for i in range(dim):\n for j in range(i, dim):\n w_i, w_j = words[i], words[j]\n w_i = tuple(reversed(w_i))\n word = _reduce(w_i + w_j)\n\n sub_mat = r_var[i::dim, j::dim]\n # if i = 0 we would consider (ε, ε) as an empty word.\n if i != 0 and _is_zero(word):\n constraints.append(sub_mat == 0)\n\n elif _is_meas(word):\n s_a, s_b = word\n constraints.append(\n sub_mat\n == assemblage[s_a.question, s_b.question][\n s_a.answer * referee_dim : (s_a.answer + 1) * referee_dim,\n s_b.answer * referee_dim : (s_b.answer + 1) * referee_dim,\n ]\n )\n\n elif _is_meas_on_one_player(word):\n symbol = word[0]\n if symbol.player == \"Alice\":\n sum_all_bob_meas = sum(\n assemblage[symbol.question, 0][\n symbol.answer * referee_dim : (symbol.answer + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n for b_ans in range(b_out)\n )\n\n constraints.append(sub_mat == sum_all_bob_meas)\n\n if symbol.player == \"Bob\":\n sum_all_alice_meas = sum(\n assemblage[0, symbol.question][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n symbol.answer * referee_dim : (symbol.answer + 1) * referee_dim,\n ]\n for a_ans in range(a_out)\n )\n\n 
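# a word containing only Bob's operator matches the assemblage summed over Alice's answers (Alice's marginal)\n                    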
constraints.append(sub_mat == sum_all_alice_meas)\n\n            elif word in seen:\n                old_i, old_j = seen[word]\n                old_sub_mat = r_var[old_i::dim, old_j::dim]\n                constraints.append(sub_mat == old_sub_mat)\n\n            else:\n                seen[word] = (i, j)\n\n    # now we impose constraints on the assemblage operator\n    for x_alice_in in range(a_in):\n        for y_bob_in in range(b_in):\n            sum_all_meas_and_trace = 0\n            for a_ans in range(a_out):\n                for b_ans in range(b_out):\n                    sum_all_meas_and_trace += sum(\n                        assemblage[x_alice_in, y_bob_in][\n                            i + a_ans * referee_dim, i + b_ans * referee_dim\n                        ]\n                        for i in range(referee_dim)\n                    )\n\n                    # r x r sub - block is PSD since it's an unnormalized quantum state.\n                    constraints.append(\n                        assemblage[x_alice_in, y_bob_in][\n                            a_ans * referee_dim : (a_ans + 1) * referee_dim,\n                            b_ans * referee_dim : (b_ans + 1) * referee_dim,\n                        ]\n                        >> 0\n                    )\n\n            constraints.append(sum_all_meas_and_trace == 1)\n\n    # Bob marginal consistency\n    for y_bob_in in range(b_in):\n        for b_ans in range(b_out):\n            sum_first_question = sum(\n                assemblage[0, y_bob_in][\n                    a_ans * referee_dim : (a_ans + 1) * referee_dim,\n                    b_ans * referee_dim : (b_ans + 1) * referee_dim,\n                ]\n                for a_ans in range(a_out)\n            )\n\n            for x_alice_in in range(1, a_in):\n                sum_cur_question = sum(\n                    assemblage[x_alice_in, y_bob_in][\n                        a_ans * referee_dim : (a_ans + 1) * referee_dim,\n                        b_ans * referee_dim : (b_ans + 1) * referee_dim,\n                    ]\n                    for a_ans in range(a_out)\n                )\n\n                constraints.append(sum_first_question == sum_cur_question)\n\n    # Alice marginal consistency\n    for x_alice_in in range(a_in):\n        for a_ans in range(a_out):\n            sum_first_question = sum(\n                assemblage[x_alice_in, 0][\n                    a_ans * referee_dim : (a_ans + 1) * referee_dim,\n                    b_ans * referee_dim : (b_ans + 1) * referee_dim,\n                ]\n                for b_ans in range(b_out)\n            )\n\n            for y_bob_in in range(1, b_in):\n                sum_cur_question = sum(\n                    assemblage[x_alice_in, y_bob_in][\n                        a_ans * referee_dim : (a_ans + 1) * referee_dim,\n                        b_ans * referee_dim : (b_ans + 1) * referee_dim,\n                    ]\n                    for b_ans in range(b_out)\n                )\n\n                constraints.append(sum_first_question == sum_cur_question)\n\n    return constraints", "def solve(self):\n        print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n        self.formulation.solve(solver='SCS')\n        print(self.formulation.status)", "def solve(self):\n        print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n        self.formulation.solve(solver='SCS')\n        print(self.formulation.status)", "def solve(self):\n        print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n        self.formulation.solve(solver='SCS')\n        print(self.formulation.status)", "def lookup(*args):\n    lemma, results = etym(*args)\n    languages = nest()\n    if not results:\n        query, _, dictionary = args\n        lemma, results = etym(query, None, dictionary)\n    for result in results:\n        languages[lemma][unicode(result['pos'])] = result['languages']\n    return languages", "def evaluate(self, edict):\n        pass", "def solve(self, state, times):", "def T(v,securite):\n    to_return = {} #returns the dictionary {contact index (0 -> direct / otherwise -> more or less indirect) : set({disk})} \n    Cv = set(C(v,securite))\n    Tv = set(Cv)\n    i=0\n    xv,yv=l[v][0],l[v][1]\n    while Cv != set() and i<5:\n        to_return[str(i)]=Cv\n        new_Cv = set()\n        for j in Cv:\n            xj,yj=l[j][0],l[j][1]\n            #if j is in front of v, we don't count it\n            if sqrt((xj-xt)**2+(yj-yt)**2)<sqrt((xv-xt)**2+(yv-yt)**2):\n                continue\n            new_Cv= new_Cv.__or__(C(j,securite).__sub__(Tv.__or__(set(j).__or__({v}))))\n        Tv = 
Tv.__or__(new_Cv)\n Cv = new_Cv\n i+=1\n return to_return", "def backtrack_search(self, values):\n values = self.forward_check(values) #forward checking to reduce again\n if values is False:\n return False # Invalid domain through forward checking\n if all(len(values[s]) == 1 for s in self.boxes):\n return values #All boxes have 1 number => solved\n # SELECT-UNASSIGNED-VARIABLE -> used MRV and highest degree\n box = self.MRV_and_degree(values)\n for value in values[box]: #already ordered from smallest to largest, ex: A1: 1257 -> ORDER-DOMAIN-VALUES\n #print(values[s])\n new_sudoku = values.copy()\n new_sudoku[box] = value\n guess = self.backtrack_search(new_sudoku)\n if guess:\n return guess", "def postsolve_manual(sol, dname, statelist = 'auto', printdone = 'yes'):\n global vmdict\n if type(statelist) == str:\n statelist = ['P_r', 'P_w', 'C4_nc_wb', 'C4_nc_WC']\n\n for i in range(len(np.array(dxlist))):\n if any([n == dxlist[i] for n in statelist]):\n vmdict[dxlist[i]][dname].append(sol[i][-1])\n ##\n vmdict['err'][dname].append(vmdict['P_w'][dname][-1]/ (vmdict['P_r'][dname][-1] + vmdict['P_w'][dname][-1]))\n vmdict['eta'][dname].append(vmdict['P_w'][dname][-1]/ vmdict['P_r'][dname][-1])\n if 'C4_nc_WC' in statelist:\n vmdict['pwc_c4'][dname].append(vmdict['C4_nc_WC'][dname][-1]/ (vmdict['C4_nc_WC'][dname][-1] + vmdict['C4_nc_wb'][dname][-1]))\n ##\n if printdone == 'yes':\n print('finished ' + str(len(vmdict[statelist[0]][dname])) + ' ' + dname)", "def query_variables(md):\n\n # save as dictionaries with searchers as keys\n x_searchers = {}\n b_target = {}\n\n t_max = 0\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n # print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_var_name:\n s = int(my_var_name[2:my_var_name.find(\",\")])\n v = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\",\")])\n t = int(my_var_name[my_var_name.rfind(\",\") + 1:-1])\n\n # print('%s = %f ' % (my_var_name, my_var_value))\n x_searchers[(s, v, t)] = my_var_value\n\n if t > t_max:\n t_max = t\n\n elif 'beta' in my_var_name and '_s' not in my_var_name:\n # print('%s %g' % (my_var_name, my_var_value))\n # remember: b[0] is probability of capture\n v = int(my_var_name[5:my_var_name.find(\",\")])\n t = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\"]\")])\n b_target[(v, t)] = my_var_value\n\n # make sure x is binary\n x_searchers = enforce_binary(x_searchers, t_max)\n b_target = enforce_sum_1(b_target, t_max)\n\n # x_searchers[(s, v, t)] and b_target[(v, t)]\n return x_searchers, b_target", "def try1():\n path = '/Users/mayankkejriwal/datasets/eswc2017/disasters/'\n model = Word2Vec.load_word2vec_format(path+'GoogleNews-vectors-negative300.bin', binary=True)\n model.init_sims(replace=True)\n keys = ['charlotte', 'Charlotte', 'yorktown', 'LA']\n for key in keys:\n try:\n # print model.most_similar(positive=['woman', 'king'], negative=['man'])\n j = model[key]\n print 'found...',\n print key\n except KeyError:\n print 'not found...',\n print key\n continue\n print model.similarity('charlotte', 'carolina')\n print model.similarity('LA', 'California')", "def generate(self):\n \n ##Dictionary of put values\n self.put = {}\n ##Dictionary of call values\n self.call = {}\n \n bottom = self.range[0]\n top = self.range[2]\n iter = self.range[1]\n \n if self.meth == 'bi':\n for E in range(bottom,top+iter,iter):\n solver = bi.binomial_euro(S=self.S,E=E,r=self.r,M=400,sigma=self.sigma,method='higham',T=self.T,opt='put')\n self.put[E] = solver.solve()\n 
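# drop the solver instance so a fresh one is constructed for the next strike E\n                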
del solver\n            for E in range(bottom,top+iter,iter):\n                solver = bi.binomial_euro(S=self.S,E=E,r=self.r,M=400,sigma=self.sigma,method='higham',T=self.T,opt='call')\n                self.call[E] = solver.solve()\n                del solver\n        elif self.meth == 'mc':\n            for E in range(bottom,top+iter,iter):\n                solver = mc.mcfast_euro(S=self.S,E=E,r=self.r,M=20000,sigma=self.sigma,T=self.T,opt='put')\n                self.put[E] = solver.solve()\n                del solver\n            for E in range(bottom,top+iter,iter):\n                solver = mc.mcfast_euro(S=self.S,E=E,r=self.r,M=20000,sigma=self.sigma,T=self.T,opt='call')\n                self.call[E] = solver.solve()\n                del solver\n        elif self.meth == 'all':\n            pass", "def solver_objects(\n    kappa, f, u_D, Nx, Ny, degree=1,\n    linear_solver='Krylov', # Alt: 'direct'\n    abs_tol=1E-5,           # Absolute tolerance in Krylov solver\n    rel_tol=1E-3,           # Relative tolerance in Krylov solver\n    max_iter=1000,          # Max no of iterations in Krylov solver\n    log_level=PROGRESS,     # Amount of solver output\n    dump_parameters=False,  # Write out parameter database?\n    ):\n    # Create mesh and define function space\n    mesh = UnitSquareMesh(Nx, Ny)\n    V = FunctionSpace(mesh, 'P', degree)\n\n    def boundary(x, on_boundary):\n        return on_boundary\n\n    bc = DirichletBC(V, u_D, boundary)\n\n    # Define variational problem\n    u = TrialFunction(V)\n    v = TestFunction(V)\n    a = kappa*dot(grad(u), grad(v))*dx\n    L = f*v*dx\n\n    # Compute solution\n    u = Function(V)\n    problem = LinearVariationalProblem(a, L, u, bc)\n    solver = LinearVariationalSolver(problem)\n\n    if linear_solver == 'Krylov':\n        solver.parameters['linear_solver'] = 'gmres'\n        solver.parameters['preconditioner'] = 'ilu'\n        prm = solver.parameters['krylov_solver'] # short form\n        prm['absolute_tolerance'] = abs_tol\n        prm['relative_tolerance'] = rel_tol\n        prm['maximum_iterations'] = max_iter\n        print(parameters['linear_algebra_backend'])\n        set_log_level(log_level)\n        if dump_parameters:\n            info(parameters, True)\n        solver_parameters = {'linear_solver': 'gmres',\n                             'preconditioner': 'ilu'}\n    else:\n        solver_parameters = {'linear_solver': 'lu'}\n\n    solver.solve()\n    return u", "def __init__(self):\r\n        self.trie = {}", "def uniformCostSearch(problem):\n    \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n    aStarSearch(problem)", "def solve(self, **kwargs) -> OptimizationResult:\n        raise NotImplementedError", "def uniformCostSearch(problem):\n    \"*** YOUR CODE HERE ***\"\n\n    #Create the necessary data structures (priority queue and set)\n    openNodes = util.PriorityQueue()\n    closedNodes = set([])\n\n    #Store the initial node\n    node = Node(problem.getStartState(), '', 0, None)\n\n    #Compute the heuristic function and the accumulated cost to obtain the evaluation function of the initial node\n    fn = problem.getCostOfActions(node.path) + nullHeuristic(node.name, problem);\n\n    #Push it into the queue with its evaluation function as the priority\n    openNodes.push(node, fn)\n\n    #Iterate over each node\n    while True:\n        if openNodes.isEmpty():\n            break #ERROR: throw exception\n        else :\n            #pop the node at the top of the queue\n            node = openNodes.pop()\n            if problem.isGoalState(node.name): #Check whether the node is the Goal. 
If it is, we are done.\n                break\n            else: #Expand the node's successors if they are not already in closed\n                if nodeIsClosed(node, closedNodes) is False:\n                    for successor in problem.getSuccessors(node.name):\n                        n, p, c = successor\n                        succNode = Node(n, p, c, node)\n                        if nodeIsClosed(succNode, closedNodes) is False:\n                            fn = problem.getCostOfActions(findPath(succNode)) + nullHeuristic(succNode.name, problem);\n                            openNodes.push(succNode, fn)\n            #Put the node into closed\n            closedNodes.add(node)\n\n    #Return the path to the Goal\n    return findPath(node)", "def lookup(self, pos, word_pat, enable_de=True):\n        from sagas.ru.ru_dictionary import RuDictionary\n        print('.. load dictionary')\n        dic=RuDictionary(pos=pos)\n        rs=dic.lookup(word_pat, enable_de)\n        print(rs)", "def _check_c(self, constraint, *variables):\n        c = {c.__class__ for c in self._layout.solver.get(*variables)}\n        if constraint:\n            self.assertTrue(constraint in c)\n        else:\n            self.assertFalse(c)", "def main():\n\n    precomp = {}\n    for op1 in '+-*/':\n        for op3 in '+-*/':\n            for op5 in '+-*/':\n                text = '4 ' + ' 4 '.join([op1, op3, op5]) + ' 4'\n                precomp[eval2(text)] = text\n\n    for _ in range(int(input())):\n        number = int(input())\n        if number in precomp:\n            print(precomp[number], '=', number)\n        else:\n            print('no solution')", "def _create_solver(self):\n        # https://petsc.org/release/docs/manualpages/KSP/KSPType.html\n        iterative = [\n            'richardson', 'chebyshev', 'cg', 'groppcg', 'pipecg', 'pipecgrr',\n            'cgne', 'nash', 'stcg', 'gltr', 'fcg', 'pipefcg', 'gmres',\n            'pipefgmres', 'fgmres', 'lgmres', 'dgmres', 'pgmres', 'tcqmr',\n            'bcgs', 'ibcgs', 'fbcgs', 'fbcgsr', 'bcgsl', 'pipebcgs', 'cgs',\n            'tfqmr', 'cr', 'pipecr', 'lsqr', 'preonly', 'qcg', 'bicg',\n            'minres', 'symmlq', 'lcd', 'python', 'gcr', 'pipegcr', 'tsirm',\n            'cgls', 'fetidp']\n        # https://petsc.org/release/docs/manualpages/PC/PCType.html\n        preconditioners = [\n            'none', 'jacobi', 'sor', 'lu', 'shell', 'bjacobi', 'mg',\n            'eisenstat', 'ilu', 'icc', 'asm', 'gasm', 'ksp', 'composite',\n            'redundant', 'spai', 'nn', 'cholesky', 'pbjacobi', 'mat', 'hypre',\n            'parms', 'fieldsplit', 'tfs', 'ml', 'galerkin', 'exotic', 'cp',\n            'bfbt', 'lsc', 'python', 'pfmg', 'syspfmg', 'redistribute', 'svd',\n            'gamg', 'sacusp', 'sacusppoly', 'bicgstabcusp', 'ainvcusp',\n            'chowiluviennacl', 'rowscalingviennacl', 'saviennacl', 'bddc',\n            'kaczmarz', 'telescope']\n        direct_lu = ['mumps', 'superlu_dist', 'umfpack', 'klu']\n        direct_cholesky = ['mumps', 'cholmod']\n        valid_solvers = iterative + direct_lu + direct_cholesky\n\n        solver = self.solver_type\n        preconditioner = self.preconditioner\n\n        if solver not in valid_solvers:\n            raise Exception(f\"{solver} solver not available, choose another solver\")\n        if preconditioner not in preconditioners:\n            raise Exception(f\"{preconditioner} not found, choose another preconditioner\")\n\n        self.ksp = PETSc.KSP()\n        self.ksp.create(PETSc.COMM_WORLD)\n\n        if solver in direct_lu:\n            self.ksp.getPC().setType('lu')\n            self.ksp.getPC().setFactorSolverType(solver)\n            self.ksp.setType('preonly')\n        elif solver in direct_cholesky:\n            self.ksp.getPC().setType('cholesky')\n            self.ksp.getPC().setFactorSolverType(solver)\n            self.ksp.setType('preonly')\n        elif solver in preconditioners:\n            self.ksp.getPC().setType(solver)\n            self.ksp.setType('preonly')\n        elif solver in iterative:\n            self.ksp.getPC().setType(preconditioner)\n            self.ksp.setType(solver)", "def _set_solver(self):\n        self.solver = Solver.select_solver(self.method, self.solver_args)\n        if self.method.lower()==\"elastic-net\":\n            self.solver.elements=self.basis.elements", "def 
initialize_model(m, disp = False):\n\n    # Initialization parameters\n    m.bounds = pe.ConstraintList()\n\n    for i in atr:\n        try :\n            m.bounds.add(expr = getattr(m,i) == getattr(m,i).value)\n\n        except :\n            pass \n\n    opt = SolverFactory('ipopt') # Solver\n    opt.solve(m, tee=disp) # Initialize\n    m.del_component(m.bounds) # Deletes initial specification \n    \n    return m", "def solvepredicates(predicates, objects_dic, predicates_rules, gstate):\n    \"\"\"This function will pop a predicate from a list of predicates and try to solve\n    it; the predicate will be put back into the predicates list if it cannot be solved in\n    one turn. The function will return true if all the predicates have been solved.\n    Args:\n        predicates(list of String): a list of predicates that need to be solved.\n        objects_dic(dictionary): a dictionary of objects whose attributes have to be solved\n        predicates_rules(dictionary): animation rules of predicates.\n        space(array): an array that will be used for the distributex function; it remembers the current objects\n        that are in the space.\n\n    \"\"\"\n    i = 0\n    while (predicates and i < 2000):\n        predicate = predicates.pop(0)\n        if predicate[\"name\"] not in predicates_rules:\n            continue\n        if check_rule_complete(predicate, objects_dic, predicates_rules):\n\n            applypredicates(predicate, objects_dic, predicates_rules, gstate)\n        else:\n            if not predicates: # if the last predicate can not be solved\n                return False\n            predicates.append(predicate)\n        i += 1\n    return True", "def test_solve(game, optimal):\n\n    matching = game.solve(optimal)\n    assert isinstance(matching, MultipleMatching)\n\n    hospitals = sorted(game.hospitals, key=lambda h: h.name)\n    matching_keys = sorted(matching.keys(), key=lambda k: k.name)\n    for game_hospital, hospital in zip(matching_keys, hospitals):\n        assert game_hospital.name == hospital.name\n        assert game_hospital._pref_names == hospital._pref_names\n        assert game_hospital.capacity == hospital.capacity\n\n    matched_residents = [\n        resident for match in matching.values() for resident in match\n    ]\n\n    assert matched_residents != [] and set(matched_residents).issubset(\n        set(game.residents)\n    )\n\n    for resident in set(game.residents) - set(matched_residents):\n        assert resident.matching is None", "def search(values):\n    global assignments\n\n    # First, reduce the puzzle using the previous function\n    values = reduce_puzzle(values)\n\n    # Check if this solution is unsolvable\n    if values is False:\n        return False\n\n    # Check if we found a solution, i.e. all boxes have one digit\n    if all(len(values[s]) == 1 for s in boxes):\n        return values\n    # Choose one of the unfilled squares with the fewest possibilities\n    min = 10\n    minKey = None\n    for v in values:\n        if 1 < len(values[v]) < min:\n            min = len(values[v])\n            minKey = v\n\n    for digit in values[minKey]:\n        new_values = dict(values)\n        assignments_bck = assignments.copy()\n        new_values = assign_value(new_values, minKey, digit)\n        new_values = search(new_values)\n        if new_values != False:\n            return new_values\n        assignments = assignments_bck.copy()\n    return False", "def __init__(self):\n        self.trie = {}", "def __init__(self):\n        self.trie = {}", "def __init__(self):\n        self.trie = {}", "def __init__(self):\n        self.trie = {}", "def get_searchable_rules(rules):\n    searchable_rules = {rule.variable: {} for rule in rules}\n    for rule in rules:\n        searchable_rules[rule.variable][tuple(rule.derivation)] = rule\n    return searchable_rules", "def tri_si_rencontre(self, joueurs_tries, liste_rencontres, nb_joueurs):\n        # We recover the possibilities\n        for x in joueurs_tries:\n            liste_dict = []\n            for y 
in joueurs_tries:\n if x == y:\n continue\n if (x, y) in liste_rencontres or (y, x) in liste_rencontres:\n continue\n else:\n liste_dict.append(y)\n self.dict_possiblity[x] = liste_dict\n copy_joueurs = list(joueurs_tries)\n liste_finale = []\n nb_tour = 0\n error = False\n while joueurs_tries:\n x = joueurs_tries[0]\n for y in joueurs_tries:\n if nb_tour > nb_joueurs**2:\n print(\"Il y a une erreur dans l'algorithme.\")\n error = True\n break\n if x == y:\n continue\n if (x, y) in liste_rencontres or (y, x) in liste_rencontres:\n nb_tour += 1\n continue\n else:\n i = 0\n # we are looking for a unique possibility\n for key in list(self.dict_possiblity):\n if len(self.dict_possiblity[key]) == 1:\n valeur = self.dict_possiblity[key][0]\n liste_finale.append((key, valeur))\n liste_rencontres.append((key, valeur))\n joueurs_tries.remove(key)\n joueurs_tries.remove(valeur)\n self.sup_dicti(valeur, key)\n i += 1\n break\n if i > 0:\n break\n # we remove both of the possibilities\n self.sup_dicti(x, y)\n liste_finale.append((x, y))\n liste_rencontres.append((x, y))\n joueurs_tries.remove(y)\n joueurs_tries.remove(x)\n break\n if error:\n liste_finale = Vue().demander_binomes(copy_joueurs,\n nb_joueurs)\n return liste_finale\n return liste_finale", "def pfd_solve (r, w) :\n f = pfd_read(r)\n v = pfd_eval(f)\n pfd_print(w, v)", "def __init__(self):\n self.trie = dict()", "def __init__(self):\n self.trie = dict()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.PriorityQueueWithFunction(lambda node: node.path_cost)\n return GraphSearch(problem, 'ucs').search(fringe)", "def solver(mesh, model, ele, nodal_load):\r\n A = kinematics.A_matrix(model, ele)\r\n\r\n Ks = stiffness.Ks_matrix(model, ele)\r\n\r\n K = np.dot(A.T, np.dot(Ks, A))\r\n\r\n P = load.P_vector(model, nodal_load)\r\n\r\n Kf, Pf = index.fdof(model, K, P)\r\n\r\n Uf = np.linalg.solve(Kf, Pf)\r\n\r\n U = index.tdof(model, Uf)\r\n\r\n V = np.dot(A, U)\r\n\r\n Q = np.dot(Ks, V)\r\n\r\n return U, Q", "def solve_iteratively(self, conv_crit=1e-10, maxiter=50,\n check_every=4, check_after=1, precision=None, verbose=False):\n sol = self.sol0\n terms = [(get_name(gi), get_name(gj), get_name(uij))\n for term in self.all_terms for (gi, gj, uij) in term]\n gain_map = {}\n ubl_map = {}\n for gi,gj,uij in terms:\n if not gi in gain_map:\n gain_map[gi] = len(gain_map)\n if not gj in gain_map:\n gain_map[gj] = len(gain_map)\n if not uij in ubl_map:\n ubl_map[uij] = len(ubl_map)\n ggu_indices = np.array([(gain_map[gi], gain_map[gj], ubl_map[uij]) \n for (gi, gj, uij) in terms], dtype=np.uint)\n v = sol[gi]\n shape, dtype, ndata = v.shape, v.dtype, v.size\n ngains = len(gain_map)\n nubls = len(ubl_map)\n nbls = len(self.keys)\n assert dtype in (np.complex64, np.complex128)\n if precision is None:\n if dtype == np.complex64:\n precision = 1\n else:\n precision = 2\n if precision == 1:\n real_dtype = np.float32\n else:\n real_dtype = np.float64\n gains = np.empty((ndata, ngains), dtype=dtype)\n for k,v in gain_map.items():\n gains[:,v] = sol[k].flatten()\n ubls = np.empty((ndata, nubls), dtype=dtype)\n for k,v in ubl_map.items():\n ubls[:,v] = sol[k].flatten()\n data = np.empty((ndata, nbls), dtype=dtype)\n wgts = np.empty((ndata, nbls), dtype=real_dtype)\n for i,k in enumerate(self.keys):\n data[:,i] = self.data[k].flatten()\n wgts[:,i] = self.wgts[k].flatten()\n #data = np.array([self.data[k].flatten() for k in self.keys])\n #wgts = np.array([self.wgts[k].flatten() for k in self.keys])\n if wgts.shape != data.shape:\n wgts = 
np.resize(wgts, data.shape)\n result = omnical(ggu_indices, gains, ubls, data, wgts, \n conv_crit, maxiter, check_every, check_after,\n nthreads=NTHREADS, precision=precision, gain=self.gain, \n verbose=verbose)\n for k,v in gain_map.items():\n sol[k] = np.reshape(result['gains'][:,v], shape)\n for k,v in ubl_map.items():\n sol[k] = np.reshape(result['ubls'][:,v], shape)\n meta = {\n 'iter': np.reshape(result['iters'], shape),\n 'chisq': np.reshape(result['chisq'], shape),\n 'conv_crit': np.reshape(result['conv'], shape),\n }\n return meta, sol", "def solve(P, M, N, C, items, constraints):\n Items = []\n weight_dict = dict()\n cost_dict = dict()\n earn_dict = dict()\n class_dict = dict()\n \n for i in items:\n Items.append(i[0])\n weight_dict[i[0]] = i[2]\n cost_dict[i[0]] = i[3]\n earn_dict[i[0]] = i[4] - i[3]\n class_dict[i[0]] = i[1]\n \n Class_dict = defaultdict(list)\n for key, value in sorted(class_dict.iteritems()):\n Class_dict[value].append(key)\n \n # Create the 'prob' variable to contain the problem data\n prob = LpProblem(\"The PICKITEMS Problem\", LpMaximize)\n # Two dictionary called 'x_vars' and 'y_vars' are created to contain the referenced Variables\n x_vars = LpVariable.dicts(\"\",Items,0,1,cat=LpInteger)\n y_vars = LpVariable.dicts(\"#\",range(N),0,1,cat=LpInteger)\n \n # The objection function is added to 'prob' first\n prob += lpSum([earn_dict[i]*x_vars[i] for i in Items]), \"Total money we can earn in this file\"\n \n # Constraints are added to 'prob'\n prob += lpSum([weight_dict[i]*x_vars[i] for i in Items]) <= P, \"WeightsRequirement\"\n prob += lpSum([cost_dict[i]*x_vars[i] for i in Items]) <= M, \"CostRequirement\"\n # the relations(constraint) between class and item (x and y)\n for num in range(C):\n for i in Class_dict[num]:\n prob += LpConstraint(x_vars[i]-y_vars[num] <= 0), \"\"\n # the relations for class constraints\n for constraint in constraints:\n prob += lpSum([y_vars[i] for i in constraint]) <= 1, \"\"\n # The problem is solved using PuLP's choice of Solver\n prob.solve()\n \n items_chosen = []\n for v in prob.variables():\n if v.varValue == 1 and v.name[0]!='#':\n items_chosen.append(v.name[1:])\n return items_chosen", "def solve(self, opt_prob: OptimizationProblem, opt_param: OptimizationParameter, *args, **kwargs) -> Any:\n pass", "def __solve(self) -> None:\n pyo.TransformationFactory(\"contrib.detect_fixed_vars\").apply_to(self.model) # type: ignore\n pyo.TransformationFactory(\"contrib.deactivate_trivial_constraints\").apply_to(self.model) # type: ignore\n\n # initialise the solver object\n self._logger.debug(\"[ModelSolver] Solver object initiated...\")\n solver = Config.OPTIMISATION_MODEL_CONFIG['SOLVER_TYPE']\n opt = pyo.SolverFactory(solver)\n if Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver) is not None:\n for k, v in Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver).items():\n opt.options[k] = v\n\n try:\n start_time = datetime.now()\n self._logger.debug(\"[ModelSolver] Solver starting...\")\n results = opt.solve(self.model, tee=True)\n self.results = results\n end_time = datetime.now()\n self._logger.info(f\"[ModelSolver] Solver completed in {end_time - start_time}.\")\n except Exception as e:\n raise Exception(f\"Model optimisation failed with {solver} with error message {e}.\")\n\n if (results.solver.status == SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal):\n self._logger.info(\"Solution is feasible and optimal\")\n results.write()\n elif 
results.solver.termination_condition == TerminationCondition.infeasible:\n raise ValueError(\"Model optimisation resulted into an infeasible solution\")\n\n self.model.optimised = True", "def solve_problem(filename):\n if len(es.conflict_graph.edges()) == 0: # Checking if a problem is loaded\n print(\"No problem to solve!\") # If it is loaded then len must be > 0\n return()\n\n exams2 = nx.coloring.greedy_color(\n es.conflict_graph, strategy=nx.coloring.strategy_largest_first)\n\n es.optimize_exams = dict(exams2)\n # es.optimize_exams2 = dict(exams2)\n es.best = dict(exams2)\n\n \"\"\" EXPORT SOLUTIONS FILE\n ---------------------------------------------------------------------------\n 1. We itterate through the period_exams dictionary and export to the file\n two columns. The first column contains the subject and the other one\n contains the period that was assigned into.\n ---------------------------------------------------------------------------\n \"\"\"\n\n with open(filename[0:-4]+'.sol', 'w') as f:\n for k, v in exams2.items():\n f.write('{}\\t{}\\n'.format(k, v))\n\n \"\"\"\n In the next itteration of the exams2 dictionary we switch dictionary\n keys and now the period becomes they key and the lessons assigned to it\n the values. It is being saved in the period_exams dictionary.\n \"\"\"\n period_exams = {}\n for k, v in exams2.items():\n if v not in period_exams:\n period_exams[v] = [k]\n else:\n period_exams[v].append(k)\n cost(period_exams)", "def tryEverything(g, verbose, graphname):\r\n prio = ['rku', 'random', 'BIL', 'rkd', 'cluHPS', 'rkusd', 'rkuad']\r\n placement = ['eft', 'BIM*', 'OLB', 'MET', 'DL', 'GDL']\r\n costFunction = ['mean', 'median', 'maxmax', 'minmax', 'minmin', 'maxmin']\r\n desc = ['DLS/DC', None, 'DCP']\r\n useOfBIM = [False, True]\r\n insertion = [False, True]\r\n BSA = [False, True]\r\n res: Dict[str, List[float]] = {}\r\n cnt = 0\r\n\r\n for ip, p in enumerate(prio):\r\n for ipl, pl in enumerate(placement):\r\n for ic, c in enumerate(costFunction):\r\n if p != 'BIL' or c == 'mean' or pl in ['DL', 'GDL']:\r\n for idd, d in enumerate(desc):\r\n for iu, u in enumerate(useOfBIM):\r\n for ii, i in enumerate(insertion):\r\n for ib, b in enumerate(BSA):\r\n cnt += 1\r\n name = \";\".join(map(str, [ip, ic, ipl, idd, iu, ii, ib]))\r\n\r\n # dispName = \"-\".join(map(str, [p, pl, c, d, u, i, b]))\r\n # print(\"Heuristic n°\", cnt, \"-\", dispName)\r\n # print(\"Heuristic n°\", cnt, \"-\", name)\r\n\r\n startScheduling = timeit.default_timer()\r\n try:\r\n schedule = computeSchedule(g, strategyPrio=p, costFunction=c,\r\n strategyPlacement=pl,\r\n useOfBIM=u, desc=d,\r\n insertion=i, bsa=b, verbose=verbose)\r\n verifPrec(g, schedule, verbose)\r\n endScheduling = timeit.default_timer()\r\n # print(\"Ended in :\", 1000*(endScheduling - startScheduling), \"ms\")\r\n # print(\"Ended in :\", round(1000 * (endScheduling - startScheduling),2), \"ms\")\r\n timeS = round(1000 * (endScheduling - startScheduling), 2)\r\n # print(f\"timeS : {timeS}\")\r\n if verbose:\r\n print(f\"Time : {timeS}ms\")\r\n res[name] = [round(schedule[getExitTask(g)][2], 6), timeS]\r\n except Exception as _:\r\n\r\n print(\"Error for : \" + name + \" on file \" + graphname)\r\n file = open(\"error.log\", 'a')\r\n file.write(f\"Error for {name} on file {graphname}\\n\")\r\n file.close()\r\n raise _\r\n return res\r\n return res", "def main():\n\n rules, evolutions = [int(i) for i in input().strip().split()]\n\n rule = {}\n for _ in range(rules):\n start, finish = input().strip().split(' -> ')\n 
rule[start] = finish\n\n print(lindenmayor(rule, evolutions, input().strip()))", "def test_solvers():\n # With P1 elements we have an error E-15 with Krylov solver\n # tolerances of 1E-12, but with P2 elements the error is E-6.\n # P3 elements drive the tolerance down to E-3.\n # For higher mesh resolution we also need reduced tolerances.\n # The tol dict maps degree to expected tolerance for the coarse\n # meshes in the test.\n tol = {'direct': {1: 1E-11, 2: 1E-11, 3: 1E-11},\n 'Krylov': {1: 1E-14, 2: 1E-05, 3: 1E-03}}\n u_D = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')\n kappa = Expression('x[0] + x[1]')\n f = Expression('-8*x[0] - 10*x[1]')\n for Nx, Ny in [(3,3), (3,5), (5,3)]:\n for degree in 1, 2, 3:\n for linear_solver in 'direct', 'Krylov':\n for solver_func in solver, solver_objects:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n % (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n # Important: Krylov solver error must be smaller\n # than tol!\n u = solver_func(\n kappa, f, u_D, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol[linear_solver][degree],\n rel_tol=0.1*tol[linear_solver][degree])\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_D_Function = interpolate(u_D, V) # exact solution\n # Check that dof arrays are equal\n u_D_array = u_D_Function.vector().array() # dof values\n max_error = (u_D_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < tol[linear_solver][degree], msg", "def dpAdvisor(subjects, maxWork):\n # initialize an empty Memo of solutions and dictionary Keep of included\n # subjects\n Memo = {}\n Keep = {}\n \n # get list of subjects\n names = subjects.keys()\n \n # starting index for decision tree\n index = len(names) - 1\n \n # solve using dynamic programming\n ans = dpSolve(subjects, index, maxWork, Memo, Keep)\n \n # create dictionary of subjects\n limit = maxWork\n outputSubjects = {}\n i = len(names) - 1\n while i >= 0:\n # if subject is to be included, copy details\n if Keep[(i,limit)] == 1:\n outputSubjects[names[i]] = subjects[names[i]]\n limit = limit - subjects[names[i]][WORK]\n i = i - 1\n \n # return subjects\n return outputSubjects" ]
[ "0.64691514", "0.62616104", "0.6139838", "0.60149825", "0.60149825", "0.58877885", "0.57446116", "0.5717959", "0.5711279", "0.565116", "0.5614572", "0.5610222", "0.559666", "0.5590503", "0.5546544", "0.5545591", "0.5545591", "0.5508435", "0.5506832", "0.54851216", "0.54851216", "0.54779214", "0.54779214", "0.54779214", "0.5467233", "0.54272676", "0.5416447", "0.5393956", "0.53887725", "0.5343756", "0.53360254", "0.53093916", "0.5283617", "0.52768946", "0.525947", "0.5257138", "0.5250072", "0.52464443", "0.52434075", "0.522903", "0.5227301", "0.5214258", "0.5203619", "0.52032673", "0.52017504", "0.51791817", "0.5178477", "0.51764673", "0.5171156", "0.5171132", "0.51573366", "0.51483995", "0.51479185", "0.51452464", "0.5136094", "0.5136094", "0.5136094", "0.51346385", "0.5131532", "0.51300025", "0.5127577", "0.5125074", "0.5114793", "0.51054525", "0.51044226", "0.509487", "0.50915354", "0.5081704", "0.50779027", "0.5069403", "0.5069101", "0.5057646", "0.505758", "0.5052867", "0.504899", "0.5042347", "0.50409436", "0.5040356", "0.5033396", "0.5028982", "0.5024956", "0.5024956", "0.5024956", "0.5024956", "0.50170404", "0.5016786", "0.5014015", "0.50052804", "0.50052804", "0.49946347", "0.49889368", "0.49880603", "0.4984829", "0.49825573", "0.49744847", "0.4969581", "0.49684164", "0.49672276", "0.4955059", "0.4954172" ]
0.53376806
30
returns the words of a list in descending order of length
def decreasingList(self, words):\n    # group the words by length, then emit the groups longest-first\n    dic = {}\n    words = self.d.lowerList(words)\n    for word in words:\n        if len(word) not in dic:\n            dic[len(word)] = []\n        dic[len(word)].append(word)\n    # reverse=True sorts the (length, words) pairs in descending order of length\n    dic = sorted(dic.items(), reverse=True)\n    return [f for e in dic for f in e[1]]
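A minimal standalone sketch of the same grouping-and-sorting idea, assuming the lowerList helper (its implementation is not shown in the source) merely lower-cases each word:

def decreasing_list(words):
    by_length = {}
    for word in (w.lower() for w in words):
        by_length.setdefault(len(word), []).append(word)
    # lengths are unique keys, so reverse-sorting the items orders groups longest-first
    return [w for _, group in sorted(by_length.items(), reverse=True) for w in group]

print(decreasing_list(["a", "tree", "to", "forest"]))  # ['forest', 'tree', 'to', 'a']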
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_words():\n # words\n words_list = list()\n for i in range(1, 114+1):\n sura = quran.get_sura(i)\n for aya in sura:\n wordsList = aya.split(' ')\n for word in wordsList:\n words_list.append(word)\n\n return words_list", "def longwords_Li_Comp(strings):\n return [string for string in strings if len(string)>4 ]", "def longwords_Li_Comp(strings):\n # write your code here\n return [string for string in strings if len(string)>4]", "def longwords(strings):\n # write your code here\n shorter_lst=[]\n for i in strings:\n if len(i)>4:\n shorter_lst.append(i)\n\n return shorter_lst", "def medium_words(a_list):\n\n medium_list = [word for word in a_list if len(word) in range(6,9)]\n return medium_list", "def words(self):\n pass", "def easy_words(a_list):\n\n easy_list = [word for word in a_list if len(word) in range(4,7)]\n return easy_list", "def test_long_list_words(self):\n input_word_list = {\n \"words\": [\"word1\", \"word2\", \"word3\", \"diming\", \"liming\", \"priming\", \"rhyming\", \"timing\", \"absurd\", \"alward\",\n \"bird\", \"blurred\", \"all\", \"antol\", \"appall\", \"aul\", \"aull\", \"bacall\", \"ball\", \"baseball\", \"bawl\",\n \"befall\", \"brawl\", \"caul\", \"crall\", \"crawl\", \"dall\", \"daul\", \"depaul\", \"drawl\", \"edsall\",\n \"engwall\", \"enthral\", \"fairall\", \"fall\", \"faul\", \"faull\", \"forestall\", \"gall\", \"gaul\", \"gaulle\",\n \"grall\", \"graul\", \"hall\", \"haul\", \"install\", \"kall\", \"kaul\", \"krall\", \"krol\", \"kroll\", \"lall\",\n \"lol\", \"luminol\", \"mall\", \"maul\", \"maule\", \"maull\", \"mccall\", \"mccaul\", \"mcfall\", \"mcfaul\",\n \"mcnall\", \"mcphaul\", \"mehall\", \"metall\", \"mol\", \"montreal\", \"nall\", \"nepal\", \"pall\", \"paul\",\n \"paule\", \"paull\", \"peterpaul\", \"pol\", \"prall\", \"rall\", \"raul\", \"rawl\", \"recall\", \"sabol\", \"sall\",\n \"saul\", \"babar\", \"bahr\", \"bahre\", \"balakumar\", \"baldemar\", \"baltazar\", \"bar\", \"bargar\", \"barr\",\n \"barre\", \"bashar\", \"bazaar\", \"bazar\", \"bejar\", \"belvoir\", \"bizarre\", \"bizarre\", \"bodnar\", \"bogar\",\n \"bognar\", \"bomar\", \"bondar\", \"bonior\", \"boyar\", \"car\", \"carr\", \"carre\", \"ceasar\", \"cigar\",\n \"cisar\", \"claar\", \"clar\", \"cotnoir\", \"cousar\", \"csar\", \"csaszar\", \"czar\", \"d'ivoire\", \"dakar\",\n \"dar\", \"dardar\", \"darr\", \"delamar\", \"delebarre\", \"demar\", \"detar\", \"dinar\", \"disbar\", \"dzhokhar\",\n \"dzokhar\", \"emdr\", \"fahr\", \"far\", \"farquar\", \"farr\", \"farrar\", \"ferrar\", \"flohr\", \"foobar\",\n \"gaar\", \"gahr\", \"gar\", \"ghafar\", \"giroir\", \"giscard\", \"godar\", \"gohr\", \"gombar\", \"gregoire\",\n \"guitar\", \"guizar\", \"haar\", \"hajjar\", \"hamar\", \"har\", \"hekmatyar\", \"hocevar\", \"hribar\", \"hribar\",\n \"huizar\", \"jabar\", \"jabbar\", \"jaffar\", \"jahr\", \"jamar\", \"jar\", \"jiar\", \"kadar\", \"kahr\", \"kahre\",\n \"karr\", \"kjar\", \"kjar\", \"klar\", \"kumar\", \"labar\", \"lahr\", \"lamar\", \"lamarr\", \"lamarre\", \"lar\",\n \"lebar\", \"lemar\", \"lemarr\", \"maher\", \"mahr\", \"mar\",\n \"marr\", \"mawr\", \"mcgarr\", \"meagher\", \"melgar\", \"menjivar\", \"minnaar\", \"myanmar\", \"najar\",\n \"najjar\", \"navar\", \"navarre\", \"nazar\", \"npr\", \"o'barr\", \"obar\", \"obarr\", \"our\", \"paar\", \"paccar\",\n \"par\", \"parr\", \"pfarr\", \"phar\", \"pharr\", \"pickar\", \"pintar\", \"preslar\", \"qasr\", \"qatar\", \"quarre\",\n \"r\", \"r.\", \"rajkumar\", \"renoir\", \"revoir\", \"ribar\", \"robar\", \"saar\", 
\"sagar\", \"saldivar\",\n \"saldovar\", \"sar\", \"scar\", \"schaar\", \"schar\", \"scharr\", \"shahar\",\n \"sharar\", \"sharrar\", \"sitar\", \"skaar\", \"sklar\", \"soutar\", \"spahr\", \"spar\", \"sphar\", \"spohr\",\n \"staar\", \"star\", \"starr\", \"subpar\", \"superstar\", \"tabar\", \"tar\", \"tarr\", \"tatar\", \"tesar\", \"thar\",\n \"tokar\", \"tovar\", \"transtar\", \"tsar\", \"tsar\", \"ulaanbaatar\", \"valdemar\", \"victoire\", \"voir\",\n \"wor\", \"wor\", \"yahr\", \"zachar\", \"zadar\", \"zagar\", \"zalar\", \"zaldivar\", \"zarre\", \"zulfikar\",\n \"stall\", \"tall\", \"taul\", \"thall\", \"thrall\", \"tol\", \"vandall\", \"vanhall\", \"vantol\", \"wahle\", \"wal\",\n \"wall\", \"walle\", \"burd\", \"byrd\", \"chauffeured\", \"concurred\", \"conferred\", \"curd\", \"deferred\",\n \"demurred\", \"deterred\", \"ferd\", \"gerd\", \"gird\", \"gjerde\", \"heard\", \"herd\", \"hird\", \"hurd\",\n \"incurred\", \"inferred\", \"interred\", \"jerde\", \"kurd\", \"leard\", \"misheard\", \"nerd\", \"occurred\",\n \"one-third\", \"overheard\", \"prefered\", \"preferred\", \"preferred\", \"preferred\", \"recurred\",\n \"referred\", \"referred\", \"reword\", \"slurred\", \"spurred\", \"stirred\", \"third\", \"transfered\",\n \"transferred\", \"uncured\", \"undeterred\", \"unheard\"]}\n rv = self.randomWords(input_word=input_word_list)\n response_data = json.loads(rv.get_data(as_text=True))\n self.assertEquals(rv.status_code, 200)\n self.assertIn(response_data[\"words\"], input_word_list[\"words\"])", "def lemmatizeWord(self, lst):\n lemmatized_list = []\n for item in lst:\n lemmatized_list.append(self.lmtzr.lemmatize(item))\n return lemmatized_list", "def get_stopwords(choice = 0):\n low_acc_words = [u'orange', u'game', u'wafe', u'gold', u'gas pump', u'dock', u'magnetic disk', u'beard', u'splash', u'stethoscope', u'clock', u'modem', u'spring', u'dribble', u'scale', u'thing', u'parachute', u'screw', u'haired', u'hair spray', u'stick', u'projectile', u'surface', u'scarf', u'boat', u'lantern', u'weapon', u'fire screen', u'maypole', u'Old World buffalo', u'backpack', u'velvet', u'pistol', u'duplicator', u'tissue', u'holding', u'eel', u'iron', u'zoo', u'toilet seat', u'eye', u'telephone', u'drum', u'pepper', u'church', u'pillow', u'body', u'mink', u'prison', u'color', u'jewelry', u'elephant', u'mug', u'cargo ship', u'football', u'llama', u'wombat', u'ax', u'giant panda', u'bison', u'climber', u'tractor', u'hamster', u'beetle', u'sidewalk', u'oilseed', u'shore', u'feet', u'vending machine', u'nail', u'lock', u'licking', u'crowded', u'pudding', u'library', u'sliding', u'steel drum', u'cutter', u'trench coat', u'plate rack', u'fancy', u'barbershop', u'switch', u'hip', u'petting', u'keyboard', u'drilling platform', u'denim', u'old', u'sewing machine', u'dancing', u'lawn mower', u'jaguar', u'cauliflower', u'bubble', u'tray', u'printer', u'hillside', u'heater', u'store', u'stove', u'hook', u'bed', u'book jacket', u'rain barrel', u'dinosaur', u'rowing', u'surf', u'worm', u'garbage truck', u'laptop', u'mouth', u'flute', u'tape player', u'gym', u'large', u'birdhouse', u'covered', u'groom', u'swan', u'lampshade', u'snowplow', u'ramp', u'bathing cap', u'strainer', u'hard', u'mortarboard', u'penguin', u'wooden spoon', u'loaf of bread', u'window', u\"potter's wheel\", u'branch', u'fly', u'greyhound', u'walk', u'starfish', u'kitchen', u'parking meter', u'cassette', u'work', u'cash machine', u'custard apple', u'play', u'ice cream', u'mosque', u'market', u'swing', u'hay', u'fan', u'surfer', u'number', 
u'climb', u'golfcart', u'burrito', u'feather boa', u'resting', u'neck brace', u'glove', u'remote control', u'lotion', u'lamp', u'perched', u'jeep', u'necklace', u'shopping basket', u'sea urchin', u'pajama', u'pinwheel', u'foot', u'maze', u'squash', u'dishrag', u'bib', u'ant', u'dumbbell', u'dragonfly', u'bakery', u'lighter', u'salamander', u'sandglass', u'apron', u'cannon', u'palm', u'tent', u'spacecraft', u'oil filter', u'beer bottle', u'throne', u'stretcher', u'bedroom', u'pan', u'camera', u'kiddie', u'mashed potato', u'railing', u'tongue', u'sky', u'event', u'bright', u'curb', u'sundial', u'screwdriver', u'hand blower', u'joystick', u'flower', u'tv', u'back', u'smile', u'mortar', u'bee', u'bath', u'spatula', u'lawn', u'object', u'barrier', u'mailbox', u'fallen', u'crayfish', u'kid', u'metal', u'shot', u'quill', u'snowboarding', u'mud', u'vacuum', u'water tower', u'sleeping bag', u'altar', u'bassoon', u'family', u'shovel', u'leather', u'maillot', u'soap dispenser', u'blurry', u'racetrack', u'dish', u'gondola', u'chewing', u'badger', u'spindle', u'door', u'shaker', u'purse', u'apiary', u'bus', u'wreck', u'cell', u'balance beam', u'lip', u'animal', u'baby', u'toilet', u'armor plate', u'jigsaw puzzle', u'piggy bank', u'leafhopper', u'torch', u'ashcan', u'talking', u'traveling', u'handrail', u'area', u'raft', u'can opener', u'missile', u'syringe', u'pen', u'beacon', u'croquet ball', u'trail', u'snowboard', u'light', u'owl', u'lift', u'acorn', u'pencil box', u'hermit crab', u'binder', u'ladle', u'fire engine', u'tan', u'volcano', u'chocolate sauce', u'crossword puzzle', u'whistle', u'floating', u'forklift', u'hotdog', u'monotreme', u'eggnog', u'traffic', u'envelope', u'surfboard', u'face', u'polecat', u'tiled', u'camel', u'refrigerator', u'carousel', u'parking', u'spider web', u'stream', u'train', u'square', u'candle', u'thimble', u'jellyfish', u'teddy', u'leash', u'wild', u'shopping cart', u'jackfruit', u'office', u'alligator', u'ready', u'end', u'power drill', u'lens cap', u'looking', u'hand', u'fountain', u'radiator', u'French horn', u'graze', u'female', u'koala', u'paper towel', u'artichoke', u'passenger', u'airship', u'cow', u'slug', u'home', u'tug', u'weasel', u'including', u'crutch', u'submarine', u'chime', u'pretty', u'phone', u'barrow', u'purple', u'pulling', u'wing', u'mongoose', u'washer', u'slide', u'Band Aid', u'splashing', u'obstacle', u'flying', u'restaurant', u'pencil sharpener', u'control', u'something', u'tricycle', u'motor', u'watching', u'grey', u'balcony', u'surrounded', u'statue', u'rotisserie', u'puck', u'assorted', u'umbrella', u'measuring cup', u'hanging', u'ride', u'scuba', u'perform', u'tusker', u'desk', u'puddle', u'sea slug', u'team', u'beaker', u'held', u'safe', u'shower curtain', u'isopod', u'tire', u'beaver', u'tower', u'stump', u'dinner', u'conch', u'playground', u'marmot', u'fruit', u'golf ball', u'read', u'tile', u'watch', u'mosquito net', u'goggle', u'swab', u'cricket', u'wheelie', u'guacamole', u'bush', u'cockroach', u'intersection', u'letter opener', u'station', u'plow', u'course', u'aeroplane', u'view', u'racing', u'broom', u'sunny', u'corn', u'matchstick', u'variety', u'messy', u'playpen', u'ambulance', u'perfume', u'brush', u'go', u'shelf', u'look', u'blowing', u'lobster', u'lettuce', u'busy', u'digging', u'trampoline', u'track', u'glass', u'ox', u'handstand', u'assortment', u'vase', u'aircraft carrier', u'microwave', u'high', u'mousetrap', u'bathroom', u'shower cap', u'counter', u'Christmas stocking', u'safety pin', u'plastic', u'garden', u'transit', 
u'knife', u'docked', u'cluttered', u'serving', u'toddler', u'ledge', u'formation', u'snorkel', u'lying', u'lemon', u'ladybug', u'carry', u'solar dish', u'hammer', u'sleeping', u'saltshaker', u'cowboy', u'unicycle', u'single', u'rule', u'shoji', u'business', u'cup', u'antique', u'catch', u'open', u'carnival', u'cooking', u'rural', u'small', u'wine', u'top', u'flat', u'yurt', u'grasshopper', u'hoop', u'wallet', u'hold', u'someone', u'necked', u'salad', u'leafe', u'paddlewheel', u'porcupine', u'radio telescope', u'preparing', u'canopy', u'pointing', u'honeycomb', u'older', u'hair slide', u'plunger', u'mirror', u'landscape', u'bow', u'cart', u'skateboard', u'device', u'urban', u'sunset', u'attached', u'toward', u'right', u'town', u'four', u'beach wagon', u'close', u'lone', u'chew', u'pile', u'working', u'bottlecap', u'corner', u'swinging', u'behind', u'slot machine', u'food', u'mushroom', u'around', u'tall', u'oxygen mask', u'together', u'veggy', u'skating', u'concrete', u'subway', u'seen', u'head', u'armadillo', u'ly', u'kitten', u'cap', u'painted', u'mustache', u'moving', u'lit', u'sliced', u'sticking', u'milk can', u'roller', u'stainless', u'teeth', u'seated', u'serve', u'lady', u'carriage', u'stand', u'apple', u'paper', u'apartment', u'video', u'eating', u'stadium', u'turn', u'racket', u'stunt', u'plate', u'drinking', u'slice', u'warplane', u'cheese', u'onion', u'backyard', u'coffee', u'peach', u'staring', u'outfit', u'engine', u'coaster', u'striped', u'stacked', u'decorated', u'throwing', u'dirty', u'hula', u'mid', u'catching', u'closed', u'item', u'otter', u'rail', u'tenni', u'sink', u'toaster', u'meal', u'skate', u'fridge', u'pitch', u'kite', u'desktop', u'meat', u'military', u'fireplace', u'show', u'rider', u'rodeo', u'graffiti', u'bunch', u'coming', u'reading', u'walkway', u'another', u'mouse', u'soup', u'hole', u'steel', u'container', u'past', u'carrying', u'equipment', u'farm', u'dressed', u'scooter', u'cellphone', u'stuffed', u'commercial', u'platform', u'full', u'one', u'electronic', u'sprinkler', u'stop', u'along', u'blanket', u'residential', u'kneeling', u'blender', u'oven', u'cattle', u'skateboarder', u'produce', u'book', u'cement', u'bag', u'carrot', u'board', u'round', u'many', u'giant', u'shower', u'asian', u'picnic', u'dining', u'wedding', u'desert', u'huge', u'narrow', u'outside', u'deck', u'three', u'display', u'filled', u'cutting', u'colored', u'ear', u'feeding', u'across', u'eat', u'skateboarding', u'fighter', u'sun', u'darkened', u'brushing', u'ty', u'party', u'pedestrian', u'wet', u'structure', u'different', u'crossbone', u'jet', u'public', u'cooked', u'airplane', u'bread', u'clothe', u'tunnel', u'fishing', u'drife', u'gear', u'birthday', u'frisbee', u'piece', u'row', u'hydrant', u'drawn', u'meter', u'vegetable', u'broccoli', u'country', u'half', u'sandwich', u'doorway', u'lot', u'pair', u'luggage', u'long', u'christma', u'wii', u'guy', u'side', u'leap', u'plane', u'silver', u'post', u'bar', u'reaching', u'drink', u'reflection', u'wand', u'airport', u'photograph', u'type', u'lay', u'lap', u'waterfall', u'banana', u'next', u'baseball', u'hot', u'making', u'gray', u'using', u'batter', u'empty', u'bat', u'clear', u'hospital', u'scissor', u'neck', u'cake', u'alone', u'rope', u'winter', u'runway', u'broken', u'fire', u'getting', u'variou', u'distance', u'beer', u'outstretched', u'chocolate', u'match', u'stopped', u'vintage', u'clean', u'fork', u'cut', u'eaten', u'waiting', u'going', u'onto', u'nintendo', u'time', u'several', u'lined', u'railroad', u'case', u'mother', 
u'suitcase', u'taking', u'doughnut', u'smoke', u'controller', u'crossing', u'friend', u'closeup', u'couple', u'showing', u'made', u'big', u'trying', u'putting', u'hit', u'male', u'', u'pickelhaube', u'suburban', u'costume', u'enjoy', u'new', u'studio', u'mantis', u'pastum', u'gymnast', u'rafting', u'golden', u'waffle iron', u'watering', u'overhead', u'shoot', u'feature', u'machine', u'attempt', u'third', u'tulip', u'jungle', u'wind', u'fig', u'band', u'bone', u'free', u'cucumber', u'bouncing', u'boarding', u'tackled', u'__background__', u'gymnastic apparatus', u'pineapple', u'folded', u'rice', u'sunglasses', u'cushion', u'net', u'covering', u'pretzel', u'steam', u'santum', u'fair', u'sail', u'score', u'toothbrush', u'loaded', u'fry', u'life', u'glider', u'bounce', u'balance', u'cone', u'containing', u'beside', u'wheel', u'rain', u'spaghetti squash', u'thi', u'left', u'photographer', u'forested', u'vanity', u'shoulder', u'pavement', u'officer', u'creek', u'dead', u'ice', u'slide rule', u'dunking', u'horizon', u'raised', u'fabric', u'fight', u'way', u'war', u'landing', u'umpire', u'fashioned', u'dimly', u'topped', u'setting', u'sling', u'potato', u'painting', u'bottom', u'dance', u'crocodile', u'string', u'dig', u'gun', u'chicken', u'tarmac', u'falling', u'french', u'wait', u'pony', u'decker', u'plaza', u'earphone', u'chip', u'get', u'staircase', u'wakeboarder', u'wheelchair', u'pulled', u'polouse', u'still', u'curly', u'scaling', u'lunch', u'base', u'pizza', u'meat loaf', u'shown', u'opened', u'space', u'mess', u'headband', u'place', u'pelican', u'ring', u'sheet', u'bite', u'frame', u'hug', u'wide', u'lick', u'pastry', u'breakfast', u'take', u'topping', u'multiple', u'knee', u'tackling', u'sale', u'professional', u'german', u'crane', u'snack', u'stair', u'ping-pong ball', u'snowsuit', u'sport', u'bicyclist', u'skyscraper', u'checkered', u'restroom', u'tour', u'nearby', u'foggy', u'bmx', u'newspaper', u'mound', u'foam', u'driven', u'mohawk', u'rest', u'instrument', u'chainsaw', u'towel', u'facing', u'audience', u'served', u'clau', u'go-kart', u'tube', u'throw', u'muddy', u'harness', u'strip', u'racquet', u'prepare', u'low', u'pitcher', u'cardoon', u'gymnasium', u'pull', u'arranged', u'strawberry', u'deep', u'cream', u'rubber', u'trash', u'midair', u'peak', u'remote', u'disc', u'follow', u'potpie', u'enjoying', u'stool', u'leaping', u'action', u'taken', u'chopstick', u'flag', u'mounted', u'grill', u'wrestler', u'marble', u'backpacking', u'breaking', u'fungus', u'shade', u'egg', u'muzzled', u'style', u'carpeted', u'sauce', u'snowball', u'abacus', u'foreground', u'circuit', u'leading', u'airborne', u'hotel', u'leotard', u'kind', u'double', u'scabbard', u'bride', u'stall', u'blond', u'cave', u'electric', u'cigarette', u'sponsored', u'shepherd', u'dandelion', u'catcher', u'movie', u'recently', u'floaty', u'chambered nautilus', u'hitting', u'racer', u'passing', u'leaning', u'kissing', u'chase', u'funny', u'used', u'snail', u'pomegranate', u'stack', u'center', u'grind', u'bin', u'formal', u'shaped', u'signal', u'zucchini', u'parade', u'limb', u'laughing', u'step', u'range', u'slouse', u'block', u'downhill', u'jockey', u'retrieving', u'atop', u'cloth', u'skull', u'diving', u'rainy', u'tarp', u'black-footed ferret', u'nice', u'prepared', u'hot pot', u'land', u'fresh', u'hello', u'wrestle', u'kitty', u'spoon', u'rack', u'smaller', u'hose', u'giving', u'attire', u'leaving', u'chiton', u'singing', u'frog', u'crab', u'porch', u'saddle', u'donut', u'crossed', u'tied', u'tomato', u'chasing', u'scenic', 
u'beneath', u'boarder', u'hippopotamus', u'wading', u'sea_anemone', u'wrapped', u'shallow', u'steep', u'bagel', u'gather', u'pipe', u'hi', u'ha', u'jar', u'bug', u'finger', u'handle', u'beam', u'bean', u'whilst', u'contain', u'shake', u'attempting', u'merry', u'yawning', u'sniff', u'swimmer', u'commuter', u'bull', u'smoking', u'plain', u'cross', u'member', u'binoculars', u'underneath', u'well', u'fighting', u'bandanna', u'rocket', u'pay-phone', u'five', u'puppy', u'like', u'campfire', u'shaking', u'construction', u'bun', u'partially', u'flip', u'placed', u'bearing', u'pinatum', u'pie', u'boardwalk', u'pit', u'star', u'baked']\n\n STOPWORDS = ['none','inside', 'near', 'one', 'two', 'three', 'day', 'front', u'i', u'me', u'my', u'myself', u'we', u'our', u'ours', u'ourselves', u'you', u'your', u'yours', u'yourself', u'yourselves', u'he', u'him', u'his', u'himself', u'she', u'her', u'hers', u'herself', u'it', u'its', u'itself', u'they', u'them', u'their', u'theirs', u'themselves', u'what', u'which', u'who', u'whom', u'this', u'that', u'these', u'those', u'am', u'is', u'are', u'was', u'were', u'be', u'been', u'being', u'have', u'has', u'had', u'having', u'do', u'does', u'did', u'doing', u'a', u'an', u'the', u'and', u'but', u'if', u'or', u'because', u'as', u'until', u'while', u'of', u'at', u'by', u'for', u'with', u'about', u'against', u'between', u'into', u'through', u'during', u'before', u'after', u'above', u'below', u'to', u'from', u'up', u'down', u'in', u'out', u'on', u'off', u'over', u'under', u'again', u'further', u'then', u'once', u'here', u'there', u'when', u'where', u'why', u'how', u'all', u'any', u'both', u'each', u'few', u'more', u'most', u'other', u'some', u'such', u'no', u'nor', u'not', u'only', u'own', u'same', u'so', u'than', u'too', u'very', u's', u't', u'can', u'will', u'just', u'don', u'should', u'now', 'background', '__background__', '']\n \n\n unselected_words = [u'', u'pickelhaube', u'enjoy', u'new', u'studio', u'kissing', u'mantis', u'pastum', u'rafting', u'golden', u'waffle iron', u'watering', u'overhead', u'shoot', u'feature', u'machine', u'pizza', u'attempt', u'third', u'tulip', u'jungle', u'wind', u'fig', u'band', u'bone', u'free', u'bouncing', u'boarding', u'tackled', u'__background__', u'gymnasium', u'gymnastic apparatus', u'pineapple', u'folded', u'rice', u'sunglasses', u'cushion', u'net', u'covering', u'pretzel', u'steam', u'santum', u'fair', u'sail', u'score', u'toothbrush', u'loaded', u'fry', u'life', u'glider', u'balance', u'cone', u'containing', u'beside', u'wheel', u'rain', u'spaghetti squash', u'thi', u'left', u'photographer', u'forested', u'vanity', u'shoulder', u'pavement', u'officer', u'creek', u'dead', u'slide rule', u'dunking', u'horizon', u'raised', u'fabric', u'fight', u'way', u'war', u'landing', u'umpire', u'fashioned', u'dimly', u'topped', u'setting', u'sling', u'potato', u'bottom', u'dance', u'crocodile', u'ice', u'string', u'dig', u'gun', u'tarmac', u'falling', u'french', u'wait', u'decker', u'earphone', u'chip', u'get', u'staircase', u'wakeboarder', u'wheelchair', u'pulled', u'polouse', u'still', u'curly', u'scaling', u'lunch', u'meat loaf', u'shown', u'opened', u'space', u'mess', u'headband', u'place', u'pelican', u'ring', u'sheet', u'bite', u'hug', u'wide', u'lick', u'pastry', u'breakfast', u'take', u'topping', u'multiple', u'knee', u'bicyclist', u'sale', u'professional', u'german', u'snack', u'stair', u'ping-pong ball', u'snowsuit', u'sport', u'tackling', u'skyscraper', u'checkered', u'restroom', u'tour', u'nearby', u'foggy', u'bmx', u'newspaper', 
u'mound', u'chopstick', u'foam', u'driven', u'passing', u'mohawk', u'rest', u'instrument', u'chainsaw', u'towel', u'facing', u'audience', u'laughing', u'served', u'clau', u'diving', u'go-kart', u'tube', u'throw', u'harness', u'strip', u'racquet', u'prepare', u'low', u'pitcher', u'cardoon', u'pull', u'arranged', u'strawberry', u'deep', u'cream', u'rubber', u'trash', u'midair', u'peak', u'remote', u'suburban', u'disc', u'follow', u'potpie', u'gymnast', u'enjoying', u'stool', u'leaping', u'action', u'taken', u'flag', u'mounted', u'grill', u'wrestler', u'marble', u'pony', u'backpacking', u'breaking', u'fungus', u'shade', u'egg', u'style', u'carpeted', u'sauce', u'snowball', u'abacus', u'foreground', u'base', u'circuit', u'leading', u'airborne', u'hotel', u'leotard', u'kind', u'double', u'scabbard', u'bride', u'stall', u'blond', u'cave', u'zucchini', u'electric', u'cigarette', u'sponsored', u'shepherd', u'dandelion', u'catcher', u'movie', u'recently', u'floaty', u'chambered nautilus', u'hitting', u'racer', u'leaning', u'chase', u'funny', u'used', u'snail', u'pomegranate', u'cucumber', u'stack', u'center', u'grind', u'bin', u'formal', u'shaped', u'signal', u'parade', u'bounce', u'step', u'plaza', u'range', u'slouse', u'block', u'downhill', u'jockey', u'retrieving', u'atop', u'cloth', u'crane', u'skull', u'rainy', u'tarp', u'black-footed ferret', u'nice', u'prepared', u'hot pot', u'land', u'fresh', u'hello', u'wrestle', u'kitty', u'spoon', u'muzzled', u'rack', u'smaller', u'hose', u'giving', u'attire', u'leaving', u'chiton', u'limb', u'singing', u'frog', u'crab', u'porch', u'donut', u'crossed', u'tied', u'tomato', u'chasing', u'scenic', u'beneath', u'shaking', u'boarder', u'hippopotamus', u'wading', u'sea_anemone', u'wrapped', u'shallow', u'steep', u'bagel', u'gather', u'pipe', u'construction', u'painting', u'chicken', u'jar', u'bug', u'finger', u'handle', u'beam', u'bean', u'whilst', u'contain', u'costume', u'frame', u'shake', u'attempting', u'merry', u'yawning', u'sniff', u'swimmer', u'muddy', u'commuter', u'bull', u'smoking', u'plain', u'cross', u'member', u'binoculars', u'underneath', u'well', u'fighting', u'bandanna', u'rocket', u'pay-phone', u'five', u'puppy', u'like', u'campfire', u'saddle', u'hi', u'bun', u'ha', u'partially', u'flip', u'placed', u'bearing', u'pinatum', u'pie', u'boardwalk', u'pit', u'star', u'baked', u'smoke', u'hospital', u'type', u'hole', u'wand', u'chocolate sauce', u'haired', u'onto', u'drawn', u'wear', u'loaf of bread', u'beer', u'mushroom', u'lift', u'make', u'mother', u'cowboy', u'fork', u'otter', u'playpen', u'alone', u'hamburger', u'bottlecap', u'soup', u'cutter', u'square', u'friend', u'scuba', u'hockey', u'wheelie', u'picnic', u'tug', u'squash', u'case', u'inflatable', u'railroad', u'competition', u'slice', u'broken', u'jeep', u'trying', u'apartment', u'chewing', u'grasshopper', u'guacamole', u'splash', u'male', u'dishrag', u'kayaking', u'acorn', u'snowbank', u'clean', u'hit', u'batter', u'kick', u'jewelry', u'fighter', u'cooked', u'putting', u'try', u'wallet', u'mustache', u'artichoke', u'spaghetti sauce', u'crossing', u'retriever', u'veggy', u'produce', u'darkened', u'kiddie', u'mashed potato', u'closed', u'canopy', u'runway', u'vintage', u'fishing', u'doughnut', u'onion', u'leap', u'rodeo', u'cricket', u'made', u'closeup', u'chew', u'sliced', u'hot', u'deck', u'French horn', u'clothe', u'goggle', u'rowing', u'milk can', u'post', u'outstretched', u'chocolate', u'making', u'course', u'hula', u'carry', u'upside', u'desktop', u'lobster', u'suitcase', 
u'crossbone', u'ty', u'sea slug', u'polecat', u'sandwich', u'racetrack', u'lettuce', u'cockroach', u'toward', u'eaten', u'blender', u'giant', u'atv', u'big', u'holster', u'splashing', u'commercial', u'tunnel', u'bend', u'meter', u'including', u'badger', u'beach wagon', u'beard', u'beak', u'controller', u'match', u'buckle', u'hiker', u'barometer', u'bread', u'serve', u'object', u'stadium', u'tank', u'waterfall', u'stream', u'neck', u'serving', u'manhole cover', u'pitch', u'pistol', u'dribble', u'isopod', u'transit', u'dragonfly', u'huge', u'backyard', u'foot', u'jet', u'dancing', u'custard apple', u'porcupine', u'assorted', u'rope', u'cut', u'showing', u'lemon', u'armadillo', u'salad', u'carrot', u'biting', u'bee', u'hammer', u'lens cap', u'cauliflower', u'kicking', u'denim', u'marmot', u'nintendo', u'fireplace', u'landscape', u'turn', u'hoop', u'wedding', u'eggnog', u'antique', u'bow', u'winter', u'stacked', u'purse', u'beaver', u'kneeling', u'island', u'slot machine', u'Christmas stocking', u'public', u'narrow', u'ladybug', u'stopped', u'burrito', u'necked', u'cheese', u'crayfish', u'single', u'getting', u'tan', u'lined', u'handstand', u'letter opener', u'pencil box', u'doorway', u'leafhopper', u'residential', u'slug', u'eat', u'carriage', u'end', u'lap', u'distance', u'mink', u'sleeping bag', u'time', u'container', u'stunt', u'drife', u'broccoli', u'docked', u'structure', u'cooker', u'go', u'aircraft carrier', u'pudding', u'tape player', u'outfit', u'coaster', u'reaching', u'meat', u'splashed', u'hair slide', u'roller', u'submarine', u'toaster', u'dining', u'rotisserie', u'football', u'spindle', u'christma', u'thimble', u'giant panda', u'pedestrian', u'compass', u'squirrel', u'sea urchin', u'hotdog', u'peach', u'warplane', u'oil filter', u'waiting', u'hip', u'jaguar', u'mortar', u'gear', u'sprinkler', u'beer bottle', u'gondola', u'half', u'stainless', u'military', u'electronic', u'bat', u'handrail', u'perform', u'coffee maker', u'flat', u'round', u'meal', u'telephone', u'pool table', u'seagull', u'hermit crab', u'fancy', u'obstacle', u'honeycomb', u'gravel', u'ladle', u'farm', u'crossword puzzle', u'steel', u'drink', u'pepper', u'tongue', u'owl', u'rule', u'gym', u'seated', u'monotreme', u'cattle', u'water tower', u'vegetable', u'eel', u'variou', u'messy', u'raft', u'castle', u'fire', u'bib', u'skunk', u'gray', u\"carpenter's kit\", u'wombat', u'carnival', u'equipment', u'mousetrap', u'joystick', u'golf ball', u'shoji', u'banana', u'clear', u'sloth', u'glove', u'reel', u'desert', u'necklace', u'ear', u'digging', u'rural', u'asian', u'school', u'wreck', u'coffee', u'hydrant', u'mouse', u'mid', u'row', u'puddle', u'engine', u'mongoose', u'stopwatch', u'walkway', u'past', u'beacon', u'koala', u'lip', u'gold', u'scooter', u'puck', u\"potter's wheel\", u'ly', u'oilseed', u'tire', u'drum', u'party', u'radio telescope', u'worm', u'lay', u'magnetic disk', u'bar', u'butterfly', u'dinner', u'birthday', u'power drill', u'saltshaker', u'thing', u'ant', u'lantern', u'hard', u'weasel', u'ridden', u'paddlewheel', u'drilling platform', u'climber', u'safe', u'shower', u'airship', u'cassette player', u'printer', u'wooden spoon', u'bassoon', u'reflection', u'scissor', u'apiary', u'ice cream', u'rider', u'boathouse', u'mud', u'corn', u'guinea pig', u'snow leopard', u'mailbox', u'cement', u'bakery', u'taking', u'variety', u'swan', u'velvet', u'couple', u'fridge', u'strainer', u'dirty', u'screwdriver', u'jigsaw puzzle', u'device', u'alligator', u'oven', u'silver', u'urban', u'country', u'opener', u'leather', 
u'barrel', u'duck', u'drumstick', u'cake', u'ambulance', u'pencil sharpener', u'barrier', u'safety pin', u'right', u'baseball', u'beetle', u'ax', u'cassette', u'assortment', u'entree', u'armor plate', u'going', u'cart', u'can opener', u'curve', u'pointing', u'dribbling', u'sock', u'home', u'catching', u'church', u'mosque', u'measuring cup', u'striped', u'throne', u'skating', u'sundial', u'CD player', u'grille', u'brushing', u'jersey', u'plunger', u'conch', u'several', u'shaker', u'tile', u'stretcher', u'tower', u'plane', u'salamander', u'lock', u'platform', u'airport', u'hamster', u'graffiti', u'jackfruit', u'cabbage', u'blowing', u'kitten', u'yurt', u'cannon', u'powder', u'sea cucumber', u'sea cow', u'dinosaur', u'racing', u'primate', u'wii', u'skateboarding', u'blanket', u'mug', u'cap', u'challenging', u'throwing', u'library', u'quill', u'trench coat', u'microwave', u'tusker', u'cluttered', u'apple', u'duplicator', u'broom', u'wet', u'altar', u'show', u'heater', u'radiator', u'cargo ship', u'spatula', u'screw', u'neck brace', u'flute', u'peacock', u'sewing machine', u'reading', u'dough', u'rifle', u'long', u'penguin', u'playground', u'photograph', u'luggage', u'plow', u'item', u'factory', u'starfish', u'fire engine', u'locomotive', u'piggy bank', u'empty', u'scale', u'plate rack', u'graze', u'cutting', u'feeding', u'cooking', u'rapid', u'ledge', u'business', u'colored', u'forklift', u'boot', u'wing', u'remote control', u'trampoline', u'gas pump', u'space bar', u'snorkel', u'book', u'microscope', u'rain barrel', u'pair', u'Old World buffalo', u'airplane', u'creature', u'knee pad', u'whale', u'birdhouse', u'oxygen mask', u'bag', u'sailboat', u'mat', u'town', u'using', u'rugby ball', u'staring', u'shopping basket', u'binder', u'team', u'sailing vessel', u'ox', u'leopard', u'shield', u'full', u'Band Aid', u'mountaintop', u'crate', u'modem', u'family', u'tennis ball', u'barn', u'work', u'formation', u'barrow', u'goose', u'syringe', u'soap dispenser', u'kite', u'appliance', u'solar dish', u'lizard', u'paddling', u'cardigan', u'sink', u'control', u'toddler', u'mortarboard']\n\n useless_words = ['holding','hold' ,'wearing', 'wear' , 'standing','sitting', 'stand', 'sit' , 'smiling', 'smile', 'clothing', 'shirt', \"next\", 'posing', 'playing']\n abstract_words = ['beautiful', 'young']\n color_words = ['black', 'white', 'red', 'blue', 'brown']\n\n if choice == 1:\n return STOPWORDS\n\n STOPWORDS += unselected_words\n STOPWORDS += useless_words\n STOPWORDS += low_acc_words\n #STOPWORDS += color_words\n #STOPWORDS += abstract_words\n return STOPWORDS", "def hard_words(a_list):\n\n return [word for word in a_list if len(word) > 7]", "def create_word_list(self):\n return set(self.split(self.title)+self.split(self.conditions)+self.split(self.interventions))", "def long_words(words):\n words_longer_than_four = []\n for word in words:\n if len(word) > 4:\n words_longer_than_four.append(word)\n return words_longer_than_four", "def list_of_words(self):\n\t\treturn str.split(re.sub(r'\\W+', ' ', self.body.encode('ascii', 'replace')))", "def longest_words(self, n=10):\n return sorted(set(self.text), key=len, reverse=True)[:n]", "def filter_long_words(list,n):\n numberlist=[]#set up a new list\n for i in range(0,len(list)):\n if len(list[i]) > n:#pick up the word that is longer than n\n numberlist.append(list[i])#count the length of each word\n else:\n continue\n return numberlist", "def words(self):\n punctuation = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n lst = []\n for lines in self.lines:\n words = 
lines.split(' ')\n for word in words:\n no_punc = ''\n for c in word:\n if c not in punctuation:\n no_punc += c.lower()\n if no_punc != '' and no_punc != '\\n':\n lst.append(no_punc.strip('\\n'))\n return lst\n #no_punc += word.lower()\n #for word in no_punc.split(' ')[:-1]:\n #for word in no_punc:\n # lst.append(word)\n #line = lines.strip(os.linesep) # strips away spaces, \\t (tabs), and \\n (new-lines/enter)\n #print(no_punc)\n #print(lst)", "def makeWords(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split() \r\n for x in LoW: \r\n if x not in self.words: \r\n self.words[x] = 1\r\n else: \r\n self.words[x] += 1\r\n return self.words", "def get_wordlists():\n\n\tCS = {'ACM', 'IEEE', 'Computer Science', 'Artificial Intelligence',\n\t\t'Pattern Recognition', 'Computer Vision', 'Machine Learning',\n\t\t'Signal Processing', 'Electrical Engineering', 'Image Processing',\n\t\t'Data Mining', 'Neural Networks', 'Computer Graphics', 'Graphics',\n\t\t'Language Processing', 'Internet', 'Intelligent Systems',\n\t\t'Robotic','Data','Software', 'Machine Vision', 'Image Analysis',\n\t\t'Scientific Computing', 'SIAM', 'Malware','World Wide Web', \n\t\t'Computational Intelligence', 'Computational Linguistics',\n\t\t'Computational linguistics','Algorithm','Computer','ITiCSE',\n\t\t'ITICSE','Machine learning','Learning','learning',\n\t\t'Artificial intelligence','CIVR','Document Analysis'}\n\n\tbio = {'Biology', 'Microbiology', 'Molecular', 'Medical', 'Biological',\n\t\t'Cancer', 'Genome', 'Bioinformatics', 'Protein', 'Biocomputing',\n\t\t'Biomedical', 'biology', 'Medicine', 'Biosystems', 'Virology',\n\t\t'Brain', 'Psychology', 'Genetics', 'Bioengineering', 'Cell',\n\t\t'Cardiology', 'Metabolic', 'Biotechnology', 'Pathogens',\n\t\t'Pathology', 'Plant', 'PLANT', 'Virus', 'Drug','Medicinal',\n\t\t'Neuro','Psych',\n\t\t'Genomic','Diseases','Endocrinology', 'Epidemiology',\n\t\t'Proteom','Biochem', 'DNA', 'Pharma', 'Biomedic', 'biomedica',\n\t\t'Neurobiological'}\n\n\tmath = {'Mathemati','Markov','Probability','Algebra','Network',\n\t\t'Topology','Optimization', 'Geometr','Statistic','Algorithm',\n\t\t'Graph ','Graphs','Combinatori','Riemann Surfaces','Permutation Groups',\n\t\t'Functional Analysis', 'SIAM','Fixed Point','Wavelet','Statistics',\n\t\t'Linear Regression','Fractal','geometry','Multivariate','Chaos',\n\t\t'mathemati','Kernel'}\n\n\tlinguistics = {}\n\n\tcomputer_vision = {}\n\n\tchemistry = {}\n\n\tphysics = {}\n\n\t# Rename \"Computer Vision\" to \"Image Processing\"?\n\ttopic_names = ['Computer Science','Biology','Mathematics','Chemistry',\n\t\t'Physics','Computer Vision','Natural Language Processing']\n\ttopics = [CS, bio, math]#, linguistics, computer_vision, chemistry, physics]\n\n\treturn {topic_names[i]:topics[i] for i in range(len(topics))}", "def find_long_words(tokens):\n\n return sorted([word for word in set(tokens) if len(word) > 15])", "def crossword_words(crossword: list) -> list:\n pass", "def wordize(lines):\n parser = Parser()\n tokenizer = Tokenizer()\n word_ctr = WordCounter()\n words = []\n for l in lines :\n if (l.rstrip()) :\n statement = parser.parseSentence(l, int(word_ctr))\n token_lists = tokenizer.tokenizeStatement(statement, int(word_ctr))\n for l in token_lists :\n if len(l) > 0 :\n words.append(l)\n word_ctr += 1\n return words", "def words(self):\n # BEGIN Question 2\n x= str(self.text).lower()\n # m = str(x).translate(string.punctuation)\n y= x.split()\n\n y = set([''.join(c for c in s if c not in string.punctuation) for s in y])\n y = [s for s 
in y if s]\n while(len(y) != 0):\n self.word_set.append(min(y))\n y.remove(min(y))\n\n\n return self.word_set\n # END Question 2", "def words(self, word):\n pass", "def add_to_word_list(strings):\n WordList\n k = 0\n while k < len(strings):\n if word_count(strings[k].text) > 1:\n WordList.append(strings[k].text)\n k += 1", "def split_words(self,data=[None]):\r\n content=[]\r\n self.data=data\r\n for i in data:\r\n liste=list(i)\r\n content.append(liste)\r\n return content", "def words(self, min_word_length=0):\n\n word_tokenizer = nltk.RegexpTokenizer(r'\\b[^\\s]+\\b')\n headline_string = self.headline_string.lower().replace(\"’\", \"'\")\n return [word for word in word_tokenizer.tokenize(headline_string) if len(word) >= min_word_length]", "def longest_word_length(words):", "def create_word(char_list):", "def longwords_Fil(strings):\n # write your code here\n return list(filter(lambda x:len(x)>4,strings))", "def get_words(self):\n return [self.id2word[idx] for idx in range(len(self))]", "def words(self):\n return self.title + self.content", "def getTWordList(text):\n\ttmpwordlist = string.split(text)\n\twordlist = [ [] ]\n\tpos = 0\n\tfor i in range(len(tmpwordlist)):\n\t\tword = getBrownWords(tmpwordlist[i])\n\t\tword[0] = puncTrim(word[0])\n\t\tif len(word[0]) > 0:\n\t\t\twordlist[pos].append(word)\n\t\telse:\n\t\t\tpos += 1\n\t\t\twordlist.append([])\n\treturn wordlist", "def words(self) -> List[str]:\n return pulumi.get(self, \"words\")", "def words(self) -> List[str]:\n return pulumi.get(self, \"words\")", "def get_main_words(idioms_set):\r\n main_words = Counter([idiom.split()[-1] for idiom in idioms_set])\r\n print('main words:', '\\n', main_words)\r\n print('top 50 main words:', '\\n', main_words.most_common(50)) \r\n return list(main_words)", "def all_words(self, all_possible_words):\n result = []\n for word in all_possible_words:\n result = result + [word[i:j]\n for i in range(len(word)) for j in range(i + 2, len(word) + 1)]\n\n return result", "def create_medium_list(self):\n word_list = []\n try:\n f = open(self.index, 'r')\n for line in f:\n if line[0] == 'M' and line[1] == \" \" and line[2] != \" \":\n readout = line[2:].upper()\n has_digit = re.search('\\d', readout)\n # this can be added to if there are more characters that cannot be\n # used in the game\n has_wrong = re.search(\"[-,.' 
'/!?]\", readout)\n if has_digit is None:\n if has_wrong is None:\n word_list.append(readout.strip('\\n'))\n return word_list\n except IOError:\n print(\"Cannot open file\")\n raise (IOError)", "def create_word_list(text_as_string):\n # print 'creating word list'\n global global_word_list\n\n for w in text_as_string.split():\n word = w.translate(string.maketrans(\"\", \"\"), string.punctuation).lower()\n if len(word) > 0:\n global_word_list.append(word) # Appends each word to global word list\n\n return global_word_list", "def word_list(self) -> List[str]:\n return self._word_list", "def cheat(self) -> List[str]:\n all_possible_words = self.trie.get_all_possible_words(\n self.get_current_reels_letters()\n )\n better_words = OrderedDict()\n for word in all_possible_words:\n score = self.scorer.calculate_word_score(word)\n if len(better_words) > 2:\n first_word = next(iter(better_words.items()))\n if first_word[0] < score:\n better_words.popitem(last=False)\n better_words[score] = word\n else:\n better_words[score] = word\n better_words = OrderedDict(sorted(better_words.items()))\n return [f\"{word} ({score})\" for score, word in better_words.items()]", "def full_text_words(self):\n\n if self._full_text_words == []:\n for s in self.full_text():\n for w in s.split():\n self._full_text_words.append(w)\n\n return self._full_text_words", "def get_track_words(words_per_track,hour_count,lst):\n i = hour_count * words_per_track \n j = i + words_per_track - 1 \n \n return lst[i:j]", "def splitWordList(self, text):\n result = list()\n if text is None:\n return result\n\n t = text + \"⁋\"\n t = t.replace('\\n', '⁋')\n t = re.sub(WordListProcessor.REFERENCE_PATTERN, \"\", t)\n t = re.sub(WordListProcessor.SUPERSCRIPT_PATTERN, \"\", t) # TODO: Extract sense!\n t = re.sub(WordListProcessor.HTML_REMOVER, \"\", t)\n t = t.replace(\"&quot\", \"\\\"\")\n t = t.replace(\",\", \"⁋,\")\n t = t.replace(\";\", \"⁋\")\n # print(t)\n # t = re.sub(WordListProcessor.BRACKETED_DELIMITER, \"$1$2$3$4$5$6\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER1, \"$1$2\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER2, \"$1$2\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER3, \"$1$2\", t)\n t = self.escapeDelimiters(t)\n # print(t)\n t = t.replace(\"⁋;\", \"⁋\")\n t = t.replace(\"⁋,\", \"⁋\")\n t = t.replace(\"]] or [[\", \"]]⁋[[\")\n t = t.replace(\"]] and [[\", \"]]⁋[[\")\n t = t.replace(\" - \", \"⁋\")\n # t = t.replace(\" / \", \"⁋\")\n j = t.find(\" / \") # Use ' / ' only as a delimiter if there are at least two of them!\n if j >= 0:\n j = t.find(\" / \", j)\n if j >= 0:\n t = t.replace(\" / \", \"⁋\")\n # print(t)\n\n # print(t)\n while True:\n delim = t.find('⁋')\n if delim >= 0:\n word = t[0:delim]\n if word:\n # Normalize the word.\n word = word.strip()\n if word.lower().startswith(\"see also\"):\n word = word[8:].strip()\n if word.lower().startswith(\"see\"):\n word = word[3:].strip()\n if word.startswith(\":\"):\n word = word[1:].strip()\n word = self.deWikify(word).strip()\n word = self.removeBrackets(word).strip()\n word = self.removeTemplates(word).strip()\n word = self.removeComments(word).strip()\n if word.lower().startswith(\"see also\"):\n word = word[8:].strip()\n if word.lower().startswith(\"see\"):\n word = word[3:].strip()\n if word.startswith(\":\"):\n word = word[1:].strip()\n if word.endswith(\".\"):\n word = word[:-1].strip()\n if word.endswith(\",\"):\n word = word[:-1].strip()\n\n # Check for slashes.\n word = word.replace(\" / \", \"/\")\n word = word.replace(\"/ \", \"/\")\n i 
= word.find('/')\n if word:\n if i >= 0 and word.find(' ') < 0:\n while True:\n result.append(word[0:i])\n word = word[i + 1:]\n i = word.find('/')\n if i < 0:\n break\n result.append(word)\n else:\n result.append(word)\n\n t = t[delim + 1:]\n\n else:\n break\n\n return result", "def target_words(self) -> List[str]:\n return list(map(\n lambda w: self.spaces[w.lower()] \n if w.lower() in self.spaces else w.lower(), \n self.keywords\n ))", "def get_words(self):\n wordlist = self.words_box.get().split()\n for word in wordlist:\n self.words[word] = False\n self.logic.set_words_to_find(self.words)", "def words(self) -> List[str]:\n return list(self.solutions)", "def words(self):\n return self.text.split()", "def load_words(): \r\n return lw.load_words()", "def longestword(word_list):\n\n longest = 0\n\n for word in word_list:\n\n if len(word) > longest:\n longest = len(word)\n\n return longest", "async def wordfilter_list(self, ctx):\n await ctx.send(f'Current filtered words ({len(self.words)}):\\n||{\", \".join(self.words)}||')", "def processwords(list_of_matches, lemmatag = False):\n list_of_matches = [w.lower() for w in list_of_matches]\n # remove nonwords, strip . to normalise \"dr.\"\n if translated_option != 'o' and translated_option != 'u':\n list_of_matches = [w.lstrip('.').rstrip('.') for w in list_of_matches if re.search(regex_nonword_filter, w)]\n \n list_of_matches.sort()\n \n # tokenise if multiword:\n if phrases and not n_gramming:\n from nltk import word_tokenize as word_tokenize\n list_of_matches = [word_tokenize(i) for i in list_of_matches]\n\n # this is just for plaintext ... should convert to unicode on file open\n if datatype == 'plaintext':\n try:\n list_of_matches = [unicode(w, errors = 'ignore') for w in list_of_matches]\n except TypeError:\n pass\n\n if not dependency and exclude and 'w' in exclude.keys():\n list_of_matches = [w for w in list_of_matches if not re.match(exclude['w'], w)]\n\n if lemmatise or 'l' in show:\n if not dependency:\n tag = gettag(query, lemmatag = lemmatag)\n lemmata = lemmatiser(list_of_matches, tag)\n tups = zip(list_of_matches, lemmata)\n res = []\n for w, l in tups:\n single_result = []\n if exclude and 'l' in exclude.keys():\n if re.match(exclude['l'], l):\n continue\n if 'w' in show:\n single_result.append(w)\n if 'l' in show:\n single_result.append(l)\n # bad fix:\n # this currently says, if pos in show, there must only be pos ...\n if 'p' in show:\n if lemmatise:\n single_result.append(l)\n else:\n single_result.append(w)\n\n single_result = '/'.join(single_result)\n res.append(single_result)\n list_of_matches = res\n\n if titlefilter and not dependency:\n list_of_matches = titlefilterer(list_of_matches)\n if spelling:\n list_of_matches = convert_spelling(list_of_matches, spelling = spelling)\n\n # use blacklist option in gui\n if 'blacklist' in kwargs.keys():\n stopwords = False\n if kwargs['blacklist'] is not False:\n if kwargs['blacklist'] is True:\n from dictionaries.stopwords import stopwords as my_stopwords\n stopwords = [i.lower() for i in my_stopwords]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n if type(kwargs['blacklist']) == list:\n stopwords = [i.lower() for i in kwargs['blacklist']]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n regexblacklist = re.compile(kwargs['blacklist'])\n list_of_matches = [w for w in list_of_matches if not re.search(regexblacklist, w)]\n\n #if not split_con:\n # list_of_matches = unsplitter(list_of_matches)\n \n # turn every result 
into a single string again if need be:\n if phrases:\n output = []\n for res in list_of_matches:\n joined = ' '.join(res)\n output.append(joined)\n return output\n else:\n return list_of_matches", "def all_words(self, min_word_length=0):\n return [word for headline in self.headlines for word in\n headline.words(min_word_length=min_word_length)]", "def create_english_word_list(filename):\n global global_english_word_list\n\n if not global_english_word_list:\n with open(filename) as f:\n for line in f:\n global_english_word_list.append(re.sub(r'\\s+', '', line))", "def idx_to_words(ls, words):\n\n output = []\n for sub_ls in ls:\n word = words[sub_ls[0]]\n for idx in sub_ls[1:]:\n word = \" \".join([word, words[idx]])\n output.append([sub_ls, word])\n return output", "def get_phrase_list(self, words, length):\n\n if len(words) >= length:\n return [words[i:i+length] for i in range(len(words) - length + 1)]\n else:\n return None", "def getWordList(text):\n\ttmpwordlist = string.split(text)\n\twordlist = []\n\tfor i in range(len(tmpwordlist)):\n\t\tword = puncTrim(tmpwordlist[i])\n\t\tif len(word) > 0:\n\t\t\twordlist.append(word)\n\treturn wordlist", "def getWords(speech):\r\n return speech.split()", "def disambiguateWordsOld(self, word_list, tag_list):\n\t\t# print u\" \".join(word_list).encode('utf8');\n\t\t# print u\" \".join(tag_list).encode('utf8');\t\t\t\n\t\n\t\tif len(word_list)==0 or len(word_list)!=len(tag_list):\n\t\t\treturn word_list;\n\t\telse:\n\t\t\tnewwordlist=[];\n\t\t\twordtaglist=zip(word_list,tag_list);\n\t\t\t# print wordtaglist\n\t\t\tfor i in range(len(wordtaglist)):\n\t\t\t\tif i+1<=len(wordtaglist):\n\t\t\t\t\t# do tests with next word\n\t\t\t\t\t# إذا كانت الكلمة الحالية \"أن\" تكون \"أنْ\" حرف نصب إذا سبقت فعلا\n\t\t\t\t\t# وتكون أنّ، من أخوات إنّ إذا كان ما بعدها اسما\n\t\t\t\t\tif wordtaglist[i][0]==u'أن' and self.tagger.isVerbTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case1';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنْ','t');\n\t\t\t\t\telif wordtaglist[i][0]==u'أن' and self.tagger.isNounTag(wordtaglist[i+1][1]):\n\t\t\t\t\t\t# print' case 2';\n\t\t\t\t\t\twordtaglist[i]=(u'أَنَّ','t');\n\t\t\t\tnewwordlist.append(wordtaglist[i][0]);\n\t\t\treturn newwordlist;", "def reverse_order_of_words(lst):\n\n\tres = []\n\tend = len(lst)\n\n\tfor i in range(len(lst)-1, -1, -1):\n\t\tif lst[i-1] == ' ' or i == 0:\n\t\t\tfor j in range(i, end):\n\t\t\t\tres.append(lst[j])\n\t\t\tif i != 0:\n\t\t\t\tres.append(' ')\n\t\t\tend = i -1\n\treturn res", "def fit(self, words):\n ### part of 5.2\n chars = set()\n for word in words:\n if len(word) > self.max_wordlen:\n self.max_wordlen = len(word)\n chars.update(word)\n if self.maxlen is not None:\n self.max_wordlen = self.maxlen\n self.char_list = list(chars)\n self.char_list.append(\"unknown\")", "def _gen_loc_words(word_list: list):\r\n loc = 0\r\n res = []\r\n for word in word_list:\r\n res.append((loc, word))\r\n loc += len(word)\r\n return res", "def get_words(self, first=10):\n return get_occurences(self.lemmatized_words)[:first]", "def find_words(word_length: int, fhs: List[TextIO]) -> List[str]:\n\n words: List[str] = []\n clean = partial(re.sub, '[^a-zA-Z]', '')\n accept = lambda word: len(word) == word_length\n\n for fh in fhs:\n for line in fh:\n words.extend(filter(accept, map(clean, line.split())))\n\n return words", "def _create_word_list(self, sentences):\n\n ############ 1.4 TODO\n \"\"\"\n https://machinelearningmastery.com/clean-text-machine-learning-python/\n \"\"\"\n import string\n table = 
str.maketrans('','',string.punctuation)\n # import ipdb; ipdb.set_trace()\n word_list = []\n if type(sentences) == list:\n for sentence in sentences:\n words = sentence.split(\" \")\n word_list += [word.translate(table).lower() for word in words]\n else:\n words = sentences.split(\" \")\n word_list += [word.translate(table).lower() for word in words]\n ############\n # raise NotImplementedError()\n return word_list", "def list_every_word(file_name): #considers file_name is valid\n file = open(file_name,\"r\")\n words = []\n lines = file.readlines()\n for line in lines:\n line = line.strip()\n line = line.split(\" \")\n for word in line:\n words.append(word)\n return words", "def countWords(text):\r\n\r\n\tlistOfWord = []\r\n\tlistOfFrequency = []\r\n\r\n\tfor word in text:\t\t\t\t\t \t# menghitung frekuensi kata\r\n if word == '':\r\n pass\r\n elif word not in listOfWord:\t\t\t\t\t# menyimpan kata ke dalam list\r\n listOfWord.append(word)\r\n listOfFrequency.append(1)\r\n else:\r\n index = listOfWord.index(word)\r\n listOfFrequency[index] = listOfFrequency[index] + 1 # menambah frekuensi kata yang sudah ada\r\n\r\n\r\n\tlst = [listOfWord, listOfFrequency]\r\n\r\n\treturn lst", "def get_word_list(file_name):\n\tnew_list = []\n\n\tf = open(file_name,'r')\n\tlines = f.readlines()\n\tcurr_line = 0\n\tend_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\twhile lines[end_line].find('End of the Project Gutenberg EBook') == -1:\n\t\tend_line -= 1\n\tlines = lines[curr_line + 1:end_line]\n\n\tlong_lines = ''.join(str(e) for e in lines)\n\tlong_lines = long_lines.lower()\n\tlong_lines = long_lines.translate(None, punctuation)\n\n\twords = long_lines.split()\n\tfor item in words:\n\t\tnew_list.append(item)\n\n\treturn new_list", "def split_by_words(term):\n if not term:\n return []\n # make all chars in lower case\n term = term.lower()\n # main rules\n splitted_by_size = re.findall(re_words, term) or [term]\n # separators\n splitted_by_seps = [re.split(r'[_ @,.\\-()/№\\\"]', word) for word in splitted_by_size]\n # convert to simple array\n flat_list = [word for wordlist in splitted_by_seps for word in wordlist]\n # transliteration\n translitted = []\n for word in flat_list:\n try:\n translitted += custom_transliterate(word)\n translitted.append(word)\n translitted.append(translit(word, reversed=True))\n except Exception as e:\n logging.debug(\"Translit error: %s - %s\", str(e), word)\n # unique\n unique_list = list(set(translitted))\n return unique_list", "def get_words(self):\n words = self.wiki.get_words(cleaner=self.cleaner)\n df = pd.DataFrame({\"word\": words})\n df = df.drop_duplicates(\"word\")\n df = df.head(100)\n mask = df[\"word\"].isin(self.common[\"word\"])\n mask |= df[\"word\"].str.lower().isin(self.common[\"word\"])\n\n words = [ Word(word) for word in df[~mask][\"word\"] ]\n for word in words:\n word.get_definition(definer=self.definer)", "def non_std_words(work):\n dictionary = enchant.Dict(\"en_US\")\n non_std_word = []\n\n for elem in work:\n lyrics = [item for sublist in elem[1] for item in sublist]\n lyrics = [i for i in lyrics if i[0] not in [',', '.', \"'\", '?', '!', '’', '&', '#', ':']]\n word_count = 1\n not_word_count = 1\n for tuples in lyrics:\n if dictionary.check(tuples[0]):\n word_count += 1\n else:\n not_word_count += 1\n\n non_std_word.append((not_word_count/(not_word_count+word_count), elem[0]))\n\n return non_std_word", "def s_words(words):\n\t\n\treturn words // 100 / 10", "def small_word_filter(words, 
min_=1):\n new_words = []\n for w in words:\n if(len(w) > min_):\n new_words += [w]\n return new_words", "def n_long_words(words, n):\n words_longer_than_n = []\n for word in words:\n if len(word) > n:\n words_longer_than_n.append(word)\n\n return words_longer_than_n", "def get_word_list_from_item(self, item, remove_stops):\n txt = item['text']\n word_list = list(set([w.replace('\\n', \"\") for w in txt.split(\" \") if len(w) > 0])) # avoid empty string\n\n if remove_stops:\n word_list = sorted([w for w in word_list if w not in self.stop_words])\n\n return word_list", "def listify(words):\n word_list = []\n for word in words:\n if word:\n word = word.lower()\n if word not in word_list: # add it\n word_list.append(word)\n else:\n pass\n word_list.sort()\n return word_list", "def break_words(stuff):\r\n #parte la cadena cada vez que encuentra un espacio\r\n words = stuff.split(' ') \r\n return words", "def wordCount( aList ):\n return len( aList )", "def _possible_words(self):\n new_words = []\n for word in self._words:\n if word not in (self._used_words + tuple(self._tried_words)):\n for i in range(len(self._start)):\n if word[:i] + word[i+1:] == self._start[:i] + self._start[i+1:]:\n new_words.append(word)\n new_words.sort()\n return new_words", "def get_words(f: str, letters: List[str]) -> List[str]:\r\n forbidden_letters = [i for i in string.ascii_lowercase]\r\n for i in letters:\r\n try:\r\n forbidden_letters.remove(i)\r\n except:\r\n pass\r\n words_file = open(f)\r\n word_list = []\r\n letstr = \"\"\r\n for i in letters:\r\n letstr += i\r\n for word in words_file:\r\n word = word[:-1].lower()\r\n if len(word) >= 4:\r\n count = 0\r\n for let in word:\r\n if let in forbidden_letters:\r\n count += 1\r\n if word.count(let) > letstr.count(let):\r\n count += 1\r\n if letters[4] not in word:\r\n count += 1\r\n if count == 0:\r\n word_list.append(word)\r\n return word_list", "def make_word_list(fin):\n\tword_list = []\n\tfor line in fin:\n\t\tword = line.strip()\n\t\tword_list.append(word)\n\treturn word_list", "def words(self, uncased=False):\n if uncased:\n return [t[self.TEXT].lower() for t in self.data]\n else:\n return [t[self.TEXT] for t in self.data]", "def words(self, uncased=False):\n if uncased:\n return [t[self.TEXT].lower() for t in self.data]\n else:\n return [t[self.TEXT] for t in self.data]", "def WrapWords(textlist, size, joiner='\\n'):\n # \\S{%d}(?!\\s|\\Z) collets the max size for words that are larger than the max\n # (?<=\\S{%d})\\S+ collects the remaining text for overflow words in their own\n # line\n # \\S.{1,%d}(?=\\s|\\Z)) collects all words and spaces up to max size, breaking\n # at the last space\n rval = []\n linelength_re = re.compile(\n r'(\\S{%d}(?!\\s|\\Z)|(?<=\\S{%d})\\S+|\\S.{1,%d}(?=\\s|\\Z))' %\n (size, size, size - 1))\n for index in range(len(textlist)):\n if len(textlist[index]) > size:\n # insert joiner into the string at appropriate places.\n textlist[index] = joiner.join(linelength_re.findall(textlist[index]))\n # avoid empty comment lines\n rval.extend(x.strip() for x in textlist[index].strip().split(joiner) if x)\n return rval", "def get_word_list(sentence):\n sentence = space1.sub(r'\\1 \\2', sentence)\n sentence = space2.sub(r\"\\1 \\2\", sentence)\n sentence = space3.split(sentence)\n sentence = \" \".join(sentence)\n wordlist = [i for i in sentence.split()]\n return \" \".join(wordlist)", "def extract_words(self):\n str = self.text.lower()\n words = re.sub(r'[?|—|:|\"|,|\\.\\n|\\.|\\s|\\n|\\t|\\v|\\f|\\r]+', \"*\", str)\n self.word_list = 
words.split(\"*\")", "def get_word(wordlist, args): #{{{\n iters = 0\n while iters < 500:\n if args.lowercase == True:\n word = random.choice(wordlist).strip().lower()\n return word\n elif args.lowercase == False:\n word = random.choice(wordlist).strip().lower().capitalize()\n return word\n\n if args.punctuation == False:\n if len(word) < args.max_length and word.isalpha() == True:\n return word\n iters += 1\n elif args.punctuation == True:\n if len(word) < args.max_length:\n return word\n iters += 1 #}}}", "def words(self):\n return list(self._words())", "def words(self):\n return list(self._words())", "def get_word_list(file_name):\n\tbook = get_file_text(file_name)\n\tbook = strip_header(book)\n\tbook = strip_punctuation(book)\n\tbook = book.lower()\n\twords = re.split(r'\\s+', book)\n\treturn words", "def lemmatization(tokenized_word_list):\n porter=nltk.stem.PorterStemmer()\n filtered_tokens = [porter.stem(word) for word in tokenized_word_list]\n return filtered_tokens", "def makeWordLengths(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split() \r\n for x in LoW: \r\n if len(x) not in self.wordlengths: \r\n self.wordlengths[len(x)] = 1\r\n else: \r\n self.wordlengths[len(x)] += 1\r\n return self.wordlengths", "def review_to_wordlist(review):\n\n words = review.lower().split()\n words = [w for w in words]\n return(words)", "def line_2_words(wordid_list, id2word):\n word_list = []\n for word_id in wordid_list:\n word_list.append(id2word[word_id])\n return word_list", "def getAllDescWords(itemList):\r\n itemList = list(set(itemList)) # make itemList unique\r\n descWords = []\r\n for item in itemList:\r\n descWords.extend(worldItems[item][DESCWORDS])\r\n return list(set(descWords))", "def get_words(self,data):\n f_words = []\n e_words = []\n for d in data:\n f_sent = d[\"fr\"] ## foreign sentence\n e_sent = d[\"en\"] ## English sentence\n f_words.extend(f_sent.split())\n d[\"fr\"] = f_sent.split()\n e_words.extend(e_sent.split())\n d[\"en\"] = e_sent.split()\n return list(set(f_words)),list(set(e_words))", "def loadWords() -> List[str]:\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print(\" \", len(wordList), \"words loaded.\")\n\n return wordList", "def get_words(results):\n return ' '.join([result['word'] for result in results])", "def __stringToLetters(self, words):\r\n li = []\r\n \r\n for word in words:\r\n li.append(list(word))\r\n \r\n return li", "def _words(self):\n regex = r'\\b\\w+\\b'\n for word in re.findall(regex, self.text):\n yield word" ]
[ "0.7187629", "0.70133877", "0.7005348", "0.69585055", "0.69476235", "0.68506676", "0.68327844", "0.6649206", "0.6611271", "0.65282124", "0.65262544", "0.6518681", "0.6517774", "0.6490736", "0.645253", "0.6450386", "0.64484304", "0.63865644", "0.6377032", "0.63758963", "0.63743013", "0.6348116", "0.63443273", "0.6334676", "0.633397", "0.6306532", "0.6297638", "0.6293963", "0.62924147", "0.6287762", "0.627402", "0.6265096", "0.62603134", "0.62542474", "0.62542474", "0.624911", "0.6231451", "0.622168", "0.62201196", "0.6216072", "0.6210478", "0.62053674", "0.61957943", "0.6187502", "0.6185961", "0.61602205", "0.6148458", "0.6146381", "0.61450714", "0.6137496", "0.6128898", "0.61156356", "0.61101335", "0.6106775", "0.6105803", "0.61016995", "0.60872704", "0.6072508", "0.6067735", "0.60677016", "0.6063609", "0.6061653", "0.6058754", "0.605826", "0.60512596", "0.6050696", "0.6035115", "0.6030862", "0.60261613", "0.6025908", "0.60219264", "0.60218376", "0.60193425", "0.6016934", "0.60116404", "0.6008001", "0.5999832", "0.59990555", "0.59952074", "0.59949064", "0.5989089", "0.59803426", "0.59803426", "0.5978659", "0.5971322", "0.59677774", "0.5965583", "0.596173", "0.596173", "0.5952835", "0.5944683", "0.5940641", "0.59354657", "0.5925176", "0.59228903", "0.5921749", "0.5918307", "0.5913855", "0.5909545", "0.589635" ]
0.6082544
57
filter the proposal boxes
def boxes_filter(dets, PRE_NMS_TOPN, NMS_THRESH, POST_NMS_TOPN, CONF_THRESH, USE_GPU=False): # speed up nms if PRE_NMS_TOPN > 0: dets = dets[: min(len(dets), PRE_NMS_TOPN), :] # apply nms if NMS_THRESH > 0 and NMS_THRESH < 1: if USE_GPU: keep = nms_gpu(dets, NMS_THRESH) else: keep = nms(dets, NMS_THRESH) dets = dets[keep, :] if POST_NMS_TOPN > 0: dets = dets[: min(len(dets), POST_NMS_TOPN), :] inds = np.where(dets[:, -1] >= CONF_THRESH)[0] dets = dets[inds, :] return dets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter_boxes(self, boxes, box_confidences, box_class_probs):\n box_scores = box_confidences * box_class_probs\n box_classes = np.argmax(box_scores, axis=-1)\n box_class_scores = np.max(box_scores, axis=-1)\n pos = np.where(box_class_scores >= self.object_threshold)\n\n boxes = boxes[pos]\n classes = box_classes[pos]\n scores = box_class_scores[pos]\n\n return boxes, classes, scores", "def _filter_boxes(self, patch, boxes):\n center = (boxes[:, :2] + boxes[:, 2:]) / 2\n mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (\n center[:, 0] < patch[2]) * (\n center[:, 1] < patch[3])\n return mask", "def _filter_boxes(self, min_score, boxes, scores, classes):\n n = len(classes)\n idxs = []\n for i in range(n):\n if scores[i] >= min_score:\n idxs.append(i)\n \n filtered_boxes = boxes[idxs, ...]\n filtered_scores = scores[idxs, ...]\n filtered_classes = classes[idxs, ...]\n return filtered_boxes, filtered_scores, filtered_classes", "def _filter_boxes(self, boxes, min_size, im_info):\n # Scale min_size to match image scale\n min_size *= im_info[2]\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n x_ctr = boxes[:, 0] + ws / 2.\n y_ctr = boxes[:, 1] + hs / 2.\n keep = np.where((ws >= min_size) & (hs >= min_size) &\n (x_ctr < im_info[1]) & (y_ctr < im_info[0]))[0]\n return keep", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n\n return keep", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def filter_boxes(self, boxes, box_confidences, box_class_probs):\n box_scores = [x * y for x, y in zip(box_confidences, box_class_probs)]\n box_class_scores = [np.max(x, axis=-1).reshape(-1) for x in box_scores]\n box_class_scores = np.concatenate(box_class_scores)\n box_classes = [np.argmax(x, axis=-1).reshape(-1) for x in box_scores]\n box_classes = np.concatenate(box_classes)\n filtering_mask = box_class_scores >= self.class_t\n list = [np.reshape(x, (-1, 4)) for x in boxes]\n boxes = np.concatenate(list)\n boxes = boxes[filtering_mask]\n scores = box_class_scores[filtering_mask]\n classes = box_classes[filtering_mask]\n return (boxes, classes, scores)", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def filter(self, filters):", "def _filter_boxes2(boxes, max_size, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n if max_size > 0:\n keep = np.where(np.minimum(ws, hs) < max_size)[0]\n elif min_size > 0:\n keep = np.where(np.maximum(ws, hs) > min_size)[0]\n return keep", "def _remove_outliers(self, boxes):\n\n filtered_boxes = []\n for bc in boxes:\n w = bc[2] - bc[0]\n h = bc[3] - bc[1]\n if bc[1] < 450 and w > 32 and h > 32:\n filtered_boxes.append(bc)\n elif bc[1] > 450 and w > 64 and h > 64:\n filtered_boxes.append(bc)\n\n return np.array(filtered_boxes)", "def filter_boxes(self, min_score, boxes, scores, classes):\n n = len(classes)\n idxs = []\n for i in range(n):\n if scores[i] >= min_score:\n idxs.append(i)\n \n filtered_boxes = boxes[idxs, ...]\n 
filtered_scores = scores[idxs, ...]\n filtered_classes = classes[idxs, ...]\n return filtered_boxes, filtered_scores, filtered_classes", "def _filter_box_candidates(self, bboxes, labels):\n bbox_w = bboxes[:, 2] - bboxes[:, 0]\n bbox_h = bboxes[:, 3] - bboxes[:, 1]\n valid_inds = (bbox_w > self.min_bbox_size) & \\\n (bbox_h > self.min_bbox_size)\n valid_inds = np.nonzero(valid_inds)[0]\n return bboxes[valid_inds], labels[valid_inds]", "def filter_boxes(self, boxes, box_confidence, box_class_probs):\n f_boxes = []\n b_classes = []\n b_scores = []\n for i in range(len(boxes)):\n boxscore = box_confidence[i] * box_class_probs[i]\n maxes = np.amax(boxscore, axis=3)\n keep = np.argwhere(maxes[:, :, :] >= self.class_t)\n\n for kept in keep:\n f_boxes.append(boxes[i][kept[0], kept[1], kept[2]])\n b_classes.append(np.argmax(boxscore[kept[0],\n kept[1], kept[2]]))\n b_scores.append(maxes[kept[0], kept[1], kept[2]])\n \"\"\" muchj easier in tf 2.x\n\n box_class = tf.argmax(boxscore, axis=-1)\n box_score = tf.math.reduce_max(boxscore, axis=-1)\n mask = boxscore >= self.class_t\n\n boxes = tf.compat.v1.boolean_mask(boxes, mask)\n scores = tf.compaat.v1.boolean_mask(boxscore, mask)\n classes = tf.compat.v1.boolean_mask(box_class, mask)\n\n f_boxes.append(boxes)\n b_classes.append(classes)\n b_scores.append(scores)\n \"\"\"\n filtered_boxes = np.array(f_boxes)\n box_classes = np.array(b_classes)\n box_scores = np.array(b_scores)\n return (filtered_boxes, box_classes, box_scores)", "def _filter_img_boxes(boxes, im_info):\n padding = 50\n w_min = -padding\n w_max = im_info[1] + padding\n h_min = -padding\n h_max = im_info[0] + padding\n keep = np.where((w_min <= boxes[:,0]) & (boxes[:,2] <= w_max) & (h_min <= boxes[:,1]) &\n (boxes[:,3] <= h_max))[0]\n return keep", "def filter_boxes(min_score, boxes, scores, classes):\n n = len(classes)\n idxs = []\n for i in range(n):\n if scores[i] >= min_score:\n idxs.append(i)\n \n filtered_boxes = boxes[idxs, ...]\n filtered_scores = scores[idxs, ...]\n filtered_classes = classes[idxs, ...]\n return filtered_boxes, filtered_scores, filtered_classes", "def _FilterProtonsAndElectrons(self):\n self.reactants = filter(lambda c: c.compound.kegg_id not in \n ['C00080', 'C05359'], self.reactants)", "def boxes_filter(dets, bbox_id=1, class_name='None', color=(255, 255, 255), scale=1.0, thresh=0.5, min_size=(2, 2)):\n _objs = []\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return _objs\n\n for i in inds:\n bbox = dets[i, :4] / scale\n bbox_confidence = dets[i, -1]\n if bbox[3] - bbox[1] <= min_size[0] or bbox[2] - bbox[0] <= min_size[1]:\n continue\n attribute = dict(class_name=class_name, color=color)\n _objs.append(dict(bbox=bbox, bbox_id=bbox_id, bbox_confidence=bbox_confidence, keypoints=[],\n attribute=attribute, person_id=-1, person_confidence=-1, segment=[]))\n\n return _objs", "def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):\r\n\r\n # Step 1: Compute box scores\r\n ### START CODE HERE ### (≈ 1 line)\r\n box_scores = box_confidence * box_class_probs\r\n ### END CODE HERE ###\r\n\r\n # Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score\r\n ### START CODE HERE ### (≈ 2 lines)\r\n box_classes = tf.argmax(box_scores, axis=-1)\r\n box_class_scores = tf.reduce_max(box_scores, axis=-1)\r\n ### END CODE HERE ###\r\n\r\n # Step 3: Create a filtering mask based on \"box_class_scores\" by using \"threshold\". 
The mask should have the\r\n # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)\r\n ### START CODE HERE ### (≈ 1 line)\r\n filtering_mask = box_class_scores >= threshold\r\n ### END CODE HERE ###\r\n\r\n # Step 4: Apply the mask to scores, boxes and classes\r\n ### START CODE HERE ### (≈ 3 lines)\r\n scores = tf.boolean_mask(box_class_scores, filtering_mask)\r\n boxes = tf.boolean_mask(boxes, filtering_mask)\r\n classes = tf.boolean_mask(box_classes, filtering_mask)\r\n ### END CODE HERE ###\r\n\r\n return scores, boxes, classes", "def filter_all_objects(bboxes, shovel_type, num_teeth, image_size=640):\n image_w, image_h = image_size, image_size\n shovel_thresholds = thresholds[shovel_type]\n\n all_filtered_bboxes = []\n bboxes, size_filtered_bboxes = filter_wrong_sizes(bboxes, image_size,\n shovel_thresholds[\n \"min_area_threshold\"],\n shovel_thresholds[\n \"max_area_threshold\"],\n min_aspect_ratios,\n max_aspect_ratios)\n # seperate the teeth and toothline bounding boxes from bucket, matInside, and wearArea boxes\n teeth_toothline_bboxes = [bbox for bbox in bboxes if bbox.get_label() == 0 or \\\n bbox.get_label() == 1]\n other_bboxes = [bbox for bbox in bboxes if bbox.get_label() != 0 and \\\n bbox.get_label() != 1]\n teeth_toothline_bboxes, tooth_filtered_bboxes = filter_teeth_toothline(\n teeth_toothline_bboxes,\n shovel_thresholds, num_teeth)\n other_bboxes, filtered_other_bboxes = filter_bucket_mat_inside_wear_area(other_bboxes)\n all_filtered_bboxes += size_filtered_bboxes + tooth_filtered_bboxes + \\\n filtered_other_bboxes\n for bbox in all_filtered_bboxes:\n if not bbox:\n all_filtered_bboxes.remove(bbox)\n else:\n bbox.filtered = True\n\n good_bboxes = teeth_toothline_bboxes + other_bboxes\n return good_bboxes, all_filtered_bboxes", "def filter_boxes(self, min_score, detections):\n mask = detections['detection_scores'] >= min_score\n filtered_detections = (detections['detection_boxes'][mask], \n detections['detection_scores'][mask], \n detections['detection_classes'][mask])\n return filtered_detections", "def filter_bboxes(\n bboxes: Sequence[BoxType],\n rows: int,\n cols: int,\n min_area: float = 0.0,\n min_visibility: float = 0.0,\n min_width: float = 0.0,\n min_height: float = 0.0,\n) -> List[BoxType]:\n resulting_boxes: List[BoxType] = []\n for bbox in bboxes:\n # Calculate areas of bounding box before and after clipping.\n transformed_box_area = calculate_bbox_area(bbox, rows, cols)\n bbox, tail = cast(BoxType, tuple(np.clip(bbox[:4], 0, 1.0))), tuple(bbox[4:])\n clipped_box_area = calculate_bbox_area(bbox, rows, cols)\n\n # Calculate width and height of the clipped bounding box.\n x_min, y_min, x_max, y_max = denormalize_bbox(bbox, rows, cols)[:4]\n clipped_width, clipped_height = x_max - x_min, y_max - y_min\n\n if (\n clipped_box_area != 0 # to ensure transformed_box_area!=0 and to handle min_area=0 or min_visibility=0\n and clipped_box_area >= min_area\n and clipped_box_area / transformed_box_area >= min_visibility\n and clipped_width >= min_width\n and clipped_height >= min_height\n ):\n resulting_boxes.append(cast(BoxType, bbox + tail))\n return resulting_boxes", "def filter(self, result):\n convexities = []\n for mask_idx in range(result.masks.shape[2]):\n mask = result.masks[:, :, mask_idx]\n props = regionprops(mask.numpy().astype(np.int8))[0]\n convexities.append(props.filled_area/props.convex_area)\n\n convexities = np.array(convexities)\n convexity_stats = stats.describe(convexities)\n\n heights = 
result.rois[:, 2] - result.rois[:, 0]\n widths = result.rois[:, 3] - result.rois[:, 1]\n gt_max_width = widths > self.width_stats.minmax[1]\n lt_min_width = widths < self.width_stats.minmax[0]\n gt_max_height = heights > self.height_stats.minmax[1]\n lt_min_height = heights < self.height_stats.minmax[0]\n keep = ~(gt_max_width | lt_min_width |\n gt_max_height | lt_min_height)\n initial_size = heights.shape[0]\n new_size = keep.sum()\n if initial_size != new_size:\n logging.info(f\"Analyzer filtered {initial_size - new_size}\"\n f\" out of {initial_size}.\")\n result.masks = result.masks.permute(2, 0, 1)\n new_result = result.select(keep)\n new_result.masks = new_result.masks.permute(1, 2, 0)\n return new_result", "def _filter_crowd_proposals(self, roidb, crowd_thresh):\n for entry in roidb:\n gt_overlaps = entry['gt_overlaps'].toarray()\n crowd_inds = np.where(entry['is_crowd'] == 1)[0]\n non_gt_inds = np.where(entry['gt_classes'] == 0)[0]\n if len(crowd_inds) == 0 or len(non_gt_inds) == 0:\n continue\n crowd_boxes = xyxy_to_xywh(entry['boxes'][crowd_inds, :])\n non_gt_boxes = xyxy_to_xywh(entry['boxes'][non_gt_inds, :])\n iscrowd_flags = [int(True)] * len(crowd_inds)\n ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd_flags)\n bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0]\n gt_overlaps[non_gt_inds[bad_inds], :] = -1\n entry['gt_overlaps'] = scipy.sparse.csr_matrix(gt_overlaps)", "def filter(self, viewer, parent, elements):\n\n return [e for e in elements if self.select(viewer, parent, e)]", "def filter(self, *args, **kwargs):", "def filter(self):\n\n # Calculate outliers in the multivariate Gaussian distribution analysis.\n # Returns the outliers as vector and an Ellipse object for plotting\n outliers, self._ellipse = multivariate_gaussian(\n self.liedataframe[['coul', 'vdw']],\n confidence=self.settings.confidence,\n returnellipse=True,\n edgecolor='red',\n facecolor='none')\n\n # Register outliers.\n self.liedataframe['filter_mask'] = self.liedataframe['filter_mask'].values + numpy.array(outliers)\n\n # Check outliers for any cases leading to all but one pose to be marked as\n # outlier. Not wise to include this in the boltzmann weighted sheme.\n logger.info(\n \"Outlier detection. Outliers: {0} of {1} points, method: Multivariate Gaussian distribution.\"\n \"Confidence interval {2:.3f}\".format(\n outliers.sum(), self.liedataframe[['coul', 'vdw']].size, self.settings.confidence))\n\n return self.liedataframe", "def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):\n\n\t# Step 1: Compute box scores\n\tbox_scores = box_confidence * box_class_probs # [19, 19, 5, 1] * [19, 19, 5, 80] = [19, 19, 5, 80]\n\n\t# Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score\n\tbox_classes = K.argmax(box_scores, axis=-1)\n\tbox_class_scores = K.max(box_scores, axis=-1, keepdims=False)\n\n\t# Step 3: Create a filtering mask based on \"box_class_scores\" by using \"threshold\". 
The mask should have the\n\t# same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)\n\tfiltering_mask = box_class_scores >= threshold\n\n\t# Step 4: Apply the mask to scores, boxes and classes\n\tscores = tf.boolean_mask(box_class_scores, filtering_mask)\n\tboxes = tf.boolean_mask(boxes, filtering_mask)\n\tclasses = tf.boolean_mask(box_classes, filtering_mask)\n\n\treturn scores, boxes, classes", "def non_max_suppression(bboxes, iou_threshold, threshold, box_format=\"corners\"):\n\n # 49 x 6 \n assert type(bboxes) == list\n # print(bboxes)\n bboxes = [box for box in bboxes if box[1] > threshold]\n bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)\n bboxes_after_nms = []\n # print(bboxes)\n while bboxes:\n chosen_box = bboxes.pop(0)\n bbox_temp = bboxes.copy()\n bboxes = []\n for box in bbox_temp: # not the same class or not overlap a lot \n if box[0] != chosen_box[0] or intersection_over_union(torch.tensor(chosen_box[2:]),torch.tensor(box[2:]), box_format=box_format,) < iou_threshold:\n bboxes.append(box)\n\n bboxes_after_nms.append(chosen_box)\n # print(\"NMS: \" + str(len(bboxes_after_nms)))\n return bboxes_after_nms", "def prepare_filter(self, ):\n if not self._parent.connected():\n return\n papers = self._parent.model.list_papers([\"name\"])\n upps = map(lambda a: (a[\"id\"], a[\"name\"]), papers)\n accounts = self._parent.model.list_accounts([\"name\"])\n uaccs = map(lambda a: (a[\"id\"], a[\"name\"]), accounts)\n self.dialog.update_widget(count_range = self._parent.model.get_deals_count_range(),\n price_range = self._parent.model.get_deals_price_range(),\n comm_range = self._parent.model.get_deals_commission_range(),\n volume_range = self._parent.model.get_deals_volume_range(),\n stock_list = upps,\n accounts_list = uaccs)", "def reduce_possibilities_by_box(self):\n x = self.targetCell.x\n y = self.targetCell.y\n if x < 3 and y < 3: #top left\n self.check_box1()\n if x > 2 and x < 6 and y < 3: #middle left\n self.check_box2()\n if x > 5 and y < 3: #bottom left\n self.check_box3()\n if x < 3 and y > 2 and y < 6: #top middle\n self.check_box4()\n if x > 2 and x < 6 and y > 2 and y < 6: #center\n self.check_box5()\n if x > 5 and y > 2 and y < 6: #bottom middle\n self.check_box6()\n if x < 3 and y > 5: #top right\n self.check_box7()\n if x > 2 and x < 6 and y > 5: #middle right\n self.check_box8()\n if x > 5 and y > 5: #bottom right\n self.check_box9()\n self.targetCell.box_neighbour_possibilities = flatten_list(self.targetCell.box_neighbour_possibilities)", "def filter_bboxes_by_visibility(\n original_shape: Sequence[int],\n bboxes: Sequence[BoxType],\n transformed_shape: Sequence[int],\n transformed_bboxes: Sequence[BoxType],\n threshold: float = 0.0,\n min_area: float = 0.0,\n) -> List[BoxType]:\n img_height, img_width = original_shape[:2]\n transformed_img_height, transformed_img_width = transformed_shape[:2]\n\n visible_bboxes = []\n for bbox, transformed_bbox in zip(bboxes, transformed_bboxes):\n if not all(0.0 <= value <= 1.0 for value in transformed_bbox[:4]):\n continue\n bbox_area = calculate_bbox_area(bbox, img_height, img_width)\n transformed_bbox_area = calculate_bbox_area(transformed_bbox, transformed_img_height, transformed_img_width)\n if transformed_bbox_area < min_area:\n continue\n visibility = transformed_bbox_area / bbox_area\n if visibility >= threshold:\n visible_bboxes.append(transformed_bbox)\n return visible_bboxes", "def iou_suppression(cnt_box, yolo_box, max_threshold, min_threshold):\n all_boxes 
= []\n pre_bboxes = yolo_box\n bboxes = cnt_box\n for i in range(len(pre_bboxes)):\n max_flag = 0\n min_flag = 0\n for j in range(len(bboxes)):\n\n (pre_x1, pre_y1) = (pre_bboxes[i][0], pre_bboxes[i][1])\n (pre_x2, pre_y2) = (pre_bboxes[i][2], pre_bboxes[i][3])\n (cur_x1, cur_y1) = (bboxes[j][0], bboxes[j][1])\n (cur_x2, cur_y2) = (bboxes[j][2], bboxes[j][3])\n origin_w = pre_x2 - pre_x1\n origin_h = pre_y2 - pre_y1\n current_w = cur_x2 - cur_x1\n current_h = cur_y2 - cur_y1\n prime_area = origin_h * origin_w\n current_area = current_h*current_w\n\n if pre_x1 > cur_x1:\n if pre_y1 > cur_y1:\n if cur_x2 - pre_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = cur_y2 - pre_y1\n if width > origin_w:\n width = origin_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n\n else:\n if cur_x2 - pre_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = pre_y2 - cur_y1\n if width > origin_w:\n width = origin_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n else:\n if pre_y1 > cur_y1:\n if pre_x2 - cur_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = cur_y2 - pre_y1\n if width > current_w:\n width = current_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n else:\n if pre_x2 - cur_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = pre_y2 - cur_y1\n if width > current_w:\n width = current_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n\n if lap_area != 0:\n sum_area = (prime_area + current_area - lap_area)\n iou_score = lap_area/sum_area\n if iou_score > max_threshold: # set the threshold of the iou scores, in line with the sort\n max_flag = 1\n elif iou_score > min_threshold:\n min_flag = 1\n\n if max_flag == 1 or min_flag == 0:\n all_boxes.append(pre_bboxes[i])\n\n if cnt_box != []:\n for index_box in range(cnt_box.shape[0]):\n all_boxes.append(cnt_box[index_box])\n\n return np.asarray(all_boxes)", "def _filter(self, __button):\r\n# WARNING: Refactor _filter; current McCabe Complexity metric = 54.\r\n _criteria = []\r\n _inputs = []\r\n _compound = []\r\n\r\n # Read the user inputs for the different fields that can be used to\r\n # filter with.\r\n _criteria.append(self.cmbCriteriaID.get_active_text())\r\n _inputs.append(self.txtFilterID.get_text())\r\n _compound.append(self.cmbCompound1.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCategory.get_active_text())\r\n _inputs.append(self.cmbFilterCategory.get_active())\r\n _compound.append(self.cmbCompound2.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaType.get_active_text())\r\n _inputs.append(self.cmbFilterType.get_active())\r\n _compound.append(self.cmbCompound3.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaStatus.get_active_text())\r\n _inputs.append(self.cmbFilterStatus.get_active())\r\n _compound.append(self.cmbCompound4.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCriticality.get_active_text())\r\n _inputs.append(self.cmbFilterCriticality.get_active())\r\n _compound.append(self.cmbCompound5.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaAge.get_active_text())\r\n _inputs.append(self.txtFilterAge.get_text())\r\n _compound.append(self.cmbCompound6.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaLifeCycle.get_active_text())\r\n _inputs.append(self.cmbFilterLifeCycle.get_active())\r\n 
_compound.append(self.cmbCompound7.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaShortDesc.get_active_text())\r\n _inputs.append(self.txtFilterShortDesc.get_text())\r\n _compound.append(self.cmbCompound8.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaLongDesc.get_active_text())\r\n _inputs.append(self.txtFilterLongDesc.get_text())\r\n _compound.append(self.cmbCompound9.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRemarks.get_active_text())\r\n _inputs.append(self.txtFilterRemarks.get_text())\r\n _compound.append(self.cmbCompound10.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaAnalysis.get_active_text())\r\n _inputs.append(self.txtFilterAnalysis.get_text())\r\n _compound.append(self.cmbCompound11.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaTest.get_active_text())\r\n _inputs.append(self.txtFilterTest.get_text())\r\n _compound.append(self.cmbCompound12.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaTestCase.get_active_text())\r\n _inputs.append(self.txtFilterTestCase.get_text())\r\n _compound.append(self.cmbCompound13.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRequestBy.get_active_text())\r\n _inputs.append(self.cmbFilterRequestBy.get_active_text())\r\n _compound.append(self.cmbCompound14.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRequestDate.get_active_text())\r\n _inputs.append(self.txtFilterRequestDate.get_text())\r\n _compound.append(self.cmbCompound15.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaReviewBy.get_active_text())\r\n _inputs.append(self.cmbFilterReviewBy.get_active_text())\r\n _compound.append(self.cmbCompound16.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaReviewDate.get_active_text())\r\n _inputs.append(self.txtFilterReviewDate.get_text())\r\n _compound.append(self.cmbCompound17.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaApproveBy.get_active_text())\r\n _inputs.append(self.cmbFilterApproveBy.get_active_text())\r\n _compound.append(self.cmbCompound18.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaApproveDate.get_active_text())\r\n _inputs.append(self.txtFilterApproveDate.get_text())\r\n _compound.append(self.cmbCompound19.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCloseBy.get_active_text())\r\n _inputs.append(self.cmbFilterCloseBy.get_active_text())\r\n _compound.append(self.cmbCompound20.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCloseDate.get_active_text())\r\n _inputs.append(self.txtFilterCloseDate.get_text())\r\n _compound.append(self.cmbCompound21.get_active_text())\r\n\r\n _inputs.append(self.chkFilterAccepted.get_active())\r\n _compound.append(self.cmbCompound22.get_active_text())\r\n\r\n _inputs.append(self.chkFilterReviewed.get_active())\r\n\r\n _criteria.append(self.cmbCriteriaAssembly.get_active_text())\r\n _model = self.cmbAssembly.get_model()\r\n _row = self.cmbAssembly.get_active_iter()\r\n if _row is not None:\r\n _text = int(_model.get_value(_row, 1))\r\n else:\r\n _text = 0\r\n _inputs.append(_text)\r\n _compound.append(self.cmbCompound23.get_active_text())\r\n\r\n # Build the query from the user-provided inputs.\r\n if all(_c is None for _c in _criteria):\r\n query = None\r\n elif Configuration.RTK_MODULES[0] == 1:\r\n query = \"SELECT * FROM rtk_incident \\\r\n WHERE fld_revision_id={0:d} AND \".format(\r\n self._revision_id)\r\n else:\r\n query = \"SELECT * FROM rtk_incident \\\r\n WHERE fld_revision_id=0 AND \"\r\n\r\n if _criteria[0] is not None 
and _criteria[0] != '':\r\n query = query + \"fld_incident_id\" + _criteria[0] + _inputs[0]\r\n if _compound[0] is not None and _compound[0] != '':\r\n query = query + \" \" + _compound[0] + \" \"\r\n\r\n if _criteria[1] is not None and _criteria[1] != '':\r\n query = query + \"fld_incident_category\" + _criteria[1] + \\\r\n str(_inputs[1])\r\n if _compound[1] is not None and _compound[1] != '':\r\n query = query + \" \" + _compound[1] + \" \"\r\n\r\n if _criteria[2] is not None and _criteria[2] != '':\r\n query = query + \"fld_incident_type\" + _criteria[2] + \\\r\n str(_inputs[2])\r\n if _compound[2] is not None and _compound[2] != '':\r\n query = query + \" \" + _compound[2] + \" \"\r\n\r\n if _criteria[3] is not None and _criteria[3] != '':\r\n query = query + \"fld_status\" + _criteria[3] + str(_inputs[3])\r\n if _compound[3] is not None and _compound[3] != '':\r\n query = query + \" \" + _compound[3] + \" \"\r\n\r\n if _criteria[4] is not None and _criteria[4] != '':\r\n query = query + \"fld_criticality\" + _criteria[4] + str(_inputs[4])\r\n if _compound[4] is not None and _compound[4] != '':\r\n query = query + \" \" + _compound[4] + \" \"\r\n\r\n if _criteria[5] is not None and _criteria[5] != '':\r\n query = query + \"fld_incident_age\" + _criteria[5] + str(_inputs[5])\r\n if _compound[5] is not None and _compound[5] != '':\r\n query = query + \" \" + _compound[5] + \" \"\r\n\r\n if _criteria[6] is not None and _criteria[6] != '':\r\n query = query + \"fld_life_cycle\" + _criteria[6] + str(_inputs[6])\r\n if _compound[6] is not None and _compound[6] != '':\r\n query = query + \" \" + _compound[6] + \" \"\r\n\r\n if _criteria[21] is not None and _criteria[21] != '':\r\n query = query + \"fld_hardware_id\" + _criteria[21] + \\\r\n str(_inputs[23])\r\n if _compound[22] is not None and _compound[22] != '':\r\n query = query + \" \" + _compound[22] + \" \"\r\n\r\n if _criteria[7] is not None and _criteria[7] != '':\r\n query = query + \"fld_short_description \" + _criteria[7] + \\\r\n \" '%\" + _inputs[7] + \"%'\"\r\n if _compound[7] is not None and _compound[7] != '':\r\n query = query + \" \" + _compound[7] + \" \"\r\n\r\n if _criteria[8] is not None and _criteria[8] != '':\r\n query = query + \"fld_long_description \" + _criteria[8] + \\\r\n \" '%\" + _inputs[8] + \"%'\"\r\n if _compound[8] is not None and _compound[8] != '':\r\n query = query + \" \" + _compound[8] + \" \"\r\n\r\n if _criteria[9] is not None and _criteria[9] != '':\r\n query = query + \"fld_remarks \" + _criteria[9] + \\\r\n \" '%\" + _inputs[9] + \"%'\"\r\n if _compound[9] is not None and _compound[9] != '':\r\n query = query + \" \" + _compound[9] + \" \"\r\n\r\n if _criteria[10] is not None and _compound[10] != '':\r\n query = query + \"fld_analysis \" + _criteria[10] + \\\r\n \" '%\" + _inputs[10] + \"%'\"\r\n if _compound[10] is not None and _compound[10] != '':\r\n query = query + \" \" + _compound[10] + \" \"\r\n\r\n if _criteria[11] is not None and _compound[11] != '':\r\n query = query + \"fld_test_found \" + _criteria[11] + \\\r\n \" '%\" + _inputs[11] + \"%'\"\r\n if _compound[11] is not None and _compound[11] != '':\r\n query = query + \" \" + _compound[11] + \" \"\r\n\r\n if _criteria[12] is not None and _compound[12] != '':\r\n query = query + \"fld_test_case \" + _criteria[12] + \\\r\n \" '%\" + _inputs[12] + \"%'\"\r\n if _compound[12] is not None and _compound[12] != '':\r\n query = query + \" \" + _compound[12] + \" \"\r\n\r\n if _criteria[13] is not None and _compound[13] != '':\r\n query = 
query + \"fld_request_by\" + _criteria[13] + \\\r\n \"'\" + _inputs[13] + \"'\"\r\n if _compound[13] is not None and _compound[13] != '':\r\n query = query + \" \" + _compound[13] + \" \"\r\n\r\n if _criteria[14] is not None and _compound[14] != '':\r\n query = query + \"fld_request_date\" + _criteria[14] + \\\r\n str(datetime.strptime(_inputs[14], \"%Y-%m-%d\").toordinal())\r\n if _compound[14] is not None and _compound[14] != '':\r\n query = query + \" \" + _compound[14] + \" \"\r\n\r\n if _criteria[15] is not None and _compound[15] != '':\r\n query = query + \"fld_reviewed_by\" + _criteria[15] + \\\r\n \"'\" + _inputs[15] + \"'\"\r\n if _compound[15] is not None and _compound[15] != '':\r\n query = query + \" \" + _compound[15] + \" \"\r\n\r\n if _criteria[16] is not None and _compound[16] != '':\r\n query = query + \"fld_reviewed_date\" + _criteria[16] + \\\r\n str(datetime.strptime(_inputs[16], \"%Y-%m-%d\").toordinal())\r\n if _compound[16] is not None and _compound[16] != '':\r\n query = query + \" \" + _compound[16] + \" \"\r\n\r\n if _criteria[17] is not None and _compound[17] != '':\r\n query = query + \"fld_approved_by\" + _criteria[17] + \\\r\n \"'\" + _inputs[17] + \"'\"\r\n if _compound[17] is not None and _compound[17] != '':\r\n query = query + \" \" + _compound[17] + \" \"\r\n\r\n if _criteria[18] is not None and _compound[18] != '':\r\n query = query + \"fld_approved_date\" + _criteria[18] + \\\r\n str(datetime.strptime(_inputs[18], \"%Y-%m-%d\").toordinal())\r\n if _compound[18] is not None and _compound[18] != '':\r\n query = query + \" \" + _compound[18] + \" \"\r\n\r\n if _criteria[19] is not None and _compound[19] != '':\r\n query = query + \"fld_complete_by\" + _criteria[19] + \\\r\n \"'\" + _inputs[19] + \"'\"\r\n if _compound[19] is not None and _compound[19] != '':\r\n query = query + \" \" + _compound[19] + \" \"\r\n\r\n if _criteria[20] is not None and _compound[20] != '':\r\n query = query + \"fld_complete_date\" + _criteria[20] + \\\r\n str(datetime.strptime(_inputs[20], \"%Y-%m-%d\").toordinal())\r\n if _compound[20] is not None and _compound[20] != '':\r\n query = query + \" \" + _compound[20] + \" \"\r\n\r\n if _inputs[21]:\r\n query = query + \"fld_accepted=%d\" % 1\r\n if _compound[21] is not None and _compound[21] != '':\r\n query = query + \" \" + _compound[21] + \" \"\r\n\r\n if _inputs[22]:\r\n query = query + \"fld_reviewed=%d\" % 1\r\n\r\n self._modulebook.request_filter_incidents(self._revision_id, query)", "def filter_profanities(text,replace_handler=profanity_word_handler):\n profanities=CensoredWord.objects.get_profanities_wordlist()\n return word_filter(text,profanities,replace_handler)", "def geometric_area_filtering(boxes, divider):\n correct_boxes = []\n mx = -1\n for box in boxes:\n polygon = Polygon([box.x0, box.x1, box.x2, box.x3])\n if polygon.area > mx:\n mx = polygon.area\n for box in boxes:\n polygon = Polygon([box.x0, box.x1, box.x2, box.x3])\n if polygon.area >= mx / divider:\n correct_boxes.append(box)\n return correct_boxes", "def filter_proposition_mentions(filter_func, test_graph):\r\n ret = test_graph.clone()\r\n proposition_mentions = []\r\n logging.debug('Filtering verbal propositions')\r\n for prop in test_graph.propositions.values():\r\n for mention in prop.mentions.values():\r\n sent = test_graph.sentences[mention.sentence_id]\r\n if filter_func(sent, mention):\r\n logging.debug('Found {}'.format(mention.terms))\r\n proposition_mentions.append(mention)\r\n\r\n logging.debug('#Propositions after filter = 
{}'.format(len(proposition_mentions)))\r\n ret.propositions = cram_proposition_mentions(proposition_mentions)\r\n return ret", "def filter_prediction(boxes, probs, cls_idx): \n if cfg.TOP_N_DETECTION < len(probs) and cfg.TOP_N_DETECTION > 0:\n order = probs.argsort()[:-cfg.TOP_N_DETECTION-1:-1]\n probs = probs[order]\n boxes = boxes[order]\n cls_idx = cls_idx[order]\n else:\n filtered_idx = np.nonzero(probs > cfg.PROB_THRESHOLD)[0]\n probs = probs[filtered_idx]\n boxes = boxes[filtered_idx]\n cls_idx = cls_idx[filtered_idx]\n\n final_boxes = []\n final_probs = []\n final_cls_idx = []\n\n for c in range(cfg.NUM_CLASSES):\n idx_per_class = [i for i in range(len(probs)) if cls_idx[i] == c]\n keep = nms(boxes[idx_per_class], probs[idx_per_class], cfg.NMS_THRESHOLD)\n for i in range(len(keep)):\n if keep[i]:\n final_boxes.append(boxes[idx_per_class[i]])\n final_probs.append(probs[idx_per_class[i]])\n final_cls_idx.append(c)\n return final_boxes, final_probs, final_cls_idx", "def extract_filter_list(self, filter_type, elements):\n titleLabel = QLabel(filter_type)\n titleLabel.setStyleSheet('font: 20pt \"Imprint MT Shadow\"; color: #ffffff;')\n grid = QGridLayout()\n self.filterVbox.addWidget(titleLabel, alignment=Qt.AlignCenter)\n self.filterVbox.addLayout(grid)\n\n counter = 0\n for element in elements:\n nextLabel = QLabel(element)\n nextLabel.setStyleSheet('font: 12pt \"Times New Roman\"; color: rgb(188, 189, 177);')\n grid.addWidget(nextLabel, math.floor(counter/3), counter % 3, alignment=Qt.AlignCenter)\n counter += 1", "def find_voting_precincts_in_district(state=48, district=7, leg_body='US-REP'):\r\n vps_in_district_GeoJSON = get_voting_precincts_geojson_filename(\r\n state=state, district=district, leg_body=leg_body)\r\n \r\n if not os.path.isfile(vps_in_district_GeoJSON):\r\n voting_precincts_file = get_statewide_voting_precincts_geojson_filename(state)\r\n \r\n district_file = get_district_geojson_filename(\r\n state=state, district=district, leg_body=leg_body)\r\n \r\n get_district_file(state=state, district=district, leg_body=leg_body)\r\n\r\n get_statewide_voting_precincts(state=state)\r\n \r\n print( \"Finding voting precincts in district\" )\r\n district_boundary = gpd.read_file(district_file)\r\n voting_precincts = gpd.read_file(voting_precincts_file)\r\n \r\n print( \"Finding voting precincts that touch the district boundary\" )\r\n vps_touching_district_bool = voting_precincts.touches(district_boundary.geometry[0])\r\n \r\n print( \"Finding voting precincts that intersect the district boundary\" )\r\n vps_intersecting_district_bool = voting_precincts.intersects(district_boundary.geometry[0])\r\n \r\n print( \"Filtering the voting precincts\" )\r\n for index in vps_touching_district_bool[vps_touching_district_bool==True].index:\r\n vps_intersecting_district_bool.loc[index] = False\r\n\r\n vps_in_district = voting_precincts[vps_intersecting_district_bool]\r\n \r\n print( \"Finding blockgroups to filter based on threshold\" )\r\n intersections = vps_in_district.intersection(district_boundary.geometry[0])\r\n\r\n areas_of_intersections = intersections.area\r\n indx_out = []\r\n for vp_index, vp in vps_in_district.iterrows():\r\n area_of_intersection = areas_of_intersections[vp_index]\r\n vp_area = GeoSeries(vp.geometry).area[0]\r\n\r\n share_of_intersection = area_of_intersection / vp_area\r\n \r\n if share_of_intersection < 0.10:\r\n indx_out.append(vp_index)\r\n\r\n #print( \"\\nBlock Group: \", bg.GEOID )\r\n #print( \"Area: \", str(bg_area) )\r\n #print( \"Share of 
Intersection: \", str(share_of_intersection) )\r\n \r\n vps_to_remove_bool = pd.Series([False]*len(voting_precincts))\r\n\r\n for index in indx_out:\r\n vps_to_remove_bool.loc[index] = True\r\n\r\n vps_to_remove = voting_precincts[vps_to_remove_bool]\r\n\r\n for index in vps_to_remove_bool[vps_to_remove_bool==True].index:\r\n vps_intersecting_district_bool.loc[index] = False\r\n\r\n vps_in_district = voting_precincts[vps_intersecting_district_bool]\r\n if 'PREC' in list(vps_in_district.columns.values):\r\n vps_in_district = vps_in_district.rename(columns={'PREC':'PRECINCT'})\r\n\r\n # See issue #367 https://github.com/geopandas/geopandas/issues/367\r\n try: \r\n os.remove(vps_in_district_GeoJSON)\r\n except OSError:\r\n pass\r\n vps_in_district.to_file(vps_in_district_GeoJSON, driver='GeoJSON')\r\n \r\n vps_in_district.sort_values(by=['PRECINCT'])[['PRECINCT']].to_csv(\"vps.csv\", index=False)", "def yolo2_filter_boxes(boxes, box_confidence, box_class_probs, threshold=.6):\n box_scores = box_confidence * box_class_probs\n box_classes = K.argmax(box_scores, axis=-1)\n box_class_scores = K.max(box_scores, axis=-1)\n prediction_mask = box_class_scores >= threshold\n\n # TODO: Expose tf.boolean_mask to Keras backend?\n boxes = tf.boolean_mask(boxes, prediction_mask)\n scores = tf.boolean_mask(box_class_scores, prediction_mask)\n classes = tf.boolean_mask(box_classes, prediction_mask)\n return boxes, scores, classes", "def yolo_filter_boxes(box_confidence: torch.Tensor, boxes: torch.Tensor, box_class_probs: torch.Tensor, threshold: float=.6):\n\n batch_size, num_anchors, _, conv_height, conv_width = box_confidence.shape\n\n box_scores = box_confidence * box_class_probs\n\n box_classes = torch.argmax(box_scores, dim=2, keepdim=True)\n\n box_class_scores, _ = torch.max(box_scores, dim=2, keepdim=True)\n\n prediction_mask = box_class_scores > threshold\n\n classes = box_classes[prediction_mask]\n scores = box_class_scores[prediction_mask]\n\n boxes = boxes.permute(0, 1, 3, 4, 2)\n prediction_mask = prediction_mask.permute(0, 1, 3, 4, 2)\n boxes = boxes[prediction_mask.expand_as(boxes)].view(-1, 4)\n\n return boxes, scores, classes", "def non_max_suppression(self, filtered_boxes, box_classes, box_scores):\n box_predictions = []\n predicted_box_classes = []\n predicted_box_scores = []\n for label in range(len(self.class_names)):\n # for each class\n boxes = []\n class_tmp = []\n score_tmp = []\n for i in range(len(box_classes)):\n if box_classes[i] == label:\n boxes.append(filtered_boxes[i])\n class_tmp.append(box_classes[i])\n score_tmp.append(box_scores[i])\n\n class_tmp = np.array(class_tmp)\n while len(class_tmp) > 0 and np.amax(class_tmp) > -1:\n index = np.argmax(score_tmp)\n box_predictions.append(boxes[index])\n predicted_box_classes.append(class_tmp[index])\n predicted_box_scores.append(score_tmp[index])\n score_tmp[index] = -1\n class_tmp[index] = -1\n px1, py1, px2, py2 = boxes[index]\n p_area = (px2 - px1) * (py2 - py1)\n\n for box in range(len(boxes)):\n if class_tmp[box] != -1:\n bx1, by1, bx2, by2 = boxes[box]\n b_area = (bx2 - bx1) * (by2 - by1)\n ox1 = px1 if px1 > bx1 else bx1\n oy1 = py1 if py1 > by1 else by1\n ox2 = px2 if px2 < bx2 else bx2\n oy2 = py2 if py2 < by2 else by2\n if ox2 - ox1 <= 0 or oy2 - oy1 <= 0:\n continue\n # Calculate overlap area and IoU\n o_area = (ox2 - ox1) * (oy2 - oy1)\n u_area = p_area + b_area - o_area\n iou = o_area / u_area\n\n if iou > self.nms_t:\n class_tmp[box] = -1\n score_tmp[box] = -1\n\n box_predictions = np.array(box_predictions)\n 
predicted_box_classes = np.array(predicted_box_classes)\n predicted_box_scores = np.array(predicted_box_scores)\n return (box_predictions, predicted_box_classes, predicted_box_scores)", "def clip_boxes(boxes, im_shape):\n boxes[:, 0::2]=threshold(boxes[:, 0::2], 0, im_shape[1]-1)\n boxes[:, 1::2]=threshold(boxes[:, 1::2], 0, im_shape[0]-1)\n return boxes", "def clip_boxes(boxes, im_shape):\n boxes[:, 0::2]=threshold(boxes[:, 0::2], 0, im_shape[1]-1)\n boxes[:, 1::2]=threshold(boxes[:, 1::2], 0, im_shape[0]-1)\n return boxes", "def _generate_proposals(self, box):\n # Generate proposals\n num_proposals = self.proposal_params['boxes_per_frame']\n proposals = torch.zeros((num_proposals, 4))\n gt_iou = torch.zeros(num_proposals)\n\n for i in range(num_proposals):\n proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],\n sigma_factor=self.proposal_params['sigma_factor']\n )\n\n # Map to [-1, 1]\n gt_iou = gt_iou * 2 - 1\n return proposals, gt_iou", "def _generate_proposals(self, box):\n # Generate proposals\n num_proposals = self.proposal_params['boxes_per_frame']\n proposals = torch.zeros((num_proposals, 4))\n gt_iou = torch.zeros(num_proposals)\n\n for i in range(num_proposals):\n proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],\n sigma_factor=self.proposal_params['sigma_factor']\n )\n\n # Map to [-1, 1]\n gt_iou = gt_iou * 2 - 1\n return proposals, gt_iou", "def Filter(self,val):\n \n #set th elength of the lis to 0\n List = [self.InitialList[i] for i in range(0,len(self.InitialList))]\n FilterValues = [None]\n Grab = [None]\n Headers = []\n \n #create the quick index\n for i in range(len(self.Condensensed)):\n \n Headers.append([self.Condensensed[i][l][0] for l in range(len(self.Condensensed[i]))])\n \n #grab the values...\n for j in range(len(self.Variables)):\n \n FilterValues.append(self.Variables[j].get())\n\n if self.Variables[j].get().split(' ')[0] == 'All':\n \n Grab.append(False)\n \n else:\n \n Grab.append(True)\n \n #intermediate list to compare\n ToCompare = []\n \n for i in range(1,len(Grab)):\n \n if Grab[i]:\n \n #find the index\n l = Headers[i].index(FilterValues[i])\n \n #grab it\n ToCompare.append([self.Condensensed[i][l][m] for m in range(len(self.Condensensed[i][l]))])\n\n\n for i in range(0, len(ToCompare)):\n \n List = list(set(List).intersection(ToCompare[i]))\n\n #update the interface\n self.Gatherer(List,list(self.Input))\n self.BuildTree()", "def __filterEdges(self):", "def filter(full_poi_list, type_of_poi):\n pois = []\n if type_of_poi == \"all\":\n for i in full_poi_list:\n entry = i[0]\n pois.append(entry)\n if type_of_poi == \"gym\":\n for i in full_poi_list:\n if i[1] == 2:\n entry = i[0]\n pois.append(entry)\n return pois", "def non_maximum_suppression(boxes):\n\n boxes = sorted(boxes, key=lambda box: box[2]-box[0], reverse=True)\n nms_boxes = []\n overlap_threshold = 0.5\n\n for box in boxes:\n if not any([overlap_between(box, nms_box) > overlap_threshold for nms_box in nms_boxes]):\n nms_boxes.append(box)\n\n return nms_boxes", "def inside_first_filter():\n print(\"inside_first_filter\")\n if len(gCodeBlocks) == 0:\n print(\"no gcode loaded: cannot apply filter\")\n return\n block_to_filter = gCodeBlocks[-1]\n\n g01blocks = block_to_filter.g01blocks\n ng01 = len(g01blocks)\n\n while True:\n swp = False\n for i in range(ng01-1):\n for j in range(i+1, ng01):\n if g01blocks[i].contains(g01blocks[j]):\n g01blocks[i], g01blocks[j] = g01blocks[j], g01blocks[i]\n swp = True\n\n 
if not swp:\n break\n\n # rearrange original lines\n block_to_filter.lines = []\n for g01block in block_to_filter.g01blocks:\n for line in g01block.lines:\n block_to_filter.lines.append(line)", "def component_filter(components, img, edge_boxes, max_horizontal_txt_height=defaults.MAX_HORIZONTAL_TEXT_HEIGHT):\n white_txt_background = False\n text_like_component = []\n num_of_box_with_white_neighbors = 0\n white_neighbors = []\n for component in components:\n mask = np.zeros(img.shape[0:2])\n edges = find_edges(component, edge_boxes) # find edge areas inside the text box\n if len(edges) == 0: continue # no processing if there is no edge in the box\n if max(edge_cluster_contrast(img, edges)) < 50: continue # no processing if the contrast is too low\n adjusted_x, adjusted_y, w, h = edge_cluster_rectangle(\n edges) # adjust the coordinates of the text box to make it tighter\n component = (slice(adjusted_y, adjusted_y + h), slice(adjusted_x, adjusted_x + w))\n ###### create a mask in which edge areas are filled with 1, other areas 0.\n for edge in edges:\n x, y, w, h = edge\n mask[y:y + h, x:x + w] = 1\n ############ crop the mask into the same shape as the box\n mask = mask[component[0].start:component[0].stop, component[1].start:component[1].stop]\n ############ extract the area of the text box from the image #################\n aoi = img[component[0].start:component[0].stop, component[1].start:component[1].stop]\n aoi = clean.binarize(aoi, threshold=180)\n\n ############## compute the white/black ratio #######################\n zero_ratio = density_analysis(aoi)\n if zero_ratio > 0.75: continue # if too many white pixels, drop it\n if zero_ratio < 0.15: continue # if too many black pixels, drop it\n\n # print('--------------------------------------------------------')\n # analyze_block_vertical(mask)\n # ax1 = plt.subplot(1,2,1)\n # ax1.imshow(aoi)\n # ax2 = plt.subplot(1,2,2)\n # ax2.imshow(mask)\n # plt.show()\n\n ############### analyze the masks or aois to see whether it is text-like ##########\n\n if analyze_block_vertical(mask, max_horizontal_text_height=max_horizontal_txt_height) \\\n or analyze_block_horizon(mask) \\\n or analyze_block_vertical(aoi / 255, max_horizontal_text_height=max_horizontal_txt_height) \\\n or analyze_block_horizon(aoi / 255):\n # if border_analyze_vertical(mask, vertical_borders) or border_analyze_horizon(mask, horizon_borders):\n text_like_component.append(component)\n ########## extract left, right, upper, lower neighboring areas of the candiate box ########\n component_left_neighbor = img[component[0].start:component[0].stop,\n max(component[1].start - 10, 0):component[1].start]\n component_right_neighbor = img[component[0].start:component[0].stop,\n component[1].stop:min(component[1].stop + 10, img.shape[1])]\n\n component_up_neighbor = img[max(component[0].start - 10, 0):component[0].start,\n component[1].start:component[1].stop]\n\n component_low_neighbor = img[component[0].stop:min(component[0].stop + 10, img.shape[0]),\n component[1].start:component[1].stop]\n ############# if the candidate box is indeed a text box, it should should have white areas next to it #######\n left_white_ratio = 0\n if component_right_neighbor.shape[1] > 0 and component_right_neighbor.shape[0] > 0:\n left_white_ratio = np.sum(component_right_neighbor > 240) / (\n component_right_neighbor.shape[0] * component_right_neighbor.shape[1])\n right_white_ratio = 0\n if component_left_neighbor.shape[0] > 0 and component_left_neighbor.shape[1] > 0:\n right_white_ratio = 
np.sum(component_left_neighbor > 240) / (\n component_left_neighbor.shape[0] * component_left_neighbor.shape[1])\n up_white_ratio = 0\n if component_up_neighbor.shape[0] > 0 and component_up_neighbor.shape[1] > 0:\n up_white_ratio = np.sum(component_up_neighbor > 240) / (\n component_up_neighbor.shape[0] * component_up_neighbor.shape[1])\n low_white_ratio = 0\n if component_low_neighbor.shape[0] > 0 and component_low_neighbor.shape[1] > 0:\n low_white_ratio = np.sum(component_low_neighbor > 240) / (\n component_low_neighbor.shape[0] * component_low_neighbor.shape[1])\n white_neighbors.append(\n [left_white_ratio > 0.9, right_white_ratio > 0.9, up_white_ratio > 0.9, low_white_ratio > 0.9])\n if all([left_white_ratio > 0.95, right_white_ratio > 0.95, up_white_ratio > 0.95, low_white_ratio > 0.95]):\n num_of_box_with_white_neighbors = num_of_box_with_white_neighbors + 1\n\n if num_of_box_with_white_neighbors >= 2: # if there are at least two boxes having neighbors all white, then all text areas have white background\n white_txt_background = True\n text_like_component = [component for idx, component in enumerate(text_like_component) if\n np.sum(white_neighbors[idx]) >= 2]\n # text_like_component=expand_component_1(img,text_like_component,edge_boxes)\n return text_like_component, white_txt_background", "def _prune_completely_outside_window(self, boxes, window):\n with tf.name_scope('prune_completely_outside_window'):\n\n y_min, x_min, y_max, x_max = tf.split(boxes, num_or_size_splits=4, axis=1)\n # they have shape [None, 1]\n win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)\n # they have shape []\n\n coordinate_violations = tf.concat([\n tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),\n tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)\n ], axis=1)\n valid_indices = tf.squeeze(\n tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))),\n axis=1\n )\n boxes = tf.gather(boxes, valid_indices)\n return boxes, valid_indices", "def _boxes_in_bbox(self, document):\n bbox = self._bbox\n boxes = document.get_boxes()\n return [box for box in boxes if bbox[0] <= box.x0 and\n box.x1 <= bbox[2] and bbox[1] <= box.y0 and\n box.y1 <= bbox[3] and self._allowed_page(box) and\n self._acceptable_dimensions(box)]", "def filter_all(_):\n return True", "def box_flags(self, box):\n return [flag for flag in self.flags if flag.box == box]", "def passes_cutoff(self, filter_code):\r\n try:\r\n filterset_dict = {\"all_positions\":[True],\r\n \"all_variants\":[self.is_variant == True],\r\n \"actionable_variants\":[self.is_variant == True, \r\n self.in_blacklist == \"WHITE\", \r\n \"exon\" in self.loc, # and \"exonic_nc\" not in self.loc, \r\n \"syn\" not in self.func, \r\n \"ref\" not in self.func, \r\n self.ir_version == \"14\" or int(self.FAO)>50,\r\n int(self.FRO)+int(self.FAO)>500, \r\n self.FR == \".\"],\r\n \r\n \r\n \"indels\":[self.is_variant == True, self.type == \"del\" or self.type == \"in\" , \"exon\" in self.loc]\r\n }\r\n return all(filterset_dict[filter_code])\r\n \r\n except:\r\n return False", "def filter_close_teeth(bboxes, pixel_threshold=4):\n # sort bboxes by x-coordinate\n other_bboxes = [bbox for bbox in bboxes if bbox.get_label() != 0]\n teeth_bboxes = [bbox for bbox in bboxes if bbox.get_label() == 0]\n teeth_xmins = [bbox.xmin for bbox in teeth_bboxes]\n xmin_sorted = np.argsort(teeth_xmins)\n teeth_bboxes = np.array(teeth_bboxes)[xmin_sorted]\n teeth_bboxes = teeth_bboxes.tolist()\n filtered_bboxes = []\n\n # get rid of bboxes which 
are within pixel_threshold in the x-axis\n i = 0\n if teeth_bboxes:\n teeth_bboxes_to_return = [teeth_bboxes[0]]\n else:\n teeth_bboxes_to_return = []\n if len(teeth_bboxes) > 2:\n while i + 2 < len(teeth_bboxes): # iterate over the triples\n bbox1 = teeth_bboxes[i]\n bbox2 = teeth_bboxes[i + 1]\n bbox3 = teeth_bboxes[i + 2]\n xmin1, xmax1, ymin1, ymax1 = convert_bbox_coords_to_pixels(bbox1)\n xmin2, xmax2, ymin2, ymax2 = convert_bbox_coords_to_pixels(bbox2)\n xmin3, xmax3, ymin3, ymax3 = convert_bbox_coords_to_pixels(bbox3)\n if xmin2 - xmax1 < pixel_threshold and xmin3 - xmax2 < pixel_threshold:\n filtered_bboxes.append(bbox2)\n else:\n teeth_bboxes_to_return.append(teeth_bboxes[i + 1])\n i += 1\n\n if len(teeth_bboxes) > 1:\n teeth_bboxes_to_return.append(teeth_bboxes[-1])\n\n bboxes_to_return = teeth_bboxes_to_return + other_bboxes\n return bboxes_to_return, filtered_bboxes", "def test_vs_filtering():\n vs = virtualscreening(n_cpu=-1)\n\n vs.load_ligands('sdf', os.path.join(test_data_dir, 'data/dude/xiap/actives_docked.sdf'))\n vs.apply_filter('ro5', soft_fail=1)\n assert_equal(len(list(vs.fetch())), 49)\n\n vs.load_ligands('sdf', os.path.join(test_data_dir, 'data/dude/xiap/actives_docked.sdf'))\n vs.apply_filter('ro3', soft_fail=2)\n assert_equal(len(list(vs.fetch())), 9)", "def select_proposals(proposals_list):\r\n \r\n return []", "def _extract_box_classifier_features(self, proposal_feature_maps, scope):\n del scope\n\n # Number of used stem cells.\n num_stem_cells = 2\n\n # Note that we always feed into 2 layers of equal depth\n # where the first N channels corresponds to previous hidden layer\n # and the second N channels correspond to the final hidden layer.\n hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3)\n\n # Note that what follows is largely a copy of build_pnasnet_large() within\n # pnasnet.py. 
We are copying to minimize code pollution in slim.\n\n # TODO(shlens,skornblith): Determine the appropriate drop path schedule.\n # For now the schedule is the default (1.0->0.7 over 250,000 train steps).\n hparams = pnasnet.large_imagenet_config()\n if not self._is_training:\n hparams.set_hparam('drop_path_keep_prob', 1.0)\n\n # Calculate the total number of cells in the network\n total_num_cells = hparams.num_cells + num_stem_cells\n\n normal_cell = pnasnet.PNasNetNormalCell(\n hparams.num_conv_filters, hparams.drop_path_keep_prob,\n total_num_cells, hparams.total_training_steps)\n with arg_scope([slim.dropout, nasnet_utils.drop_path],\n is_training=self._is_training):\n with arg_scope([slim.batch_norm], is_training=self._train_batch_norm):\n with arg_scope([slim.avg_pool2d,\n slim.max_pool2d,\n slim.conv2d,\n slim.batch_norm,\n slim.separable_conv2d,\n nasnet_utils.factorized_reduction,\n nasnet_utils.global_avg_pool,\n nasnet_utils.get_channel_index,\n nasnet_utils.get_channel_dim],\n data_format=hparams.data_format):\n\n # This corresponds to the cell number just past 'Cell_7' used by\n # _extract_proposal_features().\n start_cell_num = 8\n true_cell_num = start_cell_num + num_stem_cells\n\n with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):\n net = _build_pnasnet_base(\n hidden_previous,\n hidden,\n normal_cell=normal_cell,\n hparams=hparams,\n true_cell_num=true_cell_num,\n start_cell_num=start_cell_num)\n\n proposal_classifier_features = net\n return proposal_classifier_features", "def _process_boxes(self, boxes):\n homography = getattr(self, \"_homography\", None)\n self.logger.debug(f\"Homography: {homography}\")\n if homography is None:\n return []\n\n threshold = self._config.notif_min_units\n\n pix_markers, distances = self._calculate_distances(boxes, homography)\n return self._gen_segments(pix_markers, distances, threshold)", "def queryset(self, request, queryset):\n if self.value() == 'p':\n return queryset.filter(pointe=True)\n if self.value() == 'rapp':\n return queryset.filter(rapp__isnull=False)\n if self.value() == 'rien':\n return queryset.filter(rapp__isnull=True, pointe=False)\n if self.value() == 'pr':\n # comme il y a trois position, si l'on exclue les ni rapproche ni pointe on a les rapproche ou pointe\n return queryset.exclude(rapp__isnull=True, pointe=False)\n if self.value() == 'nrapp':\n return queryset.filter(rapp__isnull=True)", "def filterRansac():\n pass", "def filter_features(self):\n return {key: {k: v for k, v in value.items() if k in {NAME, TYPE, ACTIVE}} for key, value in self.to_dict().items()}", "def filter_paneled_score_functions(self, pnl_node):\n return self.filter_nodes('Score', parent=pnl_node)", "def filter(self):\n self.getcmd()\n self.get_status()\n self.select()\n if self.params['mode'] == 'greedy':\n self.greedy()\n \n return", "def prune_bbox(receptive_box, bbox, threshold=0):\n xmin = util.where(receptive_box[:, 0] >= bbox[0] - threshold)\n ymin = util.where(receptive_box[:, 1] >= bbox[1] - threshold)\n xmax = util.where(receptive_box[:, 2] < bbox[2] + threshold)\n ymax = util.where(receptive_box[:, 3] < bbox[3] + threshold)\n\n val1 = util.intersect1d(xmin, ymin)\n val2 = util.intersect1d(xmax, ymax)\n valid_ids = torch.sort(torch.unique(util.intersect1d(val1, val2)))[0]\n\n pruned_receptive_box = receptive_box[valid_ids]\n\n return pruned_receptive_box, valid_ids", "def clip(self, bbox):\n from shapely.geometry import Polygon, LinearRing\n poly = Polygon(LinearRing(zip(bbox[[0, 1, 1, 0]], bbox[[2, 2, 3, 3]])))\n return [g for g in 
self.geometries() if poly.intersects(g)]", "def filter_box(col, row):\n # remember rows count from 0 at the top!\n correct_vertical = top <= row <= bottom\n correct_horizontal = left <= col <= right\n return correct_vertical and correct_horizontal", "def filter(self):\n new_nodes_to_update = {}\n nodes_to_update = {}\n\n for agent_id in self.cameras.keys():\n nodes_to_update[agent_id] = []\n new_nodes_to_update[agent_id] = []\n if agent_id not in self.beliefs:\n world_name = self.cameras[agent_id].name.replace(\"-\",\"_\")+\"_beliefs\"\n rospy.logdebug(\"[perspective_filter] create new world <%s>\" % str(world_name))\n self.beliefs[agent_id] = self.ctx.worlds[world_name]\n self.node_mapping[agent_id] = {}\n\n dq = deque()\n dq.append(self.source.scene.rootnode)\n\n while not rospy.is_shutdown() and 0 < len(dq):\n node = dq.pop()\n if node.id != self.source.scene.rootnode.id:\n # Process start here\n if node.id in self.cameras.keys(): # if the node is the agent POV\n nodes_to_update[node.id].append(node) # we add it to his belief\n\n if node.parent in self.cameras.keys() and node.type == MESH: # if the node is part of an agent\n nodes_to_update[node.parent].append(node) # we add it to his belief\n\n for agent_id, visible_nodes in self.visible_nodes.items(): # then we add the visible nodes\n if agent_id in self.cameras.keys():\n if node in visible_nodes:\n nodes_to_update[agent_id].append(node)\n\n # And end here\n for child_id in node.children:\n dq.append(self.source.scene.nodes[child_id])\n\n for agent_id, nodes in nodes_to_update.items():\n if nodes:\n for node in nodes:\n new_node = node.copy()\n if node.id in self.node_mapping[agent_id]:\n new_node.id = self.node_mapping[agent_id][node.id]\n if new_node.id in self.nodes_transform:\n if not numpy.allclose(self.nodes_transform[new_node.id], new_node.transformation):\n new_nodes_to_update[agent_id].append(new_node)\n self.nodes_transform[new_node.id] = new_node.transformation\n else:\n self.nodes_transform[new_node.id] = new_node.transformation\n new_nodes_to_update[agent_id].append(new_node)\n else:\n self.node_mapping[agent_id][node.id] = new_node.id\n new_nodes_to_update[agent_id].append(new_node)\n\n # Finally we update the corresponding beliefs worlds\n for agent_id, nodes in new_nodes_to_update.items():\n for node in nodes:\n node.parent = self.node_mapping[agent_id][node.parent] if node.parent in self.node_mapping[agent_id] \\\n else self.beliefs[agent_id].scene.rootnode.id\n if nodes:\n self.beliefs[agent_id].scene.nodes.update(nodes)", "def apply_non_max_suppression(boxes, scores, iou_thresh=.45, top_k=200):\n\n selected_indices = np.zeros(shape=len(scores))\n if boxes is None or len(boxes) == 0:\n return selected_indices\n x_min = boxes[:, 0]\n y_min = boxes[:, 1]\n x_max = boxes[:, 2]\n y_max = boxes[:, 3]\n areas = (x_max - x_min) * (y_max - y_min)\n remaining_sorted_box_indices = np.argsort(scores)\n remaining_sorted_box_indices = remaining_sorted_box_indices[-top_k:]\n\n num_selected_boxes = 0\n while len(remaining_sorted_box_indices) > 0:\n best_score_args = remaining_sorted_box_indices[-1]\n selected_indices[num_selected_boxes] = best_score_args\n num_selected_boxes = num_selected_boxes + 1\n if len(remaining_sorted_box_indices) == 1:\n break\n\n remaining_sorted_box_indices = remaining_sorted_box_indices[:-1]\n\n best_x_min = x_min[best_score_args]\n best_y_min = y_min[best_score_args]\n best_x_max = x_max[best_score_args]\n best_y_max = y_max[best_score_args]\n\n remaining_x_min = x_min[remaining_sorted_box_indices]\n 
remaining_y_min = y_min[remaining_sorted_box_indices]\n remaining_x_max = x_max[remaining_sorted_box_indices]\n remaining_y_max = y_max[remaining_sorted_box_indices]\n\n inner_x_min = np.maximum(remaining_x_min, best_x_min)\n inner_y_min = np.maximum(remaining_y_min, best_y_min)\n inner_x_max = np.minimum(remaining_x_max, best_x_max)\n inner_y_max = np.minimum(remaining_y_max, best_y_max)\n\n inner_box_widths = inner_x_max - inner_x_min\n inner_box_heights = inner_y_max - inner_y_min\n\n inner_box_widths = np.maximum(inner_box_widths, 0.0)\n inner_box_heights = np.maximum(inner_box_heights, 0.0)\n\n intersections = inner_box_widths * inner_box_heights\n remaining_box_areas = areas[remaining_sorted_box_indices]\n best_area = areas[best_score_args]\n unions = remaining_box_areas + best_area - intersections\n intersec_over_union = intersections / unions\n intersec_over_union_mask = intersec_over_union <= iou_thresh\n remaining_sorted_box_indices = remaining_sorted_box_indices[\n intersec_over_union_mask]\n\n return selected_indices.astype(int), num_selected_boxes", "def _generate_scale_proposals(self, box):\n # Generate proposals\n num_proposals = self.scaler_proposal_params['boxes_per_frame']\n proposals = torch.zeros((num_proposals, 4))\n gt_iou = torch.zeros(num_proposals)\n\n for i in range(num_proposals):\n proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.scaler_proposal_params['min_iou'],\n sigma_factor=self.scaler_proposal_params['sigma_factor'])\n\n # Map to [-1, 1]\n gt_iou = gt_iou * 2 - 1\n return proposals, gt_iou", "def filter_bboxes_by_visibility(img, bboxes, transformed_img, transformed_bboxes, threshold):\n img_height, img_width = img.shape[:2]\n transformed_img_height, transformed_img_width = transformed_img.shape[:2]\n\n visible_bboxes = []\n for bbox, transformed_bbox in zip(bboxes, transformed_bboxes):\n if not all(0.0 <= value <= 1.0 for value in transformed_bbox[:4]):\n continue\n bbox_area = calculate_bbox_area(bbox, img_height, img_width)\n transformed_bbox_area = calculate_bbox_area(transformed_bbox, transformed_img_height, transformed_img_width)\n visibility = transformed_bbox_area / bbox_area\n if visibility >= threshold:\n visible_bboxes.append(transformed_bbox)\n return visible_bboxes", "def filter_behaviors(self):\n\n if not self.pj[ETHOGRAM]:\n return\n\n paramPanelWindow = param_panel.Param_panel()\n paramPanelWindow.setMaximumHeight(800)\n paramPanelWindow.setMaximumWidth(600)\n paramPanelWindow.setWindowTitle(\"Select the behaviors to show in the ethogram table\")\n for w in [paramPanelWindow.lwSubjects, paramPanelWindow.pbSelectAllSubjects,\n paramPanelWindow.pbUnselectAllSubjects,\n paramPanelWindow.pbReverseSubjectsSelection, paramPanelWindow.lbSubjects,\n paramPanelWindow.cbIncludeModifiers,\n paramPanelWindow.cbExcludeBehaviors, paramPanelWindow.frm_time]:\n w.setVisible(False)\n\n # behaviors filtered\n filtered_behaviors = [self.twEthogram.item(i, 1).text() for i in range(self.twEthogram.rowCount())]\n\n if BEHAVIORAL_CATEGORIES in self.pj:\n categories = self.pj[BEHAVIORAL_CATEGORIES][:]\n # check if behavior not included in a category\n if \"\" in [self.pj[ETHOGRAM][idx][\"category\"] for idx in self.pj[ETHOGRAM]\n if \"category\" in self.pj[ETHOGRAM][idx]]:\n categories += [\"\"]\n else:\n categories = [\"###no category###\"]\n\n for category in categories:\n\n if category != \"###no category###\":\n\n if category == \"\":\n paramPanelWindow.item = QListWidgetItem(\"No category\")\n paramPanelWindow.item.setData(34, \"No 
category\")\n else:\n paramPanelWindow.item = QListWidgetItem(category)\n paramPanelWindow.item.setData(34, category)\n\n font = QFont()\n font.setBold(True)\n paramPanelWindow.item.setFont(font)\n paramPanelWindow.item.setData(33, \"category\")\n paramPanelWindow.item.setData(35, False)\n\n paramPanelWindow.lwBehaviors.addItem(paramPanelWindow.item)\n\n for behavior in [self.pj[ETHOGRAM][x][\"code\"] for x in sorted_keys(self.pj[ETHOGRAM])]:\n\n if ((categories == [\"###no category###\"]) or\n (behavior in [self.pj[ETHOGRAM][x][\"code\"] for x in self.pj[ETHOGRAM]\n if \"category\" in self.pj[ETHOGRAM][x] and\n self.pj[ETHOGRAM][x][\"category\"] == category])):\n\n paramPanelWindow.item = QListWidgetItem(behavior)\n if behavior in filtered_behaviors:\n paramPanelWindow.item.setCheckState(Qt.Checked)\n else:\n paramPanelWindow.item.setCheckState(Qt.Unchecked)\n\n if category != \"###no category###\":\n paramPanelWindow.item.setData(33, \"behavior\")\n if category == \"\":\n paramPanelWindow.item.setData(34, \"No category\")\n else:\n paramPanelWindow.item.setData(34, category)\n\n paramPanelWindow.lwBehaviors.addItem(paramPanelWindow.item)\n\n if paramPanelWindow.exec_():\n if self.observationId and set(paramPanelWindow.selectedBehaviors) != set(filtered_behaviors):\n self.projectChanged = True\n self.load_behaviors_in_twEthogram(paramPanelWindow.selectedBehaviors)\n # update subjects pad\n if hasattr(self, \"codingpad\"):\n self.codingpad.filtered_behaviors = [self.twEthogram.item(i, 1).text() for i in\n range(self.twEthogram.rowCount())]\n self.codingpad.compose()", "def get_filters(self):", "def filter_crop(grayscale_image):\n # Blurring the image helps with getting a more consistent binary image\n blurred_image = cv2.bilateralFilter(grayscale_image, d=0, sigmaColor=40, sigmaSpace=2)\n binary_image = get_binary_image(blurred_image)\n marked = find_connected_components(binary_image)\n _, all_coords = get_image_objects(marked, 0)\n M, N = grayscale_image.shape\n average_void_intensity = compute_average_void_intensity(grayscale_image, marked, all_coords)\n cc_id = -1\n # Finding the cc id of the centered particle\n for i in range(N/2, -1, -1):\n current_cc = marked[M/2, i]\n if current_cc != -1:\n cc_id = current_cc\n break\n\n filtered_crop = remove_side_objects(grayscale_image, marked, cc_id, average_void_intensity)\n\n return filtered_crop", "def pnet_process(self, boxes):\n boxes_num = 0 if boxes is None else boxes.shape[0]\n if boxes_num > 0:\n boxes = self.__nms(boxes, 0.7, 'Union');\n boxes = self.__box_regress(boxes);\n boxes = self.__bbox2square(boxes);\n return boxes", "def filter(self, filter_dict):\n pass", "def filter_func_pack(self, packs):\n return [pack for pack in packs if len(pack.wolves) < self.min_pack]", "def filter(self):\n self.filter_means = [self.m_0]\n self.filter_covs = [self.P_0]\n self.marginal_covs = []\n for t in range(self.data.shape[0]):\n m_bar, P_bar = self.one_step_prediction(self.filter_means[-1], self.filter_covs[-1])\n\n # Update step\n y = self.data[t]\n if not np.isnan(y).any():\n v = y[:, None] - self.observation_matrix @ m_bar\n S = self.observation_matrix @ P_bar @ self.observation_matrix.T + self.observation_cov\n K = P_bar @ self.observation_matrix.T @ np.linalg.inv(S)\n\n m_bar = m_bar + K @ v\n P_bar = P_bar - K @ S @ K.T\n\n self.marginal_covs.append(S)\n\n self.filter_means.append(m_bar)\n self.filter_covs.append(P_bar)\n self.filter_means = self.filter_means[1:]\n self.filter_covs = self.filter_covs[1:]", "def 
_prune_non_overlapping_boxes(self, boxes1, boxes2, min_overlap=0.0):\n with tf.name_scope('prune_non_overlapping_boxes'):\n ioa = self._ioa(boxes2, boxes1) # [M, N] tensor\n ioa = tf.reduce_max(ioa, axis=0) # [N] tensor\n keep_bool = tf.greater_equal(ioa, tf.constant(min_overlap))\n keep_inds = tf.squeeze(tf.where(keep_bool), axis=1)\n boxes = tf.gather(boxes1, keep_inds)\n return boxes, keep_inds", "def select_proposals(self):\r\n print \"Selecting proposals... \"\r\n global MAX_NUMBER_PROJECTS\r\n proposals_sorted = sorted(self.proposals, key=lambda project:project.likes, reverse=True)\r\n for i in range(MAX_NUMBER_PROJECTS):\r\n self.projects_for_vote.append(proposals_sorted[i])", "def nms(bboxes, iou_threshold, threshold, box_format=\"corners\"):\n\tassert type(bboxes) == list\n\tbboxes = [box for box in bboxes if box[1] > threshold]\n\tbboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)\n\tbboxes_after_nms = []\n\n\twhile bboxes:\n\t\tchosen_box = bboxes.pop(0)\n\t\tbboxes = [box for box in bboxes \n\t\t\t\t\t\t\tif box[0] != chosen_box[0] or intersection_over_union\n\t\t\t\t\t\t\t(torch.tensor(chosen_box[2:]), \n\t\t\t\t\t\t\t\ttorch.tensor(box[2:]),\n\t\t\t \t\t\t\t\tbox_format=box_format) < iou_threshold]\n\t\tbboxes_after_nms.append(chosen_box)\n\n\treturn bboxes_after_nms", "def _prune_candidates(self, beam_width=None):\n if beam_width is None:\n beam_width = self.beam_width\n if len(self.candidates) <= beam_width:\n return\n neg_scores = np.array([-cand.logp_total() for cand in self.candidates])\n parted_indices = np.argpartition(neg_scores, beam_width - 1)\n self.candidates = np.array(self.candidates)[parted_indices[:beam_width]].tolist()", "def order_filter(self,elements):", "def filter_tokens_by(self, box: Box, soft_margin: Dict = None) -> Dict[int, Token]:\n return {\n idx: token\n for idx, token in enumerate(self.tokens)\n if token.is_in(box, soft_margin)\n }", "def getBoxSelectPoints(self, data, p1, p2):\n\n# TODO: speed this up?\n # get canonical box limits\n (p1x, p1y) = p1\n (p2x, p2y) = p2\n lx = min(p1x, p2x) # left x coord\n rx = max(p1x, p2x)\n ty = max(p1y, p2y) # top y coord\n by = min(p1y, p2y)\n\n # get a list of points inside the selection box\n result = []\n for (x, y, _, id) in data:\n if lx <= x <= rx and by <= y <= ty:\n result.append((x, y, id))\n\n return result", "def filter_items(self, context, data, propname):\n\n helper_funcs = bpy.types.UI_UL_list\n\n items = getattr(data, propname)\n\n # Filtering by name\n filtered = helper_funcs.filter_items_by_name(\n self.filter_name, self.bitflag_filter_item, items, \"name\", reverse=False\n )\n\n if not filtered:\n filtered = [self.bitflag_filter_item] * len(items)\n\n d = context.active_object.data\n anim_ret = context.active_object.anim_ret\n\n for index, bone in enumerate(items):\n excluded = False\n found = False\n\n anim_ret_bone = bone.anim_ret_bone\n\n if not anim_ret_bone:\n excluded = True\n if not excluded and anim_ret_bone.source_bone_name == \"\":\n excluded = True\n if bone.name.startswith(ObjectAnimRet.prefix):\n excluded = True\n if not excluded and not anim_ret.show_def and \"DEF-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_mch and \"MCH-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_org and \"ORG-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_fk and \"fk\" in bone.name.lower():\n excluded = True\n if not excluded and not anim_ret.show_ik and \"ik\" in bone.name.lower():\n excluded = 
True\n if not excluded and anim_ret.filter_layers:\n data_bone = d.bones[bone.name]\n for layer_id, layer in enumerate(d.layers):\n if layer:\n if data_bone.layers[layer_id]:\n found = True\n break\n\n if excluded or not found:\n filtered[index] &= ~self.bitflag_filter_item\n\n ordered = []\n\n # Reorder by name or average weight.\n if self.use_filter_sort_alpha:\n sort = [(idx, getattr(it, \"name\", \"\")) for idx, it in enumerate(items)]\n\n ordered = helper_funcs.sort_items_helper(sort, lambda e: e[1].lower())\n\n return filtered, ordered", "def create_prior_boxes(self):\n # value of k for each feature map to create k^2 boxes for each feature map\n feature_map_dims = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10, 'conv9_2': 5}\n\n # scale for boxes across different feature maps. boxes for inner feature maps\n # are scaled much lower to detect small objects\n obj_scales = {'conv4_3': 0.1, 'conv7': 0.21, 'conv8_2': 0.255, 'conv9_2': 0.30}\n\n # Defined aspect ratio calculated from mean of (w/h) across all bounding boxes\n # from the dataset. The mean is 0.66 with deviation of 0.07. So aspect ratio is kept\n # at 0.66 for all feature maps\n aspect_ratios = {'conv4_3': [0.5], 'conv7': [0.55], 'conv8_2': [0.6], 'conv9_2': [.66]}\n\n fmaps = list(feature_map_dims.keys())\n prior_boxes = []\n for k, fmap in enumerate(fmaps):\n # for each feature map, create k*k boxes\n for i in range(feature_map_dims[fmap]):\n for j in range(feature_map_dims[fmap]):\n # calculate center coordinates of boxes\n cx = (j + 0.5) / feature_map_dims[fmap]\n cy = (i + 0.5) / feature_map_dims[fmap]\n\n # For each\n for ratio in aspect_ratios[fmap]:\n prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt(ratio), obj_scales[fmap] / sqrt(ratio)])\n\n prior_boxes = torch.FloatTensor(prior_boxes).to(device) # (1930, 4)\n prior_boxes.clamp_(0, 1) # (1930, 4)\n\n return prior_boxes", "def remove_small_boxes(boxes, min_size):\r\n ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]\r\n keep = (ws >= min_size) & (hs >= min_size)\r\n keep = np.where(keep)[0]\r\n return keep", "def clip_boxes(self, boxes, im_shape):\n if boxes.shape[0] == 0:\n return boxes\n\n # x1 >= 0\n boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)\n # y1 >= 0\n boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)\n # x2 < im_shape[1]\n boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)\n # y2 < im_shape[0]\n boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)\n return boxes", "def prefilter(self, filt=None, verbose=False):\n erased = []\n if verbose:\n msg = 'Prior to filter, we have {} cells.'.format(len(self.cells))\n print(msg)\n # check for supplementary observables to be computed\n raw_obs, func_obs = set_observable_list(filters=[filt, ])\n\n # compute suppl obs for all cells\n if raw_obs:\n for cell in self.cells:\n for obs in raw_obs:\n cell.build(obs)\n for cell in self.cells:\n if filt is not None:\n if not filt(cell):\n erased.append(cell)\n # make Colony.add_cell_recursive non functional\n cell.bpointer = None\n if cell.parent:\n cell.parent.childs.remove(cell)\n # make daughter cells new roots\n for ch in cell.childs:\n ch.bpointer = None\n if verbose:\n msg = '{} cells do not pass filter.'.format(len(erased))\n print(msg)\n for cell in erased:\n self.cells.remove(cell) # otherwise would be considered root\n # clean-up actions for computing extra obs\n # extra obs computation depends on tree decomposition\n # this will be done in 
lineage.get_timeseries()\n for cell in self.cells:\n for obs in raw_obs:\n del cell._sdata[obs.label]\n if verbose:\n msg = 'After filtering, we get {} cells.'.format(len(self.cells))\n print(msg)\n# self.metadata.filters.append(repr(boofunc))\n return", "def prune_margin(receptive_box, imsize, threshold=0):\n im_width = imsize[1]\n im_height = imsize[0]\n\n xmin = util.where(receptive_box[:, 0] >= 0 - threshold)\n ymin = util.where(receptive_box[:, 1] >= 0 - threshold)\n xmax = util.where(receptive_box[:, 2] < im_width + threshold)\n ymax = util.where(receptive_box[:, 3] < im_height + threshold)\n\n val1 = util.intersect1d(xmin, ymin)\n val2 = util.intersect1d(xmax, ymax)\n valid_ids = torch.sort(torch.unique(util.intersect1d(val1, val2)))[0]\n\n pruned_receptive_box = receptive_box[valid_ids]\n\n return pruned_receptive_box, valid_ids", "def non_maximum_suppression(boxes, confs, overlap_threshold, top_k):\n eps = 1e-15\n \n boxes = np.asarray(boxes, dtype='float32')\n \n pick = []\n x1, y1, x2, y2 = boxes.T\n \n idxs = np.argsort(confs)\n area = (x2 - x1) * (y2 - y1)\n \n while len(idxs) > 0:\n i = idxs[-1]\n \n pick.append(i)\n if len(pick) >= top_k:\n break\n \n idxs = idxs[:-1]\n \n xx1 = np.maximum(x1[i], x1[idxs])\n yy1 = np.maximum(y1[i], y1[idxs])\n xx2 = np.minimum(x2[i], x2[idxs])\n yy2 = np.minimum(y2[i], y2[idxs])\n \n w = np.maximum(0, xx2 - xx1)\n h = np.maximum(0, yy2 - yy1)\n I = w * h\n \n overlap = I / (area[idxs] + eps)\n # as in Girshick et. al.\n \n #U = area[idxs] + area[i] - I\n #overlap = I / (U + eps)\n \n idxs = idxs[overlap <= overlap_threshold]\n \n return pick", "def bounding_box_filter(points, x_range, y_range, z_range):\n min_x, max_x = x_range\n min_y, max_y = y_range\n min_z, max_z = z_range\n\n bound_x = np.logical_and(points[:, 0] > min_x, points[:, 0] < max_x)\n bound_y = np.logical_and(points[:, 1] > min_y, points[:, 1] < max_y)\n bound_z = np.logical_and(points[:, 2] > min_z, points[:, 2] < max_z)\n\n bb_filter = np.logical_and(np.logical_and(bound_x, bound_y), bound_z)\n\n return points[bb_filter]", "def filter_func(self, agents):\n return [\n agent for agent in agents\n if agent.energy < self.model.energy_threshold and not agent.pack\n ]", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)" ]
[ "0.67611915", "0.6558098", "0.6493744", "0.6487064", "0.644464", "0.64197695", "0.6314156", "0.6236991", "0.6236991", "0.62138236", "0.6198868", "0.61670756", "0.6158014", "0.61504215", "0.6140429", "0.61391824", "0.603643", "0.59337205", "0.59050465", "0.5873898", "0.5776954", "0.57686245", "0.5710087", "0.5676543", "0.56685436", "0.56581855", "0.56570184", "0.5616053", "0.5612297", "0.5602126", "0.5570549", "0.55410105", "0.55382013", "0.55312943", "0.54897714", "0.5448454", "0.5439447", "0.54387504", "0.5432777", "0.54226", "0.5419346", "0.53966075", "0.5395654", "0.53838855", "0.53784126", "0.53784126", "0.53635406", "0.53635406", "0.5350666", "0.5316111", "0.5287448", "0.5270327", "0.52151626", "0.51930463", "0.51896375", "0.518647", "0.51850027", "0.5180761", "0.5173458", "0.5162543", "0.51553714", "0.5151603", "0.51381737", "0.5136714", "0.5128127", "0.5127895", "0.5127555", "0.5123717", "0.51228297", "0.51092505", "0.5107721", "0.5105816", "0.50911534", "0.50883806", "0.5083954", "0.5081546", "0.50799507", "0.5073875", "0.50623226", "0.5061857", "0.5049673", "0.5046279", "0.50423443", "0.5031308", "0.5031084", "0.5025839", "0.5024627", "0.5024176", "0.50210303", "0.50181675", "0.5004261", "0.5003659", "0.5000482", "0.4989281", "0.49856147", "0.49840817", "0.49816316", "0.49767736", "0.49697417", "0.49666208" ]
0.56218785
27
Draw detected bounding boxes.
def vis_detections(im, class_name, dets, thresh=0.5):
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for det in dets:
        bbox = det[:4]
        score = det[-1]
        # skip detections below the confidence threshold so the plot
        # matches the "p(class | box) >= thresh" claim in the title
        if score < thresh:
            continue

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
            )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                  fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_bounding_boxes(self, bounding_boxes, output):\n for i, bb in enumerate(bounding_boxes):\n\n if bb[2] > output.shape[1] or bb[3] > output.shape[0] or bb[0] < 0 or bb[1] < 0:\n continue\n cv2.rectangle(\n np.asarray(output), (bb[0], bb[1]), (bb[2], bb[3]), self.colors[i % len(self.colors)],\n thickness=2\n )", "def draw_bounding_boxes(display, bounding_boxes):\n\n bb_surface = pygame.Surface((VIEW_WIDTH, VIEW_HEIGHT))\n bb_surface.set_colorkey((0, 0, 0))\n for bbox in bounding_boxes:\n points = [(int(bbox[i, 0]), int(bbox[i, 1])) for i in range(8)]\n # draw lines\n # base\n pygame.draw.line(bb_surface, BB_COLOR, points[0], points[1])\n pygame.draw.line(bb_surface, BB_COLOR, points[1], points[2])\n pygame.draw.line(bb_surface, BB_COLOR, points[2], points[3])\n pygame.draw.line(bb_surface, BB_COLOR, points[3], points[0])\n # top\n pygame.draw.line(bb_surface, BB_COLOR, points[4], points[5])\n pygame.draw.line(bb_surface, BB_COLOR, points[5], points[6])\n pygame.draw.line(bb_surface, BB_COLOR, points[6], points[7])\n pygame.draw.line(bb_surface, BB_COLOR, points[7], points[4])\n # base-top\n pygame.draw.line(bb_surface, BB_COLOR, points[0], points[4])\n pygame.draw.line(bb_surface, BB_COLOR, points[1], points[5])\n pygame.draw.line(bb_surface, BB_COLOR, points[2], points[6])\n pygame.draw.line(bb_surface, BB_COLOR, points[3], points[7])\n display.blit(bb_surface, (0, 0))", "def draw_bounding_box(self):\n # Gets the bounding box\n xmin, ymin, xmax, ymax = self.get_bounding_box()\n\n # Gets the actual coordinates\n width = xmax - xmin\n height = ymax - ymin\n center_x = xmin + (width)/2\n center_y = ymin + (height)/2\n\n arcade.draw_rectangle_outline(center_x, center_y, width, height, (255, 0, 0))", "def draw_bounding_boxes(self, image_path):\n img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)\n bboxes = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n unique, counts = np.unique(img, return_counts=True)\n for uni in unique:\n if uni == 0:\n continue\n self.get_instance_bounding_box(img, bboxes, uni)\n\n cv.namedWindow('building bounding boxes', cv.WINDOW_NORMAL)\n cv.imshow('building bounding boxes', bboxes)\n cv.waitKey(0)\n cv.destroyAllWindows()", "def draw_bounding_box(objects,color):\n\n for i in range(len(objects)):\n x, y, w, h, d = objects[i].get_attributes()\n print(x, y, w, h, d)\n corr = get_correction(d, a, hfov, x)\n cv2.rectangle(color, (x-corr, y), (x+w-corr, y+h), (0, 255, 0), 4)\n\n try:\n real_x, real_y = get_dimensions(d, w, h, hfov, vfov, 640, 480)\n real_x = round(real_x, 3)\n real_y = round(real_y, 3)\n\n except:\n real_x, real_y = 'ERROR'\n\n cv2.putText(color, 'depth = ' + str(d) + 'm', (30, i*60 + 30) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n cv2.putText(color, 'width = ' + str(real_x)+ 'm', (30, i*60 + 45) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n cv2.putText(color, 'height = ' + str(real_y)+ 'm', (30, i*60 + 60) ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n\n if(i < len(objects)-1):\n ## distance between left and right object\n distance = round(distance_between_objects(objects[i], objects[i+1], hfov, 640), 3)\n if distance > l:\n textcolor = (0, 255, 0)\n else:\n textcolor = (0, 0, 255)\n\n cv2.putText(color, 'distance between objects = ' + str(distance) + 'm',\n (320, i*60 + 70) , cv2.FONT_HERSHEY_SIMPLEX, 0.5, textcolor, 1)", "def draw_bbox(image, bboxes, masks, class_ids, class_names, scores, colors, show_label=True, show_mask=True):\n image_h, image_w, _ = image.shape\n\n for i, bbox in enumerate(bboxes):\n y1, x1, 
y2, x2 = bbox\n coor = np.array([x1, y1, x2, y2], dtype=np.int32)\n fontScale = 0.5\n score = scores[i]\n class_ind = int(class_ids[i])\n bbox_color = colors[class_ind]\n bbox_thick = int(0.6 * (image_h + image_w) / 600)\n c1, c2 = (coor[0], coor[1]), (coor[2], coor[3])\n cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)\n\n if show_label:\n bbox_mess = '%s: %.2f' % (class_names[class_ind], score)\n t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]\n cv2.rectangle(image, c1, (c1[0] + t_size[0], c1[1] - t_size[1] - 3), bbox_color, -1) # filled\n\n cv2.putText(image, bbox_mess, (c1[0], c1[1] - 2), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)\n\n # Mask\n mask = masks[:, :, i]\n if show_mask:\n image = apply_mask(image, mask, bbox_color)\n\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n pts = np.array(contours[0], np.int32)\n pts = pts.reshape((-1, 1, 2))\n # image = cv2.polylines(image, [pts], True, bbox_color)\n\n return image", "def show_bounding_boxes(dir_path: str) -> None:\r\n \r\n for image_file in glob.glob(dir_path + '/*.png'):\r\n image = cv2.imread(image_file)\r\n height, width, _ = image.shape\r\n\r\n with open(image_file.split(\".\")[0] +'.txt', 'r') as reader:\r\n annotations = reader.readlines()\r\n for annot in annotations:\r\n annot = annot.split()\r\n \r\n # Calculation of top left point and bottom right point of the bounding box \r\n x1, y1 = int((float(annot[1]) - float(annot[3])/2)*width), int((float(annot[2]) - float(annot[4])/2)*height)\r\n x2, y2 = int((float(annot[1]) + float(annot[3])/2)*width), int((float(annot[2]) + float(annot[4])/2)*height)\r\n \r\n # BGR color format\r\n if annot[0] == '0':\r\n color = (0,255,0) # Mask is worn correctly (Green color)\r\n label = 'Good'\r\n else:\r\n color = (0,0,255) # Mask is either not worn correctly or not worn at all (Red color)\r\n label = 'Bad'\r\n \r\n cv2.putText(image,\r\n label, \r\n (x1, y1 - 10),\r\n fontFace=cv2.FONT_HERSHEY_TRIPLEX,\r\n fontScale=0.5, \r\n color=color,\r\n thickness=1) \r\n \r\n cv2.rectangle(image, (x1, y1), (x2, y2), color, thickness=1)\r\n \r\n k = cv2.waitKey(0) & 0xFF\r\n cv2.imshow(image_file.split(\"sss\")[-1], image)\r\n if k == 27:\r\n cv2.destroyAllWindows()\r\n break", "def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)", "def _draw(self, frame, boxes, probs, landmarks, name):\n try:\n print('drawing')\n for box, prob, ld, id in zip(boxes, probs, landmarks, name):\n # Draw rectangle on frame\n\n cv2.putText(frame, id, (200, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n\n\n except:\n print('not draw box')\n pass\n\n return frame", "def _draw_detections(frame, frame_detections):\n boxColor = (0,255,0)\n for box in frame_detections:\n cv2.rectangle(frame,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),boxColor,7)\n # cv2.rectangle(frame,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),boxColor,7)\n cv2.putText(frame,str(format(box[4],'.2f')),(int(box[0]),int(box[3]+20)),cv2.FONT_HERSHEY_SIMPLEX,0.6,boxColor,1,cv2.LINE_AA)\n\n return frame", "def 
__draw_rec(self):\n # Draw the bounding boxes to the frame\n while True:\n # Save\n temp_categories = defaultdict(int)\n\n if (self.__detect_info is None) or \\\n (self.__img is None):\n continue\n\n # Get the size of image\n width, height = self.__size\n # Get the copy from self.__img\n self.__frame = np.copy(self.__img)\n for info in self.__detect_info:\n # Get only the person detection\n for i in range(len(self.__categories)):\n if info.name != self.__categories[i]:\n continue\n # Increase the amount of detected object\n temp_categories[info.name] += 1\n\n # Pick out the bounding vertices\n top_right_x, top_right_y = \\\n int(info.bounding_poly.normalized_vertices[0].x * width), \\\n int(info.bounding_poly.normalized_vertices[0].y * height)\n bottom_right_x, bottom_right_y = \\\n int(info.bounding_poly.normalized_vertices[2].x * width), \\\n int(info.bounding_poly.normalized_vertices[2].y * height)\n\n # Draw bounding boxes and put the text\n cv2.rectangle(self.__frame,\n (top_right_x, top_right_y),\n (bottom_right_x, bottom_right_y),\n self.__colors[i],\n 2)\n cv2.putText(self.__frame,\n f\"{info.name} {info.score:.2f}\",\n (top_right_x, top_right_y),\n cv2.FONT_HERSHEY_TRIPLEX,\n 0.5,\n self.__colors[i], 2)\n\n self.__show_categories = temp_categories", "def __draw_boxes(self, img, bboxes, color=(128, 0, 0), thick=4):\n\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy", "def draw_bboxes(img, bboxes):\n colors = tf.cast(np.array([[1, 0, 0, 1]] * 10), dtype=tf.float32)\n img_with_bounding_boxes = tf.image.draw_bounding_boxes(\n img,\n bboxes,\n colors\n )\n plt.figure()\n plt.imshow(img_with_bounding_boxes[0])\n plt.show()", "def draw_bbox(image, bboxes, classes_file_path, show_label = True, show_confidence = True, Text_colors = (255,255,0), \n rectangle_colors = '', tracking = False):\n \n # obtain list of classes name \n classes = read_class_names(classes_file_path)\n \n # obtain length of classes \n num_classes = len(classes)\n \n # obtain shape of image\n image_h, image_w, _ = image.shape\n \n # obtain list of unique hsv (hue, saturation, value) for each class\n hsv_tuples = [(1.0 * x / num_classes, 1., 1.) 
for x in range(num_classes)]\n \n # obtain unique rgb tuples from hsv tuples\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n \n # scale rgb from 0-1 to 0-255 \n colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))\n \n # shuffle colors list with same seed\n random.seed(0)\n random.shuffle(colors)\n random.seed(None)\n \n # iterate over bbox in bboxes\n for i, bbox in enumerate(bboxes):\n \n # obtain coordinates of bbox\n coor = np.array(bbox[:4], dtype = np.int32)\n \n # obtain objectiveness score\n score = bbox[4]\n \n # obtain class index\n class_ind = int(bbox[5])\n \n # choose rectangle color if none is given, else chose from tuple\n bbox_color = rectangle_colors if rectangle_colors != '' else colors[class_ind]\n \n # obtain thickness of bboxes\n bbox_thick = int(0.6 * (image_h + image_w) / 1000)\n if bbox_thick < 1: bbox_thick = 1\n \n # obtain font scale\n fontScale = 0.75 * bbox_thick\n \n # obtain tuples of min and max coordinates\n (x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])\n\n # generate bbox\n cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick * 2)\n \n # if show label is true\n if show_label:\n \n # get objectiveness score label\n score_str = \" {:.2f}\".format(score) if show_confidence else \"\"\n \n # if tracking show whole score without rounding\n if tracking: score_str = \" \" + str(score)\n \n # obtain label of class name with objectiveness score\n label = \"{}\".format(classes[class_ind]) + score_str\n \n # get text size \n (text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,\n fontScale, thickness = bbox_thick)\n # put filled text rectangle\n cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), bbox_color, \n thickness = cv2.FILLED)\n\n # put text above rectangle\n cv2.putText(image, label, (x1, y1 - 4), cv2.FONT_HERSHEY_COMPLEX_SMALL,\n fontScale, Text_colors, bbox_thick, lineType = cv2.LINE_AA)\n\n return image", "def draw_image_bboxes(pixel_candidates, gt_candidate, detection_candidate):\n fig, ax = plt.subplots()\n ax.imshow(pixel_candidates, cmap='gray')\n\n for candidate in detection_candidate:\n minc, minr, maxc, maxr = candidate\n rect = mpatches.Rectangle((minc, minr), maxc - minc + 1, maxr - minr + 1, fill=False, edgecolor='red', linewidth=2)\n ax.add_patch(rect)\n\n for candidate in gt_candidate:\n minc, minr, maxc, maxr = candidate\n rect = mpatches.Rectangle((minc, minr), maxc-minc+1, maxr-minr+1, fill=False, edgecolor='green', linewidth=2)\n ax.add_patch(rect)\n\n #plt.show()", "def _draw_boxes(self, image, boxes, classes, thickness=4):\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i]) - 1\n color = self.COLOR_LIST[class_id]\n cv2.rectangle(image, (left, top), (right, bot), color=color, thickness=thickness)", "def drawbboxes(img, bboxes, labels):\n thickness = 5\n color = (0, 255, 0)\n for bbox in bboxes:\n # top-left is x1, y1; bottom-right is x2,y2\n x1, y1, x2, y2, prob, category = (\n int(bbox[0]),\n int(bbox[1]),\n int(bbox[2]),\n int(bbox[3]),\n round(bbox[4], 2),\n labels[int(bbox[5])],\n )\n img = cv.rectangle(img, (x1, y1), (x2, y2), color, thickness)\n img = cv.putText(\n img,\n f\"Label: {category} ({prob})\",\n (x1, y1 - 10),\n 0,\n 0.5,\n color,\n thickness // 3,\n )\n return img", "def draw_boxes(indexes, frame, all_boxes):\n bbox = []\n mid_points = []\n\n for i in indexes:\n x = i[0]\n box = all_boxes[x]\n bbox.append(box)\n 
mid_points.append(mid_point(frame, box))\n x1, y1, w, h = box[0], box[1], box[2], box[3]\n x2, y2 = x1+w, y1+h\n\n cv2.rectangle(frame, (x1,y1),(x2,y2),(255,0,0),2) \n\n return mid_points, bbox", "def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):\n # make a copy of the image\n imcopy = np.copy(img)\n # draw each bounding box on your image copy using cv2.rectangle()\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # return the image copy with boxes drawn\n return imcopy", "def draw_boxes(image, bboxes, color=(0., 0., 1.0), thick=6):\n # make a copy of the image\n draw_img = np.copy(image)\n # draw each bounding box on your image copy using cv2.rectangle()\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick)\n # return the image copy with boxes drawn\n return draw_img", "def show_boxes(img, boundary_boxes, gt_boxes=None):\n\n for (x_tl, y_tl, x_br, y_br) in boundary_boxes:\n cv2.rectangle(img, (x_tl, y_tl),\n (x_br, y_br),\n (0, 0, 255), 2)\n\n if gt_boxes is not None:\n for (x_tl, y_tl, x_br, y_br) in gt_boxes:\n cv2.rectangle(img, (x_tl, y_tl),\n (x_br, y_br),\n (0, 255, 0), 2)\n\n cv2.imshow(\"img\", img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def draw_boxes(bboxes: [[int]], img: 'np.array', line_width: int=2) -> 'np.array':\n for x, y, w, h in bboxes:\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), line_width)\n return img", "def draw_labeled_bboxes(img, labels):\n # iterate through all detected instances\n for it in range(1, labels[1]+1):\n # find pixels w/each vehicle label value\n nonzero = (labels[0] == it).nonzero()\n # identify x,y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # define a bounding box based on min/max x,y\n bbox = ( (np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)) )\n blue_clr = (0,0,255)\n cv2.rectangle(img, bbox[0], bbox[1], blue_clr, 6)\n return img", "def draw_rectangles(self, draw_img):\n if not self.features_detected:\n raise Exception('Detect features before drawing')\n for (x,y,w,h) in self.features:\n cv2.rectangle(draw_img,(x,y),(x+w,y+h),(255,0,0),2)\n return draw_img", "def draw_bounding_boxes(image, boxes):\n num_boxes = boxes.shape[0]\n gt_boxes_new = boxes.copy()\n draw_image = Image.fromarray(np.uint8(image))\n for i in range(num_boxes):\n draw_image = _draw_single_box(image=draw_image,\n quad=gt_boxes_new[i,:],\n font=FONT)\n\n image = np.array(draw_image, dtype=np.float32)\n return image", "def draw_bboxes(img, bboxes, color=(0, 0, 255), thick=6):\n draw_img = np.copy(img)\n # Draw rectangles given bbox coordinates as opposing coordinates\n # bboxes = opposing coordinates: (x1,y1), (x2,y2)\n [cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick) for bbox in bboxes]\n return draw_img", "def show_bboxes(img, bounding_boxes=None, facial_landmarks=[]):\n\n img_copy = img.copy()\n draw = ImageDraw.Draw(img_copy)\n# for b in bounding_boxes:\n# draw.rectangle([\n# (b[0], b[1]), (b[2], b[3])\n# ], outline='white')\n\n for p in facial_landmarks:\n for i in range(106):\n draw.ellipse([\n (p[i*2] - 1.0, p[2*i + 1] - 1.0),\n (p[i*2] + 1.0, p[2*i+1] + 1.0)\n ], outline='blue')\n font = ImageFont.truetype(\"arial.ttf\", 10)\n draw.text([p[2*i], p[2*i+1]], str(i), font=font)\n\n return img_copy", "def plt_bboxes(img, classes, scores, bboxes, figsize=(17.78,10), linewidth=1.5):\n fig = 
plt.figure(figsize=figsize, frameon=False)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off')\n plt.imshow(img)\n height = img.shape[0]\n width = img.shape[1]\n print (\"original height width\", height, width)\n if (classes.shape[0] > 0):\n print (\"This frame has class\")\n for i in range(classes.shape[0]):\n cls_id = int(classes[i])\n if cls_id >= 0:\n score = scores[i]\n if cls_id not in colors:\n colors[cls_id] = (random.random(), random.random(), random.random())\n ymin = int(bboxes[i, 0] * height)\n xmin = int(bboxes[i, 1] * width)\n ymax = int(bboxes[i, 2] * height)\n xmax = int(bboxes[i, 3] * width)\n rect = plt.Rectangle((xmin, ymin), xmax - xmin,\n ymax - ymin, fill=False,\n edgecolor=colors[cls_id],\n linewidth=linewidth)\n plt.gca().add_patch(rect)\n class_name = pascal_classes[cls_id]\n plt.gca().text(xmin, ymin - 2,\n '{:s} | {:.3f}'.format(class_name, score),\n bbox=dict(facecolor=colors[cls_id], alpha=0.5),\n fontsize=12, color='white')\n fig.canvas.draw()\n data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n plt.close()\n print(\"Processed data with shape, \", data.shape)\n return data", "def draw_boxes_and_labels(img, localized_objs, obj_classes, box_color=(0, 255, 255)):\n img_h, img_w = img.shape[:2]\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_size = 0.5\n font_color = (0, 0, 0)\n\n for (i, bbox_cv2) in localized_objs:\n # Draw the object boxes\n left, right, top, bottom = handle_bad_corners(bbox_cv2[0], bbox_cv2[1], bbox_cv2[2], bbox_cv2[3], img_w, img_h)\n cv2.rectangle(img, (left, top), (right, bottom), box_color, 4)\n # Draw a filled boxes on top of the bounding box (as the background for the labels)\n left1, top1, right1, _ = handle_bad_corners(left-2, top-40, right+2, bottom, img_w, img_h)\n cv2.rectangle(img, (left1, top1), (right1, top), box_color, -1, 1)\n # Output the labels that show the x and y coordinates of the bounding box center.\n text_label= obj_classes[i]\n top2 = 0 if top<25 else top-25\n cv2.putText(img, text_label, (left, top2), font, font_size, font_color, 1, cv2.LINE_AA)\n text_xy= 'x='+str((left+right)/2)+' y='+str((top+bottom)/2)\n cv2.putText(img, text_xy, (left,top2+20), font, 0.4, font_color, 1, cv2.LINE_AA)\n\n return img", "def draw_image_bboxes_opencv(image, gt_candidate, detection_candidate):\n for candidate in detection_candidate:\n minc, minr, maxc, maxr = candidate\n cv2.rectangle(image, (minc, minr), (maxc, maxr), (0, 0, 255), 8) # Red\n\n for candidate in gt_candidate:\n minc, minr, maxc, maxr = candidate\n cv2.rectangle(image, (minc, minr), (maxc, maxr), (0, 255, 0), 5) # Green\n\n return image", "def plt_bboxes(img, classes, scores, bboxes, figsize=(10,10), linewidth=1.5):\n fig = plt.figure(figsize=figsize)\n plt.imshow(img)\n height = img.shape[0]\n width = img.shape[1]\n colors = dict()\n for i in range(classes.shape[0]):\n cls_id = int(classes[i])\n if cls_id >= 0:\n score = scores[i]\n if cls_id not in colors:\n colors[cls_id] = (random.random(), random.random(), random.random())\n ymin = int(bboxes[i, 0] * height)\n xmin = int(bboxes[i, 1] * width)\n ymax = int(bboxes[i, 2] * height)\n xmax = int(bboxes[i, 3] * width)\n# crop_img = img[xmin:(xmax - xmin),xmax:(ymax - ymin)]\n# misc.imsave('1.jpg', crop_img)\n rect = plt.Rectangle((xmin, ymin), xmax - xmin,\n ymax - ymin, fill=False,\n edgecolor=colors[cls_id],\n linewidth=linewidth)\n plt.gca().add_patch(rect)\n class_name = CLASSES[cls_id]\n plt.gca().text(xmin, ymin - 2,\n '{:s} | 
{:.3f}'.format(class_name, score),\n bbox=dict(facecolor=colors[cls_id], alpha=0.5),\n fontsize=12, color='white')\n plt.show()", "def draw_boxes(img, paths, exit_masks=[]):\r\n for path in paths:\r\n contour, centroid = path[-1][:2]\r\n # DONT DRAW IF VEHICLE EXITS\r\n if vehicle_exits(centroid, exit_masks): continue\r\n x, y, w, h = contour\r\n\r\n # DRAW RECTANGLE AND CIRCLE DENOTING THE BOUNDARY AND CENTROID OF VEHICLE\r\n cv2.rectangle(img, (x, y), (x + w - 1, y + h - 1),BOUNDING_BOX_COLOUR, 1)\r\n cv2.circle(img, centroid, 2, CENTROID_COLOUR, -1)\r\n return img", "def _get_bounding_boxes(self, imgs, summed_viz, threshold_value=.7):\n self.viz = summed_viz # for debug\n viz = summed_viz\n n_batchs = viz.shape[ 0]\n n_classes = viz.shape[-1]\n \n # viz.shape (100,14,14,20) => (14,14,100,20)\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Normalize <viz>, image per image (to be in range [-1,1])\n viz = viz / np.max(np.abs(viz), axis=(0,1))\n viz = (viz+1)/2 # range[0,1]\n \n # Resize each summed_viz to its original size (size of input image)\n if viz.shape[:2] != imgs.shape[1:3]:\n viz = np.array(\n [ skimage.transform.resize(viz[:,:,idx], imgs[idx].shape[:2])\n for idx in range(len(imgs))\n if viz.shape[0] != imgs.shape[1]\n ] )\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Threshold <viz>s to keep values over 70% of its max values\n m_max = threshold_value * viz.max(axis=(0,1))\n viz = viz * (m_max < viz)\n \n # We want a 2d boundind box, so project threshold in xs and ys\n xxs = viz.sum(axis=0)\n yys = viz.sum(axis=1)\n \n # Get some non-thresholded values (left, top... of bounding boxes)\n get_lefts = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][ 0]\n get_tops = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][-1]\n get_rights = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][-1]\n get_bottoms = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][ 0]\n\n # Debug\n # def get_lefts (b_id, c_idx): \n # print xxs[:,b_id,c_idx].nonzero()\n # xxs[:,b_id,c_idx].nonzero()[0][ 0]\n \n # Build the 2d array with first or lasts positions of zeros\n # INNER FUNCTION\n def _get_border_array(f_border=get_lefts):\n return np.array(\n [ map(f_border, [b_idx]*n_classes, range(n_classes))\n for b_idx in range(n_batchs) ]\n )\n \n lefts = _get_border_array(get_lefts)\n tops = _get_border_array(get_tops)\n rights = _get_border_array(get_rights)\n bottoms = _get_border_array(get_bottoms)\n \n return lefts, tops, rights, bottoms", "def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color)", "def draw_labeled_bboxes(img, labels):\n # Iterate through all detected cars\n for car_number in range(1, labels[1] + 1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzero_y = np.array(nonzero[0])\n nonzero_x = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzero_x), np.min(nonzero_y)), (np.max(nonzero_x), np.max(nonzero_y)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)\n # Return the image\n return img", "def draw(self, frame):\n left, right, top, bottom = self.box['left'], self.box['right'], self.box['top'], self.box['bottom']\n text = '{}: {:.2f} ({:.2f} m)'.format(self.label, self.confidence, self.distance)\n\n # Draw label\n text_size, baseline = cv.getTextSize(text, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n 
top = max(top, text_size[1])\n\n cv.rectangle(frame, (left, top - text_size[1]), (left + text_size[0], top + baseline), (255, 255, 255), cv.FILLED)\n cv.putText(frame, text, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))\n\n # Draw bounding box\n cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0))", "def draw_labeled_bboxes(img, labels):\n # Iterate through all detected cars\n for car_number in range(1, labels[1] + 1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)\n # Return the image\n return img", "def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color, 3)", "def draw_rects(img, rects, color):\n for x1, y1, x2, y2 in rects:\n cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)", "def draw(\n self,\n box_thickness: int = 2,\n show_confidence: bool = True,\n color_mapping: Optional[List[Tuple[int, int, int]]] = None,\n target_bboxes: Optional[np.ndarray] = None,\n target_bboxes_format: Optional[str] = None,\n target_class_ids: Optional[np.ndarray] = None,\n ) -> np.ndarray:\n image = self.image.copy()\n\n target_bboxes = target_bboxes if target_bboxes is not None else np.zeros((0, 4))\n target_class_ids = target_class_ids if target_class_ids is not None else np.zeros((0, 1))\n bbox_format_factory = BBoxFormatFactory()\n if len(target_bboxes):\n target_bboxes_xyxy = convert_bboxes(\n bboxes=target_bboxes,\n image_shape=self.prediction.image_shape,\n source_format=bbox_format_factory.get(target_bboxes_format),\n target_format=bbox_format_factory.get(\"xyxy\"),\n inplace=False,\n )\n else:\n target_bboxes_xyxy = target_bboxes\n\n plot_targets = any([len(tbbx) > 0 for tbbx in target_bboxes_xyxy])\n color_mapping = color_mapping or generate_color_mapping(len(self.class_names))\n\n for pred_i in np.argsort(self.prediction.confidence):\n class_id = int(self.prediction.labels[pred_i])\n score = \"\" if not show_confidence else str(round(self.prediction.confidence[pred_i], 2))\n image = draw_bbox(\n image=image,\n title=f\"{self.class_names[class_id]} {score}\",\n color=color_mapping[class_id],\n box_thickness=box_thickness,\n x1=int(self.prediction.bboxes_xyxy[pred_i, 0]),\n y1=int(self.prediction.bboxes_xyxy[pred_i, 1]),\n x2=int(self.prediction.bboxes_xyxy[pred_i, 2]),\n y2=int(self.prediction.bboxes_xyxy[pred_i, 3]),\n )\n\n if plot_targets:\n target_image = self.image.copy()\n for target_idx in range(len(target_bboxes_xyxy)):\n class_id = int(target_class_ids[target_idx])\n target_image = draw_bbox(\n image=target_image,\n title=f\"{self.class_names[class_id]}\",\n color=color_mapping[class_id],\n box_thickness=box_thickness,\n x1=int(target_bboxes_xyxy[target_idx, 0]),\n y1=int(target_bboxes_xyxy[target_idx, 1]),\n x2=int(target_bboxes_xyxy[target_idx, 2]),\n y2=int(target_bboxes_xyxy[target_idx, 3]),\n )\n\n height, width, ch = target_image.shape\n new_width, new_height = int(width + width / 20), int(height + height / 8)\n\n # Crate a new canvas with new width and height.\n canvas_image = np.ones((new_height, new_width, ch), dtype=np.uint8) * 255\n canvas_target = np.ones((new_height, new_width, ch), 
dtype=np.uint8) * 255\n\n # New replace the center of canvas with original image\n padding_top, padding_left = 60, 10\n\n canvas_image[padding_top : padding_top + height, padding_left : padding_left + width] = image\n canvas_target[padding_top : padding_top + height, padding_left : padding_left + width] = target_image\n\n img1 = cv2.putText(canvas_image, \"Predictions\", (int(0.25 * width), 30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0))\n img2 = cv2.putText(canvas_target, \"Ground Truth\", (int(0.25 * width), 30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0))\n\n image = cv2.hconcat((img1, img2))\n return image", "def draw_predictions(frame, boxes, confidences, class_ids, labels, colors):\n\tfor i in range(len(boxes)):\n\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\t\tcolor = [int(c) for c in colors[class_ids[i]]]\n\n\t\t# Draw bounding box\n\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), color=color, thickness=1)\n\n\t\t# Print label + confidence\n\t\ttext = str(labels[class_ids[i]]) + ' ' + str(confidences[i])\n\t\t(text_width, text_height) = cv2.getTextSize(text, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.3, thickness=1)[0]\n\t\tcv2.rectangle(frame, (x, y-text_height-1), (x+text_width, y), color=color, thickness=cv2.FILLED)\n\t\tcv2.putText(frame, text, org=(x, y-1), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.3, color=(0,0,0), thickness=1)\n\n\treturn frame", "def draw_bounding_boxes_on_image(image, boxes, color=[], thickness=5):\n\n boxes_shape = boxes.shape\n if not boxes_shape:\n return\n if len(boxes_shape) != 2 or boxes_shape[1] != 4:\n raise ValueError('Input must be of size [N, 4]')\n for i in range(boxes_shape[0]):\n draw_bounding_box_on_image(image, boxes[i, 1], boxes[i, 0], boxes[i, 3],\n boxes[i, 2], color[i], thickness)", "def detect(self, mask):\n # 1) Return Non zero indices\n det_idx = np.where(mask > 0.0)\n idx_x, idx_y = det_idx[0], det_idx[1]\n # 2) Create 1x1 box for each pixel detected.\n detections = []\n for i in range(0, len(idx_x)):\n x, y = idx_x[i], idx_y[i]\n detections.append((x, y, x+1, y+1, 1)) # x1, y1, x2, y2, area\n # 3) merge boxes\n bounding_boxes = self.bounding_boxes(detections)\n return bounding_boxes", "def draw_boundingbox(image, infer_output, image_width, image_height, conf_thresh):\n\n out_image = image.copy()\n logger.debug(' - input image: [width] %d, [height] %d' % (image.shape[1], image.shape[0]))\n\n def check_valid_range(val, max_val):\n \"\"\" check the coordinate of bbox is inside of an image\"\"\"\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val\n\n valid_obj_num = 0\n valid_obj_bbox = []\n\n for obj_info in infer_output:\n conf = obj_info['conf']\n # filter by the confidence\n if conf >= conf_thresh:\n # calculate bbox coordinate\n xmin = int(obj_info['x_min'] * image_width)\n ymin = int(obj_info['y_min'] * image_height)\n xmax = int(obj_info['x_max'] * image_width)\n ymax = int(obj_info['y_max'] * image_height)\n\n # round up into valid range\n xmin = check_valid_range(xmin, image_width)\n ymin = check_valid_range(ymin, image_height)\n xmax = check_valid_range(xmax, image_width)\n ymax = check_valid_range(ymax, image_height)\n\n # draw bbox\n cv2.rectangle(out_image, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)\n\n valid_obj_num += 1\n valid_obj_bbox.append((xmin, ymin, xmax, ymax))\n logger.debug(' - draw bbox [%d, %d, %d, %d] confidence: %f' % (xmin,ymin,xmax,ymax,conf))\n\n return out_image, valid_obj_num", "def draw_boxes_on_image(img, bboxes, color=(0, 
0, 1), thick=6):\n imcopy = np.copy(img)\n\n for bbox in bboxes:\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n\n return imcopy", "def visualize_bbox(img, bbox, class_name, color=(255, 0, 0) , thickness=2):\n BOX_COLOR = (255, 0, 0) # Red\n TEXT_COLOR = (255, 255, 255) # White\n\n x_min, y_min, x_max, y_max = bbox\n\n cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)\n\n ((text_width, text_height), _) = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1)\n cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)), (x_min + text_width, y_min), BOX_COLOR, -1)\n cv2.putText(\n img,\n text=class_name,\n org=(x_min, y_min - int(0.3 * text_height)),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.35,\n color=TEXT_COLOR,\n lineType=cv2.LINE_AA,\n )\n return img", "def bounding_box(points):\n x, y, w, h = cv2.boundingRect(np.array([p for p in points]))\n bounding = Box(x, y, w, h)\n return bounding", "def draw_bBox(img, start, end, class_name, prob, bbox_color=(0, 255, 0)):\n v = cv2.rectangle(img, start, end, bbox_color, 1)\n\n text = \"{0}: {1:.2f}%\".format(class_name, prob) if class_name != \"\" else \"{0:.1f}%\".format(prob)\n text_size = cv2.getTextSize(text, cv2.FONT_HERSHEY_COMPLEX, 0.5, 2)[0]\n cv2.rectangle(img, (start[0], start[1] - text_size[1]), (start[0] + text_size[0], start[1]), bbox_color, cv2.FILLED)\n cv2.putText(img, text, start, cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)", "def draw_cv2(self, image, boxes, scores, class_ids):\n\n h, w = image.shape[:2]\n for (x1, y1, x2, y2), score, cls_id in zip(boxes, scores, class_ids):\n\n # Box coordinates are normalised, convert to absolute and clip to image boundaries\n x1 = np.clip(int(x1 * w), 0, w-1)\n y1 = np.clip(int(y1 * h), 0, h-1)\n x2 = np.clip(int(x2 * w), 0, w-1)\n y2 = np.clip(int(y2 * h), 0, h-1)\n\n cid = int(cls_id)\n c = self.colors[cid]\n label = f'{self.class_names[cid]} {score:.2f}'\n cv2.rectangle(image, (x1, y1), (x2, y2), c, 3)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(image, label, (x1, int(y1 * 0.95)), font, 1, c, 3)\n return image", "def draw_bbox(image, im_id, catid2name, bboxes, threshold):\n draw = ImageDraw.Draw(image)\n\n catid2color = {}\n color_list = colormap(rgb=True)[:40]\n for dt in np.array(bboxes):\n if im_id != dt['image_id']:\n continue\n catid, bbox, score = dt['category_id'], dt['bbox'], dt['score']\n if score < threshold:\n continue\n\n xmin, ymin, w, h = bbox\n xmax = xmin + w\n ymax = ymin + h\n\n if catid not in catid2color:\n idx = np.random.randint(len(color_list))\n catid2color[catid] = color_list[idx]\n color = tuple(catid2color[catid])\n\n # draw bbox\n draw.line(\n [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),\n (xmin, ymin)],\n width=2,\n fill=color)\n\n # draw label\n text = \"{} {:.2f}\".format(catid2name[catid], score)\n tw, th = draw.textsize(text)\n draw.rectangle(\n [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)\n draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))\n\n return image", "def draw_labeled_bboxes(img, labels):\n # Iterate through all detected cards\n for car_number in range(1, labels[1] + 1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n 
cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)\n # Return the image\n return img", "def draw_boxes(image, bounds, color):\n draw = ImageDraw.Draw(image)\n\n for bound in bounds:\n draw.polygon([\n bound.vertices[0].x, bound.vertices[0].y,\n bound.vertices[1].x, bound.vertices[1].y,\n bound.vertices[2].x, bound.vertices[2].y,\n bound.vertices[3].x, bound.vertices[3].y], None, color)\n # font = ImageFont.truetype(\"sans-serif.ttf\", 10)\n draw.text((bound.vertices[0].x, bound.vertices[0].y,),bound,(255,255,255),font=font)\n return image", "def draw(self):\r\n\r\n\r\n\t\tself.predict()\r\n\t\t#print np.shape(self.gray)\r\n\t\t#cv2.rectangle(self.gray, (self.bb[0], self.bb[1]), (self.bb[0] + self.bb[2], self.bb[1] + self.bb[3]))\r\n\r\n\t\t# draw points as green circles\r\n\t\tfor point in self.features:\r\n\t\t\tcv2.circle(self.gray,(int(point[0][0]),int(point[0][1])),3,(255),-1)\r\n\t\t\t\r\n\t\tcv2.imshow('image',self.gray)\r\n\t\tcv2.waitKey(1)", "def draw_detections(self, img, yolo_results):\n\n _, height, _ = img.shape\n for yolo_result in yolo_results:\n class_index = yolo_result.class_index\n obj_name = yolo_result.obj_name\n x = yolo_result.x_min\n y = yolo_result.y_min\n w = yolo_result.width\n h = yolo_result.height\n\n offset = class_index * 123457 % self.meta.classes\n\n red = self._get_color(2, offset, self.meta.classes)\n green = self._get_color(1, offset, self.meta.classes)\n blue = self._get_color(0, offset, self.meta.classes)\n box_width = int(height * 0.006)\n cv2.rectangle(img, (int(x), int(y)), (int(x+w)+1, int(y+h)+1), (red, green, blue), box_width)\n cv2.putText(img, obj_name, (int(x) -1, int(y) -1), cv2.FONT_HERSHEY_PLAIN, 2, (red, green, blue), 2)\n\n return img", "def draw(self):\n print(\"Drawing...\", end=' ')\n s = self.pixelsPerCell\n for h in range(self.height):\n for w in range(self.width):\n self.box[w][h] = self.canvas.create_rectangle(w*s, h*s, w*s+s, h*s+s,\n fill = \"gray\", outline = \"gray\")\n self.canvas.update()\n print(\"Done!\")", "def gain_box_score(im, preds):\n if len(preds[0]) == 0:\n cv2.imshow(\"Video detection\", im)\n else:\n for pred in preds:\n for i, box_label in enumerate(zip( pred[\"boxes\"], pred[\"labels\"] )):\n box, label = box_label\n xmin, ymin, xmax, ymax = box\n#-------------------- Create a Rectangle patch ----------------------- \n if label==1:\n class_name='with_mask'\n color = (0, 255, 0)\n elif label==2:\n class_name='without_mask'\n color = (0, 0, 255)\n elif label==3:\n class_name='mask_worn_improperly'\n color = (255, 255 ,0)\n score = pred['scores'][i]\n#--------------------- Bounding Box painting -------------------------- \n if score > 0.65:\n cv2.rectangle(im, (xmin, ymin), (xmax, ymax), color, 1) \n cv2.putText(im, str(class_name)+str(round(score.item(),2)), (xmin,int(ymax-ymax/20)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1) #print class name\n cv2.imshow(\"Video detection\",im)\n print('*****', 'Bbox:', i , '*****' )\n print('Class: ', str(class_name))\n print('Scores: ', str(round(score.item(),2)))\n print('boxes: ',f'{int(xmin)}, {int(ymin)}, {int(xmax)}, {int(ymax)}')\n print('image shape: ', im.shape) \n else:\n cv2.imshow(\"Video detection\", im)\n print('********************','\\n')", "def draw_bounding_boxes_on_image_array(image, boxes, color=[], thickness=5):\n\n draw_bounding_boxes_on_image(image, boxes, color, thickness)\n\n return image", "def draw_boxes(self, image, boxes):\n return draw_boxes(image, boxes, self.labels)", "def bounding_boxes(self, detections):\n bboxes = []\n while 
len(detections) > 0:\n det = detections.pop(0)\n merging = True\n while merging:\n merging = False\n pointer = 0\n while pointer < len(detections):\n if self.get_distance(det, detections[pointer]) <= self.max_distance:\n det = self.merge_boxes(det, detections[pointer])\n merging = True\n detections.pop(pointer)\n else:\n pointer += 1\n if det[4] >= self.min_area:\n bboxes.append(det)\n return bboxes", "def draw_person_boxes(self, frame, boxes, probs):\n # convert color space for numpy\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # for all the boxes:\n for (box, prob) in zip(boxes, probs):\n \n # extract the properties of the box and text:\n (startX, startY, endX, endY) = box.astype(\"int\")\n label = \"{}: {:.2f}%\".format(\"Person\", prob * 100)\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_scale = 0.7\n thickness = 1\n text_size, _ = cv2.getTextSize(label, font, font_scale, thickness)\n text_w, text_h = text_size\n \n text_color_bg = (0,0,0) # black bg for text\n text_color = (255,255,255) # white text\n box_color = (255,0,0) # red box\n \n # draw the bb prediction on the frame\n cv2.rectangle(frame, (startX, startY), (endX, endY), box_color , 1)\n \n # include text:\n y = startY - text_h if startY - text_h > text_h else startY + text_h\n cv2.rectangle(frame, (startX, y - text_h), (startX + text_w, startY-1), text_color_bg, -1)\n cv2.putText(frame, label, (startX, y), font, font_scale, text_color, thickness)\n\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n return frame", "def draw_bbox(n):\n return drawBbox(named(n))", "def draw_boxes(image, bounds):\n draw = ImageDraw.Draw(image)\n if bounds[0].normalized_vertices:\n width = image.width\n height = image.height\n for i in range(len(bounds)):\n draw.polygon([\n bounds[i].normalized_vertices[0].x * width, bounds[i].normalized_vertices[0].y * height,\n bounds[i].normalized_vertices[1].x * width, bounds[i].normalized_vertices[1].y * height,\n bounds[i].normalized_vertices[2].x * width, bounds[i].normalized_vertices[2].y * height,\n bounds[i].normalized_vertices[3].x * width, bounds[i].normalized_vertices[3].y * height],\n None, colors[i % len(colors)])\n return image\n else:\n for i in range(len(bounds)):\n draw.polygon([\n bounds[i].vertices[0].x, bounds[i].vertices[0].y,\n bounds[i].vertices[1].x, bounds[i].vertices[1].y,\n bounds[i].vertices[2].x, bounds[i].vertices[2].y,\n bounds[i].vertices[3].x, bounds[i].vertices[3].y],\n None, colors[i % len(colors)])\n return image", "def boxes_stats(self):\n all_boxes = []\n nb_detections = []\n convexities = []\n all_ids = set()\n for image_id in self.dataset_handler.image_ids:\n masks, ids = self.dataset_handler.load_mask(image_id)\n all_ids = all_ids.union(set(ids))\n boxes = utils.extract_bboxes(masks)\n all_boxes.append(boxes)\n nb_detections.append(boxes.shape[0])\n for mask_idx in range(masks.shape[2]):\n mask = masks[:, :, mask_idx]\n props = regionprops(mask.astype(np.int8))[0]\n convexities.append(props.filled_area/props.convex_area)\n\n self.nb_classes = len(all_ids) + 1\n\n convexities = np.array(convexities)\n self.convexity_stats = stats.describe(convexities)\n\n nb_detections = np.array(nb_detections)\n self.nb_detections_stats = stats.describe(nb_detections)\n\n all_boxes = np.concatenate(all_boxes, axis=0)\n heights = all_boxes[:, 2] - all_boxes[:, 0]\n widths = all_boxes[:, 3] - all_boxes[:, 1]\n\n self.height_stats = stats.describe(heights)\n self.width_stats = stats.describe(widths)\n\n ratios = widths / heights\n self.ratio_stats = stats.describe(ratios)\n\n mean_pixel = 
[np.mean(img, axis=(0, 1))\n for img in self.dataset_handler.images]\n self.mean_pixel = np.mean(np.array(mean_pixel), axis=0)", "def drawdebugrects(self,image):\r\n \r\n if util.isgray(image):\r\n faceColor = 255 \r\n \"\"\"leftEyeColor = 255 \r\n rightEyeColor = 255 \r\n noseColor = 255 \r\n mouthColor = 25\"\"\"\r\n \r\n else:\r\n faceColor = (255,0,0) # white \r\n \"\"\"leftEyeColor = (0, 0, 255) # red \r\n rightEyeColor = (0, 255, 255) # yellow \r\n noseColor = (0, 255, 0) # green \r\n mouthColor = (255, 0, 0) # blue \"\"\" \r\n \r\n \r\n for face in self.faces:\r\n \r\n rects.outlinerect(image,face.facerect,facecolor)\r\n #rects.outlineRect(image, face.leftEyeRect, leftEyeColor) \r\n #rects.outlineRect(image, face.rightEyeRect,rightEyeColor) \r\n #rects.outlineRect(image, face.noseRect, noseColor) \r\n #rects.outlineRect(image, face.mouthRect, mouthColor)\r", "def draw_bounds():\n\n pass", "def draw_boxes(image, gt_boxes_norm, pre_boxes_norm):\n # Load Image\n image = (image * 255.0).astype(np.uint8)\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n #image = cv2.add(image,image)\n #image = cv2.bitwise_not(image)\n # Draw prediction boxes\n for pre_box_points in pre_boxes_norm:\n image_shape = np.flip(image.shape[0:2], axis=0)\n\n for pre_box_point_idx in range(len(pre_box_points)):\n\n pre_start_point = pre_box_points[pre_box_point_idx] * image_shape\n pre_end_point = pre_box_points[(pre_box_point_idx + 1) % 4] * image_shape\n\n pre_start_point = pre_start_point.astype(np.int32)\n pre_end_point = pre_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(pre_start_point),\n tuple(pre_end_point),\n (107,222,35), thickness=1)\n\n # Draw boxes if they exist\n if gt_boxes_norm is not None:\n for gt_box_points in gt_boxes_norm:\n for gt_box_point_idx in range(len(gt_box_points)):\n\n gt_start_point = gt_box_points[gt_box_point_idx] * image_shape\n gt_end_point = gt_box_points[(gt_box_point_idx + 1) % 4] * image_shape\n\n gt_start_point = gt_start_point.astype(np.int32)\n gt_end_point = gt_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(gt_start_point),\n tuple(gt_end_point),\n (0,0,205), thickness=1)\n\n return image", "def vis_detections(im, class_name, dets, bord, thet, fp, thresh=0.5):\n para_dict={\n 'left': 20,\n 'right': -20,\n 'front': 40,\n 'back': 0,\n 'resolution': 0.05\n }\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n im = im[:, :, (2, 1, 0)]\n if save_image:\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n f = open('bbox.txt', 'w')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n center = np.array([[(dets[i, 0]+dets[i, 2])/2],[(dets[i, 1]+dets[i, 3])/2]])\n theta = thet[i, 0]\n l = bord[i, 0]\n w = bord[i, 1]\n h = bord[i, 2]\n tz = bord[i, 3]\n # theta = fix_theta(theta, l, w, (bbox[2] - bbox[0])*para_dict['resolution'], (bbox[3] - bbox[1])*para_dict['resolution'])\n p1 = box_rot(l, w, theta)/para_dict['resolution'] + center\n p2 = p1.transpose()\n\n \n f.write('%f %f %f %f\\n' % (dets[i, 0], dets[i, 1], dets[i, 2], dets[i, 3]))\n\n fp.write(\"%s %f %f %f %f %f %f %f %f\\n\" % (class_name,\n para_dict['front']-center[1,0]*para_dict['resolution'],\n para_dict['left']-center[0,0]*para_dict['resolution'],\n tz,theta,l,w,h,score))\n\n if save_image:\n\n ax.add_patch(\n plt.Polygon(p2,edgecolor='red',linewidth=2,fill=False)\n )\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='yellow', linewidth=2)\n )\n ax.text(bbox[0], bbox[1] - 
2,\n '{:s} {:.3f} height {:.3f} tz {:.3f}'.format(class_name, score, h, tz),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n if save_image:\n ax.set_title(('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name,\n thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n f.close()", "def vis_detections(im, class_name, dets, thresh=0.5, video= None,fid=0):\n dirname = os.path.dirname(__file__)\n show_dir = os.path.join(dirname, '..', 'show/%s' % os.path.basename(video))\n # print(show_dir)\n if not os.path.exists(show_dir):\n os.makedirs(show_dir)\n\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n ax.set_title(('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name,\n thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n plt.savefig('%s/all_bboxes_%d.jpg' % (show_dir, fid))\n # plt.show()", "def draw_rects(image, face_locations):\n\n # Placeholder\n emotions = ['Poker'] * len(face_locations)\n\n # Convert to grayscale and extract faces\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n faces = [process_face(gray_image, coords)\n for coords in face_locations]\n emotions = [predict_emotion(face, model)\n for face in faces]\n\n # Convert to PIL image\n pil_image = Image.fromarray(image[:, :, ::-1])\n\n # Create a Pillow ImageDraw Draw instance to draw with\n draw = ImageDraw.Draw(pil_image)\n\n for (top, right, bottom, left), emotion in zip(face_locations, emotions):\n\n # Draw bounding box around face\n draw.rectangle(((left, top), (right, bottom)), outline=rect_color)\n\n # Write emotion in caption box\n caption_box_height = 0.1 * (bottom - top)\n fnt = ImageFont.truetype('app/static/gillsans.ttf',\n size=int(caption_box_height))\n textwidth, textheight = draw.textsize(emotion, font=fnt)\n textwidth += 0.25 * textwidth\n\n # Draw Caption box\n draw.rectangle(((left, bottom + caption_box_height),\n (left + textwidth, bottom)),\n fill=rect_color, outline=rect_color)\n\n # Draw text\n draw.text((left + 0.10 * textwidth,\n bottom + (0.05 * caption_box_height)),\n emotion, font=fnt, fill=(255, 255, 255, 255))\n\n del draw\n\n return pil_image", "def _visualize_boxes_and_labels_on_image(\n self,\n boxes,\n classes,\n scores,\n category_index,\n instance_masks=None,\n use_normalized_coordinates=False,\n max_boxes_to_draw=20,\n min_score_thresh=0.5):\n # Create a display string (and color) for every box location, group any boxes\n # that correspond to the same location.\n box_to_display_str_map = collections.defaultdict(list)\n box_to_color_map = collections.defaultdict(str)\n box_to_instance_masks_map = {}\n\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n for i in range(min(max_boxes_to_draw, boxes.shape[0])):\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n if instance_masks is not None:\n box_to_instance_masks_map[box] = instance_masks[i]\n display_str = ''\n if classes[i] in category_index.keys():\n class_name = 
category_index[classes[i]]['name']\n else:\n class_name = 'N/A'\n display_str = str(class_name)\n if not display_str:\n if scores is None:\n display_str = '?%'\n else:\n display_str = '{}%'.format(int(100*scores[i]))\n else:\n if scores is None:\n display_str = '{}: ?%'.format(display_str)\n else:\n display_str = '{}: {}%'.format(display_str, int(100*scores[i]))\n box_to_display_str_map[box].append(display_str)\n box_to_color_map[box] = self.STANDARD_COLORS[classes[i] % len(self.STANDARD_COLORS)]\n first = True\n mask = None\n # Draw all boxes onto image.\n for idx,(box, color) in enumerate(box_to_color_map.items()):\n ymin, xmin, ymax, xmax = box\n if instance_masks is not None:\n\n if self._shuffle:\n # draw mask for each object\n self._draw_mask_on_image(box_to_instance_masks_map[box]*(idx+1))\n else:\n # stack all masks and draw one big mask\n if first:\n first = False\n mask = box_to_instance_masks_map[box]*(idx+1)\n else:\n mask = np.bitwise_or(mask, box_to_instance_masks_map[box])\n\n self._draw_bounding_box_on_image(\n ymin,\n xmin,\n ymax,\n xmax,\n color=color,\n display_str_list=box_to_display_str_map[box],\n use_normalized_coordinates=use_normalized_coordinates)\n\n # Draw Masks on Image (only one color for all masks)\n if mask is not None and not self._shuffle:\n self._draw_mask_on_image(mask)", "def _visualize(self, unnorm_image, class_ids, scores, bounding_boxes):\n ax = utils.viz.plot_bbox(unnorm_image,\n bounding_boxes[0],\n scores[0],\n class_ids[0],\n class_names=self._network.classes)\n fig = plt.gcf()\n fig.set_size_inches(14, 14)\n plt.show()", "def _compute_bounding_box(self, points_2d):\n max_x = max(map(lambda point: int(point[0]), points_2d))\n min_x = min(map(lambda point: int(point[0]), points_2d))\n max_y = max(map(lambda point: int(point[1]), points_2d))\n min_y = min(map(lambda point: int(point[1]), points_2d))\n\n width = max_x - min_x + 1\n height = max_y - min_y + 1\n\n return [min_x, min_y, width, height]", "def return_bbox_image(self, image, bboxes, label, color):\n if bboxes:\n for obj in bboxes:\n image = self.draw_single_bbox(image, obj.position_xywh, label=label, color=color)\n\n return image", "def draw_bounding_box_on_image(image, ymin, xmin, ymax, xmax, color=(255, 0, 0), thickness=5):\n\n image_width = image.shape[1]\n image_height = image.shape[0]\n cv2.rectangle(image, (int(xmin), int(ymin)), (int(xmax), int(ymax)), color, thickness)", "def draw_boxes(image: np.ndarray, boxes: np.ndarray, box_classes: List[int],\n class_names: List[str], scores: List[float] = None):\n image = Image.fromarray(np.floor(image * 255 + 0.5).astype('uint8'))\n\n font = ImageFont.truetype(\n font='RictyDiminished-Regular.ttf',\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n colors = _get_colors_for_classes(len(class_names))\n\n for i, c in list(enumerate(box_classes)):\n box_class = class_names[c]\n box = boxes[i]\n if isinstance(scores, np.ndarray):\n score = scores[i]\n label = '{} {:.2f}'.format(box_class, score)\n else:\n label = '{}'.format(box_class)\n\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n print(label, (left, top), (right, bottom))\n\n if top - label_size[1] >= 0:\n text_origin = 
np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i], outline=colors[c])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n\n return np.array(image)", "def vis_bbox_opencv(img, bbox, thick=1):\n (x0, y0, w, h) = bbox\n x1, y1 = int(x0 + w), int(y0 + h)\n x0, y0 = int(x0), int(y0)\n cv2.rectangle(img, (x0, y0), (x1, y1), _GREEN, thickness=thick)\n return img", "def overlay_boxes(self, image, predictions):\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n )\n\n return image", "def overlay_boxes(self, image, predictions):\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n )\n\n return image", "def draw_ocr_group_rects(orig, new_horz, new_verz):\n print(len(new_horz) + len(new_verz),'groups')\n for i,tbox in enumerate(new_horz):\n cv2.rectangle(orig, tbox.p1,tbox.p2, [0,0,200])\n\n for i,tbox in enumerate(new_verz):\n cv2.rectangle(orig, tbox.p1,tbox.p2, [0,180,0])", "def apply_detection_results(image, masks, bboxes, class_ids, class_names, colors, scores=None):\n\n # Opacity of masks: 0.5\n opacity = 0.5\n\n result = image.astype(float)/255\n\n for detection_idx in range(masks.shape[2]):\n\n if not np.any(bboxes[detection_idx]):\n # Skip this instance. Has no bbox. 
Likely lost in image cropping.\n continue\n\n # Get the color in float form\n color = colors[class_names[class_ids[detection_idx]]]\n\n # Draw the segmentation mask\n mask = masks[:,:,detection_idx]\n alpha_mask = np.stack((mask, mask, mask), axis=2)\n alpha_mask = alpha_mask.astype(np.float) * opacity\n assert alpha_mask.shape == image.shape\n\n foreground = np.ones(image.shape, dtype=float) * color\n _background = cv2.multiply(1.0 - alpha_mask, result)\n _foreground = cv2.multiply(alpha_mask, foreground)\n\n result = cv2.add(_foreground, _background)\n\n # Draw the bounding box\n y1, x1, y2, x2 = bboxes[detection_idx]\n cv2.rectangle(result, (x1, y1), (x2, y2), color, thickness=1)\n\n # Caption time\n font = cv2.FONT_HERSHEY_SIMPLEX\n fontScale = 0.3\n lineType = 2\n offset_x_text = 2\n offset_y_text = -4\n label = class_names[class_ids[detection_idx]]\n caption = \"{} {:.3f}\".format(label, scores[detection_idx]) if scores.any() else label\n\n cv2.putText(result, caption, (x1 + offset_x_text, y2 + offset_y_text), fontFace=font, fontScale=fontScale,\n color=(1.0, 1.0, 1.0), lineType=lineType)\n\n result *= 255\n result = result.astype(np.uint8)\n\n return result", "def draw_bboxes_with_labels(img, bboxes, label_indices, probs, labels):\n colors = []\n for i in range(len(labels)):\n colors.append(tuple(np.random.choice(range(256), size=4)))\n image = tf.keras.preprocessing.image.array_to_img(img)\n width, height = image.size\n draw = ImageDraw.Draw(image)\n denormalized_bboxes = denormalize_bboxes(bboxes, height, width)\n for index, bbox in enumerate(denormalized_bboxes):\n y1, x1, y2, x2 = np.split(bbox, 4)\n width = x2 - x1\n height = y2 - y1\n if width <= 0 or height <= 0:\n continue\n label_index = int(label_indices[index])\n color = colors[label_index]\n label_text = \"{0} {1:0.3f}\".format(labels[label_index], probs[index])\n draw.text((x1 + 4, y1 + 2), label_text, fill=color)\n draw.rectangle((x1, y1, x2, y2), outline=color, width=3)\n #\n plt.figure()\n plt.imshow(image)\n plt.show()", "def draw_on(self, surface):\n for x, y in self.alive_cells():\n #size = (self.box_size, self.box_size)\n #position = (x * self.box_size, y * self.box_size)\n #thickness = 1\n pygame.draw.rect(surface, DARK_RED, (x * self.box_size, y * self.box_size,self.box_size, self.box_size ))", "def _boxes_coordinates(self,\n image,\n boxes,\n classes,\n scores,\n max_boxes_to_draw=20,\n min_score_thresh=.5):\n\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n number_boxes = min(max_boxes_to_draw, boxes.shape[0])\n final_boxes = []\n final_scores = []\n for i in range(number_boxes):\n if self.category_index[classes[i]]['name'] not in \\\n self.classes_to_detect:\n continue\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n ymin, xmin, ymax, xmax = box\n\n im_height, im_width, _ = image.shape\n left, right, top, bottom = [int(z) for z in\n (xmin * im_width, xmax * im_width,\n ymin * im_height,\n ymax * im_height)]\n\n final_boxes.append([top, left, bottom, right])\n final_scores.append(scores[i])\n return final_boxes, final_scores", "def draw_box_label(img, bbox_cv2, bbox_class=\"diver\", box_color=(0, 0, 255), show_label=True):\n if not bbox_cv2 or bbox_cv2==[]: \n return img\n \n img_h, img_w = img.shape[:2]\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_size = 0.5\n font_color = (0, 0, 0)\n left, right, top, bottom = handle_bad_corners(bbox_cv2[0], bbox_cv2[1], bbox_cv2[2], bbox_cv2[3], img_w, img_h)\n \n # Draw the bounding box and labels\n cv2.rectangle(img, 
(left, top), (right, bottom), box_color, 4) \n if show_label:\n # Draw a filled box on top of the bounding box (as the background for the labels)\n left1, top1, right1, _ = handle_bad_corners(left-2, top-40, right+2, bottom, img_w, img_h)\n cv2.rectangle(img, (left1, top1), (right1, top), box_color, -1, 1)\n # Output the labels that show the x and y coordinates of the bounding box center.\n text_label= bbox_class\n top2 = 0 if top<25 else top-25\n cv2.putText(img, text_label, (left, top2), font, font_size, font_color, 1, cv2.LINE_AA)\n text_xy= 'x='+str((left+right)/2)+' y='+str((top+bottom)/2)\n cv2.putText(img, text_xy, (left,top2+20), font, 0.4, font_color, 1, cv2.LINE_AA)\n \n return img", "def layout_bounding_boxes(canvas_x, canvas_y, canvas_width, line_height,\n space_widths, y_space, sizes):\n\n cur_x, cur_y = canvas_x, canvas_y\n cur_size = 0\n cur_line = 0\n boxes = []\n line_breaks = []\n line_poss = []\n line_poss.append((cur_x, cur_y))\n while cur_size < len(sizes):\n sz = sizes[cur_size]\n if cur_x + sz[0] > canvas_width + canvas_x:\n cur_line += 1\n cur_y = canvas_y - cur_line * (y_space + line_height)\n cur_x = canvas_x\n line_poss.append((cur_x, cur_y))\n line_breaks.append(cur_size)\n else:\n boxes.append((cur_x, cur_y, sz[0], sz[1]))\n cur_x += sz[0]\n if cur_size < len(space_widths):\n cur_x += space_widths[cur_size]\n cur_size += 1\n return boxes, line_breaks, line_poss", "def draw_boxes(image, boxes, class_names, scores, min_score=0.65):\n colors = list(ImageColor.colormap.values())\n\n try:\n font = ImageFont.truetype(\"/usr/share/fonts/truetype/liberation/LiberationSansNarrow-Regular.ttf\",\n 25)\n except IOError:\n print(\"Font not found, using default font.\")\n font = ImageFont.load_default()\n\n for i in range(boxes.shape[0]):\n decoded_class = class_names[i].decode(\"ascii\")\n if decoded_class==\"Car\" or decoded_class==\"Land vehicle\" or decoded_class==\"Truck\" or decoded_class==\"Bus\" or decoded_class==\"Van\" or decoded_class==\"Ambulance\" or decoded_class==\"Motorcycle\":\n if scores[i] >= min_score:\n ymin, xmin, ymax, xmax = tuple(boxes[i])\n display_str = \"{}: {}%\".format(class_names[i].decode(\"ascii\"), int(100 * scores[i]))\n color = colors[hash(class_names[i]) % len(colors)]\n image_pil = Image.fromarray(np.uint8(image)).convert(\"RGB\")\n draw_bounding_box_on_image(\n image_pil,\n ymin,\n xmin,\n ymax,\n xmax,\n color,\n font,\n display_str_list=[display_str])\n np.copyto(image, np.array(image_pil))\n return image", "def draw_bounding_box( y1, x1, y2, x2, \n img, \n thickness=4, \n fontsize=15,\n outlineColor=(255, 255, 0),\n textColor=(255, 255, 0),\n display_str=() ):\n\n img = Image.fromarray( img )\n draw = ImageDraw.Draw( img )\n\n for x in range( 0, thickness ):\n draw.rectangle( [(x1-x, y1-x), (x2-x, y2-x)], outline=outlineColor )\n\n for i in range(1, len(fonts)):\n if fonts[i].fontformat == 'TrueType':\n for j in range(1,len(fonts[i].style)):\n if fonts[i].style[j][1] == 'Regular':\n absolute_path = fonts[i].file\n break;\n try:\n absolute_path\n except NameError:\n font = ImageFont.load_default()\n else:\n font = ImageFont.truetype(absolute_path, fontsize)\n\n draw.text( (x1, y1), display_str, font=font, fill=textColor )\n\n return numpy.array( img )", "def boundingRect(cnt):\n\tx, y, w, h = cv2.boundingRect(cnt)\n\treturn {\"x\":x, \"y\": y, \"w\": w, \"h\": h}", "def vis_gt_boxes(self):\n import cv2\n num_images = len(self.gt)\n for i in range(num_images):\n im = cv2.imread(self.image_path_at(i))\n im = im[:, :, (2, 1, 0)]\n 
plt.cla()\n plt.imshow(im)\n gt_image = self.gt[i]\n for j in range(len(gt_image['boxes'])):\n bbox = gt_image['boxes'][j]\n c = gt_image['gt_classes'][j] \n plt.gca().add_patch(plt.Rectangle((float(bbox[0]), float(bbox[1])),\n float(bbox[2]) - float(bbox[0]),\n float(bbox[3]) - float(bbox[1]), fill=False,\n edgecolor='r', linewidth=3))\n x = (bbox[0] + bbox[2])/2\n y = bbox[1]\n s = '{}'.format(self.classes[c])\n plt.text(x, y, s, fontsize=14,horizontalalignment='center',weight='bold',backgroundcolor=(1,1,1))\n plt.show()", "def draw_boxes_v2(img_name, img, boxes, labels, scores, obj_list=None, figsize=(15,15)):\n fig,ax = plt.subplots(figsize=figsize)\n\n if isinstance(img, torch.Tensor):\n img = img.numpy().squeeze().transpose((1,2,0))\n # Display the image\n ax.imshow(img)\n\n # Create a Rectangle patch\n for box, label, score in zip(boxes, labels, scores):\n label = int(label)\n color = STANDARD_COLORS[label]\n x,y,w,h = box\n rect = patches.Rectangle((x,y),w,h,linewidth=1.5,edgecolor = color,facecolor='none')\n score = np.round(score, 3)\n if obj_list is not None:\n text = '{}: {}'.format(obj_list[label], str(score))\n else:\n text = '{}: {}'.format(label, str(score))\n plt.text(x, y-3,text, color = color, fontsize=15)\n # Add the patch to the Axes\n ax.add_patch(rect)\n plt.axis('off')\n plt.savefig(img_name,bbox_inches='tight')\n plt.close()", "def draw_all_detection(im_array, detections, class_names, scale):\r\n import cv2\r\n import random\r\n color_white = (255, 255, 255)\r\n im = image.transform_inverse(im_array, config.PIXEL_MEANS)\r\n # change to bgr\r\n im = cv2.cvtColor(im, cv2.cv.CV_RGB2BGR)\r\n for j, name in enumerate(class_names):\r\n if name == '__background__':\r\n continue\r\n color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256)) # generate a random color\r\n dets = detections[j]\r\n for det in dets:\r\n bbox = det[:4] * scale\r\n score = det[-1]\r\n bbox = map(int, bbox)\r\n cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)\r\n cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),\r\n color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)\r\n return im", "def vis_bbox(im, bbox):\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n\n plt.axis('off')\n plt.tight_layout()\n plt.draw()", "def boundingBox(self):\n pmodel = (glm.vec3(1, -self.y_sign, 0)\n * self.model.pos * self.transform.scale)\n x, y, _ = self.transform.pos + pmodel\n y += -self.y_sign * self.font.table['ascent'] * self.transform.scale[1]\n return x, y, self.pixwidth(), self.pixheight()", "def change_bbox_color(img, boxes, p1, p2):\n points = np.unique(p1 + p2)\n\n for i in points:\n x1, y1, w, h = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]\n x2, y2 = x1+w, y1+h\n _ = cv2.rectangle(img, (x1, y1), (x2, y2), (0,0,255), 2) \n\n return img", "def draw_pic(img_path: str, bbox: List) -> None:\n img = cv2.imread(img_path)\n\n for v in bbox:\n x, y, w, h = v[2]\n cv2.rectangle(\n img, (int(x), int(y)), (int(x) + int(w), int(y) + int(h)), (255, 0, 0), 2\n )\n\n figure(num=None, figsize=(20, 15))\n plt.imshow(img)\n plt.show()", "def draw_boxes(image, boxes, class_names, scores, max_boxes=10, min_score=0.1):\n colors = list(ImageColor.colormap.values())\n\n try:\n font = 
ImageFont.truetype(\"/usr/share/fonts/truetype/liberation/LiberationSansNarrow-Regular.ttf\",\n 25)\n except IOError:\n print(\"Font not found, using default font.\")\n font = ImageFont.load_default()\n\n for i in range(min(boxes.shape[0], max_boxes)):\n if scores[i] >= min_score:\n ymin, xmin, ymax, xmax = tuple(boxes[i])\n display_str = \"{}: {}%\".format(class_names[i].decode(\"ascii\"),\n int(100 * scores[i]))\n color = colors[hash(class_names[i]) % len(colors)]\n image_pil = Image.fromarray(np.uint8(image)).convert(\"RGB\")\n draw_bounding_box_on_image(\n image_pil,\n ymin,\n xmin,\n ymax,\n xmax,\n color,\n font,\n display_str_list=[display_str])\n np.copyto(image, np.array(image_pil))\n return image", "def visualize_detections(detection_result,\n label_map,\n font_path=None,\n font_size=10,\n line_width=1,\n score_thresh=.5,\n max_num_viz=None,\n color_per_instance_mask=True):\n image = np.copy(detection_result['image'])\n height, width = image.shape[:2]\n\n scores = detection_result['scores']\n boxes = detection_result['boxes']\n classes = detection_result['classes']\n\n detection_indices = scores >= score_thresh\n scores = scores[detection_indices]\n boxes = boxes[detection_indices]\n classes = classes[detection_indices]\n\n masks = None\n if 'masks' in detection_result:\n masks = detection_result['masks']\n masks = masks[detection_indices]\n\n num_detections = scores.shape[0]\n if max_num_viz is not None:\n num_detections = np.minimum(num_detections, max_num_viz)\n \n color_map = get_color_map(len(label_map) + 1)\n\n for i in range(num_detections):\n ymin, xmin, ymax, xmax = boxes[i].astype(np.int32)\n ymin = np.maximum(0, ymin)\n xmin = np.maximum(0, xmin)\n ymax = np.minimum(height - 1, ymax)\n xmax = np.minimum(width - 1, xmax)\n color = color_map[classes[i]]\n if color_per_instance_mask:\n mask_color = (color_map[i] + 10) % color_map.shape[0]\n if (mask_color[0] < 50).all(): \n mask_color = (color_map[i] + 15) % color_map.shape[0]\n else:\n mask_color = color\n\n image[ymin : ymax, \n np.maximum(xmin - line_width // 2, 0) : \n np.minimum(xmin + line_width - line_width // 2, width)] = color\n image[ymin : ymax, \n np.maximum(xmax - line_width // 2, 0) : \n np.minimum(xmax + line_width - line_width // 2, width)] = color\n image[np.maximum(ymin - line_width // 2, 0) : \n np.minimum(ymin + line_width - line_width // 2, height), \n xmin : xmax] = color\n image[np.maximum(ymax - line_width // 2, 0) : \n np.minimum(ymax + line_width - line_width // 2, height), \n xmin : xmax] = color\n\n detection_label_text = '%s: %.2f' % (\n label_map[classes[i]], int(scores[i] * 100) / 100)\n\n if font_path is not None: \n font = ImageFont.truetype(font_path, size=font_size)\n else:\n font = ImageFont.load_default()\n text_width, text_height = font.getsize(detection_label_text)\n\n x = xmin\n y = np.maximum(ymin - text_height, 0)\n \n image[y : np.minimum(y + text_height, height), \n x : np.minimum(x + text_width, width)] = color \n\n img_obj = Image.fromarray(image)\n draw = ImageDraw.Draw(img_obj)\n draw.text((x, y), detection_label_text, TEXT_COLOR, font=font)\n image = np.array(img_obj)\n\n if masks is not None:\n draw_mask(image, masks[i], color=mask_color)\n\n return image", "def boundingRectPoints(cnt):\n\tx, y, w, h = cv2.boundingRect(cnt)\n\tfirst = (x, y)\n\tend = (x+w, y+h)\n\treturn {\"top-left\": first, \"bottom-right\":end}", "def draw_bboxes(images, # type: thelper.typedefs.InputType\n preds=None, # type: Optional[thelper.typedefs.AnyPredictionType]\n bboxes=None, # type: 
Optional[thelper.typedefs.AnyTargetType]\n color_map=None, # type: Optional[thelper.typedefs.ClassColorMap]\n redraw=None, # type: Optional[thelper.typedefs.DrawingType]\n block=False, # type: Optional[bool]\n min_confidence=0.5, # type: thelper.typedefs.Number\n class_map=None, # type: Optional[thelper.typedefs.ClassIdType, AnyStr]\n **kwargs # type: Any\n ):\n def get_class_name(_bbox):\n if isinstance(class_map, dict):\n return class_map[_bbox.class_id]\n elif bbox.task is not None:\n return _bbox.task.class_names[_bbox.class_id]\n else:\n raise RuntimeError(\"could not find class name from either class mapping or bbox task definition\")\n\n image_list = [get_displayable_image(images[batch_idx, ...]) for batch_idx in range(images.shape[0])]\n if color_map is not None and isinstance(color_map, dict):\n assert len(color_map) <= 256, \"too many indices for uint8 map\"\n color_map_new = np.zeros((256, 3), dtype=np.uint8)\n for idx, val in color_map.items():\n color_map_new[idx, ...] = val\n color_map = color_map_new.tolist()\n nb_imgs = len(image_list)\n grid_size_x, grid_size_y = nb_imgs, 1 # all images on one row, by default (add gt and preds as extra rows)\n box_thickness = thelper.utils.get_key_def(\"box_thickness\", kwargs, default=2, delete=True)\n font_thickness = thelper.utils.get_key_def(\"font_thickness\", kwargs, default=1, delete=True)\n font_scale = thelper.utils.get_key_def(\"font_scale\", kwargs, default=0.4, delete=True)\n if preds is not None:\n assert len(image_list) == len(preds)\n for preds_list, image in zip(preds, image_list):\n for bbox_idx, bbox in enumerate(preds_list):\n assert isinstance(bbox, thelper.data.BoundingBox), \"unrecognized bbox type\"\n if bbox.confidence is not None and bbox.confidence < min_confidence:\n continue\n color = get_bgr_from_hsl(bbox_idx / len(preds_list) * 360, 1.0, 0.5) \\\n if color_map is None else color_map[bbox.class_id]\n conf = \"\"\n if thelper.utils.is_scalar(bbox.confidence):\n conf = f\" ({bbox.confidence:.3f})\"\n elif isinstance(bbox.confidence, (list, tuple, np.ndarray)):\n conf = f\" ({bbox.confidence[bbox.class_id]:.3f})\"\n draw_bbox(image, bbox.top_left, bbox.bottom_right, f\"{get_class_name(bbox)} {conf}\",\n color, box_thickness=box_thickness, font_thickness=font_thickness, font_scale=font_scale)\n if bboxes is not None:\n assert len(image_list) == len(bboxes), \"mismatched bboxes list and image list sizes\"\n clean_image_list = [get_displayable_image(images[batch_idx, ...]) for batch_idx in range(images.shape[0])]\n for bboxes_list, image in zip(bboxes, clean_image_list):\n for bbox_idx, bbox in enumerate(bboxes_list):\n assert isinstance(bbox, thelper.data.BoundingBox), \"unrecognized bbox type\"\n color = get_bgr_from_hsl(bbox_idx / len(bboxes_list) * 360, 1.0, 0.5) \\\n if color_map is None else color_map[bbox.class_id]\n draw_bbox(image, bbox.top_left, bbox.bottom_right, f\"GT: {get_class_name(bbox)}\",\n color, box_thickness=box_thickness, font_thickness=font_thickness, font_scale=font_scale)\n grid_size_y += 1\n image_list += clean_image_list\n return draw_images(image_list, redraw=redraw, window_name=\"detections\", block=block,\n grid_size_x=grid_size_x, grid_size_y=grid_size_y, **kwargs)", "def draw_boxs(img,boxs,width=3,color=(0,0,255)):\n box_img = copy.deepcopy(img)\n for i in range(boxs.shape[0]):\n # x1,y1,x2,y2=boxs[i]\n x1 = boxs[i][0]\n y1 = boxs[i][1]\n x2 = boxs[i][2]\n y2 = boxs[i][3]\n p1 = (int(round(x1)),int(round(y1)))\n p2 = (int(round(x2)),int(round(y2)))\n cv2.rectangle(box_img, p1, p2, 
color, width)\n\n return box_img", "def drawBox (self, left, top, width, height, colour):\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n\r\n cols = [left, left + width - 1]\r\n rows = [top, top + height - 1]\r\n \r\n x0 = max ((0,left))\r\n x1 = min ((cols[1]+1, w))\r\n y0 = max ((0,top))\r\n y1 = min ((rows [1]+1, h))\r\n\r\n # rows\r\n\r\n for r in rows:\r\n if r >= 0 and r < h:\r\n row = self.image [r]\r\n for x in range (x0, x1):\r\n row [x] = colour\r\n\r\n # columns\r\n \r\n for y in range (y0, y1):\r\n row = self.image [y]\r\n for c in cols:\r\n if c >= 0 and c < w :\r\n row [c] = colour" ]
[ "0.77598673", "0.7638101", "0.7510348", "0.72977304", "0.7150563", "0.7103931", "0.7101873", "0.7083905", "0.7020769", "0.6993739", "0.6974172", "0.69637305", "0.6859061", "0.68521667", "0.68319523", "0.6830371", "0.6809837", "0.6805064", "0.67750585", "0.6774759", "0.6742592", "0.6698867", "0.6694392", "0.66835654", "0.66801983", "0.66625834", "0.6661356", "0.66427153", "0.659461", "0.6586955", "0.6567837", "0.6551023", "0.6546257", "0.6514258", "0.65044916", "0.65013444", "0.6500802", "0.64951575", "0.64715326", "0.64519966", "0.64472365", "0.6444148", "0.64412856", "0.6440102", "0.64270455", "0.6406803", "0.64054507", "0.63843167", "0.6383763", "0.63735634", "0.63592166", "0.63563603", "0.6342475", "0.6334032", "0.6333849", "0.6330339", "0.63263994", "0.63135904", "0.63080966", "0.630628", "0.6274095", "0.62581444", "0.62436867", "0.6235422", "0.62297034", "0.62277484", "0.62219393", "0.62156415", "0.621548", "0.62116414", "0.62086546", "0.6207838", "0.61593467", "0.61562574", "0.61470073", "0.6145117", "0.61321", "0.61321", "0.6126131", "0.6123052", "0.6116892", "0.6110017", "0.61038256", "0.61000854", "0.6098559", "0.60982865", "0.6098114", "0.6089042", "0.60876006", "0.60807675", "0.6080198", "0.6075498", "0.6075366", "0.6066647", "0.60661393", "0.60643333", "0.6057531", "0.60554594", "0.6051775", "0.60476446", "0.6040261" ]
0.0
-1
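Nearly every negative in the record above repeats the same OpenCV idiom: measure the label with cv2.getTextSize, paint a filled background rectangle sized to that text, then draw the text and the bounding box itself. A minimal self-contained sketch of that shared pattern follows; the function and variable names are illustrative and do not come from any snippet above.

import cv2
import numpy as np

def draw_labeled_box(img, box, label, color=(0, 255, 0)):
    # box is (x1, y1, x2, y2) in pixel coordinates
    x1, y1, x2, y2 = [int(v) for v in box]
    cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
    # Size the label background from the rendered text extent
    (tw, th), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    y_text = max(y1, th + baseline)  # keep the label inside the image
    cv2.rectangle(img, (x1, y_text - th - baseline), (x1 + tw, y_text), color, cv2.FILLED)
    cv2.putText(img, label, (x1, y_text - baseline), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 0, 0), 1, cv2.LINE_AA)
    return img

# Usage: draw one detection on a blank canvas
canvas = np.zeros((200, 300, 3), dtype=np.uint8)
draw_labeled_box(canvas, (40, 60, 220, 160), "car: 0.91")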
Test a region proposal network on an image dataset
def test_imdb(net, imdb, anchors): output_dir = get_output_dir(imdb, net) cache_file = os.path.join(output_dir, 'res_boxes.pkl') # load cache result boxes (filtered) if os.path.exists(cache_file): with open(cache_file, 'rb') as f: proposal_boxes = cPickle.load(f) print 'load res boxes from \'{}\''.format(cache_file) return proposal_boxes if not os.path.exists(output_dir): os.makedirs(output_dir) print 'Generating proposal boxes by rpn model...' proposal_boxes = test_net(net, imdb, anchors) print 'Get proposal boxes done!' print 'Current NMS configuration:' print NMS_CONFIG expand_val = lambda boxes: np.array([boxes[:,0] - boxes[:,2], boxes[:,1] - boxes[:,3], boxes[:,2] - boxes[:,0], boxes[:,3] - boxes[:,1], np.zeros(boxes.shape[0])]).T * EXPAND_RATIO # filter boxes print 'Filtering proposal boxes...' for i in xrange(len(proposal_boxes)): proposal_boxes[i] = boxes_filter(proposal_boxes[i], PRE_NMS_TOPN=NMS_CONFIG['PRE_NMS_TOPN'], NMS_THRESH=NMS_CONFIG['NMS_THRESH'], POST_NMS_TOPN=NMS_CONFIG['POST_NMS_TOPN'], CONF_THRESH=CONF_THRESH, USE_GPU=NMS_CONFIG['USE_GPU']) # expand bounding box if len(proposal_boxes[i]) > 0: proposal_boxes[i] = proposal_boxes[i] + expand_val(proposal_boxes[i]) print 'filter proposal box: {:d}/{:d}'.format(i+1, len(proposal_boxes)) print 'Filter proposal boxes done!' # save file with open(cache_file, 'wb') as f: cPickle.dump(proposal_boxes, f, cPickle.HIGHEST_PROTOCOL) print 'save result boxes to `{:s}`'.format(cache_file) return proposal_boxes
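The boxes_filter helper that test_imdb calls is not included in this record, so its exact behavior is an assumption here; the standard greedy non-maximum suppression it presumably wraps (sort proposals by score, keep the best box, suppress boxes that overlap it too much) can be sketched as follows.

import numpy as np

def nms(dets, thresh):
    # dets: (N, 5) array of [x1, y1, x2, y2, score]; returns indices to keep
    x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # Intersection of the current best box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # Drop every box whose IoU with the kept box exceeds the threshold
        order = order[1:][iou <= thresh]
    return keep

Note that the expand_val lambda in test_imdb then grows each surviving box outward by EXPAND_RATIO times its own width and height (the per-box offset works out to (-r*w, -r*h, +r*w, +r*h, 0), so the score column is left unchanged) before the filtered boxes are cached.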
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_net(args, dataset_name, proposal_file, output_dir, ind_range=None, gpu_id=0, early_stop=False):\n # print('test_net')\n roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(dataset_name, proposal_file, ind_range)\n model = initialize_model_from_cfg(args, gpu_id=gpu_id)\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES\n all_boxes = {}\n\n timers = defaultdict(Timer)\n \n \n\n\n if 'train' in dataset_name:\n if ind_range is not None:\n det_name = 'discovery_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'discovery.pkl'\n else:\n if ind_range is not None:\n det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'detections.pkl'\n \n det_file = os.path.join(output_dir, det_name)\n if os.path.exists(det_file):\n print('the file', det_file, 'exists. I am loading detections from it...')\n return load_object(det_file)['all_boxes']\n\n for i, entry in enumerate(roidb):\n if early_stop and i > 10: break\n\n box_proposals = entry['boxes']\n if len(box_proposals) == 0:\n continue\n \n im = cv2.imread(entry['image'])\n # print(entry['image'])\n cls_boxes_i = im_detect_all(model, im, box_proposals, timers)\n\n all_boxes[entry['image']] = cls_boxes_i\n\n if i % 10 == 0: # Reduce log file size\n ave_total_time = np.sum([t.average_time for t in timers.values()])\n eta_seconds = ave_total_time * (num_images - i - 1)\n eta = str(datetime.timedelta(seconds=int(eta_seconds)))\n \n det_time = (timers['im_detect_bbox'].average_time)\n \n logger.info(('im_detect: range [{:d}, {:d}] of {:d}:{:d}/{:d} {:.3f}s (eta: {})').format(\n start_ind + 1, end_ind, total_num_images, start_ind + i + 1, start_ind + num_images, det_time, eta))\n\n cfg_yaml = yaml.dump(cfg)\n\n save_object(\n dict(\n all_boxes=all_boxes,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n return all_boxes", "def test():\n\n # load image and adjust its format\n if MEMORY_CACHE:\n test_input = dataset[0]['file']\n oriImg = test_input.byte().permute((1, 2, 0)).numpy() # B,G,R order\n else:\n oriImg = cv2.imread(dataset[0]['file']) # B,G,R order\n test_input = torch.from_numpy(oriImg).permute((2, 0, 1)).float()\n \n # transfer data on GPU on demand\n if CUDA:\n test_input = test_input.cuda()\n\n # perform prediction\n net.eval()\n with torch.no_grad():\n result = net(test_input.unsqueeze(0))[0]\n\n print(result)\n\n # draw rectangles and its class\n img = cv2.cvtColor(oriImg, cv2.COLOR_BGR2RGB)\n for box, label, score in zip(result['boxes'], result['labels'], result['scores']):\n # if score > 0.5:\n if label < len(orig_labels):\n img = cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 3)\n img = cv2.putText(img, '{}: {:.0%}'.format(orig_labels[label], score), (box[0] + 5, box[3] - 5), cv2.FONT_HERSHEY_SIMPLEX, .7, (0, 255, 0), 2, cv2.LINE_AA)\n plt.imshow(img)\n plt.axis('off')\n plt.show()", "def setUp(self):\n img_path = osp.join(osp.dirname(__file__), '../../data/gray.jpg')\n self.results = {\n 'img_path':\n img_path,\n 'img_shape': (300, 400),\n 'instances': [{\n 'bbox': [0, 0, 10, 20],\n 'bbox_label': 1,\n 'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],\n 'ignore_flag': 0\n }, {\n 'bbox': [10, 10, 110, 120],\n 'bbox_label': 2,\n 'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],\n 'ignore_flag': 0\n }, {\n 'bbox': [50, 50, 60, 80],\n 'bbox_label': 2,\n 'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],\n 'ignore_flag': 1\n }]\n }", "def main():\n\n dir_path 
=r'/Users/dustin/CS/projects/ship_detector/data/ships-in-satellite-imagery/shipsnet/'\n\n data_array, label_array = read_images(dir_path)\n\n array_info(data_array, label_array)\n\n image_info(data_array[0,:], plot_image=False)\n\n split_ratios = [0.8, 0.1, 0.1] #splitting the dataset into 80% train, 10% dev, 10% test\n\n X_train, X_dev, X_test, Y_train, Y_dev, Y_test = dataset_split(data_array, label_array, split_ratios)", "def test(self, img_path):\n import cv2 \n\n self.load_data_test(path=img_path)\n self.C.horizontal_flips = False\n self.C.vertical_flips = False\n self.C.rotate_90 = False\n\n st = time.time()\n\n from .utils.data_generators import format_img_size\n from .utils.data_generators import format_img_channels\n from .utils.data_generators import format_img\n from .utils.data_generators import get_real_coordinates\n\n if self.cnn_name == 'vgg16' or self.cnn_name == 'vgg19':\n num_feature = 512\n else:\n num_feature = 1024 # any other convNet\n \n input_shape_img = (None, None, 3)\n input_shape_features = (None, None, num_feature)\n\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(self.C.num_roi, 4))\n feature_map_input = Input(shape=input_shape_features)\n\n # define the base network\n shared_layers = self.cnn_model.nn_base(img_input, trainable=True)\n\n # define the RPN, built on the base layers\n num_anchors = len(self.C.anchor_scales) * len(self.C.anchor_ratios)\n rpn_layers = self.region_proposal_net(shared_layers, num_anchors)\n classifier = self.classifier(feature_map_input, \n self.cnn_model.classifier_layers, \n roi_input, \n self.C.num_roi, \n num_class=len(self.class_mapping), \n trainable=True)\n\n model_rpn = Model(img_input, rpn_layers)\n model_classifier_only = Model([feature_map_input, roi_input], classifier)\n model_classifier = Model([feature_map_input, roi_input], classifier)\n\n print('Loading weights from {}'.format(self.C.model_path))\n model_rpn.load_weights(self.C.model_path, by_name=True)\n model_classifier.load_weights(self.C.model_path, by_name=True)\n\n model_rpn.compile(optimizer='sgd', loss='mse')\n model_classifier.compile(optimizer='sgd', loss='mse')\n\n for i in range(len(self.test_images)):\n img = cv2.imread(self.test_images[i])\n X, ratio = format_img(img, self.C)\n X = np.transpose(X, (0, 2, 3, 1))\n\n # get the feature maps and output from the RPN\n [Y1, Y2, F] = model_rpn.predict(X)\n\n R = roi_helpers.rpn_to_roi(Y1, Y2, self.C, K.image_data_format(), overlap_thresh=0.7)\n\n # convert from (x1,y1,x2,y2) to (x,y,w,h)\n R[:, 2] -= R[:, 0]\n R[:, 3] -= R[:, 1]\n\n # apply the spatial pyramid pooling to the proposed regions\n bboxes = {}\n probs = {}\n\n for jk in range(R.shape[0] // self.C.num_roi+1):\n ROIs = np.expand_dims(R[self.C.num_roi*jk:self.C.num_roi*(jk+1), :], axis=0)\n if ROIs.shape[1] == 0:\n break\n\n if jk == R.shape[0] // self.C.num_roi:\n # pad R\n curr_shape = ROIs.shape\n target_shape = (curr_shape[0], self.C.num_roi, curr_shape[2])\n ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)\n ROIs_padded[:, :curr_shape[1], :] = ROIs\n ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]\n ROIs = ROIs_padded\n\n [P_cls, P_regr] = model_classifier_only.predict([F, ROIs])\n\n for ii in range(P_cls.shape[1]):\n if np.max(P_cls[0, ii, :]) < self.C.bbox_threshold or \\\n np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):\n continue\n\n cls_name = self.class_mapping[np.argmax(P_cls[0, ii, :])]\n if cls_name not in bboxes:\n bboxes[cls_name] = []\n probs[cls_name] = []\n\n (x, y, w, h) = ROIs[0, ii, :]\n cls_num 
= np.argmax(P_cls[0, ii, :])\n try:\n (tx, ty, tw, th) = P_regr[0, ii, 4*cls_num:4*(cls_num+1)]\n tx /= C.class_regress_std[0]\n ty /= C.class_regress_std[1]\n tw /= C.class_regress_std[2]\n th /= C.class_regress_std[3]\n x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)\n except:\n pass\n \n bboxes[cls_name].append([self.C.stride*x, \n self.C.stride*y, \n self.C.stride*(x+w), \n self.C.stride*(y+h)])\n probs[cls_name].append(np.max(P_cls[0, ii, :]))\n\n all_detections = []\n\n for key in bboxes:\n bbox = np.array(bboxes[key])\n new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, \n np.array(probs[key]), overlap_thresh=0.5)\n \n for jk in range(new_boxes.shape[0]):\n (x1, y1, x2, y2) = new_boxes[jk,:]\n (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)\n\n cv2.rectangle(img,(real_x1, real_y1), \n (real_x2, real_y2), \n (int(self.class_to_color[key][0]), \n int(self.class_to_color[key][1]), \n int(self.class_to_color[key][2])),\n 2)\n\n textLabel = '%s: %.3f' % (key, new_probs[jk])\n all_detections.append((key, new_probs[jk]))\n\n (retval,baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1)\n text_org = (real_x1+10, real_y1+20)\n\n cv2.rectangle(img, (text_org[0], text_org[1]+baseLine), \n (text_org[0]+retval[0]+10, text_org[1]-retval[1]-10), \n (0, 0, 0), 2)\n cv2.rectangle(img, (text_org[0],text_org[1]+baseLine), \n (text_org[0]+retval[0]+10, text_org[1]-retval[1]-10), \n (255, 255, 255), -1)\n cv2.putText(img, textLabel, text_org, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)\n\n print('Elapsed time = {}'.format(time.time() - st))\n print(self.test_images[i], all_detections)\n if all_detections:\n cv2.imwrite(self.test_images_bbox[i], img)", "def __test_region(self, bk):\n for arg in self.args['region']:\n ds = ArgoDataFetcher(backend=bk).region(arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True", "def im_detect(net, target_data,im_data, im_info, features_given=True):\n\n cls_prob, rois = net(target_data, im_data, im_info,\n features_given=features_given)\n scores = cls_prob.data.cpu().numpy()[0,:,:]\n zs = np.zeros((scores.size, 1))\n scores = np.concatenate((zs,scores),1)\n boxes = rois.data.cpu().numpy()[0,:, :]\n\n return scores, boxes", "def im_detect(net, im, boxes=None):\n blobs, im_scales = _get_blobs(im, boxes)\n\n # When mapping from image ROIs to feature map ROIs, there's some aliasing\n # (some distinct image ROIs get mapped to the same feature ROI).\n # Here, we identify duplicate feature ROIs, so we only compute features\n # on the unique subset.\n if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:\n v = np.array([1, 1e3, 1e6, 1e9, 1e12])\n hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)\n _, index, inv_index = np.unique(hashes, return_index=True,\n return_inverse=True)\n blobs['rois'] = blobs['rois'][index, :]\n boxes = boxes[index, :]\n\n if cfg.TEST.HAS_RPN:\n im_blob = blobs['data']\n blobs['im_info'] = np.array(\n [[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],\n dtype=np.float32)\n\n # reshape network inputs\n net.blobs['data'].reshape(*(blobs['data'].shape))\n if cfg.TEST.HAS_RPN:\n net.blobs['im_info'].reshape(*(blobs['im_info'].shape))\n else:\n net.blobs['rois'].reshape(*(blobs['rois'].shape))\n\n # do forward\n forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}\n if cfg.TEST.HAS_RPN:\n forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)\n else:\n forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)\n \n 
blobs_out = net.forward(**forward_kwargs)\n \n if cfg.TEST.HAS_RPN:\n assert len(im_scales) == 1, \"Only single-image batch implemented\"\n rois = net.blobs['rois'].data.copy()\n # unscale back to raw image space\n boxes = rois[:, 1:5] / im_scales[0]\n\n if cfg.TEST.SVM:\n # use the raw scores before softmax under the assumption they\n # were trained as linear SVMs\n scores = net.blobs['cls_score'].data\n else:\n # use softmax estimated probabilities\n scores = blobs_out['cls_prob']\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = blobs_out['bbox_pred']\n pred_boxes = bbox_transform_inv(boxes, box_deltas)\n pred_boxes = clip_boxes(pred_boxes, im.shape)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:\n # Map scores and predictions back to the original set of boxes\n scores = scores[inv_index, :]\n pred_boxes = pred_boxes[inv_index, :]\n\n return scores, pred_boxes", "def demo(net, image_name, classes):\n\n # Load pre-computed Selected Search object proposals\n # box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',image_name + '_boxes.mat')\n test_mats_path = '/home/tanshen/fast-rcnn/data/kaggle/test_bbox'\n box_file = os.path.join(test_mats_path ,image_name + '_boxes.mat')\n obj_proposals = sio.loadmat(box_file)['boxes']\n\n # Load the demo image\n test_images_path = '/home/tanshen/fast-rcnn/data/kaggle/ImagesTest'\n # im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')\n im_file = os.path.join(test_images_path, image_name + '.jpg')\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im, obj_proposals)\n timer.toc()\n # print ('Detection took {:.3f}s for '\n # '{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0\n NMS_THRESH = 0.3\n max_inds = 0\n max_score = 0.0\n for cls in classes:\n cls_ind = CLASSES.index(cls)\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n keep = np.where(cls_scores >= CONF_THRESH)[0]\n cls_boxes = cls_boxes[keep, :]\n cls_scores = cls_scores[keep]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n # print 'All {} detections with p({} | box) >= {:.1f} in {}'.format(cls, cls,\n # CONF_THRESH, image_name)\n #if get_max!=[]: \n\n [ind,tmp]=get_max(im, cls, dets, thresh=CONF_THRESH)\n #print image_name,cls,tmp\n\n #vis_detections(im, cls, dets, image_name, thresh=CONF_THRESH)\n #print dets[:,-1]\n #print image_name,max_score\n file.writelines([image_name,'\\t',cls,'\\t',str(tmp),'\\n'])\n if(max_score<tmp):\n max_score=tmp\n cls_max=cls\n print image_name,cls_max,max_score", "def im_detect(sess, net, im, boxes=None):\n\n blobs, im_scales = _get_blobs(im, boxes)\n\n # When mapping from image ROIs to feature map ROIs, there's some aliasing\n # (some distinct image ROIs get mapped to the same feature ROI).\n # Here, we identify duplicate feature ROIs, so we only compute features\n # on the unique subset.\n\n if cfg.TEST.HAS_RPN:\n im_blob = blobs['data']\n blobs['im_info'] = np.array(\n [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],\n dtype=np.float32)\n # forward pass\n if cfg.TEST.HAS_RPN:\n feed_dict={net.data: blobs['data'], net.im_info: blobs['im_info'], net.keep_prob: 1.0}\n else:\n feed_dict={net.data: 
blobs['data'], net.rois: blobs['rois'], net.keep_prob: 1.0}\n\n    run_options = None\n    run_metadata = None\n    if cfg.TEST.DEBUG_TIMELINE:\n        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n        run_metadata = tf.RunMetadata()\n\n    cls_score, cls_prob, bbox_pred, rois = sess.run([net.get_output('cls_score'), net.get_output('cls_prob'), net.get_output('bbox_pred'),net.get_output('rois')],\n                                                    feed_dict=feed_dict,\n                                                    options=run_options,\n                                                    run_metadata=run_metadata)\n\n    if cfg.TEST.HAS_RPN:\n        assert len(im_scales) == 1, \"Only single-image batch implemented\"\n        boxes = rois[:, 1:5] / im_scales[0]\n\n\n    if cfg.TEST.SVM:\n        # use the raw scores before softmax under the assumption they\n        # were trained as linear SVMs\n        scores = cls_score\n    else:\n        # use softmax estimated probabilities\n        scores = cls_prob\n\n    if cfg.TEST.BBOX_REG:\n        # Apply bounding-box regression deltas\n        box_deltas = bbox_pred\n        pred_boxes = bbox_transform_inv(boxes, box_deltas)\n        pred_boxes = _clip_boxes(pred_boxes, im.shape)\n    else:\n        # Simply repeat the boxes, once for each class\n        pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n    if cfg.TEST.DEBUG_TIMELINE:\n        trace = timeline.Timeline(step_stats=run_metadata.step_stats)\n        trace_file = open(str(int(time.time() * 1000)) + '-test-timeline.ctf.json', 'w')\n        trace_file.write(trace.generate_chrome_trace_format(show_memory=False))\n        trace_file.close()\n\n    return scores, pred_boxes", "def test_get_networks(self):\n        pass", "def test_over_mask_over_regions_segmented_and_whole_extractor(region, images_used):\n    atlas = None\n    dict_parameters = None\n    reshape_kind = None\n\n    if images_used == \"MRI\":\n        atlas = mri_atlas.load_atlas_mri()\n        dict_parameters = MRI_stack_NORAD.get_parameters()\n        reshape_kind = \"C\"\n\n    elif images_used == \"PET\":\n        atlas = pet_atlas.load_atlas()\n        dict_parameters = PET_stack_NORAD.get_parameters()\n        reshape_kind = \"F\"\n\n    whole_mask_flatten, mask_segmented_flatten = \\\n        get_whole_region_mask_and_region_segmented_mask(\n            atlas=atlas,\n            dict_parameters=dict_parameters,\n            region=region,\n            reshape_kind=reshape_kind)\n\n    print(\"Number voxels activated in whole MRI: {0}\\n\"\n          \"length whole image: {1} \\n\"\n          \"Number voxels activated in region segmented 3d: {2}\\n\"\n          \"length region segmented {3}\".format(\n        sum(whole_mask_flatten), len(whole_mask_flatten),\n        sum(mask_segmented_flatten), len(mask_segmented_flatten)))", "def find_instances(self, image, region, overlap):\n        self.image = np.copy(image)\n\n        self.eff_step_size = int((1.0-overlap)*self.eff_box_size)\n\n        y_steps = (region[3]-region[1])//self.eff_step_size\n        x_steps = (region[2]-region[0])//self.eff_step_size\n\n        if region[0]+(x_steps-1)*self.eff_step_size+self.eff_box_size>region[2]:\n            x_steps -= 1\n        if region[1]+(y_steps-1)*self.eff_step_size+self.eff_box_size>region[3]:\n            y_steps -= 1\n\n        if self.single_hog:\n            self.resized_image = image[region[1]:region[3],region[0]:region[2],:]\n            self.resized_image = cv2.resize(self.resized_image, (int(self.resized_image.shape[1]/self.scaling), int(self.resized_image.shape[0]/self.scaling)))\n            features, img = self.hogger.hog_image(self.resized_image, visualize=False, feature_vector=False)\n            features = np.array(features)\n            self.find_instances_in_features(features, region)\n            return self.image, self.resized_image\n        else:\n            for row in range(y_steps):\n                off_y = region[1] + row * self.eff_step_size\n                for col in range(x_steps):\n                    off_x = region[0]+col * self.eff_step_size\n                    sub_sample = self.get_resized_sub_sample(off_x, off_y)\n                    pred = 
self.classifier.classify(sub_sample)\n if(pred==1.0):\n cv2.rectangle(self.image, (off_x, off_y), (off_x+self.eff_box_size, off_y+self.eff_box_size), color=(255,255,255), thickness=2)\n self.boundings.append(((off_x, off_y), (off_x+self.eff_box_size, off_y+self.eff_box_size)))\n\n return self.image, None", "def Test(self):\n print('Testing:')\n # set mode eval\n torch.cuda.empty_cache()\n self.network.eval()\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='test', transforms = transform)\n test_loader = DataLoader(dataset,\n batch_size=params.test_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n # prepare test data\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0\n test_size = 1124\n if test_size % self.params.test_batch != 0:\n total_batch = test_size // self.params.test_batch + 1\n else:\n total_batch = test_size // self.params.test_batch\n\n # test for one epoch\n for batch_idx, batch in enumerate(test_loader):\n self.pb.click(batch_idx, total_batch)\n image, label, name = batch['image'], batch['label'], batch['label_name']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n pred = image_cuda\n pred = pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original%d.jpg\" % batch_idx, img_grid)\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Test_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original_label%d.png\" % batch_idx, label)\n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(\"\\t\")\n print(accuracy_new/total_batch)", "def test_CreateROI1(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n import urllib\r\n downloads = (\r\n ('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),\r\n )\r\n\r\n for url,name,loader in downloads:\r\n filePath = slicer.app.temporaryPath + '/' + name\r\n if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:\r\n logging.info('Requesting download %s from %s...\\n' % (name, url))\r\n urllib.urlretrieve(url, filePath)\r\n if loader:\r\n logging.info('Loading %s...' 
% (name,))\r\n loader(filePath)\r\n self.delayDisplay('Finished with download and loading')\r\n\r\n volumeNode = slicer.util.getNode(pattern=\"FA\")\r\n logic = CreateROILogic()\r\n self.assertIsNotNone( logic.hasImageData(volumeNode) )\r\n self.delayDisplay('Test passed!')", "def Compute_EdgeBoxesAndCNN_features(demonet='res152',nms_thresh = 0.7,database='IconArt_v1',\n augmentation=False,L2 =False,\n saved='all',verbose=True,filesave='tfrecords',k_regions=300,\n testMode=False,plotProposedBoxes=False):\n\n path_data = '/media/gonthier/HDD/output_exp/ClassifPaintings/'\n path_imgs = path_data + 'EdgeBoxesIllust/'+database +'/'\n \n if plotProposedBoxes:\n print(\"We will only plot the regions of the EdgeBoxes with k_regions = \",k_regions,path_imgs)\n pathlib.Path(path_imgs).mkdir(parents=True, exist_ok=True) \n \n item_name,path_to_img,default_path_imdb,classes,ext,num_classes,str_val,df_label,\\\n path_data,Not_on_NicolasPC = get_database(database)\n \n if augmentation:\n raise NotImplementedError\n N = 50\n else: \n N=1\n if L2:\n raise NotImplementedError\n extL2 = '_L2'\n else:\n extL2 = ''\n if saved=='all':\n savedstr = '_all'\n elif saved=='fc7':\n savedstr = ''\n elif saved=='pool5':\n savedstr = '_pool5'\n \n tf.reset_default_graph() # Needed to use different nets one after the other\n if verbose: print('=== EdgeBoxes net',demonet,'database',database,' ===')\n \n if demonet=='res152':\n weights_path = '/media/gonthier/HDD/models/resnet152_weights_tf.h5'\n model = resnet_152_keras.resnet152_model_2048output(weights_path)\n num_features = 2048\n else:\n raise(NotImplementedError)\n tfconfig = tf.ConfigProto(allow_soft_placement=True)\n tfconfig.gpu_options.allow_growth=True\n # init session\n# sess = tf.Session(config=tfconfig)\n \n features_resnet_dict= {}\n \n sets = ['train','val','trainval','test']\n \n if filesave == 'pkl':\n name_pkl_all_features = path_data+'EdgeBoxes_'+ demonet +'_'+database+'_N'+str(N)+extL2+'_TLforMIL_nms_'+str(nms_thresh)+savedstr+'.pkl'\n pkl = open(name_pkl_all_features, 'wb')\n elif filesave =='tfrecords':\n if k_regions==300:\n k_per_bag_str = ''\n else:\n k_per_bag_str = '_k'+str(k_regions)\n dict_writers = {}\n for set_str in sets:\n name_pkl_all_features = path_data\n if testMode: name_pkl_all_features+= 'TestMode_'\n name_pkl_all_features += 'EdgeBoxes_'+ demonet +'_'+database+'_N'+str(N)+extL2+'_TLforMIL_nms_'+str(nms_thresh)+savedstr+k_per_bag_str+'_'+set_str+'.tfrecords'\n dict_writers[set_str] = tf.python_io.TFRecordWriter(name_pkl_all_features)\n \n model_edgeboxes = 'model/model.yml'\n print('Need of pip install opencv-contrib-python')\n edge_detection = cv2.ximgproc.createStructuredEdgeDetection(model_edgeboxes)\n \n number_of_regions = []\n Itera = 1000\n if testMode:\n Itera = 1\n for i,name_img in enumerate(df_label[item_name]):\n if testMode and i>1:\n break\n if filesave=='pkl':\n if not(k_regions==300):\n raise(NotImplementedError)\n if i%Itera==0:\n if verbose : print(i,name_img)\n if not(i==0):\n pickle.dump(features_resnet_dict,pkl) # Save the data\n features_resnet_dict= {}\n if database in ['IconArt_v1','VOC2007','clipart','comic','Paintings',\\\n 'watercolor','WikiTenLabels','MiniTrain_WikiTenLabels',\\\n 'WikiLabels1000training','CASPApaintings']:\n complet_name = path_to_img + name_img + '.jpg'\n elif database=='PeopleArt':\n complet_name = path_to_img + name_img\n name_sans_ext = os.path.splitext(name_img)[0]\n elif(database=='Wikidata_Paintings') or (database=='Wikidata_Paintings_miniset_verif'):\n name_sans_ext = 
os.path.splitext(name_img)[0]\n                complet_name = path_to_img +name_sans_ext + '.jpg'\n\n            if plotProposedBoxes:\n                plot_im_withBoxes(complet_name,edge_detection,k_regions,path_imgs)\n            list_im, rois = get_crops(complet_name,edge_detection,k_regions,demonet,augmentation=False)\n            number_of_regions += [len(list_im)]\n            fc7 = model.predict(list_im)\n            # Needs BGR between 0 and 255 minus the per-channel mean \n            \n            roi_scores = np.ones((len(list_im,)))\n#            cls_score, cls_prob, bbox_pred, rois,roi_scores, fc7,pool5 = TL_im_detect(sess, net, im) # Arguments: im (ndarray): a color image in BGR order\n            #features_resnet_dict[name_img] = fc7[np.concatenate(([0],np.random.randint(1,len(fc7),29))),:]\n            if saved=='fc7':\n                features_resnet_dict[name_img] = fc7\n#            elif saved=='pool5':\n#                features_resnet_dict[name_img] = pool5\n            elif saved=='all':\n                features_resnet_dict[name_img] = rois,roi_scores,fc7\n            \n        elif filesave=='tfrecords':\n            if i%Itera==0:\n                if verbose : print(i,name_img)\n            if database in ['IconArt_v1','VOC2007','clipart','comic','Paintings','watercolor'\\\n                            ,'CASPApaintings','WikiTenLabels','MiniTrain_WikiTenLabels','WikiLabels1000training']:\n                complet_name = path_to_img + name_img + '.jpg'\n                name_sans_ext = name_img\n            elif database=='PeopleArt':\n                complet_name = path_to_img + name_img\n                name_sans_ext = os.path.splitext(name_img)[0]\n            elif(database=='Wikidata_Paintings') or (database=='Wikidata_Paintings_miniset_verif'):\n                name_sans_ext = os.path.splitext(name_img)[0]\n                complet_name = path_to_img +name_sans_ext + '.jpg'\n\n            im = cv2.imread(complet_name)\n            \n            height = im.shape[0]\n            width = im.shape[1]\n\n            if plotProposedBoxes:\n                plot_im_withBoxes(complet_name,edge_detection,k_regions,path_imgs)\n            list_im, rois = get_crops(complet_name,edge_detection,k_regions,demonet,augmentation=False)\n            # Boxes are x, y, w, h\n            number_of_regions += [len(list_im)]\n            fc7 = model.predict(list_im)\n            roi_scores = np.ones((len(list_im,)))\n#            cls_score, cls_prob, bbox_pred, rois,roi_scores, fc7,pool5 = TL_im_detect(sess, net, im) # Arguments: im (ndarray): a color image in BGR order\n            \n            if testMode:\n                print('Image :',height,width)\n                print('Normally ROI (x1,x2,y1,y2) :')\n                print(rois)\n            \n            if(len(fc7) >= k_regions):\n                rois = rois[0:k_regions,:]\n                roi_scores =roi_scores[0:k_regions,]\n                fc7 = fc7[0:k_regions,:]\n            else:\n                number_repeat = k_regions // len(fc7) +1\n                f_repeat = np.repeat(fc7,number_repeat,axis=0)\n                roi_scores_repeat = np.repeat(roi_scores,number_repeat,axis=0)\n                rois_repeat = np.repeat(rois,number_repeat,axis=0)\n                rois = rois_repeat[0:k_regions,:]\n                roi_scores =roi_scores_repeat[0:k_regions,]\n                fc7 = f_repeat[0:k_regions,:]\n            num_regions = fc7.shape[0]\n            num_features = fc7.shape[1]\n            dim1_rois = rois.shape[1]\n            classes_vectors = np.zeros((num_classes,1),dtype=np.float32)\n            \n            if database=='Paintings':\n                for j in range(num_classes):\n                    if(classes[j] in df_label['classe'][i]):\n                        classes_vectors[j] = 1\n            if database in ['VOC2007','clipart','watercolor','comic','PeopleArt','CASPApaintings']:\n                for j in range(num_classes):\n                    value = int((int(df_label[classes[j]][i])+1.)/2.)\n                    # What actually happens here is that you crudely rescale \n                    # to between 0 and 1 something that may be between 0 and 1 but also between -1 and 1\n                    # It's weird\n                    classes_vectors[j] = value\n            if database in ['WikiTenLabels','MiniTrain_WikiTenLabels','WikiLabels1000training','IconArt_v1']:\n                for j in range(num_classes):\n                    value = int(df_label[classes[j]][i])\n                    classes_vectors[j] = value\n            \n            #features_resnet_dict[name_img] = 
fc7[np.concatenate(([0],np.random.randint(1,len(fc7),29))),:]\n            if saved=='fc7':\n                print('It is possible that you need to replace _bytes_feature by _floats_feature in this function')\n                print('!!!!!!!!!!!!!!!!!!!!!')\n                raise(NotImplementedError)\n                # TODO: change this!\n                features=tf.train.Features(feature={\n                    'height': _int64_feature(height),\n                    'width': _int64_feature(width),\n                    'num_regions': _int64_feature(num_regions),\n                    'num_features': _int64_feature(num_features),\n                    'fc7': _bytes_feature(tf.compat.as_bytes(fc7.tostring())),\n                    'label' : _bytes_feature(tf.compat.as_bytes(classes_vectors.tostring())),\n                    'name_img' : _bytes_feature(str.encode(name_sans_ext))})\n            elif saved=='pool5':\n                raise(NotImplementedError)\n            elif saved=='all':\n                feature={\n                    'height': _int64_feature(height),\n                    'width': _int64_feature(width),\n                    'num_regions': _int64_feature(num_regions),\n                    'num_features': _int64_feature(num_features),\n                    'dim1_rois': _int64_feature(dim1_rois),\n                    'rois': _floats_feature(rois),\n                    'roi_scores': _floats_feature(roi_scores),\n                    'fc7': _floats_feature(fc7),\n                    'label' : _floats_feature(classes_vectors),\n                    'name_img' : _bytes_feature(str.encode(name_sans_ext))}\n                features=tf.train.Features(feature=feature)\n            example = tf.train.Example(features=features) \n#            print(len(feature['rois']))\n            if database=='VOC2007' or database=='PeopleArt':\n                if (df_label.loc[df_label[item_name]==name_img]['set']=='train').any():\n                    dict_writers['train'].write(example.SerializeToString())\n                    dict_writers['trainval'].write(example.SerializeToString())\n                elif (df_label.loc[df_label[item_name]==name_img]['set']=='val').any():\n                    dict_writers['val'].write(example.SerializeToString())\n                    dict_writers['trainval'].write(example.SerializeToString())\n                elif (df_label.loc[df_label[item_name]==name_img]['set']=='test').any():\n                    dict_writers['test'].write(example.SerializeToString())\n            if (database=='Wikidata_Paintings_miniset') or database=='Paintings':\n                if (df_label.loc[df_label[item_name]==name_img]['set']=='train').any():\n                    dict_writers['train'].write(example.SerializeToString())\n                    dict_writers['trainval'].write(example.SerializeToString())\n                elif (df_label.loc[df_label[item_name]==name_img]['set']=='validation').any():\n                    dict_writers['val'].write(example.SerializeToString())\n                    dict_writers['trainval'].write(example.SerializeToString())\n                elif (df_label.loc[df_label[item_name]==name_img]['set']=='test').any():\n                    dict_writers['test'].write(example.SerializeToString())\n            if database in ['IconArt_v1','watercolor','clipart','comic','WikiTenLabels',\\\n                            'MiniTrain_WikiTenLabels','WikiLabels1000training','CASPApaintings']:\n                if (df_label.loc[df_label[item_name]==name_img]['set']=='train').any():\n                    dict_writers['train'].write(example.SerializeToString())\n                    dict_writers['trainval'].write(example.SerializeToString())\n                elif (df_label.loc[df_label[item_name]==name_img]['set']=='test').any():\n                    dict_writers['test'].write(example.SerializeToString())\n    \n    if filesave=='pkl':\n        pickle.dump(features_resnet_dict,pkl)\n        pkl.close()\n    elif filesave=='tfrecords':\n        for set_str in sets:\n            dict_writers[set_str].close()\n    \n    print('Mean number of regions per image :',np.mean(number_of_regions),'with k max = ',k_regions)\n    \n    tf.reset_default_graph()\n    \n    if testMode:\n        sets = ['train','test','trainval','val']\n        dim_rois = 4\n        for set_str in sets:\n            name_pkl_all_features = path_data\n            if testMode: name_pkl_all_features+= 'TestMode_'\n            name_pkl_all_features += 'EdgeBoxes_'+ demonet 
+'_'+database+'_N'+str(N)+extL2+'_TLforMIL_nms_'+str(nms_thresh)+savedstr+k_per_bag_str+'_'+set_str+'.tfrecords'\n print(name_pkl_all_features)\n if set_str=='train':\n train_dataset = tf.data.TFRecordDataset(name_pkl_all_features)\n sess = tf.Session()\n train_dataset = train_dataset.map(lambda r: parser_w_rois_all_class(r, \\\n num_classes=num_classes,with_rois_scores=True,num_features=num_features,\n num_rois=k_regions,dim_rois=dim_rois))\n mini_batch_size = 1\n dataset_batch = train_dataset.batch(mini_batch_size)\n dataset_batch.cache()\n iterator = dataset_batch.make_one_shot_iterator()\n next_element = iterator.get_next()\n print(next_element)\n nx = sess.run(next_element)\n print(nx)\n name_img = nx[-1][0].decode('utf8')\n if database in ['IconArt_v1','VOC2007','clipart','comic','Paintings',\\\n 'watercolor','WikiTenLabels','MiniTrain_WikiTenLabels',\\\n 'WikiLabels1000training','CASPApaintings']:\n complet_name = path_to_img + name_img + '.jpg'\n name_sans_ext = name_img\n elif database=='PeopleArt':\n complet_name = path_to_img + name_img\n name_sans_ext = os.path.splitext(name_img)[0]\n elif(database=='Wikidata_Paintings') or (database=='Wikidata_Paintings_miniset_verif'):\n name_sans_ext = os.path.splitext(name_img)[0]\n complet_name = path_to_img +name_sans_ext + '.jpg'\n \n im = cv2.imread(complet_name)\n \n blobs, im_scales = get_blobs(im)\n dd = nx[1]/ im_scales[0] \n score = nx[2]\n roi = np.hstack((dd[0],score[0].reshape((-1,1))))\n \n \n class_name = ['']\n vis_detections_list(im, class_name, [roi])\n \n os.remove(name_pkl_all_features)", "def __init__(self, mode, roidb_file=VG_SGG_FN, dict_file=VG_SGG_DICT_FN,\n image_file=IM_DATA_FN, filter_empty_rels=True, num_im=-1, num_val_im=5000,\n filter_duplicate_rels=True, filter_non_overlap=True,\n use_proposals=False):\n if mode not in ('test', 'train', 'val'):\n raise ValueError(\"Mode must be in test, train, or val. Supplied {}\".format(mode))\n self.mode = mode\n\n # Initialize\n self.roidb_file = roidb_file\n self.dict_file = dict_file\n self.image_file = image_file\n self.filter_non_overlap = filter_non_overlap\n self.filter_duplicate_rels = filter_duplicate_rels and self.mode == 'train'\n\n self.split_mask, self.gt_boxes, self.gt_classes, self.relationships = load_graphs(\n self.roidb_file, self.mode, num_im, num_val_im=num_val_im,\n filter_empty_rels=filter_empty_rels,\n filter_non_overlap=self.filter_non_overlap and self.is_train,\n )\n\n self.filenames = load_image_filenames(image_file)\n self.filenames = [self.filenames[i] for i in np.where(self.split_mask)[0]]\n\n self.ind_to_classes, self.ind_to_predicates = load_info(dict_file)\n\n if use_proposals:\n print(\"Loading proposals\", flush=True)\n p_h5 = h5py.File(PROPOSAL_FN, 'r')\n rpn_rois = p_h5['rpn_rois']\n rpn_scores = p_h5['rpn_scores']\n rpn_im_to_roi_idx = np.array(p_h5['im_to_roi_idx'][self.split_mask])\n rpn_num_rois = np.array(p_h5['num_rois'][self.split_mask])\n\n self.rpn_rois = []\n for i in range(len(self.filenames)):\n rpn_i = np.column_stack((\n rpn_scores[rpn_im_to_roi_idx[i]:rpn_im_to_roi_idx[i] + rpn_num_rois[i]],\n rpn_rois[rpn_im_to_roi_idx[i]:rpn_im_to_roi_idx[i] + rpn_num_rois[i]],\n ))\n self.rpn_rois.append(rpn_i)\n else:\n self.rpn_rois = None\n\n # You could add data augmentation here. 
But we didn't.\n # tform = []\n # if self.is_train:\n # tform.append(RandomOrder([\n # Grayscale(),\n # Brightness(),\n # Contrast(),\n # Sharpness(),\n # Hue(),\n # ]))\n\n tform = [\n SquarePad(),\n Resize(IM_SCALE),\n ToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n self.transform_pipeline = Compose(tform)", "def test_differentiable__mask_connectivity(self):\n data = random_graph_data(5, 4, 3)\n print(data.size)", "def load_pedestrian(self, dataset_dir, subset):\n # Add classes. We have only one class to add.\n self.add_class(\"pedestrian\", 1, \"pedestrian\")\n\n # Train or validation dataset?\n assert subset in [\"train\", \"val\"]\n dataset_dir = os.path.join(dataset_dir, subset)\n print(dataset_dir)\n # Load annotations\n # VGG Image Annotator (up to version 1.6) saves each image in the form:\n # { 'filename': '28503151_5b5b7ec140_b.jpg',\n # 'regions': {\n # '0': {\n # 'region_attributes': {},\n # 'shape_attributes': {\n # 'all_points_x': [...],\n # 'all_points_y': [...],\n # 'name': 'polygon'}},\n # ... more regions ...\n # },\n # 'size': 100202\n # }\n # We mostly care about the x and y coordinates of each region\n # Note: In VIA 2.0, regions was changed from a dict to a list.\n annotations = json.load(open(os.path.join(dataset_dir, \"via_region_data.json\")))\n annotations = list(annotations.values()) # don't need the dict keys\n\n # The VIA tool saves images in the JSON even if they don't have any\n # annotations. Skip unannotated images.\n annotations = [a for a in annotations if a['regions']]\n\n # Add images\n for a in annotations:\n # Get the x, y coordinaets of points of the polygons that make up\n # the outline of each object instance. These are stores in the\n # shape_attributes (see json format above)\n # The if condition is needed to support VIA versions 1.x and 2.x.\n if type(a['regions']) is dict:\n polygons = [r['shape_attributes'] for r in a['regions'].values()]\n else:\n polygons = [r['shape_attributes'] for r in a['regions']] \n\n # load_mask() needs the image size to convert polygons to masks.\n # Unfortunately, VIA doesn't include it in JSON, so we must read\n # the image. 
This is only manageable since the dataset is tiny.\n            image_path = os.path.join(dataset_dir, a['filename'])\n            image = skimage.io.imread(image_path)\n            height, width = image.shape[:2]\n\n            self.add_image(\n                \"pedestrian\",\n                image_id=a['filename'],  # use file name as a unique image id\n                path=image_path,\n                width=width, height=height,\n                polygons=polygons)", "def test_retinanet(ind_range=None):\n    assert cfg.RETINANET.RETINANET_ON, \\\n        'RETINANET_ON must be set for testing RetinaNet model'\n    output_dir = get_output_dir(training=False)\n    dataset = JsonDataset(cfg.TEST.DATASET)\n    im_list = dataset.get_roidb()\n    if ind_range is not None:\n        start, end = ind_range\n        im_list = im_list[start:end]\n        logger.info('Testing on roidb range: {}-{}'.format(start, end))\n    else:\n        # if testing over the whole dataset, use the NUM_TEST_IMAGES setting\n        # the NUM_TEST_IMAGES could be over a small set of images for quick\n        # debugging purposes\n        im_list = im_list[0:cfg.TEST.NUM_TEST_IMAGES]\n\n    model = model_builder.create(cfg.MODEL.TYPE, train=False)\n    if cfg.TEST.WEIGHTS:\n        nu.initialize_from_weights_file(\n            model, cfg.TEST.WEIGHTS, broadcast=False\n        )\n    model_builder.add_inference_inputs(model)\n    workspace.CreateNet(model.net)\n    boxes, scores, classes, image_ids = im_list_detections(\n        model, im_list[0:cfg.TEST.NUM_TEST_IMAGES])\n\n    cfg_yaml = yaml.dump(cfg)\n    if ind_range is not None:\n        det_name = 'retinanet_detections_range_%s_%s.pkl' % tuple(ind_range)\n    else:\n        det_name = 'retinanet_detections.pkl'\n    det_file = os.path.join(output_dir, det_name)\n    save_object(\n        dict(boxes=boxes, scores=scores, classes=classes, ids=image_ids, cfg=cfg_yaml),\n        det_file)\n    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n    return boxes, scores, classes, image_ids", "def test_network(bpn, test_data):\n    DisplayNetwork.display_green(\"[INFO] Started to test the network\")\n    output = bpn.Run(np.array(test_data))\n    return output", "def test(nifti_region_to_save, path_where_store_out=\"pet_regions_segmented\"):\n    regions_used = \"three\"\n    list_regions = session.select_regions_to_evaluate(regions_used)\n    dic_regions_segmented = load_pet_regions_segmented(list_regions)\n\n    region_container_3d = dic_regions_segmented[\n        nifti_region_to_save]  # [patients x height, width, depth]\n\n    for patient in range(0, region_container_3d.shape[0], 1):\n        img = nib.Nifti1Image(region_container_3d[patient, :, :, :], np.eye(4))\n        img.to_filename(os.path.join(path_where_store_out,\n                                     \"region_{0},patient_{1}.nii\".format(\n                                         regions_used, patient)))", "def im_detect(net, im, boxes):\n    blobs, unused_im_scale_factors = _get_blobs(im, boxes)\n    # When mapping from image ROIs to feature map ROIs, there's some aliasing\n    # (some distinct image ROIs get mapped to the same feature ROI).\n    # Here, we identify duplicate feature ROIs, so we only compute features\n    # on the unique subset.\n    for i in range(len(blobs['data'])):\n        if cfg.DEDUP_BOXES > 0:\n            v = np.array([1, 1e3, 1e6, 1e9, 1e12])\n            hashes = np.round(blobs['rois'][i] * cfg.DEDUP_BOXES).dot(v)\n            _, index, inv_index = np.unique(hashes, return_index=True,\n                                            return_inverse=True)\n            blobs['rois'][i] = blobs['rois'][i][index, :]\n            boxes_tmp = boxes[index, :].copy()\n        else:\n            boxes_tmp = boxes.copy()\n        t_data = blobs['data'][i].astype(np.float32, copy=False)\n        #t_data = t_data.reshape((1, t_data.shape[0], t_data.shape[1], t_data.shape[2], t_data.shape[3]))\n        data_height, data_width = t_data.shape[1], t_data.shape[2]\n        im_data = torch.FloatTensor(t_data).cuda()\n        im_data = im_data.permute(0, 3, 
1, 2).contiguous() #.view(3, data_height, data_width)\n        LIM = 2000 # split ROIs due to memory issue\n        if cfg.TEST.USE_FLIPPED :\n            blobs['data'][i] = blobs['data'][i][:, :, ::-1, :]\n            width = blobs['data'][i].shape[2]\n            t_data = blobs['data'][i].astype(np.float32, copy=False)\n            data_height, data_width = t_data.shape[1], t_data.shape[2]\n            #im_data = torch.FloatTensor(t_data).cuda()\n            im_data_flip = torch.from_numpy(t_data.copy()).cuda()\n            im_data_flip = im_data_flip.permute(0, 3, 1, 2).contiguous()#.view(3, data_height, data_width)\n            #im_data = im_data[...,::-1]\n        for j in range (int(np.ceil(blobs['rois'][i].shape[0] / LIM))) :\n            t_rois = blobs['rois'][i][j*LIM:(j+1)*LIM].astype(np.float32, copy=False)\n            im_rois = torch.FloatTensor(t_rois).cuda()\n            ic_prob, ic_prob1, ic_prob2 = net(im_data, im_rois)\n            scores_tmp = ic_prob + ic_prob1 + ic_prob2\n            pred_boxes_small = np.tile(boxes_tmp[j*LIM : (j+1)*LIM], (1, scores_tmp.shape[2]))\n\n            if cfg.TEST.USE_FLIPPED:\n                #pdb.set_trace()\n                oldx1 = blobs['rois'][i][j*LIM:(j+1)*LIM, 1].copy()\n                oldx2 = blobs['rois'][i][j*LIM:(j+1)*LIM, 3].copy()\n                blobs['rois'][i][j*LIM:(j+1)*LIM, 1] = width - oldx2 - 1\n                blobs['rois'][i][j*LIM:(j+1)*LIM, 3] = width - oldx1 - 1\n                assert (blobs['rois'][i][j*LIM:(j+1)*LIM, 3] >= blobs['rois'][i][j*LIM:(j+1)*LIM, 1]).all()\n                t_rois = blobs['rois'][i][j*LIM:(j+1)*LIM].astype(np.float32, copy=False)\n                im_rois = torch.FloatTensor(t_rois).cuda()\n                ic_prob, ic_prob1, ic_prob2 = net(im_data_flip, im_rois)\n                scores_tmp += ic_prob + ic_prob1 + ic_prob2\n                del im_rois\n\n            if j == 0 :\n                scores_tmp_real = scores_tmp\n                pred_boxes = pred_boxes_small\n            else :\n                scores_tmp_real = torch.cat((scores_tmp_real, scores_tmp), dim=1)\n                pred_boxes = np.vstack((pred_boxes, pred_boxes_small))\n\n\n        if cfg.DEDUP_BOXES > 0:\n            # Map scores and predictions back to the original set of boxes\n            scores_tmp = scores_tmp_real[:,inv_index, :]\n            pred_boxes = pred_boxes[inv_index, :]\n        \n        if i == 0: \n            scores = np.copy(scores_tmp.data).squeeze()\n            if len(scores.shape) == 1 :\n                scores = scores[np.newaxis, :]\n        else:\n            scores += scores_tmp[0].data\n\n    scores /= len(blobs['data']) * (1. 
+ cfg.TEST.USE_FLIPPED)\n    return scores[:,1:], pred_boxes[:, 4:]", "def test_nominal_case(self):\n\n        image_filename, boxes = list(annotation.read(self.filename))\n        self.assertEqual(image_filename, 'image.jpg')\n        self.assertEqual(len(boxes), 2)\n        width = 400\n        height = 300\n        b = boxes[0]\n        self.assertEqual(b.xmin, 10 / width)\n        self.assertEqual(b.ymin, 20 / height)\n        self.assertEqual(b.xmax, 30 / width)\n        self.assertEqual(b.ymax, 40 / height)", "def check_dataset(*, low_path: str, high_path: str, count: int = 1):\n    with open(high_path, \"rb\") as s_file:\n        src_data: np.array = np.load(s_file)\n\n    with open(low_path, \"rb\") as s_file:\n        res_data: np.array = np.load(s_file)\n\n    assert src_data.shape == res_data.shape\n    n, m = res_data.shape\n    core_size = int(np.sqrt(m / LAYERS))\n    assert core_size ** 2 * LAYERS == m\n    k = core_size * 4\n\n    for _ in range(count):\n        img = np.zeros(\n            (core_size, k, LAYERS), dtype=res_data.dtype\n        )\n        i = random.randint(0, n - 1)\n        res_row = res_data[i]\n        src_row = src_data[i]\n\n        mask = create_percent_diff(src_row, res_row)\n        restored_src = apply_diff(res_row, mask)\n        for l_i, layer_mask in enumerate(np.reshape(mask, (LAYERS, core_size, core_size))):  # noqa\n            print(f\"layer {l_i} mask:\")\n            for row in layer_mask:\n                print(\",\".join(map(\"{: >3}\".format, row)))\n\n        nopy_restore_area(\n            img[:, 0:core_size, :], src_row, core_size, LAYERS\n        )\n        nopy_restore_area(\n            img[:, core_size:core_size * 2, :], res_row, core_size, LAYERS\n        )\n        nopy_restore_area(\n            img[:, core_size * 2:core_size * 3, :], mask, core_size, LAYERS\n        )\n        nopy_restore_area(\n            img[:, core_size * 3:k, :], restored_src, core_size, LAYERS\n        )\n        plt.imshow(Image.fromarray(img))\n        plt.show(block=True)", "def demo(sess, net, image_name):\n\n    # Load the demo image\n    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n    #im_file = os.path.join('/home/corgi/Lab/label/pos_frame/ACCV/training/000001/',image_name)\n    im = cv2.imread(im_file)\n\n    # Detect all object classes and regress object bounds\n    timer = Timer()\n    timer.tic()\n    scores, boxes = im_detect(sess, net, im)\n    timer.toc()\n    print (('Detection took {:.3f}s for '\n           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))\n\n    # Visualize detections for each class\n    im = im[:, :, (2, 1, 0)]\n    fig, ax = plt.subplots(figsize=(12, 12))\n    ax.imshow(im, aspect='equal')\n\n    CONF_THRESH = 0.8\n    NMS_THRESH = 0.3\n    for cls_ind, cls in enumerate(CLASSES[1:]):\n        cls_ind += 1 # because we skipped background\n        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n        cls_scores = scores[:, cls_ind]\n        dets = np.hstack((cls_boxes,\n                          cls_scores[:, np.newaxis])).astype(np.float32)\n        keep = nms(dets, NMS_THRESH)\n        dets = dets[keep, :]\n        vis_detections(im, cls, dets, ax, thresh=CONF_THRESH)", "def load_test_dataset():\n\n    def gen_image(resolution, x1, y1, x2, y2):\n        width, height = resolution\n        image = np.full([height, width, 3], fill_value=255, dtype=np.uint8)\n        image[int(y1 * height) : int(y2 * height), int(x1 * width) : int(x2 * width), :] = np.array(\n            [0, 128, 128], dtype=np.uint8\n        )[None, None, :]\n        return image, Rectangle(x1=x1, y1=y1, x2=x2, y2=y2)\n\n    images = [\n        gen_image((640, 480), 0.0, 0.0, 0.5, 0.5),\n        gen_image((640, 480), 0.5, 0.0, 1.0, 0.5),\n        gen_image((640, 480), 0.0, 0.5, 0.5, 1.0),\n        gen_image((640, 480), 0.5, 0.5, 1.0, 1.0),\n    ]\n    labels = [LabelEntity(name=\"rect\", domain=Domain.DETECTION, id=ID(\"0\"))]\n\n    def get_image(i, subset):\n        image, bbox = images[i]\n        return DatasetItemEntity(\n            media=Image(data=image),\n            
annotation_scene=AnnotationSceneEntity(\n                annotations=[Annotation(bbox, labels=[ScoredLabel(label=labels[0])])],\n                kind=AnnotationSceneKind.ANNOTATION,\n            ),\n            subset=subset,\n        )\n\n    items = [\n        get_image(0, Subset.TRAINING),\n        get_image(1, Subset.TRAINING),\n        get_image(2, Subset.TRAINING),\n        get_image(3, Subset.TRAINING),\n        get_image(0, Subset.TRAINING),\n        get_image(1, Subset.TRAINING),\n        get_image(2, Subset.TRAINING),\n        get_image(3, Subset.TRAINING),\n        get_image(0, Subset.TRAINING),\n        get_image(1, Subset.TRAINING),\n        get_image(0, Subset.VALIDATION),\n        get_image(1, Subset.VALIDATION),\n        get_image(2, Subset.VALIDATION),\n        get_image(3, Subset.VALIDATION),\n        get_image(0, Subset.TESTING),\n        get_image(1, Subset.TESTING),\n        get_image(2, Subset.TESTING),\n        get_image(3, Subset.TESTING),\n    ]\n    return DatasetEntity(items), labels", "def test_GoogleImages():\n    gi = pytest.importorskip('google_images')\n    gimages = gi.GoogleImages('../tests/')\n    gimages.download(COORDS_X, COORDS_Y, step=False)\n    f = gimages.featurize(COORDS_X, COORDS_Y, step=False)\n    assert f.shape == (len(COORDS_X), len(COORDS_Y))", "def test_assign_to_regions(self):\n        \n        tool = pybedtools.BedTool(clipper.test_file(\"FOX2Brain-05.15.09.polyATrim.adapterTrim.rmRep.sorted.rmDup.peaks.bed\"))\n        \n        assign_to_regions(tool=tool, \n                          clusters=\"test\", \n                          speciesFA= clipper.test_file(\"mm9.fa\"), \n                          regions_dir=os.path.join(clipper.test_dir(), \"regions\"), \n                          regions={\"exons\" : \"Exon\", \"utr3\" : \"3' UTR\", \n                                   \"utr5\" : \"5' UTR\", \"proxintron500\" : \"Proximal Intron\", \n                                   \"distintron500\" : \"Distal Intron\"} ,\n                          assigned_dir = clipper.test_dir(),\n                          fasta_dir = clipper.test_dir(),\n                          species=\"mm9\", \n                          nrand = 3, \n                          getseq=False)", "def test_net_on_dataset(args, dataset_name, proposal_file, output_dir, multi_gpu=False, gpu_id=0, use_matlab = False, early_stop=False):\n\n    \n    # print(\"test_net_on_dataset\")\n    dataset = JsonDataset(dataset_name)\n    test_timer = Timer()\n    \n    test_timer.tic()\n    \n    all_boxes = test_net(args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id, early_stop=early_stop)\n    test_timer.toc()\n\n    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))\n\n    roidb = dataset.get_roidb()\n    num_images = len(roidb)\n    num_classes = cfg.MODEL.NUM_CLASSES + 1\n    final_boxes = empty_results(num_classes, num_images)\n    test_corloc = 'train' in dataset_name\n    \n\n    all_cls_scores = {}\n\n    for i, entry in enumerate(roidb):\n\n        if early_stop and i > 10: break\n\n        boxes = all_boxes[entry['image']]\n        \n        cls_key = entry['image'].replace('.jpg','').split('/')[-1]\n\n        # print(cls_key)\n\n        if boxes['scores'] is not None:\n            if test_corloc:\n                # print(\"corlooking\")\n                _, _, cls_boxes_i = box_results_for_corloc(boxes['scores'], boxes['boxes'])\n            else:\n                _, _, cls_boxes_i = box_results_with_nms_and_limit(boxes['scores'], boxes['boxes'])\n\n            extend_results(i, final_boxes, cls_boxes_i)\n        else:\n            final_boxes = None\n    \n    results = task_evaluation.evaluate_all(dataset, final_boxes, output_dir, test_corloc, use_matlab = use_matlab)\n    return results", "def test_natural_neighbor(test_data, test_grid):\n    xp, yp, z = test_data\n    xg, yg = test_grid\n\n    img = natural_neighbor(xp, yp, z, xg, yg)\n\n    with get_test_data('nn_bbox0to100.npz') as fobj:\n        truth = np.load(fobj)['img']\n\n    assert_array_almost_equal(truth, img)", "def test_neuron(self):\r\n        # create a 1-D list (Horizontal, Inputs).\r\n        Z = [1, 2, 3]\r\n        # create a 1-D list (Vertical, Network weights).\r\n        W = [10, 20, 30]\r\n        # Initialize the neuron, and obtain 
the value it takes given W * Z\r\n        # X(k) = W * Z\r\n        result = rhonn(W, Z).predict()\r\n        # Check the result \r\n        self.assertEqual(result, 140)", "def test(self):\n        img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)\n        ## test flow ##\n\n        self.save_results(img_gen, data_name='vis')\n        if self.opt.save_input or self.opt.phase == 'val':\n            self.save_results(self.input_P1, data_name='ref')\n            self.save_results(self.input_P2, data_name='gt')\n            result = torch.cat([self.input_P1, img_gen, self.input_P2], 3)\n            self.save_results(result, data_name='all')", "def prepare_cityscapes_data(seed=1, percent=30.0, version=2017):\n    def _save_anno(name, images, annotations):\n        \"\"\"Save annotation\n        \"\"\"\n        print('>> Processing data {}.json saved ({} images {} annotations)'.format(\n            name, len(images), len(annotations)))\n        new_anno = {}\n        new_anno['images'] = images\n        new_anno['annotations'] = annotations\n        new_anno['categories'] = anno['categories']\n\n        with open(\n            '{root}/{save_name}.json'.format(\n                save_name=name, root=DATA_DIR),\n            'w') as f:\n            json.dump(new_anno, f)\n        print('>> Data {}.json saved ({} images {} annotations)'.format(\n            name, len(images), len(annotations)))\n\n    np.random.seed(seed)\n    \n    anno = json.load(open(os.path.join(DATA_DIR, 'instancesonly_filtered_gtFine_train.json')))\n\n    image_list = anno['images']\n    labeled_tot = int(percent / 100. * len(image_list))\n    #labeled_ind = np.random.choice(range(len(image_list)), size=labeled_tot)\n    labeled_ind = np.arange(len(image_list))\n    np.random.shuffle(labeled_ind)\n    labeled_ind = labeled_ind[0:labeled_tot]\n\n    labeled_id = []\n    labeled_images = []\n    unlabeled_images = []\n    labeled_ind = set(labeled_ind)\n    for i in range(len(image_list)):\n        if i in labeled_ind:\n            labeled_images.append(image_list[i])\n            labeled_id.append(image_list[i]['id'])\n        else:\n            unlabeled_images.append(image_list[i])\n\n    # get all annotations of labeled images\n    labeled_id = set(labeled_id)\n    labeled_annotations = []\n    unlabeled_annotations = []\n    for an in anno['annotations']:\n        if an['image_id'] in labeled_id:\n            labeled_annotations.append(an)\n        else:\n            unlabeled_annotations.append(an)\n\n    # save labeled and unlabeled\n    save_name = 'instancesonly_filtered_gtFine_train.{seed}@{tot}'.format(\n        version=version, seed=seed, tot=int(percent))\n    _save_anno(save_name, labeled_images, labeled_annotations)\n    save_name = 'instancesonly_filtered_gtFine_train.{seed}@{tot}-unlabeled'.format(\n        version=version, seed=seed, tot=int(percent))\n    _save_anno(save_name, unlabeled_images, unlabeled_annotations)", "def test_conus():\n    sat = gini.GINIZFile(get_test_file(\"TIGN02\", fponly=True))\n    assert sat.archive_filename() == \"GOES_SUPER_IR_201509281745.png\"\n    assert sat.awips_grid() == 0\n    assert sat.metadata[\"map_projection\"] == 5", "def brain_has_lead_image(self, brain=None):", "def paint_a_picture():\n    # Make a training set (many random i,j coord and an x by y box around that coord to start with)\n    # Throw it into the net\n    # Test how it does for some random coordinate inputs\n    pass", "def proposal_assignments_det(rpn_rois, gt_boxes, gt_classes, image_offset, fg_thresh=0.5):\n    fg_rois_per_image = int(np.round(ROIS_PER_IMG * FG_FRACTION))\n    gt_img_inds = gt_classes[:, 0] - image_offset\n    all_boxes = torch.cat([rpn_rois[:, 1:], gt_boxes], 0)\n    ims_per_box = torch.cat([rpn_rois[:, 0].long(), gt_img_inds], 0)\n    im_sorted, idx = torch.sort(ims_per_box, 0)\n    all_boxes = all_boxes[idx]\n    num_images 
= int(im_sorted[-1]) + 1\n labels = []\n rois = []\n bbox_targets = []\n for im_ind in range(num_images):\n g_inds = (gt_img_inds == im_ind).nonzero()\n if g_inds.dim() == 0:\n continue\n g_inds = g_inds.squeeze(1)\n g_start = g_inds[0]\n g_end = g_inds[-1] + 1\n t_inds = (im_sorted == im_ind).nonzero().squeeze(1)\n t_start = t_inds[0]\n t_end = t_inds[-1] + 1\n ious = bbox_overlaps(all_boxes[t_start:t_end], gt_boxes[g_start:g_end])\n max_overlaps, gt_assignment = ious.max(1)\n max_overlaps = max_overlaps.cpu().numpy()\n gt_assignment += g_start\n keep_inds_np, num_fg = _sel_inds(max_overlaps, fg_thresh, fg_rois_per_image, ROIS_PER_IMG)\n if keep_inds_np.size == 0:\n continue\n keep_inds = torch.LongTensor(keep_inds_np)\n labels_ = gt_classes[:, 1][gt_assignment[keep_inds]]\n bbox_target_ = gt_boxes[gt_assignment[keep_inds]]\n if num_fg < labels_.size(0):\n labels_[num_fg:] = 0\n rois_ = torch.cat((im_sorted[t_start:t_end, None][keep_inds].float(), all_boxes[t_start:t_end][keep_inds]), 1)\n labels.append(labels_)\n rois.append(rois_)\n bbox_targets.append(bbox_target_)\n rois = torch.cat(rois, 0)\n labels = torch.cat(labels, 0)\n bbox_targets = torch.cat(bbox_targets, 0)\n return rois, labels, bbox_targets", "def fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n # ROI Pooling\n # Shape: [batch, boxes, pool_height, pool_width, channels]\n x = modellib.PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(modellib.BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(modellib.BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(modellib.BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(modellib.BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(1, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n # Duplicate output for fg/bg detections\n x = KL.Concatenate(axis=-1)([x for i in range(num_classes)])\n return x", "def region_classifier(meta_path, regions_path, regions_label, fout,\n force):\n RegionClassifier.run(meta_path=meta_path,\n regions=regions_path,\n regions_label=regions_label,\n force=force, fout=fout)", "def num_regions(image_data):\n if len(image_data.shape) > 2:\n image_data = skimage.color.rgb2gray(image_data)\n _, num_labels = ndimage.label(image_data)\n return num_labels", "def trainNet():", "def test_roi_averaging(self):\n filename = get_test_data_path() + 'sgacc_mask.nii.gz'\n regions = self.dataset.masker.mask(filename, in_global_mask=True)\n avg_vox = reduce.average_within_regions(self.dataset, regions)\n n_studies = self.dataset.image_table.data.shape[1]\n self.assertEqual(n_studies, avg_vox.shape[1])\n self.assertGreater(avg_vox.sum(), 0.05)", 
"def test_on_map_of_noise(synthetic_checkerboard):\n img = synthetic_checkerboard['img']\n di = synthetic_checkerboard['distimg']\n\n cpp_vorimg = tess.tessellate_labimg(img,di)\n py_vorimg = pytess.tessellate_labimg(img,di)\n\n printers.store_ndarray(\"py_voronoi_on_map_of_noise_output.txt\",py_vorimg)\n\n assert cpp_vorimg.size > 0\n assert cpp_vorimg.shape == synthetic_checkerboard['img'].shape\n assert np.alltrue(synthetic_checkerboard['img'][1:3,1:3] == 1)\n\n printers.store_ndarray(\"cpp_voronoi_input.txt\",img)\n printers.store_ndarray(\"cpp_voronoi_on_map_of_noise_output.txt\",cpp_vorimg)\n\n # assert np.alltrue(cpp_vorimg[:4,:4] == 1)\n assert np.alltrue(cpp_vorimg == py_vorimg)", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def run_visualization(image):\n # for image in images:\n try:\n with tf.gfile.FastGFile(image, 'rb') as f:\n jpeg_str = f.read()\n original_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image')\n return\n\n # print('running deeplab on image {0}'.format(image))\n resized_im, seg_map = MODEL.run(original_im)\n seg_map = seg_map.astype(np.uint8) * 255\n resized_im = np.array(resized_im, dtype=np.uint8)\n resized_im = cv2.cvtColor(resized_im, cv2.COLOR_BGR2RGB)\n # vis_segmentation(resized_im, seg_map,FULL_COLOR_MAP ,LABEL_NAMES)\n overlay_image = cv2.addWeighted(resized_im, 0.8, cv2.merge((seg_map * 0, seg_map, seg_map * 0)), 0.2, 0)\n # time.sleep(params.SEC_BETWEEN_PREDICTION)\n\n return resized_im, seg_map, overlay_image.astype(np.uint8)", "def test_get_network(self):\n pass", "def inpaint(self, img_slice, mask_slice, min_x, max_x, min_y, max_y, views='lateral'):\n # create binary mask\n mask = np.zeros(img_slice.shape)\n mask[min_x:max_x, min_y:max_y] = 1\n # keep a copy of original to have background later \n img_orig = np.copy(img_slice)\n mask_binary = np.copy(mask)\n\n # rotate image if coronal\n if views=='coronal':\n img_slice = np.rot90(img_slice, axes=(1, 0)) # image is from lat,ax -> ax,lat\n mask_slice = np.rot90(mask_slice, axes=(1, 0))\n mask = np.rot90(mask, axes=(1, 0))\n \n # prepare binary mask for net\n mask = cv2.resize(mask, self.resize_size, interpolation=cv2.INTER_NEAREST)\n mask = torch.Tensor(mask) # gives dtype float32\n mask = mask.unsqueeze(0)\n mask = mask.unsqueeze(0)\n\n # prepare seg mask for net\n mask_slice[mask_slice==self.vertebra_id] = 0\n # resize to network size\n mask_seg = cv2.resize(mask_slice, self.resize_size, interpolation=cv2.INTER_NEAREST)\n mask_seg = np.uint8(np.round(mask_seg)) # just to be sure\n\n mask_seg = self.map_vert_to_class(mask_seg)\n mask_seg = torch.Tensor(mask_seg) # gives dtype float32\n mask_seg_one_hot = torch.nn.functional.one_hot(mask_seg.long(), num_classes=6)\n mask_seg_one_hot = mask_seg_one_hot.permute(2,0,1)\n mask_seg_one_hot = mask_seg_one_hot.unsqueeze(0)\n mask_seg = mask_seg.unsqueeze(0)\n mask_seg = mask_seg.unsqueeze(0)\n\n # prepare img for net \n img_slice = cv2.resize(img_slice, self.resize_size)\n img_slice = np.clip(img_slice, -1024, 3071) # clip to HU units\n img_slice = np.uint8(255*(img_slice+1024)/4095) # normalize to range 0-255 \n img_slice = img_slice[:,:, None]\n img_slice = self.toTensor(img_slice)\n img_slice = img_slice.unsqueeze(0)\n corrupt_img = (1-mask)*img_slice\n\n if self.use_cuda:\n mask = mask.cuda()\n mask_seg = mask_seg.cuda()\n corrupt_img = corrupt_img.cuda() \n\n # inpaint\n if views=='lateral':\n netG = self.netGlat\n elif views=='coronal':\n netG = self.netGcor\n\n # get 
prediction\n with torch.no_grad():\n _, inpainted_mask, inpainted_img = netG(corrupt_img, mask_seg, mask)\n inpainted_mask = self.softmax(inpainted_mask)\n\n #inpainted_mask = torch.argmax(inpainted_mask, dim=1)\n inpainted_img = inpainted_img * mask + corrupt_img * (1. - mask)\n inpainted_mask = inpainted_mask * mask + mask_seg_one_hot * (1. - mask)\n #inpainted_mask = self.map_class_to_vert(inpainted_mask)\n\n # set img back to how it was\n inpainted_img = inpainted_img.squeeze().detach().cpu().numpy()\n inpainted_img = (inpainted_img)*4095 - 1024 # normalize back to HU units \n inpainted_img = cv2.resize(inpainted_img, (self.orig_ax_length, self.orig_ax_length))\n # set mask back\n inpainted_mask = inpainted_mask.squeeze().detach().cpu().numpy()\n inpainted_mask_resized = np.zeros((6, self.orig_ax_length, self.orig_ax_length))\n for i in range(6):\n if views=='coronal':\n inpainted_mask_resized[i,:,:] = np.rot90(cv2.resize(inpainted_mask[i,:,:], (self.orig_ax_length, self.orig_ax_length))) #, interpolation=cv2.INTER_NEAREST)\n else:\n inpainted_mask_resized[i,:,:] = cv2.resize(inpainted_mask[i,:,:], (self.orig_ax_length, self.orig_ax_length)) #, interpolation=cv2.INTER_NEAREST)\n inpainted_mask = inpainted_mask_resized\n \n if views=='coronal':\n inpainted_img = np.rot90(inpainted_img) #, axes=(1, 0))\n\n return inpainted_img, inpainted_mask, mask_binary", "def vis_mechanically_coupled_regions(img_dir,output_dir,data,dbscn_length,dbscn_min_size,display_not_save=False):\n #Read in the image that is segmented/labelled for nuclei\n img=imread(img_dir)\n\n #save plots to show clusters\n fig = plt.figure(figsize=(6, 2))\n ax0 = fig.add_subplot(131)\n ax1 = fig.add_subplot(132)\n ax3 = fig.add_subplot(133)\n #show segmented image labels\n ax0.imshow(img,aspect='auto') \n ax0.axis('off')\n #nuclear centroid color-coded by their orientation\n img1=ax1.scatter(data[\"Y\"], data[\"X\"], c=data[\"angles\"],s=1)\n ax1.set_xlim(0,img.shape[0])\n ax1.set_ylim(img.shape[1],0)\n plt.colorbar(img1)\n ax1.axis('off')\n\n # plot the cluster assignments\n img3=ax3.scatter(data[data[\"clusters\"]> -1][\"Y\"], data[data[\"clusters\"]> -1][\"X\"], \n c=data[data[\"clusters\"]> -1][\"clusters\"],cmap=\"plasma\",s=1)\n ax3.set_xlim(0,img.shape[0])\n ax3.set_ylim(img.shape[1],0)\n ax3.axis('off')\n\n #add titles\n ax0.title.set_text('Segmented Image')\n ax1.title.set_text('Filtered Orientation')\n ax3.title.set_text('Clusters')\n\n if display_not_save:\n plt.show()\n else: \n plt.savefig((output_dir+\"/\"+img_dir.rsplit('/', 1)[-1][:-4]+\"_\"+str(dbscn_length)+\"_\"+ str(dbscn_min_size)+\".png\"),dpi=600, bbox_inches = 'tight',pad_inches = 0)\n fig.clf()\n plt.close(fig)\n plt.close('all')\n \n \n del fig,ax0,ax1,ax3,img1,img3", "def retinanet_object_detection(image_org,\n object_detection_graph, object_detection_session, \n object_detection_threshold=0.5,\n object_count_threshold=1,\n backbone='resnet',\n boundary_filter_size=0, \n output_detection_image=False,\n output_dir=None,\n img_name=None,\n patch_idx=-1):\n\n ret = 0\n\n img_height, img_width = image_org.shape[:2]\n\n if img_name != None:\n img_base_name = os.path.splitext(os.path.basename(img_name))[0]\n if patch_idx >= 0:\n img_base_name = img_base_name + '_' + str(patch_idx)\n\n # copy to draw on\n draw = image_org.copy()\n\n # preprocess each image for network\n if backbone == 'mobilenet' or backbone == 'densenet':\n img = retinanet_preprocess_image(image_org, mode='tf')\n else:\n img = retinanet_preprocess_image(image_org, mode='caffe')\n\n img, 
scale = retinanet_resize_image(img)\n\n #print(scale)\n\n # process image\n start = time.time()\n image_tensor = object_detection_graph.get_tensor_by_name('input_1:0')\n output_tensor_0 = object_detection_graph.get_tensor_by_name('filtered_detections/map/TensorArrayStack/TensorArrayGatherV3:0')\n output_tensor_1 = object_detection_graph.get_tensor_by_name('filtered_detections/map/TensorArrayStack_1/TensorArrayGatherV3:0')\n output_tensor_2 = object_detection_graph.get_tensor_by_name('filtered_detections/map/TensorArrayStack_2/TensorArrayGatherV3:0')\n boxes, scores, labels = object_detection_session.run([output_tensor_0, output_tensor_1, output_tensor_2], feed_dict={image_tensor: np.expand_dims(img, axis=0)})\n #print(\"processing time: \", time.time() - start)\n\n # correct for image scale\n boxes /= scale\n\n #print(scores[0])\n #print(labels[0])\n\n # visualize detections\n detected_bboxes = [] \n for box, score, label in zip(boxes[0], scores[0], labels[0]):\n # scores are sorted so we can break\n if score < object_detection_threshold:\n break\n\n # print(score, label)\n b = box.astype(int)\n \n detected_bboxes.append(b)\n \n if len(detected_bboxes) < object_count_threshold:\n ret = 100 \n\n #print(len(detected_bboxes))\n\n # Using the bounding box centers as object locations and filter out those objects too close to boundaries\n objects = []\n scores_filtered = []\n labels_filtered = []\n object_width_sum = 0\n object_height_sum = 0\n for i in range(len(detected_bboxes)):\n b = detected_bboxes[i]\n if object_too_close_to_boundary(b, img_width=img_width, img_height=img_height, boundary_filter_size=boundary_filter_size):\n continue \n objects.append(b)\n scores_filtered.append(scores[0][i])\n labels_filtered.append(labels[0][i])\n\n object_width_sum += (b[2] - b[0])\n object_height_sum += (b[3] - b[1])\n\n if len(objects) > 0:\n average_object_width = int(object_width_sum / len(objects))\n average_object_height = int(object_height_sum / len(objects))\n else:\n average_object_width = 0\n average_object_height = 0\n\n #print(average_object_width)\n #print(average_object_height) \n\n # Save the detection images\n if output_detection_image:\n if img_width < 1000 or img_height < 1000:\n thickness = 1\n elif img_width < 2000 or img_height < 2000:\n thickness = 2\n else:\n thickness = 3\n\n # draw detection boxes\n for i in range(len(detected_bboxes)):\n b = detected_bboxes[i]\n if object_too_close_to_boundary(b, img_width=img_width, img_height=img_height, boundary_filter_size=boundary_filter_size):\n draw_box(draw, b, color=(0, 0, 255), thickness=thickness) \n elif labels[0][i] >= 1:\n draw_box(draw, b, color=(255, 255, 0), thickness=thickness) \n else:\n draw_box(draw, b, color=(255, 0, 0), thickness=thickness)\n img_output_filename = img_base_name + '_d.jpg'\n cv2.imwrite(os.path.join(output_dir, img_output_filename), draw) \n\n return ret, objects, scores_filtered, labels_filtered, average_object_width, average_object_height", "def main():\n \"\"\"\n This is just for testing the functions\n \"\"\"\n\n x1 = np.array([1, 1, 1, 1, -1, -1, 1, 1, 1])\n x2 = np.array([1, -1, 1, 1, 1, 1, 1, -1, 1])\n x3 = np.array([-1, 1, -1, -1, 1, -1, -1, 1, -1])\n train_set = np.vstack((x1, x2))\n train_set = np.vstack((train_set, x3))\n\n\n params = {\n \"epochs\": 100,\n \"neurons\": len(x1),\n \"learn_method\": 'classic'\n }\n\n hop = hop_net.HopfieldNet(train_set, **params)\n hop.batch_train()\n show_trained(train_set)\n\n x4d = [1,1,1,1,1,1,1,1,1]\n x5d = [1,1,1,1,-1,-1,1,-1,-1]\n x45d = np.vstack((x4d, 
x5d))\n    test_set = np.vstack((x45d, train_set))\n    recalled_set = hop.recall(test_set)\n    for i in range(test_set.shape[0]):\n        show_tested(test_set[i], recalled_set[i])", "def obtain_training_set_shape(para, alg):\n    \n    \n    # Preliminaries\n    z = os.listdir('Images/shapeset') # image directory\n    box_how = [] # the ratio of box's height over its width\n    omega = np.load('omega' + alg + '.npy') # load parameters\n    \n    # Establish a typical bounding box shape\n    for i in range(len(z)):\n        tu = img.imread('Images/shapeset/' + z[i])\n        tu_b = obtain_testing_y(tu, omega, alg)\n        tu_b = tu_b.astype(np.uint8) # convert binary image to a format that @findContours can process\n        \n        # find contours of objects with wanted color\n        contours, hierarchy = cv2.findContours(tu_b, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        # the binary image will be replaced by this binary contour image\n        cv2.drawContours(tu_b, contours, -1, (255, 0, 0), 3) # -1 = draw all contours, (color), thickness of contour lines\n        \n        # get contours edges, namely bounding box\n        tu_b = label(tu_b) # label connected regions of an integer array, so that unconnected contours will be considered as separate regions\n        region = regionprops(tu_b) # identify regions of the labeled image\n        rc = [] # region's centroids\n\n        # get rid of tiny regions\n        for prop in region.copy():\n            if prop.bbox_area < para.bbox_area:\n                region.remove(prop)\n            else:\n                rc.append(prop.centroid)\n        \n        # get rid of repeated regions\n        ind = sorted(range(len(rc)), key = rc.__getitem__) # store element indices of local_centroid tuples before sorting\n        rs = sorted(rc) # sorted region\n\n        rdel = [] # repeated regions to be deleted\n        for i in range(0, len(rs) - 1):\n            if abs(rs[i+1][0] - rs[i][0]) < para.cent_dif and abs(rs[i+1][1] - rs[i][1]) < para.cent_dif:\n                rdel.append(region.copy().pop(ind[i+1]))\n        \n        for i in range(len(rdel)):\n            region.remove(rdel[i])\n        \n        # since only 1 object, only 1 region should be identified\n        if len(region) > 1:\n            for i in range(len(region)):\n                print(region[i].centroid, region[i].bbox_area)\n            plt.imshow(tu_b, cmap = 'gray')\n            fig = plt.get_current_fig_manager()\n            fig.window.setGeometry(400, 100, 3000, 2000)\n            plt.title('You found more than 1 contour on this image!!!', fontsize = 66)\n        else:\n            minr, minc, maxr, maxc = region[0].bbox # max/min row/column coordinates\n            box_how.append((maxr-minr)/(maxc-minc))\n    \n    # Store extreme values\n    max_ratio = max(box_how)\n    min_ratio = min(box_how) \n    \n    return max_ratio, min_ratio", "def test_bkg_regions(i07_nexus: I07Nexus, regions):\n    for i, _ in enumerate(regions):\n        assert i07_nexus.background_regions[i] == regions[i]", "def _choose_regions(self, display_regions=False):\n        dstl = Load_DSTL()\n        if self.class_type == 1:\n            # Select regions where there are buildings (with red roofs)\n            test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n            train_image, train_mask = dstl.extract_region_pos(1900, 3100, cutout_size=[400, 400], object_class=self.class_type)\n            cv_image, cv_mask = dstl.extract_region_pos(950, 1450, cutout_size=[200, 200], object_class=self.class_type)\n        elif self.class_type == 5:\n            train_image, train_mask = dstl.extract_region_pos(1150, 2150, cutout_size=[400, 400], object_class=self.class_type)\n            test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n            cv_image, cv_mask = dstl.extract_region_pos(1900, 1950, cutout_size=[400, 400], object_class=self.class_type)\n        else:\n            pass\n        self.images = 
{'train': train_image, 'cv': cv_image, 'test': test_image}\n self.masks = {'train': train_mask, 'cv': cv_mask, 'test': test_mask}\n if display_regions:\n for key in self.images.keys():\n display_three_band(self.images[key], self.masks[key], colors='green', title='{:} region'.format(key))", "def vision(image):\n vis_map = resize(image, alpha, beta)\n print(\"Resized map from the blue mask\")\n\n world = rotate(vis_map)\n\n plt.figure()\n plt.imshow(world[:, :, ::-1])\n plt.show()\n object_grid, occupancy_grid = detect_object(world)\n print(\"Result of the red mask\")\n plt.figure()\n plt.imshow(occupancy_grid)\n plt.show()\n return object_grid, occupancy_grid, world", "def _sample_regions(region_rois, gt_regions, voc_sign):\n # overlaps: (rois x gt_regions)\n overlaps_gt = bbox_overlaps(\n np.ascontiguousarray(region_rois[:, 1:5], dtype=np.float),\n np.ascontiguousarray(gt_regions[:, :4], dtype=np.float))\n # gt_assignment = overlaps_gt.argmax(axis=1)\n max_overlaps_gt = overlaps_gt.max(axis=1)\n # labels = gt_regions[gt_assignment, 4:]\n fg_inds = np.where(max_overlaps_gt >= cfg.TRAIN.FG_THRESH_REGION)[0]\n bg_inds = np.where(\n (max_overlaps_gt < cfg.TRAIN.BG_THRESH_HI_REGION) & (max_overlaps_gt >= cfg.TRAIN.BG_THRESH_LO_REGION))[0]\n\n # ## Debug Codes\n # print('fg: {} v.s. bg:{}'.format(len(fg_inds), len(bg_inds)))\n # gt_hit_overlap = overlaps_gt.max(axis=0)\n # hit_ids = np.unique(np.where(gt_hit_overlap >= cfg.TRAIN.FG_THRESH_REGION)[0])\n # print('Recall: {} ({}/{})'.format(\n # float(len(hit_ids)) / len(gt_regions), len(hit_ids), len(gt_regions)))\n # The indices that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Select sampled values from various arrays:\n labels = np.ones((len(keep_inds), gt_regions.shape[1] - 4), dtype=np.int64) * voc_sign['end']\n # Here we randomly select regions overlapped with proposed ROI more than 0.7\n gt_assignment = np.zeros(len(fg_inds), dtype=np.int64)\n for i in range(len(fg_inds)):\n gt_assignment[i] = npr.choice(np.where(overlaps_gt[fg_inds[i]] > cfg.TRAIN.FG_THRESH_REGION)[0], size=1)\n labels[i] = gt_regions[gt_assignment[i], 4:]\n\n # add start label to background and padding them with <end> sign\n labels[len(fg_inds):, 0] = voc_sign['start']\n rois = region_rois[keep_inds]\n\n targets_fg = bbox_transform(rois[:len(fg_inds), 1:5], gt_regions[gt_assignment, :4])\n bbox_inside_weights_fg = np.ones(targets_fg.shape, dtype=np.float32) * cfg.TRAIN.BBOX_INSIDE_WEIGHTS\n targets_bg = np.zeros((bg_inds.size, targets_fg.shape[1]), dtype=np.float32)\n bbox_inside_weight_bg = np.zeros(targets_bg.shape, dtype=np.float32)\n bbox_targets = np.vstack([targets_fg, targets_bg])\n bbox_inside_weight = np.vstack([bbox_inside_weights_fg, bbox_inside_weight_bg])\n\n return labels, bbox_targets, bbox_inside_weight, keep_inds", "def get_regions_mask(self, input):", "def resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', 
train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]", "def runNN(selectables_interests=[[0.0, 0.1, 0.6, 0.7], [0.0, 0.5, 0.6, 1]], profile_interests = [0.0, 0.1, 0.2, 0.3]):\n #Intialise a single neuron neural network.\n # Apply vertical normalisation for the pageobject interests\n if len(selectables_interests)>0:\n selectables_interests = normalize(selectables_interests, axis=0, norm='l1')\n else:\n # If there is nothing to compare it to, return the current profile.\n return profile_interests\n # For the given size of the first element in the array (number of interests) add a dimension of such size\n Interest_Array_Inputs = [[] for i in range(len(selectables_interests[0]))]\n Interest_Array_Outputs = [[] for i in range(len(selectables_interests[0]))]\n # Creating instances of 3 dimensional Inputs/Outputs sets\n for ida, ll in enumerate(selectables_interests):\n # For the number of permutations\n current_fuzzy_element_permutation = fuzzy_permutation(ll)\n for index in range(len(current_fuzzy_element_permutation[0])):\n # Insert array inside column Interest for the input, output.\n Interest_Array_Inputs[current_fuzzy_element_permutation[0][index][0]].append(current_fuzzy_element_permutation[0][index][1][:])\n Interest_Array_Outputs[current_fuzzy_element_permutation[1][index][0]].append([current_fuzzy_element_permutation[1][index][1]])\n profile_Interest_Inputs = []\n # Creating the list of inputs (Leaving out an interest to be predicted)\n for index, inputList in enumerate(fuzzy_permutation(profile_interests)[0]):\n profile_Interest_Inputs.append(inputList[1])\n\n collected_interests = []\n # Training on each individual interest\n for i in range(len(profile_interests)):\n collected_interests.append(initialise_train_result(Interest_Array_Inputs[i], Interest_Array_Outputs[i], profile_Interest_Inputs[i]))\n well_ordered_interests = collected_interests[::-1]\n # Returns the outcoming interests.\n return well_ordered_interests", "def test_add_network(self):\n pass", "def demo(sess, net, img_path):\n\n # Load the demo image\n once_time = 0\n\n im = cv2.imread(img_path)\n im = cv2.resize(im, (227, 227))\n # im = im[np.newaxis, :, :, :]\n t = time.time()\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n print('subtract consume time {}s'.format(time.time() - t))\n im = im_orig[np.newaxis, :, :, :]\n # print('>>>>>>>', im.shape[0], im.shape[1])\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n yaw, pitch, roll, yaw_raw, pitch_raw, roll_raw = net.test_image(sess, im)\n # yaw, pitch = net.test_image(sess, im)\n print(yaw, 
pitch, roll)\n # print(yaw_raw)\n # print(pitch_raw)\n # print(roll_raw)\n timer.toc()\n once_time = timer.total_time\n print('Detection took {:.3f}s'.format(timer.total_time))\n\n # cv2_vis(im, CLASSES[1], dets, result_file)\n return yaw, pitch, roll, once_time", "def demonstrate(self, train_path):\n if not os.path.exists(train_path):\n print(\"training json file not exists, program quit\")\n sys.exit()\n with open(train_path) as f:\n json_data = json.load(f)\n self.train_time_stamp_list = json_data['time']\n self.train_image_path_list = json_data['image_path']\n self.train_position_list = json_data['position']\n self.train_angle_list = json_data['angle']\n self.train_semantic_tag_list = json_data['semantic_tag']\n num_images = len(self.train_image_path_list)\n\n # create nodes\n print(\"start demonstrating, totally {} images in demonstration set\".format(num_images))\n self.node_id_list = []\n self.node_semantic_tag_list = []\n self.node_metric_feature_list = []\n self.node_conv_feature_list = []\n last_node_position = np.array([float('inf'), float('inf'), float('inf')])\n for train_index in range(num_images):\n train_position = np.array(self.train_position_list[train_index])\n if np.sqrt(np.sum(np.square(train_position - last_node_position))) > self.min_node_distance:\n last_node_position = train_position\n self.node_id_list.append(train_index)\n train_semantic_tag = self.train_semantic_tag_list[train_index]\n self.node_semantic_tag_list.append(train_semantic_tag)\n node_image_path = self.train_image_path_list[train_index]\n node_image = cv2.imread(node_image_path)\n image_batch = self.process_batch([node_image])\n node_conv_feature, node_metric_feature = self.sess.run([self.conv_features,\n self.metric_features], feed_dict = {self.images_placeholder: image_batch})\n self.node_conv_feature_list.append(node_conv_feature[0])\n self.node_metric_feature_list.append(node_metric_feature[0])\n print(\"{}/{} demonstration image shown\".format(train_index+1, num_images))\n self.node_number = len(self.node_id_list)\n print(\"all nodes created, totally {} of nodes\".format(len(self.node_id_list)))", "def neural_network(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0", "def test_init(self):\n roi = rois.ProfileImageVerticalLineROI()\n if qt.BINDING == \"PyQt5\":\n # the profile ROI + the shape\n self.assertEqual(roi.receivers(roi.sigRegionChanged), 2)", "def test_DARTSNetwork(self):\n genotype1 = '0|0|2|0|0|2|0|0 1|0|0|1|1|0|0|0 0|1|0|0|0|0|2|1--1 7'\n genotype2 = '0|6|0|0|0|2|0|0 1|0|0|0|0|0|1|0 1|1|0|0|0|0|0|0--1 7'\n search_space = {'dil_conv_3x3', 'dil_conv_5x5', 'dil_conv_7x7',\n 'skip_connect', 'clinc_3x3', 'clinc_7x7', 'avg_pool_3x3', 'max_pool_3x3'}\n population = [genotype1, genotype2, genotype2, genotype1]\n\n net = DARTSNetwork(3, 5, population, search_space, 2)\n x = torch.rand(16, 3, 129, 64)\n y = net(x)\n self.assertEqual(list(y.shape), [16, 5])", "def test_quick_method(self):\n data = load_occupancy(return_dataset=True)\n _, y = data.to_numpy()\n\n visualizer = balanced_binning_reference(y, show=False)\n\n assert isinstance(visualizer, BalancedBinningReference)\n self.assert_images_similar(visualizer, tol=0.5)", "def _sample_rois(all_rois, all_scores, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\n\n # print(gt_boxes)\n # fang[-1] ok\n\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n all_rois[:, 1:5].data,\n gt_boxes[:, :4].data)\n max_overlaps, gt_assignment = overlaps.max(1)\n labels = gt_boxes[gt_assignment, [4]]\n\n # Select 
foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = (max_overlaps >= cfg.TRAIN.FG_THRESH).nonzero().view(-1)\n # Guard against the case when an image has fewer than fg_rois_per_image\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = ((max_overlaps < cfg.TRAIN.BG_THRESH_HI) + (max_overlaps >= cfg.TRAIN.BG_THRESH_LO) == 2).nonzero().view(-1)\n\n # Small modification to the original version where we ensure a fixed number of regions are sampled\n if fg_inds.numel() > 0 and bg_inds.numel() > 0:\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.numel())\n fg_inds = fg_inds[torch.from_numpy(npr.choice(np.arange(0, fg_inds.numel()), size=int(fg_rois_per_image), replace=False)).long().cuda()]\n bg_rois_per_image = rois_per_image - fg_rois_per_image\n to_replace = bg_inds.numel() < bg_rois_per_image\n bg_inds = bg_inds[torch.from_numpy(npr.choice(np.arange(0, bg_inds.numel()), size=int(bg_rois_per_image), replace=to_replace)).long().cuda()]\n elif fg_inds.numel() > 0:\n to_replace = fg_inds.numel() < rois_per_image\n fg_inds = fg_inds[torch.from_numpy(npr.choice(np.arange(0, fg_inds.numel()), size=int(rois_per_image), replace=to_replace)).long().cuda()]\n fg_rois_per_image = rois_per_image\n elif bg_inds.numel() > 0:\n to_replace = bg_inds.numel() < rois_per_image\n bg_inds = bg_inds[torch.from_numpy(npr.choice(np.arange(0, bg_inds.numel()), size=int(rois_per_image), replace=to_replace)).long().cuda()]\n fg_rois_per_image = 0\n else:\n import pdb\n pdb.set_trace()\n\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = torch.cat([fg_inds, bg_inds], 0)\n \n # Select sampled values from various arrays:\n labels = labels[keep_inds].contiguous()\n # Clamp labels for the background RoIs to 0\n labels[int(fg_rois_per_image):] = 0\n # print(int(fg_rois_per_image)) -> 16\n\n rois = all_rois[keep_inds].contiguous()\n roi_scores = all_scores[keep_inds].contiguous()\n\n\n\n bbox_target_data, front_2_1_points_targets_data, front_2_2_points_targets_data, front_center_targets_data, \\\n back_2_1_points_targets_data, back_2_2_points_targets_data, back_center_targets_data, center_targets_data\\\n = _compute_targets(rois[:, 1:5].data, gt_boxes[gt_assignment[keep_inds]][:, :4].data, labels.data,\\\n gt_boxes[gt_assignment[keep_inds]][:, 5:9].data, gt_boxes[gt_assignment[keep_inds]][:, 9:13].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 13:15].data, gt_boxes[gt_assignment[keep_inds]][:, 15:19].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 19:23].data, gt_boxes[gt_assignment[keep_inds]][:, 23:25].data, \\\n gt_boxes[gt_assignment[keep_inds]][:, 25:27].data)\n\n bbox_targets, bbox_inside_weights, front_2_1_points_targets, front_2_2_points_targets, front_center_targets, \\\n back_2_1_points_targets, back_2_2_points_targets, back_center_targets, center_targets, front_center_inside_weights \\\n = _get_bbox_regression_labels(bbox_target_data, num_classes, front_2_1_points_targets_data, front_2_2_points_targets_data, \\\n front_center_targets_data, back_2_1_points_targets_data, back_2_2_points_targets_data, back_center_targets_data, center_targets_data)\n \n \n\n return labels, rois, roi_scores, bbox_targets, bbox_inside_weights, front_2_1_points_targets, front_2_2_points_targets, front_center_targets, \\\n back_2_1_points_targets, back_2_2_points_targets, back_center_targets, center_targets, front_center_inside_weights", "def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\r\n # MP:\r\n # overlaps: (no_rois x no_gt_bbox) 
each row gives the overlap of the proposed region with the gt boxes. Overlap is measured as: (overlapping area)/(union area).\r\n # gt_assignment: determines which of the gt boxes has more overlap with the regions\r\n # max_overlaps: takes the maximum overlap of a region\r\n # labels: defines which which gt box corresponds best with the region and assigns its label to the region\r\n # fg_rois_per_image = 8\r\n # overlaps: (rois x gt_boxes)\r\n\r\n # MP: bbox_overlaps rewritten as c_bbox_overlaps\r\n #overlaps =c_bbox_overlaps(np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\r\n # \t\t np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\r\n overlaps = bbox_overlaps(np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\r\n \t\t np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\r\n # MP: which column index has maximum value\r\n gt_assignment = overlaps.argmax(axis=1)\r\n max_overlaps = overlaps.max(axis=1)\r\n labels = gt_boxes[gt_assignment, 4]\r\n\r\n\r\n # MP: Extract RoIs where overlap >= FG_THRESH\r\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\r\n\r\n # Guard against the case when an image has fewer than fg_rois_per_image (i.e. 8)\r\n fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)\r\n\r\n # Sample foreground regions without replacement\r\n if fg_inds.size > 0:\r\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_this_image), replace=False)\r\n\r\n # MP: Extract RoIs where overlap in [BG_THRESH_LO, BG_THRESH_HI), i.e. [0.0, 0.5)\r\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\r\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\r\n\r\n # Compute number of background RoIs to take from this image (guarding\r\n # against there being fewer than desired)\r\n # MP: Take the no of bg_inds such that fg_inds.shape + bg_inds.shape = 32\r\n bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image\r\n bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)\r\n if bg_inds.size > 0:\r\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_this_image), replace=False)\r\n\r\n\r\n # MP: concatenate the fg_inds and bg_inds, such that keep_inds.shape = 32\r\n keep_inds = np.append(fg_inds, bg_inds)\r\n # MP: obtain the labels set the ones corresponding to bg_inds to zero\r\n labels = labels[keep_inds]\r\n labels[int(fg_rois_per_this_image):] = 0\r\n\r\n # MP: select the 32 rois (fg & bg) from the 2000+ rois with the keep_inds\r\n rois = all_rois[keep_inds]\r\n # MP: fg rois\r\n rois_pos = np.zeros((fg_inds.size, 5), dtype=np.float32) #because return rois_pos as top ---> allocate memory for it\r\n rois_pos[:, :] = all_rois[fg_inds]\r\n gt_assignment_pos = gt_assignment[fg_inds]\r\n\r\n # MP: compute diff to approximate bbox to ground truth\r\n bbox_target_data = _compute_targets(\r\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\r\n\r\n # MP: set the diff values in a matrix where each row corresponds to a foreground bbox\r\n # and the values are stored starting at the index of the label.\r\n # Therefore number of columns: 4*(no labels)\r\n # The bg bboxes are also included in rows, but have all values equal to zero.\r\n bbox_targets, bbox_inside_weights = \\\r\n _get_bbox_regression_labels(bbox_target_data, num_classes)\r\n\r\n '''\r\n # MP: printing and saving files\r\n print \"overlaps with size {}: {}\".format(overlaps.shape, overlaps)\r\n print \"gt_assignment with size {}: {}\".format(gt_assignment.shape, gt_assignment)\r\n print \"max_overlaps with size{}: {}\".format(max_overlaps.shape, max_overlaps)\r\n 
print \"labels with size{}: {}\".format(labels.shape, labels)\r\n    print \"bg_inds with size{}: {}\".format(bg_inds.shape, bg_inds)\r\n    print \"bg_rois_per_this_image: {}\".format(bg_rois_per_this_image)\r\n    print \"bg_inds with shape {}: {}\".format(bg_inds.shape, bg_inds)\r\n    print \"fg_inds with size {}: {}\".format(fg_inds.shape, fg_inds)\r\n    print \"labels with shape {}: {}\".format(labels.shape,labels)\r\n    print \"rois with shape {}: {}\".format(rois.shape, rois)\r\n    print \"rois_pos with shape {}: {}\".format(rois_pos.shape, rois_pos)\r\n    print \"labels with shape {}: {}\".format(labels.shape,labels)\r\n    print \"rois_pos with shape {}: {}\".format(rois_pos.shape, rois_pos)\r\n    print \"gt_assignment_pos with shape {}: {}\".format(gt_assignment_pos.shape, gt_assignment_pos)\r\n    print \"bbox_target_data with shape {}: {}\".format(bbox_target_data.shape, bbox_target_data)\r\n    print \"diff: {}\".format(rois_pos[:,:] + bbox_target_data[0:fg_inds.size,:])\r\n    print \"bbox_targets with size {}: {}\".format(bbox_targets.shape, bbox_targets)\r\n    print \"bbox_inside_weights with size {}: {}\".format(bbox_inside_weights.shape, bbox_inside_weights)\r\n\r\n    np.savetxt('bbox_targets.txt', bbox_targets, delimiter=',')\r\n    np.savetxt('bbox_inside_weights.txt', bbox_inside_weights, delimiter=',')\r\n    '''\r\n\r\n    return labels, rois, bbox_targets, bbox_inside_weights, gt_boxes[gt_assignment[keep_inds], :], rois_pos, gt_assignment_pos", "def test_propene(self):\n    def draw(image: ShapeImage):\n        image.add_line((400, 400), (500, 400))\n        image.add_line((400, 410), (500, 410))\n        image.add_line((500, 400), (587, 350))\n\n    self._test_shape(\n        image_size=(1000, 1000),\n        expected_corners=np.array([\n            [[400, 400]],\n            [[500, 400]],\n            [[587, 350]]\n        ]),\n        drawer=draw,\n        expected_edges=np.array([\n            [[400, 400, 500, 400]],\n            [[400, 410, 500, 410]],\n            [[500, 400, 587, 350]]\n        ])\n    )", "def skeletonize(data,subscriber = 0):\n    nx,ny=data.shape\n    #zero padding\n    image = zeros((nx+2,ny+2),'int16')\n    image[:,:] = IP.BACKGROUND_COLOR\n    image[1:-1,1:-1]=data\n\n    erosionComplete = False\n    runs = 0\n    isCorner = zeros((nx+2,ny+2),'bool')\n    while not erosionComplete:\n        ruleI = (image == IP.FEATURE_COLOR)\n        XFeat, YFeat = ruleI.nonzero()\n        numberFeatures = len(XFeat)\n        erosedPixels = 0\n        if runs == 0:\n            progressbar = progress(numberFeatures)\n        neighbourhood = zeros((nx+2,ny+2,3),'int16')\n        for x,y in zip(XFeat.tolist(),YFeat.tolist()):\n            fingerprint = checkNeighbours(image[x-1:x+2,y-1:y+2])\n            neighbourhood[x,y,:]=numpy.array(fingerprint)\n\n        ruleII = neighbourhood[:,:,1]>=1\n        ruleIII = neighbourhood[:,:,0]> 1\n        border = (ruleI & ruleII & ruleIII)\n        #ruleIV and ruleV\n        XBord, YBord = border.nonzero()\n        XBord2 = []\n        YBord2 = []\n        for x,y in zip(XBord.tolist(),YBord.tolist()):\n            if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n                image[x,y] = IP.BACKGROUND_COLOR\n                erosedPixels += 1\n                subscriber %= progressbar.step()\n            else:\n                XBord2.append(x)\n                YBord2.append(y)\n        for x,y in zip(XBord2,YBord2):\n            if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n                image[x,y] = IP.BACKGROUND_COLOR\n                erosedPixels += 1\n                subscriber %= progressbar.step()\n        if erosedPixels == 0:\n            erosionComplete = True\n            subscriber %= 100.\n        else:\n            xCorn, yCorn = (neighbourhood[:,:,2] > 0 ).nonzero()\n            for x,y in zip(xCorn.tolist(),yCorn.tolist()):\n                if neighbourhood[x,y,2] == 1:\n                    isCorner[x+1,y-1] = True\n                elif neighbourhood[x,y,2] == 2:\n                    isCorner[x+1,y+1] = True\n                elif 
neighbourhood[x,y,2] == 3:\n isCorner[x-1,y+1] = True\n elif neighbourhood[x,y,2] == 4:\n isCorner[x-1,y-1] = True\n runs += 1\n return image[1:-1,1:-1].copy()", "def test_image_task(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array", "def __call__(self, src, label):\r\n \"\"\"color distort\"\"\"\r\n # img = random_color_distort(src)\r\n\r\n # print(\"previous label shape = \", label.shape)\r\n target = np.zeros(shape=(label.shape[0],))\r\n\r\n \"\"\"Pyramid Anchor sampling\"\"\"\r\n img, boxes, label = self.random_baiducrop(src, label[:, :4], target)\r\n # print(\"label shape = \", label.shape)\r\n # print('boxes shape =', boxes.shape)\r\n bbox = boxes\r\n # img = mx.nd.array(img)\r\n\r\n \"\"\"color distort\"\"\"\r\n img = mx.nd.array(img)\r\n img = random_color_distort(img)\r\n\r\n # \"\"\"random crop, keep aspect ration=1\"\"\"\r\n # h, w, _ = img.shape\r\n # bbox, crop_size = random_crop_with_constraints(label, (w, h))\r\n # x_offset, y_offset, new_width, new_height = crop_size\r\n # img = mx.image.fixed_crop(img, x_offset, y_offset, new_width, new_height)\r\n\r\n \"\"\"resize with random interpolation\"\"\"\r\n h, w, _ = img.shape\r\n interp = np.random.randint(0, 5)\r\n img = gimage.imresize(img, self._width, self._height, interp=interp)\r\n bbox = gbbox.resize(bbox, (w, h), (self._width, self._height))\r\n\r\n \"\"\"random horizontal flip\"\"\"\r\n h, w, _ = img.shape\r\n img, flips = gimage.random_flip(img, px=0.5)\r\n bbox = gbbox.flip(bbox, (w, h), flip_x=flips[0])\r\n\r\n \"\"\"To Tensor & Normalization\"\"\"\r\n img = mx.nd.image.to_tensor(img)\r\n img = 
mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n\r\n if self._anchors is None:\r\n return img, bbox\r\n\r\n # @TODO: generating training target so cpu workers can help reduce the workload on gpu\r\n face_anchors, head_anchors, body_anchors = self._anchors\r\n gt_bboxes = mx.nd.array(bbox[:, :4]).expand_dims(0)\r\n gt_ids = mx.nd.zeros((1, gt_bboxes.shape[1], 1), dtype=gt_bboxes.dtype)\r\n\r\n face_cls_targets, face_box_targets, _ = self._target_generator(\r\n face_anchors, None, gt_bboxes, gt_ids)\r\n\r\n head_cls_targets, head_box_targets, _ = self._target_generator(\r\n head_anchors, None, gt_bboxes, gt_ids)\r\n\r\n body_cls_targets, body_box_targets, _ = self._target_generator(\r\n body_anchors, None, gt_bboxes, gt_ids)\r\n\r\n return img, \\\r\n face_cls_targets[0], head_cls_targets[0], body_cls_targets[0], \\\r\n face_box_targets[0], head_box_targets[0], body_box_targets[0]", "def proposal_target_layer(object_rois, gt_objects, gt_relationships, gt_regions,\n num_classes, voc_sign):\n\n # object_rois: (1 x H x W x A, 5) [0, x1, y1, x2, y2]\n # region_rois: (1 x H x W x A, 5) [0, x1, y1, x2, y2]\n # gt_objects: (G_obj, 5) [x1 ,y1 ,x2, y2, obj_class] float\n # gt_relationships: (G_obj, G_obj) [pred_class] int (-1 for no relationship)\n # gt_regions: (G_region, 4+40) [x1, y1, x2, y2, word_index] (imdb.eos for padding)\n # # gt_ishard: (G_region, 4+40) {0 | 1} 1 indicates hard\n # # dontcare_areas: (D, 4) [ x1, y1, x2, y2]\n # n_classes_obj\n # n_classes_pred\n # is_training to indicate whether in training scheme\n # overlaps: (rois x gt_boxes)\n ########################\n ## sample object ROIs ##\n ########################\n num_images = 1\n object_rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images\n object_keep_inds, object_gt_assignment, object_fg_indicator, object_fg_duplicate = \\\n _sample_rois(object_rois[:, 1:5], gt_objects[:, :4], object_rois_per_image, cfg.TRAIN.FG_FRACTION)\n object_labels = gt_objects[object_gt_assignment, 4]\n # Clamp labels for the background RoIs to 0\n object_labels[np.logical_not(object_fg_indicator)] = 0\n object_selected_rois = object_rois[object_keep_inds]\n\n\n object_bbox_targets_temp = bbox_transform(object_selected_rois[:, 1:5], gt_objects[object_gt_assignment, :4])\n object_bbox_target_data = np.hstack(\n (object_labels[:, np.newaxis], object_bbox_targets_temp)).astype(np.float32, copy=False)\n object_bbox_targets, object_bbox_inside_weights = \\\n _get_bbox_regression_labels(object_bbox_target_data, num_classes)\n\n ##########################\n ## sample relationships ##\n ##########################\n\n\n rel_per_image = int(cfg.TRAIN.BATCH_SIZE_RELATIONSHIP / num_images)\n rel_bg_num = rel_per_image\n object_fg_inds = object_keep_inds[object_fg_indicator]\n if object_fg_inds.size > 0:\n id_i, id_j = np.meshgrid(xrange(object_fg_inds.size), xrange(object_fg_inds.size), indexing='ij') # Grouping the input object rois\n id_i = id_i.reshape(-1)\n id_j = id_j.reshape(-1)\n pair_labels = gt_relationships[object_gt_assignment[id_i], object_gt_assignment[id_j]]\n fg_id_rel = np.where(pair_labels > 0)[0]\n rel_fg_num = fg_id_rel.size\n rel_fg_num = int(min(np.round(rel_per_image * cfg.TRAIN.FG_FRACTION_RELATIONSHIP), rel_fg_num))\n # print 'rel_fg_num'\n # print rel_fg_num\n if rel_fg_num > 0:\n fg_id_rel = npr.choice(fg_id_rel, size=rel_fg_num, replace=False)\n else:\n fg_id_rel = np.empty(0, dtype=int)\n rel_labels_fg = pair_labels[fg_id_rel]\n sub_assignment_fg = id_i[fg_id_rel]\n obj_assignment_fg = id_j[fg_id_rel]\n sub_list_fg = 
object_fg_inds[sub_assignment_fg]\n        obj_list_fg = object_fg_inds[obj_assignment_fg]\n        rel_bg_num = rel_per_image - rel_fg_num\n\n    phrase_labels = np.zeros(rel_bg_num, dtype=np.float)\n    sub_assignment = npr.choice(xrange(object_keep_inds.size), size=rel_bg_num, replace=True)\n    obj_assignment = npr.choice(xrange(object_keep_inds.size), size=rel_bg_num, replace=True)\n    if (sub_assignment == obj_assignment).any(): # an ugly hack for the issue\n        obj_assignment[sub_assignment == obj_assignment] = (obj_assignment[sub_assignment == obj_assignment] + 1) % object_keep_inds.size\n\n\n\n    if object_fg_inds.size > 0:\n        phrase_labels = np.append(rel_labels_fg, phrase_labels, )\n        sub_assignment = np.append(sub_assignment_fg, sub_assignment,)\n        obj_assignment = np.append(obj_assignment_fg, obj_assignment, )\n\n    object_selected_rois, region_selected_rois, mat_object, mat_relationship, mat_region = \\\n        _setup_connection(object_selected_rois, nms_thres=cfg.TRAIN.REGION_NMS_THRES,\n                          sub_assignment_select = sub_assignment,\n                          obj_assignment_select = obj_assignment)\n    #print '[Training] Region ROI num: {0:d}'.format(region_selected_rois.shape[0])\n    region_labels, bbox_targets_region, bbox_inside_weight_region, region_assignments = \\\n        _sample_regions(region_selected_rois, gt_regions, voc_sign)\n\n    # assert region_labels.shape[1] == cfg.TRAIN.LANGUAGE_MAX_LENGTH\n    object_labels = object_labels.reshape(-1, 1)\n    phrase_labels = phrase_labels.reshape(-1, 1)\n    object_fg_duplicate = np.stack([object_fg_indicator, object_fg_duplicate], axis=1)\n\n    return (object_labels, object_selected_rois, object_bbox_targets, object_bbox_inside_weights, mat_object, object_fg_duplicate), \\\n        (phrase_labels, mat_relationship), \\\n        (region_selected_rois[:, :5], mat_region), \\\n        (region_labels, bbox_targets_region, bbox_inside_weight_region, region_assignments)", "def test(neuralnet, dataloader):\n    neuralnet.eval()\n    batch_transform = data.BatchTransform()\n\n    idx = 0\n    for iteration, batch in enumerate(dataloader):\n        with torch.no_grad():\n            im = batch[0].requires_grad_(False).to(DEVICE)\n            keypts = batch[1].requires_grad_(False).to(DEVICE)\n\n            deformed_batch = batch_transform.exe(im, landmarks=keypts)\n            im, future_im, mask = deformed_batch['image'], deformed_batch['future_image'], deformed_batch['mask']\n\n            future_im_pred, gauss_mu, _ = neuralnet(im, future_im)\n\n            predict = future_im_pred.data.cpu().numpy().transpose(0, 2, 3, 1)\n            gauss_mu = gauss_mu.data.cpu().numpy()\n            # gauss_map = gauss_map.data.cpu().numpy()\n            future_im = future_im.data.cpu().numpy().transpose(0, 2, 3, 1)\n\n            os.makedirs('testcheck', exist_ok=True)\n            fig_path = path.join('testcheck', 'fig_{}.png'.format(iteration))\n            utils.savegrid(fig_path, future_im, predict, gauss_mu=gauss_mu, name='deform')\n\n            idx += im.shape[0]\n\n    neuralnet.train()\n    return idx", "def region_of_interest(img):\n    # defining a blank mask to start with\n    y, x = img.shape[:2]\n    # print(\"roi_width : {}, roi_height : {}\".format(x, y))\n\n    # set the left and right roi at the same time; the visible area changes with the extent of each region\n    left1 = [int(0.1*x), int(y)]\n    left2 = [int(0.1*x), int(0.1*y)]\n    left3 = [int(0.4*x), int(0.1*y)]\n    left4 = [int(0.4*x), int(y)]\n\n    right1 = [int(0.7*x), int(y)]\n    right2 = [int(0.7*x), int(0.1*y)]\n    right3 = [int(0.9*x), int(0.1*y)]\n    right4 = [int(0.9*x), int(y)]\n\n    middle = [int(0.2*x), int(y)]\n\n    shaper = np.array([left1, left2, left3, left4, right1, right2, right3, right4, middle])\n\n    mask = np.zeros_like(img)\n\n    # fill with a single intensity whether the image is grayscale or color\n    if len(img.shape) > 2:\n        ignore_mask_color = (255, 255, 255)\n    else:\n        ignore_mask_color = 255\n\n    # paint everything black except the identified region\n    cv2.fillPoly(mask, np.int32([shaper]), ignore_mask_color)\n\n    # bitwise_and operation\n    masked_image = cv2.bitwise_and(img, mask)\n    # cv2.imshow('ROI', masked_image)\n    # cv2.waitKey()\n\n    return masked_image", "def ridge_detection(im, **kwargs):\n\n    kernelH = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]]).ravel()\n    kernelV = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]]).ravel()\n    kernelD = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]]).ravel()\n\n    def _ridge(section):\n\n        sH = section[kernelH]\n        sV = section[kernelV]\n        sD = section[kernelD]\n\n        return sum((sH.max() == sH[1], sV.max() == sV[1], sD.max() == sD[1])) > 1\n\n    return ndimage.filters.generic_filter(im, _ridge, size=3, origin=(1,1), **kwargs)", "def gen_test_output(sess, logits, keep_prob, image_pl, data_folder, image_shape):\n    for image_file in glob(os.path.join(data_folder, 'image_2', '*.png')):\n        image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n\n        im_softmax = sess.run(\n            [tf.nn.softmax(logits)],\n            {keep_prob: 1.0, image_pl: [image]})\n        im_softmax_road = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])\n        segmentation_road = (im_softmax_road > 0.5).reshape(image_shape[0], image_shape[1], 1)\n        \n        im_softmax_other_road = im_softmax[0][:, 2].reshape(image_shape[0], image_shape[1])\n        segmentation_other_road = (im_softmax_other_road > 0.5).reshape(image_shape[0], image_shape[1], 1)\n        \n        # green road\n        mask_road = np.dot(segmentation_road, np.array([[0, 255, 0, 127]]))\n        mask_road = scipy.misc.toimage(mask_road, mode=\"RGBA\")\n        \n        # blue other_road\n        mask_other_road = np.dot(segmentation_other_road, np.array([[0, 0, 255, 127]]))\n        mask_other_road = scipy.misc.toimage(mask_other_road, mode=\"RGBA\")\n        \n        street_im = scipy.misc.toimage(image)\n        street_im.paste(mask_road, box=None, mask=mask_road)\n        street_im.paste(mask_other_road, box=None, mask=mask_other_road)\n\n        yield os.path.basename(image_file), np.array(street_im)", "def runTest(self):\r\n    self.setUp()\r\n    self.test_CreateROI1()", "def ridgeDetection(input_img, input_mode, method='meijering', black_ridges=False, output_path=None):\n\n    if input_mode == 'fp':\n        np_img = loadImage(input_img)\n    elif input_mode == 'np':\n        np_img = input_img\n    else:\n        return (input_mode, \" is not a supported mode. 
Supported modes are 'np' or 'fp'.\")\n\n    if method == 'meijering':\n        np_ridges = filters.meijering(np_img, black_ridges=black_ridges)\n\n    if output_path is not None:\n        saveImage(np_ridges, output_path)\n    return(np_ridges)", "def _pnet_helper(self, image, scale):\n    n, c, h, w = image.size()\n\n    resized = nn.functional.interpolate(image,\n                                        scale_factor=scale,\n                                        mode='bilinear',\n                                        align_corners=False)\n\n    offsets, scores = self.pnet(resized)\n    scores = scores[:, 1, :, :]\n    boxes = compute_boxes(scores, offsets, scale, self.score_thresholds[0])\n    # boxes: [batch_index, (4: bbox), score, (4: offset)]\n\n    if boxes is None:\n        return None\n\n    kept = batched_nms(boxes, n, .5, mode='union')\n\n    return kept", "def run():\n    cons_in, soln_in, disc = make_discriminator()\n    target, loss, accuracy, optimiser = make_training_nodes(disc)\n    training_set_sampler = make_sampler(cons_in, soln_in, target)\n    test_set_sampler = make_sampler(cons_in, soln_in, target)\n\n    disc.get_session().run(tf.global_variables_initializer())\n\n    fit(\n        disc.get_session(),\n        optimiser,\n        training_set_sampler,\n        250,\n        2000,\n        32,\n        [(\"Loss\", loss), (\"Accuracy\", accuracy)],\n    )\n\n    print(\n        \"Validation accuracy: {}\".format(\n            disc.feed(accuracy, test_set_sampler.batch(1024))\n        )\n    )\n\n    plot_surface(\n        evaluate_surface(\n            lambda x, y: Circles.solve([0, 0, 0.25], [x, y, 0.25]),\n            (-1, 1, 0.08),\n            (-1, 1, 0.08),\n        ),\n        x_label=\"Solution x\",\n        y_label=\"Solution y\",\n        z_label=\"p(satisfied | x, y)\",\n    )\n\n    plot_surface(\n        evaluate_surface(\n            lambda x, y: disc.feed(\n                disc.output_node, {cons_in: [[0, 0, 0.25]], soln_in: [[x, y, 0.25]]}\n            )[0],\n            (-1, 1, 0.08),\n            (-1, 1, 0.08),\n        ),\n        x_label=\"Solution x\",\n        y_label=\"Solution y\",\n        z_label=\"p(satisfied | x, y)\",\n    )", "def training_mask_generation(img_pan_filename, input_geojson_filename, labels):\r\n    with rasterio.open(img_pan_filename) as f:\r\n        metadata_pan = f.profile\r\n        img_pan = f.read(1)\r\n    \r\n    mask = np.zeros((img_pan.shape[0], img_pan.shape[1]))\r\n    \r\n    xres = metadata_pan['transform'][0]\r\n    ulx = metadata_pan['transform'][2]\r\n    yres = metadata_pan['transform'][4]\r\n    uly = metadata_pan['transform'][5]\r\n    \r\n    lrx = ulx + (metadata_pan['width'] * xres) \r\n    lry = uly - (metadata_pan['height'] * abs(yres))\r\n\r\n    polygons = json.load(open(input_geojson_filename))\r\n    \r\n    for polygon in range(len(polygons['features'])):\r\n        layer_num = labels.index(str(polygons['features'][polygon]['properties']['Label']))\r\n        coords = np.array(polygons['features'][polygon]['geometry']['coordinates'][0][0]) \r\n        xf = ((metadata_pan['width']) ** 2 / (metadata_pan['width'] + 1)) / (lrx - ulx)\r\n        yf = ((metadata_pan['height']) ** 2 / (metadata_pan['height'] + 1)) / (lry - uly)\r\n        coords[:, 1] = yf * (coords[:, 1] - uly)\r\n        coords[:, 0] = xf * (coords[:, 0] - ulx) \r\n        position = np.round(coords).astype(np.int32)\r\n        cv2.fillConvexPoly(mask, position, layer_num)\r\n    \r\n    return np.expand_dims(mask, axis = 2)", "def demo(sess, net, image_name):\n    # Load the demo image\n    global CLASS_NAME\n    global CHECK\n    CHECK = 0\n    # location of the captured screenshot to read\n    # im_file = Cnn_path + \"data/VOCdevkit2007/VOC2007/JPEGImages/\" + image_name\n    curpath = os.path.dirname(os.path.realpath(__file__))\n    im_file = curpath + \"\\\\data\\\\VOCdevkit2007\\\\VOC2007\\\\JPEGImages\\\\\" + image_name\n    im = cv2.imread(im_file)\n\n    # Detect all object classes and regress object bounds\n    timer = Timer()\n    timer.tic()\n    scores, boxes = im_detect(sess, net, im)\n    timer.toc()\n    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n\n    # Visualize detections for each class\n    # score threshold, used when drawing the final candidate boxes; only boxes with score > thresh are drawn\n    CONF_THRESH = 0.5\n    # non-maximum suppression threshold, used to remove duplicate candidate boxes\n    NMS_THRESH = 0.3\n    # use enumerate to get the class index cls_ind and class name cls of each class in CLASSES\n    for cls_ind, cls in enumerate(CLASSES[1:]):\n        cls_ind += 1 # because we skipped background\n        # take out bbox and score\n        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n        cls_scores = scores[:, cls_ind]\n        # store bbox and score together in dets\n        dets = np.hstack((cls_boxes,\n                          cls_scores[:, np.newaxis])).astype(np.float32)\n        # apply non-maximum suppression to get the suppressed dets\n        keep = nms(dets, NMS_THRESH)\n        dets = dets[keep, :]\n        # draw the boxes\n        vis_detections(im, cls, dets, thresh=CONF_THRESH)\n    if CHECK == 0:\n        CLASS_NAME = \"None\"\n        # im = im[:, :, (2, 1, 0)]\n        # fig, ax = plt.subplots()\n        # ax.imshow(im, aspect='equal')\n        # ax.set_title(\"None\",fontsize=10)\n        # plt.axis('off')\n        # plt.tight_layout()\n        # plt.draw()\n        # RES[INDS.__getitem__(image_name.split(\"_\")[0])][INDS.__getitem__(CLASS_NAME)]+=1\n        # plt.savefig(\"./output/\"+CLASS_NAME+\"_\" + image_name)\n        # plt.savefig(\"./output/\" + image_name)\n    MAX_SCORE[0] = 0.0", "def test_predictor():", "def get_classification_simulator(self, image):\n\n    r_channel = image[:,:,2]\n    g_channel = image[:,:,1]\n\n\n\n    # Threshold color channel\n    s_rgy_min = 50\n    s_thresh_min = 245\n    s_thresh_max = 255\n    \n    #s_binary = np.zeros_like(r_channel)\n    r_binary = np.zeros_like(r_channel)\n    g_binary = np.zeros_like(r_channel)\n    y_binary = np.zeros_like(r_channel)\n    \n    #s_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) | ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n    \n    \n    r_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & (g_channel <= s_rgy_min)] = 1\n    g_binary[((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max)) & (r_channel <= s_rgy_min)] = 1\n    y_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n    \n\n    #res = cv2.bitwise_and(img,img,mask = s_binary)\n    \n    #maxx=image.shape[1]\n    maxy=image.shape[0]\n    \n    y_top=0\n    window_size_y=50\n    y_bottom=y_top+window_size_y\n    \n    max_color=0\n    tf_color=TrafficLight.UNKNOWN\n    \n    while (y_bottom< maxy):\n        #print(img[y_top:y_bottom,:,:])\n        rs= r_binary[y_top:y_bottom,:].sum()\n        gs= g_binary[y_top:y_bottom,:].sum()\n        ys= y_binary[y_top:y_bottom,:].sum()\n        if (rs>max_color):\n            max_color=rs\n            tf_color=TrafficLight.RED\n        if (gs>max_color):\n            max_color=gs\n            tf_color=TrafficLight.GREEN\n        if (ys>max_color):\n            max_color=ys\n            tf_color=TrafficLight.YELLOW\n        y_top+=window_size_y\n        y_bottom+=window_size_y\n    \n    if (max_color<100):\n        tf_color=TrafficLight.UNKNOWN\n    \n\n\n    return tf_color", "def demo(net, image_name,num_class,save_ff):\r\n\r\n    # Load the demo image\r\n    #im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\r\n    im_file=image_name\r\n    im = cv2.imread(im_file)\r\n\r\n    # Detect all object classes and regress object bounds\r\n    timer = Timer()\r\n    timer.tic()\r\n    #for zzz in range(100):\r\n    scores, boxes = im_detect(net, im)\r\n    timer.toc()\r\n    print ('Detection took {:.3f}s for '\r\n           '{:d} object proposals').format(timer.total_time, boxes.shape[0])\r\n\r\n    # Visualize detections for each class\r\n    CONF_THRESH = 0.35\r\n    NMS_THRESH = 0.3\r\n    thresh=CONF_THRESH\r\n    for cls_ind, cls in enumerate(range(num_class)):#CLASSES[1:]\r\n        cls_ind += 1 # because we skipped background\r\n        # cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\r\n        # cls_scores = scores[:, cls_ind]\r\n        # dets = 
np.hstack((cls_boxes,\r\n # cls_scores[:, np.newaxis])).astype(np.float32)\r\n inds = np.where(scores[:, cls_ind] > thresh)[0]\r\n cls_scores = scores[inds, cls_ind]\r\n if cfg.TEST.AGNOSTIC:\r\n cls_boxes = boxes[inds, 4:8]\r\n else:\r\n cls_boxes = boxes[inds, cls_ind*4:(cls_ind+1)*4]\r\n dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\r\n .astype(np.float32, copy=False)\r\n keep = nms(dets, NMS_THRESH)\r\n dets = dets[keep, :]\r\n #vis_detections(im, cls, dets, thresh=CONF_THRESH)\r\n inds = np.where(dets[:, -1] >= thresh)[0]\r\n if len(inds) == 0:\r\n continue\r\n\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n for i in inds:\r\n bbox = dets[i, :4]\r\n score = dets[i, -1]\r\n print bbox,score,cls\r\n cv2.rectangle(im_tmp, (bbox[0],bbox[1]), (bbox[2],bbox[3]), (0,0,255),2)\r\n #save_ff=\"/storage2/liushuai/faster_rcnn/FasterRCNN-Encapsulation-Cplusplus/faster_cxx_lib_ev2641/test_result.jpg\"\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n cv2.imwrite(save_ff,im_tmp)\r\n #save_pic(im, cls, dets, thresh=CONF_THRESH,save_ff)\r", "def test_boxnet(self):\n\t\timg = np.random.rand(2, 3, 256, 128)\n\t\t\n\t\tvgg = VGGNet()\n\t\tboxnet = BoxNet()\n\n\t\tfm = vgg(img)\n\t\tboxes = boxnet(fm)\n\n\t\tnp.testing.assert_equal(boxes.shape, (2,6,256/2**4,128/2**4))\n\n\t\t\"\"\" Dimension check with random shifts \"\"\"\n\n\t\t\"\"\" Visualize boxes with random shifts \"\"\"", "def test_transform_and_load_subnets(neo4j_session):\n subnet_res = tests.data.gcp.compute.VPC_SUBNET_RESPONSE\n subnet_list = cartography.intel.gcp.compute.transform_gcp_subnets(subnet_res)\n cartography.intel.gcp.compute.load_gcp_subnets(neo4j_session, subnet_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(subnet:GCPSubnet)\n RETURN subnet.id, subnet.region, subnet.gateway_address, subnet.ip_cidr_range, subnet.private_ip_google_access,\n subnet.vpc_partial_uri\n \"\"\"\n nodes = neo4j_session.run(query)\n actual_nodes = {\n (\n n['subnet.id'],\n n['subnet.region'],\n n['subnet.gateway_address'],\n n['subnet.ip_cidr_range'],\n n['subnet.private_ip_google_access'],\n n['subnet.vpc_partial_uri'],\n ) for n in nodes\n }\n\n expected_nodes = {\n (\n 'projects/project-abc/regions/europe-west2/subnetworks/default',\n 'europe-west2',\n '10.0.0.1',\n '10.0.0.0/20',\n False,\n 'projects/project-abc/global/networks/default',\n ),\n }\n assert actual_nodes == expected_nodes", "def test_ipv4_in_net(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\", force_v4=True)\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")", "def test_networking_project_network_tag_create(self):\n pass", "def segment_images(self, session, logits, keep_prob, input_image, data_test_dir, image_shape):\n\n\t # Run the loop for segmenting all the test images\n\t\tfor image_file in glob(os.path.join(data_test_dir, 'image_2', '*.png')):\n\t\t\t# read each image\n\t\t\tim = scipy.misc.imread(image_file)\n\t\t\t# resize each image to the required shape \n\t\t\timage = scipy.misc.imresize(im, image_shape)\n\t\t\t# \n\t\t\tim_softmax = session.run([tf.nn.softmax(logits)], {keep_prob: 1.0, input_image: 
[image]})\n\t\t\t#\n\t\t\tim_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])\n\t\t\t#\n\t\t\tsegmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)\n\t\t\t#\n\t\t\tmask = np.dot(segmentation, np.array([[255, 0, 255, 255]]))\n\t\t\t#\n\t\t\tmask = scipy.misc.toimage(mask, mode=\"RGBA\")\n\t\t\t#\n\t\t\tstreet_im = scipy.misc.toimage(image)\n\t\t\t#\n\t\t\tstreet_im.paste(mask, box=None, mask=mask)\n\t\t\t#\n\t\t\tyield os.path.basename(image_file), np.array(street_im)", "def scan_for_gt_outliers(dataset_path, model_path):\n candicates = []\n images, masks = get_dataset(dataset_path)\n images.sort()\n masks.sort()\n setattr(argparse, \"_gpus_arg_default\", lambda x: 0)\n model = UNet.load_from_checkpoint(checkpoint_path=model_path)\n model.cuda()\n model.eval()\n\n f_beta = FBeta(num_classes=1, beta=0.5).to(torch.device(\"cuda\", 0))\n precision = Precision(num_classes=1, is_multiclass=False).to(\n torch.device(\"cuda\", 0)\n )\n\n pixels = 512 * 512\n with tqdm(\n total=len(images),\n desc=\"Extracting sets with erronous ground truths.\",\n leave=False,\n position=0,\n ) as pbar:\n for i, m in zip(images, masks):\n img = Image.open(i)\n msk = Image.open(m)\n\n img_tensor = transforms.ToTensor()(img).cuda()\n msk_tensor = transforms.ToTensor()(msk)\n msk_tensor = torch.gt(torch.sigmoid(msk_tensor), 0.5).cuda()\n\n if len(msk_tensor.unique()) != 2:\n continue\n\n img_tensor = torch.unsqueeze(img_tensor, 0)\n msk_tensor = torch.unsqueeze(msk_tensor, 0)\n\n preds = model(img_tensor)\n preds = torch.gt(torch.sigmoid(preds), 0.5)\n probs_img = torch.clamp(\n kornia.enhance.add_weighted(\n src1=img_tensor, alpha=1.0, src2=preds, beta=0.5, gamma=0.0,\n ),\n max=1.0,\n )\n cv = preds / msk_tensor\n fp = torch.sum(cv == float(\"inf\")).item() / pixels\n # f1 = f_beta(preds, msk_tensor)\n # p = precision(preds, msk_tensor)\n\n if fp > 0.1:\n candicates.append((i, m))\n with open(\"candidates.pkl\", \"wb\") as f:\n pickle.dump(candicates, f)\n\n img.close()\n msk.close()\n\n pbar.update()", "def region_of_interest(self, img):\n # get region vertices\n r1, r2, r3, r4 = self.region_filter_params[\"ratios\"]\n img_height, img_width = img.shape\n vertices = define_region_vertices(img_height, img_width, r1, r2, r3, r4)\n\n # defining a blank mask to start with\n mask = np.zeros_like(img)\n\n # defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n\n # filling pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, [vertices], ignore_mask_color)\n\n # returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def test_ipv4_in_net_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\")\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")", "def test_grdimage_central_meridians_and_standard_parallels(grid, proj_type, lon0, lat0):\n fig_ref, fig_test = Figure(), Figure()\n fig_ref.grdimage(\n \"@earth_relief_01d_g\", projection=f\"{proj_type}{lon0}/{lat0}/15c\", cmap=\"geo\"\n )\n fig_test.grdimage(grid, projection=f\"{proj_type}{lon0}/{lat0}/15c\", cmap=\"geo\")\n return fig_ref, fig_test", "def detectFaceAndClassify(faceNet, faceMaskClassifier, testImagePath, threshold):\n # load the input test image from disk\n image = cv2.imread(testImagePath)\n # making a copy of image and finding the image spatial dimensions\n orig = image.copy()\n (h, w) = image.shape[:2]\n\n # construct a blob from the image to pass to the network\n # using standard weights for the face detection model for image preprocessing\n blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))\n\n # obtain the face detections by passing the blob through the network\n print(\"computing face detections...\")\n faceNet.setInput(blob)\n faceDetections = faceNet.forward()\n\n # loop over the detections to classify them and form bounding boxes and labels\n for i in range(0, faceDetections.shape[2]):\n # extract only confident detections using the confidence/probability\n # associated with the detection\n confidence = faceDetections[0, 0, i, 2]\n\n # filter out weak detections by ensuring the confidence is\n # greater than the minimum confidence 0.5 or input variable\n if confidence > threshold:\n # extract bounding box dimensions and face Region of intrest for classification\n faceROI, startX, startY, endX, endY = extractBoxAndFaceROI(image, faceDetections, itemNum=i,\n height=h, width=w)\n\n faceROI = np.expand_dims(faceROI, axis=0)\n\n # Passing the pre-processed image with classification model to check if there is a mask or not\n (mask, withoutMask) = faceMaskClassifier.predict(faceROI)[0]\n # (mask, withoutMask) = faceMaskClassifier.predict(faceROI)\n\n # find the class and associated colour to use for the bounding box and text\n label = \"Mask\" if mask > withoutMask else \"No Mask\"\n color = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\n\n # include the probability of prediction in the label of the bounding box\n label = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n\n # forming bounding box rectangle and display the label the output image frame\n cv2.putText(image, label, (startX, startY - 10),\n cv2.FONT_HERSHEY_COMPLEX, 0.45, color, 2)\n cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)\n\n # show the output image\n cv2.imshow(\"Output\", image)\n # display the image still a key is 
pressed, when key is pressed program is terminated\n cv2.waitKey(0)", "def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):\r\n assert vis or not test_data.shuffle\r\n data_names = [k[0] for k in test_data.provide_data]\r\n\r\n i = 0\r\n t = time.time()\r\n imdb_boxes = list()\r\n original_boxes = list()\r\n for im_info, data_batch in test_data:\r\n t1 = time.time() - t\r\n t = time.time()\r\n\r\n scale = im_info[0, 2]\r\n scores, boxes, data_dict = im_proposal(predictor, data_batch, data_names, scale)\r\n t2 = time.time() - t\r\n t = time.time()\r\n\r\n # assemble proposals\r\n dets = np.hstack((boxes, scores))\r\n original_boxes.append(dets)\r\n\r\n # filter proposals\r\n keep = np.where(dets[:, 4:] > thresh)[0]\r\n dets = dets[keep, :]\r\n imdb_boxes.append(dets)\r\n\r\n if vis:\r\n vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'], scale)\r\n\r\n logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +\r\n 'proposal %d ' % (dets.shape[0]) +\r\n 'data %.4fs net %.4fs' % (t1, t2))\r\n i += 1\r\n\r\n assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'\r\n\r\n # save results\r\n rpn_folder = os.path.join(imdb.root_path, 'rpn_data')\r\n if not os.path.exists(rpn_folder):\r\n os.mkdir(rpn_folder)\r\n\r\n rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')\r\n with open(rpn_file, 'wb') as f:\r\n cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)\r\n\r\n if thresh > 0:\r\n full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')\r\n with open(full_rpn_file, 'wb') as f:\r\n cPickle.dump(original_boxes, f, cPickle.HIGHEST_PROTOCOL)\r\n\r\n logger.info('wrote rpn proposals to %s' % rpn_file)\r\n return imdb_boxes", "def build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n # ROI Pooling\n # Shape: [batch, boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x", "def test_net(model, val_loader=None, thresh=0.05):\n\n for iter, data in enumerate(val_loader):\n\n # one batch = data for one image\n image = data['image']\n target = data['label']\n wgt = data['wgt']\n rois = data['rois']\n gt_boxes = data['gt_boxes']\n gt_class_list = data['gt_classes']\n\n #TODO: perform forward pass, compute cls_probs\n\n\n # TODO: Iterate over each class (follow comments)\n for 
class_num in range(20): \n # get valid rois and cls_scores based on thresh\n \n # use NMS to get boxes and scores\n \n\n #TODO: visualize bounding box predictions when required\n #TODO: Calculate mAP on test set", "def augment():\n print(\"augmenting......\")\n path1 = '../trainp1/'\n path2 = '../trainp2/'\n # path of pair1 and pair2 similar to img & mask task for segmentation\n p = Augmentor.Pipeline(path1) # pair1\n p.ground_truth(path2) # pair2\n p.rotate(probability=0.3, max_left_rotation=3, max_right_rotation=3) \n p.flip_left_right(probability=0.2) \n p.random_distortion(0.5, 2, 2, 2)\n p.zoom(probability=0.5, min_factor=0.95, max_factor=1.05)\n p.process()" ]
[ "0.60389805", "0.5884871", "0.57754153", "0.5745751", "0.5740149", "0.5732082", "0.5713616", "0.5691783", "0.56859934", "0.56745535", "0.563614", "0.56359076", "0.5585057", "0.5569545", "0.5568741", "0.55645275", "0.5541818", "0.5533012", "0.5529382", "0.5483159", "0.5482852", "0.54785836", "0.5453219", "0.5446292", "0.54318386", "0.54303765", "0.54226786", "0.53922415", "0.53771913", "0.5355515", "0.53531396", "0.5349461", "0.534396", "0.53411233", "0.5337291", "0.5311661", "0.5310996", "0.53108823", "0.52958906", "0.52903026", "0.52889055", "0.52880263", "0.5268279", "0.5268267", "0.5259447", "0.5257783", "0.52576107", "0.5249793", "0.52361", "0.5231203", "0.5222842", "0.5211064", "0.52101713", "0.5208591", "0.52065504", "0.52005816", "0.51997185", "0.5198558", "0.5180168", "0.5177362", "0.51763797", "0.51758885", "0.5171094", "0.516758", "0.5165419", "0.5161889", "0.5161604", "0.5154147", "0.5150833", "0.5145369", "0.51422983", "0.51394284", "0.51391363", "0.51306534", "0.5126267", "0.5123819", "0.51222146", "0.51136667", "0.51127607", "0.51110506", "0.5104737", "0.5101297", "0.51000303", "0.5096368", "0.5088679", "0.5087608", "0.50843555", "0.50842154", "0.50815976", "0.5080559", "0.5079733", "0.5079716", "0.50709224", "0.506908", "0.50687385", "0.50668377", "0.5066313", "0.50660336", "0.50629723", "0.50615203", "0.5060877" ]
0.0
-1
checking return values for `start` and `end` when calling channel_messages for numbers not multiples of 50.
def test_channel_messages_unlimited_pagination(): clear() userOne = auth_register('[email protected]', '123abc!@#', 'First', 'User') randChannel = channels_create(userOne['token'], 'randChannel', True) for _ in range(149): message_send(userOne['token'], randChannel['channel_id'], 'Hello') messages = channel_messages(userOne['token'], randChannel['channel_id'], 0) assert(messages['start'] == 0) assert(messages['end'] == 50) messages2 = channel_messages(userOne['token'], randChannel['channel_id'], 50) assert(messages2['start'] == 50) assert(messages2['end'] == 100) messages3 = channel_messages(userOne['token'], randChannel['channel_id'], 100) assert(messages3['start'] == 100) assert(messages3['end'] == -1) assert(len(messages3['messages']) == 49) # an error should be raised when start is beyond 149 messages with pytest.raises(InputError): channel_messages(userOne['token'], randChannel['channel_id'], 150)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def channel_messages(token, channel_id, start):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # check if user is a member of channel with channel_ID and return AccessError if not\n if is_user_channel_member(channel_id, curr_id) is False:\n raise error.AccessError(description=\"user is not a member of this channel\")\n\n #get channel data\n curr_channel = database.get_channel_data(channel_id)\n # find the length of messages\n messages_length = len(curr_channel[\"messages\"])\n\n # if start is after the oldest message in messages InputError is raised\n # if messages is called and start is 0 on an empty channel, it returns an empty channel.\n # if start is after the oldest message in messages InputError is raised\n\n if messages_length <= start and (messages_length != 0 or start > 0):\n raise error.InputError(description=\"\"\"The start value selected is\n past the oldest message in the list\"\"\")\n\n if messages_length == 0 and start == 0:\n return {\"messages\": [], \"start\": start, \"end\": -1}\n\n # get the list of dictionaries 'message'\n curr_messages = curr_channel[\"messages\"]\n messages_returned = []\n\n end = start + 50\n num_msgs_to_check = messages_length - start\n\n # If end is larger than the total no. of messages,\n # the function will print till end and return -1\n if num_msgs_to_check < 50:\n\n counter = 0\n while counter < num_msgs_to_check:\n target_message_index = start + counter\n messages_returned.append(curr_messages[target_message_index])\n counter += 1\n\n end = -1\n # else if end is within total no of messages,\n # function will print 50 messaages from start and return start + 50\n else:\n # loop to add each message to return up till 50 messages is returned\n counter = 0\n while counter < 50:\n target_message_index = start + counter\n messages_returned.append(curr_messages[target_message_index])\n counter += 1\n\n for msg in messages_returned:\n for react in msg['reacts']:\n react['is_this_user_reacted'] = curr_id in react['u_ids']\n\n return {\"messages\": messages_returned, \"start\": start, \"end\": end}", "def test_generator_continuous():\n RANGE_MAX = 100\n prev_value = RANGE_MAX // 2\n for msg in it.islice(generate_msgs(0, RANGE_MAX), 0, 42):\n curr_value = Message.parse(msg).power\n assert curr_value - prev_value <= 1\n prev_value = curr_value", "def test_messenger_limit():\n all_messages_resp = requests.get(BASE_URL)\n all_messages = all_messages_resp.json()\n total_message_count = len(all_messages)\n message_limit = total_message_count // 2\n\n query_params = {\"limit\": message_limit}\n limit_resp = requests.get(BASE_URL, params=query_params)\n limited_messages = limit_resp.json()\n assert limit_resp.status_code == 200\n assert len(limited_messages) == message_limit", "def test_if_it_outputs_correct_output_for_numbers_greater_than_50(self):\n self.assertEquals(len(prime_numbers(55)), 16)", "def test_generator_downward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: -1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_min", "def test_generator_upward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: 1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_max", "def validate(c_name, val):\n n 
= 80\n threshold = 4\n while (threshold >= 0):\n if ((len(channels[c_name]) > n) and (val <= threshold)):\n return True\n else:\n n -= 20\n threshold -= 1\n\n return False", "def test_limit_and_from(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=5&limit=10\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(channel.json_body[\"next_token\"], 15)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 10)\n self._check_fields(channel.json_body[\"event_reports\"])", "def test_seq_rangeExamples(self):\n\n self.assertEqual(MessageSet(2, 4), MessageSet(4, 2))\n self.assertEqual(list(MessageSet(2, 4)), [2, 3, 4])\n\n m = MessageSet(3291, None)\n m.last = 3290\n self.assertEqual(list(m), [3290, 3291])", "def test_args_count_in_range(args: list, min: int, max: int) -> bool:\n\n\tcount = args_count(args)\n\treturn (count >= min and count <= max)", "def test_hello_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_hello_failed_code.__iter__()\n length = self.test_hello_failed_code.__len__()\n\n while value < self.MAX_HELLO_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_HELLO_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1", "def test_stream_loop(self):\n chans, gains, scans, rate = (10,10,10,10), (1,2,4,5), 1024, 500\n v = [v[0] for v in self.l.stream_sync(\n channels=chans, gains=gains,\n num_scans=scans, rate=rate)]\n for vi in v:\n for r in vi:\n self.assertTrue(abs(r-2.5) < .1,\n \"%s should be cal, 2.5v\" % vi[0])", "def _send_messages(number_range, partition=0, topic=topic, producer=kafka_producer, request=request):\n messages_and_futures = [] # [(message, produce_future),]\n for i in number_range:\n # request.node.name provides the test name (including parametrized values)\n encoded_msg = '{}-{}-{}'.format(i, request.node.name, uuid.uuid4()).encode('utf-8')\n future = kafka_producer.send(topic, value=encoded_msg, partition=partition)\n messages_and_futures.append((encoded_msg, future))\n kafka_producer.flush()\n for (msg, f) in messages_and_futures:\n assert f.succeeded()\n return [msg for (msg, f) in messages_and_futures]", "def check_delivered_messages(results):\n assert results[\"metrics\"][\"Delivered messages\"] == 20", "def channel_messages(token, channel_id, start=0):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if auth_u_id not in channel[\"all_members\"]:\n raise AccessError(\"The authorised user is not a member of the channel.\")\n messages_results = messages.query(\"channel_id\", \"==\", channel_id)\n if start > len(messages_results):\n raise ValueError(\n \"start is greater than the total number of messages in the channel.\"\n )\n if start < 0:\n raise ValueError(\"Invalid value for start.\")\n sorted_messages = sorted(messages_results, key=itemgetter(\"time_created\"))\n sorted_messages.reverse()\n end = len(sorted_messages) if start + 50 > len(sorted_messages) - 1 else start + 50\n messages_list = sorted_messages[start:end]\n returned_messages = []\n for message in messages_list:\n returned_messages.append(\n {\n \"message_id\": message[\"message_id\"],\n \"u_id\": message[\"u_id\"],\n \"message\": message[\"message\"],\n \"is_pinned\": message[\"is_pinned\"],\n \"time_created\": message[\"time_created\"],\n }\n )\n for message 
in returned_messages:\n reacts_results = reacts.get(message[\"message_id\"])\n returned_reacts = []\n for react_id in reacts_results:\n if not react_id == \"message_id\":\n returned_reacts.append(\n {\n \"react_id\": react_id,\n \"u_ids\": reacts_results[react_id],\n \"is_this_user_reacted\": auth_u_id in reacts_results[react_id],\n }\n )\n message[\"reacts\"] = returned_reacts\n return {\n \"messages\": returned_messages,\n \"start\": start,\n \"end\": -1 if end == len(sorted_messages) else end,\n }", "def check_all():\r\n i = 100000\r\n while i <= 999996:\r\n if check(i):\r\n print(i)\r\n i = i + 1", "def test_meter_mod_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_meter_mod_failed_code.__iter__()\n length = self.test_meter_mod_failed_code.__len__()\n\n while value < self.MAX_METER_MOD_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_METER_MOD_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1", "async def chatchart(self, ctx, channel: Optional[discord.TextChannel] = None, messages:int = 5000):\n if channel is None:\n channel = ctx.channel\n\n # --- Early terminations\n if channel.permissions_for(ctx.message.author).read_messages is False:\n return await ctx.send(\"You're not allowed to access that channel.\")\n if channel.permissions_for(ctx.guild.me).read_messages is False:\n return await ctx.send(\"I cannot read the history of that channel.\")\n blacklisted_channels = await self.config.guild(ctx.guild).channel_deny()\n if channel.id in blacklisted_channels:\n return await ctx.send(f\"I am not allowed to create a chatchart of {channel.mention}.\")\n if messages < 5:\n return await ctx.send(\"Don't be silly.\")\n\n message_limit = await self.config.limit()\n if (message_limit != 0) and (messages > message_limit):\n messages = message_limit\n\n embed = discord.Embed(\n title=f\"Fetching messages from #{channel.name}\",\n description=\"This might take a while...\",\n colour=await self.bot.get_embed_colour(location=channel)\n )\n loading_message = await ctx.send(embed=embed)\n try:\n history = await self.fetch_channel_history(channel, loading_message, messages)\n except discord.errors.Forbidden:\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n return await ctx.send(\"No permissions to read that channel.\")\n\n msg_data = self.calculate_member_perc(history)\n # If no members are found.\n if len(msg_data[\"users\"]) == 0:\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n return await ctx.send(f\"Only bots have sent messages in {channel.mention} or I can't read message history.\")\n\n top_twenty, others = self.calculate_top(msg_data)\n chart = await self.create_chart(top_twenty, others, channel)\n\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n await ctx.send(file=discord.File(chart, \"chart.png\"))", "def test_flow_monitor_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_flow_monitor_failed_code.__iter__()\n length = self.test_flow_monitor_failed_code.__len__()\n\n while value < self.MAX_FLOW_MONITOR_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_FLOW_MONITOR_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1", "def count_valid(message, prefix):\n return 3", "def test_inrange():\n assert cs.any > 0\n assert cs.any < cmax", "def test_async_config_failed_code_value(self):\n\n value = 0\n\n iter_given_code = 
self.test_async_config_failed_code.__iter__()\n length = self.test_async_config_failed_code.__len__()\n\n while value < self.MAX_ASYNC_CONFIG_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_ASYNC_CONFIG_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1", "def range_query(self, start_key, end_key):\n if not self.attached:\n raise CastleCollectionNotAttachedException()\n\n print \"THIS IS FAKE\"\n pycastle_log.info(\"Doing range query from key \"+str(start_key)+\" to key \"+str(end_key))\n try:\n i = 0\n while i < 10:\n yield i\n i+=1\n if i % 5 == 0:\n pycastle_log.info(\"Getting next batch\")\n except GeneratorExit:\n pycastle_log.info(\"User requested stop of range query from key \"+str(start_key)+\" to key \"+str(end_key))", "def numeralcheck(msg, *args):\r\n try:\r\n num = int(msg.content)\r\n if num == 0 and not args:\r\n return (\"done\")\r\n return (num)\r\n except ValueError:\r\n if msg.content.lower() == \"done\":\r\n if args:\r\n return (0)\r\n return (\"done\")", "def test_sufficientWidth(self):\n msg = \"barbazbo\"\n maxLen = len(\"PRIVMSG foo :{}\".format(msg)) + 2\n self.client.msg(\"foo\", msg, maxLen)\n self.assertEqual(self.client.lines, [\"PRIVMSG foo :{}\".format(msg)])\n self.client.lines = []\n self.client.msg(\"foo\", msg, maxLen - 1)\n self.assertEqual(2, len(self.client.lines))\n self.client.lines = []\n self.client.msg(\"foo\", msg, maxLen + 1)\n self.assertEqual(1, len(self.client.lines))", "def test_splitLongMessagesWithDefault(self):\n message = \"o\" * (irc.MAX_COMMAND_LENGTH - 2)\n self.assertLongMessageSplitting(message, 2)", "def chain_rangeValid(start, stop):\r\n for i in range(start, stop):\r\n chain = chain_153(i)\r\n if len(chain) > 1 or chain[0] == 153:\r\n for j in chain_153(i):\r\n print(j)", "def send_messages(_) -> int:\n return 1 << 11", "def send_messages(_) -> int:\n return 1 << 11", "def check_exit_reached(minimum: int, maximum: int) -> list:\n the_exit = [minimum - 1, maximum - 1]\n return the_exit", "def test_out_of_bounds(\n self, num_groups: int, total_tokens: int, group_number: int\n ) -> None:\n assume(\n group_number < 0 or group_number >= num_groups or total_tokens < num_groups\n )\n self.assertThat(\n lambda: token_count_for_group(num_groups, total_tokens, group_number),\n raises(ValueError),\n )", "def test_group_mod_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_group_mod_failed_code.__iter__()\n length = self.test_group_mod_failed_code.__len__()\n\n while value < self.MAX_GROUP_MOD_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_GROUP_MOD_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1", "def allowedLimit(self, number, msg=None):\n return allowed_limit(number, msg)", "def test_fetch_with_max_id_and_since_id(self):\n # when from_date/to_date are not set, max result len = DIRECT_MESSAGES_LIMIT\n FakeTwitterApi.restore_settings()\n api = FakeTwitterApi()\n last_id, _ = api.DM[0]\n first_id, _ = api.DM[DirectMessagesFetcher.DIRECT_MESSAGES_LIMIT - 1]\n\n res = DirectMessagesFetcher(api)\n statuses = list(res.fetch())\n self.assertEqual(len(statuses), DirectMessagesFetcher.DIRECT_MESSAGES_LIMIT)\n self.assertEqual(statuses[0]['id'], last_id)\n self.assertEqual(statuses[-1]['id'], first_id)\n\n # let set max_id to 51th item\n FakeTwitterApi.restore_settings()\n api = FakeTwitterApi()\n id_50, _ = api.DM[50] # go to 51th dm\n\n res = DirectMessagesFetcher(api, **{\"max_id\": id_50})\n 
statuses = list(res.fetch())\n self.assertEqual(len(statuses), DirectMessagesFetcher.DIRECT_MESSAGES_LIMIT - 50)\n self.assertTrue(all(s['id'] <= id_50 for s in statuses))\n self.assertEqual(statuses[0]['id'], id_50)\n\n # let check since_id\n FakeTwitterApi.restore_settings()\n api = FakeTwitterApi()\n since_id, _ = api.DM[100]\n max_id, _ = api.DM[20]\n\n res = DirectMessagesFetcher(api, **{'max_id': max_id, 'since_id': since_id})\n statuses = list(res.fetch())\n self.assertEqual(len(statuses), 100 - 20)", "def test_modulo(self):\n node, other, messages = self._create_nodes_messages()\n\n for modulo in xrange(0, 10):\n for offset in xrange(0, modulo):\n # global times that we should receive\n global_times = [message.distribution.global_time for message in messages if (message.distribution.global_time + offset) % modulo == 0]\n\n sync = (1, 0, modulo, offset, [])\n other.give_message(node.create_introduction_request(other.my_candidate, node.lan_address, node.wan_address, False, u\"unknown\", sync, 42), node)\n\n responses = node.receive_messages(names=[u\"full-sync-text\"], return_after=len(global_times))\n response_times = [message.distribution.global_time for _, message in responses]\n\n self.assertEqual(sorted(global_times), sorted(response_times))", "def __check_noncircular(event, resp):\n\n # the status code to return based on whether the subsequence was \n # specified by start/end, or by range header\n status_codes = {\"range\": SC.REQUESTED_RANGE_NOT_SATISFIABLE,\n \"start-end\": SC.NOT_IMPLEMENTED}\n\n start, end, subseq_type = \\\n [resp.get_datum(a) for a in [\"start\", \"end\", \"subseq-type\"]]\n \n # if request start is greater than end, set the response status code\n # to an error code\n if start and end:\n if int(start) > int(end):\n resp.set_status_code(status_codes[subseq_type])\n resp.set_body(json.dumps({\n \"message\": \"server DOES NOT support circular \" +\n \"sequences, end MUST be higher than start\"\n }))", "def test_flow_mod_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_flow_mod_failed_code.__iter__()\n length = self.test_flow_mod_failed_code.__len__()\n\n while value < self.MAX_FLOW_MOD_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_FLOW_MOD_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1", "def test_enum_out_of_range(self):\n @converters.wrap\n def inner_test(param: enums.DisconnectReason):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(lambda: inner_test(param='4'), 3114)", "def test_port_mod_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_port_mod_failed_code.__iter__()\n length = self.test_port_mod_failed_code.__len__()\n\n while value < self.MAX_PORT_MOD_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_PORT_MOD_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1", "def check_number(client, num, min, max):\r\n while True:\r\n try:\r\n # Convert it into integer\r\n temp = int(num)\r\n if temp >= min and temp <= max:\r\n break\r\n else:\r\n msg_client(client, \"Perfavore, inserire un numero compreso tra: \" + str(min) + \" e \" + str(max) + \": \")\r\n num= client.recv(BUFSIZ)\r\n except ValueError:\r\n msg_client(client, \"Perfavore, inserire un numero compreso tra: \" + str(min) + \" e \" + str(max) + \": \")\r\n num = client.recv(BUFSIZ) \r\n return temp", "def ticks(self, start, end, desired_ticks=8):\n if start == end or 
isnan(start) or isnan(end):\n return [start]\n min, max, delta = heckbert_interval(start, end, desired_ticks,\n nicefunc=self._nice_pow10,\n enclose = True)\n return frange(min, max, delta)", "def validate_oee_error_16(self):\n sql = \"SELECT COUNT(*) FROM bdeview\"\n lines = self.c.execute(sql).fetchall()\n # Temporary make it 50 for testing purpose\n return lines[0][0]>=50, []", "def ticks(self, start, end, desired_ticks=8):\n if start == end or isnan(start) or isnan(end):\n return [start]\n min, max, delta = heckbert_interval(start, end, desired_ticks, enclose=True)\n return frange(min, max, delta)", "def test_message_int():\n result = True\n\n message = msg.Message()\n for i in range(num_it):\n message.appendInt(i)\n if message.length != msg.HEADER_SIZE + (i+1)*msg.intStruct.size:\n print(\"Size is \", message.length, \" but should be \", msg.HEADER_SIZE + (i+1)*msg.intStruct.size)\n print(\"Error : message.appendInt\")\n result = False\n\n message.resetCursor()\n for i in range(num_it):\n r = message.readInt()\n if r != i:\n print(r, \" vs \", i)\n print(\"Error : message.read/appendInt\")\n result = False\n\n return result", "def le(value, limit):\n return value <= limit", "def _expect_100(connection: typing.Union[ssl.SSLSocket, socket.socket]) -> bool:\n try:\n headers = b''\n while b'\\r\\n\\r\\n' not in headers:\n headers += connection.recv(1024)\n return b' 100 ' in headers.split(b'\\r\\n')[0]\n except IOError:\n return False", "def _validate_clear_args(limit):\n min_limit = 1\n max_limit = 20\n default_error = f\"[Limit] The `limit` argument must be a number between {min_limit} and {max_limit}\"\n try:\n limit = int(limit)\n except (ValueError, TypeError):\n return default_error\n if not (min_limit <= limit <= max_limit):\n return default_error\n return None", "def print_messages(start_message: str, end_message: str):\n def _print_messages(func):\n def new_func(*args, **kwargs):\n print(start_message)\n result = func(*args, **kwargs)\n print(end_message)\n return result\n return new_func\n return _print_messages", "def test_chunk_size(self):\n chunk_size = 3\n for num_args in [chunk_size - 1, chunk_size, chunk_size + 1]:\n\n # Test for normal list (range is considered a normal list as it implements __len__ and such)\n with self.subTest(num_args=num_args, input='list'):\n chunks = list(chunk_tasks(range(num_args), chunk_size=chunk_size))\n for chunk in chunks[:-1]:\n self.assertEqual(len(chunk), chunk_size)\n self.assertLessEqual(len(chunks[-1]), chunk_size)\n self.assertEqual(list(range(num_args)), list(chain.from_iterable(chunks)))\n\n # Test for an actual generator (range does not really behave like one)\n with self.subTest(num_args=num_args, input='generator/iterator'):\n chunks = list(chunk_tasks(iter(range(num_args)), chunk_size=chunk_size))\n for chunk in chunks[:-1]:\n self.assertEqual(len(chunk), chunk_size)\n self.assertLessEqual(len(chunks[-1]), chunk_size)\n self.assertEqual(list(range(num_args)), list(chain.from_iterable(chunks)))", "def test_find_break_points_invalid_range(self):\r\n self.assertRaises(ValueError, self.mc._find_break_points, 1, 0, 5)\r\n self.assertRaises(ValueError, self.mc._find_break_points, 1, 1, 5)", "def test_queue_op_failed_code_value(self):\n value = 0\n\n iter_given_code = self.test_queue_op_failed_code.__iter__()\n length = self.test_queue_op_failed_code.__len__()\n\n while value < self.MAX_QUEUE_OP_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_QUEUE_OP_FAILED_CODE_VALUE:\n 
value += 1\n\n length -= 1", "def lenRange(start, stop, step=1):\n return (stop - start + step - 1 + 2 * (step < 0)) // step", "def _translate_limit(self, len_, start, num):\n if start > len_ or num <= 0:\n return 0, 0\n return min(start, len_), num", "def test_within_length(self):\r\n\r\n flow1 = Flowgram(\"0 1.2 2.1 3.4 0.02 0.01 1.02 0.08\") # len 7\r\n flow2 = Flowgram('0.5 1.0 4.1 0.0 0.0 1.23 0.0 3.1') # len 10\r\n\r\n self.assertTrue(within_length(flow1, 0, 10))\r\n self.assertFalse(within_length(flow1, 10, 20))\r\n self.assertFalse(within_length(flow2, 0, 5))\r\n self.assertTrue(within_length(flow2, 5, 20))\r\n self.assertTrue(within_length(flow2, 5, 11))", "def can_reach_square(self, start, end):\n raise NotImplementedError", "def mesg_cb(self, a, b):\n if a[0] == (8, 1):\n self.omg_please_stop = True", "def test_splitLongMessagesWithOverride(self):\n message = \"o\" * (irc.MAX_COMMAND_LENGTH - 2)\n self.assertLongMessageSplitting(message, 3, length=irc.MAX_COMMAND_LENGTH // 2)", "def test_length(self):\n countdown = [x for x in generators.countdown(10)]\n self.assertEqual(len(countdown), 11)", "def replay_limit_range(robot_name,command):\n global history\n global directions\n list(filter(lambda cmd: cmd in directions, history))\n replay_list = []\n (agr1, agr2) = split_command_input(command)\n list_len = len(history) - int(agr2)\n while list_len < len(history):\n replay_list.append(history[list_len])\n list_len += 1\n for command in replay_list: \n handle_command(robot_name, command)\n return True,' > '+robot_name+' replayed '+str(len(replay_list))+' commands.'", "def _get_clip_indices(utt_start, utt_end, batch_start, batch_end):\n if utt_end <= batch_start:\n return None\n if utt_start >= batch_end:\n return None\n start = 0\n end = utt_end - utt_start\n if utt_start < batch_start:\n start = batch_start - utt_start\n if utt_end > batch_end:\n end = batch_end - utt_start\n if utt_end <= batch_end:\n ends = True\n else:\n ends = False\n return (start, end), ends", "def print_big_number_announcement(steps):\n if steps > 5000:\n print(\"It will take a while...\")", "def Checker(a,b,n,x):\n if n==0:\n if abs(a[0]-b[0])>=x: #if the changes in eta from one time step to another is more than .05mm\n return True #return true to continue the loop\n else:\n return False #stop the loop (this only happens if all of the points had a change of less than .05mm)\n elif abs(a[n]-b[n])>=x: #this checks each of the points in the channel \n return True #if any have too big a change the loop continues\n else: #if that point in the channel has small enough change\n Checker(a,b,n-1) #check the next point in the channel", "def ticks(self, start, end, desired_ticks=8):\n if start > end:\n start, end = end, start\n\n if start == 0.0:\n # Whoever calls us with a value of 0.0 puts themselves at our mercy\n log_start = 1e-9\n else:\n log_start = log10(start)\n\n if end == 0.0:\n log_end = 1e-9\n else:\n log_end = log10(end)\n log_interval = log_end - log_start\n\n if log_interval < 1.0:\n # If the data is spaced by less than a factor of 10, then use\n # regular/linear ticking\n min, max, delta = heckbert_interval(start, end, desired_ticks,\n enclose=True)\n return frange(min, max, delta)\n\n elif log_interval < desired_ticks:\n magic_numbers = [1, 2, 5]\n for interval in magic_numbers:\n n1 = self._logtickceil_as_irep(start,interval)\n n2 = self._logtickfloor_as_irep(end,interval)\n ticks = [self._irep_to_value(n,interval) for n in range(n1,n2+1)]\n if len(ticks) < desired_ticks * 1.5:\n return ticks\n return 
ticks\n\n else:\n # Put lines at every power of ten\n startlog = ceil(log_start)\n endlog = floor(log_end)\n expticks = linspace(startlog, endlog, endlog - startlog + 1)\n return 10**expticks", "def checkValue(c, m, y, k):\n MINVAL=0\n MAXVAL=255\n valueOk=True\n for val in c, m, y, k:\n if val >=MINVAL and val <=255:\n pass\n else:\n valueOk=False\n \n return valueOk", "def my_service():\n start, end = 100, 110\n for number in range(start, end):\n yield number", "def __verify_range(value, minimum, maximum):\n if value in range(minimum, maximum):\n return True\n else:\n return False", "async def test_channel_only(self):\n expected_channel = MockTextChannel()\n actual_channel, duration = self.cog.parse_silence_args(MockContext(), expected_channel, 10)\n\n self.assertEqual(expected_channel, actual_channel)\n self.assertEqual(10, duration)", "def test_lengthWithWildcardRange(self):\n self.assertRaises(TypeError, len, MessageSet(1, None))", "def chkLimits(name, value, Min, Max, unit = 'V', Hex = False):\n\n #global Log\n if not Min < value < Max:\n if Hex:\n line = \"%s:0x%X OUT OF LIMITS (0x%X, 0x%X). Test Failed !\" %(name, value, Min, Max)\n else:\n line = \"%s:%F %s OUT OF LIMITS (%F, %f). Test Failed !\" %(name, value, unit, Min, Max)\n Log.logError(line)\n Err.bumpError()\n return False\n if Hex:\n Log.logText(' '+'%s:0x%X expected range from:0x%X To: 0x%X. Test PASS !'% (name, value, Min, Max))\n else:\n Log.logText(' '+'%s:%F %s expected range From:%F %s To: %F %s. Test PASS !'% (name, value, unit, Min,unit, Max, unit))\n return True", "def test_identify_limit(limit, all, expected):\n assert identify_limit(limit, all) == expected", "def quick_test():\n if PERIOD < 2:\n return False\n if SIZE % PERIOD != 0:\n return False\n return True", "def _is_in_range(valid_values):\n\n def f(x):\n if x not in valid_values:\n raise ValueError('{} not in {}'.format(x, valid_values))", "def game_sequence(self, upper_limit):\n for i in range(1, upper_limit):\n response = self.build_text_response_for_number_(i)\n yield response if response else i", "def test_is_streaming(fprime_test_api):\n results = fprime_test_api.assert_telemetry_count(5, timeout=10)\n for result in results:\n msg = \"received channel {} update: {}\".format(result.get_id(), result.get_str())\n print(msg)\n fprime_test_api.assert_telemetry(\n \"sendBuffComp.SendState\", value=\"SEND_IDLE\", timeout=3\n )", "def count_in_range(start, end, check):\n count = 0\n for val in range(start, end):\n if check(val):\n count += 1\n\n return count", "def out_of_range_check(self, guess, range):\r\n if ((guess<0) or (guess>=range)):\r\n return \"Input is out of range!\"\r\n else:\r\n return guess", "def pair_equal(amount=100, start=0, stop=100, truncated=True):\n sequence = []\n amount = amount + start\n\n\n for x in range(start, amount):\n if truncated and x >= stop:\n sequence.append(stop)\n else:\n sequence.append(x)\n\n return sequence", "async def convert_to_summary(ctx, start, end):\n channel = ctx.channel\n try:\n start_message = await channel.fetch_message(start)\n end_message = await channel.fetch_message(end)\n except:\n await ctx.send(\"Can not fetch message!\")\n return\n\n try:\n raw_messages = await channel.history(\n before=end_message.created_at,\n after=start_message.created_at,\n oldest_first=True,\n ).flatten()\n except:\n await ctx.send(\"Can not get messages in that time range!\")\n return\n\n raw_messages = [start_message, *raw_messages, end_message]\n clean_messages = [\n {\n \"content\": message.clean_content,\n \"datetime\": 
message.created_at,\n \"author\": message.author.display_name,\n \"reaction\": sum(reaction.count for reaction in message.reactions),\n }\n for message in raw_messages\n if message.clean_content\n ]\n\n summary = generate_summary(clean_messages)\n keywords = generate_keywords(clean_messages)\n\n return summary, keywords, clean_messages", "def test_wrong_number_of_bounds(self):\n emsg = \"should have only an upper and lower limit\"\n with self.assertRaisesRegex(TypeError, emsg):\n _make_mask_cube(self.mask, self.coords, [0], self.units)\n with self.assertRaisesRegex(TypeError, emsg):\n _make_mask_cube(self.mask, self.coords, [0, 2, 4], self.units)", "def test_bundle_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_bundle_failed_code.__iter__()\n length = self.test_bundle_failed_code.__len__()\n\n while value < self.MAX_BUNDLE_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_BUNDLE_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1", "def test_next_token(self) -> None:\n\n # `next_token` does not appear\n # Number of results is the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=20\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n self.assertNotIn(\"next_token\", channel.json_body)\n\n # `next_token` does not appear\n # Number of max results is larger than the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=21\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n self.assertNotIn(\"next_token\", channel.json_body)\n\n # `next_token` does appear\n # Number of max results is smaller than the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=19\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 19)\n self.assertEqual(channel.json_body[\"next_token\"], 19)\n\n # Check\n # Set `from` to value of `next_token` for request remaining entries\n # `next_token` does not appear\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=19\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 1)\n self.assertNotIn(\"next_token\", channel.json_body)", "def test_switch_config_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_switch_config_failed_code.__iter__()\n length = self.test_switch_config_failed_code.__len__()\n\n while value < self.MAX_SWITCH_CONFIG_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_SWITCH_CONFIG_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1", "def test__validate_threads__0():\n channel_id = 202306130027\n channel_name = 'Koishi'\n \n channel = Channel.precreate(\n channel_id,\n channel_type = ChannelType.guild_thread_private,\n name = channel_name,\n )\n \n for input_value, expected_output in (\n (None, {}),\n ([], {}),\n ({}, {}),\n 
([channel], {channel_id: channel}),\n ({channel_id: channel}, {channel_id: channel}),\n ):\n output = validate_threads(input_value)\n vampytest.assert_eq(output, expected_output)", "def cubes(amount, start, stop, truncated, sequence):\n for x in range(start, amount):\n y = x ** 3\n if truncated and y >= stop:\n sequence.append(stop)\n else:\n sequence.append(y)\n return sequence", "def __check_args_val(self):\n if self.__min_range < 0:\n error_msg = \"min_range must be greater than or equal to zero\"\n raise ValueError(error_msg)\n elif self.__max_range < 0:\n error_msg = \"max_range must be greater than or equal to zero\"\n raise ValueError(error_msg)\n elif self.__max_range < self.__min_range:\n error_msg = \"max_range must be greater than or equal to min_range\"\n raise ValueError(error_msg)", "def test_random_high_low_values(self):\n channel_count = 10\n low = -100\n high = 100\n gen = random_data(low=-100, high=100,\n channel_count=channel_count)\n data = [next(gen) for _ in range(100)]\n\n self.assertEqual(len(data), 100)\n\n for record in data:\n self.assertEqual(len(record), channel_count)\n for value in record:\n self.assertTrue(low <= value <= high)", "def setup_number_of_faces():\n \n while True:\n faces = int(input(\"Geben Sie die Seitenanzahl der Würfel an (2 - 100) oder tippe '0' zum A\\\nbbruch: \"))\n if 2 <= faces <= 100:\n break\n elif faces == 0:\n quit()\n else:\n print(\"ERROR: Du musst eine Zahl zwischen 2 und 100 eingeben!\")\n print()\n print()\n return faces", "def range_function(num, start_range, end_range):\n if num > start_range and num < end_range:\n print(num, \"is in the range.\\n\")\n elif num < start_range or num > end_range:\n print(num, \"is not in the range.\\n\")", "def test_check_data_over_specifying_percentiles(self):\n msg = \"Cannot specify both no_of_percentiles and percentiles\"\n with self.assertRaisesRegex(ValueError, msg):\n Plugin().process(self.cube, no_of_percentiles=3, percentiles=[25, 50, 75])", "def analyze_length(self, limit = 30, filters = None):\n for i in xrange(1, min(limit, len(self.cipher))):\n self.analyze(i, filters)\n if self.errno == -1:\n print \"%02d\" % i,\n self.report_key()", "def checkLimit(device, checkStatus):\n d = device.read(1)\n if d:\n print(d)\n status = d[0]\n\n printStatus(status)\n if (checkStatus & status):\n return False\n return True", "def limitsExsess(topic, value):\n\n if isNotifyTime(topic):\n if \"temperature\" in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram(\"Temperature out of bounds: \"+value+\"degC\")\n return True\n if \"CO\" in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram(\"Carbon Monoxide level above threshold: \"+value)\n return True\n if \"All_Gas\" in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram(\"Poison gas level above threshold: \"+value)\n return True\n if \"alarm\" in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram(\"ALARM in Living room is On!\")\n return True\n if \"MotionHUE\" in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram(\"HUE Motion sensor detected movement!\")\n return True\n return False", "def in_range(low, high, step=None):\n def check(value):\n if not low <= value < high:\n return False\n\n if step is not None:\n return (value - low) % step == 0\n return True\n\n return check", "def on_message_batch(self, messages):\n assert isinstance(messages, list)\n assert len(messages) > 0\n assert 
all(isinstance(message, Message.Implementation) for message in messages)\n assert all(message.community == messages[0].community for message in messages)\n assert all(message.meta == messages[0].meta for message in messages)\n\n def _filter_fail(message):\n if isinstance(message, DelayMessage):\n if __debug__:\n dprint(message.delayed.candidate, \" delay \", message.delayed, \" (\", message, \")\")\n \n if message.create_request():\n self._statistics.delay_send += 1\n self._statistics.dict_inc(self._statistics.delay, \"om_message_batch:%s\" % message.delayed)\n self._statistics.delay_count += 1\n return False\n\n elif isinstance(message, DropMessage):\n if __debug__:\n dprint(message.dropped.candidate, \" drop: \", message.dropped.name, \" (\", message, \")\", level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"on_message_batch:%s\" % message)\n self._statistics.drop_count += 1\n return False\n\n else:\n return True\n\n meta = messages[0].meta\n\n if __debug__:\n debug_count = len(messages)\n debug_begin = time()\n\n # drop all duplicate or old messages\n assert type(meta.distribution) in self._check_distribution_batch_map\n messages = list(self._check_distribution_batch_map[type(meta.distribution)](messages))\n assert len(messages) > 0 # should return at least one item for each message\n assert all(isinstance(message, (Message.Implementation, DropMessage, DelayMessage)) for message in messages)\n\n # handle/remove DropMessage and DelayMessage instances\n messages = [message for message in messages if isinstance(message, Message.Implementation) or _filter_fail(message)]\n if not messages:\n return 0\n\n # check all remaining messages on the community side. may yield Message.Implementation,\n # DropMessage, and DelayMessage instances\n try:\n messages = list(meta.check_callback(messages))\n except:\n dprint(\"exception during check_callback for \", meta.name, exception=True, level=\"error\")\n return 0\n assert len(messages) >= 0 # may return zero messages\n assert all(isinstance(message, (Message.Implementation, DropMessage, DelayMessage)) for message in messages)\n\n if __debug__:\n if len(messages) == 0:\n dprint(meta.check_callback, \" yielded zero messages, drop, or delays. This is allowed but likely to be an error.\", level=\"warning\")\n\n # handle/remove DropMessage and DelayMessage instances\n messages = [message for message in messages if _filter_fail(message)]\n if not messages:\n return 0\n\n # store to disk and update locally\n if __debug__:\n dprint(\"in... 
\", len(messages), \" \", meta.name, \" messages from \", \", \".join(str(candidate) for candidate in set(message.candidate for message in messages)))\n \n if self.store_update_forward(messages, True, True, False):\n \n self._statistics.dict_inc(self._statistics.success, meta.name, len(messages))\n self._statistics.success_count += len(messages)\n\n # tell what happened\n if __debug__:\n debug_end = time()\n level = \"warning\" if (debug_end - debug_begin) > 1.0 else \"normal\"\n dprint(\"handled \", len(messages), \"/\", debug_count, \" %.2fs\" % (debug_end - debug_begin), \" \", meta.name, \" messages (with \", meta.batch.max_window, \"s cache window)\", level=level)\n \n # return the number of messages that were correctly handled (non delay, duplictes, etc)\n return len(messages)\n \n return 0", "def test_if_it_includes_a_number_if_the_number_is(self):\n self.assertNotIn(16, prime_numbers(16))", "async def test_all_args(self):\n expected_channel = MockTextChannel()\n actual_channel, duration = self.cog.parse_silence_args(MockContext(), expected_channel, 15)\n\n self.assertEqual(expected_channel, actual_channel)\n self.assertEqual(15, duration)", "def test_invalidMaxLength(self):\n self.assertRaises(ValueError, self.client.msg, \"foo\", \"bar\", 0)\n self.assertRaises(ValueError, self.client.msg, \"foo\", \"bar\", 3)", "def game_core_impl(number):\n count = 0 # Steps counter\n # Range boundaries. Values are taken from task definition (constant)\n BOTTOM_BOUND = 1\n UPPER_BOUND = 101\n while True:\n count += 1\n # Range center\n center = math.floor((UPPER_BOUND - BOTTOM_BOUND) / 2)\n # Target value for current step\n current_value = BOTTOM_BOUND + center\n if current_value == number:\n break\n if current_value < number:\n BOTTOM_BOUND += center\n else:\n UPPER_BOUND -= center\n return count", "def test_empty_messages(self):\n self.failureResultOf(self.producer.send_messages(\"topic\"), ValueError)\n self.failureResultOf(self.producer.send_messages(\"topic\", msgs=[]), ValueError)", "def test_get_sms_messages_paginated(self):\n pass" ]
[ "0.6338927", "0.58790916", "0.53648764", "0.53105456", "0.52494335", "0.5227476", "0.519632", "0.51526994", "0.5122233", "0.5106034", "0.50966465", "0.50898653", "0.50656456", "0.5056774", "0.50472474", "0.50439817", "0.5038363", "0.50265247", "0.50246954", "0.50190175", "0.50129956", "0.5001165", "0.49974138", "0.49937803", "0.49851128", "0.49762174", "0.4967449", "0.49574095", "0.49574095", "0.49535394", "0.49318656", "0.49177063", "0.49176916", "0.49121916", "0.49098969", "0.49037594", "0.4891914", "0.4881173", "0.48511985", "0.48333952", "0.48266035", "0.48116243", "0.48092633", "0.48036805", "0.4801383", "0.4799999", "0.47987992", "0.4795865", "0.477926", "0.47756416", "0.47684085", "0.47662655", "0.47584066", "0.47531724", "0.47506493", "0.47488004", "0.47478583", "0.47453043", "0.47445756", "0.47413927", "0.47401226", "0.47383374", "0.47352487", "0.47339746", "0.473135", "0.4730768", "0.47303057", "0.4726728", "0.47076866", "0.47065225", "0.47043577", "0.46975243", "0.46909901", "0.46898162", "0.46877995", "0.46772987", "0.46744022", "0.46740255", "0.46720582", "0.46693426", "0.46645057", "0.46522292", "0.4649779", "0.46486545", "0.46485537", "0.4648051", "0.46439087", "0.46420082", "0.4640222", "0.463241", "0.46291077", "0.46290094", "0.46203154", "0.4619875", "0.4615551", "0.46135992", "0.4610231", "0.46084604", "0.46033588", "0.46030456" ]
0.6797525
0
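The record above tests the 50-message pagination window of channel_messages. A minimal sketch of that windowing rule, matching the first channel_messages variant in the negatives (messages assumed newest-first; names here are illustrative, not the dataset's API):

def paginate(messages, start, page_size=50):
    # error when start is past the oldest message; an empty channel with start 0 is allowed
    if start > len(messages) or (start == len(messages) and start > 0):
        raise ValueError("start is past the oldest message")
    if len(messages) - start < page_size:
        # fewer than a full page remains: return the tail and signal the end with -1
        return {"messages": messages[start:], "start": start, "end": -1}
    end = start + page_size
    return {"messages": messages[start:end], "start": start, "end": end}

With 149 messages this reproduces the document's assertions: start 0 gives end 50, start 50 gives end 100, and start 100 returns the remaining 49 messages with end -1.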
checking that validation of an invalid token raises AccessError
def test_channel_leave_invalid_token():
    clear()
    user = auth_register('[email protected]', '123abc!@#', 'First', 'Last')
    userchannel_id = channels_create(user['token'], 'userchannel', True)
    # logging out invalidates the token, so channel_leave must raise AccessError
    auth_logout(user['token'])
    with pytest.raises(AccessError):
        channel_leave(user['token'], userchannel_id['channel_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_token(self):\n if not self.token:\n self.login()\n if not self.token:\n # TODO: create exception for this\n # Access is denied!!\n raise Exception(\"AccessDenied\")", "def check_token_validate(self, token):\n payload = {'key': self._lr_object._get_api_key(), 'secret': self._lr_object._get_api_secret(), 'access_token': token}\n url = SECURE_API_URL + \"api/v2/access_token/Validate/\"\n return self._lr_object._get_json(url, payload)", "async def validate_token(self, token):", "def validate_access_token(cmd, namespace):\n n = namespace\n\n if not n.access_token:\n n.access_token = get_config_value(cmd, 'communication', 'access_token', None)", "def validate_request_token():\n if not g.x_tapis_token:\n raise errors.NoTokenError(\"No access token found in the request.\")\n claims = validate_token(g.x_tapis_token)\n g.token_claims = claims\n g.username = claims.get('username')\n g.tenant_id = claims.get('tenant_id')\n g.account_type = claims.get('account_type')\n g.delegation = claims.get('delegation')", "def _validate_jwt_token(self):\n # force https so that we don't send around tokens unsecurely\n url = 'https://{}/api/token/verify'.format(urlparse(self.base_url).netloc)\n \n # paranoid: check again that we only send the token to https\n if urlparse(url).scheme != \"https\":\n msg = 'This should not happen, please file a bug report.'\n raise Exception(msg)\n\n if not self.jwt_access_token:\n raise FDSNUnauthorizedException(\"Unauthorized, authentication \"\n \"required.\", )\n\n # convert to json\n data = json.dumps({\"token\": self.jwt_access_token})\n # encode\n data = bytes(data, \"utf-8\")\n headers = {\"Content-Type\": \"application/json\"}\n html = urllib_request.Request(url, data=data, headers=headers)\n # decode('utf-8')\n try:\n result = urllib_request.urlopen(html).read().decode(\"utf-8\")\n dic = json.loads(result)\n valid = not bool(dic)\n if self.debug:\n print('Valid token : {}'.format(valid))\n return valid\n except urllib_error.HTTPError as e:\n return False", "def test_unused_token_is_valid(self):\n assert self.token.is_valid()", "def validate_token():\n try:\n token = validate_auth()\n except Unauthorized:\n return jsonify(valid=False, expires_in=0)\n expires = oidc.user_getfield('exp')\n delta = expires - datetime.now().timestamp()\n return jsonify(valid=True, expires_in=delta)", "def test_fail_token(client, request):\n res = client.get('/token?uid=1')\n\n assert res.status_code == 400\n assert 'User does not exist' in res.data.decode('utf-8')", "def check_token_structure(data):\n assert \"token\" in data\n token_structure = data[\"token\"]\n\n assert \"access_token\" in token_structure\n assert \"token_type\" in token_structure\n assert \"expires_in\" in token_structure", "def verify_token(self, token):\n return False", "def _auth_oauth_validate(self, provider, access_token):\n\t\toauth_provider = self.env['auth.oauth.provider'].browse(provider)\n\t\tvalidation = self._auth_oauth_rpc(oauth_provider.validation_endpoint, access_token)\n\t\tif validation.get(\"error\"):\n\t\t\traise Exception(validation['error'])\n\t\tif oauth_provider.data_endpoint:\n\t\t\tdata = self._auth_oauth_rpc(oauth_provider.data_endpoint, access_token)\n\t\t\tvalidation.update(data)\n\t\treturn validation", "def test_get_non_valid_token(self):\r\n\r\n user = UserFactory.create_batch(2)[1]\r\n res = self.app.get('/api/token/non-valid?api_key=' + user.api_key)\r\n error = json.loads(res.data)\r\n\r\n assert res.status_code == 404, error\r\n assert error['status'] == 'failed', error\r\n 
assert error['action'] == 'GET', error\r\n assert error['target'] == 'token', error\r\n assert error['exception_cls'] == 'NotFound', error", "def validate_token(self):\n r = requests.get(urljoin(self._url, Client._token_resource),\n params={\"tokenid\": self._token_id})\n\n if r.status_code == requests.status_codes.codes.unauthorized:\n raise ClientUnauthorized()\n elif r.status_code != requests.status_codes.codes.ok:\n error_messages = self._parse_invalid_request(r.text)\n raise ClientException(r.status_code, error_messages)\n\n try:\n type_, value = r.text.split(\"=\")\n value = value.strip(\" \\r\\n\")\n except Exception, e:\n raise ClientException(r.status_code,\n \"Some error has ocurred getting the result value from %s\"\n % r.text)\n\n return value == \"true\"", "def verify_access_token(self, token: str) -> bool:\n try:\n data = crypt.verify_token(token)\n except crypt.jwt_exceptions.PyJWTError as e:\n raise FileAccessError() from e\n if data['uuid'] != str(self.pk) or data['space_id'] != str(self.space_id):\n raise FileAccessError()\n\n return True", "def checkError(self, data):\n if data and ('error' in data):\n e = T411Error(data['code'], data['error'])\n T411.log.error(str(e))\n # Error 201 = Token has expired\n # Error 202 = Invalid token\n if e.code in [201, 202]:\n self.headers[T411.authentication_header] = None\n self.token_timestamp = None\n raise e", "def validate_auth():\n try:\n token = oidc.get_access_token()\n except TypeError:\n # raised when the token isn't accessible to the oidc lib\n raise Unauthorized(\"missing auth token\")\n\n if not oidc.validate_token(token):\n terminate_session()\n raise Unauthorized(\"invalid auth token\")\n return token", "def _is_oauth_token_valid(token: dict, time_key=\"expires_on\") -> bool:\n if \"access_token\" not in token or token.get(\"token_type\", \"\") != \"Bearer\" or time_key not in token:\n raise AirflowException(f\"Can't get necessary data from OAuth token: {token}\")\n\n return int(token[time_key]) > (int(time.time()) + TOKEN_REFRESH_LEAD_TIME)", "def accessCheck(self) -> None:\n\n if self.access_token:\n return\n self.access_token = self.login()", "def test_invalid_access_token(self):\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': 'invalid_access_token',\n 'grant_type': 'invalid_grant_type'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('unsupported_grant_type', response.json['error'])\n self.assertEqual(e_msg.INVALID_TOKEN_GRANT_TYPE,\n response.json['error_description'])", "def test_validate_token_returns_false_for_invalid_token(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = jwt.encode({}, self.jwt_key_2, algorithm='HS256')\n\n self.assertFalse(\n validate_token(token)[0],\n 'Failed to recognise invalidate token.'\n )", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/jobs'\n response = requests.get(endpoint, headers=self.authorization())\n if response.status_code != 200:\n self.print_error_response(response, \"detail\")\n return response.status_code == 200", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/jobs'\n response = requests.get(endpoint, headers=self.authorization())\n if response.status_code != 200:\n self.print_error_response(response, \"detail\")\n 
return response.status_code == 200", "async def check_access_token(self, token):\n async with self._session.get(\n 'https://eu.battle.net/oauth/check_token',\n params={'token': token}) as resp:\n self.request_count += 1\n valid = resp.status == 200\n if valid:\n json = await resp.json()\n exp = datetime.fromtimestamp(json['exp'])\n valid = valid and exp - datetime.now() >= timedelta(hours=1)\n self._access_token_checked = valid\n return self._access_token_checked", "def is_token_valid(self,pk,request):\n\n pass", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/readiness'\n response = self.perform_get_request(endpoint)\n\n if response.status_code != 200:\n self.print_error_response(response, \"error\")\n return response.status_code == 200", "def test_get_token_failure(self):\n url = '/api-token-auth/'\n data = {'username': 'adam', 'password': '321'}\n\n response = Client().post(url, data)\n self.assertEqual(response.status_code, 400)", "def token_auth_error():\n logger.debug(\"Token authentication failed.\")\n return unauthorized(\"Invalid credentials.\")", "def check_token(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n if 'email' not in decoded_token or 'expires' not in decoded_token \\\n or 'token' not in decoded_token:\n return {'error': 'Token is invalid'}\n\n self.email = decoded_token['email']\n self.user_in_db = User.users_db.get(decoded_token['email'])\n\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if self.user_in_db['token'] != decoded_token['token']:\n return {'error': 'Token is invalid'}\n\n if decoded_token['expires'] < time.time():\n return {'error': 'Token is expired'}\n\n return decoded_token", "def __token_is_valid(self):\n\n if not self.__login_token or len(self.__login_token) < 10:\n # Token is not set or totally invalid\n return False\n\n try:\n jwt.decode(self.__login_token, verify = False)\n return True\n except:\n # Most likely the token is expired as `exp` is in the past\n return False", "def _assert_access_token_error(self, response, expected_error_message, error_code):\n assert response.status_code == 400\n response_json = json.loads(response.content.decode('utf-8'))\n self.assertDictEqual(\n response_json,\n {\n \"access_token\": [{\"user_message\": expected_error_message}],\n \"error_code\": error_code\n }\n )", "def test_invalid_token_admin(self):\n invalid_token = {\n \"Content-Type\" : \"application/json\",\n \"x-access-token\" : \"eyJ0eXAiOiJK6MTUyNjczNzQ5Nvm2LkbWLZF2RuD32FBvgG8KyM\"}\n response = self.app.get(\n '/api/v3/users',\n headers=invalid_token)\n self.assertEqual(response.status_code, 401)", "def is_missing_token_service(request):\n if request.json == {}:\n return True\n schema = schema_utils.get_auth_schema()\n validator = Validator(schema, require_all=True)\n result = validator.validate(request.json)\n if validator.errors:\n logging.error(str(validator.errors))\n return not result", "def validate(auth_token):\n try:\n graph = facebook.GraphAPI(access_token=auth_token, version=\"3.0\")\n profile = graph.request('/me?fields=id,name,email')\n return profile\n except:\n message = \"The token is invalid or expired.\"\n return message", "def _check_token_response(self, response, *args, **kwargs):\n raise NotImplementedError('Subclasses must implement this method.')", "def checkToken( self ):\n\n if ( self.token == None ):\n return False\n else :\n d = {\n \"auth_token\" : str(self.token) ,\n 
\"method\" : \"flickr.auth.checkToken\",\n \"format\" : \"json\",\n \"nojsoncallback\" : \"1\"\n }\n sig = self.signCall( d )\n\n url = self.urlGen( api.rest, d, sig )\n try:\n res = self.getResponse( url )\n if ( self.isGood( res ) ):\n self.token = res['auth']['token']['_content']\n self.perms = res['auth']['perms']['_content']\n return True\n else :\n self.reportError( res )\n except:\n print(str(sys.exc_info()))\n return False", "def jwt_required(self) -> None:\n if not self._TOKEN:\n raise HTTPException(status_code=401,detail=\"Missing Authorization Header\")\n\n if self.get_raw_jwt()['type'] != 'access':\n raise HTTPException(status_code=422,detail=\"Only access tokens are allowed\")", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithm=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.exceptions.PyJWTError:\n raise serializers.ValidationError('Invalidad token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n self.context['payload'] = payload\n return data", "def test_raises_token_expired_when_applicable(self):\n\n badgr = self.get_badgr_setup()\n with vcr.use_cassette('tests/vcr_cassettes/no_valid_auth_token.yaml'):\n with self.assertRaises(exceptions.TokenAndRefreshExpiredError):\n badgr.get_from_server(self._sample_url)", "def token_required(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"validate token provided\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\"message\" : \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n except:\n return make_response(jsonify({\n \"message\" : \"kindly provide a valid token in the header\"}), 401)\n return f(*args, **kwargs)\n\n return decorated", "def test_live_thread_token_is_valid(self):\n assert self.token.is_valid()", "def _check_response(self, res: requests.Response, token: str) -> None:\n if res.status_code == 401:\n if token:\n raise AuthenticationTokenError(\n 'Cannot refresh invalid token that was given externally.')\n\n self._token_handler.refresh_token()\n\n # Raise this exception to trigger retry with backoff\n raise requests.exceptions.RequestException", "def test_rejects_invalid_tokens(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n self.assertFalse(tool.verify_token(\n 'ThisTokenDoesNotEvenHaveASlash', 12345, 'test_action'))\n timestamp = utils.get_timestamp(XsrfToolTests.TEST_NOW)\n self.assertFalse(\n tool.verify_token('NotTheRightDigest/%f' % timestamp, 12345,\n 'test_action'))", "def validate_token(token):\n # first, decode the token data to determine the tenant associated with the token. 
We are not able to\n # check the signature until we know which tenant, and thus, which public key, to use for validation.\n try:\n data = jwt.decode(token, verify=False)\n except Exception as e:\n logger.debug(f\"got exception trying to parse data from the access_token jwt; exception: {e}\")\n raise errors.AuthenticationError(\"could not parse the access token.\")\n # get the tenant out of the jwt payload and get associated public key\n token_tenant_id = data['tenant_id']\n try:\n public_key_str = get_tenant_config(token_tenant_id)['public_key']\n except errors.BaseTapisError:\n raise errors.AuthenticationError(\"Unable to process Tapis token; unexpected tenant_id.\")\n except KeyError:\n raise errors.AuthenticationError(\"Unable to process Tapis token; no public key associated with the \"\n \"tenant_id.\")\n # try:\n # pub_key = get_pub_rsa_key(public_key_str)\n # except Exception as e:\n # logger.error(f\"got exception trying to create public RSA key object; e: {e} \")\n # raise errors.ServiceConfigError(\"Unable to process public key associated with tenant.\")\n try:\n return jwt.decode(token, public_key_str, algorithm='RS256')\n except Exception as e:\n logger.debug(f\"Got exception trying to decode token; exception: {e}\")\n raise errors.AuthenticationError(\"Invalid Tapis token.\")", "def test_create_token_missing_field(self):\n\n invalid_credentials = {'email': '[email protected]', 'password': ''}\n response = self.client.post(URL_TOKEN, invalid_credentials)\n\n # Check that the response is HTTP 400, and does not contain a token.\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True", "def _validar_token(self):\n\n\t\ttoken = request.headers.get(\"Authorization\").split(\" \")[1]\n\n\t\tres = self.autenticador.validarToken(token)\n\t\tif(not res):\n\t\t\treturn False\n\t\treturn True", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def jwt_optional(self) -> None:\n if self._TOKEN and self.get_raw_jwt()['type'] != 'access':\n raise HTTPException(status_code=422,detail=\"Only access tokens are allowed\")", "def test_for_bad_request_errors(self):\n # Invalid token:\n response = self.client.get(\n reverse(\n 'users:recover_password',\n kwargs={\n 'token': 'invalid_token',\n },\n ),\n follow=True,\n )\n\n self.assertEqual(response.status_code, 400)", "def authenticate(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n access_token = request.headers.get('token', '')\n if access_token.strip(' '):\n decoded = decode_token(access_token)\n if decoded['status']:\n return func(*args, **kwargs)\n abort(http_status_code=401, message='Invalid token.Please login')\n abort(http_status_code=401,\n message='Token is missing')\n return wrapper", "def validate(self, accessToken, requiredScopes=None, requiredSubject=None):\n\n # Reset properties that may have been set by the previous call.\n self.__resetValidation()\n\n try:\n # Call Authlete's /api/auth/introspection API.\n self._introspectionResponse = self.__callIntrospectionApi(\n accessToken, requiredScopes, requiredSubject)\n except Exception as cause:\n self._introspectionException = cause\n self._errorResponse = self.__buildErrorFromException(cause)\n self._valid = False\n return 
False\n\n # The 'action' parameter in the response from /api/auth/introspection\n # denotes the next action that the API caller should take.\n action = self._introspectionResponse.action\n\n if action == IntrospectionAction.OK:\n # The access token is valid.\n self._valid = True\n return True\n else:\n self._errorResponse = self.__buildErrorFromResponse(self._introspectionResponse)\n self._valid = False\n return False", "def validate_token(self, token):\n\n try:\n if not token:\n raise AuthException(\"Needed a token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n # try to get from cache first\n now = time()\n token_info = self.token_cache.get(token)\n if token_info and token_info[\"expires\"] < now:\n # delete token. MUST be done with care, as another thread maybe already delete it. Do not use del\n self.token_cache.pop(token, None)\n token_info = None\n\n # get from database if not in cache\n if not token_info:\n token_info = self.db.get_one(\"tokens\", {\"_id\": token})\n if token_info[\"expires\"] < now:\n raise AuthException(\"Expired Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n return token_info\n\n except DbException as e:\n if e.http_code == HTTPStatus.NOT_FOUND:\n raise AuthException(\"Invalid Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n else:\n raise\n except AuthException:\n if self.config[\"global\"].get(\"test.user_not_authorized\"):\n return {\"id\": \"fake-token-id-for-test\",\n \"project_id\": self.config[\"global\"].get(\"test.project_not_authorized\", \"admin\"),\n \"username\": self.config[\"global\"][\"test.user_not_authorized\"], \"admin\": True}\n else:\n raise\n except Exception:\n self.logger.exception(\"Error during token validation using internal backend\")\n raise AuthException(\"Error during token validation using internal backend\",\n http_code=HTTPStatus.UNAUTHORIZED)", "def forward_validate_token_request(request):\n # TODO(garcianavalon) figure out if this method belongs to keystone client or if\n # there is a better way to do it/structure this\n keystone_url = getattr(settings, 'OPENSTACK_KEYSTONE_URL')\n endpoint = '/access-tokens/{0}'.format(request.GET.get('access_token'))\n url = keystone_url + endpoint\n LOG.debug('API_KEYSTONE: GET to {0}'.format(url))\n response = requests.get(url)\n return response", "def validate_connection(self):\n __method_name = inspect.currentframe().f_code.co_name\n res = self.pull(\n url=self.base_url + consts.OAUTH2_ENDPOINT,\n auth=HTTPBasicAuth(self.client_id, self.client_secretkey),\n data={\"grant_type\": \"client_credentials\"},\n method=\"POST\",\n )\n if res and res.get(\"access_token\"):\n self.session.headers[\"Authorization\"] = \"bearer {}\".format(\n res.get(\"access_token\")\n )\n self.applogger.info(\n \"{}(method={}) : {} : Validation successful.\".format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n return\n self.applogger.error(\n \"{}(method={}) : {} : Error occurred while fetching the access token from the response. \"\n 'Key \"access_token\" was not found in the API response.'.format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n raise Exception(\n \"Error occurred while fetching the access token from the response. 
\"\n 'Key \"access_token\" was not found in the API response.'\n )", "def check_token_invalidate(self, token):\n payload = {'key': self._lr_object._get_api_key(), 'secret': self._lr_object._get_api_secret(), 'access_token': token}\n url = SECURE_API_URL + \"api/v2/access_token/invalidate/\"\n return self._lr_object._get_json(url, payload)", "def validateAgentJWTToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_authtoken_is_valid(self):\n auth_client = self.fixtures.auth_client\n # scenario 1: when validity is unlimited (0)\n tomriddle = models.User(username='voldemort', fullname='Tom Riddle')\n scope = ['id', 'email']\n tomriddle_token = models.AuthToken(\n auth_client=auth_client, user=tomriddle, scope=scope, validity=0\n )\n self.assertTrue(tomriddle_token.is_valid())\n\n # scenario 2: when validity has not been given\n draco = models.User(username='draco', fullname='Draco Malfoy')\n draco_token = models.AuthToken(auth_client=auth_client, user=draco, scope=scope)\n with self.assertRaises(TypeError):\n draco_token.is_valid()\n\n # scenario 3: when validity is limited\n harry = models.User(username='harry', fullname='Harry Potter')\n harry_token = models.AuthToken(\n auth_client=auth_client,\n user=harry,\n scope=scope,\n validity=3600,\n created_at=utcnow(),\n )\n self.assertTrue(harry_token.is_valid())\n\n # scenario 4: when validity is limited *and* the token has expired\n cedric = models.User(username='cedric', fullname='Cedric Diggory')\n cedric_token = models.AuthToken(\n auth_client=auth_client,\n user=cedric,\n scope=scope,\n validity=1,\n created_at=utcnow() - timedelta(1),\n )\n self.assertFalse(cedric_token.is_valid())", "def checkLogin():\n if 'access_token' in login_session:\n return True\n else:\n return False", "def test_social_auth_exception(self):\n self._setup_provider_response_with_body(200, json.dumps(\"false\"))\n response = self.client.post(self.url, self.data())\n self._assert_access_token_error(response, \"The provided access_token is not valid.\", \"tpa-invalid-access-token\")\n self._verify_user_existence(user_exists=False, social_link_exists=False)", "def test_verification_with_invalid_token(self):\n verification_url = reverse('authentication:verify_email', kwargs={\n 'token': 'weucnuwencusn'})\n response = self.client.get(\n verification_url\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def check_token(fn):\n def response(self, *args, **kw):\n if not JWT_DISABLED:\n intoken = get_token_from_header()\n try:\n jwt.decode(intoken, SECRET_KEY)\n except jwt.exceptions.DecodeError:\n raise Error(FORBIDDEN)\n except jwt.ExpiredSignatureError:\n raise Error(UNAUTHORIZED, msg=\"Signature expired.\")\n except jwt.InvalidTokenError:\n raise Error(UNAUTHORIZED, msg=\"Invalid token.\")\n return fn(self, *args, **kw)\n return response", "def test_valid_access_request(self):\n\n # Generate a valid auth token\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code'\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n 
content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(200, response.status_code)\n\n # Assert that the token came back in the response\n token = response.json\n self.assertIsNotNone(token['access_token'])\n self.assertIsNotNone(token['expires_in'])\n self.assertIsNotNone(token['id_token'])\n self.assertIsNotNone(token['refresh_token'])\n self.assertIsNotNone(token['token_type'])\n self.assertEqual('Bearer', token['token_type'])\n\n # Assert that the access token is in the database\n with base.HybridSessionManager():\n access_token = \\\n token_api.access_token_get_by_token(token['access_token'])\n self.assertIsNotNone(access_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, access_token.user_id)\n self.assertEqual(token['id_token'], access_token.user_id)\n self.assertEqual(token['expires_in'], CONF.oauth.access_token_ttl)\n self.assertEqual(token['expires_in'], access_token.expires_in)\n self.assertEqual(token['access_token'], access_token.access_token)\n\n # Assert that the refresh token is in the database\n with base.HybridSessionManager():\n refresh_token = \\\n refresh_tokens.refresh_token_get_by_token(\n token['refresh_token'])\n\n self.assertIsNotNone(refresh_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, refresh_token.user_id)\n self.assertEqual(CONF.oauth.refresh_token_ttl,\n refresh_token.expires_in)\n self.assertEqual(token['refresh_token'], refresh_token.refresh_token)\n\n # Assert that the authorization code is no longer in the database.\n with base.HybridSessionManager():\n none_code = \\\n auth_api.authorization_code_get(authorization_code.code)\n self.assertIsNone(none_code)", "def check_if_token_is_valid(token):\n if token is None:\n return\n try:\n jwt.decode(\n token,\n key=current_app.config['JWT_KEY'],\n audience=current_app.config['AUTH0_BASE_URL'] + '/api/v2/',\n issuer=current_app.config['AUTH0_BASE_URL'] + '/')\n except (jwt.JWTError,\n jwk.JWKError,\n jwt.ExpiredSignatureError,\n jwt.JWTClaimsError,\n AttributeError,\n AssertionError,\n IndexError):\n return False\n else:\n return True", "def _check_response(self, res: requests.Response, token: str) -> None:\n return", "def validate_token(func):\n\n def wrapper(*args, **kwargs):\n # args[0] should be O365ManagementApi (self) because this function is\n # called from the O365ManagementApi class.\n try:\n if args[0].token.expiresOn < datetime.now():\n args[0].token = args[0].get_token()\n do_func = func(*args, **kwargs)\n return do_func\n except AttributeError as a:\n raise AttributeError(\"{0}: Existing token not valid or empty\".format(a))\n\n return wrapper", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token.')\n\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token.')\n\n self.context['payload'] = payload\n return data", "def test_invalid_access_key(self):\r\n data = {\r\n \"EdX-ID\": self.receipt_id,\r\n \"Result\": \"Testing\",\r\n \"Reason\": \"Testing\",\r\n \"MessageType\": \"Testing\"\r\n }\r\n json_data = json.dumps(data)\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=json_data,\r\n content_type='application/json',\r\n 
HTTP_AUTHORIZATION='test testing:testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('Access key invalid', response.content)\r\n self.assertEqual(response.status_code, 400)", "def token_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n token = request.headers['token']\n try:\n decoded = decode_token(token)\n except jwt.ExpiredSignatureError:\n return jsonify({\"message\": \"token expired\"}), 401\n except jwt.InvalidSignatureError:\n return jsonify({\"message\": \"Signature verification failed\"}), 401\n except jwt.InvalidTokenError:\n return jsonify({\"message\": \"Invalid Token verification failed\"}), 401\n except KeyError:\n return jsonify({\"message\": \"Missing token\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def test_validate_token(self, mock_xsrf_validate_token):\n self.handler.validate_token('test token', '[email protected]')\n mock_xsrf_validate_token.assert_called_once_with(\n 'test token', '[email protected]',\n timeout=xsrf.REFRESH_TOKEN_TIMEOUT_SEC)", "def check_token(api_key, required_scopes):\n return {\"uid\": \"\"}", "def testIsValidToken(self):\n self.assertTrue(TokenResource.isValidToken('aValidToken'),\n msg='Expected isValidToken to accept a valid token.')\n self.assertTrue(TokenResource.isValidToken(TokenResource.VALID_TOKEN_CHARS),\n msg='Expected isValidToken to accept a valid token.')\n self.assertFalse(TokenResource.isValidToken('Token!'),\n msg='Expected isValidToken to accept an invalid token.')\n self.assertFalse(TokenResource.isValidToken('an invalid Token'),\n msg='Expected isValidToken to accept an invalid token.')", "def _assert_valid(self, token_id, token_ref):\n current_time = timeutils.normalize_time(timeutils.utcnow())\n expires = token_ref.get('expires')\n if not expires or current_time > timeutils.normalize_time(expires):\n raise exception.TokenNotFound(token_id=token_id)", "def test_create_token_missing_field(self):\n res = self.client.post(TOKEN_URL, {'email':'', 'password':\"\"})\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_read_o_auth_access_token(self):\n pass", "def token_required(func):\n @wraps(func)\n def decorator(*args,**kwargs):\n token = request.headers.get('x-access-token') or request.headers.get('X-Access-Token')\n\n if not token:\n abort(400,description=\"Token Missing\")\n \n try:\n data = jwt.decode(token,current_app.config['SECRET_KEY'],algorithms=[\"HS256\"])\n curr_user = Users.query.filter_by(public_key=data[\"public_key\"]).first()\n token = BlacklistToken.query.filter_by(token=token).first()\n if token:\n abort(401,description=\"Invalid Token\")\n except:\n abort(401,\"Invalid token\")\n return func(curr_user,*args,**kwargs)\n return decorator", "def validate(self, data):\n try:\n payload = jwt.decode(data['token'], settings.SECRET_KEY, algorithms=['HS256'])\n except ExpiredSignatureError:\n raise serializers.ValidationError(\"The token has expired.\")\n except JWTError:\n raise serializers.ValidationError(\"Error validating token. 
Ensure is the right token.\")\n\n self.context['payload'] = payload\n return data", "def api_auth_validate(request, access_key):\n if not request.is_json:\n return {'error' : 'Bad request, payload must be JSON', 'code' : 400}\n if not 'working_repo' in session:\n return {'error' : 'Operation requires authentication', 'code': 401}\n if session['working_repo'] != access_key:\n return {'error' : 'Not authorized for this operation', 'code' : 403}\n \n return True", "def validate_token(self, data):\n try:\n payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise serializers.ValidationError('Verification link has expired.')\n except jwt.PyJWTError:\n raise serializers.ValidationError('Invalid token')\n if payload['type'] != 'email_confirmation':\n raise serializers.ValidationError('Invalid token')\n\n self.context['payload'] = payload\n return data", "def validate_token(self, payload, headers, request):\n token = headers.get(self.TOKEN_NAME, \"\")\n\n # no token\n if self.verify == VerificationMethod.NONE:\n # do nothing as no method was chosen\n pass\n\n # static token\n elif self.verify == VerificationMethod.TOKEN:\n if not compare_digest(token, self.token):\n raise PermissionDenied(self.MESSAGE_TOKEN_ERROR)\n\n # hmac token\n elif self.verify == VerificationMethod.HMAC:\n digest = hmac.new(self.secret.encode('utf-8'), request.body, hashlib.sha256).digest()\n computed_hmac = base64.b64encode(digest)\n if not hmac.compare_digest(computed_hmac, token.encode('utf-8')):\n raise PermissionDenied(self.MESSAGE_TOKEN_ERROR)\n\n return True", "def isValid(token):\n try:\n decoded = jwt.decode(token, SECRET_KEY)\n return True\n except:\n return False", "def test_create_token_missing_field(self):\r\n res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})\r\n\r\n self.assertNotIn('token', res.data)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def auth_error():\n return unauthorized('Invalid credentials')", "def test_gen_and_verify_good_token(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n self.assertTrue(tool.verify_token(token, 12345, 'test_action'))", "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"Check if token is genuine\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if not token:\n return jsonify({\"message\":\"Token is missing!\"}), 401\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n current_user = User.query.filter_by(public_id=data['public_id']).first()\n except:\n return jsonify({\"message\":\"Token is invalid\"}), 401\n return f(current_user, *args, **kwargs)\n\n return decorated", "def get_access_token(self, request) -> str or Exception:\n pass", "def test_bad_token(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n self.assertIsNone(self.\n user.verify_auth_token('jdjdje230920093944334j'))", "def testGetToken(self):\n # Token is base64 for a json object so always starts with '{\"'\n self.assertTrue(self.dl_object._access_token.startswith('eyJ'))\n self.assertTrue(len(self.dl_object._access_token) > 100)", "def test_no_token_auth_required(self, client):\n assert_hook_status(client, status=401)", "def verify_token(token):\n if config.API_TOKEN is None:\n logger.error(\n 'API token is not configured, auth will fail!')\n return token == config.API_TOKEN", "def validate_token_request(self):\n 
device_code = self.request.data.get('device_code')\n if not device_code:\n raise InvalidRequestError('Missing \"device_code\" in payload')\n\n client = self.authenticate_token_endpoint_client()\n if not client.check_grant_type(self.GRANT_TYPE):\n raise UnauthorizedClientError()\n\n credential = self.query_device_credential(device_code)\n if not credential:\n raise InvalidRequestError('Invalid \"device_code\" in payload')\n\n if credential.get_client_id() != client.get_client_id():\n raise UnauthorizedClientError()\n\n user = self.validate_device_credential(credential)\n self.request.user = user\n self.request.client = client\n self.request.credential = credential", "def test_verifies_bearer_token(self):\n\n badgr = self.get_badgr_setup()\n\n # _token_data isn't meant to be exposed; pylint: disable=W0212\n self.assertEqual(badgr._token_data['token_type'], \"Bearer\")\n self.assertEqual(badgr._token_data['access_token'],\n self._sample_token)", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def is_valid(self):\n return self.access_token is not None \\\n and time.time() < self._expiration_timestamp", "def test_validate_token(self, demo_app):\n demo_app.config.get.return_value = self.jwt_key\n token = jwt.encode({}, self.jwt_key, algorithm='HS256')\n\n self.assertTrue(\n validate_token(token)[0],\n 'Failed to validate token.'\n )", "def validate_token(user, tkn):\n try:\n decoded = jwt.decode(tkn, KEY)\n if decoded['user'] == user:\n stored_token = User.get(User.username == user).token\n if stored_token == tkn:\n return True\n return False\n except jwt.ExpiredSignatureError:\n return HTTPResponse(status=400, body={\"msg\":\"Validation error.\"})", "def check_token(self):\n request = self.request\n token_id, secret = self.decoded_token\n token_row = self.unauthenticated_token_row\n tokens_json = self.tokens_json\n access_token = tokens_json['access_token']\n settings = request.ferlysettings\n\n for attempt in (0, 1):\n info_url = 'https://%s/oauth2/userInfo' % settings.cognito_domain\n resp = requests.get(\n info_url, headers={\n 'Authorization': 'Bearer %s' % access_token,\n })\n\n if (resp.status_code == 401 and\n attempt == 0 and\n 'refresh_token' in tokens_json):\n # Try to refresh the token.\n log.warning(\n \"Refreshing access token %s from %s at %s\",\n token_id, token_row.username, request.remote_addr)\n tokens_json = self.refresh(tokens_json)\n access_token = tokens_json['access_token']\n tokens_encoded = json.dumps(tokens_json).encode('utf-8')\n tokens_fernet = Fernet(secret).encrypt(\n tokens_encoded).decode('ascii')\n token_row.tokens_fernet = tokens_fernet\n continue\n\n if 200 <= resp.status_code < 300:\n user_info = resp.json()\n break\n else:\n log.warning(\n \"userInfo failed for token %s from %s at %s: %s\",\n token_id, token_row.username, request.remote_addr,\n resp.content)\n raise self.forbidden()\n\n new_attrs = (\n ('user_agent', request.user_agent),\n ('remote_addr', request.remote_addr),\n )\n for attr, value in new_attrs:\n if getattr(token_row, attr) != value:\n setattr(token_row, attr, value)\n\n token_row.update_ts = self.now + datetime.timedelta(\n seconds=settings.token_trust_duration)\n token_row.expires = self.now + datetime.timedelta(\n seconds=settings.token_duration)\n\n log.info(\n \"Updated token %s from %s at %s: %s\",\n token_id, 
token_row.username, request.remote_addr, user_info)", "def get_token(request):\n try:\n ft_session = request.session['ft_token']\n token = OAuthAccessToken.objects.get(session_key=ft_session)\n # invalidate any token > 24 hours old\n now = datetime.now()\n diff = now - token.created\n if diff.days:\n token.delete()\n return False\n # TODO check ip address matches\n #oauthorize\n return token\n except KeyError:\n print 'no session token..'\n except OAuthAccessToken.DoesNotExist:\n print 'no access token ...'\n return False", "def is_token_valid(self):\n try:\n token_details = jwt.decode(self.__token, verify=False)\n self.__admin_id = token_details[\"id\"]\n self.__username = token_details[\"username\"]\n expiry = token_details[\"expiry\"]\n if time.time() > expiry:\n raise TokenExpiredException\n cursor = self.__connection.cursor()\n cursor.execute(\n \"select password from neutron_admin_credential where admin_id=%s and username=%s\",\n (self.__admin_id, self.__username)\n )\n result = cursor.fetchone()\n if result is None:\n self.__message = \"Invalid id details\"\n return False\n passsword = result[\"password\"]\n admin_secret = passsword + get_admin_credential()\n jwt.decode(self.__token, key=admin_secret, verify=True)\n return True\n except jwt.DecodeError:\n self.__message = \"Invalid Token\"\n return False\n except KeyError:\n self.__message = \"Insecure Token\"\n return False\n except ValueError:\n self.__message = \"Insecure Token\"", "def test_create_token_for_not_user(self):\n\n credentials = {'email': '[email protected]', 'password': 'Testpass12'}\n response = self.client.post(URL_TOKEN, credentials)\n\n # Check that the response is HTTP 400, and does not contain a token.\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def fresh_jwt_required(self) -> None:\n if not self._TOKEN:\n raise HTTPException(status_code=401,detail=\"Missing Authorization Header\")\n\n if self.get_raw_jwt()['type'] != 'access':\n raise HTTPException(status_code=422,detail=\"Only access tokens are allowed\")\n\n if not self.get_raw_jwt()['fresh']:\n raise HTTPException(status_code=401,detail=\"Fresh token required\")" ]
[ "0.8248678", "0.78147525", "0.7386542", "0.71018034", "0.70842767", "0.7034177", "0.6963336", "0.69433403", "0.68867075", "0.6882854", "0.6882577", "0.6882458", "0.68776894", "0.6838688", "0.6802286", "0.67813826", "0.67801815", "0.67788696", "0.6763725", "0.67588973", "0.6749858", "0.6718751", "0.6718751", "0.66879004", "0.66872466", "0.6672081", "0.6668117", "0.6659488", "0.66406405", "0.6623664", "0.660698", "0.6590735", "0.6588669", "0.65798223", "0.6579441", "0.6553783", "0.6551131", "0.6549762", "0.65456504", "0.65439796", "0.6543037", "0.65401024", "0.6504171", "0.65034884", "0.650082", "0.6498519", "0.64924324", "0.6486632", "0.6483056", "0.6481088", "0.6478622", "0.6449074", "0.64483786", "0.64413464", "0.6434294", "0.6432382", "0.6408709", "0.63986325", "0.6397956", "0.63874817", "0.6379717", "0.63755846", "0.63735", "0.6358126", "0.63450545", "0.63317394", "0.63305616", "0.6327408", "0.63244045", "0.63214445", "0.6313182", "0.6310067", "0.6305144", "0.62968457", "0.6291256", "0.62834597", "0.62747514", "0.62704694", "0.6269705", "0.62684387", "0.62658286", "0.62585396", "0.62532437", "0.62531465", "0.62528604", "0.62491786", "0.6246949", "0.6233993", "0.6230214", "0.62286395", "0.62270474", "0.62167233", "0.6210341", "0.6192578", "0.6190507", "0.6190005", "0.6188143", "0.61848027", "0.61798114", "0.6166715", "0.6159931" ]
0.0
-1
check for AccessError when user isn't in the specified channel
def test_channel_leave_invalid_user():
    clear()
    user = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    userchannel_id = channels_create(user['token'], 'userchannel', True)
    with pytest.raises(AccessError):
        channel_leave(leaver['token'], userchannel_id['channel_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "async def channel_manage_error(self, ctx: commands.context, error):\n if isinstance(error, commands.ChannelNotFound):\n await ctx.send(\"That channel was not found, make sure the channel exists.\")\n else:\n logging.warning(error)", "def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])", "def _return_failure(self, nick, channel):\n self.bot.client.send('PRIVMSG', channel,\n ':%s: You are not authorized to to that.' % nick.split('!')[0])", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False", "async def deny(self, ctx, user: discord.Member, *, reason: str=None):\n self.data_check(ctx)\n server = ctx.message.server\n try:\n defchannel = self.riceCog2[server.id][\"defchannel\"]\n except:\n defchannel = default_channel\n try:\n channelmute = self.riceCog2[server.id][\"channelmute\"]\n except:\n channelmute = defchannelmute \n channel = discord.utils.get(server.channels, name = defchannel)\n if channel is None:\n msg = await self.bot.say (\"I was unable to write to your log channel. 
Please make sure there is a channel called {} on the server!\".format(defchannel))\n return\n else:\n pass\n if reason is None:\n msg = await self.bot.say(\"Please enter a reason for the warning!\")\n await asyncio.sleep(5)\n await self.bot.delete_message(msg)\n return\n if user.id in self.norole[server.id]:\n if self.norole[server.id][user.id]['Role'] == True:\n msg = await self.bot.say(\"This user has already been denied access to the channel.\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg) \n await self.bot.delete_message(ctx.message)\n return\n else:\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n role = nobnl \n mod = ctx.message.author\n await self.bot.delete_message(ctx.message)\n await self.bot.add_roles(user, nobnl)\n dmuser = await self.bot.start_private_message(user)\n await self.bot.send_message(dmuser, \"Howdy!\\nThis is to let you know that you have been denied access to the channel for the reason:\\n\\n```{}``` \\nPlease speak to a member of staff if you have an issue.\".format(reason))\n user=user\n reason=reason\n ID = uuid.uuid4()\n embed=discord.Embed(title=\"User Denied:\", color=0xA00000)\n embed.add_field(name=\"Case ID:\", value=ID, inline=False)\n embed.add_field(name=\"Moderator:\", value=mod, inline=False)\n embed.add_field(name=\"User:\", value=\"{0} ({0.id})\".format(user), inline=False)\n embed.add_field(name=\"Reason:\", value=reason, inline=False)\n react = await self.bot.send_message(channel, embed=embed)\n await self.bot.add_reaction(react, \"\\U0001f44d\")\n await self.bot.add_reaction(react, \"\\U0001f44e\")\n await self.bot.add_reaction(react, \"\\U0001f937\")\n self.norole[server.id][user.id] = {\n 'Reason': reason,\n 'Mod': ctx.message.author.id,\n 'Role': True\n }\n dataIO.save_json(self.warninglist, self.norole)\n channel = discord.utils.get(server.channels, name = channelmute)\n for channel in server.channels:\n perms = discord.PermissionOverwrite()\n \n if channel.type == discord.ChannelType.text:\n perms.send_messages = False\n perms.read_messages = False\n await self.bot.edit_channel_permissions(channel, role, overwrite=perms) \n else:\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n role = nobnl \n mod = ctx.message.author\n await self.bot.delete_message(ctx.message)\n await self.bot.add_roles(user, nobnl)\n dmuser = await self.bot.start_private_message(user)\n await self.bot.send_message(dmuser, \"Howdy!\\nThis is to let you know that you have been denied access to the channel for the reason:\\n\\n```{}``` \\nPlease speak to a member of staff if you have an issue.\".format(reason))\n user=user\n reason=reason\n ID = uuid.uuid4()\n embed=discord.Embed(title=\"User Denied:\", color=0xA00000)\n embed.add_field(name=\"Case ID:\", value=ID, inline=False)\n embed.add_field(name=\"Moderator:\", value=mod, inline=False)\n embed.add_field(name=\"User:\", value=\"{0} ({0.id})\".format(user), inline=False)\n embed.add_field(name=\"Reason:\", value=reason, inline=False)\n react = await self.bot.send_message(channel, embed=embed)\n await self.bot.add_reaction(react, \"\\U0001f44d\")\n await self.bot.add_reaction(react, \"\\U0001f44e\")\n await self.bot.add_reaction(react, \"\\U0001f937\")\n self.norole[server.id][user.id] = {\n 'Reason': reason,\n 'Mod': ctx.message.author.id,\n 'Role': True\n }\n dataIO.save_json(self.warninglist, self.norole)\n channel = discord.utils.get(server.channels, name = channelmute)\n for channel in server.channels:\n perms = discord.PermissionOverwrite()\n \n if channel.type == 
discord.ChannelType.text:\n perms.send_messages = False\n perms.read_messages = False\n await self.bot.edit_channel_permissions(channel, role, overwrite=perms)", "def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)", "def test_channel_join_except_invalid_auth():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", True)\n\n # Create invalid token for the test\n invalid_user = 999\n invalid_token = generate_token(invalid_user)\n\n with pytest.raises(AccessError):\n channel_join_v2(invalid_token, channel_id1[\"channel_id\"])", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def votes(self, irc, msg, args, channel, pid):\n if channel and msg.args[0] in irc.state.channels:\n if msg.args[0] != channel:\n if ircdb.checkCapability(msg.prefix, 'admin') or ircdb.checkCapability(msg.prefix, 'owner'):\n irc.error(\"Not Implemented\")\n else:\n irc.errorInvalid('argument', channel)\n elif msg.args[0] == channel:\n irc.error(\"Not Implemented\")", "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "async def ccallow(self, ctx, channel: discord.TextChannel):\n channel_list = await self.config.guild(ctx.guild).channel_deny()\n if channel.id in channel_list:\n channel_list.remove(channel.id)\n else:\n return await ctx.send(\"Channel is not on the deny list.\")\n await self.config.guild(ctx.guild).channel_deny.set(channel_list)\n await ctx.send(f\"{channel.mention} will be allowed for chatchart use.\")", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert 
channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def error_no_bulletin_channel(user: discord.User, guild: discord.Guild) -> str:\n return (\n f\"{user.mention}, it looks like your guild: '{guild.name}' does not yet have \"\n \"a bulletin channel set up, so I can't alert them to your market's movements.\"\n \" Tell an admin they need to type the `$bulletins_here` command in the channel\"\n \" you all want to use. Or if you're an admin, do it yourself!\"\n )", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def test_channel_leave_invalid_token():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'First', 'Last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n auth_logout(user['token'])\n with pytest.raises(AccessError):\n channel_leave(user['token'], userchannel_id['channel_id'])", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def check_channel_shell_request(self, channel):\n return False", "async def set_channel(self, ctx, channel):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n self.stream_channel = channel\n await self.bot.say(\"Channel sucessfully assigned.\")\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")", "def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)", "def authorized(guild, channel):\n\tif str(guild.id) in Settings.authorized_guilds:\n\t\tif str(channel.id) in Settings.authorized_channels[str(guild.id)]:\n\t\t\treturn True\n\t\telse:\n\t\t\t# logger.info('%s is not an authorized channel in 
%s', channel.id, guild.id)\n\t\t\tpass\n\telse:\n\t\t# logger.info('%s is not an authorized guild id', guild.id)\n\t\tpass\n\treturn False", "def test_request_channel_is_none(self):\n CanInfo.objects.filter(can_id=self.UUID).update(channel_name=None)\n self.assertFalse(send_rotate_to_can(self.USER, self.BIN_NUM))", "def _check_access(user, course_id):\r\n if not has_access(user, 'staff', course_id):\r\n raise Http404\r\n\r\n return", "def test_channel_join_except_channel():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channels_create_v2(auth_token1, \"Chill Soc\", True)\n invalid_channel = 50\n \n with pytest.raises(InputError):\n channel_join_v2(auth_token2, invalid_channel)", "def check_channel_exec_request(self, channel, command):\n return False", "def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False", "def slack_access(s, level=READ):\n try: slack_access_level = settings.SLACK_USERS[s.slack_uid]\n except: return False\n return (slack_access_level & level) != 0", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if 
not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def test_access_negative(self, api):\n self.builder.add_user(api.get_user())\n r1 = api.access_user(api.get_user(), False)\n access_false = self.builder.get_access(api.get_user())\n self.builder.del_user(api.get_user())\n assert access_false == 0\n assert r1.status_code == 200", "async def _check_channel(\n self, starboard: StarboardEntry, channel: discord.TextChannel\n ) -> bool:\n if starboard.whitelist_channel:\n return channel.id in starboard.whitelist_channel\n else:\n return channel.id not in starboard.blacklist_channel", "def test_channel_join_except_private():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", False)\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])", "def test_user_does_not_have_access(self):\n self.assertRaises(\n ObjectDoesNotExist,\n Thread.public.get_by_user,\n **{'thread_id': self.thread.pk, 'user': self.user}\n )", "def test_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 
'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "async def control_checks(self, ctx):\n server_id = ctx.message.server.id\n requester = ctx.message.author\n #silently drop if not in voice\n if not self.in_voice(server_id):\n return False\n #refuse if user not in the same channel\n if not self.user_in_channel(server_id, requester):\n vcname = self.get_server_dict(server_id)['voice'].channel.name\n await ctx.bot.send_message(ctx.message.channel, \"You can't control me outside of {}.\".format(vcname))\n return False\n return True", "def get_everyone_denied(self):", "def is_user_channel_member(channel_id, u_id):\n for selected_id in database.get_channel_data(channel_id)[\"member_ids\"]:\n if selected_id == u_id:\n return True\n return False", "def cog_check(self, ctx):\n return ctx.author.guild_permissions.administrator", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False", "def channel_join(token, channel_id):\n\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # checks if user is already a part of channel\n for user_id in curr_channel[\"member_ids\"]:\n if curr_id == user_id:\n raise error.InputError(description=\"user is joining a channel user is already in\")\n\n # this checks if the channel is empty (or new) in this case we make the new member an owner.\n if curr_channel[\"member_ids\"] == []:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n # this checks if the user is an owner of the slacker\n # if they are they are given owner privelages in the channel\n # else they are a member\n elif user_perms[\"permission_id\"] == 1:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is True:\n # adds the user into the channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is False:\n raise error.InputError(description=\"\"\"channel_join recieved a channel_id\n for a private channel\"\"\")", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def no_network_access_check(user):\n return not user.has_property(\"network_access\")", "def assert_user_cannot_read(self, user, video):\n livesession = LiveSessionFactory(\n 
email=user.email,\n is_registered=True,\n user=user,\n video=video,\n )\n\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n self._get_url(video, livesession),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 403)", "def check_channel_forward_agent_request(self, channel):\n return False", "def test_call_bad_perms(self):\r\n self.assertRaises(ValueError, self.cs_overview, -1)", "def is_private(event):\n channel = event.get('channel')\n return channel.startswith('D')", "def validateDevChannel( self, dev, devChannel ):\n d = self.dcDict\n if devChannel not in d[dev]['devChannels'].keys(): raise DCBoxError( 0 )", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def test_react_invalid_message_id_in_channel():\n clear()\n user_a = register_n_users(1)\n channels_create(user_a[\"token\"], \"channel_a\", True)\n invalid_channel_id = -1\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], invalid_channel_id, 1)", "def on_badchannelkey(self, conn, event) -> None:\n channel_name = event.arguments[0]\n logger.warning('Cannot join channel %s (bad key).', channel_name)", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_leave_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_leave(leaver['token'], invalid_id)", "def test_logged_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n utils.test_cannot_access(self, self.url, expected_url)", "def extra_users_groups(self, acs):\n return who_cannot_access(self.user, self.users_allowed.all(), self.groups_allowed.all(), acs)", "def test_react_invalid_message_id_in_different_channel():\n clear()\n user_a, user_b = register_n_users(2)\n # user_a create a channel\n channels_create(user_a[\"token\"], \"public_channel_a\", True)[\"channel_id\"]\n # user_b create a channel and send message in his own channel\n public_channel_id_b = channels_create(user_b[\"token\"], \"public_channel_b\", True)[\n \"channel_id\"\n ]\n message_id_b = message_send(\n user_b[\"token\"], public_channel_id_b, \"I am in channel_b\"\n )[\"message_id\"]\n # user_a should not be able to react the the message in the public_channel_b\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], message_id_b, 1)", "def vc_only():\n\n async def check(ctx):\n if ctx.guild and 
ctx.author.voice:\n if not ctx.guild.me.voice or ctx.author.voice.channel == ctx.guild.me.voice.channel:\n return True\n await ctx.reply(\"I'm already in another voice channel!\")\n return False\n await ctx.reply('You must join a server voice channel first!')\n return False\n\n return commands.check(check)", "async def check_permissions(self, ctx, channel: typing.Optional[typing.Union[discord.TextChannel, discord.VoiceChannel]] = None, *, target: typing.Union[discord.Member, discord.Role] = None):\n if target == None:\n target = ctx.author\n if isinstance(target, discord.Member):\n if channel == None:\n perms = target.guild_permissions\n else:\n perms = channel.permissions_for(target)\n col = target.color\n avatar = await self.bot.user_avatar_as(target, size=256)\n name = str(target)\n elif isinstance(target, discord.Role):\n perms = target.permissions\n if channel != None:\n perms.update(\n **{x[0]: x[1] for x in channel.overwrites_for(ctx.guild.default_role) if x[1] != None})\n perms.update(**{x[0]: x[1] for x in channel.overwrites_for(target) if x[1] != None})\n col = target.color\n avatar = ctx.guild.icon_url_as(format='png', size=256)\n name = str(target)\n permsl = list()\n # Get the perms translations\n\n # if perms[\"\"]\n if perms.administrator:\n # If the user is admin, we just say it\n if \"administrator\" in perms_translations.keys():\n perm = perms_translations[\"administrator\"]\n else:\n perm = \"Administrator\"\n permsl.append(\":white_check_mark:\" + perm)\n else:\n # Here we check if the value of each permission is True.\n for perm, value in perms:\n if (perm not in self.perms_name['text']+self.perms_name['common_channel'] and isinstance(channel, discord.TextChannel)) or (perm not in self.perms_name['voice']+self.perms_name['common_channel'] and isinstance(channel, discord.VoiceChannel)):\n continue\n #perm = perm.replace('_',' ').title()\n if perm in perms_translations.keys():\n perm = perms_translations[perm]\n else:\n perm = perm.replace('_', ' ').title()\n if value:\n permsl.append(\":white_check_mark:\" + perm)\n else:\n permsl.append(\":x:\" + perm)\n if ctx.channel.permissions_for(ctx.guild.me).embed_links:\n # \\uFEFF is a Zero-Width Space, which basically allows us to have an empty field name.\n # And to make it look nice, we wrap it in an Embed.\n desc = \"Permissions générales\" if channel is None else channel.mention\n embed = discord.Embed(color=col, description=desc)\n embed.set_author(name=name, icon_url=avatar)\n if len(permsl) > 10:\n sep = int(len(permsl)/2)\n if len(permsl) % 2 == 1:\n sep += 1\n embed.add_field(name='\\uFEFF', value=\"\\n\".join(permsl[:sep]))\n embed.add_field(name='\\uFEFF', value=\"\\n\".join(permsl[sep:]))\n else:\n embed.add_field(name='\\uFEFF', value=\"\\n\".join(permsl))\n await ctx.send(embed=embed)\n # Thanks to Gio for the Command.\n else:\n try:\n await ctx.send(\"**Permission de '{}' :**\\n\\n\".format(name.replace('@', '')) + \"\\n\".join(permsl))\n except:\n pass", "def user_in_channel(self, server_id, user):\n srv = self.get_server_dict(server_id)\n return user.voice.voice_channel and srv['voice'] and user.voice.voice_channel == srv['voice'].channel", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def cant(user, action):\n\n return not can(user, action)", "def test_channel_join_except_repetitive():\n # Clear the data structure\n 
clear_v1()\n # Call other functions to create the data and store in data structure\n auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", True)\n\n\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])", "async def cog_command_error(self, ctx: Context, error: CommandInvokeError):\n if isinstance(error.original, NoRolesError):\n await error.original.handle_error(ctx)\n else:\n await super().cog_command_error(ctx, error)", "def test_group_is_not_private_user_is_not_member(self):\n thread = self.create_thread()\n user = self.create_user()\n self.assertTrue(thread.first_message.visible_to_user(user))", "def test_channel_join_private_owner():\n clear()\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def test_can_info_does_not_exist(self):\n fake_user = User(username='Fake', password='')\n self.assertFalse(send_rotate_to_can(fake_user, self.BIN_NUM))", "def whenException(self, channel, call):", "async def compare_channels(self, user_id, channel):\n game_id = await self.get_game_by_player(user_id)\n game = await self.get_game(game_id)\n if game[5] == channel.id:\n return True\n else:\n await channel.send(f\"> **{user_id}, that game ({game_id}) is not available in this text channel.**\")\n return False", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def __require_privilaged_access(self):\n if not self.getLoggedInUser():\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"The 
server must be start by using privilaged access to \"\n \"execute this action.\")", "def test_user_not_in_group_cannot_update_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def _has_access_error_desc(user, action, descriptor, course_key):\r\n def check_for_staff():\r\n return _has_staff_access_to_descriptor(user, descriptor, course_key)\r\n\r\n checkers = {\r\n 'load': check_for_staff,\r\n 'staff': check_for_staff\r\n }\r\n\r\n return _dispatch(checkers, action, user, descriptor)", "def on_access_deny(self, handler):\n print \"User with {0} has been DENIED access.\".format(\n handler.client_address[0]\n )\n time.sleep(2) # lets annoy user if it is denied access", "def _check_has_channel(data):\r\n return re.findall(\r\n r'^:[a-zA-Z0-9_]+\\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'\r\n r'\\.tmi\\.twitch\\.tv '\r\n r'JOIN #([a-zA-Z0-9_]+)$', data)", "def check_user(msg):\n if \"Error\" in msg:\n raise ValueError('User already exists.')", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def test_not_member(bot, event):\n _, event_id = event\n expect_error(edit, InputError, bot.username, event_id, False, None, None)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def cmd_not_authed_dcc_auth(self, c, e):\n UserId = self.__authUser(c, e)\n\n if int(UserId) > 0:\n self.__IpToUser[self.getIpStringByDCCConnection(c)]['auth'] = 'authed_dcc'\n c.privmsg(AUTH_USER_SUCCESS_BY_BOTKEY)\n else:\n c.privmsg(AUTH_USER_FAILED)", "def access_validation(self, request, *args, **kwargs):\n course_id = self.kwargs.get('course_id')\n user = self.request.user\n user_role_id = getattr(user, user.role).id\n course_role_dict = {\n 'teacher': Course.objects.filter(Q(teachers__id=user_role_id) & Q(id=course_id)),\n 'student': Course.objects.filter(Q(students__id=user_role_id) & Q(id=course_id)),\n }\n course = course_role_dict[user.role]\n error = {'error': f\"Either you don't have access to this course, \"\n f\"or course with ID{course_id} doesn't exist.\"}\n if course:\n return method(self, request, *args, **kwargs)\n return Response(error, status=status.HTTP_403_FORBIDDEN)" ]
[ "0.680053", "0.6787445", "0.6721486", "0.6431934", "0.6347937", "0.6341007", "0.6327553", "0.61701375", "0.60786307", "0.60661197", "0.60619277", "0.60619277", "0.60619277", "0.60619277", "0.60603285", "0.60603285", "0.6012092", "0.6003716", "0.59488", "0.5939925", "0.5939417", "0.59331137", "0.592417", "0.5922532", "0.5886108", "0.58830297", "0.58773786", "0.58485025", "0.5847983", "0.58424073", "0.58066535", "0.5788789", "0.5777931", "0.5757077", "0.57529247", "0.57314086", "0.5731197", "0.5731197", "0.5726158", "0.57165766", "0.57092273", "0.5700668", "0.5697843", "0.56817156", "0.5679098", "0.56740403", "0.56729186", "0.56719756", "0.56680477", "0.56549615", "0.56471395", "0.5640286", "0.56184006", "0.56146395", "0.56130916", "0.56126624", "0.56050384", "0.5597634", "0.5590967", "0.55891514", "0.5587194", "0.557629", "0.55668896", "0.5564464", "0.55596435", "0.5546556", "0.5543807", "0.5540923", "0.55407095", "0.552834", "0.5516552", "0.5503871", "0.55018175", "0.5500293", "0.54988843", "0.5496395", "0.54939693", "0.5491546", "0.54618543", "0.5459829", "0.5457732", "0.54566723", "0.5453863", "0.5452814", "0.5433325", "0.54324836", "0.54324836", "0.54322416", "0.54302526", "0.54266876", "0.5413629", "0.5409935", "0.54099184", "0.54088086", "0.54058653", "0.5390662", "0.5388007", "0.5385506", "0.53812563", "0.5379582" ]
0.65093845
3
check if InputError is raised if the channel_id is invalid
def test_channel_leave_invalid_channel():
    clear()
    user = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    channels_create(user['token'], 'userchannel', True)
    invalid_id = 0
    with pytest.raises(InputError):
        channel_leave(leaver['token'], invalid_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "def test_react_invalid_message_id_in_channel():\n clear()\n user_a = register_n_users(1)\n channels_create(user_a[\"token\"], \"channel_a\", True)\n invalid_channel_id = -1\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], invalid_channel_id, 1)", "def _check_channel_input(self, channel):\n # da `.get` `None` zurueckgibt wenn der Schluessel `channel` nicht existiert,\n # wird auch bei fehlender Konfiguration der Fehler geworfen\n if self.channels.get(channel) != GPIO.IN:\n raise RuntimeError(\"You must setup() the GPIO channel as an input first\")", "def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)", "def test__validate_channels__type_error(input_value):\n validate_channels(input_value)", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "async def channel_manage_error(self, ctx: commands.context, error):\n if isinstance(error, commands.ChannelNotFound):\n await ctx.send(\"That channel was not found, make sure the channel exists.\")\n else:\n logging.warning(error)", "async def ticker_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def add_badchannel(self):\n text = 'Channel number: \\n(e.g.: 3, 5, 8-12)'\n uinp, ok = QInputDialog.getText(None, 'Add as bad channel', text)\n if ok:\n uinp = uinp.replace(' ', '') # removes blank spaces\n ch_str = uinp.split(',') # splits csv\n try:\n ch_list = []\n for elem in ch_str:\n if '-' in elem: # if given a range e.g. 
7-12\n elem_lims = elem.split('-')\n seq = range(int(elem_lims[0]), int(elem_lims[1]) + 1)\n ch_list.extend(seq)\n else: # if given a single value\n ch_list.append(int(elem))\n self.model.BadChannelAdd(ch_list=ch_list)\n except Exception as ex:\n print(str(ex))", "def isInputValid(self, input):\r\n pass", "def test__validate_channels__passing(input_value):\n return validate_channels(input_value)", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def validateDevChannel( self, dev, devChannel ):\n d = self.dcDict\n if devChannel not in d[dev]['devChannels'].keys(): raise DCBoxError( 0 )", "def validate_channel_value(value: int) -> None:\n if 0 <= value <= 255:\n pass\n else:\n raise ValueError(\"Color channel has to be in range [0; 255]\")", "def test_react_invalid_message_id_in_different_channel():\n clear()\n user_a, user_b = register_n_users(2)\n # user_a create a channel\n channels_create(user_a[\"token\"], \"public_channel_a\", True)[\"channel_id\"]\n # user_b create a channel and send message in his own channel\n public_channel_id_b = channels_create(user_b[\"token\"], \"public_channel_b\", True)[\n \"channel_id\"\n ]\n message_id_b = message_send(\n user_b[\"token\"], public_channel_id_b, \"I am in channel_b\"\n )[\"message_id\"]\n # user_a should not be able to react the the message in the public_channel_b\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], message_id_b, 1)", "def del_badchannel(self):\n text = 'Channel number: \\n(e.g.: 3, 5, 8-12)'\n uinp, ok = QInputDialog.getText(None, 'Delete bad channel', text)\n if ok:\n uinp = uinp.replace(' ', '') # removes blank spaces\n ch_str = uinp.split(',') # splits csv\n try:\n ch_list = []\n for elem in ch_str:\n if '-' in elem: # if given a range e.g. 7-12\n elem_lims = elem.split('-')\n seq = range(int(elem_lims[0]), int(elem_lims[1]) + 1)\n ch_list.extend(seq)\n else: # if given a single value\n ch_list.append(int(elem))\n self.model.BadChannelDel(ch_list=ch_list)\n except Exception as ex:\n print(str(ex))", "async def convert_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def _validate_call_id(self, call_id):\n\n self._validate_required_data(call_id, self.CALL_ID)\n\n query = CallRecord.objects.filter(call_id=call_id)\n\n if query.exists():\n raise NotAcceptable(\n detail='Call id is already in use. 
Please, choose another')", "def test_get_flow_request_by_channel_id_wrong_channel_id(self):\n headers = self._get_oauth_header(client_name=DISPATCHER_NAME)\n res = self.client.get('/v1/flow_requests/search/?channel_id=unknown', **headers)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(res.json(), {'errors': ['not_found']})", "def test_dccChatMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_CHAT, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC CHAT request: ['foo']\")", "async def handle_user_input_error(self, ctx: Context, e: errors.UserInputError) -> None:\n if isinstance(e, errors.MissingRequiredArgument):\n embed = self._get_error_embed(\"Missing required argument\", e.param.name)\n self.bot.stats.incr(\"errors.missing_required_argument\")\n elif isinstance(e, errors.TooManyArguments):\n embed = self._get_error_embed(\"Too many arguments\", str(e))\n self.bot.stats.incr(\"errors.too_many_arguments\")\n elif isinstance(e, errors.BadArgument):\n embed = self._get_error_embed(\"Bad argument\", str(e))\n self.bot.stats.incr(\"errors.bad_argument\")\n elif isinstance(e, errors.BadUnionArgument):\n embed = self._get_error_embed(\"Bad argument\", f\"{e}\\n{e.errors[-1]}\")\n self.bot.stats.incr(\"errors.bad_union_argument\")\n elif isinstance(e, errors.ArgumentParsingError):\n embed = self._get_error_embed(\"Argument parsing error\", str(e))\n await ctx.send(embed=embed)\n self.bot.stats.incr(\"errors.argument_parsing_error\")\n return\n else:\n embed = self._get_error_embed(\n \"Input error\",\n \"Something about your input seems off. Check the arguments and try again.\"\n )\n self.bot.stats.incr(\"errors.other_user_input_error\")\n\n await ctx.send(embed=embed)\n await self.send_command_help(ctx)", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def _validate_input(self):\n\n if is_empty(self.message) == True:\n raise ValidationException(\"Message cannont be empty.\")", "def test_invalid_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=ABC'.format(token))\n self.assertSocketError(ws, 4300, 'Invalid channel.')\n self.assertTrue(mock_get.called)\n self.assertFalse(mock_subscribe.called)", "def check_channel_shell_request(self, channel):\n return False", "def validate_input(self, *args):\n return", "def on_badchannelkey(self, conn, event) -> None:\n channel_name = event.arguments[0]\n logger.warning('Cannot join channel %s (bad key).', channel_name)", "def test_validate_input_rejection(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('000011')", "def test_api_invalid_stream_id(self) -> None:\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n result = self.api_patch(\n user,\n \"/api/v1/users/me/subscriptions/121\",\n {\"property\": \"is_muted\", \"value\": \"somevalue\"},\n )\n 
self.assert_json_error(result, \"Invalid stream ID\")", "def validate_message(self, state_id, msg):\n pass", "def test_request_channel_is_none(self):\n CanInfo.objects.filter(can_id=self.UUID).update(channel_name=None)\n self.assertFalse(send_rotate_to_can(self.USER, self.BIN_NUM))", "def test_channel_join_except_channel():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channels_create_v2(auth_token1, \"Chill Soc\", True)\n invalid_channel = 50\n \n with pytest.raises(InputError):\n channel_join_v2(auth_token2, invalid_channel)", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def check_channel_exec_request(self, channel, command):\n return False", "def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')", "def test_dccAcceptMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_ACCEPT, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND ACCEPT request: ['foo']\")", "def input_error(self, errCode):\n errMsg = ''\n if 'A' in errCode: errMsg = errMsg + 'X column is not specified.\\n'\n if 'B' in errCode: errMsg = errMsg + 'X Column is not numeric.\\n'\n if 'C' in errCode: errMsg = errMsg + 'Y column is not specified.\\n'\n if 'D' in errCode: errMsg = errMsg + 'Y Column is not numeric.\\n'\n if 'E' in errCode: errMsg = errMsg + 'Z Column is not numeric.\\n'\n if 'F' in errCode: errMsg = errMsg + 'Calibration point 1 row is out of range.\\n'\n if 'G' in errCode: errMsg = errMsg + 'Calibration point 2 row is out of range.\\n'\n if 'H' in errCode: errMsg = errMsg + 'First row is not specified.\\n'\n if 'I' in errCode: errMsg = errMsg + 'Last row is not specified.\\n'\n if 'J' in errCode: errMsg = errMsg + 'First row is out of range.\\n'\n if 'K' in errCode: errMsg = errMsg + 'Last row is out of range.\\n'\n if 'L' in errCode: errMsg = errMsg + 'First and last rows are not compatible.\\n'\n self.wait_window(InputError(self, errMsg.rstrip('\\n')))", "def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def test_invalid_event(bot):\n expect_error(edit, InputError, bot.username, 1, False, None, None)", "def set_channel(self):\n\t\tself.channel = int(input(\"Enter the Channel No. = \"))\n\t\tif self.channel > 1 :\n\t\t\tself.channel = int(input(\"Enter the Channel No. 
= \"))\n\t\t\n\t\treturn self.channel", "def _raise_if_invalid(self):\n if self._stack_result == -1 and self._recm_data == -1:\n error_message = 'Worker result for request ID {} does not exist yet'.format(\n self.external_request_id)\n logger.exception(error_message)\n raise SARBRequestInvalidException(error_message)", "def _check(self,err):\r\n if err < 0:\r\n buf_size = 128\r\n buf = create_string_buffer('\\000' * buf_size)\r\n self.nidaq.DAQmxGetErrorString(err,byref(buf),buf_size)\r\n raise RuntimeError('NI-DAQ call failed with error %d: %s'%(err,repr(buf.value)))", "def __input_validator(msg):\n\n\t\tstatus = msg[\"status\"]\n\n\t\tif status == 1:\n\t\t\treturn status\n\t\telif status == 0:\n\t\t\tprint(msg[\"body\"])\n\t\telif status == -1:\n\t\t\tprint(\"Please enter something!\")\n\t\telif status == -2:\n\t\t\tprint(\"Your command {} is invalid\".format(msg[\"verb\"]))\n\t\telif status == -3:\n\t\t\tprint(\"No argument given after {}\".format(msg[\"verb\"]))", "def checkUIDValidity(self, uid):\r\n if uid not in self._pendingContainer:\r\n raise CredentialError('Invalid environment ID.')", "def validate_input(self, argin):\n try:\n configuration_dict = json.loads(argin)\n _ = configuration_dict[\"id\"]\n except (KeyError, JSONDecodeError) as err:\n msg = f\"Validate configuration failed with error:{err}\"\n self.logger.error(msg)\n return (None, ResultCode.FAILED, msg)\n except Exception as other_errs:\n msg = f\"Validate configuration failed with unknown error:{other_errs}\"\n self.logger.error(msg)\n return (None, ResultCode.FAILED, msg)\n\n return (\n configuration_dict,\n ResultCode.OK,\n \"ConfigureScan arguments validation successful\",\n )", "def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()", "def __parse_channel_id(self, data):\n if 'channel_id' in data:\n return data['channel_id']\n if 'channel' in data:\n return data['channel']['id']\n return None", "def test_dccSendMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_SEND, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND request: ['foo']\")", "def test_id_nonexistent(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"e-dad\"\n self.assertRaises(InvalidInputError, self.command.run)", "def _check_validconnectioninput(self):\n # Check if name is valid\n if self._check_name(self.symbol):\n second_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '.' 
is used:\n if self._is_period(self.symbol):\n self.symbol = self.scanner.get_symbol()\n # Check if device input begins with 'I'\n if self.names.get_name_string(self.symbol.id)[0] == \"I\":\n # Check if input number is a positive number\n try:\n inputno = int(\n self.names.get_name_string(\n self.symbol.id)[\n 1:])\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n except BaseException:\n # Input number is not valid\n self._display_syntax_error(\"number\")\n self._semicolon_skipper()\n return None, None\n # OR if DType input\n elif self._check_validdtypeinput(self.symbol):\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n else:\n # Input is not valid\n self._display_syntax_error(\"input\")\n self._semicolon_skipper()\n return None, None\n else:\n # No '.'\n self._display_syntax_error(\"period\")\n self._semicolon_skipper()\n return None, None\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n return None, None", "def test_dccResumeMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_RESUME, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND RESUME request: ['foo']\")", "def test_standup_send_invalid_channel (url, _pre_setup):\n\n token = _pre_setup[0]['token']\n\n standup_send_data = {\n 'token': token,\n 'channel_id': 99999,\n 'message': \"message\"\n }\n\n response = requests.post(url + \"standup/send\", json=standup_send_data)\n assert response.status_code == 400", "async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> None:\n client = MatterClient(data[CONF_URL], aiohttp_client.async_get_clientsession(hass))\n await client.connect()", "async def error(self, channel_id,user_infos, user_id, team_id):\n # Message de commande incorrecte\n error = \"Commande invalide. Veuillez utiliser la commande [help] pour plus d'informations.\"\n return await self.sendText(error, channel_id,user_infos, team_id)", "def validateVfabric(output ,arg_dict, key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' = '%s' is not a valid Id. ID should be numeric \" % \n\t\t\t\t(key,id)))\n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n\t output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. 
\\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict", "def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])", "def check_input_socketname(self, socket_name):\n socket = self.__inputs.get(socket_name)\n if not socket: raise InvalidSocketError('input: '+str(socket_name))", "def testIdNonUniqueIdOnInit(self):\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n cdl_convert.ColorCorrection,\n 'uniqueId',\n 'file'\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n try:\n cc = cdl_convert.ColorCorrection('uniqueId', 'file')\n except ValueError:\n self.fail(\"Non-unique ID was not accepted!\")\n\n self.assertEqual(\n 'uniqueId001',\n cc.id\n )", "def test_ap_csa_invalid(dev, apdev):\n csa_supported(dev[0])\n ap = connect(dev[0], apdev)\n\n vals = [ 2461, 4900, 4901, 5181, 5746, 5699, 5895, 5899 ]\n for val in vals:\n if \"FAIL\" not in ap.request(\"CHAN_SWITCH 1 %d\" % val):\n raise Exception(\"Invalid channel accepted: %d\" % val)", "def whenException(self, channel, call):", "def set_channel(self):\r\n\t\tself.channel = int(input(\"Enter the Channel No.(0-8) = \"))\r\n\t\twhile self.channel > 8 :\r\n\t\t\tself.channel = int(input(\"Enter the Channel No.(0-8) = \"))\r\n\t\t\r\n\t\treturn self.channel", "def getValidation(myInput):\r\n if myInput == \"\":\r\n print('You did not enter the number of bugs collected.')\r\n return -1\r\n elif myInput.isnumeric() == False:\r\n print('You entered a negative or a text value, please enter numerical digits only.')\r\n return -1\r\n elif myInput.isnumeric() == True:\r\n return int(myInput)\r\n else:\r\n print('There has been a read error, please reenter your number')\r\n return -1", "def get_input(msg):#function which catches all user input which is invalid (not numbers) for all the shapes\n value = None\n while not value:\n value = input(msg)\n if not value.isnumeric():#if not a valid number print the following message \n print(\"Please enter a valid number\")\n value = None\n else:\n return int(value)#once a correct number is entered the number is returned and program contiues ", "def check_message(self, msg):\n pass", "def check_input(self):\n try:\n if(self.datatype == \"eeg\"):\n self.model.set_datatype(self.datatype)\n self.model.set_dyad(self.dyad)\n self.model.set_channel(self.channel_or_video)#causes loading of data\n elif(self.datatype == \"motion\"):\n self.model.set_datatype(self.datatype)\n self.model.set_filepath(self.database.dictionary[str(self.dyad)][\"video\"][str(self.channel_or_video)][\"motion\"][\"in_roi\"][\"1\"][\"path\"])#TODO NOT ALWAYS 1\n self.model.set_channel(self.channel_or_video)\n else:\n QMessageBox.about(self, \"Incorrect selection\", \"Choose datatype\")\n self.accept()\n except KeyError as e:\n QMessageBox.about(self, \"Incorrect selection\", \"Please choose wisely\" + str(e))", "def validate_device_id(device_id):\n regex = re.compile(r'^[0-9a-fA-F]{2,6}$')\n if regex.match(device_id) == None:\n raise ValidationError('Device ID must be 2-6 characters and must be hexadecimal (0-9,a-f,A-F).')", "def validateIOmoduleId(output ,arg_dict , key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == None:\n 
output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric \" % (key,id))) \n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict", "async def cog_command_error(self, ctx: Context, error: Exception) -> None:\n if isinstance(error, InWhitelistCheckFailure):\n error.handled = True", "def validate_args(self, in_args, cmd_call):\n valid_1, valid_2 = None, None\n\n if len(in_args) > 0 and type(in_args) is not list:\n args = in_args.split()\n valid_1 = args[0]\n elif type(in_args) is list and len(in_args) > 0:\n args = in_args\n valid_1 = args[0]\n else:\n args = []\n\n if cmd_call in ['default']:\n # Default : Returns a valid cui type for an input cui\n # checks to see if there is more than 2 arguments\n # if so, arg[0] may be a valid code\n # arg[1] may be a valid code type\n # if not ask the user what type of code type arg[0] is\n # valid_1 = valid cui type\n # valid_2 = None\n while True:\n if len(args) >= 2 and len(args) <= 3:\n input_type = args[1].upper()\n else:\n input_type = input(\"What type of id is '{0}'? [LOCAL/RXCUI/NDC/SNOMED]\".format(args[0])).upper()\n\n # Confirm it's a valid code type\n valid_type = self.validate_id_type(input_type)\n # Valid type is a boolean of True\n if isinstance(valid_type, str) or valid_type is None:\n return None\n elif valid_type:\n break\n elif not valid_type:\n print('Invalid Option, Please Try Again')\n continue\n valid_1 = input_type\n\n elif cmd_call in self.cmd_config_default:\n # valid_1 : Valid Cui , valid_2 : Valid Cui Type\n valid_2, _ = self.validate_args(args, 'default')\n valid_1 = args[0]\n\n elif cmd_call == 'code_lookup':\n # args[0] : Initial CUI, args[1] : Initial CUI Type, args[2] : Target CUI Type\n # valid_1 : valid cui, valid_2 : list valid source and target\n _dict_opts = util.OPTIONS_CUI_TYPES.copy()\n _avail = list(set(smores.get_dict_sources()) & set(_dict_opts))\n if len(_avail) == 0 and len(args) < 2:\n print('There are no available starting cui types that can be crosswalked.\\n'\n 'Please load a file containing valid cui types: {0}'.format(_dict_opts))\n return False, None\n\n if len(args) >= 2:\n if len(args) == 3:\n # provided cui, cui source, and target\n valid_2, _ = self.validate_args(args, 'default')\n source, target = args[1].upper(), args[2].upper()\n else:\n source, target = args[0].upper(), args[1].upper()\n valid_1 = simple_input(\"Is {0} the correct starting source? 
\".format(source), ['YES', 'NO', 'exit'])\n if valid_1 == 'exit':\n return False, None\n # TODO need path for valid_2\n else:\n valid_1 = simple_input(\"Which code set do you want to start with?\", _avail)\n if valid_1 != 'exit':\n _dict_opts.remove(valid_1) # Don't lookup what we've already got\n valid_2 = simple_input(\"Which code set do you want to get results for?\", _dict_opts)\n if valid_2 == 'exit':\n return False, None\n else:\n return False, None\n\n elif cmd_call == 'errors':\n _current_err = list(self.errors.keys())\n if len(args) > 1:\n smores_error('#Cx001.7', console_p=True)\n return\n elif len(args) == 1 and args[0].lower() in _current_err:\n valid_1 = args[0]\n elif len(args) == 1:\n print('There are currently no errors logged for that command.')\n return\n else:\n valid_1 = simple_input(\"Please choose a command from the list to see errors: \", _current_err)\n\n elif cmd_call in ['csv', 'remap', 'fhir', 'json']:\n # Format: [File] [Output]\n if not self.inputs['loaded']:\n print(\"No Files Loaded!\\nYou Must load a file containing local medications first\")\n else:\n _file_opts = list(self.inputs['files'].keys()) + ['All']\n _dict_opts = list(smores.get_dict_sources(True)) #+ ['All']\n _file_or_dict = None\n\n if cmd_call in ['csv', 'json']:\n if len(args) == 0:\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n elif args[0] not in _file_opts and args[0] not in _dict_opts:\n print('That option was not recognized as a valid source.')\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n else:\n valid_1 = args[0]\n\n if _file_or_dict.upper() == 'FILE':\n valid_1 = 'FILE|' + simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n elif _file_or_dict.upper() == 'DICTIONARY':\n valid_1 = 'DICT|' + simple_input(\"Please choose a code dictionary to output\", _dict_opts, True)\n elif _file_or_dict.upper() == 'EXIT':\n return None, None\n\n else:\n valid_1 = simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n if cmd_call in ['csv', 'json', 'fhir']:\n if len(args) == 2 and len(args[1]) > 0:\n valid_2 = args[1]\n else:\n valid_2= input(\"Please provide an output file name:\").strip()\n\n if len(valid_2) > 0:\n if \".\" in valid_2:\n valid_2, ext = valid_2.split(\".\")\n else:\n valid_2 = ''\n print('Empty file name provided, using default.')\n else:\n valid_2 = args[0]\n\n elif cmd_call == 'file':\n re_use = False\n if self.inputs['loaded'] and len(in_args) == 0:\n print(\"The following file(s) have already been loaded: \\n\" + str(self.inputs['files']))\n _load_more = simple_input(\"Would you like to load an additional file?\", ['Y', 'N', 'exit'])\n if _load_more == 'Y':\n pass\n elif _load_more == 'N':\n _re_use = simple_input(\"Would you like to re-use a loaded file?\", ['Y', 'N', 'exit'])\n if _re_use == 'Y':\n re_use = True\n else:\n return False, None\n else:\n return False, None\n\n if in_args is not None and len(in_args) > 0:\n valid_1 = in_args\n else:\n valid_1 = input(\"Please enter the name of the file to load: \") if not re_use else simple_input(\n 'Select the file to be used: ', list(self.inputs['files'].keys()), index=True)\n\n while True:\n if valid_1 in self.inputs['files']:\n if not re_use:\n print(\"It looks like you've already loaded that file. 
Please try a different file.\")\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n elif len(valid_1) == 0:\n smores_error('#Cx001.7', logger=smoresLog)\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n\n if not resolve_target_path(valid_1):\n valid_1, valid_2 = self.validate_args('', 'file')\n\n elif '.smr' in valid_1:\n if len(self.inputs['files']) > 0:\n print(\n 'It looks like you are trying to load a session, this will replace the current session and '\n 'all previous work.')\n _save = simple_input('Do you want to save the current session first?', ['Y', 'N', 'EXIT'])\n if _save == 'Y':\n smores.save_session(self.__version__)\n elif _save == 'EXIT':\n return False, None\n valid_2 = 'session'\n else:\n valid_2 = 'file'\n\n smoresLog.debug('Args: {0}, Validated as: {1}'.format(valid_1, valid_2))\n return valid_1, valid_2", "def error_check(command):\r\n\r\n # TODO\r", "def handle_err(self, err, msg):\n assert \"BAD:\" in msg.value().decode('utf-8')\n assert err is not None\n self.remaining -= 1", "def _is_valid_input(self, parameter_name):\n raise NotImplementedError()", "def non_existing_recipe_error_test(self):\n client = TestClient()\n error = client.run(\"upload Pkg/0.1@user/channel\", ignore_error=True)\n self.assertTrue(error)\n self.assertIn(\"ERROR: There is no local conanfile exported as Pkg/0.1@user/channel\",\n client.user_io.out)", "def test_check_response_length_invalid(input):\r\n cmd = ShdlcCmdGetErrorState(clear=False)\r\n with pytest.raises(ShdlcResponseError):\r\n cmd.check_response_length(input)", "def test_invalid_report_id(self) -> None:\n\n # `report_id` is negative\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/-123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is a non-numerical string\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/abcdef\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is undefined\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )", "def is_valid_channel_name(channel):\n if not is_channel_name(channel):\n return False\n\n test_section = channel[1:]\n\n if not MIN_CHANNEL_NAME_LEN < len(channel) < MAX_CHANNEL_NAME_LEN:\n return False\n\n valid_symbols = '#\\\\|^`[]{}_'\n valid_chars = string.ascii_letters + string.digits + valid_symbols\n\n for char in channel:\n if char not in valid_chars:\n return False", "def validate_inputs(name, country, catches):\n while not name:\n name = input('Player name cannot be empty: ')\n\n while not country:\n country = input('Enter a valid country name: ')\n\n while not 
catches:\n catches = input('Now enter number of catches record: ')\n try: # Once user has input data, try to cast it to integer to verify is not string\n int(catches)\n except ValueError: # if input data is not an integer, print message and clear catches value to keep asking user to enter data\n print('Data given is not a number')\n catches = ''\n\n return name, country, catches", "def validated_input(input_msg: str, error_msg: str, validator, screenshot:str =None):\n while(True):\n reset_screen()\n\n if screenshot is not None:\n print(screenshot)\n\n data = input(input_msg)\n\n try:\n return validator(data)\n except:\n reset_screen()\n popup(error_msg.format(data), screenshot)\n input(\"\")", "def error(self, message):\n self.exit(2, f\"Input error: {message}\\n\")", "def check_input(the_user_entry):\n try:\n for z in range(length_of_bad_input):\n if bad_input[z] == the_user_entry:\n messagebox.showwarning(title=\"Invalid input!\",\n message=\"The following characters are forbidden:\\n\"\n \"~`!@#$%^&*()_-+={[}]|\\\\:;\\\"\\'<,>.?/1234567890\")\n clear_box()\n raise ValueError\n except ValueError:\n print(\"The user entered an invalid character in the entry box\\n\"\n \"potentially one of the following:\\n\"\n \"~`!@#$%^&*()_-+={[}]|\\\\:;\\\"\\'<,>.?/1234567890\")", "def _check_id(self, keyword):\n if keyword not in self.request.data:\n return '{} parameter is missing'.format(keyword)\n \"\"\" Check if <keyword> parameter is not None \"\"\"\n if self.request.data[keyword] == '':\n return '{} ID cannot be None'.format(keyword)\n \"\"\" Check if <keyword> parameter is > 0 \"\"\"\n if int(self.request.data[keyword]) < 1:\n return '{} ID must be an integer > 0'.format(keyword)", "def _CHK(self,_err):\n if _err < 0:\n buf_size = 100\n buf = ctypes.create_string_buffer('\\000' * buf_size)\n nidaq.DAQmxGetErrorString(_err,ctypes.byref(buf),buf_size)\n raise RuntimeError(\"nidaq call failed with error %d: %s\"%(_err,repr(buf.value)))\n if _err > 0:\n buf_size = 100\n buf = ctypes.create_string_buffer('\\000' * buf_size)\n nidaq.DAQmxGetErrorString(_err,ctypes.byref(buf),buf_size)\n raise RuntimeError(\"nidaq generated warning %d: %s\"%(_err,repr(buf.value)))", "def check_input(input_array):\n if len(input_array) != 3:\n print(responses.ERROR_INVALID_INPUT)\n return False\n\n if not valid_port(input_array):\n return False\n\n return True", "def error_handler(num, err):\n print(\"Error in input {}\".format(num))\n err = err.decode()\n raise Exception(err)", "def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght \n counter = 0\n for char in id:\n counter += 1\n print counter , lib.constants._ATTR_ID_LENGHT\n if counter > lib.constants._ATTR_ID_LENGHT :\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' exceeded the given length i.e Max Length = '%s'\" % \n\t\t\t(id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n return 0\n return 0", "def test_init(self):\n nt.assert_raises(Exception, CisInterface.CisInput, 'error')", "def invalid(self):\n pass", "def check_errors(self) -> None:", "def test_invalid_course_key(self):\n errstring = \"Unparsable course_id\"\n with self.assertRaisesRegex(CommandError, errstring):\n call_command('export_olx', 'InvalidCourseID')", "def _invalid_transport_key_id():\n pecan.abort(404, u._('Not Found. 
Provided transport key id is invalid.'))", "def check_value(is_valid, error_msg):\n if not is_valid:\n raise ValueError(error_msg)", "def _check_for_incomplete_input(self):\n pass", "def handle_invalid_command(self, msg):\n return self.create_response(Command.INVALID_COMMAND.value)", "def __connect_failed__(self):\n # Ask the user what to do with the error\n choice = input(\"[A]bort, [C]hange address and port, or [R]etry?\")\n if (choice.lower() == \"a\"):\n exit()\n elif (choice.lower() == \"c\"):\n address = input(\"Please enter the address:\")\n port_number = input(\"Please enter the port:\")", "def non_existing_package_error_test(self):\n client = TestClient()\n error = client.run(\"upload Pkg/0.1@user/channel -p hash1\", ignore_error=True)\n self.assertTrue(error)\n self.assertIn(\"ERROR: There is no local conanfile exported as Pkg/0.1@user/channel\",\n client.user_io.out)", "def check_input(naming):\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')" ]
[ "0.70518154", "0.6931385", "0.6793265", "0.6561746", "0.6464727", "0.6447893", "0.62791675", "0.6238365", "0.61477757", "0.6107388", "0.6075252", "0.60303783", "0.60014194", "0.59643716", "0.59615505", "0.5909318", "0.5904439", "0.58474076", "0.5836723", "0.5830802", "0.5800052", "0.57135195", "0.56947607", "0.5684883", "0.56225914", "0.5621993", "0.5619677", "0.561031", "0.5594745", "0.5561833", "0.55528456", "0.55189085", "0.551548", "0.5493905", "0.5493905", "0.5493905", "0.5493905", "0.5484849", "0.54816747", "0.5454767", "0.54433", "0.54384893", "0.539641", "0.5388138", "0.5384721", "0.5383694", "0.53671366", "0.53540486", "0.5352771", "0.53348684", "0.53304476", "0.5328705", "0.53242934", "0.5319001", "0.53071326", "0.5300674", "0.530044", "0.52774817", "0.5276861", "0.5275341", "0.5267166", "0.5254683", "0.5250182", "0.52487427", "0.52434045", "0.52419126", "0.5230402", "0.5226921", "0.52251387", "0.52200407", "0.5215891", "0.5212919", "0.5212624", "0.52106106", "0.5200176", "0.51990485", "0.5194723", "0.5189689", "0.51854193", "0.51770574", "0.5157066", "0.5137756", "0.5131648", "0.51264745", "0.51230586", "0.5122184", "0.5116023", "0.5112455", "0.51121265", "0.51102334", "0.50987184", "0.5098397", "0.5096872", "0.5091053", "0.50877434", "0.5080874", "0.50778687", "0.50733435", "0.50688696", "0.5068766" ]
0.62529725
7
if the person removed is a normal user of flockr, check that they were actually removed from flockr
def test_channel_leave_normal_case():
    clear()
    user = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    userchannel_id = channels_create(user['token'], 'userchannel', True)
    channel_join(leaver['token'], userchannel_id['channel_id'])
    channel_leave(leaver['token'], userchannel_id['channel_id'])
    randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])
    assert(randChannel_details['all_members'] == [
        {
            'u_id' : user['u_id'],
            'name_first' : 'first',
            'name_last' : 'last',
            'profile_img_url': ''
        }
    ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_user_del(user):\n\twith open('tracked_users', 'r') as myfile:\n\t\tuserfile = myfile.read()\n\t\tif user.lower() in userfile.lower():\n\t\t\treturn 1\n\treturn 0", "def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)", "def del_user(user):\n\ttry:\n\t\tmyfile = open('tracked_users', 'r')\n\t\tlines = myfile.readlines()\n\t\tmyfile.close()\n\t\tmyfile = open('tracked_users', 'w')\n\t\tfor line in lines:\n\t\t\tif line.lower() != user.lower()+'\\n':\n\t\t\t\tmyfile.write(line.lower())\n\t\tmyfile.close()\n\t\tos.remove('data/'+user.lower())\n\t\treturn 1\n\texcept Exception as e:\n\t\tfd = open('tracked_users', 'r')\n\t\tprint(fd.read())\n\t\tfd.close()\n\t\tprint(e)\n\t\treturn -1", "def user_disappears(self, user):\n pass", "def delete_user():", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")", "def __kick_passenger(self, user, reason):\n\n try:\n if user.id not in self.__users.keys():\n print(\"the person you're trying to delete doesn't exist.\")\n return\n\n if reason == \"kicked all passengers by an admin\": # the ususal case, made a standart message so users won't be nervous\n user.send_message(\n f\"Hello {user.name.split(' ')[0]}, your request has been removed.\\n\"\n f\"Simply place another one if it's still relevant.\\n\\nBest regards, Bus4U team\")\n\n else: # in case of something spacial\n print(f\"reason '{reason}'\")\n user.send_message(\n f\"hello {user.name.split(' ')[0]}, it looks like you've been kicked out of the system for: {reason}\")\n del self.__users[user.id]\n except Exception as e:\n print(\"Some Error accrued\")", "def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 
'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def _remove(users, room_name):\n global users_removed\n users_removed = []\n\n try:\n\n for word in users['message']['text'].split():\n\n if word == 'myself':\n user = users['message']['sender']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n \n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append('Not found ->> ' + \"<\" + user + \">\")\n\n check_continue = 1\n text = '```User removed: %s ```' % (','.join(users_removed))\n\n for _item in range(len(users['message']['text'].split())):\n\n _item = _item + 1\n\n try:\n _type = users['message']['annotations'][_item]['userMention']['user']['type']\n user = users['message']['annotations'][_item]['userMention']['user']['name']\n \n if _type == 'BOT':\n\n if check_continue == 1:\n continue\n else:\n text = 'Please add user with @'\n continue\n \n user = users['message']['annotations'][_item]['userMention']['user']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n\n except:\n pass\n\n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append(\"Not found ->> \" + \"<\" + user + \">\")\n text = \"```Removed users: %s ```\" % (','.join(list(set(users_removed))))\n return text\n except:\n\n text = 'Please add user with @'\n return text", "async def _remove(self, ctx, points: int, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(\n lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(\n lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(\n lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(\n lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await self.bot.say(\"{} was not found. 
Please add them first using points member add\"\n \" <discord name or Nickname>\".format(x.display_name))\n else:\n self.db[server.id][x.id][\"Lifetime Loss\"] += points\n self.db[server.id][x.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, x.name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please register first using points member add\"\n \" <Discord name or nickname>\".format(namea))\n return\n self.db[server.id][name.id][\"Lifetime Loss\"] += points\n self.db[server.id][name.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, name.name))\n self.save_db()", "def test_stream_stale_follows(self):\n self.user2.delete()\n self.assertNotIn('Two', str(user_stream(self.user1)))", "def delete_relic_users():\n try:\n school_users = m.User.query.join(m.User.fraternity)\\\n .join(m.Fraternity.school).filter_by(\n id=current_user.fraternity.school_id).all()\n for user in school_users:\n preuser = m.Preuser.query.filter(\n m.Preuser.email == user.email,\n m.Preuser.school_title == current_user.fraternity.school.title)\\\n .first()\n if preuser is None:\n user.delete()\n except Exception:\n return False\n return True", "def test_remove_facility_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_facility'))", "def rm_person():\n # get person name from user\n responses = accept_inputs([\"Person name\"])\n person_name = responses[\"Person name\"]\n # check for existence of person\n results = query_with_results(\"select id from person where name = ?\", [person_name])\n if len(results) == 0:\n print(\"No person found with name '%s' that we could remove.\" % person_name)\n return\n # the person exists, so remove it\n query_no_results(\"delete from person where name = ?\", [person_name])\n # remove all associations with tasks\n query_no_results(\"delete from task_person_pair where person = ?\", [results[0][0]])\n print(\"Person with name '%s' removed.\" % person_name)", "def test_realm_admin_remove_others_from_public_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=16,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_remove_user(self):\n pass", "def DelteUser(database):\n firstname=str(input(\"what is the name of the user you want to delete : \"))\n delusr,find =getByName(database,firstname)\n if not find:\n return\n del database[delusr.key]\n for key,usr in database.items():\n if delusr.key in usr.folow:\n usr.folow.remove(delusr.key)\n if delusr.key in usr.folowed:\n usr.folowed.remove(delusr.key)\n \n os.remove(f\"Users/{delusr.key}\")", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def remove_user(self, username):\n\n row = self.c.execute(\"SELECT * FROM profiles WHERE name =?\",\n (username,))\n for i in 
row:\n user = i[1]\n print(user)\n if user == username:\n self.c.execute(\"SELECT id FROM profiles WHERE name=?\",\n (username,))\n i_d = self.c.fetchone()[0]\n self.c.execute(\"DELETE FROM events WHERE user_id=?\", (i_d,))\n self.c.execute(\"DELETE FROM profiles WHERE name=?\", (username,))\n self.conn.commit()\n return True\n else:\n print ('User not found.')", "def is_participant(self, message: discord.Message):\n if message.author in self.participants:\n self.participants.remove(message.author)\n return True\n\n return False", "def _check_remove_last_super(user_obj):\n if not user_obj.is_superuser:\n return\n\n # Is there any other active superuser left?\n all_active_su = User.objects.filter(is_superuser__exact = True,\n is_active__exact = True)\n num_active_su = all_active_su.count()\n assert num_active_su >= 1, _(\"No active superuser configured.\")\n if num_active_su == 1:\n raise PopupException(_(\"You cannot remove the last active superuser from the configuration.\"), error_code=401)", "def test_deluser(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.run_function(\"group.adduser\", [self._group, self._user])\n self.assertTrue(self.run_function(\"group.deluser\", [self._group, self._user]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertNotIn(self._user, str(group_info[\"members\"]))", "def clean(self):\n super().clean()\n if self.user2:\n self.orig_cloud.delete_user(self.user2.id)", "def test_teams_remove_user_from_team_v2(self):\n pass", "def leave_union(self):\n if self.union is None:\n return f'{self.username} is not a member of any guild'\n\n if self.union.has_member(self):\n union_name = self.union.name\n self.union = None\n self.save()\n return f'{self.username} has been removed from {union_name}'", "def unfriend(self, remove):\n remover_friends_list = self # person terminating the friendship \n \n # remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n\n #remove friend from removee friend list\n friends_list = FriendList.objects.get(user=removee)\n friend_list.remove_friend(self.user)", "def test_func(self):\n member_to_finish = self.get_object()\n return self.request.user.rfid == member_to_finish.rfid", "def delete_user():\n #TODO user delete\n pass", "def test_handle_remove_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"web\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def unorphaned(self):\n return self.new_owner == self.user", "def on_removeuser(self, username):\n self.users.remove(username)\n print ('%s left the room.' 
% username)", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "def _should_delete(self, msg, ctx):\n # Do not remove the user's call\n if msg.id == ctx.message.id:\n return False\n # Remove command calls\n if msg.content.startswith(ctx.prefix):\n return True\n # Remove our bot's messages\n if msg.author == self.bot.user:\n return True\n return False", "def test_user_is_excluded_from_deleted_conversation(self):\n conv = G(Conversation, type=CONVERSATION_TYPE_CHAT, creator=self.user1,\n users=[self.user1, self.user2])\n self.login(self.user2)\n self.client.delete(self.get_url(conv.pk))\n self.assertNotIn(self.user2,\n Conversation.objects.get(pk=conv.pk).users.all())", "def test_permission_remove_unknown_user(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove joe TICKET_VIEW')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def remove_member(self, persona):\n if persona in self.members:\n self.members.remove(persona)", "def verify_user_existance(self, user):\n for client in self.clients:\n if user == client.get_name():\n return True\n return False", "def __delUserCheckUsers(self,loaded_users):\n total_credit=0\n for loaded_user in loaded_users:\n if loaded_user.isOnline():\n raise GeneralException(errorText(\"USER_ACTIONS\",\"DELETE_USER_IS_ONLINE\")%loaded_user.getUserID())\n total_credit += max( 0 , loaded_user.getBasicUser().getCredit() )\n return total_credit", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_remove_from_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=['Team_foo'])\n req = User.create(name='requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)", "def _remove_user(self):\n name = False\n while not name: #While name not set\n name = input(\"Please enter the username of the user you would like to remove: \").lower()\n userID = self._get_user_id(name)\n if not userID:\n name = False\n command = \"remove_user {0}\\r\\n\".format(userID)\n return(command)", "async def remove(self, ctx, *, name=None):\n server = ctx.message.server\n 
author = ctx.message.author\n names = None\n namesp = None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(x.display_name))\n elif x.id in self.db[server.id]:\n del self.db[server.id][x.id]\n self.save_db()\n await self.bot.say(\"{} has been removed from the list.\".format(x.display_name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(name.display_name))\n return\n elif name.id in self.db[server.id]:\n del self.db[server.id][name.id]\n self.save_db()\n await self.bot.say(\"{} has been deleted from the list.\".format(name.display_name))", "def onUserDeletion(event):\n client = getUtility(IAdminClient)\n xmpp_users = getUtility(IXMPPUsers)\n storage = getUtility(IPubSubStorage)\n\n principal_id = event.principal\n principal_jid = xmpp_users.getUserJID(principal_id)\n\n if principal_id in storage.leaf_nodes:\n storage.leaf_nodes.remove(principal_id)\n if principal_id in storage.publishers:\n del storage.publishers[principal_id]\n if principal_id in storage.node_items:\n del storage.node_items[principal_id]\n if principal_id in storage.collections['people']:\n storage.collections['people'].remove(principal_id)\n\n pass_storage = getUtility(IXMPPPasswordStorage)\n pass_storage.remove(principal_id)\n\n d = deletePrincipal(client, principal_jid)\n return d", "def DeleteUser(self, delusercount, deluser):\n for i in range(delusercount):\n login = string.replace(deluser[i]['Login'], ' ', '')\n action = 'userman -D ' + login\n output = commands.getstatusoutput(action)\n print output\n updatecount, update = self.__sqlData[\"UPDATE AccUser SET ToDo = 0 WHERE Login = '%s'\" % (login)]", "def user_unfollow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.remove(following)\n db.session.commit()\n return {'unfollowed': True}", "def LdapDeleteUser(self, record_filter):\n #Find uid of the person\n person = self.LdapFindUser(record_filter, [])\n 
logger.debug(\"LDAPapi.py \\t LdapDeleteUser record %s person %s\"\n % (record_filter, person))\n\n if person:\n dn = 'uid=' + person['uid'] + \",\" + self.baseDN\n else:\n return {'bool': False}\n\n result = self.LdapDelete(dn)\n return result", "def test_remove_facility_pt3(self):\n self.assertFalse(self.learner1.has_perm('auth.remove_facility'))", "def fire(name):\r\n try:\r\n if name in man:\r\n man.remove(name)\r\n else:\r\n print(f\"Error: {name} not found in personnel list\")\r\n\r\n except TypeError:\r\n print(\"Error: Call with strings only\")", "async def deluser(self, ctx, member: discord.Member):\r\n for k, v in player.items():\r\n if k == member.name:\r\n del player[k]\r\n cur.execute(\"DELETE FROM players WHERE name=%s\", [k])\r\n conn.commit()\r\n await ctx.send(k + ' has been removed from the player-base')\r\n break", "def remove_user(self, u: \"Node\") -> None:\n\n if u in self.users_:\n self.users_[u] -= 1\n if self.users_[u] == 0:\n del self.users_[u]", "def user_has_selected_nickname(self):\n if self.fresh is None:\n delta = self.created - self.modified\n # Simulate delta = abs(delta)\n if delta.days < 0:\n delta = -delta\n self.fresh = (delta.days == 0 and delta.seconds < 2)\n return not self.fresh", "def remove_user(self, user: discord.User) -> bool:\n\t\tif not self.user_has_entry(user):\n\t\t\treturn False\n\t\t\n\t\tdef data_interaction(cur: Cursor):\n\t\t\tsql = f\"DELETE FROM {StrikeConsts.STRIKE_TABLE} WHERE id=%s;\"\n\t\t\tcur.execute(sql, (user.id,))\n\t\t\t\n\t\t\treturn [True]\n\t\t\t\n\t\treturn self.connect_and_execute(data_interaction)[1][0]", "def removeUser(self, fullName):\n logger.debug(\"Func: removeUser\")\n\n # old Name removeUser\n currentDB = self._loadUsers()\n del currentDB[fullName]\n self._dumpJson(currentDB, self._pathsDict[\"usersFile\"])\n self._usersDict = currentDB\n return None, None", "def test_remove_fellow_from_organization_success(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Valid Requestor', email='[email protected]',\n user_type='user',\n owned_organizations=['Organization_foo'])\n user.put()\n req.put()\n\n # Successful removal.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n )\n self.assertEqual(json.loads(response.body)['owned_organizations'], [])\n\n # Changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(fetched_user.owned_organizations, [])\n self.assertEqual(user.user_type, fetched_user.user_type)", "def _delete_user(self, user):\n if User.delete_user(user):\n self.session.output({'deleted': 'user {} and their related accounts'.format(user)})\n return True\n else:\n self.session.output({'invalid_user': 'please enter valid user ID!\\n'}, '[ Fail to delete user ]')\n return False", "def can_delete(self, user):\n raise Return(False)", "async def __remove(self, ctx, name: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n if name is None:\n name = author\n if server.id not in self.db:\n self.db[server.id] = {}\n if \"bookkeeper\" not in self.db[server.id]:\n self.db[server.id][\"bookkeeper\"] = []\n await self.bot.say(\"Bookkeeper list is currently empty, add new bookkeepers using points keeper add\"\n \" <Discord name or nickname>\")\n self.save_db()\n return\n if name.id not in 
self.db[server.id][\"bookkeeper\"]:\n await self.bot.say(\"Keeper is not registered, please make sure the name or nickname is correctly spelled. \"\n \"You can check using points keeper list\")\n return\n self.db[server.id][\"bookkeeper\"].remove(name.id)\n self.save_db()", "def remove_user(self, u):\r\n\t\tlogger.debug(\"Entering\")\r\n\t\t\r\n\t\tif login.remove_user(u):\r\n\t\t\tlogger.debug(\"Exiting - success\")\r\n\t\t\treturn True, \"%s has been removed.\" % u\r\n\r\n\t\tlogger.debug(\"Exiting - failure\")\r\n\t\treturn False, \"%s does not exist.\" % u", "def test_remove_coach_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))", "def test_handle_remove_not_in_team(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user]\n self.db.query.return_value = [team]\n self.gh.has_team_member.return_value = False\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (\"User not in team!\", 200))\n self.gh.has_team_member.assert_called_once_with(\"myuser\", \"githubid\")\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def remove_person(self, per: str):\n if per in self._people:\n self._people.remove(per)\n else:\n raise IDDoesNotExist", "async def _ad_remove(self, ctx, member):\n member_object = discord.utils.find(\n lambda x: x.name == member or str(x) == member or (member.isnumeric() and x.id == int(member)),\n ctx.guild.members\n )\n if member_object is not None:\n member = member_object.id\n elif member.isnumeric():\n member = int(member)\n\n admin = list(filter(lambda x: x.user_id == member, self.database.get_admins(ctx.guild.id)))\n if admin:\n self.database.remove_item(admin[0])\n if member_object:\n await ctx.send(f\"Removed admin from {member_object.name}\")\n else:\n await ctx.send(\"Removed admin from invalid user\")\n else:\n await ctx.send(\"That person isn't an admin!\")", "def test_user_is_really_deleted():\n response = api_helper.get_user(user_id=pytest.test_user.id)\n assert response.status_code == 200\n assert len(response.json()['data']) == 0", "def unfriend(self, removee):\n remover_friends_list = self # person terminating the friendship\n # Remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n # Remove friend from removee's friend list\n friends_list = FriendList.objects.get(user=removee)\n friends_list.remove_friend(self.user)", "def _onRemove(self, event):\n sel = self.userlist.GetSelection()\n if sel >= 0:\n c.removeUser(self.userlist.GetString(sel))\n self.userlist.Delete(sel)\n del self.users[sel]\n if len(self.users) >= 0:\n self.userlist.SetSelection(0)\n else:\n self.userlist.SetSelection(-1)", "def test_delete_author_unlogged(self):\n request = self.client.delete(self.epoint)\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)", "def test_user_deletion(self):\n User.objects.filter(username=self.user.username).delete()\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_WARNING)", "def test_teams_remove_user_from_team_v1(self):\n pass", "def delete_by(self, user):\n if user.is_superuser or user is 
self.added_by:\n self.delete()", "def test_remove_self_from_team_success(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n user.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(user),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def test_removeperson(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t.remove_person(p2)\n t.store()\n\n t2 = model.Team(id=id)\n self.assertEqual(t2.persons, [p1.id, p3.id])\n\n with self.assertRaises(ValueError): # cannot be removed again\n t2.remove_person(p2)", "def delete_user(change):\n return change()", "async def done(self, ctx, member: discord.Member):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n if member.id in lst:\r\n lst.remove(member.id)\r\n await self.config.guild(ctx.guild).neededlist.set(lst)\r\n await self.config.member(member).clear()\r\n await ctx.send(\"Removed member from pending list\")\r\n\r\n else:\r\n await ctx.send(\"Member not in the pending list\")\r\n\r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "def userPart(self, __userID):\n\n\t\tconnectedUsers = self.connectedUsers\n\t\tif (__userID in connectedUsers):\n\t\t\tconnectedUsers.remove(__userID)", "def test_remove_learner_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_LEARNER, self.learner_groups[1]))", "def staff_remove(request):\n username = request.params['remove']\n user = models.User.get_by_username(username)\n user.staff = False\n return httpexceptions.HTTPSeeOther(\n location=request.route_url('admin_staff'))", "def removeUser(self, username):\r\n try:\r\n self.getUser(username)\r\n for line in fileinput.input(self.filename, inplace=1):\r\n if self.scanner.match(line).groups()[0] != username:\r\n print(line[:-1])\r\n except KeyError:\r\n raise CredentialError('No such user')", "def testTurnOffMembership(self):\n fsd_tool = getToolByName(self.portal, TOOLNAME)\n # Disable membership support for FSDPerson\n fsd_tool.setEnableMembraneTypes(tuple([t for t in fsd_tool.getEnableMembraneTypes() if t != 'FSDPerson']))\n # Manually run the at_post_edit_script to fire the FacultyStaffDirectoryModifiedEvent.\n fsd_tool.at_post_edit_script()\n # Double check to make sure that FSDPerson is really detatched from membrane\n userFolder = getToolByName(self.portal, 'acl_users')\n self.failIf(userFolder.getUserById('abc123'))\n \n # Try to add a new person\n self.directory.invokeFactory(type_name=\"FSDPerson\", id=\"eee555\", firstName=\"Ima\", lastName=\"Personobject\")\n person = self.directory['eee555']\n # Manually run the at_post_create_script to fire the PersonModifiedEvent.\n try:\n person.at_post_create_script()\n except KeyError:\n self.Fail(\"FacultyStaffDirectory incorrectly tried to find the user attached to a FSPerson while membrane support was disabled.\")", "def test_remove_followers(self):\n pass", "def 
allowed_user_access_delete(usera, userb):\n try:\n upa = usera.get_profile()\n upb = userb.get_profile()\n except AttributeError:\n return False\n\n return (usera == userb and usera.has_perm(\"vnswww.userprofile_delete_self\")\n or usera.has_perm(\"vnswww.userprofile_delete_any\")\n or (usera.has_perm(\"vnswww.userprofile_delete_org\") and upa.org == upb.org))", "def deauth(nick):\n global auth_list\n if nick in auth_list:\n a = auth_list.index(nick)\n del(auth_list[a])", "def remove_obsolete_users(self, date_limit):\n for user in User.objects.filter(last_login__lt=date_limit):\n if not ServiceProvider.objects.filter(admins=user):\n self.output(\"Removing user: \" + user.username)\n if not self.list_only:\n user.delete()", "def onUserDeletion(event):\n request = getRequest()\n if not IProductLayer.providedBy(request):\n return\n\n client = getUtility(IAdminClient)\n xmpp_users = getUtility(IXMPPUsers)\n\n principal_id = event.principal\n principal_jid = xmpp_users.getUserJID(principal_id)\n\n pass_storage = getUtility(IXMPPPasswordStorage)\n pass_storage.remove(principal_id)\n\n d = users.deletePrincipal(client, principal_jid)\n return d", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "def deleteUser(self,name):\n raise BorkedDeleteUser", "def test_captain_removes_teammate_success(self):\n team = Team.create(name='foo', program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n captain = User.create(name='captain', email='[email protected]',\n user_type='user', owned_teams=[team.uid])\n team.captain_id = captain.uid\n user.put()\n captain.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(captain),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)", "def remove_user(self, login):\n\t\tif login in self.users_by_name:\n\t\t\tuser = self.users_by_name[login]\n\t\t\tif not user.system:\n\t\t\t\tself.users.pop(user.id, None)\n\t\t\t\tdel(self.users_by_name[login])\n\t\t\t\tself.sync()", "def test_remove_facility_pt1(self):\n self.assertFalse(self.admin.has_perm('auth.remove_facility'))", "def delete_leader(self):", "def _deleteUsers( self, bSerial ):\n\n\t\ttry:\n\t\t\tself._oLock.acquire()\n\n\t\t\ttry:\n\t\t\t\tsQuery = 'DELETE FROM CustUser WHERE bSerial = %s'\n\t\t\t\trgoResult = self._libDB.query( sQuery, bSerial )\n\n\t\t\t\treturn True\n\n\t\t\texcept Exception, e:\n\t\t\t\terrMsg( 'Error removing users for serial-[%s] [%s]' % ( bSerial, e ) )\n\t\t\t\treturn 
False\n\n\t\tfinally:\n\t\t\tself._oLock.release()", "def ensure_principal_absent( user_email ):\n \n ensure_user_absent( user_email )\n delete_principal_data( user_email )\n return True", "def test_realm_admin_remove_multiple_users_from_stream(self) -> None:\n target_users = [\n self.example_user(name)\n for name in [\"cordelia\", \"prospero\", \"iago\", \"hamlet\", \"outgoing_webhook_bot\"]\n ]\n result = self.attempt_unsubscribe_of_principal(\n query_count=27,\n cache_count=8,\n target_users=target_users,\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 5)\n self.assert_length(json[\"not_removed\"], 0)", "def remove(self, user_id):\n pass", "def has_remove_permissions(self, obj):\n return True", "def fire(name):\r\n try:\r\n if name in off:\r\n off.remove(name)\r\n else:\r\n print(f\"Error: {name} not found in personnel list\")\r\n\r\n except TypeError:\r\n print(\"Error: Call with strings only\")", "def test_remove_coach_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))" ]
[ "0.65418464", "0.6525852", "0.61570674", "0.6097376", "0.60110086", "0.5990641", "0.59904647", "0.5983225", "0.5961415", "0.5946392", "0.59444773", "0.59234893", "0.5917741", "0.58916634", "0.58891475", "0.58835924", "0.58744335", "0.58742744", "0.5868517", "0.5852744", "0.58465856", "0.58463115", "0.5811499", "0.57897764", "0.5766605", "0.5763578", "0.57524514", "0.5751362", "0.5747413", "0.57455945", "0.5736959", "0.5736056", "0.57347375", "0.5732275", "0.5729548", "0.57051784", "0.5703265", "0.5697665", "0.5690266", "0.56896955", "0.56883717", "0.5685446", "0.56776965", "0.56731117", "0.5663272", "0.566322", "0.5660026", "0.56556886", "0.5654666", "0.5650692", "0.5647466", "0.5640924", "0.5636981", "0.56319946", "0.56256545", "0.5621512", "0.5617704", "0.5614798", "0.5613157", "0.5611356", "0.56039345", "0.5600884", "0.55942893", "0.5586008", "0.5569705", "0.5567879", "0.5566551", "0.5563384", "0.55588216", "0.555562", "0.555036", "0.5539116", "0.5532451", "0.5526745", "0.55251", "0.5524875", "0.55241853", "0.5518475", "0.5516336", "0.5486517", "0.54812044", "0.5478918", "0.5473824", "0.5460746", "0.5460099", "0.54585147", "0.54569", "0.5455713", "0.5452134", "0.54471976", "0.54460645", "0.54404145", "0.54395324", "0.5439223", "0.5438987", "0.54344887", "0.54344136", "0.5422353", "0.54217404", "0.54190713", "0.5418016" ]
0.0
-1
if the person removed is an owner of flockr, check that they were actually removed from flockr
def test_channel_leave_normal_case_owner(): clear() leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') user = auth_register('[email protected]', '123abc!@#', 'first', 'last') userchannel_id = channels_create(user['token'], 'userchannel', True) channel_join(leaver['token'], userchannel_id['channel_id']) channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id']) channel_leave(leaver['token'], userchannel_id['channel_id']) randChannel_details = channel_details(user['token'], userchannel_id['channel_id']) assert(randChannel_details['owner_members'] == [ { 'u_id' : user['u_id'], 'name_first' : 'first', 'name_last' : 'last', 'profile_img_url': '' } ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def unorphaned(self):\n return self.new_owner == self.user", "def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def delete_self_ownership(self):\n current_ownership_list = self.msg.get_ownershipList()\n self.old_ownership_list = current_ownership_list\n for comp in self.deleted_comp_list:\n if comp in current_ownership_list:\n current_ownership_list.remove(comp)\n self.logger.debug(\"After removing transfer component ownership, \\\n new ownership: %s\" % current_ownership_list)\n self.msg.set_ownershipList(current_ownership_list)", "def isowner(self, o):\n return self._owner is o", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert 
channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_remove_fellow_from_organization_success(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Valid Requestor', email='[email protected]',\n user_type='user',\n owned_organizations=['Organization_foo'])\n user.put()\n req.put()\n\n # Successful removal.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n )\n self.assertEqual(json.loads(response.body)['owned_organizations'], [])\n\n # Changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(fetched_user.owned_organizations, [])\n self.assertEqual(user.user_type, fetched_user.user_type)", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def leave_union(self):\n if self.union is None:\n return f'{self.username} is not a member of any guild'\n\n if self.union.has_member(self):\n union_name = self.union.name\n self.union = None\n self.save()\n return f'{self.username} has been removed from {union_name}'", "def renounceOwnership():\n\n assert msg.sender == self.owner, \"Access is denied.\"\n\n log.OwnershipRenounced(msg.sender)\n self.owner = ZERO_ADDRESS", "def is_participant(self, message: discord.Message):\n if message.author in self.participants:\n self.participants.remove(message.author)\n return True\n\n return False", "async def __remove(self, ctx, name: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n if name is None:\n name = author\n if server.id not in self.db:\n self.db[server.id] = {}\n if \"bookkeeper\" not in self.db[server.id]:\n self.db[server.id][\"bookkeeper\"] = []\n await self.bot.say(\"Bookkeeper list is currently empty, add new bookkeepers using points keeper add\"\n \" <Discord name or nickname>\")\n self.save_db()\n return\n if name.id not in self.db[server.id][\"bookkeeper\"]:\n await self.bot.say(\"Keeper is not registered, please make sure the name or nickname is correctly spelled. 
\"\n \"You can check using points keeper list\")\n return\n self.db[server.id][\"bookkeeper\"].remove(name.id)\n self.save_db()", "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def is_still_owner(self):\n raise tooz.NotImplemented", "def rm_person():\n # get person name from user\n responses = accept_inputs([\"Person name\"])\n person_name = responses[\"Person name\"]\n # check for existence of person\n results = query_with_results(\"select id from person where name = ?\", [person_name])\n if len(results) == 0:\n print(\"No person found with name '%s' that we could remove.\" % person_name)\n return\n # the person exists, so remove it\n query_no_results(\"delete from person where name = ?\", [person_name])\n # remove all associations with tasks\n query_no_results(\"delete from task_person_pair where person = ?\", [results[0][0]])\n print(\"Person with name '%s' removed.\" % person_name)", "def check_deletion(oc_name, org):\n duplicate_name = org['name']\n\n distance = 
org_tools.getDistance(oc_name, duplicate_name)\n\n if distance <= 0.35:\n org['can_delete'] = 1\n else:\n org['can_delete'] = 0\n\n return org", "def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def test_removeperson(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t.remove_person(p2)\n t.store()\n\n t2 = model.Team(id=id)\n self.assertEqual(t2.persons, [p1.id, p3.id])\n\n with self.assertRaises(ValueError): # cannot be removed again\n t2.remove_person(p2)", "def remove_member(self, persona):\n if persona in self.members:\n self.members.remove(persona)", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")", "async def _remove(self, ctx, points: int, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(\n lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(\n lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(\n lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(\n lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await 
self.bot.say(\"{} was not found. Please add them first using points member add\"\n \" <discord name or Nickname>\".format(x.display_name))\n else:\n self.db[server.id][x.id][\"Lifetime Loss\"] += points\n self.db[server.id][x.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, x.name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please register first using points member add\"\n \" <Discord name or nickname>\".format(namea))\n return\n self.db[server.id][name.id][\"Lifetime Loss\"] += points\n self.db[server.id][name.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, name.name))\n self.save_db()", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def test_realm_admin_remove_others_from_public_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=16,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def is_owned_by(self, user):\n return user and user.id == self.user_id", "def remove_person(self, per: str):\n if per in self._people:\n self._people.remove(per)\n else:\n raise IDDoesNotExist", "def delete_leader(self):", "def check_user_del(user):\n\twith open('tracked_users', 'r') as myfile:\n\t\tuserfile = myfile.read()\n\t\tif user.lower() in userfile.lower():\n\t\t\treturn 1\n\treturn 0", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_remove_facility_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_facility'))", "def test_remove_from_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=['Team_foo'])\n req = User.create(name='requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)", "def check_user_has_owner_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n return userName in owners", "async def remove(self, ctx, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n namesp 
= None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(x.display_name))\n elif x.id in self.db[server.id]:\n del self.db[server.id][x.id]\n self.save_db()\n await self.bot.say(\"{} has been removed from the list.\".format(x.display_name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(name.display_name))\n return\n elif name.id in self.db[server.id]:\n del self.db[server.id][name.id]\n self.save_db()\n await self.bot.say(\"{} has been deleted from the list.\".format(name.display_name))", "def test_remove_self_from_team_success(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n user.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(user),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def test_stream_stale_follows(self):\n self.user2.delete()\n self.assertNotIn('Two', str(user_stream(self.user1)))", "def test_remove_coach_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def test_captain_removes_teammate_success(self):\n team = Team.create(name='foo', program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n captain = User.create(name='captain', email='[email protected]',\n user_type='user', owned_teams=[team.uid])\n team.captain_id = captain.uid\n user.put()\n captain.put()\n team.put()\n\n response = 
self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(captain),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def _testAssistantOwnershipAfter(self, person=None, task='create'):\n if not person:\n person = self.person\n \n newperson = self.getPerson(id='def456', firstName=\"Test\", lastName=\"Assistant\")\n person.setAssistants([newperson.UID(),])\n self.simulateATGUIInteraction(person=person, task=task)\n owners = person.users_with_local_role('Owner')\n \n return 'def456' in owners", "def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())", "def is_owner(self):\n return self._is_owner", "def unfriend(self, remove):\n remover_friends_list = self # person terminating the friendship \n \n # remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n\n #remove friend from removee friend list\n friends_list = FriendList.objects.get(user=removee)\n friend_list.remove_friend(self.user)", "def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def untether(self) -> None:\n if self.msg.sender != self.owner:\n revert(f'Only the owner can call the untether method.')\n pass", "def test_func(self):\n member_to_finish = self.get_object()\n return self.request.user.rfid == member_to_finish.rfid", "def _unfiled_box():\n return db.box((db.box.name == 'Unfiled') & (db.box.owner == auth.user.id))", "def remove_partner(self, other_person,s):\n self.number_of_partners -= 1\n self.current_partners.remove(other_person.identifier)\n \n if self.number_of_partners == 0:\n #no partners left -> single\n s.number_of_singles += 1\n s.singles.add(self.identifier)", "def __updater_get_new_ownership(self):\n if self._transfer_cmp_event.is_set() and not self.put_queue_flag:\n self.logger.info(\"Received transfer/accept request event in updater\")\n for comp_tuple in self._updater_map.keys():\n if int(comp_tuple[0]) not in self.msg.get_ownershipList():\n del self._updater_map[comp_tuple]\n self.msg.put_into_Queue()\n self.put_queue_flag = True\n elif not self._transfer_cmp_event.is_set():\n self.put_queue_flag = False", "def is_loan_owner(self, farmer_id, loan_id):\n owner = self.get_loan_owner(loan_id)\n if (\"farmer_id\" in owner.keys()) and owner[\"farmer_id\"] == farmer_id:\n return True\n return False", "def _remove_player(self, player, player_party, other_party):\n\n party = vars(self)[player_party][:]\n party.remove(player)\n vars(self)[player_party].remove(player)\n for other in 
vars(self)[other_party]:\n if player in other.prefs:\n other.forget(player)", "def prune_losers(self):\n self.log.debug(\"PRUNE LOSERS\")\n # check to see if people i followed follow me back\n cutoff_time = (datetime.now()\n - timedelta(hours=self.reciprocation_window))\n ingrates = Target.objects.filter(\n hunter=self.user, status=Target.PURGATORY,\n modified__lt=cutoff_time) # They didn't follow back in time\n\n for ingrate in ingrates:\n ingrate.status = Target.INGRATE\n ingrate.save()\n self.log.debug(\" => Unfollowed %s\" % ingrate.hunted.screen_name)\n try:\n self.api.destroy_friendship(ingrate.hunted)\n except Exception, e:\n print e\n return\n finally:\n pass\n #self.contact(ingrate)", "def test_remove_coach_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))", "def remove_owner(urn: str, owner_urn: str) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.remove_owner(owner=_get_owner_urn(owner_urn))\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"remove owners\")\n for mcp in dataproduct_patcher.build():\n print(json.dumps(mcp.to_obj()))\n graph.emit(mcp)", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def demote(name):\r\n try:\r\n if name in man:\r\n man.remove(name)\r\n off.append(name)\r\n off.sort()\r\n else:\r\n print(f\"{name} cannot be demoted from a manager as they are not in the personnel list\")\r\n\r\n except TypeError:\r\n print(\"Error: Call with strings only\")", "def test_01_self_unshare_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n dog.uaccess.unshare_resource_with_user(holes, dog)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertFalse(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_resource_unshare_users(holes)))", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def run(self):\n # Determine if this filter doesn't apply.\n if (self.owner == None \\\n or (self.sense and self.user != self.owner) \\\n or ((not self.sense) and self.user == self.owner)):\n return 0\n\n # Perform the child actions.\n self.context.tokens['Owner'] = self.owner\n return 
super(FilterLockOwner, self).run()", "def fire(name):\r\n try:\r\n if name in man:\r\n man.remove(name)\r\n else:\r\n print(f\"Error: {name} not found in personnel list\")\r\n\r\n except TypeError:\r\n print(\"Error: Call with strings only\")", "async def remove(ctx, pkmn_id: int):\n res = database.remove_from_party(ctx.message.author, pkmn_id)\n if not res:\n ctx.send(\"**Oak**: Make sure you actually have that pokemon or if your party is not full ya scrub.\")\n return await show_party(ctx.message.author)", "def test_remove_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Invalid Requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def has_remove_permissions(self, obj):\n return True", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def check_delete_permission(self):\n if getSecurityManager().checkPermission(\"Delete objects\", self):\n username = getSecurityManager().getUser().getUserName()\n if username == self.getOwner().getId():\n return True\n return False", "def test_kyc_delete_legal_board_member(self):\n pass", "def remove_referrer(self, service_id):\n referrers = [r for r in self.referrers if r['service_id'] != service_id]\n if len(referrers) == len(self.referrers):\n logger.warning(\"%s did not have a reservation for %s\", service_id, self)\n return False\n else:\n self.referrers = referrers\n logger.warning(\"%s removed from referrers of %s\", service_id, self)\n # TODO: set atime, but make volume atime and instance atime both nanoseconds\n return True", "def remove_patient(self, patient_name):\n for i in range(len(self)):\n selected_patient = self._patient_list[i]\n if patient_name == selected_patient.first_name + \" \" + selected_patient.last_name:\n self._patient_list.pop(i)\n self.calculate_avg_cholesterol()\n return True\n return False", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. 
Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def verify_user_existance(self, user):\n for client in self.clients:\n if user == client.get_name():\n return True\n return False", "def unfriend(self, removee):\n remover_friends_list = self # person terminating the friendship\n # Remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n # Remove friend from removee's friend list\n friends_list = FriendList.objects.get(user=removee)\n friends_list.remove_friend(self.user)", "def user_deletable(self):\n source_module_id = getattr(self, 'source_module_id', False)\n if not source_module_id:\n return True\n\n root_module_id = getattr(self, 'root_module_id', False)\n if not root_module_id:\n return True\n\n app = self.get_app()\n parent_module = app.get_module_by_unique_id(root_module_id)\n\n if parent_module.module_type == 'shadow':\n return False\n\n return True", "async def done(self, ctx, member: discord.Member):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n if member.id in lst:\r\n lst.remove(member.id)\r\n await self.config.guild(ctx.guild).neededlist.set(lst)\r\n await self.config.member(member).clear()\r\n await ctx.send(\"Removed member from pending list\")\r\n\r\n else:\r\n await ctx.send(\"Member not in the pending list\")\r\n\r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "def test_that_contractor_is_in_recipients(self):\n\n self.trs.recipient.users.clear()\n self.trs.recipient.save()\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 404)", "def isOwner(id, userId):\n db = core.connect()\n return db[id][\"createdBy\"] == userId", "def test_remove_last_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_organizations=[org.uid])\n user.put()\n\n self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(user),\n )\n\n # not changed in the db\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def testOwnershipAfterCreate(self):\n self.simulateATGUIInteraction(task='create')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def fire(name):\r\n try:\r\n if name in off:\r\n off.remove(name)\r\n else:\r\n print(f\"Error: {name} not found in personnel list\")\r\n\r\n except TypeError:\r\n print(\"Error: Call with strings only\")", "async def on_member_remove(member):\r\n pass", "def test_handle_remove_not_in_team(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user]\n self.db.query.return_value = [team]\n self.gh.has_team_member.return_value = False\n with self.app.app_context():\n 
self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (\"User not in team!\", 200))\n self.gh.has_team_member.assert_called_once_with(\"myuser\", \"githubid\")\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "def _remove(updated_pending_requests):\n remove_member_from_pending_query = Query.room_request(roomname, \"\", updated_pending_requests)\n self.db.execute_query(remove_member_from_pending_query)", "def test_remove_friends_symmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n\n u.friends.add(f)\n f.friends.remove(u)\n self.assertIs(u in f.friends.all(), False)\n self.assertIs(f in u.friends.all(), False)", "def drop_off_task(obs):\n gripper_obs = obs[0][0][2:5]\n object_obs = torch.cat((obs[0][0][5:7], torch.tensor([1.0])))\n if (sum(gripper_obs == object_obs) == 3).item():\n print(f'Dropping the object off now')\n return True\n else:\n print(f'Picking up the object!')\n return False", "def _handleBusOwnerChanged(self, new_owner):\n if new_owner == '':\n logger.warn('No owner anymore for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n raise Exception('LostDhcpSlave')\n else:\n pass # Owner exists", "def remove_person_from_the_station(self, station: TelegramController.Station):\n\n if station.line_number in self.__stations_dict and station.station_number in self.__stations_dict[\n station.line_number]:\n if self.__stations_dict[station.line_number][station.station_number] == 1:\n del self.__stations_dict[station.line_number][station.station_number]\n if len(self.__stations_dict[station.line_number]) == 0:\n del self.__stations_dict[station.line_number]\n elif self.__stations_dict[station.line_number][station.station_number] > 1:\n self.__stations_dict[station.line_number][station.station_number] -= 1\n self.__message_sender.send_line(station.line_number, update_passengers=True)\n else:\n print(\"whoops an error, looks like the current station doesn't exit and there's no person waiting for it.\")", "def complete(self):\n return (self.memberDevices <= len(self.members)) or not self.exists", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_delete_non_owner(self):\n another_user = CustomUser.objects.create(id=134, email='[email protected]', is_active=True)\n another_user.set_password('qwerty12345')\n another_user.save()\n\n self.client.login(email='[email protected]', password='qwerty12345')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 87876})\n\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 403)", "def target_will_be_deleted(cls, ctx, user, target):\n # For every rover feature that was enabled/used by this target being deleted (as passed via\n # the target metadata) and that is listed in any rover_features field in the capability definitions,\n # decrement its uses count to both free up any free uses and keep an accurate count of 
the number of uses.\n for rover_feature in (f for f in target.metadata.iterkeys() if f in capability_module.all_rover_features()):\n if target.rover.can_reuse_feature(rover_feature):\n target.rover.reuse_feature(rover_feature)\n else:\n logger.warn(\"No available capabilities when trying to reuse rover feature [%s][%s]\",\n rover_feature, target.user.capabilities)", "def test_remove_followers(self):\n pass" ]
[ "0.6447291", "0.6402777", "0.637274", "0.61781394", "0.6064277", "0.59293276", "0.58794194", "0.58277196", "0.58173835", "0.5780744", "0.5778175", "0.5695273", "0.5692215", "0.56889635", "0.56681126", "0.5662332", "0.5635315", "0.56060123", "0.5600414", "0.5581912", "0.5572824", "0.5558181", "0.555741", "0.5554508", "0.5551208", "0.5549676", "0.5538474", "0.5528646", "0.55206037", "0.5484841", "0.547482", "0.5462268", "0.5457631", "0.54571044", "0.54478526", "0.5444867", "0.54446566", "0.54420394", "0.54409444", "0.5435328", "0.5426527", "0.54172057", "0.54168695", "0.54104245", "0.5388187", "0.5385031", "0.5379166", "0.5371102", "0.53708696", "0.5365126", "0.5362628", "0.53529793", "0.5352597", "0.5349752", "0.533929", "0.5334408", "0.5334376", "0.5333468", "0.53105766", "0.5279138", "0.5276762", "0.5272932", "0.5271816", "0.527084", "0.5269935", "0.5252177", "0.5251743", "0.52512574", "0.5233443", "0.5228801", "0.5221088", "0.5220006", "0.52154654", "0.52076685", "0.52065694", "0.5205373", "0.51984423", "0.5198417", "0.5192645", "0.5190721", "0.5184148", "0.5182349", "0.5180277", "0.5175932", "0.51724935", "0.51640785", "0.51530975", "0.515198", "0.5150458", "0.51467925", "0.5142", "0.5138596", "0.51373386", "0.5137216", "0.51276106", "0.5125328", "0.5121738", "0.51174307", "0.5114009", "0.5112847" ]
0.5255025
65
Check that an InputError is raised when the channel_id is invalid.
def test_channel_join_invalid_channel():
    clear()
    user = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    channels_create(user['token'], 'userchannel', True)
    invalid_id = 0
    with pytest.raises(InputError):
        channel_join(joiner['token'], invalid_id)
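The `channel_join` exercised here is assumed to validate the channel id before touching membership; a minimal sketch of that branch, using a hypothetical in-memory `channels` store and `InputError` class (assumptions, not this record's actual implementation):

# Minimal sketch of the behaviour the test above expects; the `channels`
# store shape and the InputError class are assumptions.
class InputError(Exception):
    """Raised when an argument (here, the channel id) is invalid."""

channels = {}  # assumed shape: channel_id -> {"members": set(), "is_public": bool}

def channel_join(token, channel_id):
    if channel_id not in channels:
        # unknown channel id -> InputError, as the test asserts
        raise InputError(f"channel {channel_id} does not exist")
    channels[channel_id]["members"].add(token)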
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "def test_react_invalid_message_id_in_channel():\n clear()\n user_a = register_n_users(1)\n channels_create(user_a[\"token\"], \"channel_a\", True)\n invalid_channel_id = -1\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], invalid_channel_id, 1)", "def _check_channel_input(self, channel):\n # da `.get` `None` zurueckgibt wenn der Schluessel `channel` nicht existiert,\n # wird auch bei fehlender Konfiguration der Fehler geworfen\n if self.channels.get(channel) != GPIO.IN:\n raise RuntimeError(\"You must setup() the GPIO channel as an input first\")", "async def channel_manage_error(self, ctx: commands.context, error):\n if isinstance(error, commands.ChannelNotFound):\n await ctx.send(\"That channel was not found, make sure the channel exists.\")\n else:\n logging.warning(error)", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def test_channel_leave_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_leave(leaver['token'], invalid_id)", "def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def test__validate_channels__type_error(input_value):\n validate_channels(input_value)", "def add_badchannel(self):\n text = 'Channel number: \\n(e.g.: 3, 5, 8-12)'\n uinp, ok = QInputDialog.getText(None, 'Add as bad channel', text)\n if ok:\n uinp = uinp.replace(' ', '') # removes blank spaces\n ch_str = uinp.split(',') # splits csv\n try:\n ch_list = []\n for elem in ch_str:\n if '-' in elem: # if given a range e.g. 7-12\n elem_lims = elem.split('-')\n seq = range(int(elem_lims[0]), int(elem_lims[1]) + 1)\n ch_list.extend(seq)\n else: # if given a single value\n ch_list.append(int(elem))\n self.model.BadChannelAdd(ch_list=ch_list)\n except Exception as ex:\n print(str(ex))", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def validate_channel_value(value: int) -> None:\n if 0 <= value <= 255:\n pass\n else:\n raise ValueError(\"Color channel has to be in range [0; 255]\")", "def del_badchannel(self):\n text = 'Channel number: \\n(e.g.: 3, 5, 8-12)'\n uinp, ok = QInputDialog.getText(None, 'Delete bad channel', text)\n if ok:\n uinp = uinp.replace(' ', '') # removes blank spaces\n ch_str = uinp.split(',') # splits csv\n try:\n ch_list = []\n for elem in ch_str:\n if '-' in elem: # if given a range e.g. 
7-12\n elem_lims = elem.split('-')\n seq = range(int(elem_lims[0]), int(elem_lims[1]) + 1)\n ch_list.extend(seq)\n else: # if given a single value\n ch_list.append(int(elem))\n self.model.BadChannelDel(ch_list=ch_list)\n except Exception as ex:\n print(str(ex))", "async def ticker_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def validateDevChannel( self, dev, devChannel ):\n d = self.dcDict\n if devChannel not in d[dev]['devChannels'].keys(): raise DCBoxError( 0 )", "def test_get_flow_request_by_channel_id_wrong_channel_id(self):\n headers = self._get_oauth_header(client_name=DISPATCHER_NAME)\n res = self.client.get('/v1/flow_requests/search/?channel_id=unknown', **headers)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(res.json(), {'errors': ['not_found']})", "def test_react_invalid_message_id_in_different_channel():\n clear()\n user_a, user_b = register_n_users(2)\n # user_a create a channel\n channels_create(user_a[\"token\"], \"public_channel_a\", True)[\"channel_id\"]\n # user_b create a channel and send message in his own channel\n public_channel_id_b = channels_create(user_b[\"token\"], \"public_channel_b\", True)[\n \"channel_id\"\n ]\n message_id_b = message_send(\n user_b[\"token\"], public_channel_id_b, \"I am in channel_b\"\n )[\"message_id\"]\n # user_a should not be able to react the the message in the public_channel_b\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], message_id_b, 1)", "def _validate_call_id(self, call_id):\n\n self._validate_required_data(call_id, self.CALL_ID)\n\n query = CallRecord.objects.filter(call_id=call_id)\n\n if query.exists():\n raise NotAcceptable(\n detail='Call id is already in use. 
Please, choose another')", "def test__validate_channels__passing(input_value):\n return validate_channels(input_value)", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def channel_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_id\")", "def test_dccChatMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_CHAT, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC CHAT request: ['foo']\")", "def test_invalid_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=ABC'.format(token))\n self.assertSocketError(ws, 4300, 'Invalid channel.')\n self.assertTrue(mock_get.called)\n self.assertFalse(mock_subscribe.called)", "def test_api_invalid_stream_id(self) -> None:\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n result = self.api_patch(\n user,\n \"/api/v1/users/me/subscriptions/121\",\n {\"property\": \"is_muted\", \"value\": \"somevalue\"},\n )\n self.assert_json_error(result, \"Invalid stream ID\")", "async def convert_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def on_badchannelkey(self, conn, event) -> None:\n channel_name = event.arguments[0]\n logger.warning('Cannot join channel %s (bad key).', channel_name)", "def __parse_channel_id(self, data):\n if 'channel_id' in data:\n return data['channel_id']\n if 'channel' in data:\n return data['channel']['id']\n return None", "async def handle_user_input_error(self, ctx: Context, e: errors.UserInputError) -> None:\n if isinstance(e, errors.MissingRequiredArgument):\n embed = self._get_error_embed(\"Missing required argument\", e.param.name)\n self.bot.stats.incr(\"errors.missing_required_argument\")\n elif isinstance(e, errors.TooManyArguments):\n embed = self._get_error_embed(\"Too many arguments\", str(e))\n self.bot.stats.incr(\"errors.too_many_arguments\")\n elif isinstance(e, errors.BadArgument):\n embed = self._get_error_embed(\"Bad argument\", str(e))\n self.bot.stats.incr(\"errors.bad_argument\")\n elif isinstance(e, errors.BadUnionArgument):\n embed = self._get_error_embed(\"Bad argument\", f\"{e}\\n{e.errors[-1]}\")\n self.bot.stats.incr(\"errors.bad_union_argument\")\n elif isinstance(e, errors.ArgumentParsingError):\n embed = self._get_error_embed(\"Argument parsing error\", str(e))\n await ctx.send(embed=embed)\n self.bot.stats.incr(\"errors.argument_parsing_error\")\n return\n else:\n embed = self._get_error_embed(\n \"Input error\",\n \"Something about your input seems off. 
Check the arguments and try again.\"\n )\n self.bot.stats.incr(\"errors.other_user_input_error\")\n\n await ctx.send(embed=embed)\n await self.send_command_help(ctx)", "def isInputValid(self, input):\r\n pass", "def test_channel_join_except_channel():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channels_create_v2(auth_token1, \"Chill Soc\", True)\n invalid_channel = 50\n \n with pytest.raises(InputError):\n channel_join_v2(auth_token2, invalid_channel)", "def set_channel(self):\n\t\tself.channel = int(input(\"Enter the Channel No. = \"))\n\t\tif self.channel > 1 :\n\t\t\tself.channel = int(input(\"Enter the Channel No. = \"))\n\t\t\n\t\treturn self.channel", "def _validate_input(self):\n\n if is_empty(self.message) == True:\n raise ValidationException(\"Message cannont be empty.\")", "def _raise_if_invalid(self):\n if self._stack_result == -1 and self._recm_data == -1:\n error_message = 'Worker result for request ID {} does not exist yet'.format(\n self.external_request_id)\n logger.exception(error_message)\n raise SARBRequestInvalidException(error_message)", "def test_request_channel_is_none(self):\n CanInfo.objects.filter(can_id=self.UUID).update(channel_name=None)\n self.assertFalse(send_rotate_to_can(self.USER, self.BIN_NUM))", "def test_dccAcceptMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_ACCEPT, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND ACCEPT request: ['foo']\")", "async def error(self, channel_id,user_infos, user_id, team_id):\n # Message de commande incorrecte\n error = \"Commande invalide. 
Veuillez utiliser la commande [help] pour plus d'informations.\"\n return await self.sendText(error, channel_id,user_infos, team_id)", "def validate_message(self, state_id, msg):\n pass", "def set_channel(self):\r\n\t\tself.channel = int(input(\"Enter the Channel No.(0-8) = \"))\r\n\t\twhile self.channel > 8 :\r\n\t\t\tself.channel = int(input(\"Enter the Channel No.(0-8) = \"))\r\n\t\t\r\n\t\treturn self.channel", "def handle_channel(self, channel_id=None, entity=None):\n # print(\"handle_channel:\")\n # print(\"\\tchannel_id:{}\".format(channel_id))\n # print(\"\\tentitiy:{}\".format(entity))\n if channel_id:\n if channel_id.isdigit():\n # if int(channel_id) < 512:\n # if (0 < int(channel_id)) & (int(channel_id) < 512):\n if 0 <= int(channel_id) < 512:\n # print(\"int(channel_id): {}\".format(int(channel_id)))\n channel_value = self.channels[int(channel_id)]\n return channel_value\n else:\n return -1\n\n else:\n return self.channels", "def test_invalid_report_id(self) -> None:\n\n # `report_id` is negative\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/-123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is a non-numerical string\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/abcdef\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is undefined\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )", "def test_validate_input_rejection(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('000011')", "def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')", "def find_channel_id(self, channel_name):\n if not channel_name:\n raise AttributeError(f\"{self.find_channel_id.__name__}: Channel ID or Channel Name not given\")\n\n for channel in self.channels:\n if channel_name == channel['name']:\n return channel['id']\n raise NameError(f\"{self.find_channel_id.__name__}: Channel with given name not found\")", "def test_id_nonexistent(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"e-dad\"\n self.assertRaises(InvalidInputError, self.command.run)", "def test_dccSendMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_SEND, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND request: ['foo']\")", "def validate_device_id(device_id):\n regex = re.compile(r'^[0-9a-fA-F]{2,6}$')\n if regex.match(device_id) == None:\n raise ValidationError('Device ID must be 2-6 characters and must be hexadecimal (0-9,a-f,A-F).')", "def 
check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def whenException(self, channel, call):", "def validateVfabric(output ,arg_dict, key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' = '%s' is not a valid Id. ID should be numeric \" % \n\t\t\t\t(key,id)))\n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n\t output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict", "def checkUIDValidity(self, uid):\r\n if uid not in self._pendingContainer:\r\n raise CredentialError('Invalid environment ID.')", "def testIdNonUniqueIdOnInit(self):\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n cdl_convert.ColorCorrection,\n 'uniqueId',\n 'file'\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n try:\n cc = cdl_convert.ColorCorrection('uniqueId', 'file')\n except ValueError:\n self.fail(\"Non-unique ID was not accepted!\")\n\n self.assertEqual(\n 'uniqueId001',\n cc.id\n )", "def test_standup_send_invalid_channel (url, _pre_setup):\n\n token = _pre_setup[0]['token']\n\n standup_send_data = {\n 'token': token,\n 'channel_id': 99999,\n 'message': \"message\"\n }\n\n response = requests.post(url + \"standup/send\", json=standup_send_data)\n assert response.status_code == 400", "def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])", "def _invalid_transport_key_id():\n pecan.abort(404, u._('Not Found. 
Provided transport key id is invalid.'))", "def test_invalid_event(bot):\n expect_error(edit, InputError, bot.username, 1, False, None, None)", "def test_dccResumeMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_RESUME, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND RESUME request: ['foo']\")", "def handle_err(self, err, msg):\n assert \"BAD:\" in msg.value().decode('utf-8')\n assert err is not None\n self.remaining -= 1", "def _check(self,err):\r\n if err < 0:\r\n buf_size = 128\r\n buf = create_string_buffer('\\000' * buf_size)\r\n self.nidaq.DAQmxGetErrorString(err,byref(buf),buf_size)\r\n raise RuntimeError('NI-DAQ call failed with error %d: %s'%(err,repr(buf.value)))", "def validate_input(self, *args):\n return", "def check_channel_shell_request(self, channel):\n return False", "def input_error(self, errCode):\n errMsg = ''\n if 'A' in errCode: errMsg = errMsg + 'X column is not specified.\\n'\n if 'B' in errCode: errMsg = errMsg + 'X Column is not numeric.\\n'\n if 'C' in errCode: errMsg = errMsg + 'Y column is not specified.\\n'\n if 'D' in errCode: errMsg = errMsg + 'Y Column is not numeric.\\n'\n if 'E' in errCode: errMsg = errMsg + 'Z Column is not numeric.\\n'\n if 'F' in errCode: errMsg = errMsg + 'Calibration point 1 row is out of range.\\n'\n if 'G' in errCode: errMsg = errMsg + 'Calibration point 2 row is out of range.\\n'\n if 'H' in errCode: errMsg = errMsg + 'First row is not specified.\\n'\n if 'I' in errCode: errMsg = errMsg + 'Last row is not specified.\\n'\n if 'J' in errCode: errMsg = errMsg + 'First row is out of range.\\n'\n if 'K' in errCode: errMsg = errMsg + 'Last row is out of range.\\n'\n if 'L' in errCode: errMsg = errMsg + 'First and last rows are not compatible.\\n'\n self.wait_window(InputError(self, errMsg.rstrip('\\n')))", "def test_invalid_report_id(self) -> None:\n\n # `report_id` is negative\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/-123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is a non-numerical string\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/abcdef\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )\n\n # `report_id` is undefined\n channel = self.make_request(\n \"DELETE\",\n \"/_synapse/admin/v1/event_reports/\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n self.assertEqual(\n \"The report_id parameter must be a string representing a positive integer.\",\n channel.json_body[\"error\"],\n )", "def validate_input(self, argin):\n try:\n configuration_dict = json.loads(argin)\n _ = configuration_dict[\"id\"]\n except (KeyError, JSONDecodeError) as err:\n msg = f\"Validate configuration failed with error:{err}\"\n self.logger.error(msg)\n return (None, ResultCode.FAILED, msg)\n except Exception as other_errs:\n msg = 
f\"Validate configuration failed with unknown error:{other_errs}\"\n self.logger.error(msg)\n return (None, ResultCode.FAILED, msg)\n\n return (\n configuration_dict,\n ResultCode.OK,\n \"ConfigureScan arguments validation successful\",\n )", "def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght \n counter = 0\n for char in id:\n counter += 1\n print counter , lib.constants._ATTR_ID_LENGHT\n if counter > lib.constants._ATTR_ID_LENGHT :\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' exceeded the given length i.e Max Length = '%s'\" % \n\t\t\t(id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n return 0\n return 0", "def validateIOmoduleId(output ,arg_dict , key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric \" % (key,id))) \n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict", "def logInvalidId(self, *args):\n return _libsbml.CompSBasePlugin_logInvalidId(self, *args)", "def test_channel_leave_invalid_token():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'First', 'Last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n auth_logout(user['token'])\n with pytest.raises(AccessError):\n channel_leave(user['token'], userchannel_id['channel_id'])", "async def cog_command_error(self, ctx: Context, error: Exception) -> None:\n if isinstance(error, InWhitelistCheckFailure):\n error.handled = True", "def test_ap_csa_invalid(dev, apdev):\n csa_supported(dev[0])\n ap = connect(dev[0], apdev)\n\n vals = [ 2461, 4900, 4901, 5181, 5746, 5699, 5895, 5899 ]\n for val in vals:\n if \"FAIL\" not in ap.request(\"CHAN_SWITCH 1 %d\" % val):\n raise Exception(\"Invalid channel accepted: %d\" % val)", "def test_invalid_course_key(self):\n errstring = \"Unparsable course_id\"\n with self.assertRaisesRegex(CommandError, errstring):\n call_command('export_olx', 'InvalidCourseID')", "async def handleChannelCreate(self, channel: discord.abc.GuildChannel):\n self.logger.info(\n \"Channel creation has been detected. 
Name: %s, ID: %s\", channel.name, channel.id\n )\n\n if not isinstance(channel, discord.TextChannel):\n return\n\n if channel.name == AH_CHANNEL:\n self.logger.info(\"%s detected, applying exceptions\", AH_CHANNEL)\n ctx = await self.getContext(channel)\n if not ctx:\n return\n await self.notifyChannel(ctx)\n await self.makeHighlightChanges(ctx, channel)\n await self.makeStarboardChanges(ctx, channel)\n await self.makeWordFilterChanges(ctx, channel)\n async with self.config.guild(channel.guild).get_attr(KEY_CHANNEL_IDS)() as channelIds:\n channelIds[channel.id] = {\"time\": datetime.now().timestamp()}", "def handle_invalid_command(self, msg):\n return self.create_response(Command.INVALID_COMMAND.value)", "def test_channel_leave_invalid_user():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_leave(leaver['token'], userchannel_id['channel_id'])", "def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()", "def check_channel_exec_request(self, channel, command):\n return False", "def invalid(self):\n pass", "def handle_channel_set(self, channel_id=None, channel_value=None):\n # print(\"handle_channel_set:\")\n # print(\"\\tchannel_id:{}\".format(channel_id))\n # print(\"\\tchannel_value:{}\".format(channel_value))\n if channel_id is not None:\n # try:\n print(\"int(channel_id): {}\".format(int(channel_id)))\n # now please set the value of the given channel to new value..\n if int(channel_id) > 512:\n channel_id = 512\n if int(channel_id) < 0:\n channel_id = 0\n if int(channel_value) > 255:\n channel_value = 255\n if int(channel_value) < 0:\n channel_value = 0\n self.channels[int(channel_id)] = int(channel_value)\n # send new values\n self.ola_connection.dmx_send_frame()\n return self.channels[int(channel_id)]\n # except:\n # return -1\n else:\n # return error\n return -1", "def _check_id(self, keyword):\n if keyword not in self.request.data:\n return '{} parameter is missing'.format(keyword)\n \"\"\" Check if <keyword> parameter is not None \"\"\"\n if self.request.data[keyword] == '':\n return '{} ID cannot be None'.format(keyword)\n \"\"\" Check if <keyword> parameter is > 0 \"\"\"\n if int(self.request.data[keyword]) < 1:\n return '{} ID must be an integer > 0'.format(keyword)", "def __input_validator(msg):\n\n\t\tstatus = msg[\"status\"]\n\n\t\tif status == 1:\n\t\t\treturn status\n\t\telif status == 0:\n\t\t\tprint(msg[\"body\"])\n\t\telif status == -1:\n\t\t\tprint(\"Please enter something!\")\n\t\telif status == -2:\n\t\t\tprint(\"Your command {} is invalid\".format(msg[\"verb\"]))\n\t\telif status == -3:\n\t\t\tprint(\"No argument given after {}\".format(msg[\"verb\"]))", "async def gen_error(error_id: str, ctx: commands.Context) -> Embed:\n errors = get_file(\"errors\")\n error = Embed(color=error_color)\n error.add_field(name=\"⚠️ \" + errors[error_id][\"title\"], value=errors[error_id]['txt'])\n error = set_footer(error, ctx)\n await ctx.send(embed=error)", "def onNumberFailed( self, reason ):\n\t\tlog.warn( \n\t\t\t\"\"\"Unable to read number to user on channel %r: %s\"\"\",\n\t\t\tself.agi.variables['agi_channel'], reason.getTraceback(),\n\t\t)", "def make_iq_error(self, id, type='cancel',\n condition='feature-not-implemented', text=None):\n iq = self.Iq()._set_stanza_values({'id': id})\n iq['error']._set_stanza_values({'type': type,\n 
'condition': condition,\n 'text': text})\n return iq", "def check_value(is_valid, error_msg):\n if not is_valid:\n raise ValueError(error_msg)", "def test_dccChatIndecipherablePort(self):\n result = self.assertRaises(\n irc.IRCBadMessage,\n self.client.dcc_CHAT,\n self.user,\n self.channel,\n \"foo.txt 127.0.0.1 sd@d\",\n )\n self.assertEqual(str(result), \"Indecipherable port 'sd@d'\")", "def clean_course_id(self):\r\n\r\n cleaned_id = self.cleaned_data[\"course_id\"]\r\n try:\r\n course_key = CourseKey.from_string(cleaned_id)\r\n except InvalidKeyError:\r\n try:\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(cleaned_id)\r\n except InvalidKeyError:\r\n msg = 'COURSE NOT FOUND'\r\n msg += u' --- Entered course id was: \"{0}\". '.format(cleaned_id)\r\n msg += 'Please recheck that you have supplied a valid course id.'\r\n raise forms.ValidationError(msg)\r\n\r\n if not modulestore().has_course(course_key):\r\n msg = 'COURSE NOT FOUND'\r\n msg += u' --- Entered course id was: \"{0}\". '.format(course_key.to_deprecated_string())\r\n msg += 'Please recheck that you have supplied a valid course id.'\r\n raise forms.ValidationError(msg)\r\n\r\n return course_key", "def getValidation(myInput):\r\n if myInput == \"\":\r\n print('You did not enter the number of bugs collected.')\r\n return -1\r\n elif myInput.isnumeric() == False:\r\n print('You entered a negative or a text value, please enter numerical digits only.')\r\n return -1\r\n elif myInput.isnumeric() == True:\r\n return int(myInput)\r\n else:\r\n print('There has been a read error, please reenter your number')\r\n return -1", "def check_user_id(user_id):\n\n try:\n message = (\n 'Validating submitted user id.'\n )\n logger.info(message)\n if user_id != '':\n invalid = (\n int(user_id) < 0 or\n cassy.check_user_id_exists(int(user_id))\n )\n if invalid:\n raise PlantalyticsDataException(USER_ID_INVALID)\n message = (\n 'Submitted user id successfully validated.'\n )\n logger.info(message)\n except PlantalyticsException as e:\n raise e\n except ValueError:\n raise PlantalyticsDataException(USER_ID_INVALID)\n except Exception as e:\n raise e", "def _validate_integer(self, action_result, parameter, key, allow_zero=False):\n\n if parameter is not None:\n try:\n if not float(parameter).is_integer():\n return action_result.set_status(phantom.APP_ERROR, AWSSECURITYHUB_VALID_INT_MSG.format(param=key)), None\n\n parameter = int(parameter)\n except:\n return action_result.set_status(phantom.APP_ERROR, AWSSECURITYHUB_VALID_INT_MSG.format(param=key)), None\n\n if parameter < 0:\n return action_result.set_status(phantom.APP_ERROR, AWSSECURITYHUB_NON_NEG_INT_MSG.format(param=key)), None\n if not allow_zero and parameter == 0:\n return action_result.set_status(phantom.APP_ERROR, AWSSECURITYHUB_NON_NEG_NON_ZERO_INT_MSG.format(param=key)), None\n\n return phantom.APP_SUCCESS, parameter", "def validate_crx_id(crx_id):\n try:\n assert isinstance(crx_id, str)\n assert crx_id.isalnum()\n assert len(crx_id) == 32\n except AssertionError:\n raise MalformedExtId", "def get_input(msg):#function which catches all user input which is invalid (not numbers) for all the shapes\n value = None\n while not value:\n value = input(msg)\n if not value.isnumeric():#if not a valid number print the following message \n print(\"Please enter a valid number\")\n value = None\n else:\n return int(value)#once a correct number is entered the number is returned and program contiues ", "def check_server_id(message):\r\n global server_id\r\n if server_id is 0: # the 
channel_id is not set up at all\r\n server_id = message.channel.id # this is the current server that we're talking to\r\n channel = client.get_channel(server_id) # this is the current channel in that server that we will send messages to\r\n return channel\r\n elif server_id is not message.channel.id: # our current set up is for another server, change it\r\n server_id = message.channel.id\r\n channel = client.get_channel(server_id)\r\n return channel", "def is_valid_channel_name(channel):\n if not is_channel_name(channel):\n return False\n\n test_section = channel[1:]\n\n if not MIN_CHANNEL_NAME_LEN < len(channel) < MAX_CHANNEL_NAME_LEN:\n return False\n\n valid_symbols = '#\\\\|^`[]{}_'\n valid_chars = string.ascii_letters + string.digits + valid_symbols\n\n for char in channel:\n if char not in valid_chars:\n return False", "def channel_name(self, channel_name):\n self.channel_id = self.get_channel_id(channel_name)\n LOG.debug(\"Mattermost channel id: %s\", self.channel_id)", "def test_id():\r\n cmd = ShdlcCmdGetErrorState(clear=False)\r\n assert type(cmd.id) is int\r\n assert cmd.id == 0xD2", "def error_handler(num, err):\n print(\"Error in input {}\".format(num))\n err = err.decode()\n raise Exception(err)", "def validate_port(port):\n invalid_ports = [80, 443, 6443, 22623]\n while True:\n try:\n check_for_string = port.isdigit()\n if not check_for_string:\n logging.warn('port has to be an integer')\n else:\n invalid_ports.index(int(port))\n logging.warn('ports {} are not allowed'.format(invalid_ports))\n port = input('enter a port: ')\n except AttributeError:\n break \n except ValueError:\n break\n\n return port", "def slack_channel_lookup_error(err):\n current_app.logger.exception(err)\n return err.message, 404", "def _check_validconnectioninput(self):\n # Check if name is valid\n if self._check_name(self.symbol):\n second_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '.' is used:\n if self._is_period(self.symbol):\n self.symbol = self.scanner.get_symbol()\n # Check if device input begins with 'I'\n if self.names.get_name_string(self.symbol.id)[0] == \"I\":\n # Check if input number is a positive number\n try:\n inputno = int(\n self.names.get_name_string(\n self.symbol.id)[\n 1:])\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n except BaseException:\n # Input number is not valid\n self._display_syntax_error(\"number\")\n self._semicolon_skipper()\n return None, None\n # OR if DType input\n elif self._check_validdtypeinput(self.symbol):\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n else:\n # Input is not valid\n self._display_syntax_error(\"input\")\n self._semicolon_skipper()\n return None, None\n else:\n # No '.'\n self._display_syntax_error(\"period\")\n self._semicolon_skipper()\n return None, None\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n return None, None" ]
[ "0.7181985", "0.70581347", "0.6617073", "0.6448678", "0.64280605", "0.6396151", "0.6361458", "0.63370305", "0.62574375", "0.61125195", "0.61097026", "0.60695", "0.6059686", "0.6048345", "0.6008819", "0.5979831", "0.5916335", "0.5861755", "0.58391106", "0.5798364", "0.5798364", "0.5798364", "0.5798364", "0.5760914", "0.57522386", "0.5715913", "0.56864536", "0.56587905", "0.564108", "0.5630811", "0.5619767", "0.55657357", "0.55384904", "0.55066264", "0.54597074", "0.54489017", "0.5447331", "0.5419158", "0.54172796", "0.5383821", "0.5371004", "0.53639364", "0.53600144", "0.535953", "0.53472126", "0.53394365", "0.5339297", "0.5337203", "0.53307045", "0.53226364", "0.531064", "0.5300308", "0.5293846", "0.5290688", "0.52756035", "0.52360934", "0.5230501", "0.5225713", "0.5217249", "0.5216659", "0.5213629", "0.52108085", "0.52075547", "0.52039665", "0.5192777", "0.51903915", "0.51799405", "0.5177861", "0.5172546", "0.5167698", "0.5161038", "0.5161037", "0.5154548", "0.5149464", "0.514758", "0.514374", "0.5133743", "0.51220375", "0.5117858", "0.5111345", "0.509115", "0.50860125", "0.5078491", "0.5077163", "0.5070132", "0.5055183", "0.5050831", "0.5048352", "0.5045547", "0.50430423", "0.5042423", "0.50423944", "0.5035208", "0.50329626", "0.5031378", "0.50312054", "0.5027182", "0.5026759", "0.5025795", "0.5016683" ]
0.662972
2
Check that an AccessError is raised if the user is already in the channel.
def test_channel_join_already_in_channel():
    clear()
    user = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    userchannel_id = channels_create(user['token'], 'userchannel', True)
    with pytest.raises(AccessError):
        channel_join(user['token'], userchannel_id['channel_id'])
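Likewise, the AccessError case presumes a membership check before the join proceeds; a minimal sketch under the same assumptions (hypothetical store layout and AccessError class):

# Sketch of the already-a-member branch the test above asserts;
# the dict-of-sets store shape is an assumption.
class AccessError(Exception):
    """Raised when the caller may not perform the action."""

def channel_join(user_id, channel_id, channels):
    members = channels[channel_id]["members"]
    if user_id in members:
        raise AccessError("user is already a member of this channel")
    members.add(user_id)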
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False", "def check_user(msg):\n if \"Error\" in msg:\n raise ValueError('User already exists.')", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def verify_user_existance(self, user):\n for client in self.clients:\n if user == client.get_name():\n return True\n return False", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False", "def test_channel_leave_invalid_user():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_leave(leaver['token'], userchannel_id['channel_id'])", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 
'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "async def channel_manage_error(self, ctx: commands.context, error):\n if isinstance(error, commands.ChannelNotFound):\n await ctx.send(\"That channel was not found, make sure the channel exists.\")\n else:\n logging.warning(error)", "def can_be_accessed(self, user):\n if self.shared_with_everyone:\n return True\n\n if self.user == user or self.users_allowed.filter(pk=user.pk).exists():\n return True\n\n for group in self.groups_allowed.all():\n if user.groups.filter(pk=group.pk).exists():\n return True\n\n return False", "def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "async def control_checks(self, ctx):\n server_id = ctx.message.server.id\n requester = ctx.message.author\n #silently drop if not in voice\n if not self.in_voice(server_id):\n return False\n #refuse if user not in the same channel\n if not self.user_in_channel(server_id, requester):\n vcname = self.get_server_dict(server_id)['voice'].channel.name\n await ctx.bot.send_message(ctx.message.channel, \"You can't control me outside of {}.\".format(vcname))\n return False\n return True", "def check_if_user_can_interact(bot, update, *args, **kwargs):\n\n user_id = update._effective_user\n # print(\"cerco user con id \" + str(user_id) + \", nel database\")\n user = DB.execute(TABELLE[\"id_users\"][\"select\"][\"from_id\"], (user_id['id'],))\n # print(\"ho trovato : \" + str(user))\n if not user: # user non prensete nel db id_users\n if 'private' in update.message.chat.type: # se il messaggio è stato mandato in privata allora devo chiedere l'accesso\n self.request_access(bot, user_id)\n elif 'supergroup' in update.message.chat.type: # altrimenti guardo se è presente nei bot_users\n bot_users = DB.execute(TABELLE['bot_users']['select']['by_ids'], (user_id, bot.id))\n if not bot_users: # se non è presente glielo dico e lo salvo nel db\n update.message.reply_text(\"E tu chi sei? 
Non ti ho mai visto da queste parti...\"\n \"Perche non mi invii un bel messaggio di start cosi diventiamo amici?\",\n reply_to_message_id=update.message.message_id)\n self.add_bot_user(update._effective_user, bot.id)\n\n return\n elif user[\"banned\"]:\n update.message.reply_text(\"Spiacente sei stato bannato dal bot\")\n return\n else:\n sig = signature(func)\n if len(sig.parameters) > 1:\n return func(bot, update, *args, **kwargs)\n else:\n return func(*args, **kwargs)", "def already_logged_in(oauth_user, oauth_service):\n try:\n created = current_user.add_oauth_identity(oauth_user.service_name, oauth_user.service_user_id)\n if created:\n message = 'Linked your ' + oauth_service.value + ' account to your CatHerder account!'\n else:\n message = 'Your ' + oauth_service.value + ' account is already linked to a CatHerder user.'\n return current_user, message, True\n except Exception as e:\n return None, e.message, False", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)", "async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True", "def vc_only():\n\n async def check(ctx):\n if ctx.guild and ctx.author.voice:\n if not ctx.guild.me.voice or ctx.author.voice.channel == ctx.guild.me.voice.channel:\n return True\n await ctx.reply(\"I'm already in another voice channel!\")\n return False\n await ctx.reply('You must join a server voice channel first!')\n return False\n\n return commands.check(check)", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user 
into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def has_access(self, user):\n if user.is_superuser:\n return True\n return self.user_objects(user).filter(id=self.id).exists()", "def check_user(user):\n result_user = search_column_with_constraint(choose_database(\"auth\"), \"users\", \"id\", \"id\", user)\n # result_user = search_single_entry(choose_database(\"auth\"), \"users\", \"id\", user)\n\n if len(result_user) == 0:\n return 0\n else:\n return 1", "def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False", "def has_access(self, roomname: str, uid: str) -> bool:\n\n if roomname == 'global':\n return True\n\n try:\n room_exists = self.room_exists(roomname)\n get_username_query = Query.get_username(uid)\n username = self.db.read_execute_query(get_username_query)[0][0]\n\n if room_exists:\n get_members_query = Query.get_room_members(roomname)\n room_members: str = self.db.read_execute_query(get_members_query)[0][0]\n room_members_list = 
room_members.split()\n\n\n if username in room_members_list:\n self.logger.debug(f\"User with UID = '{uid}' has access to room '{roomname}'\")\n return True\n else:\n self.logger.error(f\"User with UID = '{uid}' does not have access to room '{roomname}'\")\n return False\n else:\n self.logger.error(f\"Room '{roomname}' does not exist\")\n return False\n\n except:\n self.logger.error(f\"Failed to verify room access\")\n return False", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def test_channel_join_except_repetitive():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", True)\n\n\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])", "def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def test_channel_join_except_invalid_auth():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", True)\n\n # Create invalid token for the test\n invalid_user = 999\n invalid_token = generate_token(invalid_user)\n\n with pytest.raises(AccessError):\n channel_join_v2(invalid_token, channel_id1[\"channel_id\"])", "async def deny(self, ctx, user: discord.Member, *, reason: str=None):\n self.data_check(ctx)\n server = ctx.message.server\n try:\n defchannel = self.riceCog2[server.id][\"defchannel\"]\n except:\n defchannel = default_channel\n try:\n channelmute = self.riceCog2[server.id][\"channelmute\"]\n except:\n channelmute = defchannelmute \n channel = discord.utils.get(server.channels, name = defchannel)\n if channel is None:\n msg = await self.bot.say (\"I was unable to write to your log channel. 
Please make sure there is a channel called {} on the server!\".format(defchannel))\n return\n else:\n pass\n if reason is None:\n msg = await self.bot.say(\"Please enter a reason for the warning!\")\n await asyncio.sleep(5)\n await self.bot.delete_message(msg)\n return\n if user.id in self.norole[server.id]:\n if self.norole[server.id][user.id]['Role'] == True:\n msg = await self.bot.say(\"This user has already been denied access to the channel.\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg) \n await self.bot.delete_message(ctx.message)\n return\n else:\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n role = nobnl \n mod = ctx.message.author\n await self.bot.delete_message(ctx.message)\n await self.bot.add_roles(user, nobnl)\n dmuser = await self.bot.start_private_message(user)\n await self.bot.send_message(dmuser, \"Howdy!\\nThis is to let you know that you have been denied access to the channel for the reason:\\n\\n```{}``` \\nPlease speak to a member of staff if you have an issue.\".format(reason))\n user=user\n reason=reason\n ID = uuid.uuid4()\n embed=discord.Embed(title=\"User Denied:\", color=0xA00000)\n embed.add_field(name=\"Case ID:\", value=ID, inline=False)\n embed.add_field(name=\"Moderator:\", value=mod, inline=False)\n embed.add_field(name=\"User:\", value=\"{0} ({0.id})\".format(user), inline=False)\n embed.add_field(name=\"Reason:\", value=reason, inline=False)\n react = await self.bot.send_message(channel, embed=embed)\n await self.bot.add_reaction(react, \"\\U0001f44d\")\n await self.bot.add_reaction(react, \"\\U0001f44e\")\n await self.bot.add_reaction(react, \"\\U0001f937\")\n self.norole[server.id][user.id] = {\n 'Reason': reason,\n 'Mod': ctx.message.author.id,\n 'Role': True\n }\n dataIO.save_json(self.warninglist, self.norole)\n channel = discord.utils.get(server.channels, name = channelmute)\n for channel in server.channels:\n perms = discord.PermissionOverwrite()\n \n if channel.type == discord.ChannelType.text:\n perms.send_messages = False\n perms.read_messages = False\n await self.bot.edit_channel_permissions(channel, role, overwrite=perms) \n else:\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n role = nobnl \n mod = ctx.message.author\n await self.bot.delete_message(ctx.message)\n await self.bot.add_roles(user, nobnl)\n dmuser = await self.bot.start_private_message(user)\n await self.bot.send_message(dmuser, \"Howdy!\\nThis is to let you know that you have been denied access to the channel for the reason:\\n\\n```{}``` \\nPlease speak to a member of staff if you have an issue.\".format(reason))\n user=user\n reason=reason\n ID = uuid.uuid4()\n embed=discord.Embed(title=\"User Denied:\", color=0xA00000)\n embed.add_field(name=\"Case ID:\", value=ID, inline=False)\n embed.add_field(name=\"Moderator:\", value=mod, inline=False)\n embed.add_field(name=\"User:\", value=\"{0} ({0.id})\".format(user), inline=False)\n embed.add_field(name=\"Reason:\", value=reason, inline=False)\n react = await self.bot.send_message(channel, embed=embed)\n await self.bot.add_reaction(react, \"\\U0001f44d\")\n await self.bot.add_reaction(react, \"\\U0001f44e\")\n await self.bot.add_reaction(react, \"\\U0001f937\")\n self.norole[server.id][user.id] = {\n 'Reason': reason,\n 'Mod': ctx.message.author.id,\n 'Role': True\n }\n dataIO.save_json(self.warninglist, self.norole)\n channel = discord.utils.get(server.channels, name = channelmute)\n for channel in server.channels:\n perms = discord.PermissionOverwrite()\n \n if channel.type == 
discord.ChannelType.text:\n perms.send_messages = False\n perms.read_messages = False\n await self.bot.edit_channel_permissions(channel, role, overwrite=perms)", "def has_user(self):\n\t\treturn len( self.a_token ) > 0 and len( self.a_secret ) > 0", "def cog_check(self, ctx):\n return ctx.author.guild_permissions.administrator", "def user_in_channel(self, server_id, user):\n srv = self.get_server_dict(server_id)\n return user.voice.voice_channel and srv['voice'] and user.voice.voice_channel == srv['voice'].channel", "def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")", "def test_can_info_does_not_exist(self):\n fake_user = User(username='Fake', password='')\n self.assertFalse(send_rotate_to_can(fake_user, self.BIN_NUM))", "def UserName_availabity():\r\n try:\r\n \r\n UserName=request.args.get(\"UserName\")\r\n user_details=fetch_details(UserName)\r\n user_name=user_details[0]['UserName']\r\n if str(UserName)==str(user_name):\r\n msg=\"UserName is already taken kindly choose another one\"\r\n except IndexError:\r\n msg=\"UserName is available.\"\r\n return msg", "async def register(ctx, *args):\n user = ctx.message.author\n user_mention = ctx.author.mention\n chan_mention = \"<#876850365730021386>\"\n \n if user in self.data[\"users.json\"]:\n await ctx.message.channel.send(user_mention+\", you are already registered. :blue_heart:\")\n else:\n self.data[\"users_asked_to_be_registered.json\"].append(user)\n await ctx.message.channel.send(user_mention+\", do you accept the \"+chan_mention+\n \" (Indie Library Terms of Service). Command .accept if you do. :blue_heart:\")", "def test_channel_join_except_private():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", False)\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])", "def _checkUID(self, uid):\n return uid in self._reservedUID", "def test_channel_join_private_owner():\n clear()\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def _checkUserExists(username,self):\r\n \r\n exists = False\r\n \r\n if _findUser(username) is not None:\r\n exists = True\r\n \r\n return exists", "def channel_join(token, channel_id):\n\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # gets current channel data\n curr_channel = 
database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # checks if user is already a part of channel\n for user_id in curr_channel[\"member_ids\"]:\n if curr_id == user_id:\n raise error.InputError(description=\"user is joining a channel user is already in\")\n\n # this checks if the channel is empty (or new) in this case we make the new member an owner.\n if curr_channel[\"member_ids\"] == []:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n # this checks if the user is an owner of the slacker\n # if they are they are given owner privelages in the channel\n # else they are a member\n elif user_perms[\"permission_id\"] == 1:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is True:\n # adds the user into the channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is False:\n raise error.InputError(description=\"\"\"channel_join recieved a channel_id\n for a private channel\"\"\")", "def test_user_login_attempt_when_user_already_logged_in(self):\n\t\tpass", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def check_user(entry_code):\n\tif len(User.objects.filter(unique_code=entry_code)) == 1:\n\t\treturn(True)\n\telse:\n\t\traise Http404('No users exist with this code.')", "async def accept(ctx, *args):\n user = ctx.message.author\n user_mention = \"<@\"+str(user.id)+\">\"\n\n if user in self.data[\"users_asked_to_be_registered.json\"]:\n self.data[\"users.json\"].append(user)\n self.data[\"users_asked_to_be_registered.json\"].remove(user)\n await ctx.message.channel.send(user_mention+\", you have been successfully registered. :blue_heart:\")\n else:\n await ctx.message.channel.send(user_mention+\", have not commanded .register yet. \"\n \"Please do so first. :blue_heart:\")", "async def adduser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! 
Users can only be added to a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=True, send_messages=True)\n await ctx.message.delete()", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def verify_user(self):\n if self.username == \"root\":\n print \"Error: Please do not run this script as root.\"\n sys.exit(1)\n\n members = grp.getgrnam(self.groupowner)[3]\n if not self.username in members:\n print \"Error: The user who runs this script must belong to the group: \" + self.groupowner\n sys.exit(1)", "def is_user_channel_member(channel_id, u_id):\n for selected_id in database.get_channel_data(channel_id)[\"member_ids\"]:\n if selected_id == u_id:\n return True\n return False", "def _check_access(user, course_id):\r\n if not has_access(user, 'staff', course_id):\r\n raise Http404\r\n\r\n return", "def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def check_user(self, username):\n self.dbcursor.execute(self.SQL_CHECK_USER, [username])\n row = self.dbcursor.fetchone()\n if row:\n return True \n return False", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "def cog_check(self, ctx):\n if ctx.guild is None:\n raise commands.NoPrivateMessage()\n return True", "def slack_access(s, level=READ):\n try: slack_access_level = settings.SLACK_USERS[s.slack_uid]\n except: return False\n return (slack_access_level & level) != 0", "def has_user(self, username):\n\t\treturn username in self.users", "def test_already_existing_user(self):\n self.user.registration(\n \"Githeri\", \"[email protected]\", \"iwantgitheri\", \"iwantgitheri\")\n msg = self.user.registration(\"Githeri\",\n \"[email protected]\",\n \"iwantgitheri\",\n \"iwantgitheri\")\n self.assertEqual(msg, \"Your Account Already Active. 
Proceed to login\")", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def has_permission(self, request, view):\n user = request.user\n try:\n user.user_client\n return True\n except Exception:\n return False", "def can_read(self, user):\n raise Return(True)", "def credential_exist(cls,name):\n for credential in cls.credential_list:\n if credential.user_name == name:\n return True\n\n return False", "def test_group_is_not_private_user_is_not_member(self):\n thread = self.create_thread()\n user = self.create_user()\n self.assertTrue(thread.first_message.visible_to_user(user))", "async def check_in_game(user_id, ctx): # this is meant for when it is accessed by commands outside of BlackJack.\n check = ex.first_result(await ex.conn.fetchrow(\"SELECT COUNT(*) From blackjack.games WHERE player1 = $1 OR player2 = $1\", user_id))\n if check:\n await ctx.send(f\"> **{ctx.author}, you are already in a pending/active game. Please type {await ex.get_server_prefix_by_context(ctx)}endgame.**\")\n return True", "def invalid_user(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM my_users WHERE username=%(username)s\",\\\n {'username':username})\n rows = cur.rowcount\n if rows > 0:\n return True\n return False", "def test_access_positive(self, api):\n self.builder.add_user(api.get_user())\n self.builder.upd_access(api.get_user(), False)\n r1 = api.access_user(api.get_user(), True)\n access_true = self.builder.get_access(api.get_user())\n self.builder.del_user(api.get_user())\n assert access_true == 1\n assert r1.status_code == 200", "def get_everyone_denied(self):", "def check_user_and_login(self) -> Response:\n pass", "def check_p4gf_user_write_permission(self):\n gf_client_map = P4.Map()\n gf_client_map.insert(\"//...\", \"//client/...\")\n utp = p4gf_protect.UserToProtect(self.ctx.p4)\n prot = utp.user_to_protect(p4gf_const.P4GF_USER)\n gf_write_filter = prot.map_for_perm(p4gf_protect.WRITE)\n gf_write_filter = P4.Map.join(gf_write_filter, gf_client_map)\n if not gf_write_filter.includes('//{depot}/...'.format(depot=p4gf_const.P4GF_DEPOT)):\n raise RuntimeError(_('permission denied'))", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "async def scan_single_name(self, guild, member):\n if exceptionsManager.contains('exempt', member.name):\n return False\n if profanity.contains_profanity(member.name) or exceptionsManager.contains('restricted', member.name):\n log_channel = self.config.getint('Settings', 'channel')\n if log_channel != -1:\n await guild.get_channel(log_channel).send('User ||' + member.name + '|| has been kicked from the server.')\n await member.send(self.config.get('Settings', 'kick_message'))\n await guild.kick(member, reason='Kicked automatically on join. 
Improper name detected.')\n return True\n else:\n return False", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def authorized(guild, channel):\n\tif str(guild.id) in Settings.authorized_guilds:\n\t\tif str(channel.id) in Settings.authorized_channels[str(guild.id)]:\n\t\t\treturn True\n\t\telse:\n\t\t\t# logger.info('%s is not an authorized channel in %s', channel.id, guild.id)\n\t\t\tpass\n\telse:\n\t\t# logger.info('%s is not an authorized guild id', guild.id)\n\t\tpass\n\treturn False", "async def test_user_exist(hass):\n mocked_device = _create_mocked_device()\n _create_mock_config_entry(hass)\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}, data=CONF_DATA\n )\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_called_once()", "def ask_server_if_user_exists(self, sn):\n\n ### <------ Called from show_prompts\n print(\"//asking server to look up user...\")\n\n ### -------> Outbound to Server\n response = ServerOperations().is_user_here(sn)\n\n if response == True:\n print(f\"-=- Waiting for {sn} to accept file. Press A to abort.\")\n return True\n\n else:\n print(f\"{sn} not found. Try again.\")\n return False", "def test_user_in_group_can_access(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n\n utils.test_can_access(self, self.url)", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "async def permission_valid_check(cls):\n pass", "def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)", "def account_exists(username):\n if username in Stores.account_store:\n return \"Account exist, Sign in to account\", username\n else:\n return False", "def check_if_admin(connection,username):\r\n with connection:\r\n c = connection.execute(SELECT_USER_BY_ADMIN_PREVILAGES,(username,))\r\n return c.fetchone()", "def test_by_user_user_is_not_in_group_or_recipient(self):\n thread = self.create_thread()\n user = self.create_user()\n result = Thread.public.by_user(user=user)\n self.assertNotIn(thread, result)", "def username_check(username):\n\n try: \n pwd.getpwnam(username)\n print(\"User %s DOES EXIST. Try a different username.\" % (username)) \n return False\n\n except KeyError: \n print(\"User %s DOES NOT exist. 
Continuing...\" % (username)) \n return True", "def allowed_user_access_create_different_org(user):\n return user.has_perm(\"vnswww.userprofile_create_different_org\")", "def user_has_perms_on_client(user, client):\n if client and client not in user.clients:\n return False\n\n return True", "async def set_channel(self, ctx, channel):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n self.stream_channel = channel\n await self.bot.say(\"Channel sucessfully assigned.\")\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")", "def isOp(self, user, channel=None):\n if channel is not None:\n return user in self.opsets[channel]\n\n for ch in self.opsets:\n if user in self.opsets[ch]:\n return True\n return False", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def check_can_accept_new_users(membership):\n if membership is not None and not membership.is_left():\n return membership.can_accept_new_users()\n else:\n return False" ]
[ "0.6544167", "0.6521434", "0.64236474", "0.6420696", "0.63337475", "0.63098836", "0.6305338", "0.62802494", "0.6259734", "0.6218568", "0.6136835", "0.60846996", "0.6068947", "0.6049151", "0.6002369", "0.59844506", "0.597856", "0.5952112", "0.5945146", "0.5934715", "0.59036577", "0.59036577", "0.58986306", "0.58922917", "0.5889184", "0.58862996", "0.58634686", "0.5858502", "0.5858213", "0.5852138", "0.5837414", "0.5820886", "0.58205706", "0.5820149", "0.5815068", "0.58039784", "0.5794811", "0.5791003", "0.5789678", "0.5785786", "0.5774214", "0.5754293", "0.5749139", "0.57339716", "0.5707659", "0.5706649", "0.57030946", "0.56988215", "0.56947666", "0.5692606", "0.5686268", "0.56830513", "0.5680608", "0.5671939", "0.5671939", "0.56700873", "0.566943", "0.5647577", "0.564336", "0.5635183", "0.561079", "0.5606965", "0.5606209", "0.5606209", "0.5600748", "0.5599542", "0.5591905", "0.5588106", "0.55851686", "0.558437", "0.5579909", "0.55768466", "0.55736625", "0.55724657", "0.5570833", "0.5568116", "0.5563023", "0.55585325", "0.5557371", "0.5555237", "0.5540347", "0.5539225", "0.5534568", "0.5533177", "0.5533161", "0.55320305", "0.55317897", "0.55277216", "0.5527039", "0.5526823", "0.5525704", "0.55236006", "0.5523376", "0.5522448", "0.55024564", "0.549973", "0.5494814", "0.54941183", "0.54926735", "0.5491871" ]
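The string-encoded floats above are the record's negative scores, presumably index-aligned with its negatives list. A minimal sketch of how that pairing could be used to pick hard negatives — both short lists below are hypothetical stand-ins, not values taken whole from this record:

# Illustrative sketch only, not part of the dataset. Assumes the score list
# is index-aligned with the negatives list of the same record.
negatives = [
    "def has_user(self): ...",
    "def cog_check(self, ctx): ...",
]
negative_scores = [0.6544167, 0.6521434]  # parsed from the string-encoded floats

# Higher-scoring negatives sit closer to the positive document, so they make
# the hardest contrastive examples.
ranked = sorted(zip(negative_scores, negatives), reverse=True)
hard_negatives = [doc for _score, doc in ranked[:1]]
print(hard_negatives)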
0.7157796
0
Check if channel_join behaves correctly given valid input.
def test_channel_join_normal_case():
    clear()
    user = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    userchannel_id = channels_create(user['token'], 'userchannel', True)
    channel_join(joiner['token'], userchannel_id['channel_id'])
    randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])
    assert(randChannel_details['all_members'] == [
        {
            'u_id' : user['u_id'],
            'name_first' : 'first',
            'name_last' : 'last',
            'profile_img_url': ''
        },
        {
            'u_id' : joiner['u_id'],
            'name_first' : 'first',
            'name_last' : 'last',
            'profile_img_url': ''
        }
    ])
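The test above is this query's positive document. For context, a minimal in-memory sketch of the channel_join behaviour it exercises — the InputError/AccessError types and the dict-backed channels store are assumptions modelled on the implementations quoted in the negatives below, and the real API takes a token rather than a bare u_id; this is not the project's actual code:

# Minimal sketch of the behaviour test_channel_join_normal_case checks.
class InputError(Exception):
    """Invalid input, e.g. an unknown channel_id or a repeated join."""

class AccessError(Exception):
    """The user may not perform the requested action."""

channels = {}  # channel_id -> {'is_public': bool, 'member_ids': [u_id, ...]}

def channel_join(u_id, channel_id):
    channel = channels.get(channel_id)
    if channel is None:
        raise InputError("channel_id does not refer to a valid channel")
    if u_id in channel["member_ids"]:
        raise InputError("user is already a member of this channel")
    if not channel["is_public"]:
        raise AccessError("cannot join a private channel")
    channel["member_ids"].append(u_id)

# Usage mirroring the test: a second user joins a public channel.
channels[1] = {"is_public": True, "member_ids": [100]}
channel_join(200, 1)
assert channels[1]["member_ids"] == [100, 200]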
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
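The metadata above declares a triplet objective over (query, document, negatives). A short sketch of how such a record could be expanded into training triplets — the field values below are truncated stand-ins for this record's actual strings:

# One training example per (query, document, negative) combination.
record = {
    "query": "Check if channel_join behaves correctly given valid input.",
    "document": "def test_channel_join_normal_case(): ...",
    "negatives": [
        "def test_channel_join_except_channel(): ...",
        "def test_channel_join_invalid_channel(): ...",
    ],
}

triplets = [
    (record["query"], record["document"], negative)
    for negative in record["negatives"]
]
for anchor, positive, negative in triplets:
    print(anchor, "|", positive[:40], "|", negative[:40])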
[ "def test_channel_join_except_channel():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channels_create_v2(auth_token1, \"Chill Soc\", True)\n invalid_channel = 50\n \n with pytest.raises(InputError):\n channel_join_v2(auth_token2, invalid_channel)", "def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)", "def test_channel_join():\n\n # Clear the data structure\n clear_v1()\n \n\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", True)\n \n \n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])\n\n # Black box testing version in waiting\n # Check if the user is successfully added to the channel data frame\n assert channels_list_v2(auth_token2) == {\n 'channels': [\n \t{\n \t\t'channel_id': 1, # channel id start at 1 or 0 is worth checking ? It's currently start at 1.\n \t\t'name': 'Chill Soc',\n \t}\n ],\n }", "def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])", "def test_channel_join_except_private():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", False)\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])", "def test_channel_join_except_repetitive():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", True)\n\n\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])", "def test_channel_join_except_invalid_auth():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 
= auth_dict1[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", True)\n\n # Create invalid token for the test\n invalid_user = 999\n invalid_token = generate_token(invalid_user)\n\n with pytest.raises(AccessError):\n channel_join_v2(invalid_token, channel_id1[\"channel_id\"])", "def join(phenny, input):\n # Can only be done in privmsg by an admin\n if input.sender.startswith('#'): return\n if input.admin: \n channel, key = input.group(1), input.group(2)\n if not key: \n phenny.write(['JOIN'], channel)\n else: phenny.write(['JOIN', channel, key])", "def test_channel_join_private_global():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", False)\n\n\n # Global DREAM owner attempt to join a private channel \n channel_join_v2(auth_token1, channel_id1[\"channel_id\"])\n\n # Check if the global owner successfully join private channel\n assert channels_list_v2(auth_token1) == {\n 'channels': [\n \t{\n \t\t'channel_id': 1, # channel id start at 1 or 0 is worth checking ? It's currently start at 1.\n \t\t'name': 'Chill Soc',\n \t}\n ],\n }", "def test_channel_join_private_owner():\n clear()\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def join(self, channel):\n raise NotImplementedError", "def test_channel_leave_normal_case():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def test_double_join(self):\n now = datetime.datetime.now()\n\n channel = ChannelStatus.get_channel(channel_spec=self.channel)\n greet = channel.update_user_join_status(self.user, self.greeting, now=now)\n now += datetime.timedelta(seconds=0.1)\n\n greet = channel.update_user_join_status(self.user, self.greeting, now=now)\n self.assertEqual(False, greet)", "def channel_join(token, channel_id):\n\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n 
# checks if user is already a part of channel\n for user_id in curr_channel[\"member_ids\"]:\n if curr_id == user_id:\n raise error.InputError(description=\"user is joining a channel user is already in\")\n\n # this checks if the channel is empty (or new) in this case we make the new member an owner.\n if curr_channel[\"member_ids\"] == []:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n # this checks if the user is an owner of the slacker\n # if they are they are given owner privelages in the channel\n # else they are a member\n elif user_perms[\"permission_id\"] == 1:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is True:\n # adds the user into the channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is False:\n raise error.InputError(description=\"\"\"channel_join recieved a channel_id\n for a private channel\"\"\")", "def test_channel_leave_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_leave(leaver['token'], invalid_id)", "def newJoin(user, channel):\n\n message = \"\"\"\nWelcome to the official Slack for ZenCash!\n\n\nThe official links are:\nhttps://github.com/ZenCashOfficial/\nhttps://zencashofficial.io/\n\nNOTICE:\nDue to the recent plague that is SlackBot spamming with `/remind`, please do not click any links sent to you in a DM from slackbot.\n\nAdditionally, please copy the message to #spam so the Admins can ban the user.\n\nPlease remember to be civil, and have a great day!\n\"\"\"\n\n\n # General\n if channel == 'C4QGQ8SEM':\n return message\n\n # Bottesting\n if channel == \"C5JCER3NG\":\n return message", "def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass", "def test_new_channel(self):\n pattern = \"test.?.foo.?\"\n name1 = channel_layer.new_channel(pattern)\n self.assertIsInstance(name1, six.text_type)\n # Send a message and make sure new_channel on second pass changes\n channel_layer.send(name1, {\"value\": \"blue\"})\n name2 = channel_layer.new_channel(pattern)\n # Make sure the two ?s are replaced by the same string\n bits = name2.split(\".\")\n self.assertEqual(bits[1], bits[3], \"New channel random strings don't match\")\n # Make sure we can consume off of that new channel\n channel, message = channel_layer.receive_many([name1, name2])\n self.assertEqual(channel, name1)\n self.assertEqual(message, {\"value\": \"blue\"})", "async def joinchannel(self, ctx: commands.Context, *channels: str):\n for channel in channels:\n channel_query = self._channel_query(channel)\n\n if channel_query == None:\n await ctx.send(f\"Unable to join {channel}.\")\n continue\n\n channel = self.bot.get_channel(channel_query.id)\n guild = self.bot.get_guild(SERVER_ID)\n member = guild.get_member(ctx.author.id)\n\n if channel == None:\n await ctx.send(f\"Unable to join {channel}.\")\n continue\n\n # Don't let a user join the channel again if they are already in it.\n if channel.permissions_for(member).is_superset(JOINED_PERMISSIONS):\n await ctx.send(f\"You're already a member of {channel}.\")\n 
continue\n\n await channel.set_permissions(member, read_messages=True, reason=\"UQCSbot added.\")\n join_message = await channel.send(f\"{member.display_name} joined {channel.mention}\")\n await join_message.add_reaction(\"👋\")\n await ctx.send(f\"You've joined {channel.mention}.\")", "def on_join(self, raw_msg, source, **kwargs):", "def test_join_unknown(self):\n self.node.fake_message(Join(), sender='999')\n self.assertNoMessages()", "def is_channel(target, channel_prefixes='!&#+'):\n return len(target) > 1 and target[0] in channel_prefixes", "def test_irc_JOIN(self):\n self.client.irc_JOIN(self.user, [self.channel])\n self.client.irc_JOIN(\"[email protected]\", [\"#python\"])\n self.assertEqual(\n self.client.methods,\n [(\"joined\", (self.channel,)), (\"userJoined\", (\"Svadilfari\", \"#python\"))],\n )", "def single_channel():\n return True", "def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def on_join(bot, trigger):\n\tfor channel in trigger.args[0].split(','):\n\t\tlog(bot, channel, '*** {} has joined {}', trigger.nick, channel);", "def test_join(self):\n self.node.fake_message(Join(), sender='F999')\n self.assertMessage(['F999'], Welcome(state='state', slot=2,\n decisions={1: PROPOSAL1}))", "def Join(self, channel, key=\"\"):\n if channel != \"0\":\n time.sleep(1)\n self.s.send(\"JOIN %s%s\\n\" % (channel, (key and (\" \" + key))))\n logger.log(\"JOIN %s%s\" % (channel, (key and (\" \" + key)))).LogSend()", "def _join_channels(self, conn):\n channels = sorted(self.channels_to_join)\n logger.info('Channels to join: %s', ', '.join(c.name for c in channels))\n\n for channel in channels:\n logger.info('Joining channel %s ...', channel.name)\n conn.join(channel.name, channel.password or '')", "def joined(self):\n return str(self) in holder.bot.conn.channels.keys()", "def _joined_all(self):\n if not self.channels:\n return False\n for channel in self:\n if not channel.joined:\n return False\n return True", "def joinedChannel(self, channel, users):\n pass", "def fjoin(var, wrapper, message):\n # keep this and the event in def join() in sync\n evt = Event(\"join\", {\n \"join_player\": join_player,\n \"join_deadchat\": join_deadchat,\n \"vote_gamemode\": vote_gamemode\n })\n\n if not evt.dispatch(var, wrapper, message, forced=True):\n return\n noticed = False\n fake = False\n if not message.strip():\n evt.data[\"join_player\"](var, wrapper, forced=True)\n\n parts = re.split(\" +\", message)\n possible_users = {u.lower().nick for u in wrapper.target.users}\n to_join = []\n if not botconfig.DEBUG_MODE:\n match = complete_one_match(users.lower(parts[0]), possible_users)\n if match:\n to_join.append(match)\n else:\n for i, s in enumerate(parts):\n match = complete_one_match(users.lower(s), possible_users)\n if match:\n to_join.append(match)\n else:\n to_join.append(s)\n for tojoin in to_join:\n tojoin = 
tojoin.strip()\n # Allow joining single number fake users in debug mode\n if users.predicate(tojoin) and botconfig.DEBUG_MODE:\n user = users._add(wrapper.client, nick=tojoin) # FIXME\n evt.data[\"join_player\"](var, type(wrapper)(user, wrapper.target), forced=True, who=wrapper.source)\n continue\n # Allow joining ranges of numbers as fake users in debug mode\n if \"-\" in tojoin and botconfig.DEBUG_MODE:\n first, hyphen, last = tojoin.partition(\"-\")\n if first.isdigit() and last.isdigit():\n if int(last)+1 - int(first) > var.MAX_PLAYERS - len(list_players()):\n wrapper.send(messages[\"too_many_players_to_join\"].format(wrapper.source.nick))\n break\n fake = True\n for i in range(int(first), int(last)+1):\n user = users._add(wrapper.client, nick=str(i)) # FIXME\n evt.data[\"join_player\"](var, type(wrapper)(user, wrapper.target), forced=True, who=wrapper.source)\n continue\n if not tojoin:\n continue\n\n maybe_user = None\n\n for user in wrapper.target.users:\n if users.equals(user.nick, tojoin):\n maybe_user = user\n break\n else:\n if not users.predicate(tojoin) or botconfig.DEBUG_MODE:\n if not noticed: # important\n wrapper.send(\"{0}{1}\".format(wrapper.source, messages[\"fjoin_in_chan\"]))\n noticed = True\n continue\n\n if maybe_user is not None:\n if not botconfig.DEBUG_MODE and var.ACCOUNTS_ONLY:\n if maybe_user.account is None:\n wrapper.pm(messages[\"account_not_logged_in\"].format(maybe_user))\n return\n elif botconfig.DEBUG_MODE:\n fake = True\n\n if maybe_user is not users.Bot:\n if maybe_user is None and users.predicate(tojoin) and botconfig.DEBUG_MODE:\n maybe_user = users._add(wrapper.client, nick=tojoin) # FIXME\n evt.data[\"join_player\"](var, type(wrapper)(maybe_user, wrapper.target), forced=True, who=wrapper.source)\n else:\n wrapper.pm(messages[\"not_allowed\"])\n if fake:\n wrapper.send(messages[\"fjoin_success\"].format(wrapper.source, len(list_players())))", "async def addjoinchannel(self, ctx: commands.Context, channel: discord.TextChannel):\n db_session = self.bot.create_db_session()\n\n existing = db_session.query(Channel).filter(Channel.id == channel.id).one_or_none()\n if existing:\n existing.joinable = True\n else:\n db_session.add(Channel(id=channel.id, name=channel.name, joinable=True))\n\n db_session.commit()\n db_session.close()\n await ctx.send(f\"{channel.mention} was added as a joinable channel.\")", "def test_max_1_channel(\n token_network: Contract, get_accounts: Callable, create_channel: Callable\n) -> None:\n (A, B) = get_accounts(2)\n create_channel(A, B)\n\n with pytest.raises(TransactionFailed, match=\"TN/open: channel exists for participants\"):\n token_network.functions.openChannel(A, B).call()\n with pytest.raises(TransactionFailed, match=\"TN/open: channel exists for participants\"):\n token_network.functions.openChannel(B, A).call()", "def _check_has_channel(data):\r\n return re.findall(\r\n r'^:[a-zA-Z0-9_]+\\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'\r\n r'\\.tmi\\.twitch\\.tv '\r\n r'JOIN #([a-zA-Z0-9_]+)$', data)", "def test_open_via_channel(testchannel, callit):\n\n channel = testchannel.channel() if callit else testchannel.channel\n\n with channel as t:\n assert t.state == ChannelState.open\n\n assert testchannel.state == ChannelState.closed", "def join(self, source, channel):\n\n self.channel_map[channel].add(source[0])\n self.nick_map[source[0]].add(channel)\n\n self.log(\"*** {0:s} has joined {1:s}\".format(source[0], channel))", "async def react_join(a: Message):\n if a.action.member_id == club_id:\n await a.answer(r_register_help)\n 
stats.jincr()", "async def test_chatroom_broadcast():\n\n # Login all the users.\n tokens = {}\n for name in USERS:\n username = name\n password = name * 2 + '$12345'\n tokens[name] = await attempt_login(username, password)\n\n # Alice, Bob, Carl connect to the server.\n communicators = {}\n for name in ['alice', 'bob', 'carl']:\n communicator = make_communicator(tokens[name])\n communicators[name] = communicator\n connected, _ = await communicator.connect()\n assert connected\n motd = await communicator.receive_json_from()\n assert motd['type'] == 'notification'\n assert motd['code'] == 'api-motd'\n await communicator.send_json_to({'type': 'join', 'room_name': 'family'})\n await asyncio.sleep(0.5)\n # Alice expects 3 joins.\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'alice'\n assert joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['alice'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n # Bob expects 2 joins.\n joined = await communicators['bob'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'bob'\n assert joined['you']\n assert joined['room_name'] == 'family'\n joined = await communicators['bob'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert not joined['you']\n assert joined['room_name'] == 'family'\n # Carl expects 1 join.\n joined = await communicators['carl'].receive_json_from()\n assert joined['type'] == 'room:notification'\n assert joined['code'] == 'joined'\n assert joined['user'] == 'carl'\n assert joined['you']\n assert joined['room_name'] == 'family'\n # Now Alice sends a \"Hello guys\" message, and bob and carl\n # will read it.\n await communicators['alice'].send_json_to({'type': 'message', 'room_name': 'family', 'body': 'Hello guys'})\n message = await communicators['alice'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n message = await communicators['bob'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n message = await communicators['carl'].receive_json_from()\n assert message['type'] == 'room:notification'\n assert message['code'] == 'message'\n assert not message['you']\n assert message['user'] == 'alice'\n assert message['room_name'] == 'family'\n assert message['body'] == 'Hello guys'\n # Now they all leave the channel.\n for name in ['alice', 'bob', 'carl']:\n await communicators[name].send_json_to({'type': 'part', 'room_name': 'family'})\n await asyncio.sleep(0.5)\n # And they will receive all the part messages.\n parted = await 
communicators['alice'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['bob'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['bob'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'alice'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'bob'\n assert not parted['you']\n assert parted['room_name'] == 'family'\n parted = await communicators['carl'].receive_json_from()\n assert parted['type'] == 'room:notification'\n assert parted['code'] == 'parted'\n assert parted['user'] == 'carl'\n assert parted['you']\n assert parted['room_name'] == 'family'\n # And the 3 will disconnect.\n for name in ['alice', 'bob', 'carl']:\n await communicator.disconnect()", "def rejoin(inp):\n channel = inp.text if inp.text.startswith('#') else '#' + inp.text\n inp.raw(['JOIN', channel])\n return lex.rejoin(channel=channel)", "async def on_member_join(member: discord.Member):\n for channel in member.server.channels:\n print(channel)\n if channel == \"general\":\n await member.send(f\"\"\"Welcome to the server {member.mention}!\"\"\")", "def test_channel_hash(self):\n acq_channel_1 = AcquireChannel(123)\n acq_channel_2 = AcquireChannel(123)\n\n hash_1 = hash(acq_channel_1)\n hash_2 = hash(acq_channel_2)\n\n self.assertEqual(hash_1, hash_2)", "def join(self):\n channel = self.data[0]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(SOCKET_TO_USERID.get(self.source, None))\n\n if user_pseudonym and self.target:\n target_server = self.target[1]\n if(BANHANDLER.is_banned_from_channel(user_pseudonym, target_server, channel)):\n self.source[0].send(\":orcbot!~@localhost PRIVMSG \"+SOCKET_TO_USERID[self.source]+\" :You're banned from \"+channel+\"\\r\\n\")\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].sendall(self.message)\n self.send()", "async def removejoinchannel(self, ctx: commands.Context, channel: discord.TextChannel):\n db_session = self.bot.create_db_session()\n\n try:\n existing = db_session.query(Channel).filter(Channel.id == channel.id).one()\n existing.joinable = False\n except NoResultFound:\n await ctx.send(f\"There was no record for {channel.mention}. 
The channel is not currently joinable.\")\n return\n\n db_session.commit()\n db_session.close()\n await ctx.send(f\"{channel.mention} was removed as a joinable channel.\")", "def test_channel_leave_invalid_user():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_leave(leaver['token'], userchannel_id['channel_id'])", "def test__GuildJoinRequest__guild():\n guild_id_0 = 202305170038\n guild_id_1 = 202305170039\n \n for input_value, expected_output in (\n (0, None),\n (guild_id_0, None),\n (guild_id_1, Guild.precreate(guild_id_1)),\n ):\n event = GuildJoinRequest(\n guild_id = input_value,\n )\n \n vampytest.assert_is(event.guild, expected_output)", "async def join(self, channel : str):\n # todo: check if # is required. If it is, append it at the start if DNE.\n await self._connection.join(channel)", "def test_channel_list1():\n reset_data()\n user1 = auth_register(\"123eff45\", \"xxx\", \"yyyy\", email=\"[email protected]\")\n owner1 = auth_register(\"123eff45\", \"xxx\", \"yyyy\", email=\"[email protected]\")\n channel1_1 = channels_create(owner1['token'], \"channel1\", True)['channel_id']\n channel_join(user1['token'], channel1_1)\n channel_list1 = channels_list(user1['token'])\n channels = [channel['channel_id'] for channel in channel_list1]\n assert channels == [channel1_1]\n print(\"=========pass test1 : only one channel in channel_list========\")", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False", "def irc_JOIN(self, prefix, params):\n user = re.match(self.user_regex, prefix)\n channel = params[0]\n\n self.logger.debug(\n \"%s!%s@%s joined %s\" %\n (user.group(1), user.group(2), user.group(3), channel)\n )\n\n self.event_manager.fire(\"irc.join\", user, channel)", "def test_join_after_invite(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n self.helper.invite(r1, u1, u2, tok=u1token)\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 0,\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], +1\n )\n self.assertEqual(\n r1stats_post[\"invited_members\"] - r1stats_ante[\"invited_members\"], -1\n )", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def test__validate_channels__passing(input_value):\n return validate_channels(input_value)", "def test_state_channel_identifier_invalid(\n token_network: Contract, get_accounts: Callable, create_channel: Callable\n) -> None:\n (A, B, C) = get_accounts(3)\n channel_id = 0\n\n pairs = permutations([A, B, C], 2)\n for pair in pairs:\n state = token_network.functions.getChannelState(channel_id, 
*pair).call()\n settle_block_number = token_network.functions.settleable_after(channel_id).call()\n assert settle_block_number == 0\n assert state == ChannelState.NONEXISTENT\n\n for pair in pairs:\n create_channel(*pair)\n state = token_network.functions.getChannelState(0, *pair).call()\n settle_block_number = token_network.functions.settleable_after(0)\n assert settle_block_number == 0 # initialized on channel close\n assert state == ChannelState.OPENED\n\n current_counter = token_network.functions.channel_counter().call()\n\n for pair in pairs:\n state = token_network.functions.getChannelState(current_counter + 1, *pair).call()\n settle_block_number = token_network.functions.settleable_after(current_counter + 1).call()\n assert settle_block_number == 0\n assert state == ChannelState.NONEXISTENT", "def join_channel(self, channel):\r\n self._send('JOIN #%s\\r\\n' % channel)", "def join(var, wrapper, message):\n # keep this and the event in fjoin() in sync\n evt = Event(\"join\", {\n \"join_player\": join_player,\n \"join_deadchat\": join_deadchat,\n \"vote_gamemode\": vote_gamemode\n })\n if not evt.dispatch(var, wrapper, message, forced=False):\n return\n if var.PHASE in (\"none\", \"join\"):\n if wrapper.private:\n return\n if var.ACCOUNTS_ONLY:\n if wrapper.source.account is None:\n wrapper.pm(messages[\"not_logged_in\"])\n return\n if evt.data[\"join_player\"](var, wrapper) and message:\n evt.data[\"vote_gamemode\"](var, wrapper, message.lower().split()[0], doreply=False)\n\n else: # join deadchat\n if wrapper.private and wrapper.source is not wrapper.target:\n evt.data[\"join_deadchat\"](var, wrapper.source)", "def test_reopen_channel(\n token_network: Contract,\n get_accounts: Callable,\n create_close_signature_for_no_balance_proof: Callable,\n time_travel: Callable,\n get_block_timestamp: Callable,\n) -> None:\n (A, B) = get_accounts(2)\n call_and_transact(token_network.functions.openChannel(A, B))\n channel_identifier1 = token_network.functions.getChannelIdentifier(A, B).call()\n channel_counter1 = token_network.functions.participants_hash_to_channel_identifier(\n get_participants_hash(A, B)\n ).call()\n\n # Opening twice fails\n with pytest.raises(TransactionFailed, match=\"TN/open: channel exists for participants\"):\n token_network.functions.openChannel(A, B).call()\n\n # Close channel\n closing_sig = create_close_signature_for_no_balance_proof(A, channel_identifier1)\n call_and_transact(\n token_network.functions.closeChannel(\n channel_identifier1,\n B,\n A,\n EMPTY_BALANCE_HASH,\n 0,\n EMPTY_ADDITIONAL_HASH,\n EMPTY_SIGNATURE,\n closing_sig,\n ),\n {\"from\": A},\n )\n\n # Reopen Channel before settlement fails\n with pytest.raises(TransactionFailed, match=\"TN/open: channel exists for participants\"):\n token_network.functions.openChannel(A, B).call()\n\n # Settlement window must be over before settling the channel\n time_travel(get_block_timestamp() + TEST_SETTLE_TIMEOUT + 2)\n\n # Settle channel\n call_and_transact(\n token_network.functions.settleChannel(\n channel_identifier1,\n A,\n 0,\n 0,\n LOCKSROOT_OF_NO_LOCKS,\n B,\n 0,\n 0,\n LOCKSROOT_OF_NO_LOCKS,\n ),\n {\"from\": A},\n )\n\n # Reopening the channel should work iff channel is settled\n call_and_transact(token_network.functions.openChannel(A, B))\n channel_identifier2 = token_network.functions.getChannelIdentifier(A, B).call()\n assert channel_identifier2 != channel_identifier1\n assert (\n token_network.functions.participants_hash_to_channel_identifier(\n get_participants_hash(A, B)\n ).call()\n == 
channel_counter1 + 1\n )\n\n state = token_network.functions.getChannelState(channel_identifier2, A, B).call()\n assert state == ChannelState.OPENED\n\n (\n A_deposit,\n A_withdrawn,\n A_is_the_closer,\n A_balance_hash,\n A_nonce,\n A_locksroot,\n A_locked_amount,\n ) = token_network.functions.getChannelParticipantInfo(channel_identifier2, A, B).call()\n assert A_deposit == 0\n assert A_withdrawn == 0\n assert A_is_the_closer is False\n assert A_balance_hash == EMPTY_BALANCE_HASH\n assert A_nonce == 0\n assert A_locksroot == NONEXISTENT_LOCKSROOT\n assert A_locked_amount == 0\n\n (\n B_deposit,\n B_withdrawn,\n B_is_the_closer,\n B_balance_hash,\n B_nonce,\n B_locksroot,\n B_locked_amount,\n ) = token_network.functions.getChannelParticipantInfo(channel_identifier2, B, A).call()\n assert B_deposit == 0\n assert B_withdrawn == 0\n assert B_is_the_closer is False\n assert B_balance_hash == EMPTY_BALANCE_HASH\n assert B_nonce == 0\n assert B_locksroot == NONEXISTENT_LOCKSROOT\n assert B_locked_amount == 0", "def on_badchannelkey(self, conn, event) -> None:\n channel_name = event.arguments[0]\n logger.warning('Cannot join channel %s (bad key).', channel_name)", "def test_join_first_time(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 1,\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], 1\n )", "async def joinForce(self, ctx):\n print(\"joining\")\n channel = ctx.author.voice.channel\n await channel.connect()", "def join_channel(self, server, username, channel):\n for sock in self.socks:\n if sock.server == server and username == sock.username:\n if sock.channel == channel:\n return sock\n sock.send(\"JOIN {}\\r\\n\".format(channel))\n print (\"[!] 
channel {} joined on {} with username {}\".format(channel, server, username))\n sock = IRC.Socket(self.dispatcher, sock.sock, username, server, channel)\n self.replyer.add_sock(sock)\n return sock\n return self.add_sock(server=server, username=username, channel=channel)", "def test_open_channel_state(token_network: Contract, get_accounts: Callable) -> None:\n (A, B) = get_accounts(2)\n\n channel_counter = token_network.functions.channel_counter().call()\n participants_hash = token_network.functions.getParticipantsHash(A, B).call()\n\n assert (\n token_network.functions.participants_hash_to_channel_identifier(participants_hash).call()\n == 0\n )\n assert token_network.functions.getChannelIdentifier(A, B).call() == 0\n\n call_and_transact(token_network.functions.openChannel(A, B))\n channel_identifier = token_network.functions.getChannelIdentifier(A, B).call()\n\n assert token_network.functions.channel_counter().call() == channel_counter + 1\n assert (\n token_network.functions.participants_hash_to_channel_identifier(participants_hash).call()\n == channel_counter + 1\n )\n\n state = token_network.functions.getChannelState(channel_identifier, A, B).call()\n assert state == ChannelState.OPENED\n\n response = token_network.functions.getChannelParticipantInfo(channel_identifier, A, B).call()\n A_deposit = response[ParticipantInfoIndex.DEPOSIT]\n A_withdrawn = response[ParticipantInfoIndex.WITHDRAWN]\n A_is_the_closer = response[ParticipantInfoIndex.IS_CLOSER]\n A_balance_hash = response[ParticipantInfoIndex.BALANCE_HASH]\n A_nonce = response[ParticipantInfoIndex.NONCE]\n A_locksroot = response[ParticipantInfoIndex.LOCKSROOT]\n A_locked_amount = response[ParticipantInfoIndex.LOCKED_AMOUNT]\n assert A_deposit == 0\n assert A_withdrawn == 0\n assert A_is_the_closer is False\n assert A_balance_hash == EMPTY_BALANCE_HASH\n assert A_nonce == 0\n assert A_locksroot == NONEXISTENT_LOCKSROOT\n assert A_locked_amount == 0\n\n (\n B_deposit,\n B_withdrawn,\n B_is_the_closer,\n B_balance_hash,\n B_nonce,\n B_locksroot,\n B_locked_amount,\n ) = token_network.functions.getChannelParticipantInfo(channel_identifier, B, A).call()\n assert B_deposit == 0\n assert B_withdrawn == 0\n assert B_is_the_closer is False\n assert B_balance_hash == EMPTY_BALANCE_HASH\n assert B_nonce == 0\n assert B_locksroot == NONEXISTENT_LOCKSROOT\n assert B_locked_amount == 0", "def test_join_after_leave(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n self.helper.join(r1, u2, tok=u2token)\n self.helper.leave(r1, u2, tok=u2token)\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 0,\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], +1\n )\n self.assertEqual(\n r1stats_post[\"left_members\"] - r1stats_ante[\"left_members\"], -1\n )", "def test_send_recv(self):\n channel_layer.send(\"sr_test\", {\"value\": \"blue\"})\n channel_layer.send(\"sr_test\", {\"value\": \"green\"})\n channel_layer.send(\"sr_test2\", {\"value\": \"red\"})\n # Get just one first\n channel, message = 
channel_layer.receive_many([\"sr_test\"])\n self.assertEqual(channel, \"sr_test\")\n self.assertEqual(message, {\"value\": \"blue\"})\n # And the second\n channel, message = channel_layer.receive_many([\"sr_test\"])\n self.assertEqual(channel, \"sr_test\")\n self.assertEqual(message, {\"value\": \"green\"})\n # And the other channel with multi select\n channel, message = channel_layer.receive_many([\"sr_test\", \"sr_test2\"])\n self.assertEqual(channel, \"sr_test2\")\n self.assertEqual(message, {\"value\": \"red\"})", "def joined(self, channel):\n log.msg(\"GbRobot joined \" + self.factory.channel)", "def test_channelCorrection(self):\n self.client.invite(\"foo\", \"bar\")\n self.assertEqual(self.client.lines, [\"INVITE foo #bar\"])", "def send_join(self, channel, key: str = None) -> None:\n\n if key:\n self.send_line('JOIN {} {}'.format(channel, key))\n else:\n self.send_line('JOIN {}'.format(channel))", "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "async def join(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if player.is_connected:\n return await ctx.send(\"I'm already in a voice channel :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"You are not in a voice channel :no_entry:\")\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n await ctx.send(\"Summoned to `{}` <:done:403285928233402378>\".format(ctx.author.voice.channel.name))", "def is_channel(self):\n return True", "def test_channel_leave_invalid_token():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'First', 'Last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n auth_logout(user['token'])\n with pytest.raises(AccessError):\n channel_leave(user['token'], userchannel_id['channel_id'])", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def join_channels(channels_to_join):\n bot_id = settings.SLACK_BOT_ID\n limit = 200\n cursor = \"\"\n channels_to_join = set(channels_to_join)\n\n while True:\n resp = slack_client.api_call(\"conversations.list\", limit=limit, cursor=cursor)\n\n if \"response_metadata\" in resp:\n cursor = resp[\"response_metadata\"][\"next_cursor\"]\n\n slack_channels = resp[\"channels\"]\n for channel in slack_channels:\n if channel[\"name\"] in channels_to_join:\n channels_to_join.remove(channel[\"name\"])\n channel_id = channel[\"id\"]\n resp = slack_client.api_call(\n \"conversations.invite\", channel=channel_id, users=bot_id\n )\n if resp.get(\"ok\"):\n logger.info(f\"Bot was invited to channel {channel_id}\")\n\n if cursor == \"\":\n break\n\n if not channels_to_join:\n break\n\n if channels_to_join:\n logger.warning(f\"Unable to find slack channels: {channels_to_join}\")\n else:\n logger.info(\"Bot in all required channels.\")", "async def join(self, msg, *, channel: discord.VoiceChannel = None):\n if msg.voice_client is not None:\n return await msg.send(f\"Bot is already in a voice channel\\nDid you mean to use {msg.prefix}moveTo\")\n\n if msg.voice_client is None:\n if channel is None:\n return await msg.author.voice.channel.connect(), await msg.message.add_reaction(emoji='✅')\n\n return await channel.connect(), await msg.message.add_reaction(emoji='✅')\n\n else:\n if msg.voice_client.is_playing() is False and not self.player[msg.guild.id]['queue']:\n return 
await msg.author.voice.channel.connect(), await msg.message.add_reaction(emoji='✅')", "def channel_join(token, channel_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n user = users.get(auth_u_id)\n if user[\"is_admin\"] is not True and channel[\"is_public\"] is False:\n raise AccessError(\"channel is not public\")\n\n channels.set(channel_id, \"all_members\", auth_u_id)", "async def tod_join(self, ctx, *args):\n if ctx.author not in self.players:\n self.players.append(ctx.author)\n message = f\"{ctx.author.mention} has been added to the game!\"\n await ctx.send(message)\n else:\n message = f\"{ctx.author.mention} has already joined!\"\n await ctx.send(message)\n\n # Updates the role if channel exists\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"truth-or-dare\"):\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.add_roles(role)\n return\n\n # Creates the channel if it doesn't exist\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n bots = discord.utils.get(ctx.guild.roles, name=\"Bots\")\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False, send_messages=False),\n bots: discord.PermissionOverwrite(read_messages=True, send_messages=True),\n role: discord.PermissionOverwrite(read_messages=True, send_messages=True, connect=True, speak=True)\n }\n await ctx.guild.create_text_channel('truth-or-dare', overwrites=overwrites)\n await ctx.guild.create_voice_channel('secret-voice', overwrites=overwrites)\n\n # Adds the role\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.add_roles(role)", "async def _check_channel(\n self, starboard: StarboardEntry, channel: discord.TextChannel\n ) -> bool:\n if starboard.whitelist_channel:\n return channel.id in starboard.whitelist_channel\n else:\n return channel.id not in starboard.blacklist_channel", "async def join(self, ctx, key: str):\n await ctx.message.delete()\n async with ctx.typing():\n data = await self.config.guild(ctx.guild).all()\n if data[\"private\"]:\n try:\n if ctx.author.voice.channel.id == data[\"pstart\"]:\n if key in data[\"pchannels\"]:\n await ctx.author.move_to(ctx.guild.get_channel(data[\"pchannels\"][key]))\n else:\n await self.sendNotInStartChannelMessage(ctx, data[\"pstart\"])\n except AttributeError:\n await self.sendNotInStartChannelMessage(ctx, data[\"pstart\"])\n else:\n await ctx.send(_(\"Private rooms are not enabled on this server.\"))", "async def join(self, ctx, *, channel: discord.VoiceChannel):\n \n if ctx.voice_client is not None:\n return await ctx.voice_client.move_to(channel)\n else:\n return await ctx.voice_client.move_to(bot.get_channel(247531960488951815))\n \n await channel.connect()", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False", "def join_channel(channel):\n if not settings.SLACK_TOKEN:\n return {'ok': False, 'error': 'config_error'}\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.conversations_join(channel=channel)\n assert response['ok'] is True\n return {'ok': response['ok']}\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response", "async def try_initialize_channel_move(client, event, source_channel, target_channel):\n CHANNEL_MOVER_ACTIVE_FROM.add(source_channel.id)\n CHANNEL_MOVER_ACTIVE_TO.add(target_channel.id)\n try:\n if 
target_channel.is_in_group_thread():\n channel_id = target_channel.parent_id\n else:\n channel_id = target_channel.id\n \n \n if event.type is InteractionType.message_component:\n await client.interaction_component_acknowledge(event)\n \n await (\n type(client).interaction_response_message_create\n if event.type is InteractionType.application_command else\n type(client).interaction_followup_message_create\n )(\n client,\n event,\n 'Initializing channel move.',\n show_for_invoking_user_only = True,\n )\n \n webhook = await get_webhook(client, channel_id)\n except:\n CHANNEL_MOVER_ACTIVE_FROM.discard(source_channel.id)\n CHANNEL_MOVER_ACTIVE_TO.discard(target_channel.id)\n raise\n \n return webhook", "async def test_channel_only(self):\n expected_channel = MockTextChannel()\n actual_channel, duration = self.cog.parse_silence_args(MockContext(), expected_channel, 10)\n\n self.assertEqual(expected_channel, actual_channel)\n self.assertEqual(10, duration)", "def test_request_channel_is_none(self):\n CanInfo.objects.filter(can_id=self.UUID).update(channel_name=None)\n self.assertFalse(send_rotate_to_can(self.USER, self.BIN_NUM))", "def check_chan(chan):\n chan = sorted(chan)\n assert all([c in range(16) for c in chan]),\\\n \"All spectrum channels must be between and 15\"\n num = len([c for c in chan if c < 8])\n chan = chan[:num],chan[num:]\n nchan = max(len(chan[0]),len(chan[1]))\n while nchan not in [1,2,4,8]:\n nchan += 1\n if len(chan[0]) not in (0,nchan) or len(chan[1]) not in (0,nchan):\n print(\"[Warning] Cannot open this combination of channels on Spectrum\")\n print(\" I will add useless channels for you\")\n while 0 < len(chan[0]) < nchan:\n left = [i for i in range(8) if i not in chan[0]]\n chan[0].append(left[0])\n while 0 < len(chan[1]) < nchan:\n left = [i for i in range(8,16) if i not in chan[1]]\n chan[1].append(left[0])\n rchan = []\n chan = sorted(chan[0]),sorted(chan[1])\n if chan[0] and chan[1]:\n for c1,c2 in zip(*chan):\n rchan.extend([c1,c2])\n else:\n rchan = chan[0] or chan[1]\n print(\"DEBUG channel order:\",rchan)\n return rchan", "async def join(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n player = ctx.message.author.name\n if player.lower() in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}... 
you're already playing Truth or Dare here!\".format(room))\n else:\n tod_games[room]['participants'][player.lower()] = {'spins': 0}\n await amor_manager.say(\"{} has joined Truth or Dare!\".format(player))", "def test_open_channel_call(token_network: Contract, get_accounts: Callable) -> None:\n (A, B) = get_accounts(2)\n\n # Validation failure with the number zero instead of an address\n with pytest.raises(ValidationError):\n token_network.functions.openChannel(0x0, B)\n\n # Validation failure with the empty string instead of an address\n with pytest.raises(ValidationError):\n token_network.functions.openChannel(\"\", B)\n\n # Validation failure with an odd-length string instead of an address\n with pytest.raises(ValidationError):\n token_network.functions.openChannel(NOT_ADDRESS, B)\n\n # Validation failure with the number zero instead of an address\n with pytest.raises(ValidationError):\n token_network.functions.openChannel(A, 0x0)\n\n # Validation failure with the empty string instead of an address\n with pytest.raises(ValidationError):\n token_network.functions.openChannel(A, \"\")\n\n # Validation failure with an odd-length string instead of an address\n with pytest.raises(ValidationError):\n token_network.functions.openChannel(A, NOT_ADDRESS)\n\n # Transaction failure with the zero address\n with pytest.raises(TransactionFailed, match=\"TN: participant address zero\"):\n token_network.functions.openChannel(EMPTY_ADDRESS, B).call()\n\n # Transaction failure with the zero address\n with pytest.raises(TransactionFailed, match=\"TN: partner address zero\"):\n token_network.functions.openChannel(A, EMPTY_ADDRESS).call()\n\n # Cannot open a channel between 2 participants with the same address\n with pytest.raises(TransactionFailed, match=\"TN: identical addresses\"):\n token_network.functions.openChannel(A, A).call()", "async def setjoinlogchannel(self, ctx, channel):\r\n guild = ctx.message.guild\r\n channel = discord.utils.get(guild.channels, name=channel)\r\n functions.updatesql(server=ctx.guild.id, joinchannel=channel.id)\r\n await ctx.send(embed=discord.Embed(title='Sucsessful!'))", "async def _join(self, ctx: commands.Context):\n\n destination = ctx.author.voice.channel\n if ctx.voice_state.voice:\n await ctx.voice_state.voice.move_to(destination)\n return\n ctx.voice_state.voice = await destination.connect()\n await ctx.message.add_reaction('✅')", "def test_datachannel_send_wait_notopened(testloop, testchannel):\n\n val = []\n\n @testchannel\n async def one(data):\n \"\"\"one\"\"\"\n val.append(data)\n\n async def run():\n \"\"\"run\"\"\"\n async for i in aiter(range(10)):\n asyncio.ensure_future(testchannel.send(i))\n await asyncio.sleep(0)\n await testchannel.join()\n\n testloop.run_until_complete(run())\n\n assert not val", "def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def test__validate_threads__0():\n channel_id = 202306130027\n channel_name = 'Koishi'\n \n channel = Channel.precreate(\n channel_id,\n channel_type = ChannelType.guild_thread_private,\n name = channel_name,\n )\n \n for input_value, expected_output in (\n (None, {}),\n ([], {}),\n ({}, {}),\n ([channel], {channel_id: channel}),\n ({channel_id: channel}, {channel_id: channel}),\n ):\n output = validate_threads(input_value)\n vampytest.assert_eq(output, expected_output)", "async def on_member_join(self, member):\n if channel is not None:\n await channel.send(\n 'I, too, wonder why I am here, 
{0.mention}?'.format(member)\n )", "def on_join(self, event):\n self.pre_check(event)\n state = event.guild.get_member(event.author).get_voice_state()\n if not state:\n return api_loop(\n event.channel.send_message,\n \"You must be in a voice channel to use that command.\",\n )\n if event.guild.id not in self.guilds:\n try:\n client = state.channel.connect(mute=False)\n except VoiceException as e:\n return api_loop(\n event.channel.send_message,\n \"Failed to connect to voice: `{}`\".format(e),\n )\n else:\n self.guilds[event.guild.id] = psuedo_queue(\n self,\n player=Player(client),\n guild_id=event.guild.id,\n )\n return", "def test_react_invalid_message_id_in_different_channel():\n clear()\n user_a, user_b = register_n_users(2)\n # user_a create a channel\n channels_create(user_a[\"token\"], \"public_channel_a\", True)[\"channel_id\"]\n # user_b create a channel and send message in his own channel\n public_channel_id_b = channels_create(user_b[\"token\"], \"public_channel_b\", True)[\n \"channel_id\"\n ]\n message_id_b = message_send(\n user_b[\"token\"], public_channel_id_b, \"I am in channel_b\"\n )[\"message_id\"]\n # user_a should not be able to react the the message in the public_channel_b\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], message_id_b, 1)", "def join(self, channel, password=None):\n\n if not channel:\n return\n\n # do join with password\n if password:\n self._raw('JOIN %s %s' % (channel, password))\n try:\n self.channels[channel.lower()]['key'] = password\n self.channels.save()\n except KeyError:\n pass\n else:\n # do pure join\n self._raw('JOIN %s' % channel)", "def handle_join_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n words = lobby_command.split()\n roomname = words[1]\n print(f\"Handling join room {roomname} for {user}\")\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Requested roomname found..\")\n if user in _room.room_attrbts['members']:\n msg = f\"Client {user} is already a member of room {_room.name}\"\n self.log_and_send(client_socket, msg)\n return\n else:\n _room.room_attrbts['members'].add(user)\n msg = f\"{user} successfully joined membership of room {roomname}\"\n self.log_and_send(client_socket, msg)\n return\n msg = f'Client {user} passed invalid room. Could not join room {roomname}'\n self.log_and_send(client_socket, msg)\n return", "async def join(self, ctx):\n if lobby.count(f\"{ctx.author.mention}\") == 0:\n add(lobby, ctx.author.mention)\n await ctx.channel.send(\"You've been added to the queue!\")\n else:\n await ctx.channel.send(\"You're already queued for a match!\")\n await ctx.channel.send(embed=lobby_list())\n if len(lobby) == teamSizeMax:\n if roster:\n await ctx.channel.send(\n \"There is currently a match being picked right now, please try again after picking is finished\")\n else:\n assign_captains()", "def check_party_channel(ctx: commands.Context) -> bool:\n if get_active_feature(ctx.channel) != ActivationState.PARTY:\n raise error_handling.InactiveChannelError()\n else:\n return True" ]
[ "0.71999747", "0.7167301", "0.70121294", "0.68428326", "0.6786554", "0.66476434", "0.6459602", "0.6427017", "0.6345139", "0.6233202", "0.6227343", "0.6181276", "0.6101245", "0.59976065", "0.5990562", "0.59204394", "0.5870411", "0.5867968", "0.58485466", "0.5762381", "0.575636", "0.57192934", "0.57131183", "0.56927025", "0.56910086", "0.56844056", "0.56553686", "0.5654209", "0.56485415", "0.5636963", "0.5626627", "0.56144774", "0.56092167", "0.5609146", "0.558514", "0.5583367", "0.55688244", "0.55512524", "0.5493963", "0.5476994", "0.5463209", "0.54594916", "0.5444484", "0.54434353", "0.5435476", "0.5423125", "0.54062486", "0.54049283", "0.5391433", "0.5389018", "0.537871", "0.535671", "0.5354949", "0.5353166", "0.53491694", "0.5341431", "0.5339126", "0.5332983", "0.5331678", "0.5323757", "0.53223383", "0.53133404", "0.5309114", "0.5298327", "0.52981865", "0.5297309", "0.52882093", "0.5284355", "0.52782935", "0.52634066", "0.5255523", "0.52520347", "0.5246626", "0.5243224", "0.5241463", "0.52336067", "0.52208656", "0.5216116", "0.5198705", "0.51958644", "0.51825565", "0.51814675", "0.517243", "0.5165099", "0.51501817", "0.5145024", "0.514481", "0.51443714", "0.5139459", "0.51360774", "0.5124166", "0.5123621", "0.51192987", "0.511857", "0.51167166", "0.51128864", "0.50977874", "0.5097411", "0.5097039", "0.5090757" ]
0.6979165
3
if the channel is private, but no invite is given to the user, then the owner of flockr can join the channel
def test_channel_join_private_owner():
    clear()
    joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    user = auth_register('[email protected]', '123abc!@#', 'first', 'last')
    userchannel_id = channels_create(user['token'], 'userchannel', False)
    channel_join(joiner['token'], userchannel_id['channel_id'])
    randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])
    assert(randChannel_details['all_members'] == [
        {
            'u_id' : user['u_id'],
            'name_first' : 'first',
            'name_last' : 'last',
            'profile_img_url': ''
        },
        {
            'u_id' : joiner['u_id'],
            'name_first' : 'first',
            'name_last' : 'last',
            'profile_img_url': ''
        }
    ])
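A minimal sketch of a channel_join that would satisfy the behaviour the query and test above describe: ordinary users are refused entry to a private channel, but the global flockr owner (the first registered user, permission id 1) may join without an invite. This is an illustration under stated assumptions, not the project's actual implementation; the database helper module and its function names are assumptions modelled on helpers that appear in this record's negatives, and the exception classes are stubbed here for self-containment.

class AccessError(Exception):
    """Raised when a user lacks permission for an action (stub for illustration)."""

class InputError(Exception):
    """Raised when an argument such as a channel id is invalid (stub for illustration)."""

def channel_join(token, channel_id):
    # Resolve the caller; assumed to raise AccessError on an invalid token.
    u_id = database.get_current_user(token)
    channel = database.get_channel_data(channel_id)
    if channel is None:
        raise InputError("channel_id does not exist")
    if u_id in channel["member_ids"]:
        raise AccessError("user is already a member of this channel")
    # Private channels admit only the global flockr owner without an invite.
    perms = database.get_permission_dict(u_id)
    if not channel["is_public"] and perms["permission_id"] != 1:
        raise AccessError("channel is private and user was not invited")
    channel["member_ids"].append(u_id)
    database.set_channel_data(channel)
    return {}

Under this sketch the test passes because joiner is registered first and therefore holds permission id 1, so the private-channel check does not block them.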
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def join(self, ctx, invite : discord.Invite):\r\n if ctx.message.author.id == \"481270883701358602\":\r\n await self.client.accept_invite(invite)\r\n await self.client.say(\"Joined the server.\")\r\n else:\r\n await self.client.say(\"**Owner only command.**\")", "def channel_join(token, channel_id):\n\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # checks if user is already a part of channel\n for user_id in curr_channel[\"member_ids\"]:\n if curr_id == user_id:\n raise error.InputError(description=\"user is joining a channel user is already in\")\n\n # this checks if the channel is empty (or new) in this case we make the new member an owner.\n if curr_channel[\"member_ids\"] == []:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n # this checks if the user is an owner of the slacker\n # if they are they are given owner privelages in the channel\n # else they are a member\n elif user_perms[\"permission_id\"] == 1:\n # adds the user into channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is True:\n # adds the user into the channel_member\n curr_channel[\"member_ids\"].append(curr_id)\n elif curr_channel[\"is_public\"] is False:\n raise error.InputError(description=\"\"\"channel_join recieved a channel_id\n for a private channel\"\"\")", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False", "def channel_invite(token, channel_id, u_id):\n authorised_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n user_to_invite = users.get(u_id)\n if user_to_invite is None:\n raise ValueError(\"u_id does not exist.\")\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if authorised_u_id not in channel[\"all_members\"]:\n raise AccessError(\"The authorised user is not a member of the channel.\")\n channels.set(channel_id, \"all_members\", u_id)", "def channel_invite(token, channel_id, u_id):\n\n if database.get_current_user(token) not in database.get_channel_data(channel_id)['member_ids']:\n raise error.AccessError(description=\"\"\"Authorised user is not\n a member of channel with that channel_id.\"\"\")\n if u_id in database.get_channel_data(channel_id).get('member_ids'):\n raise error.InputError(description=\"This user is already a part of the channel.\")\n\n new_channel_data = database.get_channel_data(channel_id)\n\n new_channel_data['member_ids'].append(u_id)\n if database.get_permission_dict(u_id).get('permission_id') == 1:\n new_channel_data['owner_ids'].append(u_id)\n\n database.set_channel_data(new_channel_data)\n\n return {}", "def test_channel_join_already_in_channel():\n 
clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])", "def joinedChannel(self, channel, users):\n pass", "def test_channel_join_private_global():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", False)\n\n\n # Global DREAM owner attempt to join a private channel \n channel_join_v2(auth_token1, channel_id1[\"channel_id\"])\n\n # Check if the global owner successfully join private channel\n assert channels_list_v2(auth_token1) == {\n 'channels': [\n \t{\n \t\t'channel_id': 1, # channel id start at 1 or 0 is worth checking ? It's currently start at 1.\n \t\t'name': 'Chill Soc',\n \t}\n ],\n }", "def channel_join(token, channel_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n user = users.get(auth_u_id)\n if user[\"is_admin\"] is not True and channel[\"is_public\"] is False:\n raise AccessError(\"channel is not public\")\n\n channels.set(channel_id, \"all_members\", auth_u_id)", "def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def test_channel_join_except_private():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", False)\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])", "def test_channel_join_normal_case():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 
'last',\n 'profile_img_url': ''\n }\n ])", "def join(self):\n channel = self.data[0]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(SOCKET_TO_USERID.get(self.source, None))\n\n if user_pseudonym and self.target:\n target_server = self.target[1]\n if(BANHANDLER.is_banned_from_channel(user_pseudonym, target_server, channel)):\n self.source[0].send(\":orcbot!~@localhost PRIVMSG \"+SOCKET_TO_USERID[self.source]+\" :You're banned from \"+channel+\"\\r\\n\")\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].sendall(self.message)\n self.send()", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def is_user_channel_member(channel_id, u_id):\n for selected_id in database.get_channel_data(channel_id)[\"member_ids\"]:\n if selected_id == u_id:\n return True\n return False", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. 
Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "async def invite(self, ctx):\r\n myInvite = discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(permissions=8))\r\n await ctx.channel.send('Invite me to *your* server with this link: \\n\\n<{}>'.format(myInvite))", "def canInvite(session):\n if session.user[\"userlevel\"] == \"admin\":\n return True\n\n dOrg = session.user[\"defaultOrganisation\"] or \"apache\"\n if session.DB.ES.exists(index=session.DB.dbname, doc_type=\"organisation\", id=dOrg):\n xorg = session.DB.ES.get(\n index=session.DB.dbname, doc_type=\"organisation\", id=dOrg\n )[\"_source\"]\n if session.user[\"email\"] in xorg[\"admins\"]:\n return True", "def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "async def lock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name == CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False, read_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n admin_role = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bot_role = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(admin_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bot_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Locked the channel to Member access.\")", "def is_party_channel(channel: discord.TextChannel) -> bool:\n return get_active_feature(channel) == ActivationState.PARTY", "async def addjoinchannel(self, ctx: commands.Context, channel: discord.TextChannel):\n db_session = self.bot.create_db_session()\n\n existing = db_session.query(Channel).filter(Channel.id == channel.id).one_or_none()\n if existing:\n existing.joinable = True\n else:\n db_session.add(Channel(id=channel.id, name=channel.name, joinable=True))\n\n db_session.commit()\n db_session.close()\n await ctx.send(f\"{channel.mention} was added as a joinable channel.\")", "def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)", "def invite(self):\n pass", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False", "def 
test_request_channel_is_none(self):\n CanInfo.objects.filter(can_id=self.UUID).update(channel_name=None)\n self.assertFalse(send_rotate_to_can(self.USER, self.BIN_NUM))", "async def test_staff_members_can_bypass_channel_restriction(self, create_embed, constants):\n constants.STAFF_PARTNERS_COMMUNITY_ROLES = [self.moderator_role.id]\n ctx = helpers.MockContext(author=self.moderator, channel=helpers.MockTextChannel(id=200))\n\n await self.cog.user_info(self.cog, ctx)\n\n create_embed.assert_called_once_with(ctx, self.moderator, False)\n ctx.send.assert_called_once()", "def test_channel_join():\n\n # Clear the data structure\n clear_v1()\n \n\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", True)\n \n \n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])\n\n # Black box testing version in waiting\n # Check if the user is successfully added to the channel data frame\n assert channels_list_v2(auth_token2) == {\n 'channels': [\n \t{\n \t\t'channel_id': 1, # channel id start at 1 or 0 is worth checking ? It's currently start at 1.\n \t\t'name': 'Chill Soc',\n \t}\n ],\n }", "async def botinvite_command(self, ctx):\n invite = f\"https://discord.com/api/oauth2/authorize?client_id={self.client.user.id}&permissions=1374809815&scope=bot\"\n await ctx.send(invite)", "async def invite(self, ctx):\n link = \"https://discordapp.com/oauth2/authorize?client_id=282765243862614016&scope=bot&permissions=19456\"\n await ctx.send(\"Invite me to your server with this link!\\n\" + link)", "async def invite(self, context: Context) -> None:\n embed = discord.Embed(\n description=f\"Invite me by clicking [here](https://discordapp.com/oauth2/authorize?&client_id={self.bot.config['application_id']}&scope=bot+applications.commands&permissions={self.bot.config['permissions']}).\",\n color=0xD75BF4,\n )\n try:\n # To know what permissions to give to your bot, please see here: https://discordapi.com/permissions.html and remember to not give Administrator permissions.\n await context.author.send(embed=embed)\n await context.send(\"I sent you a private message!\")\n except discord.Forbidden:\n await context.send(embed=embed)", "async def set_channel(self, ctx, channel):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n self.stream_channel = channel\n await self.bot.say(\"Channel sucessfully assigned.\")\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")", "async def joinchannel(self, ctx: commands.Context, *channels: str):\n for channel in channels:\n channel_query = self._channel_query(channel)\n\n if channel_query == None:\n await ctx.send(f\"Unable to join {channel}.\")\n continue\n\n channel = self.bot.get_channel(channel_query.id)\n guild = self.bot.get_guild(SERVER_ID)\n member = guild.get_member(ctx.author.id)\n\n if channel == None:\n await ctx.send(f\"Unable to join {channel}.\")\n continue\n\n # Don't let a user join the channel again if they are already in it.\n if channel.permissions_for(member).is_superset(JOINED_PERMISSIONS):\n await ctx.send(f\"You're already a member of 
{channel}.\")\n continue\n\n await channel.set_permissions(member, read_messages=True, reason=\"UQCSbot added.\")\n join_message = await channel.send(f\"{member.display_name} joined {channel.mention}\")\n await join_message.add_reaction(\"👋\")\n await ctx.send(f\"You've joined {channel.mention}.\")", "async def invite(self):\n link = \"https://discordapp.com/oauth2/authorize?client_id=282765243862614016&scope=bot&permissions=19456\"\n await self.bot.say(\"Invite me to your server with this link!\\n\" + link)", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "async def invite(self, ctx):\n perms = discord.Permissions.text()\n perms.update(read_messages=True, manage_messages=True,\n mention_everyone=False, send_tts_messages=False)\n await ctx.send(f'Invite me here:\\n<{discord.utils.oauth_url(self.bot.user.id, perms)}>')", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)", "def check_party_channel(ctx: commands.Context) -> bool:\n if get_active_feature(ctx.channel) != ActivationState.PARTY:\n raise error_handling.InactiveChannelError()\n else:\n return True", "async def adduser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! 
Users can only be added to a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=True, send_messages=True)\n await ctx.message.delete()", "def accept(self, user):\n # If the user is already a collaborator on the project, we don't make another\n # But we do still consider the invitation accepted\n self.project.collaborators.get_or_create(\n user = user,\n defaults = dict(role = Collaborator.Role.CONTRIBUTOR)\n )\n self.delete()", "async def ccallow(self, ctx, channel: discord.TextChannel):\n channel_list = await self.config.guild(ctx.guild).channel_deny()\n if channel.id in channel_list:\n channel_list.remove(channel.id)\n else:\n return await ctx.send(\"Channel is not on the deny list.\")\n await self.config.guild(ctx.guild).channel_deny.set(channel_list)\n await ctx.send(f\"{channel.mention} will be allowed for chatchart use.\")", "def newJoin(user, channel):\n\n message = \"\"\"\nWelcome to the official Slack for ZenCash!\n\n\nThe official links are:\nhttps://github.com/ZenCashOfficial/\nhttps://zencashofficial.io/\n\nNOTICE:\nDue to the recent plague that is SlackBot spamming with `/remind`, please do not click any links sent to you in a DM from slackbot.\n\nAdditionally, please copy the message to #spam so the Admins can ban the user.\n\nPlease remember to be civil, and have a great day!\n\"\"\"\n\n\n # General\n if channel == 'C4QGQ8SEM':\n return message\n\n # Bottesting\n if channel == \"C5JCER3NG\":\n return message", "async def on_member_join(member: discord.Member):\n for channel in member.server.channels:\n print(channel)\n if channel == \"general\":\n await member.send(f\"\"\"Welcome to the server {member.mention}!\"\"\")", "async def deny(self, ctx, user: discord.Member, *, reason: str=None):\n self.data_check(ctx)\n server = ctx.message.server\n try:\n defchannel = self.riceCog2[server.id][\"defchannel\"]\n except:\n defchannel = default_channel\n try:\n channelmute = self.riceCog2[server.id][\"channelmute\"]\n except:\n channelmute = defchannelmute \n channel = discord.utils.get(server.channels, name = defchannel)\n if channel is None:\n msg = await self.bot.say (\"I was unable to write to your log channel. 
Please make sure there is a channel called {} on the server!\".format(defchannel))\n return\n else:\n pass\n if reason is None:\n msg = await self.bot.say(\"Please enter a reason for the warning!\")\n await asyncio.sleep(5)\n await self.bot.delete_message(msg)\n return\n if user.id in self.norole[server.id]:\n if self.norole[server.id][user.id]['Role'] == True:\n msg = await self.bot.say(\"This user has already been denied access to the channel.\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg) \n await self.bot.delete_message(ctx.message)\n return\n else:\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n role = nobnl \n mod = ctx.message.author\n await self.bot.delete_message(ctx.message)\n await self.bot.add_roles(user, nobnl)\n dmuser = await self.bot.start_private_message(user)\n await self.bot.send_message(dmuser, \"Howdy!\\nThis is to let you know that you have been denied access to the channel for the reason:\\n\\n```{}``` \\nPlease speak to a member of staff if you have an issue.\".format(reason))\n user=user\n reason=reason\n ID = uuid.uuid4()\n embed=discord.Embed(title=\"User Denied:\", color=0xA00000)\n embed.add_field(name=\"Case ID:\", value=ID, inline=False)\n embed.add_field(name=\"Moderator:\", value=mod, inline=False)\n embed.add_field(name=\"User:\", value=\"{0} ({0.id})\".format(user), inline=False)\n embed.add_field(name=\"Reason:\", value=reason, inline=False)\n react = await self.bot.send_message(channel, embed=embed)\n await self.bot.add_reaction(react, \"\\U0001f44d\")\n await self.bot.add_reaction(react, \"\\U0001f44e\")\n await self.bot.add_reaction(react, \"\\U0001f937\")\n self.norole[server.id][user.id] = {\n 'Reason': reason,\n 'Mod': ctx.message.author.id,\n 'Role': True\n }\n dataIO.save_json(self.warninglist, self.norole)\n channel = discord.utils.get(server.channels, name = channelmute)\n for channel in server.channels:\n perms = discord.PermissionOverwrite()\n \n if channel.type == discord.ChannelType.text:\n perms.send_messages = False\n perms.read_messages = False\n await self.bot.edit_channel_permissions(channel, role, overwrite=perms) \n else:\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n role = nobnl \n mod = ctx.message.author\n await self.bot.delete_message(ctx.message)\n await self.bot.add_roles(user, nobnl)\n dmuser = await self.bot.start_private_message(user)\n await self.bot.send_message(dmuser, \"Howdy!\\nThis is to let you know that you have been denied access to the channel for the reason:\\n\\n```{}``` \\nPlease speak to a member of staff if you have an issue.\".format(reason))\n user=user\n reason=reason\n ID = uuid.uuid4()\n embed=discord.Embed(title=\"User Denied:\", color=0xA00000)\n embed.add_field(name=\"Case ID:\", value=ID, inline=False)\n embed.add_field(name=\"Moderator:\", value=mod, inline=False)\n embed.add_field(name=\"User:\", value=\"{0} ({0.id})\".format(user), inline=False)\n embed.add_field(name=\"Reason:\", value=reason, inline=False)\n react = await self.bot.send_message(channel, embed=embed)\n await self.bot.add_reaction(react, \"\\U0001f44d\")\n await self.bot.add_reaction(react, \"\\U0001f44e\")\n await self.bot.add_reaction(react, \"\\U0001f937\")\n self.norole[server.id][user.id] = {\n 'Reason': reason,\n 'Mod': ctx.message.author.id,\n 'Role': True\n }\n dataIO.save_json(self.warninglist, self.norole)\n channel = discord.utils.get(server.channels, name = channelmute)\n for channel in server.channels:\n perms = discord.PermissionOverwrite()\n \n if channel.type == 
discord.ChannelType.text:\n perms.send_messages = False\n perms.read_messages = False\n await self.bot.edit_channel_permissions(channel, role, overwrite=perms)", "async def botlink(self, ctx):\n async with self.bot.pool.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute('SELECT * FROM BLChannels WHERE \"id\"=%s;', (ctx.message.channel.id,))\n isregistered = await cur.fetchone()\n if isregistered: \n return\n else: \n embed = discord.Embed(title=\"Mafu, the multi use bot\", colour=discord.Colour(0x6c56b0),url=\"https://discordapp.com/oauth2/authorize?client_id=432292171371118592&scope=bot\") \n await ctx.send(embed=embed)", "async def omartrifacta(self, ctx):\n user_member1 = await ctx.guild.fetch_member(\"142084729674399745\")\n user_member2 = await ctx.guild.fetch_member(\"197784087476305921\")\n user_member3 = await ctx.guild.fetch_member(\"219969018369409024\")\n if user_member1 is not None and user_member2 is not None and user_member3 is not None:\n kick_channel = await ctx.guild.create_voice_channel(\"kicked\")\n await user_member1.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await user_member2.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await user_member3.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await kick_channel.delete()\n else:\n print(\"user invalid for omar()\")", "async def invite(ctx):\r\n await ctx.send(\"https://discordapp.com/oauth2/authorize?client_id=457903893079392256&scope=bot&permissions=2146958591\")\r\n ctx.counter(n)", "def whisper(self,name):\n\n self.sendCommand(\"global /join\",name+self.userName+\" private\")\n self.master.after(300,self.sendCommand,name+self.userName+\" /invite\",name)", "def single_channel():\n return True", "def test_channelCorrection(self):\n self.client.invite(\"foo\", \"bar\")\n self.assertEqual(self.client.lines, [\"INVITE foo #bar\"])", "async def invite(self, ctx):\n embed = discord.Embed(title='Invite links for NOVA',\n description='[<:news:730866149109137520> Required Permissions](https://discord.com/api/'\n 'oauth2/authorize?client_id=709922850953494598&permissions=1573252215&scope='\n 'bot)\\n'\n '[<:news:730866149109137520> No Permissions]'\n '(https://discord.com/api/oauth2/authorize?client_id=709922850953494598&permi'\n 'ssions=0&scope=bot)\\n[<:news:730866149109137520> All Permissions (admin)]'\n '(https://discord.com/api/oauth2/authorize?client_id=709922850953494598&perm'\n 'issions=8&scope=bot)', color=0x5643fd)\n embed.set_footer(text='Developed by YeetVegetabales', icon_url='https://cdn.discordapp.com/avatars'\n '/569374429218603019'\n '/a_6dac6946906e498650f6c2466aa82200.gif?size'\n '=256&f=.gif')\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/54Mim4lahztGCP4hgmpy4lOdEUc4'\n '-dOeNA_x6hVHMlc/%3Fsize%3D4096/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n await ctx.send(embed=embed)", "async def tod_join(self, ctx, *args):\n if ctx.author not in self.players:\n self.players.append(ctx.author)\n message = f\"{ctx.author.mention} has been added to the game!\"\n await ctx.send(message)\n else:\n message = f\"{ctx.author.mention} has already joined!\"\n await ctx.send(message)\n\n # Updates the role if channel exists\n for channel in ctx.guild.channels:\n if channel.name.startswith(\"truth-or-dare\"):\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.add_roles(role)\n return\n\n # Creates the channel if it doesn't exist\n role = 
discord.utils.get(ctx.guild.roles, name=\"Player\")\n bots = discord.utils.get(ctx.guild.roles, name=\"Bots\")\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False, send_messages=False),\n bots: discord.PermissionOverwrite(read_messages=True, send_messages=True),\n role: discord.PermissionOverwrite(read_messages=True, send_messages=True, connect=True, speak=True)\n }\n await ctx.guild.create_text_channel('truth-or-dare', overwrites=overwrites)\n await ctx.guild.create_voice_channel('secret-voice', overwrites=overwrites)\n\n # Adds the role\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.add_roles(role)", "async def invite(self, ctx):\n invite = f\"https://discordapp.com/api/oauth2/authorize?client_id={self.bot.user.id}&permissions=67584&scope=bot\"\n await ctx.send(embed=discord.Embed(\n color=discord.colour.Colour.teal(),\n description=f\":mailbox_with_mail: [Invite]({invite}) me to your server!\"))", "async def _invite(self, ctx: Context):\n\n # read_messages=True,\n # send_messages=True,\n # manage_messages=True,\n # embed_links=True,\n # attach_files=True,\n # external_emojis=True,\n # add_reactions=True\n perms = discord.Permissions(322624)\n\n try:\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perms)\n value = (\n \"Add Brawlcord to your server by **[clicking here]\"\n f\"({invite_url})**.\\n\\n**Note:** By using the link\"\n \" above, Brawlcord will be able to\"\n \" read messages,\"\n \" send messages,\"\n \" manage messages,\"\n \" embed links,\"\n \" attach files,\"\n \" add reactions,\"\n \" and use external emojis\"\n \" wherever allowed.\\n\\n*You can remove the permissions manually,\"\n \" but that may break the bot.*\"\n )\n except Exception as exc:\n invite_url = None\n value = (\n f\"Error \\\"{exc}\\\" while generating invite link.\"\n \" Notify bot owner using the `-report` command.\"\n )\n\n embed = discord.Embed(color=EMBED_COLOR, description=value)\n embed.set_author(\n name=f\"Invite {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n # embed.add_field(name=\"__**Invite Link:**__\", value=value)\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n return await ctx.send(\n \"I do not have the permission to embed a link.\"\n \" Please give/ask someone to give me that permission.\"\n )", "def joined(self):\n return str(self) in holder.bot.conn.channels.keys()", "def invite(self, target, channel):\n self.send_line('INVITE %s %s' % (target, channel))", "async def join(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if player.is_connected:\n return await ctx.send(\"I'm already in a voice channel :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"You are not in a voice channel :no_entry:\")\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n await ctx.send(\"Summoned to `{}` <:done:403285928233402378>\".format(ctx.author.voice.channel.name))", "async def coach(self, ctx):\r\n if ctx.guild.id == 445092370006933505:\r\n user = ctx.author\r\n dm_channel = user.dm_channel\r\n guild_data = self.config.guild(ctx.guild)\r\n coach_id = await guild_data.coachid()\r\n coach = ctx.guild.get_role(int(coach_id))\r\n channel_id = await self.config.guild(ctx.guild).coachchannel()\r\n channel = ctx.guild.get_channel(int(channel_id))\r\n if dm_channel is None:\r\n dm_channel = await 
user.create_dm()\r\n lst = await guild_data.get_raw(\"neededlist\")\r\n player_data = self.config.member(ctx.author)\r\n\r\n def check(m):\r\n return m.channel == dm_channel and m.author == user\r\n\r\n try:\r\n if user.id in lst:\r\n await ctx.send(\"You already have a coaching request pending please stay patient or contact our staff if its been over 48 hrs since your coaching request\")\r\n else:\r\n await ctx.send(\"Please check your DM's...\")\r\n await user.send(\"Please tell us your In game name?, Type 'stop' to stop the process\")\r\n ign = await self.bot.wait_for('message', timeout=60, check=check)\r\n ign_use = ign.content\r\n new_ign = ign.content.lower()\r\n if new_ign == \"stop\":\r\n raise UserEnd\r\n await user.send(\"Please tell us your Player Tag?, Type 'stop' to stop the process\")\r\n tag = await self.bot.wait_for('message', timeout=60, check=check)\r\n tag_use = tag.content\r\n new_tag = tag.content.lower()\r\n if new_tag == \"stop\":\r\n raise UserEnd\r\n await user.send(\"What time do you prefer for coaching? (Times in UTC only), Type 'stop' to stop the process\")\r\n time = await self.bot.wait_for('message', timeout=60, check=check)\r\n time_use = time.content\r\n np = time.content.lower()\r\n if np == \"stop\":\r\n raise UserEnd\r\n await user.send(\"What archatypes do you prefer to play?\")\r\n deck = await self.bot.wait_for('message', timeout=60, check=check)\r\n new_deck = deck.content.lower() # I know I could have made a function to check this but my brain is not working\r\n deck_use = deck.content\r\n if new_deck == \"stop\":\r\n raise UserEnd\r\n\r\n await user.send(\"You will be contacted by one of our coaches please stay patient.\")\r\n await channel.send(\"{} New coaching request from {}\".format(coach.mention, user.mention))\r\n await self.emb(ctx, \"Discord Name\", \"In Game Name\", \"Player Tag\", \"Preferred Time\", \"Deck Type\", user.mention, ign_use, tag_use, time_use, deck_use)\r\n lst.append(user.id)\r\n await self.config.guild(ctx.guild).neededlist.set(lst)\r\n await player_data.ign.set(ign_use)\r\n await player_data.tag.set(tag_use)\r\n await player_data.time.set(time_use)\r\n await player_data.deck_type.set(deck_use)\r\n\r\n except asyncio.exceptions.TimeoutError:\r\n await user.send(\"Timeout...\") # not sure where to send these messages\r\n return\r\n except UserEnd:\r\n await user.send(\"Stopped!\") # not sure where to send these messages\r\n return\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "async def unlock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n if (channel.category.name == CATEGORY_SO or channel.category.name == CATEGORY_GENERAL):\n await ctx.send(\"Synced permissions with channel category.\")\n return await channel.edit(sync_permissions=True)\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name != CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True, read_messages=True)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)\n 
bRole = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(aRole, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bRole, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Unlocked the channel to Member access. Please check if permissions need to be synced.\")", "def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def test_accept_existing_collaborator(self):\n # Make the invitation that we will accept\n invitation = self.project.invitations.create(email = '[email protected]')\n\n # Assert on the current state of the collaborators\n self.assertEqual(self.project.collaborators.count(), 1)\n\n # Accept the invitation as the project owner\n project_owner = self.project.collaborators.filter(role = Collaborator.Role.OWNER).first().user\n invitation.accept(project_owner)\n\n # Check that the number of collaborators has not increased\n self.assertEqual(self.project.collaborators.count(), 1)\n # Check that the project owner is still an owner\n collaborator = self.project.collaborators.get(user = project_owner)\n self.assertEqual(collaborator.role, Collaborator.Role.OWNER)\n\n # Check that the invitation no longer exists\n self.assertFalse(self.project.invitations.filter(pk = invitation.pk).exists())", "async def setcoachchannel(self, ctx, channel: int):\r\n if ctx.guild.id == 445092370006933505:\r\n await self.config.guild(ctx.guild).coachchannel.set(int(channel))\r\n await ctx.send(\"You set {} as the coaching channel\".format(channel))\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def is_invite_only(self):\n return self._tag == 'invite_only'", "def is_invite_only(self):\n return self._tag == 'invite_only'", "def accept_invite(self):\n url = API_PATH[\"accept_mod_invite\"].format(subreddit=self.subreddit)\n self.subreddit._reddit.post(url)", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "async def 
tc_join(self, ctx, key: str):\n await ctx.message.delete()\n async with ctx.typing():\n data = await self.config.guild(ctx.guild).all()\n if data[\"private_textchannels_enabled\"]:\n if key in data[\"private_textchannels\"]:\n await ctx.guild.get_channel(int(key)).set_permissions(\n ctx.author,\n read_message_history=True,\n read_messages=True,\n send_messages=True,\n view_channel=True,\n )\n else:\n await ctx.send(_(\"Private rooms are not enabled on this server.\"))", "def test_channel_leave_normal_case():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "async def _invite(self, ctx: Context):\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n value = (\n f\"Invite TvM Assistant to your bot by [clicking here]({invite_url}).\"\n \"\\n\\nInviting the bot will give it some management permissions. You can\"\n \" review them when you use the link.\"\n )\n\n embed = discord.Embed(color=await ctx.embed_colour(), description=value)\n embed.set_author(name=f\"Invite TvM Assistant\", icon_url=ctx.me.avatar_url)\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n return await ctx.send(\n f\"{invite_url}\\n\\nInviting the bot will give it some management permissions.\"\n \" You can review them when you use the link.\"\n )", "async def invite(self, ctx):\n embed = discord.Embed(title=\"Invite\", description=f\"**{ctx.author.name}**, use this URL to invite me\\n[link](https://discord.com/oauth2/authorize?client_id=749629426777456691&permissions=8&scope=bot)\", color=0xeff0f1)\n await ctx.send(embed=embed)", "async def on_member_join(self, member):\n verified = get(member.guild.roles, name='verified')\n verify_channel = get(member.guild.channels, name='verify')\n db_discord_user = PostgreSQL.get_discord_user(member.id)\n # Checks if the verified role exists, if it doesn't a DM is sent to the server owner to configure it\n if verified is None:\n await verify_channel.send(f'{member.guild.owner.mention} The verified role doesn\\'t exist in the server `{member.guild.name}`. Please type `!build` in one of the text channels in that server')\n return\n\n # Checks if the user exists in the database, if it doesn't a DM is sent to the user to tell them to get verified\n if db_discord_user is None:\n await verify_channel.send(f'{member.mention} You have not been verified yet. 
Please visit {WEBSITE} to get verified (VPN is required)')\n return\n \n db_openid_user = PostgreSQL.get_openid_user(db_discord_user[\"openidc_id\"])\n email = db_openid_user[\"username\"]\n await member.add_roles(verified, reason='Assigning user the verified role')\n\n if check_shelve_file(member.guild.id):\n await member.edit(nick=f'{member.name} [{email}]', reason=\"Changing users\\'s nickname\")", "async def join(self, ctx, key: str):\n await ctx.message.delete()\n async with ctx.typing():\n data = await self.config.guild(ctx.guild).all()\n if data[\"private\"]:\n try:\n if ctx.author.voice.channel.id == data[\"pstart\"]:\n if key in data[\"pchannels\"]:\n await ctx.author.move_to(ctx.guild.get_channel(data[\"pchannels\"][key]))\n else:\n await self.sendNotInStartChannelMessage(ctx, data[\"pstart\"])\n except AttributeError:\n await self.sendNotInStartChannelMessage(ctx, data[\"pstart\"])\n else:\n await ctx.send(_(\"Private rooms are not enabled on this server.\"))", "async def kickoldusers(self, ctx):\n s = db.session()\n\n for member in self.bot.get_guild(CONFIG.server).members:\n\n message = s.query(db.Message).filter(db.Message.author == member.id).first()\n if not message:\n if not member.bot:\n server = self.bot.get_guild(CONFIG.server)\n\n print(server.system_channel)\n inviteLink = await self.bot.get_guild(CONFIG.server).system_channel.create_invite(xkcd=True,\n max_uses=1)\n\n await ctx.send(\"No messages from \" + member.name)\n await member.send(\n \"You have been kicked from Inferno games because you have not posted anything for a while\")\n await member.send(\"If you feel this was a mistake, or if you wish to become active again you can rejoin by clicking \" + str(inviteLink))\n await member.kick()\n s.close()", "async def invite(self, ctx):\n await ctx.send(f\"**{ctx.author.name}**, use this URL to invite me\\n<{discord.utils.oauth_url(self.bot.user.id)}>\")", "def join_channel(self, server, username, channel):\n for sock in self.socks:\n if sock.server == server and username == sock.username:\n if sock.channel == channel:\n return sock\n sock.send(\"JOIN {}\\r\\n\".format(channel))\n print (\"[!] 
channel {} joined on {} with username {}\".format(channel, server, username))\n sock = IRC.Socket(self.dispatcher, sock.sock, username, server, channel)\n self.replyer.add_sock(sock)\n return sock\n return self.add_sock(server=server, username=username, channel=channel)", "def join_channel(\n channel_name: ChannelName,\n _response=Response,\n _db=Depends(get_db),\n Authorization=Header(None),\n):\n\n stat, auth_data = verification_details(Authorization)\n\n if stat != 200:\n _response.status_code = 500\n return {\"data\": \"something happened\"}\n\n res_status, _data = ChatController(_db).join_channel(\n auth_data[\"data\"][\"user\"][\"username\"], channel_name\n )\n\n _response.status_code = res_status\n\n return {\"data\": _data}", "def irc_INVITE(self, prefix, (user, channel)):\n self.join(channel)", "def test_group_is_not_private_user_is_not_member(self):\n thread = self.create_thread()\n user = self.create_user()\n self.assertTrue(thread.first_message.visible_to_user(user))", "async def react_join(a: Message):\n if a.action.member_id == club_id:\n await a.answer(r_register_help)\n stats.jincr()", "async def create(self, ctx, public: Optional[bool] = False, *, name: str):\n data = await self.config.guild(ctx.guild).all()\n if data[\"private\"]:\n try:\n if ctx.author.voice.channel.id == data[\"pstart\"]:\n key = await self._generate_key(data[\"pchannels\"].keys())\n try:\n await ctx.author.send(\n _(\n \"The key to your private room is: ``{key}``\\nGive this key to a friend and ask them to use ``{command}`` to join your private room.\"\n ).format(key=key, command=f\"{ctx.clean_prefix}vc join {key}\")\n )\n except discord.Forbidden:\n await ctx.send(\n _(\"Couldn't send the key to your private channel via DM. Aborting...\")\n )\n return\n if public:\n ov = {\n ctx.author: discord.PermissionOverwrite(\n view_channel=True, connect=True, speak=True, manage_channels=True\n )\n }\n else:\n ov = {\n ctx.guild.default_role: discord.PermissionOverwrite(\n view_channel=True, connect=False\n ),\n ctx.author: discord.PermissionOverwrite(\n view_channel=True, connect=True, speak=True, manage_channels=True\n ),\n }\n c = await ctx.guild.create_voice_channel(\n name,\n overwrites=ov,\n category=ctx.guild.get_channel(data[\"pcat\"]),\n reason=_(\"Private room\"),\n )\n await ctx.author.move_to(c, reason=_(\"Private channel.\"))\n data[\"pchannels\"][key] = c.id\n await self.config.guild(ctx.guild).pchannels.set(data[\"pchannels\"])\n else:\n await self.sendNotInStartChannelMessage(ctx, data[\"pstart\"])\n except AttributeError:\n await self.sendNotInStartChannelMessage(ctx, data[\"pstart\"])\n else:\n await ctx.send(_(\"Private rooms are not enabled on this server.\"))", "def signedOn(self):\n # create a session to respond to private messages from nicks\n # not in any channel I'm in\n\n self.ircNetwork = u'TODO' # TODO \n\n self.defaultSession = self.store.find(d20session.D20Session,\n d20session.D20Session.name == u'#@@default@@').one()\n self.defaultSession.isDefaultSession = True\n # join my default channel\n self.join(self.factory.channel)", "def join_channel(self) -> None:\n response = self.client.conversations_list()\n for channel in response.data[\"channels\"]:\n if channel[\"name\"] == BaseConfig.SLACK_CHANNEL:\n self.channel_id = channel[\"id\"]\n if not self.channel_id:\n raise Exception(f\"Channel: {BaseConfig.SLACK_CHANNEL} was not found\")\n self.client.conversations_join(channel=self.channel_id)", "async def togglechannel(self, ctx, channel):\n\n user = ctx.message.author\n await 
ctx.message.delete()\n\n if channel == \"nsfw\":\n\n if self.bot.nsfw_role in user.roles:\n await user.remove_roles(self.bot.nsfw_role)\n await user.send(\"Access to NSFW channels revoked.\")\n else:\n await user.add_roles(self.bot.nsfw_role)\n await user.send(\"Access to NSFW channels granted.\")\n else:\n await user.send(\"{} is not a togglable channel.\".format(channel))", "def vc_only():\n\n async def check(ctx):\n if ctx.guild and ctx.author.voice:\n if not ctx.guild.me.voice or ctx.author.voice.channel == ctx.guild.me.voice.channel:\n return True\n await ctx.reply(\"I'm already in another voice channel!\")\n return False\n await ctx.reply('You must join a server voice channel first!')\n return False\n\n return commands.check(check)", "def test_channel_join_except_invalid_auth():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", True)\n\n # Create invalid token for the test\n invalid_user = 999\n invalid_token = generate_token(invalid_user)\n\n with pytest.raises(AccessError):\n channel_join_v2(invalid_token, channel_id1[\"channel_id\"])", "def test_channel_leave_invalid_user():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_leave(leaver['token'], userchannel_id['channel_id'])", "def test_cant_ban_user_from_community_if_member(self):\n user = make_user()\n headers = make_authentication_headers_for_user(user)\n\n other_user = make_user()\n community = make_community(creator=other_user, type='P')\n community_name = community.name\n\n user.join_community_with_name(community_name)\n\n user_to_ban = make_user()\n\n url = self._get_url(community_name=community.name)\n response = self.client.post(url, {\n 'username': user_to_ban.username\n }, **headers)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.assertFalse(user_to_ban.is_banned_from_community_with_name(community.name))", "def join(phenny, input):\n # Can only be done in privmsg by an admin\n if input.sender.startswith('#'): return\n if input.admin: \n channel, key = input.group(1), input.group(2)\n if not key: \n phenny.write(['JOIN'], channel)\n else: phenny.write(['JOIN', channel, key])", "def test_accept_member_with_owner(self):\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')", "async def join_leaderboard(self, ctx: commands.Context) -> None:\n if ctx.channel.id != settings.aoc.channel_id:\n await ctx.send(f\"Please use the <#{settings.aoc.channel_id}> channel\")\n return\n\n author = ctx.message.author\n\n info_str = (\n \"Head over to https://adventofcode.com/leaderboard/private \"\n \"with code `975452-d90a48b0` to join the TWT 
private leaderboard!\"\n )\n try:\n await author.send(info_str)\n except discord.errors.Forbidden:\n await ctx.send(f\":x: {author.mention}, please (temporarily) enable DMs to receive the join code\")\n else:\n await ctx.message.add_reaction(\"\\U0001F4E8\")", "def test_joining_private_room_with_excluded_user(self) -> None:\n # Setup a support and two normal users.\n alice = self.register_user(\"alice\", \"pass\")\n alice_token = self.login(alice, \"pass\")\n bob = self.register_user(\"bob\", \"pass\")\n bob_token = self.login(bob, \"pass\")\n support = \"@support1:test\"\n self.get_success(\n self.store.register_user(\n user_id=support, password_hash=None, user_type=UserTypes.SUPPORT\n )\n )\n\n # Alice makes a room. Inject the support user into the room.\n room = self.helper.create_room_as(alice, is_public=False, tok=alice_token)\n self.get_success(inject_member_event(self.hs, room, support, \"join\"))\n # Check the DB state. The support user should not be in the directory.\n users, in_public, in_private = self.get_success(\n self.user_dir_helper.get_tables()\n )\n self.assertEqual(users, {alice, bob})\n self.assertEqual(in_public, set())\n self.assertEqual(in_private, set())\n\n # Then invite Bob, who accepts.\n self.helper.invite(room, alice, bob, tok=alice_token)\n self.helper.join(room, bob, tok=bob_token)\n\n # Check the DB state. The support user should not be in the directory.\n users, in_public, in_private = self.get_success(\n self.user_dir_helper.get_tables()\n )\n self.assertEqual(users, {alice, bob})\n self.assertEqual(in_public, set())\n self.assertEqual(in_private, {(alice, bob, room), (bob, alice, room)})", "def test_channel_join_except_channel():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channels_create_v2(auth_token1, \"Chill Soc\", True)\n invalid_channel = 50\n \n with pytest.raises(InputError):\n channel_join_v2(auth_token2, invalid_channel)", "def test_channel_addowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "async def invite(ctx):\n permissions = 2134207679\n url = discord.utils.oauth_url(client_id=bot.user.id, permissions=discord.Permissions(permissions=permissions),\n scopes=(\"bot\", \"applications.commands\"))\n view = 
discord.ui.View()\n view.add_item(discord.ui.Button(label=\"Invite\", url=url))\n await ctx.respond(\"I'm glad you want to add me to your server, here's a link!\", view=view)", "async def _sign_in(self, ctx: Context, *, ignored: str = None):\n\n guild: discord.Guild = ctx.guild\n channel: discord.TextChannel = ctx.channel\n author: discord.Member = ctx.author\n\n if await self.config.guild(guild).get_raw(\"cycle\", \"number\") is not None:\n return await ctx.send(_(\"You can't do that. The game has already started!\"))\n\n if not await self.config.guild(guild).signups_on():\n return await ctx.send(_(\"Sign-ups are closed!\"))\n\n if not await self.check_total(guild):\n return await ctx.send(_(\"Maximum allowed players signed up!\"))\n\n if await self.config.guild(guild).signup_channel() == channel.id:\n player_id = await self.config.guild(guild).player_id()\n player_role = discord.utils.get(guild.roles, id=player_id)\n\n if player_role not in author.roles:\n try:\n await author.add_roles(player_role)\n await self.update_total(ctx, override=1)\n except discord.Forbidden:\n return await ctx.send(\n _(\n \"I either don't have permissions to manage\"\n \" roles or the `{}` role is above my highest role!\"\n ).format(player_role.name)\n )\n\n await self.remove_extra_roles(ctx, [\"spec\", \"repl\"])\n\n await ctx.message.add_reaction(CHECK_MARK)" ]
[ "0.7135079", "0.6953994", "0.69395274", "0.68968785", "0.6849321", "0.6775492", "0.6759105", "0.67089385", "0.6606643", "0.6557231", "0.6467635", "0.64343905", "0.64186656", "0.64149857", "0.63559216", "0.6322832", "0.63140005", "0.630644", "0.6246793", "0.6224127", "0.62160075", "0.62142015", "0.62115926", "0.62019324", "0.619815", "0.61824137", "0.6147389", "0.61391115", "0.6130362", "0.610227", "0.6099161", "0.609485", "0.6090524", "0.6076559", "0.6076038", "0.60702854", "0.6064432", "0.6056499", "0.60512495", "0.6046687", "0.60402375", "0.60354954", "0.6002691", "0.60010403", "0.5999008", "0.5974852", "0.59610933", "0.5958664", "0.59569335", "0.594062", "0.5940426", "0.5940356", "0.5937475", "0.593599", "0.5915697", "0.58989286", "0.58832085", "0.587765", "0.5876684", "0.5871691", "0.58716255", "0.5871394", "0.58536965", "0.58533436", "0.5852095", "0.5847271", "0.5843096", "0.58378464", "0.58378464", "0.58371204", "0.5837069", "0.58324635", "0.58082616", "0.5807779", "0.5805711", "0.57973486", "0.57868737", "0.5778143", "0.5775922", "0.5772583", "0.57655996", "0.57522756", "0.57519734", "0.5745837", "0.57453966", "0.5744338", "0.57438964", "0.57391775", "0.5734761", "0.5727744", "0.5727694", "0.57252705", "0.5724212", "0.5715922", "0.5709074", "0.5697112", "0.56968415", "0.56934744", "0.568503", "0.5683712" ]
0.73244226
0
checking if adding another owner using the current owner's token works as expected.
def test_channel_addowner_standard_input(): clear() auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == { 'name' : 'Random Channel', 'owner_members': [ { 'u_id': 2, 'name_first': 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' }, { 'u_id': 3, 'name_first' : 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' } ], 'all_members': [ { 'u_id': 2, 'name_first': 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' }, { 'u_id': 3, 'name_first' : 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' } ] })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def channel_addowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id in channel[\"owners\"]:\n raise ValueError(\"user is already an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to add owners\")\n\n channels.set(channel_id, \"owners\", u_id)", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert 
channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def isowner(self, o):\n return self._owner is o", "def add_owner_id(data=None, **kw):\n data['owner_id'] = current_user.id", "def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())", "def manage_owners():\n\n owner_data = request.get_json(force=True)\n return _get_owner_service().create_owner(owner_data)", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_post_add_album_contrib_as_owner(self):\n self.make_logged_in_owner()\n\n # get our manage page with form\n resp = self.client.get(reverse('manage_album', kwargs={'albumid': self.testalbum.id}))\n\n # get and populate form\n myform = resp.context['addcontributorsform']\n data = myform.initial\n data['idname'] = self.u2.id\n\n # construct our post\n self.addcontribpostrequest = self.factory.post(\n reverse(\"add_album_contrib\", kwargs={\"albumid\": self.testalbum.id}), data=data)\n self.addcontribpostrequest.user = self.u\n\n # we do not successfully add because not friends, but still redirect\n # todo: why did this not raise?\n resp = album.add_contrib(self.addcontribpostrequest, 
self.testalbum.id)\n assert resp.status_code == 302\n assert not self.u2.profile in collate_owner_and_contrib(self.testalbum)\n\n # make friends and we will succeed in adding\n complete_add_friends(self.u.id, self.u2.id)\n\n resp = album.add_contrib(self.addcontribpostrequest, self.testalbum.id)\n assert resp.status_code == 302\n assert self.u2.profile in collate_owner_and_contrib(self.testalbum)", "def add_owner(self, user):\n user_in = user.get_groups()\n member = False\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n member = True\n ownership = Relationship(user.get(), 'owns', self.usergroup_node)\n graph.create(ownership)\n if not member:\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node", "def test_add_account(self):\n person1 = self.owner\n person2 = Person(\n self.initial_year, \"Spouse\", self.initial_year - 20,\n retirement_date=self.retirement_date,\n gross_income=Money(50000),\n spouse=person1, tax_treatment=self.tax_treatment)\n # Add an account and confirm that the Person passed as owner is\n # updated.\n account1 = Account(owner=person1)\n account2 = Account(owner=person1)\n self.assertEqual(person1.accounts, {account1, account2})\n self.assertEqual(person2.accounts, set())", "def _add_owner(parent_id, child_id):\n db.session.add(\n pam.BivAccess(\n source_biv_id=parent_id,\n target_biv_id=child_id\n )\n )", "def test_post_owner(self):\n self.client.force_authenticate(self.user)\n response = self.post(content='foo')\n self.assertEqual(response.data['owner'], self.user.pk)", "def items_should_contain_owner_object(context):\n items = context.response.json()['items']\n existing_owner_fields = [\n 'accept_rate', 'display_name', 'link', 'profile_image', 'reputation',\n 'user_id', 'user_type'\n ]\n non_existing_owner_fields = ['display_name', 'user_type']\n\n for item in items:\n assert 'owner' in item\n assert isinstance(item['owner'], dict)\n owner = item['owner']\n if owner['user_type'] == 'does_not_exist':\n all(field in owner for field in non_existing_owner_fields)\n logging.debug(\n ('Item %d contains an non-existing Owner object for user \"%s\" '\n 'with all required fields: %s'), item['question_id'], \n owner['display_name'], ', '.join(non_existing_owner_fields))\n else:\n all(field in owner for field in existing_owner_fields)\n logging.debug(\n ('Item %d contains an existing Owner object for user ID \"%d\" '\n 'with all required fields: %s'), item['question_id'], \n owner['user_id'], ', '.join(existing_owner_fields))", "def testOwnershipAfterCreate(self):\n self.simulateATGUIInteraction(task='create')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "def test_owner_id(api: API):\n owner_id = 123456\n api.candlepin.get_owners.return_value = [{\"key\": owner_id}]\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n\n assert account._owner_id is None\n account.owner_id\n assert account.owner_id == owner_id\n api.candlepin.get_owners.assert_called_once()", "def update_owner(current_owner_email: str, new_owner_email: str):\n current_owner_id = find_user_id(current_owner_email)\n new_owner_id = find_user_id(new_owner_email) \n \n \"\"\" This block is executed to check if email addresses provided are associated with two Looker users \"\"\"\n \n if type(new_owner_id) != int and type(new_owner_id) != int:\n print(\"The email addresses 
for both the current owner and the new owner are not associated with any Looker user id\")\n\n elif type(current_owner_id) != int: \n print(\"The email address for the current owner is not associated with any Looker user id\")\n\n elif type(new_owner_id) != int:\n print(\"The email address for the new owner is not associated with any Looker user id\")\n\n else: \n body = {}\n body['user_id'] = new_owner_id\n find = find_schedules(current_owner_id) \n for i in find.values(): \n sdk.update_scheduled_plan(i,body)\n print(\"Successfully transfer all schedules of \" + current_owner_email + \" to \" + new_owner_email)", "def add_user_to_group(self, token, userGroup, userName, isOwner):\n requestUser = self.get_username_from_token(token)\n if self.check_user_has_owner_clearance(requestUser, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n members = dataBase['userGroups'][userGroup]['members']\n if isOwner and userName not in owners:\n dataBase['userGroups'][userGroup]['owners'].append(userName)\n elif not isOwner and userName not in members:\n dataBase['userGroups'][userGroup]['members'].append(userName)\n\n self.write_database(dataBase)\n else:\n raise UserPermissionException(\n \"Requesting user is not owner of specified user group\")", "def unorphaned(self):\n return self.new_owner == self.user", "def test_post_add_album_contrib_as_not_owner(self):\n complete_add_friends(self.u2.id, self.u3.id)\n\n self.make_logged_in_owner()\n\n # get our manage page with form (use self.u as self.u2 will not obtain the form)\n # using self.u will not affect our test later because we aren't using the client later\n resp = self.client.get(reverse('manage_album', kwargs={'albumid': self.testalbum.id}))\n\n # get and populate form\n myform = resp.context['addcontributorsform']\n data = myform.initial\n data['idname'] = self.u3.id\n\n # construct our post\n self.addcontribpostrequest = self.factory.post(\n reverse(\"add_album_contrib\", kwargs={\"albumid\": self.testalbum.id}), data=data)\n\n self.user_escalate_post_test_helper(self.addcontribpostrequest, self.u2, self.testalbum, self.testalbum.id,\n album.add_contrib, ALBUM_PRIVATE+1)", "def test__put_owner_into():\n user = User.precreate(202211270016)\n team = Team.precreate(202211270017)\n \n for input_value, defaults, expected_output in (\n (ZEROUSER, False, {}),\n (ZEROUSER, True, {'owner': None, 'team': None}),\n (user, True, {'owner': user.to_data(defaults = True, include_internals = True), 'team': None}),\n (team, True, {'owner': team.to_data_user(), 'team': team.to_data(defaults = True, include_internals = True)}),\n ):\n output = put_owner_into(input_value, {}, defaults)\n vampytest.assert_eq(output, expected_output)", "async def claim(self, ctx: \"IceTeaContext\", otag: TagConverter):\n tag: models.Tag = otag\n author = ctx.guild.get_member(tag.author)\n if not author:\n tag.author = ctx.author.id\n await tag.save()\n await ctx.send(f\"You have sucessfully claimed {tag.id}\")\n else:\n await ctx.send(\"The Tag owner is still in the server\")", "def possessed_by(self, other):\r\n self.owner = other", "def check_owner_permission(payload: dict, allow_user_owner: bool, obj: models.Model):\n for entity_type in [\"users\", \"groups\"]:\n for user_identification, permission in payload.get(entity_type, {}).items():\n if permission == \"owner\":\n if entity_type == \"users\" and not allow_user_owner:\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )\n\n if 
entity_type == \"groups\":\n raise exceptions.ParseError(\n \"Owner permission cannot be assigned to a group\"\n )\n # Here we have to check if owner permission is being revoked.\n # Unfortunately there is no way to do this without hitting the\n # database.\n elif entity_type == \"users\":\n if not allow_user_owner:\n user = fetch_user(str(user_identification))\n if obj.is_owner(user):\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )", "def test_transfer_new_inherited_owner(self):\n self.assertEqual(self.project.get_owner().user, self.user_owner)\n self.assertEqual(\n self.project.get_owners(inherited_only=True)[0].user,\n self.user_owner_cat,\n )\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_owner_cat.username,\n 'old_owner_role': PROJECT_ROLE_CONTRIBUTOR,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_owner_cat)\n self.owner_as.refresh_from_db()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n self.assertEqual(self.owner_as.role, self.role_contributor)\n self.assertEqual(\n self.project.get_role(self.user_owner_cat),\n RoleAssignment.objects.get(\n project=self.project,\n user=self.user_owner_cat,\n role=self.role_owner,\n ),\n )", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an owner'),\n params={'username': username},\n )\n return owner", "def _testAssistantOwnershipAfter(self, person=None, task='create'):\n if not person:\n person = self.person\n \n newperson = self.getPerson(id='def456', firstName=\"Test\", lastName=\"Assistant\")\n person.setAssistants([newperson.UID(),])\n self.simulateATGUIInteraction(person=person, task=task)\n owners = person.users_with_local_role('Owner')\n \n return 'def456' in owners", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def test_put_owner(self):\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n put_data = {\n 'role': PROJECT_ROLE_OWNER,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def 
is_owner(self):\n return self._is_owner", "def test_user_is_group_owner(self):\n self.thread.group.owners.add(self.user)\n self.assertEqual(\n Thread.public.get_by_user(\n thread_id=self.thread.pk, user=self.user),\n self.thread\n )", "def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def add_member(self, user):\n if user is self.owner:\n raise ValidationError('A trip owner cannot also be a member.')\n # check the user is not already a member\n if self.members.filter(pk=user.pk).exists():\n return\n self.members.add(user)", "def test_create_owner(self):\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_OWNER,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def set_owner_allowed(self, data):\n self._owner_allowed = self._uni(data)", "def getOwnerIdFromToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def is_owner(self, is_owner):\n\n self._is_owner = is_owner", "def test_add_duplicate(self, api):\n self.builder.add_user(api.get_user())\n resp = api.add_user(api.get_user())\n self.builder.del_user(api.get_user())\n assert resp.status_code == 304", "def run(self):\n # Determine if this filter doesn't apply.\n if (self.owner == None \\\n or (self.sense and self.user != self.owner) \\\n or ((not self.sense) and self.user == self.owner)):\n return 0\n\n # Perform the child actions.\n self.context.tokens['Owner'] = self.owner\n return super(FilterLockOwner, self).run()", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n 
self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. 
Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def test_no_owner_exception(api: API, owners: list):\n api.candlepin.get_owners.return_value = owners\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n with pytest.raises(IndexError):\n account.owner_id", "def save(self, **kwargs):\n # Would be good to move to models or signals\n if ('categoty' in self.validated_data and\n self.validated_data['category'].owner.id != kwargs['owner'].id):\n raise serializers.ValidationError(\n \"You are not the owner of the category!\")\n return super().save(**kwargs)", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def is_user_is_owner(self):\n return self._tag == 'user_is_owner'", "def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up", "def is_still_owner(self):\n raise tooz.NotImplemented", "def testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "async def transfer(self, ctx, target: discord.Member, *, otag: TagConverter):\n tag: models.Tag = otag\n if tag.author == ctx.author.id:\n tag.author = target.id\n await tag.save()\n await ctx.send(f\"You have sucessfully transferred this tag to {target}\")\n else:\n await ctx.send(\"You do not own this tag\")", "def testAssistantOwnershipAfterCreate(self):\n self.failUnless(self._testAssistantOwnershipAfter(task='create'), \"designated assistant is not listed as an owner\")", "def test_known_related_objects_identity_preservation(self):\n self.assertIs(self.aldous, self.brave_new_world.author)", "def manage_owner(owner_id):\n\n return _get_owner_service().get_owner(owner_id)", "def create_entity_owner(self, owner_data):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_CREATE_ENTITY_OWNER, owner_data)", "def test_accept_member_with_owner_bad_request(self):\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'lol': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)", "def test__validate_owner__0():\n user = User.precreate(202211270021)\n team = Team.precreate(202211270022)\n \n for input_value, expected_output in (\n (None, ZEROUSER),\n (user, user),\n (team, team),\n ):\n owner = validate_owner(input_value)\n vampytest.assert_is(owner, expected_output)", "def test_is_following(self):\n\n self.u1.following.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_following(self.u2))\n self.assertFalse(self.u2.is_following(self.u1))", "def 
owner_or_permissions(**perms):\n original = commands.has_permissions(**perms).predicate\n\n async def extended_check(ctx):\n if ctx.guild is None:\n raise errors.NoPrivateMessage\n return ctx.guild.owner_id == ctx.author.id or await original(ctx)\n\n return commands.check(extended_check)", "def test_requested_friends_asymmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n \n f.requested_friends.add(u)\n self.assertIs(u in f.requested_friends.all(), True)\n self.assertIs(f in u.requested_friends.all(), False)", "def add_owner(self, *, table_uri: str, owner: str) -> None:\n user = RDSUser(rk=owner, email=owner)\n table_owner = RDSTableOwner(table_rk=table_uri, user_rk=owner)\n try:\n with self.client.create_session() as session:\n session.merge(user)\n session.merge(table_owner)\n session.commit()\n except Exception as e:\n LOGGER.exception(f'Failed to add owner {owner} for table {table_uri}')\n raise e", "def _init_owners(self, identity, record, **kwargs):\n # if the given identity is that of a user, we add the\n # corresponding user to the owners (record.access.owned_by)\n is_sys_id = system_process in identity.provides\n if not record.access.owned_by and not is_sys_id:\n record.access.owned_by.add({\"user\": identity.id})", "def testAssistantOwnershipAfterEdit(self):\n self.failUnless(self._testAssistantOwnershipAfter(task='edit'), \"designated assistant is not listed as an owner\")", "def test_transfer_old_inherited_owner(self):\n self.owner_as_cat.user = self.user_owner\n self.owner_as_cat.save()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_guest.username,\n 'old_owner_role': PROJECT_ROLE_OWNER,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_guest)\n self.assertIsNone(\n RoleAssignment.objects.filter(\n project=self.project, user=self.user_owner\n ).first()\n )\n self.assertEqual(\n self.project.get_role(self.user_owner), self.owner_as_cat\n )\n self.assertEqual(self.owner_as.role, self.role_owner)", "def test_patch_project_owner(self):\n new_owner = self.make_user('new_owner')\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'owner': str(new_owner.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def get_owner_object(self):\n return False", "def test_can_see_owner(self):\n ThreadParticipant.objects.set_owner(self.thread, self.user)\n\n response = self.client.get(self.api_link)\n self.assertEqual(response.status_code, 200)\n\n response_json = response.json()\n self.assertEqual(response_json['title'], self.thread.title)\n self.assertEqual(\n response_json['participants'], [\n {\n 'id': self.user.id,\n 'username': self.user.username,\n 'avatars': self.user.avatars,\n 'url': self.user.get_absolute_url(),\n 'is_owner': True,\n },\n ]\n )", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email 
protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def have_own_oid(self, oid):\r\n for order in self.owns:\r\n if order.oid == oid:\r\n return True\r\n return False", "def test_token_only_for_1_user(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n user_token = self.user.generate_auth_token(1)\n self.assertNotEqual(self.user.verify_auth_token(user_token),\n self.user2)", "def test_is_owner_inherited_and_local(self):\n self.make_assignment(self.project, self.user_alice, self.role_owner)\n self.assertTrue(self.project.is_owner(self.user_alice))", "def isOwner(id, userId):\n db = core.connect()\n return db[id][\"createdBy\"] == userId", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_ownership(Ebb):\n assert Ebb.getOwner() == accounts[0]\n with pytest.reverts():\n Ebb.transferOwnership(ZERO_ADDRESS, {\"from\": accounts[0]})\n\n Ebb.transferOwnership(accounts[1], {\"from\": accounts[0]})\n assert Ebb.getOwner() == accounts[1]", "def _set_owner(atom_list, owner_array, atm, mol_id):\n # This could be written more simply as a recursive function, but that leads\n # to stack overflows, so I flattened it into an iterative one.\n partners = [atom_list[atm].bond_partners]\n loop_index = [0]\n atom_list[atm].marked = mol_id\n while len(partners) > 0:\n if loop_index[-1] >= len(partners[-1]):\n partners.pop()\n loop_index.pop()\n continue\n partner = partners[-1][loop_index[-1]]\n loop_index[-1] += 1\n if not partner.marked:\n owner_array.append(partner.idx)\n partner.marked = mol_id\n partners.append(partner.bond_partners)\n loop_index.append(0)\n elif partner.marked != mol_id:\n raise MoleculeError('Atom %d in multiple molecules' % partner.idx)", "def test_accept_member_with_owner(self):\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')", "def set_owner(self, data):\n self._owner = self._uni(data)\n self.add_payload('owner', data)", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "def test_manage_user(self):\r\n # First with a new user\r\n user_data = dict(user_id=1, screen_name='twitter')\r\n token = dict(oauth_token='token', oauth_token_secret='secret')\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['screen_name'], user\r\n assert user.name == 
user_data['screen_name'], user\r\n assert user.fullname == user_data['screen_name'], user\r\n assert user.twitter_user_id == user_data['user_id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['screen_name'], user\r\n assert user.name == user_data['screen_name'], user\r\n assert user.fullname == user_data['screen_name'], user\r\n assert user.twitter_user_id == user_data['user_id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(user_id=10, screen_name=self.name)\r\n token = dict(oauth_token='token2', oauth_token_secret='secret2')\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.twitter_user_id == 10, err_msg", "def test_is_team_owner_rank_permission(self):\n\n weak = RankFactory(name='weak soul', team=self.team)\n middle = RankFactory(name='middle soul', team=self.team)\n non_owner = AnotherUserFactory()\n params = {'pk': weak.id}\n edited_weak_name_name = 'small weak soul'\n edited_middle_name_name = 'edited middle soul'\n data = {'name': edited_weak_name_name}\n response = self.client.patch(reverse('api:ranks-detail', kwargs=params), data=data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('name'), edited_weak_name_name)\n\n token = Token.objects.get(user=non_owner)\n self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}')\n data = {'name': edited_middle_name_name}\n params = {'pk': middle.id}\n response = self.client.patch(reverse('api:ranks-detail', kwargs=params), data=data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True", "async def __add(self, ctx, name: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n if name is None:\n name = author\n if server.id not in self.db:\n self.db[server.id] = {}\n if \"bookkeeper\" not in self.db[server.id]:\n self.db[server.id][\"bookkeeper\"] = []\n if name.id in self.db[server.id][\"bookkeeper\"]:\n await self.bot.say(\"{} is already registered as a bookkeeper\".format(name.display_name))\n else:\n self.db[server.id][\"bookkeeper\"].append(name.id)\n self.save_db()\n await self.bot.say(\"{} has been registered as a bookkeeper.\".format(name.display_name))", "def _get_squonk2_owner_tokens(self) -> Optional[Tuple[str, str]]:\n assert self.__keycloak_hostname\n\n _LOGGER.debug('__keycloak_hostname=\"%s\" __keycloak_realm=\"%s\"'\n ' dm-client=%s as-client=%s org=%s org_owner=%s',\n self.__keycloak_hostname,\n self.__keycloak_realm,\n self.__CFG_OIDC_DM_CLIENT_ID,\n self.__CFG_OIDC_AS_CLIENT_ID,\n self.__CFG_SQUONK2_ORG_UUID,\n self.__CFG_SQUONK2_ORG_OWNER)\n\n self.__org_owner_as_token = Auth.get_access_token(\n keycloak_url=\"https://\" + self.__keycloak_hostname + \"/auth\",\n keycloak_realm=self.__keycloak_realm,\n keycloak_client_id=self.__CFG_OIDC_AS_CLIENT_ID,\n username=self.__CFG_SQUONK2_ORG_OWNER,\n password=self.__CFG_SQUONK2_ORG_OWNER_PASSWORD,\n )\n if not self.__org_owner_as_token:\n _LOGGER.warning('Failed to get access token for AS Organisation owner')\n return None\n\n self.__org_owner_dm_token = Auth.get_access_token(\n keycloak_url=\"https://\" + self.__keycloak_hostname + \"/auth\",\n keycloak_realm=self.__keycloak_realm,\n keycloak_client_id=self.__CFG_OIDC_DM_CLIENT_ID,\n 
username=self.__CFG_SQUONK2_ORG_OWNER,\n password=self.__CFG_SQUONK2_ORG_OWNER_PASSWORD,\n )\n if not self.__org_owner_dm_token:\n _LOGGER.warning('Failed to get access token for DM as AS Organisation owner')\n return None\n\n # OK if we get here\n return self.__org_owner_as_token, self.__org_owner_dm_token", "def test_store_saves_owner(self):\n self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)\n stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl,\n owner_id=self.stack.id)\n stack_ownee.store()\n db_stack = stack_object.Stack.get_by_id(self.ctx, stack_ownee.id)\n self.assertEqual(self.stack.id, db_stack.owner_id)", "async def _ad_add(self, ctx, member: discord.Member):\n new_admin = sql.TalosAdmin((ctx.guild.id, member.id))\n if new_admin not in self.database.get_admins(ctx.guild.id):\n self.database.save_item(new_admin)\n await ctx.send(f\"Added admin {member.name}!\")\n else:\n await ctx.send(\"That user is already an admin!\")", "def add_to(self, newowner):\n self.prevai = newowner.ai\n newowner.ai = self", "def test_011_add_same_user(self):\n testflow.step(ADD_USR_MSG, TEST_USER1)\n assert not USER_CLI.run('add', TEST_USER1)[0]", "def test_update_owner(cards_db):\n i = cards_db.add_card(Card(\"foo\", owner=\"me\"))\n cards_db.update_card(i, Card(owner=\"not me\", state=None))\n\n mod = cards_db.get_card(i)\n assert mod == Card(\"foo\", owner=\"not me\")", "def validate_owner(json_data: dict, manufacturer: dict):\n error_msg = ''\n if not json_data.get('ownerGroups'):\n return ''\n if len(json_data.get('ownerGroups')) != 1:\n error_msg = OWNER_GROUP_COUNT_INVALID\n group = json_data['ownerGroups'][0]\n group_man = manufacturer['ownerGroups'][0]\n owner_man = group_man['owners'][0]\n if group.get('type', '') != group_man.get('type'):\n error_msg += OWNER_GROUP_TYPE_INVALID\n if not group.get('owners'):\n return error_msg\n if len(group.get('owners')) != 1:\n error_msg += OWNER_COUNT_INVALID\n owner = group['owners'][0]\n if owner.get('organizationName', '') != owner_man.get('organizationName') or \\\n owner.get('address') != owner_man['address']:\n error_msg += OWNER_MISMATCH\n return error_msg", "def test_is_followed_by(self):\n\n self.u1.followers.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_followed_by(self.u2))\n self.assertFalse(self.u2.is_followed_by(self.u1))", "def test_create_owner(self):\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n\n url = reverse(\n 'projectroles:api_invite_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'email': INVITE_USER_EMAIL,\n 'role': PROJECT_ROLE_OWNER,\n 'message': INVITE_MESSAGE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(\n ProjectInvite.objects.filter(project=self.project).count(), 0\n )\n self.assertEqual(len(mail.outbox), 0)", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")" ]
[ "0.68776894", "0.67999893", "0.6754783", "0.668292", "0.6530196", "0.64863986", "0.6447112", "0.6423208", "0.62997204", "0.6244084", "0.6210982", "0.612194", "0.61106676", "0.60979927", "0.6089244", "0.5948427", "0.59481156", "0.59479606", "0.59267443", "0.5896144", "0.5848834", "0.5823093", "0.5814024", "0.58080256", "0.5790891", "0.5777423", "0.5742974", "0.57394016", "0.57389796", "0.57377875", "0.5716597", "0.5676617", "0.5652669", "0.56442463", "0.56421584", "0.5641315", "0.56378675", "0.5630231", "0.5627602", "0.5612182", "0.5606625", "0.56006587", "0.559585", "0.5581061", "0.5569133", "0.55621344", "0.5553851", "0.5541993", "0.55408746", "0.55341595", "0.55329764", "0.5525299", "0.5525036", "0.5523175", "0.55030054", "0.54996496", "0.54922086", "0.5490332", "0.54873157", "0.5487273", "0.54860234", "0.54809606", "0.5476497", "0.54703695", "0.546728", "0.5457677", "0.54564893", "0.54476255", "0.5443209", "0.54187113", "0.54127806", "0.54125303", "0.54071194", "0.5384682", "0.53823966", "0.5375492", "0.53741974", "0.5373168", "0.5371782", "0.5371053", "0.5369528", "0.5367357", "0.5359046", "0.53555024", "0.53540593", "0.53393257", "0.53376746", "0.5333463", "0.53298444", "0.5328225", "0.5320494", "0.5316973", "0.53162277", "0.53154343", "0.5310301", "0.5307708", "0.5306915", "0.53062785", "0.5280622", "0.52796966" ]
0.6017289
15
checking whether adding an owner after the user has logged out raises an AccessError as expected
def test_channel_addowner_invalid_token_after_logout(): clear() auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) assert(auth_logout(register_second_result['token'])["is_success"] is True) with pytest.raises(AccessError): assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_not_creator_cannot_update_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def 
ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def unorphaned(self):\n return self.new_owner == self.user", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_not_creator_cannot_delete(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def is_still_owner(self):\n raise tooz.NotImplemented", "def is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True", "def testLoggedInNotCreator(self):\r\n\t\tadmin = User.objects.get(username=\"admin\")\r\n\t\tpeebs = User.objects.get(username=\"peebs\")\r\n\t\tdr1 = DataRequest.objects.create(name=\"Important Data Needed!\", description=\"A very important piece of data\", slug=\"important-data-needed\", creator=admin)\r\n\t\tdr2 = DataRequest.objects.create(name=\"datarequest\", description=\"description\", slug=\"datarequest\", creator=peebs)\r\n\t\t\r\n\t\t# log in as someone other than the creator\r\n\t\tlogin = self.client.login(username='peebs', password='map')\r\n\t\tself.failUnless(login, 'Could not login')\r\n\t\t\r\n\t\t# Verify only the owner can edit and that cheaters to go the datarequest url\r\n\t\tdr1_edit_url = reverse(\"epic.datarequests.views.edit_datarequest\", args=[], kwargs={'item_id':dr1.id})\r\n\t\tresponse = self.client.get(dr1_edit_url)\r\n\t\tdr1_url = reverse(\"epic.datarequests.views.view_datarequest\", args=[], kwargs={'item_id':dr1.id})\r\n\t\tself.assertRedirects(response, dr1_url)", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, 
self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def testOwnershipAfterCreate(self):\n self.simulateATGUIInteraction(task='create')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def _check_namespace_access(self, namespace, user):\n if not namespace.owners.filter(id=user.id).count():\n raise exceptions.PermissionDenied(\n 'The namespace listed on your filename must match one of '\n 'the namespaces you have access to.'\n )", "def isowner(self, o):\n return self._owner is o", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = 
owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "def must_be_owner(func):\n @functools.wraps(func)\n @login_required()\n def wrapped(request, poll_name, *args, **kwargs):\n try:\n cur_poll = Poll.objects.get(url=poll_name)\n except Poll.DoesNotExist:\n return db_error(_('This poll does not seem to exist, sorry.'))\n if cur_poll.is_owner(request.user.userinformation):\n return func(request, poll_name, *args, **kwargs)\n else:\n return redirect(reverse('login')+'?next='+request.path)\n return wrapped", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def is_owner(self):\n return self._is_owner", "def test_no_owner_exception(api: API, owners: list):\n api.candlepin.get_owners.return_value = owners\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n with pytest.raises(IndexError):\n account.owner_id", "def CAN_ASSIGN_OWNER(article, user): # pylint: disable=invalid-name\r\n return _is_staff_for_article(article, user)", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "def testAssistantOwnershipAfterEdit(self):\n self.failUnless(self._testAssistantOwnershipAfter(task='edit'), \"designated assistant is not listed as an owner\")", "def test_add_permission(self):\r\n self.assertFalse(self.creator_admin.has_add_permission(self.request))", "def user_can_edit(self, user):\n return user == self.owner", "def testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def is_owner_or_privileged_user(obj_user, request):\n return (\n obj_user == request.user or request.user.is_superuser or is_admin_user(request)\n )", "def 
test_post_owner(self):\n self.client.force_authenticate(self.user)\n response = self.post(content='foo')\n self.assertEqual(response.data['owner'], self.user.pk)", "def cog_check(self, ctx):\n return ctx.author.guild_permissions.administrator", "def verify_user(self):\n if self.username == \"root\":\n print \"Error: Please do not run this script as root.\"\n sys.exit(1)\n\n members = grp.getgrnam(self.groupowner)[3]\n if not self.username in members:\n print \"Error: The user who runs this script must belong to the group: \" + self.groupowner\n sys.exit(1)", "def is_user_is_owner(self):\n return self._tag == 'user_is_owner'", "def test_filter_owner_permission(self):\n User = get_user_model()\n user1 = User.objects.create(username=\"test_user1\", email=\"[email protected]\")\n obj = DescriptorSchema.objects.create(contributor=user1)\n obj.set_permission(Permission.VIEW, user1)\n\n data_template = {\n \"users\": {user1.id: \"view\"},\n \"groups\": {1: \"edit\", 2: \"NONE\"},\n }\n\n check_owner_permission(data_template, False, obj)\n\n # Check that only owner can set owner permission.\n data = deepcopy(data_template)\n data[\"users\"][1] = \"owner\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that only owner can rewoke owner permission.\n obj.set_permission(Permission.OWNER, user1)\n data = deepcopy(data_template)\n data[\"users\"][1] = \"edit\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that group can not be owner.\n obj.set_permission(Permission.VIEW, user1)\n data = deepcopy(data_template)\n data[\"groups\"][1] = \"owner\"\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, False, obj)\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, True, obj)", "def has_object_permission(self, request, view, obj):\n if request.user.is_superuser:\n return True\n if request.user.profile.role == UserRole.CLIENT and obj.owner != request.user:\n return False\n if request.user.profile.role == UserRole.EXECUTOR and obj.executor != request.user:\n return False\n return True", "def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False", "def avoid_lockouts():\n db = get_db()\n if db.count_admins()[0][0] <= 2:\n session[\"last_error\"] = \"There must always be at least two administrators.\"\n return False\n return True", "def testAssistantOwnershipAfterCreate(self):\n self.failUnless(self._testAssistantOwnershipAfter(task='create'), \"designated assistant is not listed as an owner\")", "def test_permission_add_already_exists(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission add anonymous WIKI_CREATE '\n 'WIKI_VIEW WIKI_MODIFY')\n self.assertEqual(0, rv)\n rv, output2 = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output + output2)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_handle_add_not_admin(self):\n test_user = User(\"userid\")\n test_user.github_username = \"githubuser\"\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team add brs ID\", user),\n 
(self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.add_team_member.assert_not_called()", "def test_is_owner_inherited_and_local(self):\n self.make_assignment(self.project, self.user_alice, self.role_owner)\n self.assertTrue(self.project.is_owner(self.user_alice))", "def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True", "def test_not_creator_cannot_delete_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Tab.objects.all()), 1)", "def has_add_permission(self, request):\n return request.user.is_superuser or super().has_add_permission(request)", "def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def check_user_has_owner_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n return userName in owners", "def get_everyone_denied(self):", "def has_perm_or_owns(context, perm, obj, perm_obj, field_name='creator'):\n user = context['request'].user\n if user.is_anonymous():\n return False\n return access.has_perm_or_owns(user, perm, obj, perm_obj, field_name)", "def has_perm_or_owns(context, perm, obj, perm_obj, field_name='creator'):\n user = context['request'].user\n if user.is_anonymous():\n return False\n return access.has_perm_or_owns(user, perm, obj, perm_obj, field_name)", "def test_update_by_non_owner(self):\n # User 1\n saved1 = self.create_article()\n article_url = saved1[0]\n # get user2 details\n token = self.create_article_user2()\n response = self.test_client.put(article_url,self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_put_owner(self):\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n put_data = {\n 'role': PROJECT_ROLE_OWNER,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_add_duplicate(self, api):\n self.builder.add_user(api.get_user())\n resp = api.add_user(api.get_user())\n self.builder.del_user(api.get_user())\n assert resp.status_code == 304", "def test_user_not_in_group_cannot_update_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def assert_same_owner(path):\n try:\n assert find_owner(path) == getuser(), f\"{path} must be owned by {getuser()}\"\n except AssertionError as error:\n raise 
click.UsageError(str(error))\n except FileNotFoundError:\n pass", "def test_validate_owner(self):\n with self.assertRaises(ValidationError):\n self.make_assignment(self.category, self.user_bob, self.role_owner)", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def test_owner_create_assessment(self):\n req, resp = data.get_assessment(self.contract['id'])\n response = self.user_01.post(self.assessment_list_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def has_ownership(self):\n user = self.request.user\n object = self.get_object()\n if object.owned_by(user):\n return True\n else:\n return False", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())", "def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def _add_owner(parent_id, child_id):\n db.session.add(\n pam.BivAccess(\n source_biv_id=parent_id,\n target_biv_id=child_id\n )\n )", "def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an owner'),\n params={'username': username},\n )\n return owner", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def available(self, o):\n return not self.locked() or self.isowner(o)", "def test_get_owned(self):\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/users/{}'.format(user.uid),\n headers=self.login_headers(user),\n )\n response_dict = json.loads(response.body)\n self.assertEqual(response_dict['uid'], user.uid)", "def is_owned_by(self, user):\n return user and user.id == self.user_id", "def test_create_owner(self):\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_OWNER,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def isOwner(id, userId):\n db = core.connect()\n return db[id][\"createdBy\"] == userId", "def test_user_can_change_not_author(self):\n self.assertFalse(self.story.user_can_change(self.user2))", "def test_15_admin_user_add_del_authenticated(self):\r\n self.register()\r\n self.signout()\r\n self.register(fullname=\"Juan Jose\", name=\"juan\",\r\n email=\"[email protected]\", password=\"juan\")\r\n self.signout()\r\n self.register(fullname=\"Juan Jose2\", name=\"juan2\",\r\n email=\"[email protected]\", password=\"juan2\")\r\n self.signout()\r\n self.signin(email=\"[email protected]\", password=\"juan2\")\r\n # Add user.id=2 to admin group\r\n res = self.app.get(\"/admin/users/add/2\", 
follow_redirects=True)\r\n assert res.status == \"403 FORBIDDEN\",\\\r\n \"This action should be forbidden, not enought privileges\"\r\n # Remove user.id=2 from admin group\r\n res = self.app.get(\"/admin/users/del/2\", follow_redirects=True)\r\n assert res.status == \"403 FORBIDDEN\",\\\r\n \"This action should be forbidden, not enought privileges\"", "def test_owner_edit_assessment_invalid(self):\n req, resp = data.get_assessment(self.contract['id'])\n response = self.user_01.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def allowed_organization_access_create(user):\n return user.has_perm(\"vnswww.add_organization\")", "def check_owner_permission(payload: dict, allow_user_owner: bool, obj: models.Model):\n for entity_type in [\"users\", \"groups\"]:\n for user_identification, permission in payload.get(entity_type, {}).items():\n if permission == \"owner\":\n if entity_type == \"users\" and not allow_user_owner:\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )\n\n if entity_type == \"groups\":\n raise exceptions.ParseError(\n \"Owner permission cannot be assigned to a group\"\n )\n # Here we have to check if owner permission is being revoked.\n # Unfortunately there is no way to do this without hitting the\n # database.\n elif entity_type == \"users\":\n if not allow_user_owner:\n user = fetch_user(str(user_identification))\n if obj.is_owner(user):\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )", "def handle_no_ownership(self, request, *args, **kwargs):\n user = request.user\n if user.is_superuser:\n message = _('Viewing as {}.'.format('admin'))\n messages.info(request, message)\n return super().dispatch(request, *args, **kwargs)\n else:\n message = _('Ownership required.')\n messages.warning(request, message)\n return redirect(request.META.get('HTTP_REFERER', '/'))", "def has_permission(self, request, view):\n return request.user.group != 'patient'", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def only_owner(func):\n def decorated(*_, **kwargs):\n id = kwargs['id']\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != id:\n abort(403)\n return func(**kwargs)\n\n return decorated", "def test_remove_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Invalid Requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def raise_for_ownership(self, resource: Model) -> None:\n\n # pylint: disable=import-outside-toplevel\n from superset import db\n\n if self.is_admin():\n return\n\n # Set of wners that works across ORM models.\n owners: List[User] = []\n\n orig_resource = 
db.session.query(resource.__class__).get(resource.id)\n\n if orig_resource:\n if hasattr(resource, \"owners\"):\n owners += orig_resource.owners\n\n if hasattr(resource, \"owner\"):\n owners.append(orig_resource.owner)\n\n if hasattr(resource, \"created_by\"):\n owners.append(orig_resource.created_by)\n\n if g.user.is_anonymous or g.user not in owners:\n raise SupersetSecurityException(\n SupersetError(\n error_type=SupersetErrorType.MISSING_OWNERSHIP_ERROR,\n message=f\"You don't have the rights to alter [{resource}]\",\n level=ErrorLevel.ERROR,\n )\n )", "def is_userAS(self, obj):\n # Some other places simply check for owner=None.\n return UserAS.objects.filter(as_ptr=obj).exists()", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_order_cannot_be_deleted_if_not_owner(self):\n\n\t\tres = self.login_user()\n\t\tress = self.login_admin_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\t\ta_access_token = json.loads(ress.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/1',\n\t\t\theaders={\"x-access-token\": a_access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 401)\n\t\tself.assertEqual(result[\"message\"], \n\t\t\t\"Not authorized to perform this function!\")", "def test_user_is_group_owner(self):\n self.thread.group.owners.add(self.user)\n self.assertEqual(\n Thread.public.get_by_user(\n thread_id=self.thread.pk, user=self.user),\n self.thread\n )", "def check_owner(func):\n\n @wraps(func)\n def wrapper(sport_id):\n sport = session.query(Sport).filter_by(id=sport_id).one()\n\n creator = getUserInfo(sport.user_id)\n user = 
getUserInfo(login_session['user_id'])\n\n if creator.id != login_session['user_id']:\n return redirect(url_for('showSports'))\n else:\n return func(sport_id)\n return wrapper", "def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")" ]
[ "0.7260797", "0.70191914", "0.6896338", "0.67979467", "0.6756282", "0.67163837", "0.6641159", "0.65771526", "0.65635395", "0.6563323", "0.6472536", "0.6423421", "0.6411672", "0.6408827", "0.63631994", "0.63437754", "0.6336135", "0.6251188", "0.62497646", "0.62204605", "0.62168294", "0.618654", "0.61818737", "0.61624825", "0.6158438", "0.61533266", "0.6145117", "0.6122239", "0.6107541", "0.6057838", "0.6057838", "0.605402", "0.6050544", "0.6041632", "0.5997752", "0.5995393", "0.5991428", "0.5987904", "0.59868807", "0.59538364", "0.5945301", "0.5943843", "0.5935194", "0.5930293", "0.5923231", "0.5922865", "0.591358", "0.5898346", "0.58981675", "0.5894317", "0.5889844", "0.58826566", "0.58779246", "0.5867624", "0.5845859", "0.58363533", "0.583472", "0.58343476", "0.5831916", "0.5831506", "0.58268255", "0.58268255", "0.5825162", "0.5824375", "0.5817415", "0.58141845", "0.58120394", "0.5802156", "0.5800624", "0.5797829", "0.5793647", "0.57909596", "0.579014", "0.57869494", "0.57844436", "0.5779098", "0.57771856", "0.57755303", "0.57731104", "0.57657725", "0.57607895", "0.57606995", "0.5758962", "0.5755747", "0.5755137", "0.57472634", "0.57469946", "0.5736607", "0.57274157", "0.57193244", "0.57158643", "0.5715811", "0.57141745", "0.570258", "0.5696687", "0.5695848", "0.56946373", "0.56936264", "0.56902164", "0.5684641" ]
0.64897716
10
checking if an InputError is raised when attempting to add a user as an owner who is already an owner
def test_channel_addowner_already_an_owner():
    clear()
    auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
    register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
    with pytest.raises(InputError):
        assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an owner'),\n params={'username': username},\n )\n return owner", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_username_not_unique(bot):\n expect_error(register, InputError, bot.username, \"abcdef\", \"a\", \"a\", \"a\")", "def check_user(msg):\n if \"Error\" in msg:\n raise ValueError('User already exists.')", "def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def _validate_ip_owner(ip, mac, row_number):\n mac = MACAddressField.normalize(mac)\n try:\n dev = Device.admin_objects.get(ethernet__mac=mac)\n except Device.DoesNotExist:\n if ip_address_exists(ip):\n raise forms.ValidationError(\n \"Row %s: IP address already exists.\" % row_number\n )\n else:\n # Does another device have this IPAddress?\n if(Device.objects.filter(\n ipaddress__number=int(ipaddr.IPAddress(ip)),\n ).exclude(\n pk=dev.id,\n ).exists()):\n raise forms.ValidationError(\n \"Row %s: IP address used by another device.\" % row_number\n )", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def test_validate_owner(self):\n with self.assertRaises(ValidationError):\n self.make_assignment(self.category, self.user_bob, self.role_owner)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def add():\r\n ch = input('You are about to ADD an entry. 
If NO, you may choose another option.\\n').lower()\r\n\r\n if y_n(ch):\r\n print('Enter info for the following fields...\\n')\r\n xln = re.sub(r'\\s', '', str(input('Last name?\\n')).lower().capitalize()) # lower, cap first, remove whitespace\r\n xfn = re.sub(r'\\s', '', str(input('First name?\\n')).lower().capitalize())\r\n\r\n if search2(xln, xfn): # search if an entry already exists for user's input\r\n print('An entry already exists for', xfn, xln, end='. Please enter another entry.\\n')\r\n return add() # if an entry already exists make user enter another\r\n\r\n xgr = None\r\n try: # try except user's inputted grade\r\n xgr = int(input('Grade?\\n'))\r\n xgrs = [8, 9, 10, 11, 12, 13]\r\n\r\n xgr = check_int(xgr, xgrs)\r\n except ValueError:\r\n print('You did not enter an applicable grade. Please enter another value.')\r\n add()\r\n\r\n xsr = str(input('Stream? (eg. Academic, IB, etc...)\\n')).lower().capitalize()\r\n xrl = str(input('Role? (eg. Design Member)\\n')).lower().capitalize()\r\n xcm = str(input('Any comments?\\n')).lower().capitalize()\r\n\r\n ch2 = input('Are you sure you wish to add this individual to the database? YES or NO?\\n')\r\n if y_n(ch2):\r\n print(xfn, xln, 'has been added to the database.')\r\n with conn: # input corresponding info to table with context manager\r\n c.execute(\"\"\"INSERT INTO personnel VALUES (\r\n :last, :first, :grade, :stream, :role, :comments)\"\"\",\r\n {'last': xln, 'first': xfn, 'grade': xgr, 'stream': xsr, 'role': xrl, 'comments': xcm})\r\n\r\n start() # after user's action has been completed, ask for another\r\n else:\r\n print('Your add action has been cancelled.')\r\n start()\r\n else: # ask for another if user wishes to perform another action\r\n start()", "async def cog_command_error(self, ctx:utils.Context, error:commands.CheckFailure):\n\n # Throw errors properly for me\n if ctx.author.id in self.bot.config['owners']:\n text = f'```py\\n{error}```'\n await ctx.send(text)\n raise error\n\n elif isinstance(error, commands.NotOwner):\n await ctx.send(\"You need to be registered as an owner to run this command.\")\n return", "def test_signup_dupe_username(self):\n\n invalid_u = User.signup(\"[email protected]\", \"allison\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def validate_username(self, username):\n if username.data != current_user.username:\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('That username already exists. 
Please choose another username.')", "def test__validate_owner__0():\n user = User.precreate(202211270021)\n team = Team.precreate(202211270022)\n \n for input_value, expected_output in (\n (None, ZEROUSER),\n (user, user),\n (team, team),\n ):\n owner = validate_owner(input_value)\n vampytest.assert_is(owner, expected_output)", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def validate_username(self, attrs, source):\n phone_no = attrs[source]\n if not phoneCleaner(phone_no):\n raise serializers.ValidationError(\"Please check your phone no. 
the format is incorrect\")\n\n try:\n us = User.objects.get(username__iexact=phone_no)\n except User.DoesNotExist:\n raise serializers.ValidationError(\"Phone number must already be registered before doing this\")\n\n if us.hierarchy != 'master':\n raise serializers.ValidationError(\"Phone number must not be a slave to another user\")\n\n return attrs", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def verify_user(self):\n if self.username == \"root\":\n print \"Error: Please do not run this script as root.\"\n sys.exit(1)\n\n members = grp.getgrnam(self.groupowner)[3]\n if not self.username in members:\n print \"Error: The user who runs this script must belong to the group: \" + self.groupowner\n sys.exit(1)", "def test_not_member(bot, event):\n _, event_id = event\n expect_error(edit, InputError, bot.username, event_id, False, None, None)", "def test_011_add_same_user(self):\n testflow.step(ADD_USR_MSG, TEST_USER1)\n assert not USER_CLI.run('add', TEST_USER1)[0]", "def _onAdd(self, event):\n finished = False\n pattern = re.compile(r'[^a-zA-Z0-9_]')\n while not finished:\n dialog = wx.TextEntryDialog(self, 'New user name.', 'Username',\n '', style = wx.OK | wx.CANCEL)\n if dialog.ShowModal() == wx.ID_OK:\n newusername = dialog.GetValue()\n if re.search(pattern, newusername):\n message = wx.MessageDialog(self, _USERNAME_ERROR_MESSAGE,\n 'Error', wx.OK | wx.ICON_ERROR)\n message.ShowModal()\n elif newusername in self.users:\n message = wx.MessageDialog(self,\n 'That user already exists.',\n 'Error', wx.OK | wx.ICON_ERROR)\n else:\n c.addUser(newusername)\n self.users.append(newusername)\n self.userlist.SetItems(self.users)\n finished = True\n else:\n finished = True", "def clean(self):\n c = super(UserForm, self).clean()\n if (self.instance.pk is None and\n c.get('email') and\n user_exists(c.get('email'),\n c.get('last_name'),\n c.get('first_name'),\n self.current_round_name)):\n raise forms.ValidationError(\n ugettext('APPLICATION_EXISTS PLEASE_LOGIN'))\n return c", "def username_prompt(): \n\n print(\"Valid usernames contain only the characters 'a-z', e.g. 
pdiddy.\")\n\n while True: \n username = str(input(\"Enter username to add: \"))\n confirm_name = str(input(\"To confirm, re-enter username: \"))\n \n if username != confirm_name or not re.match(\"^[a-z]+$\", username):\n print(TRY_AGAIN)\n continue \n \n else:\n print(\"OK, checking if user: %s exists...\" %(username))\n return username", "def test_no_owner_exception(api: API, owners: list):\n api.candlepin.get_owners.return_value = owners\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n with pytest.raises(IndexError):\n account.owner_id", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def validate_username(form, field):\n if User.query.filter_by(username=form.username.data).first():\n form.username.errors.append(\"Username already taken!\")\n raise ValidationError", "def handle_empty_owners():\n result = shared_ldap.find_from_email(shared.globals.REPORTER)\n if result is not None:\n shared_sd.post_comment(\n \"Adding %s as the owner of the group.\" % shared.globals.REPORTER, True)\n return [result]\n\n # OK - something stupid is happening but let's give ourselves\n # a safety net.\n shared_sd.post_comment(\n \"Unable to add %s as an owner as the email address cannot be \"\n \"found in Linaro Login. This means the automation has not \"\n \"been able to find any of the specified email addresses in \"\n \"Linaro Login. Consequently, IT Services will need to manage \"\n \"it in the interim.\" % shared.globals.REPORTER, True)\n return [\"cn=its,ou=mailing,ou=groups,dc=linaro,dc=org\"]", "def test__put_owner_into():\n user = User.precreate(202211270016)\n team = Team.precreate(202211270017)\n \n for input_value, defaults, expected_output in (\n (ZEROUSER, False, {}),\n (ZEROUSER, True, {'owner': None, 'team': None}),\n (user, True, {'owner': user.to_data(defaults = True, include_internals = True), 'team': None}),\n (team, True, {'owner': team.to_data_user(), 'team': team.to_data(defaults = True, include_internals = True)}),\n ):\n output = put_owner_into(input_value, {}, defaults)\n vampytest.assert_eq(output, expected_output)", "def validate_username(self, username):\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('That username already exists. 
Please choose another username.')", "def input_and_create_user(self):\n print(\"Please input username!\")\n new_username = input()\n new_user = user.User(new_username)\n self.users.append(new_user)", "def save(self, **kwargs):\n # Would be good to move to models or signals\n if ('categoty' in self.validated_data and\n self.validated_data['category'].owner.id != kwargs['owner'].id):\n raise serializers.ValidationError(\n \"You are not the owner of the category!\")\n return super().save(**kwargs)", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def isowner(self, o):\n return self._owner is o", "def validate_username(self, field):\n if User.query.filter_by(username=field.data).first():\n raise ValidationError(\"Username already in use.\")", "def test_empty_username():\n expect_error(register, InputError, \"\", \"abcdef\", \"A\", \"A\", \"A\")", "def testOwnershipAfterCreate(self):\n self.simulateATGUIInteraction(task='create')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def validate_name(self, username: str) -> bool:\n\t\treturn not self.registry.name_taken(username)", "def assert_same_owner(path):\n try:\n assert find_owner(path) == getuser(), f\"{path} must be owned by {getuser()}\"\n except AssertionError as error:\n raise click.UsageError(str(error))\n except FileNotFoundError:\n pass", "def validate_owner(json_data: dict, manufacturer: dict):\n error_msg = ''\n if not json_data.get('ownerGroups'):\n return ''\n if len(json_data.get('ownerGroups')) != 1:\n error_msg = OWNER_GROUP_COUNT_INVALID\n group = json_data['ownerGroups'][0]\n group_man = manufacturer['ownerGroups'][0]\n owner_man = group_man['owners'][0]\n if group.get('type', '') != group_man.get('type'):\n error_msg += OWNER_GROUP_TYPE_INVALID\n if not group.get('owners'):\n return error_msg\n if len(group.get('owners')) != 1:\n error_msg += OWNER_COUNT_INVALID\n owner = group['owners'][0]\n if owner.get('organizationName', '') != owner_man.get('organizationName') or \\\n owner.get('address') != owner_man['address']:\n error_msg += OWNER_MISMATCH\n return error_msg", "def is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True", "def validate_username(self, attrs, source):\n phone_no = attrs[source]\n if not phoneCleaner(phone_no):\n raise serializers.ValidationError(\"Please check your phone no. the format is incorrect\")\n\n try:\n User.objects.get(username__iexact=phone_no)\n except User.DoesNotExist:\n return attrs\n raise serializers.ValidationError(\"Phone number already exists. If are trying to glue, consider the glue option\")", "def validate_username(self, username):\n user = User.query.filter_by(username=username.data).first()\n if user:\n # if user is not None\n raise ValidationError('That username is taken. 
Please choose a different one.')", "def _handleBusOwnerChanged(self, new_owner):\n if new_owner == '':\n logger.warn('No owner anymore for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n raise Exception('LostDhcpSlave')\n else:\n pass # Owner exists", "def add_member(self, user):\n if user is self.owner:\n raise ValidationError('A trip owner cannot also be a member.')\n # check the user is not already a member\n if self.members.filter(pk=user.pk).exists():\n return\n self.members.add(user)", "def test_invalid_username():\n expect_error(edit, InputError, \"aaa\", 1, True, None, None)", "def testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def validate_username(self, username_field):\n if User.get_by_username(username_field.data):\n raise ValidationError('This username is already taken.')", "def _validate_user(_):\n pass", "def invalid_user(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM my_users WHERE username=%(username)s\",\\\n {'username':username})\n rows = cur.rowcount\n if rows > 0:\n return True\n return False", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def _validate_add_command(args):\n res = _check_entry_name(args)\n if res != 0:\n return res\n\n return _check_property_arguments(args, args.type)", "def add_me():\n\n root_user_check()\n\n username = username_prompt()\n while not username_check(username): \n username = username_prompt()\n\n comment = comment_prompt()\n password = passwd_prompt()\n\n add_user(username, comment, password)", "def username_exist_check(form, field):\n username = field.data\n user = UserModel.query(UserModel.username==username).get()\n if user:\n raise validators.ValidationError('username exists, choose a different one!')", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"[email protected]\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def first_is_valid(command_from_user):\n arguement_entered_user = command_from_user[0]\n if arguement_entered_user == 'list':\n return True\n \n elif arguement_entered_user == 'clashes':\n return True\n \n else:\n return False", "def test_channel_addowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 
'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def validateUser(self,admin):\n \n res=admin.helper.getOneUser(self.name)\n if res == False:\n return True\n else:\n return False", "def validate_user(user):\n username = user.get(\"username\")\n if username is None:\n return False, USERNAME_NOT_AVAILABLE\n\n total = user.get(\"total\")\n if total is None:\n return False, TOTAL_NOT_AVAILABLE\n\n if mongo.db.users.find_one({\"username\": username}):\n return False, USER_ALREADY_EXISTS\n\n return True, SUCCESSFUL_VALIDATION_MESSAGE", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def validate_username(self, username_field):\n\n if User.query.filter_by(username=username_field.data).first():\n raise ValidationError(\"This username is already taken.\")", "def test_register_duplicate(self):\n self._storage.register_user(\"user1\", \"code1\")\n with self.assertRaises(DuplicateUserException):\n self._storage.register_user(\"user1\", \"code1\")", "def add_owner(self, user):\n user_in = user.get_groups()\n member = False\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n member = True\n ownership = Relationship(user.get(), 'owns', self.usergroup_node)\n graph.create(ownership)\n if not member:\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node", "def is_valid_username(self, username): # WORKS\n done1 = self.cur.execute(\"SELECT username FROM users WHERE username=\\\"{}\\\"\".format(username))\n done2 = self.cur.execute(\"SELECT username FROM admins WHERE username=\\\"{}\\\"\".format(username))\n if done1 == 0 and done2 == 0: # If both queries are unsuccessful, username doesn't exist in both tables.\n return False\n else:\n return True", "def check_unique(self):\n pass", "def testAssistantOwnershipAfterEdit(self):\n self.failUnless(self._testAssistantOwnershipAfter(task='edit'), \"designated assistant is not listed as an owner\")", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_signup_missing_first_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", None, \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def test_email_not_unique(bot):\n expect_error(register, InputError, \"a\", \"abcdef\", \"a\", \"a\", bot.email)", "def test_invalid_update_request_with_taken_username(self):\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(self.author.get_key()))\n response: Response = self.client.patch(BASE_URL + '/update/', data={\n 'username': self.temporary_author.username\n })\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT, 
msg=data)\n self.assertEqual(data, {'detail': f\"User '{self.temporary_author.username}' already exists.\"})", "def test_existence_conflict(self, username, email, validate_suggestions):\n user = UserFactory.create(username='user', email='[email protected]')\n self.assertValidationDecision(\n {\n 'username': username,\n 'email': email\n },\n {\n # pylint: disable=no-member\n \"username\": USERNAME_CONFLICT_MSG.format(\n username=user.username\n ) if username == user.username else '',\n # pylint: disable=no-member\n \"email\": EMAIL_CONFLICT_MSG.format(\n email_address=user.email\n ) if email == user.email else ''\n },\n validate_suggestions\n )", "def test_add_duplicate(self, api):\n self.builder.add_user(api.get_user())\n resp = api.add_user(api.get_user())\n self.builder.del_user(api.get_user())\n assert resp.status_code == 304", "def test_signup_missing_last_name(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", \"Test\", None, None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def username_error(self, msg):\n print('\\nusername error: %s' % msg, file=self.console)\n self.username = input('Username: ')", "def test_adduser(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.assertTrue(self.run_function(\"group.adduser\", [self._group, self._user]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertIn(self._user, str(group_info[\"members\"]))\n # try add a non existing user\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._group, self._no_user])\n )\n # try add a user to non existing group\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._no_group, self._user])\n )\n # try add a non existing user to a non existing group\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._no_group, self._no_user])\n )", "def clean(self):\n\n # Fetch cleaned email and username data.\n email = self.cleaned_data.get('email')\n username = self.cleaned_data.get('username')\n\n # Fetch possible user objects from the database\n # based on provided and email and password.\n user_email = User.objects.filter(email=email)\n user_uname = User.objects.filter(username=username)\n\n # If user exists based on the email address or username,\n # raise validation error.\n if user_email:\n self._errors[\"email\"] = \"Email address is already associated with another account\"\n\n if user_uname:\n self._errors[\"username\"] = \"Usename is already associated with another account\"", "def is_username_taken(self, username):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_IS_USERNAME_TAKEN, username)", "def test_not_creator_cannot_update_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def username_check(username):\n\n try: \n pwd.getpwnam(username)\n print(\"User %s DOES EXIST. Try a different username.\" % (username)) \n return False\n\n except KeyError: \n print(\"User %s DOES NOT exist. 
Continuing...\" % (username)) \n return True", "def _validate_box_form(form):\n if not db(db.box.owner == auth.user.id)(db.box.name.like(form.vars.name))(db.box.id != form.record).isempty():\n form.errors.name = 'Box already exists'", "def validateUsername(username):\n\n if not(username):\n return \"You must specify your archive.org username.\"", "def test_accept_member_with_owner_bad_request(self):\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'lol': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)", "def set_owner_allowed(self, data):\n self._owner_allowed = self._uni(data)", "async def _ad_add(self, ctx, member: discord.Member):\n new_admin = sql.TalosAdmin((ctx.guild.id, member.id))\n if new_admin not in self.database.get_admins(ctx.guild.id):\n self.database.save_item(new_admin)\n await ctx.send(f\"Added admin {member.name}!\")\n else:\n await ctx.send(\"That user is already an admin!\")", "def associate_error(request):\r\n info=''\r\n error_msg = \"Whoops, this account is already linked to another Seattle Clearninghouse user.\"\r\n return profile(request, info, error_msg)", "def test_create_with_duplicate_userid(self):\n\n self.sdkapi.guest_create(self.userid, 1, 1024)\n try:\n self.sdkapi.guest_create(self.userid, 1, 1024)\n except exception.SDKSMUTRequestFailed as err:\n self.assertEqual(err.results['rc'], 400)\n self.assertEqual(err.results['rs'], 8)", "def test_registeration_duplicate_username(self):\n self.signup_a_user(self.user_data)\n response_duplicate = self.signup_a_user(\n self.user_data_duplicate_username)\n self.assertEqual(response_duplicate.status_code,\n status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response_duplicate.data[\"errors\"][\"username\"],\n [\"user with this username already exists.\"])\n self.assertNotIn(\"token\", response_duplicate.data)", "def check_input(naming):\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')", "def channel_addowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id in channel[\"owners\"]:\n raise ValueError(\"user is already an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to add owners\")\n\n channels.set(channel_id, \"owners\", u_id)", "def is_still_owner(self):\n raise tooz.NotImplemented", "def test_signup_dupe_email(self):\n\n invalid_u = User.signup(\"[email protected]\", \"testuser\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def add_user(self, u, p):\r\n\t\tlogger.debug(\"Entering\")\r\n\r\n\t\ttry:\r\n\t\t\tlogin.add_user(u, p)\r\n\t\texcept ValueError as e:\r\n\t\t\tlogger.debug(\"Exiting - failure\")\r\n\t\t\treturn False, e.message\r\n\t\t\t\r\n\t\tlogger.debug(\"Exiting - success\")\r\n\t\treturn True, \"%s has been added.\" % u", "def is_user_is_owner(self):\n return self._tag == 'user_is_owner'", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = 
channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def items_should_contain_owner_object(context):\n items = context.response.json()['items']\n existing_owner_fields = [\n 'accept_rate', 'display_name', 'link', 'profile_image', 'reputation',\n 'user_id', 'user_type'\n ]\n non_existing_owner_fields = ['display_name', 'user_type']\n\n for item in items:\n assert 'owner' in item\n assert isinstance(item['owner'], dict)\n owner = item['owner']\n if owner['user_type'] == 'does_not_exist':\n all(field in owner for field in non_existing_owner_fields)\n logging.debug(\n ('Item %d contains an non-existing Owner object for user \"%s\" '\n 'with all required fields: %s'), item['question_id'], \n owner['display_name'], ', '.join(non_existing_owner_fields))\n else:\n all(field in owner for field in existing_owner_fields)\n logging.debug(\n ('Item %d contains an existing Owner object for user ID \"%d\" '\n 'with all required fields: %s'), item['question_id'], \n owner['user_id'], ', '.join(existing_owner_fields))" ]
[ "0.66523606", "0.62497765", "0.6178345", "0.61579764", "0.61557573", "0.6095019", "0.60053355", "0.6005061", "0.59492403", "0.5933165", "0.59200364", "0.590281", "0.58125436", "0.57823414", "0.5743092", "0.5735133", "0.5727868", "0.5720738", "0.57188445", "0.5711546", "0.5709298", "0.5688679", "0.56570625", "0.56531507", "0.564492", "0.56375366", "0.5630461", "0.5612386", "0.55922127", "0.5585293", "0.55830604", "0.5580659", "0.5577421", "0.55757976", "0.55657804", "0.55604386", "0.55532557", "0.5541958", "0.5523554", "0.55144835", "0.54828703", "0.544964", "0.5441728", "0.5438705", "0.5436032", "0.54332036", "0.54326034", "0.54174286", "0.5414245", "0.54139125", "0.5410782", "0.5402898", "0.53951496", "0.5392877", "0.5391962", "0.5391025", "0.538477", "0.5384522", "0.5373543", "0.53702426", "0.5369338", "0.53537434", "0.53508496", "0.53095126", "0.5304496", "0.5300073", "0.5294725", "0.52893174", "0.5281066", "0.527986", "0.5269108", "0.5265754", "0.52649134", "0.5262883", "0.5260464", "0.5253236", "0.5252597", "0.5250912", "0.524969", "0.5246517", "0.5241706", "0.5241438", "0.5239464", "0.52320886", "0.5224679", "0.52213925", "0.5218891", "0.5215199", "0.5212412", "0.5212221", "0.521002", "0.5207821", "0.52048665", "0.52042997", "0.520392", "0.5202727", "0.52013546", "0.5201113", "0.5199583", "0.51983774" ]
0.6923221
0
checking if an InputError is raised if an invalid Channel ID is passed into the function
def test_channel_addowner_invalid_channel_id():
    clear()
    auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    with pytest.raises(InputError):
        assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "def _check_channel_input(self, channel):\n # da `.get` `None` zurueckgibt wenn der Schluessel `channel` nicht existiert,\n # wird auch bei fehlender Konfiguration der Fehler geworfen\n if self.channels.get(channel) != GPIO.IN:\n raise RuntimeError(\"You must setup() the GPIO channel as an input first\")", "def test__validate_channels__type_error(input_value):\n validate_channels(input_value)", "def test_react_invalid_message_id_in_channel():\n clear()\n user_a = register_n_users(1)\n channels_create(user_a[\"token\"], \"channel_a\", True)\n invalid_channel_id = -1\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], invalid_channel_id, 1)", "def test__validate_channels__passing(input_value):\n return validate_channels(input_value)", "def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "async def ticker_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def isInputValid(self, input):\r\n pass", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def test_channel_leave_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_leave(leaver['token'], invalid_id)", "def validateDevChannel( self, dev, devChannel ):\n d = self.dcDict\n if devChannel not in d[dev]['devChannels'].keys(): raise DCBoxError( 0 )", "def validate_input(self, *args):\n return", "def getValidation(myInput):\r\n if myInput == \"\":\r\n print('You did not enter the number of bugs collected.')\r\n return -1\r\n elif myInput.isnumeric() == False:\r\n print('You entered a negative or a text value, please enter numerical digits only.')\r\n return -1\r\n elif myInput.isnumeric() == True:\r\n return int(myInput)\r\n else:\r\n print('There has been a read error, please reenter your number')\r\n return -1", "def validate_channel_value(value: int) -> None:\n if 0 <= value <= 255:\n pass\n else:\n raise ValueError(\"Color channel has to be in range [0; 255]\")", "def _check_validconnectioninput(self):\n # Check if name is valid\n if self._check_name(self.symbol):\n second_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '.' 
is used:\n if self._is_period(self.symbol):\n self.symbol = self.scanner.get_symbol()\n # Check if device input begins with 'I'\n if self.names.get_name_string(self.symbol.id)[0] == \"I\":\n # Check if input number is a positive number\n try:\n inputno = int(\n self.names.get_name_string(\n self.symbol.id)[\n 1:])\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n except BaseException:\n # Input number is not valid\n self._display_syntax_error(\"number\")\n self._semicolon_skipper()\n return None, None\n # OR if DType input\n elif self._check_validdtypeinput(self.symbol):\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n else:\n # Input is not valid\n self._display_syntax_error(\"input\")\n self._semicolon_skipper()\n return None, None\n else:\n # No '.'\n self._display_syntax_error(\"period\")\n self._semicolon_skipper()\n return None, None\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n return None, None", "def get_input(msg):#function which catches all user input which is invalid (not numbers) for all the shapes\n value = None\n while not value:\n value = input(msg)\n if not value.isnumeric():#if not a valid number print the following message \n print(\"Please enter a valid number\")\n value = None\n else:\n return int(value)#once a correct number is entered the number is returned and program contiues ", "def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()", "async def channel_manage_error(self, ctx: commands.context, error):\n if isinstance(error, commands.ChannelNotFound):\n await ctx.send(\"That channel was not found, make sure the channel exists.\")\n else:\n logging.warning(error)", "def add_badchannel(self):\n text = 'Channel number: \\n(e.g.: 3, 5, 8-12)'\n uinp, ok = QInputDialog.getText(None, 'Add as bad channel', text)\n if ok:\n uinp = uinp.replace(' ', '') # removes blank spaces\n ch_str = uinp.split(',') # splits csv\n try:\n ch_list = []\n for elem in ch_str:\n if '-' in elem: # if given a range e.g. 7-12\n elem_lims = elem.split('-')\n seq = range(int(elem_lims[0]), int(elem_lims[1]) + 1)\n ch_list.extend(seq)\n else: # if given a single value\n ch_list.append(int(elem))\n self.model.BadChannelAdd(ch_list=ch_list)\n except Exception as ex:\n print(str(ex))", "def validate_args(self, in_args, cmd_call):\n valid_1, valid_2 = None, None\n\n if len(in_args) > 0 and type(in_args) is not list:\n args = in_args.split()\n valid_1 = args[0]\n elif type(in_args) is list and len(in_args) > 0:\n args = in_args\n valid_1 = args[0]\n else:\n args = []\n\n if cmd_call in ['default']:\n # Default : Returns a valid cui type for an input cui\n # checks to see if there is more than 2 arguments\n # if so, arg[0] may be a valid code\n # arg[1] may be a valid code type\n # if not ask the user what type of code type arg[0] is\n # valid_1 = valid cui type\n # valid_2 = None\n while True:\n if len(args) >= 2 and len(args) <= 3:\n input_type = args[1].upper()\n else:\n input_type = input(\"What type of id is '{0}'? 
[LOCAL/RXCUI/NDC/SNOMED]\".format(args[0])).upper()\n\n # Confirm it's a valid code type\n valid_type = self.validate_id_type(input_type)\n # Valid type is a boolean of True\n if isinstance(valid_type, str) or valid_type is None:\n return None\n elif valid_type:\n break\n elif not valid_type:\n print('Invalid Option, Please Try Again')\n continue\n valid_1 = input_type\n\n elif cmd_call in self.cmd_config_default:\n # valid_1 : Valid Cui , valid_2 : Valid Cui Type\n valid_2, _ = self.validate_args(args, 'default')\n valid_1 = args[0]\n\n elif cmd_call == 'code_lookup':\n # args[0] : Initial CUI, args[1] : Initial CUI Type, args[2] : Target CUI Type\n # valid_1 : valid cui, valid_2 : list valid source and target\n _dict_opts = util.OPTIONS_CUI_TYPES.copy()\n _avail = list(set(smores.get_dict_sources()) & set(_dict_opts))\n if len(_avail) == 0 and len(args) < 2:\n print('There are no available starting cui types that can be crosswalked.\\n'\n 'Please load a file containing valid cui types: {0}'.format(_dict_opts))\n return False, None\n\n if len(args) >= 2:\n if len(args) == 3:\n # provided cui, cui source, and target\n valid_2, _ = self.validate_args(args, 'default')\n source, target = args[1].upper(), args[2].upper()\n else:\n source, target = args[0].upper(), args[1].upper()\n valid_1 = simple_input(\"Is {0} the correct starting source? \".format(source), ['YES', 'NO', 'exit'])\n if valid_1 == 'exit':\n return False, None\n # TODO need path for valid_2\n else:\n valid_1 = simple_input(\"Which code set do you want to start with?\", _avail)\n if valid_1 != 'exit':\n _dict_opts.remove(valid_1) # Don't lookup what we've already got\n valid_2 = simple_input(\"Which code set do you want to get results for?\", _dict_opts)\n if valid_2 == 'exit':\n return False, None\n else:\n return False, None\n\n elif cmd_call == 'errors':\n _current_err = list(self.errors.keys())\n if len(args) > 1:\n smores_error('#Cx001.7', console_p=True)\n return\n elif len(args) == 1 and args[0].lower() in _current_err:\n valid_1 = args[0]\n elif len(args) == 1:\n print('There are currently no errors logged for that command.')\n return\n else:\n valid_1 = simple_input(\"Please choose a command from the list to see errors: \", _current_err)\n\n elif cmd_call in ['csv', 'remap', 'fhir', 'json']:\n # Format: [File] [Output]\n if not self.inputs['loaded']:\n print(\"No Files Loaded!\\nYou Must load a file containing local medications first\")\n else:\n _file_opts = list(self.inputs['files'].keys()) + ['All']\n _dict_opts = list(smores.get_dict_sources(True)) #+ ['All']\n _file_or_dict = None\n\n if cmd_call in ['csv', 'json']:\n if len(args) == 0:\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n elif args[0] not in _file_opts and args[0] not in _dict_opts:\n print('That option was not recognized as a valid source.')\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n else:\n valid_1 = args[0]\n\n if _file_or_dict.upper() == 'FILE':\n valid_1 = 'FILE|' + simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n elif _file_or_dict.upper() == 'DICTIONARY':\n valid_1 = 'DICT|' + simple_input(\"Please choose a code dictionary to output\", _dict_opts, True)\n elif _file_or_dict.upper() == 'EXIT':\n return None, None\n\n else:\n valid_1 = simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n if cmd_call in ['csv', 'json', 'fhir']:\n if 
len(args) == 2 and len(args[1]) > 0:\n valid_2 = args[1]\n else:\n valid_2= input(\"Please provide an output file name:\").strip()\n\n if len(valid_2) > 0:\n if \".\" in valid_2:\n valid_2, ext = valid_2.split(\".\")\n else:\n valid_2 = ''\n print('Empty file name provided, using default.')\n else:\n valid_2 = args[0]\n\n elif cmd_call == 'file':\n re_use = False\n if self.inputs['loaded'] and len(in_args) == 0:\n print(\"The following file(s) have already been loaded: \\n\" + str(self.inputs['files']))\n _load_more = simple_input(\"Would you like to load an additional file?\", ['Y', 'N', 'exit'])\n if _load_more == 'Y':\n pass\n elif _load_more == 'N':\n _re_use = simple_input(\"Would you like to re-use a loaded file?\", ['Y', 'N', 'exit'])\n if _re_use == 'Y':\n re_use = True\n else:\n return False, None\n else:\n return False, None\n\n if in_args is not None and len(in_args) > 0:\n valid_1 = in_args\n else:\n valid_1 = input(\"Please enter the name of the file to load: \") if not re_use else simple_input(\n 'Select the file to be used: ', list(self.inputs['files'].keys()), index=True)\n\n while True:\n if valid_1 in self.inputs['files']:\n if not re_use:\n print(\"It looks like you've already loaded that file. Please try a different file.\")\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n elif len(valid_1) == 0:\n smores_error('#Cx001.7', logger=smoresLog)\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n\n if not resolve_target_path(valid_1):\n valid_1, valid_2 = self.validate_args('', 'file')\n\n elif '.smr' in valid_1:\n if len(self.inputs['files']) > 0:\n print(\n 'It looks like you are trying to load a session, this will replace the current session and '\n 'all previous work.')\n _save = simple_input('Do you want to save the current session first?', ['Y', 'N', 'EXIT'])\n if _save == 'Y':\n smores.save_session(self.__version__)\n elif _save == 'EXIT':\n return False, None\n valid_2 = 'session'\n else:\n valid_2 = 'file'\n\n smoresLog.debug('Args: {0}, Validated as: {1}'.format(valid_1, valid_2))\n return valid_1, valid_2", "def test_invalid_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=ABC'.format(token))\n self.assertSocketError(ws, 4300, 'Invalid channel.')\n self.assertTrue(mock_get.called)\n self.assertFalse(mock_subscribe.called)", "def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "async def convert_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def test_validate_input_rejection(self):\n with nose.assert_raises(exceptions.RejectionError):\n 
self.dtm1.validate_input('000011')", "async def handle_user_input_error(self, ctx: Context, e: errors.UserInputError) -> None:\n if isinstance(e, errors.MissingRequiredArgument):\n embed = self._get_error_embed(\"Missing required argument\", e.param.name)\n self.bot.stats.incr(\"errors.missing_required_argument\")\n elif isinstance(e, errors.TooManyArguments):\n embed = self._get_error_embed(\"Too many arguments\", str(e))\n self.bot.stats.incr(\"errors.too_many_arguments\")\n elif isinstance(e, errors.BadArgument):\n embed = self._get_error_embed(\"Bad argument\", str(e))\n self.bot.stats.incr(\"errors.bad_argument\")\n elif isinstance(e, errors.BadUnionArgument):\n embed = self._get_error_embed(\"Bad argument\", f\"{e}\\n{e.errors[-1]}\")\n self.bot.stats.incr(\"errors.bad_union_argument\")\n elif isinstance(e, errors.ArgumentParsingError):\n embed = self._get_error_embed(\"Argument parsing error\", str(e))\n await ctx.send(embed=embed)\n self.bot.stats.incr(\"errors.argument_parsing_error\")\n return\n else:\n embed = self._get_error_embed(\n \"Input error\",\n \"Something about your input seems off. Check the arguments and try again.\"\n )\n self.bot.stats.incr(\"errors.other_user_input_error\")\n\n await ctx.send(embed=embed)\n await self.send_command_help(ctx)", "def test_react_invalid_message_id_in_different_channel():\n clear()\n user_a, user_b = register_n_users(2)\n # user_a create a channel\n channels_create(user_a[\"token\"], \"public_channel_a\", True)[\"channel_id\"]\n # user_b create a channel and send message in his own channel\n public_channel_id_b = channels_create(user_b[\"token\"], \"public_channel_b\", True)[\n \"channel_id\"\n ]\n message_id_b = message_send(\n user_b[\"token\"], public_channel_id_b, \"I am in channel_b\"\n )[\"message_id\"]\n # user_a should not be able to react the the message in the public_channel_b\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], message_id_b, 1)", "def input_error(self, errCode):\n errMsg = ''\n if 'A' in errCode: errMsg = errMsg + 'X column is not specified.\\n'\n if 'B' in errCode: errMsg = errMsg + 'X Column is not numeric.\\n'\n if 'C' in errCode: errMsg = errMsg + 'Y column is not specified.\\n'\n if 'D' in errCode: errMsg = errMsg + 'Y Column is not numeric.\\n'\n if 'E' in errCode: errMsg = errMsg + 'Z Column is not numeric.\\n'\n if 'F' in errCode: errMsg = errMsg + 'Calibration point 1 row is out of range.\\n'\n if 'G' in errCode: errMsg = errMsg + 'Calibration point 2 row is out of range.\\n'\n if 'H' in errCode: errMsg = errMsg + 'First row is not specified.\\n'\n if 'I' in errCode: errMsg = errMsg + 'Last row is not specified.\\n'\n if 'J' in errCode: errMsg = errMsg + 'First row is out of range.\\n'\n if 'K' in errCode: errMsg = errMsg + 'Last row is out of range.\\n'\n if 'L' in errCode: errMsg = errMsg + 'First and last rows are not compatible.\\n'\n self.wait_window(InputError(self, errMsg.rstrip('\\n')))", "def _validate_call_id(self, call_id):\n\n self._validate_required_data(call_id, self.CALL_ID)\n\n query = CallRecord.objects.filter(call_id=call_id)\n\n if query.exists():\n raise NotAcceptable(\n detail='Call id is already in use. 
Please, choose another')", "def validate_inputs(name, country, catches):\n while not name:\n name = input('Player name cannot be empty: ')\n\n while not country:\n country = input('Enter a valid country name: ')\n\n while not catches:\n catches = input('Now enter number of catches record: ')\n try: # Once user has input data, try to cast it to integer to verify is not string\n int(catches)\n except ValueError: # if input data is not an integer, print message and clear catches value to keep asking user to enter data\n print('Data given is not a number')\n catches = ''\n\n return name, country, catches", "def _check(self,err):\r\n if err < 0:\r\n buf_size = 128\r\n buf = create_string_buffer('\\000' * buf_size)\r\n self.nidaq.DAQmxGetErrorString(err,byref(buf),buf_size)\r\n raise RuntimeError('NI-DAQ call failed with error %d: %s'%(err,repr(buf.value)))", "def _is_valid_input(self, parameter_name):\n raise NotImplementedError()", "def _CHK(self,_err):\n if _err < 0:\n buf_size = 100\n buf = ctypes.create_string_buffer('\\000' * buf_size)\n nidaq.DAQmxGetErrorString(_err,ctypes.byref(buf),buf_size)\n raise RuntimeError(\"nidaq call failed with error %d: %s\"%(_err,repr(buf.value)))\n if _err > 0:\n buf_size = 100\n buf = ctypes.create_string_buffer('\\000' * buf_size)\n nidaq.DAQmxGetErrorString(_err,ctypes.byref(buf),buf_size)\n raise RuntimeError(\"nidaq generated warning %d: %s\"%(_err,repr(buf.value)))", "def test_dccChatMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_CHAT, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC CHAT request: ['foo']\")", "def __input_validator(msg):\n\n\t\tstatus = msg[\"status\"]\n\n\t\tif status == 1:\n\t\t\treturn status\n\t\telif status == 0:\n\t\t\tprint(msg[\"body\"])\n\t\telif status == -1:\n\t\t\tprint(\"Please enter something!\")\n\t\telif status == -2:\n\t\t\tprint(\"Your command {} is invalid\".format(msg[\"verb\"]))\n\t\telif status == -3:\n\t\t\tprint(\"No argument given after {}\".format(msg[\"verb\"]))", "def test_ap_csa_invalid(dev, apdev):\n csa_supported(dev[0])\n ap = connect(dev[0], apdev)\n\n vals = [ 2461, 4900, 4901, 5181, 5746, 5699, 5895, 5899 ]\n for val in vals:\n if \"FAIL\" not in ap.request(\"CHAN_SWITCH 1 %d\" % val):\n raise Exception(\"Invalid channel accepted: %d\" % val)", "def del_badchannel(self):\n text = 'Channel number: \\n(e.g.: 3, 5, 8-12)'\n uinp, ok = QInputDialog.getText(None, 'Delete bad channel', text)\n if ok:\n uinp = uinp.replace(' ', '') # removes blank spaces\n ch_str = uinp.split(',') # splits csv\n try:\n ch_list = []\n for elem in ch_str:\n if '-' in elem: # if given a range e.g. 
7-12\n elem_lims = elem.split('-')\n seq = range(int(elem_lims[0]), int(elem_lims[1]) + 1)\n ch_list.extend(seq)\n else: # if given a single value\n ch_list.append(int(elem))\n self.model.BadChannelDel(ch_list=ch_list)\n except Exception as ex:\n print(str(ex))", "def check_int(ch, chs):\r\n if ch not in chs:\r\n print('Applicable responses: ', chs)\r\n ch = int(input('Please enter an applicable response.\\n'))\r\n return check(ch, chs) # if not applicable, perform recursion\r\n return ch", "def whenException(self, channel, call):", "def test_channel_join_except_channel():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channels_create_v2(auth_token1, \"Chill Soc\", True)\n invalid_channel = 50\n \n with pytest.raises(InputError):\n channel_join_v2(auth_token2, invalid_channel)", "def test_should_raise_value_error_for_missing_parameters(self):\n\n assert_raises(ValueError, TCPControlBits)", "def test_request_channel_is_none(self):\n CanInfo.objects.filter(can_id=self.UUID).update(channel_name=None)\n self.assertFalse(send_rotate_to_can(self.USER, self.BIN_NUM))", "def check_input(the_user_entry):\n try:\n for z in range(length_of_bad_input):\n if bad_input[z] == the_user_entry:\n messagebox.showwarning(title=\"Invalid input!\",\n message=\"The following characters are forbidden:\\n\"\n \"~`!@#$%^&*()_-+={[}]|\\\\:;\\\"\\'<,>.?/1234567890\")\n clear_box()\n raise ValueError\n except ValueError:\n print(\"The user entered an invalid character in the entry box\\n\"\n \"potentially one of the following:\\n\"\n \"~`!@#$%^&*()_-+={[}]|\\\\:;\\\"\\'<,>.?/1234567890\")", "def check_channel_exec_request(self, channel, command):\n return False", "def error_check(command):\r\n\r\n # TODO\r", "def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def error_handler(num, err):\n print(\"Error in input {}\".format(num))\n err = err.decode()\n raise Exception(err)", "def set_channel(self):\n\t\tself.channel = int(input(\"Enter the Channel No. = \"))\n\t\tif self.channel > 1 :\n\t\t\tself.channel = int(input(\"Enter the Channel No. = \"))\n\t\t\n\t\treturn self.channel", "def check_input(input_array):\n if len(input_array) != 3:\n print(responses.ERROR_INVALID_INPUT)\n return False\n\n if not valid_port(input_array):\n return False\n\n return True", "def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return False\n\n else:\n return True", "def validateIOmoduleId(output ,arg_dict , key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric \" % (key,id))) \n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. 
\\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict", "def test_invalid_event(bot):\n expect_error(edit, InputError, bot.username, 1, False, None, None)", "def test_bad_input():\n\n for arg in ['5', 'ch']:\n rv, out = getstatusoutput('{} {}'.format(prg, arg))\n assert rv == 0\n expected = 'I do not know \"{}\".'.format(arg)\n assert out.strip() == expected", "def check_channel_shell_request(self, channel):\n return False", "def test_dccAcceptMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_ACCEPT, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND ACCEPT request: ['foo']\")", "def _check_input(self, func, args, kwargs):\n fullargspec = inspect.getfullargspec(func)\n return_msg = ''\n if fullargspec.varkw is None:\n for key in kwargs:\n if not key in fullargspec.kwonlyargs:\n return_msg += f'[Error]: not support param `{key}`. \\n'\n if fullargspec.varargs is None:\n if len(fullargspec.args) == 0:\n max_args_len = 0\n else:\n max_args_len = len(fullargspec.args)-1 if fullargspec.args[0] == 'self' else len(fullargspec.args)\n defaults_nums = 0 if fullargspec.defaults is None else len(fullargspec.defaults)\n min_args_len = max_args_len - defaults_nums\n if len(args) < min_args_len:\n return_msg += f'[Error]: have min {min_args_len} input, but you input {len(args)} args. \\n'\n if max_args_len < len(args):\n return_msg += f'[Error]: have max {max_args_len} input, but you input {len(args)} args. \\n'\n return return_msg", "def get_number_input(msg=\"Provide a number: \", num_type=int):\n while True:\n try:\n num = num_type(input(msg))\n except ValueError:\n print(f\"Whoops!! Please enter a correct number of {num_type}!!\")\n continue\n else:\n print(\"Number accepted!!\")\n return num", "def dev_port():\n while True:\n try:\n d_prt = int(\n raw_input(\n \"\\nEnter the Port number of the device (press enter if unsure): \"\n )\n or \"80\"\n )\n if int(d_prt) > 65535:\n print(\"Error! Number must be below 65535\")\n else:\n print(d_prt)\n return d_prt\n except ValueError:\n print(\"\\nThat is not a valid port number!\\nTry again.\")", "def on_badchannelkey(self, conn, event) -> None:\n channel_name = event.arguments[0]\n logger.warning('Cannot join channel %s (bad key).', channel_name)", "def test_dccSendMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_SEND, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND request: ['foo']\")", "def check_result(ec):\r\n # NOTE: This will break some oscilloscopes that are powered by USB.\r\n # Some of the newer scopes, can actually be powered by USB and will\r\n # return a useful value. That should be given back to the user.\r\n # I guess we can deal with these edge cases in the functions themselves\r\n if ec == 0:\r\n return\r\n\r\n else:\r\n ecName = error_num_to_name(ec)\r\n ecDesc = error_num_to_desc(ec)\r\n raise IOError('Error calling %s: %s (%s)' % (\r\n str(inspect.stack()[1][3]), ecName, ecDesc))", "def validateVfabric(output ,arg_dict, key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' = '%s' is not a valid Id. 
ID should be numeric \" % \n\t\t\t\t(key,id)))\n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n\t output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict", "def CHK(err):\n if err < 0:\n buf_size = 100\n buf = ctypes.create_string_buffer('\\000' * buf_size)\n nidaq.DAQmxGetErrorString(err,ctypes.byref(buf),buf_size)\n raise RuntimeError('nidaq call failed with error %d: %s'%(err,repr(buf.value)))", "def checkResult(self, errorCode):\n # NOTE: This will break some oscilloscopes that are powered by USB.\n # Some of the newer scopes, can actually be powered by USB and will\n # return a useful value. That should be given back to the user.\n # I guess we can deal with these edge cases in the functions themselves\n if errorCode == 0:\n return\n\n else:\n ecName = self.errorNumToName(errorCode)\n ecDesc = self.errorNumToDesc(errorCode)\n raise IOError('Error calling %s: %s (%s)' % (\n str(inspect.stack()[1][3]), ecName, ecDesc))", "def __connect_failed__(self):\n # Ask the user what to do with the error\n choice = input(\"[A]bort, [C]hange address and port, or [R]etry?\")\n if (choice.lower() == \"a\"):\n exit()\n elif (choice.lower() == \"c\"):\n address = input(\"Please enter the address:\")\n port_number = input(\"Please enter the port:\")", "def set_channel(self):\r\n\t\tself.channel = int(input(\"Enter the Channel No.(0-8) = \"))\r\n\t\twhile self.channel > 8 :\r\n\t\t\tself.channel = int(input(\"Enter the Channel No.(0-8) = \"))\r\n\t\t\r\n\t\treturn self.channel", "def CHK(err):\n if err < 0:\n buf_size = 100\n buf = create_string_buffer(b'\\000' * buf_size)\n ni.DAQmxGetErrorString(err,byref(buf),buf_size)\n raise RuntimeError('nidaq call failed with error %d: %s'%(err,repr(buf.value)))\n if err > 0:\n buf_size = 100\n buf = create_string_buffer(b'\\000' * buf_size)\n ni.DAQmxGetErrorString(err,byref(buf),buf_size)\n raise RuntimeError('nidaq generated warning %d: %s'%(err,repr(buf.value)))", "def test_check_response_length_invalid(input):\r\n cmd = ShdlcCmdGetErrorState(clear=False)\r\n with pytest.raises(ShdlcResponseError):\r\n cmd.check_response_length(input)", "def _check_for_incomplete_input(self):\n pass", "def amount_entered():\n while True: #Run until a suitable input is passed.\n try:\n amt = int(input(\"Enter value you wish to trade >>> \"))\n if amt <= 0:\n raise Exception\n return amt\n except ValueError: #if a string is entered\n print(\"Please enter an integer\")\n except Exception: #if a negative digit is entered\n print(\"Value cannot be less than or equal to 0\")", "def check_input(naming):\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')", "def __check_inputs__(self):\n # | - __check_inputs__\n # #####################################################################\n stop_mode = self.stop_mode\n stop_num_generations = self.stop_num_generations\n # #####################################################################\n\n if stop_mode == \"num_generations\":\n mess_i = \"stop_mode='num_generations', \\\n Must pass int to 'stop_num_generations'\"\n assert type(stop_num_generations) == type(1), mess_i\n #__|", "def _on_invalid_call(self, msg):\r\n # Workaround: Maybe a bug in their server software,\r\n # I don't know what's missing. 
Its all poorly documented :-(\r\n # Sometimes some API calls fail the first time for no reason,\r\n # if this happens just send them again. This happens only\r\n # somtimes (10%) and sending them again will eventually succeed.\r\n\r\n if msg[\"id\"] == \"idkey\":\r\n self.debug(\"### resending private/idkey\")\r\n self.client.send_signed_call(\r\n \"private/idkey\", {}, \"idkey\")\r\n\r\n elif msg[\"id\"] == \"info\":\r\n self.debug(\"### resending private/info\")\r\n self.client.send_signed_call(\r\n \"private/info\", {}, \"info\")\r\n\r\n elif msg[\"id\"] == \"orders\":\r\n self.debug(\"### resending private/orders\")\r\n self.client.send_signed_call(\r\n \"private/orders\", {}, \"orders\")\r\n\r\n elif \"order_add:\" in msg[\"id\"]:\r\n parts = msg[\"id\"].split(\":\")\r\n typ = parts[1]\r\n price = int(parts[2])\r\n volume = int(parts[3])\r\n self.debug(\"### resending failed\", msg[\"id\"])\r\n self.client.send_order_add(typ, price, volume)\r\n\r\n elif \"order_cancel:\" in msg[\"id\"]:\r\n parts = msg[\"id\"].split(\":\")\r\n oid = parts[1]\r\n self.debug(\"### resending failed\", msg[\"id\"])\r\n self.client.send_order_cancel(oid)\r\n\r\n else:\r\n self.debug(\"### _on_invalid_call() ignoring:\", msg)", "def validate_num(number):\n\n if number <= 0:\n new_num = int(raw_input(\"Oops, your number has to be greater than 0. Please pick again: \"))\n return validate_num(new_num)\n\n else:\n return number", "def validate_input(self, argin):\n try:\n configuration_dict = json.loads(argin)\n _ = configuration_dict[\"id\"]\n except (KeyError, JSONDecodeError) as err:\n msg = f\"Validate configuration failed with error:{err}\"\n self.logger.error(msg)\n return (None, ResultCode.FAILED, msg)\n except Exception as other_errs:\n msg = f\"Validate configuration failed with unknown error:{other_errs}\"\n self.logger.error(msg)\n return (None, ResultCode.FAILED, msg)\n\n return (\n configuration_dict,\n ResultCode.OK,\n \"ConfigureScan arguments validation successful\",\n )", "def input_int(question):\n while True:\n try:\n value = int(input(question))\n except (SyntaxError, NameError) as exception:\n print(\"Invalid entry. Try again.\")\n continue\n\n if value <= 0:\n print(\"Invalid entry. 
Try again.\")\n continue\n else:\n break\n\n return value", "def test_init(self):\n nt.assert_raises(Exception, CisInterface.CisInput, 'error')", "def acceptComm():\r\n\tcommand = input(\"Enter a number: \")\r\n\tif command in COMMANDS:\r\n\t\treturn command\r\n\telse: \r\n\t\tprint(\"ERROR: Command NOT Recognized\")\r\n\t\treturn acceptComm()", "def _validate_input(self):\n\n if is_empty(self.message) == True:\n raise ValidationException(\"Message cannont be empty.\")", "def check_choice():\n is_valid = 0\n while not is_valid:\n try:\n choice = int(input('Enter your choice [1-3] : '))\n if choice in [1, 2, 3]:\n is_valid = 1\n else:\n print (\"'%d' is not an option.\\n\" % choice)\n except ValueError:\n print (\"Invaid value!\")\n return choice", "def check_input(self):\n try:\n if(self.datatype == \"eeg\"):\n self.model.set_datatype(self.datatype)\n self.model.set_dyad(self.dyad)\n self.model.set_channel(self.channel_or_video)#causes loading of data\n elif(self.datatype == \"motion\"):\n self.model.set_datatype(self.datatype)\n self.model.set_filepath(self.database.dictionary[str(self.dyad)][\"video\"][str(self.channel_or_video)][\"motion\"][\"in_roi\"][\"1\"][\"path\"])#TODO NOT ALWAYS 1\n self.model.set_channel(self.channel_or_video)\n else:\n QMessageBox.about(self, \"Incorrect selection\", \"Choose datatype\")\n self.accept()\n except KeyError as e:\n QMessageBox.about(self, \"Incorrect selection\", \"Please choose wisely\" + str(e))", "def validated_input(input_msg: str, error_msg: str, validator, screenshot:str =None):\n while(True):\n reset_screen()\n\n if screenshot is not None:\n print(screenshot)\n\n data = input(input_msg)\n\n try:\n return validator(data)\n except:\n reset_screen()\n popup(error_msg.format(data), screenshot)\n input(\"\")", "def _raise_if_invalid(self):\n if self._stack_result == -1 and self._recm_data == -1:\n error_message = 'Worker result for request ID {} does not exist yet'.format(\n self.external_request_id)\n logger.exception(error_message)\n raise SARBRequestInvalidException(error_message)", "def test_get_flow_request_by_channel_id_wrong_channel_id(self):\n headers = self._get_oauth_header(client_name=DISPATCHER_NAME)\n res = self.client.get('/v1/flow_requests/search/?channel_id=unknown', **headers)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(res.json(), {'errors': ['not_found']})", "def _check(error: int) -> None:\n if error < 0:\n raise RuntimeError(ffi.string(lib.TCOD_get_error()).decode())", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def check_errors(self) -> None:", "def test_api_invalid_stream_id(self) -> None:\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n result = self.api_patch(\n user,\n \"/api/v1/users/me/subscriptions/121\",\n {\"property\": \"is_muted\", \"value\": \"somevalue\"},\n )\n self.assert_json_error(result, \"Invalid stream ID\")", "def check_raw(print_string='Please 
try again: '):\n try:\n x = int(input(\"Please guess a number between 0 and 100! You have 10 guesses\"))\n except:\n print('Please try again!')\n x = check_raw()\n return x", "async def validate(self, ctx: Context, argument: str) -> bool:\n return True", "def test_qubits_not_on_device(self, valkmusa, qubit):\n\n with pytest.raises(ValueError, match='Qubit not on device'):\n valkmusa.validate_operation(cirq.X(qubit))", "def check_user_input_if_integer(user_input):\n integer_input = ''\n while not integer_input:\n try:\n integer_input = int(user_input)\n except ValueError:\n logging.warn('only integer number accepted')\n user_input = input('enter a number: ')\n\n return integer_input", "def test_wrong_input():\n dwd = DwdWeatherWarningsAPI(None)\n assert not dwd.data_valid\n assert dwd.warncell_id is None\n assert dwd.warncell_name is None\n assert dwd.last_update is None\n assert dwd.current_warning_level is None\n assert dwd.expected_warning_level is None\n assert dwd.current_warnings is None\n assert dwd.expected_warnings is None", "def test_id_nonexistent(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"e-dad\"\n self.assertRaises(InvalidInputError, self.command.run)", "def testCCHalt(self):\n cdl_convert.config.HALT_ON_ERROR = True\n\n def getCC():\n self.ccr_bad.cc\n\n self.assertRaises(\n ValueError,\n getCC\n )", "def test_wrong_input(self):\r\n self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),\r\n 'valid', input=T.dmatrix())\r\n self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),\r\n 'valid', filters=T.dvector())\r\n self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),\r\n 'valid', input=T.dtensor3())", "def __check_errors(self):\n if not(\"input\" in self.passedArgs or \"source\" in self.passedArgs):\n raise ArgError(\"Program did not receive any of mandatory arguments! (--source=file, --input=file)\")", "def testIdNonUniqueIdOnInit(self):\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n cdl_convert.ColorCorrection,\n 'uniqueId',\n 'file'\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n try:\n cc = cdl_convert.ColorCorrection('uniqueId', 'file')\n except ValueError:\n self.fail(\"Non-unique ID was not accepted!\")\n\n self.assertEqual(\n 'uniqueId001',\n cc.id\n )", "async def error(self, channel_id,user_infos, user_id, team_id):\n # Message de commande incorrecte\n error = \"Commande invalide. Veuillez utiliser la commande [help] pour plus d'informations.\"\n return await self.sendText(error, channel_id,user_infos, team_id)" ]
[ "0.6859394", "0.6717858", "0.6630168", "0.64727926", "0.6436091", "0.6371448", "0.63068455", "0.6227858", "0.61192703", "0.60558033", "0.60163033", "0.59885454", "0.5890788", "0.58832693", "0.58782107", "0.586804", "0.58606905", "0.58477086", "0.5845201", "0.58342546", "0.5813406", "0.5790506", "0.5763038", "0.5736778", "0.5717677", "0.5711501", "0.5689048", "0.5667916", "0.565401", "0.5636305", "0.5618129", "0.5605088", "0.5603609", "0.55796576", "0.5572275", "0.5559755", "0.55594504", "0.5559265", "0.5552037", "0.5540013", "0.55224043", "0.5502939", "0.5495573", "0.5483012", "0.5473067", "0.54719216", "0.5470567", "0.5463557", "0.54535025", "0.54519796", "0.5450142", "0.5433971", "0.5413566", "0.54080147", "0.54051435", "0.5394385", "0.5389237", "0.5381232", "0.5357435", "0.53562886", "0.53558576", "0.53547347", "0.5346126", "0.5345324", "0.53443795", "0.5316279", "0.53007376", "0.5299131", "0.5298943", "0.52942616", "0.52927315", "0.5286988", "0.52813387", "0.52790505", "0.5277735", "0.5277652", "0.5274268", "0.5267341", "0.5265576", "0.5251827", "0.5249392", "0.5246592", "0.52324194", "0.52309287", "0.5227926", "0.5226278", "0.5214976", "0.5213126", "0.5209441", "0.5204141", "0.5201234", "0.51944935", "0.5189664", "0.518936", "0.51881295", "0.5185331", "0.5178355", "0.5174822", "0.5174737", "0.5170267" ]
0.63421535
6
checking if the owner of the flockr who is not the channel owner can add an owner
def test_channel_addowner_owner_flockr():
    clear()
    register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
    channel_join(register_first_result['token'], randChannel_id['channel_id'])
    channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: 
return True\n return False", "def channel_addowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id in channel[\"owners\"]:\n raise ValueError(\"user is already an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to add owners\")\n\n channels.set(channel_id, \"owners\", u_id)", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def isowner(self, o):\n return self._owner is o", "def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up", "def is_owner(self):\n return self._is_owner", "def run(self):\n # Determine if this filter doesn't apply.\n if (self.owner == None \\\n or (self.sense and self.user != self.owner) \\\n or ((not self.sense) and self.user == self.owner)):\n return 0\n\n # Perform the child actions.\n self.context.tokens['Owner'] = 
self.owner\n return super(FilterLockOwner, self).run()", "def owner_or_permissions(**perms):\n original = commands.has_permissions(**perms).predicate\n\n async def extended_check(ctx):\n if ctx.guild is None:\n raise errors.NoPrivateMessage\n return ctx.guild.owner_id == ctx.author.id or await original(ctx)\n\n return commands.check(extended_check)", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def is_still_owner(self):\n raise tooz.NotImplemented", "def test_channel_addowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "def available(self, o):\n return not self.locked() or self.isowner(o)", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def add_owner(self, user):\n user_in = user.get_groups()\n member = False\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n member = True\n ownership = Relationship(user.get(), 'owns', self.usergroup_node)\n graph.create(ownership)\n if not member:\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return 
self.usergroup_node", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False", "def test_channel_join_private_owner():\n clear()\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def set_owner_allowed(self, data):\n self._owner_allowed = self._uni(data)", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def cog_check(self, ctx):\n return ctx.author.guild_permissions.administrator", "def is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True", "def is_owned_by(self, user):\n return user and user.id == self.user_id", "def test_user_is_group_owner(self):\n self.thread.group.owners.add(self.user)\n self.assertEqual(\n Thread.public.get_by_user(\n thread_id=self.thread.pk, user=self.user),\n self.thread\n )", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def is_owner(self, is_owner):\n\n self._is_owner = is_owner", "def check_owner_permission(payload: dict, allow_user_owner: bool, obj: models.Model):\n for entity_type in [\"users\", \"groups\"]:\n for user_identification, permission in payload.get(entity_type, {}).items():\n if permission == \"owner\":\n if entity_type == \"users\" and not allow_user_owner:\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )\n\n if entity_type == \"groups\":\n raise exceptions.ParseError(\n \"Owner permission cannot be assigned to a group\"\n )\n # Here we have to check if owner permission is being revoked.\n # Unfortunately there is no way to do this without hitting the\n # database.\n elif entity_type == \"users\":\n if not allow_user_owner:\n user = fetch_user(str(user_identification))\n if obj.is_owner(user):\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke 
owner permission\"\n )", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "def get_owner_object(self):\n return False", "def is_user_is_owner(self):\n return self._tag == 'user_is_owner'", "def test_is_owner_inherited_and_local(self):\n self.make_assignment(self.project, self.user_alice, self.role_owner)\n self.assertTrue(self.project.is_owner(self.user_alice))", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def unorphaned(self):\n return self.new_owner == self.user", "def allow_sudo(message):\n if message.author.id == Guard.AUTHOR and message.channel.type == discord.ChannelType.private:\n return True\n if message.author.id in Guard.SUDO_IDS and message.channel.id in Guard.SUDO_CHANNELS:\n return True\n return False", "async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True", "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "def must_be_owner(func):\n @functools.wraps(func)\n @login_required()\n def wrapped(request, poll_name, *args, **kwargs):\n try:\n cur_poll = Poll.objects.get(url=poll_name)\n except Poll.DoesNotExist:\n return db_error(_('This poll does not seem to exist, sorry.'))\n if cur_poll.is_owner(request.user.userinformation):\n return func(request, poll_name, *args, **kwargs)\n else:\n return redirect(reverse('login')+'?next='+request.path)\n return wrapped", "def CAN_ASSIGN_OWNER(article, user): # pylint: disable=invalid-name\r\n return _is_staff_for_article(article, user)", "def check_user_has_owner_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n return userName in 
owners", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def test_can_see_owner(self):\n ThreadParticipant.objects.set_owner(self.thread, self.user)\n\n response = self.client.get(self.api_link)\n self.assertEqual(response.status_code, 200)\n\n response_json = response.json()\n self.assertEqual(response_json['title'], self.thread.title)\n self.assertEqual(\n response_json['participants'], [\n {\n 'id': self.user.id,\n 'username': self.user.username,\n 'avatars': self.user.avatars,\n 'url': self.user.get_absolute_url(),\n 'is_owner': True,\n },\n ]\n )", "def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def is_owner_or_privileged_user(obj_user, request):\n return (\n obj_user == request.user or request.user.is_superuser or is_admin_user(request)\n )", "def has_perm_or_owns(context, perm, obj, perm_obj, field_name='creator'):\n user = context['request'].user\n if user.is_anonymous():\n return False\n return access.has_perm_or_owns(user, perm, obj, perm_obj, field_name)", "def has_perm_or_owns(context, perm, obj, perm_obj, field_name='creator'):\n user = context['request'].user\n if user.is_anonymous():\n return False\n return access.has_perm_or_owns(user, perm, obj, perm_obj, field_name)", "def isOwner(id, userId):\n db = core.connect()\n return db[id][\"createdBy\"] == userId", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def requireOwn(func):\n def wrappedFunc(self, unit,*args):\n if unit.owner != self.playerID is not self:\n return \"You do not own %s\" % unit.id\n else:\n return func(self, unit, *args)\n return wrappedFunc", "def has_commit_poller(self, name):\n return 'source_repo_owner' in self._config[name]", "def has_ownership(self):\n user = self.request.user\n 
object = self.get_object()\n if object.owned_by(user):\n return True\n else:\n return False", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False", "def user_can_edit(self, user):\n return user == self.owner", "def gatekeeper():\n\n if user.name in GATEKEEPERS:\n return True\n\n return False", "def renounceOwnership():\n\n assert msg.sender == self.owner, \"Access is denied.\"\n\n log.OwnershipRenounced(msg.sender)\n self.owner = ZERO_ADDRESS", "def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def addme(update: 'Update', context: 'CallbackContext'):\n user_id = update.effective_user.id\n chat_id = update.effective_chat.id\n chats = get_chat_ids(DB)\n\n if chat_id not in chats:\n update.message.reply_text('Did not work. Run this command inside the Ko-Lab group.')\n else:\n if add_member_id(DB, user_id): \n update.message.reply_text('I have added you to the whitelist. You can now send commands from outside the Ko-Lab chat.')\n else:\n update.message.reply_text('You are already on the whitelist.')", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def require_owner():\n def add_attribute(func):\n if not hasattr(func, \"owner\"):\n func.owner = True\n return func\n return add_attribute", "def is_comment_owner(praw_comment, username):\n return praw_comment.author and praw_comment.author.name.lower() == username.lower()", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False", "def _add_owner(parent_id, child_id):\n db.session.add(\n pam.BivAccess(\n source_biv_id=parent_id,\n target_biv_id=child_id\n )\n )", "def __updater_get_new_ownership(self):\n if self._transfer_cmp_event.is_set() and not self.put_queue_flag:\n self.logger.info(\"Received transfer/accept request event in updater\")\n for comp_tuple in self._updater_map.keys():\n if int(comp_tuple[0]) not in self.msg.get_ownershipList():\n del self._updater_map[comp_tuple]\n self.msg.put_into_Queue()\n self.put_queue_flag = True\n elif not self._transfer_cmp_event.is_set():\n self.put_queue_flag = False", "def UserOwnsProject(project, effective_ids):\n return not effective_ids.isdisjoint(project.owner_ids or set())", "def has_permission(self, request, view):\n board_id = view.kwargs.get(\"pk\")\n owner_id = Board.objects.get(pk=board_id).owner.pk\n return request.user.id == owner_id", "async def ticket_add(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in 
guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n\n if user.id in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is already added.\")\n return\n\n adding_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if adding_is_admin:\n await ctx.send(\"You cannot add a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n return\n\n try:\n await channel.set_permissions(user, send_messages=True, read_messages=True)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].append(user.id)\n\n await ctx.send(f\"{user.mention} has been added to the ticket.\")", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def cog_check(self, ctx):\n if ctx.guild is None:\n raise commands.NoPrivateMessage()\n return True", "def try_aquire_lock():\n minor_print(\"Försöker 
skaffa låset ...\")\n\n lock_path = GITTED_FOLDER / \"lock\"\n\n try:\n lock_path.touch()\n except FileExistsError:\n return False\n\n shell_command([\"git\", \"add\", lock_path.as_posix()])\n shell_command([\"git\", \"commit\", \"-m\", f\"Aquired lock for {WORLD_NAME}\"])\n return True", "def has_object_permission(self, request, view, obj):\n if request.user.is_superuser:\n return True\n if request.user.profile.role == UserRole.CLIENT and obj.owner != request.user:\n return False\n if request.user.profile.role == UserRole.EXECUTOR and obj.executor != request.user:\n return False\n return True", "def has_chain(praw_r, praw_comment, username):\n if not hasattr(praw_comment, 'parent_id'):\n return False\n parent = praw_r.get_info(thing_id=praw_comment.parent_id)\n if not parent or type(parent) != praw.objects.Comment:\n return False\n return is_comment_owner(parent, username)", "def add_contract(self, contract: 'cn.Contract_HTLC') -> bool:\n if self.is_owner1(contract.payer):\n if self.amount_owner1_can_transfer_to_owner2 < contract.amount_in_msat:\n contract.invalidate()\n return False\n self._owner1_htlc_locked_setter(self._owner1_htlc_locked + contract.amount_in_msat)\n else:\n if self.amount_owner2_can_transfer_to_owner1 < contract.amount_in_msat:\n contract.invalidate()\n return False\n self._owner2_htlc_locked_setter(self._owner2_htlc_locked + contract.amount_in_msat)\n self._state.htlc_contracts.append(contract)\n return True", "def test_has_perm_or_owns_thread_edit(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n self.context['request'].user = me\n perm = 'forums_forum.thread_edit_forum'\n allowed = has_perm_or_owns(self.context, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = has_perm_or_owns(self.context, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def __is_belong_to_me(self, iceberg):\n return iceberg.owner.equals(self.__game.get_myself())", "def test_accept_member_with_owner(self):\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'id': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(5, data['id'])\n self.assertEqual('1', data['status'])\n time.sleep(1)\n self.assertEqual(1, len(mail.outbox))\n self.assertEqual(mail.outbox[0].subject,\n '[Smartribe] Membership accepted')", "def has_object_permission(self, request, view, obj):\n if request.user.is_manager or request.user == obj.registration.child.family:\n return True\n return False", "def has_object_permission(self, request, view, obj):\n if request.user.is_manager or request.user == obj.child.family:\n return True\n return False", "def test_component_chown_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component chown component2 changed_owner')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def is_current_session_owner(self):\n\t\treturn bool(call_sdk_function('PrlAcl_IsCurrentSessionOwner', self.handle))", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n 
auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def added_by(self, user):\n return ChefsHasRecipes.objects.filter(recipe=self, chef=user).exists()", "def locked(self):\n return self._owner is not None", "def allow(self, message):\n if message.author.id == Guard.AUTHOR:\n return True\n if message.author.id in Guard.BANNED_USERS:\n return False\n if self.state == State.TRUSTED_ONLY and not Guard.is_trusted(message):\n return False\n if self.state == State.SUDO_ONLY and not Guard.allow_sudo(message):\n return False\n return True", "def early_return(bot:Bot, ctx:Context):\n return ctx.message.author.bot or ctx.message.author.id == bot.user.id", "def check_user(self, requestor, requestee, default=None):\n self.lock.acquire()\n self.users.add(requestor)\n ok = requestee in self.users\n self.lock.release()\n\n # if this isn't the default app, also add the user to the default app\n if default != self and default != None:\n default.check_user(requestor, requestee)\n\n return ok", "async def interaction_check(self, interaction: Interaction) -> bool:\n if interaction.user != self.interaction_owner:\n await interaction.response.send_message(\n \":x: This is not your command to react to!\",\n ephemeral=True\n )\n return False\n return True" ]
[ "0.7385676", "0.7370181", "0.7258386", "0.70770735", "0.700007", "0.69392306", "0.6930852", "0.6926534", "0.6849369", "0.6614387", "0.6608435", "0.65947664", "0.65030146", "0.6469669", "0.6452825", "0.64186174", "0.63679105", "0.6348153", "0.6341457", "0.6329557", "0.6293552", "0.62052315", "0.6156214", "0.6154839", "0.6056385", "0.6056385", "0.6012472", "0.59986794", "0.5998133", "0.59975505", "0.59974504", "0.59606105", "0.5933716", "0.59122247", "0.5909522", "0.5908724", "0.5893155", "0.58699876", "0.58697844", "0.58689934", "0.58559185", "0.58300257", "0.5828202", "0.5793865", "0.5785643", "0.57816917", "0.57792354", "0.57758003", "0.57731265", "0.57697403", "0.576438", "0.57618034", "0.5758765", "0.57564914", "0.5756429", "0.5754732", "0.5749403", "0.5749403", "0.57488656", "0.57462144", "0.574561", "0.5724077", "0.5722168", "0.57104045", "0.5694246", "0.56735045", "0.56694245", "0.56684786", "0.5662742", "0.5644898", "0.5644898", "0.5627231", "0.5624415", "0.5583185", "0.5573969", "0.5564841", "0.55597097", "0.5543247", "0.5539969", "0.5535515", "0.5524395", "0.5519239", "0.55128676", "0.5509518", "0.5499012", "0.5493412", "0.5474767", "0.54650617", "0.54569286", "0.54523706", "0.54458195", "0.5443383", "0.5433051", "0.5431583", "0.5425356", "0.54076016", "0.540423", "0.5399398", "0.53936404", "0.5380974" ]
0.7151373
3
Checking if AccessError is raised as expected when the owner of flockr is not a member of the channel.
def test_channel_addowner_owner_flockr_not_member():
    clear()
    register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
    with pytest.raises(AccessError):
        assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. 
Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])", "def test_channel_join_private_owner():\n clear()\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def test_channel_join_except_private():\n # Clear the data structure\n 
clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", False)\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and 
self.ispm):\r\n return True\r\n return False", "def test_channel_leave_invalid_user():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_leave(leaver['token'], userchannel_id['channel_id'])", "async def lock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name == CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False, read_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n admin_role = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bot_role = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(admin_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bot_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Locked the channel to Member access.\")", "async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True", "def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def available(self, o):\n return not self.locked() or self.isowner(o)", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def get_everyone_denied(self):", "def test_component_chown_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component chown component2 changed_owner')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n 
dog.uaccess.get_resource_unshare_users(holes)))", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def test_has_perm_or_owns_thread_edit(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n self.context['request'].user = me\n perm = 'forums_forum.thread_edit_forum'\n allowed = has_perm_or_owns(self.context, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = 
has_perm_or_owns(self.context, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def owner_or_permissions(**perms):\n original = commands.has_permissions(**perms).predicate\n\n async def extended_check(ctx):\n if ctx.guild is None:\n raise errors.NoPrivateMessage\n return ctx.guild.owner_id == ctx.author.id or await original(ctx)\n\n return commands.check(extended_check)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def cog_check(self, ctx):\n return ctx.author.guild_permissions.administrator", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False", "def test_component_chown_error_bad_component(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('component chown bad_component changed_owner')\n self.assertEqual(2, rv)\n # We currently trigger a deprecation warning with py26 so we\n # can currrently only verify that the end of the output string is\n # correct\n self.assertEqual(output.endswith(self.expected_results[test_name]), True)", "def test_channel_join_except_invalid_auth():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", True)\n\n # Create invalid token for the test\n invalid_user = 999\n invalid_token = generate_token(invalid_user)\n\n with pytest.raises(AccessError):\n channel_join_v2(invalid_token, channel_id1[\"channel_id\"])", "def ownercheck(self, 
userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "async def unlock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n if (channel.category.name == CATEGORY_SO or channel.category.name == CATEGORY_GENERAL):\n await ctx.send(\"Synced permissions with channel category.\")\n return await channel.edit(sync_permissions=True)\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name != CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True, read_messages=True)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bRole = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(aRole, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bRole, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Unlocked the channel to Member access. 
Please check if permissions need to be synced.\")", "def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"[email protected]\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )", "def test_can_info_does_not_exist(self):\n fake_user = 
User(username='Fake', password='')\n self.assertFalse(send_rotate_to_can(fake_user, self.BIN_NUM))", "def test_channel_join_except_repetitive():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", True)\n\n\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])", "def test_06_self_cannot_upgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def testLockDenied(t, env):\n c = env.c1\n c.init_connection()\n # Create a file and lock it\n fh, stateid = c.create_confirm(t.code)\n res1 = c.lock_file(t.code, fh, stateid, 20, 100)\n check(res1, msg=\"Locking file %s for first owner\" % t.code)\n res2 = c.lock_file(t.code, fh, stateid, 0, 10)\n check(res2, msg=\"Locking file %s for second owner\" % t.code)\n # Create and replay LOCK ops\n ops = c.use_obj(fh)\n lock_owner = exist_lock_owner4(res1.lockid, 1)\n locker = locker4(FALSE, lock_owner=lock_owner)\n ops += [c.lock_op(WRITE_LT, FALSE, 0, 10, locker)]\n _replay(c, ops, NFS4ERR_DENIED)", "def test_missing_authorize_proof(self):\n node, other = self.create_nodes(2)\n node.send_identity(other)\n\n # permit NODE\n authorize = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"permit\"),\n (node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"authorize\")])\n node.give_message(authorize, self._mm)\n\n # OTHER wants the proof that OWNER is allowed to grant authorization to NODE\n node.give_message(other.create_missing_proof(authorize.authentication.member, authorize.distribution.global_time), other)\n\n # NODE sends dispersy-authorize containing authorize(MASTER, OWNER) to OTHER\n _, authorize = other.receive_message(names=[u\"dispersy-authorize\"]).next()\n\n permission_triplet = (self._mm.my_member.mid, u\"protected-full-sync-text\", u\"permit\")\n authorize_permission_triplets = [(triplet[0].mid, triplet[1].name, triplet[2]) for triplet in authorize.payload.permission_triplets]\n self.assertIn(permission_triplet, authorize_permission_triplets)", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = 
database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def DeniedPermissions(self) -> _n_6_t_0:", "def test_group_is_not_private_user_is_not_member(self):\n thread = self.create_thread()\n user = self.create_user()\n self.assertTrue(thread.first_message.visible_to_user(user))", "def is_still_owner(self):\n raise tooz.NotImplemented", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)", "def test_channel_leave_invalid_token():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'First', 'Last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n auth_logout(user['token'])\n with pytest.raises(AccessError):\n channel_leave(user['token'], userchannel_id['channel_id'])", "def is_private(event):\n channel = event.get('channel')\n return channel.startswith('D')", "def renounceOwnership():\n\n assert msg.sender == self.owner, \"Access is denied.\"\n\n log.OwnershipRenounced(msg.sender)\n self.owner = ZERO_ADDRESS", "def test_user_does_not_have_access(self):\n self.assertRaises(\n ObjectDoesNotExist,\n Thread.public.get_by_user,\n **{'thread_id': self.thread.pk, 'user': self.user}\n )", "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def slack_access(s, level=READ):\n try: slack_access_level = settings.SLACK_USERS[s.slack_uid]\n except: return False\n return (slack_access_level & level) != 0", "def is_permission_err(exc):\n assert isinstance(exc, OSError), exc\n # On Python 2 OSError doesn't always have 'winerror'. 
Sometimes\n # it does, in which case the original exception was WindowsError\n # (which is a subclass of OSError).\n return exc.errno in (errno.EPERM, errno.EACCES) or \\\n getattr(exc, \"winerror\", -1) in (cext.ERROR_ACCESS_DENIED,\n cext.ERROR_PRIVILEGE_NOT_HELD)", "def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)", "def assert_same_owner(path):\n try:\n assert find_owner(path) == getuser(), f\"{path} must be owned by {getuser()}\"\n except AssertionError as error:\n raise click.UsageError(str(error))\n except FileNotFoundError:\n pass", "async def permission_valid_check(cls):\n pass", "def test_group_is_private_user_is_not_member(self):\n thread = self.create_thread()\n thread.group.private = True\n thread.save()\n message = thread.first_message\n user = self.create_user()\n self.assertFalse(message.visible_to_user(user))", "def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True", "def cog_check(self, ctx):\n if ctx.guild is None:\n raise commands.NoPrivateMessage()\n return True", "def test_not_member(bot, event):\n _, event_id = event\n expect_error(edit, InputError, bot.username, event_id, False, None, None)", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def test_channel_join_except_channel():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channels_create_v2(auth_token1, \"Chill Soc\", True)\n invalid_channel = 50\n \n with pytest.raises(InputError):\n channel_join_v2(auth_token2, invalid_channel)", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", False)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up", "def test_error_on_unauthorized_read(self):\n hooks = setup_hooks(verbose=True)\n\n result = hooks.act_on_cloned_repo(UNAUTHORIZED_READ_FILE_REPO)\n\n assert result.status == Status.WARNING\n assert (\n \"java.security.AccessControlException: access denied\" in result.msg\n )", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = 
channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def check_sane(self):\n st = os.stat(self.path)\n if st.st_uid != os.getuid():\n raise Exception('Auth dir %s not owned by user %d.' % (\n self.path, os.getuid()))\n # Mode 16832 is equal to (stat.S_IFDIR | stat.S_IRWXU)\n # In other words, a directory with mode bits rwx------\n if st.st_mode != 16832:\n raise Exception('Auth dir %s not a dir or wrong permissions.' % self.path)", "def _check_access_priv(required_privilege_level):\n auth_user, prog_name, user, host, uuid = _get_client_info()\n priv_level = _get_priv_level(auth_user)\n if (PRIVILEGE_LEVELS.index(priv_level) <\n PRIVILEGE_LEVELS.index(required_privilege_level)):\n err = CONNECT_DENIED_PRIV_TMPL % (\n priv_level, required_privilege_level,\n user, host, prog_name, uuid)\n #LOG.warning(err)\n # Raise an exception to be sent back to the client.\n raise InvalidUsage(err, status_code=403)\n return True", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", True)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def has_perm_or_owns(context, perm, obj, perm_obj, field_name='creator'):\n user = context['request'].user\n if user.is_anonymous():\n return False\n return access.has_perm_or_owns(user, perm, obj, perm_obj, field_name)", "def has_perm_or_owns(context, perm, obj, perm_obj, field_name='creator'):\n user = context['request'].user\n if user.is_anonymous():\n return False\n return access.has_perm_or_owns(user, perm, obj, perm_obj, field_name)", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def _check_stream_writable(self, fe_commit):\n if not self._current_branch.stream_name:\n return\n prefix = self._current_branch.writable_stream_name + '/'\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to_depot()\n if depot_path.startswith(prefix):\n continue\n\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths not in stream '{stream}' are read-only for branch '{b}'.\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , stream = self._current_branch.writable_stream_name\n , b = self._current_branch.branch_id ))\n raise PreflightException(human_msg)", "def run(self):\n # Determine if this filter doesn't apply.\n if (self.owner == None \\\n or (self.sense and self.user != self.owner) \\\n or ((not self.sense) and self.user == self.owner)):\n return 0\n\n # Perform the child actions.\n self.context.tokens['Owner'] = self.owner\n return super(FilterLockOwner, self).run()", "def _check_caller_authority(caller, role):\r\n if not (caller.is_authenticated() and caller.is_active):\r\n raise PermissionDenied\r\n # superuser\r\n if GlobalStaff().has_user(caller):\r\n return\r\n\r\n if isinstance(role, (GlobalStaff, CourseCreatorRole)):\r\n raise PermissionDenied\r\n elif isinstance(role, CourseRole): # instructors can 
change the roles w/in their course\r\n if not has_access(caller, CourseInstructorRole(role.course_key)):\r\n raise PermissionDenied", "def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def is_locked(self):\r\n pass", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def allow_sudo(message):\n if message.author.id == Guard.AUTHOR and message.channel.type == discord.ChannelType.private:\n return True\n if message.author.id in Guard.SUDO_IDS and message.channel.id in Guard.SUDO_CHANNELS:\n return True\n return False", "def test_has_perm_per_object(self):\n user = User.objects.get(pk=47963)\n perm = 'forums_forum.thread_edit_forum'\n assert access.has_perm(user, perm, self.forum_1)\n assert not access.has_perm(user, perm, self.forum_2)", "async def test_regular_member_cannot_target_another_member(self, constants):\n constants.MODERATION_ROLES = [self.moderator_role.id]\n ctx = helpers.MockContext(author=self.author)\n\n await self.cog.user_info(self.cog, ctx, self.target)\n\n ctx.send.assert_called_once_with(\"You may not use this command on users other than yourself.\")", "def test_accept_member_with_owner_bad_request(self):\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'lol': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)", "def ensure_access(self, target_member : M, accessor : M, permission : str):\n if not permission: \n return True\n if accessor is None:\n raise errors.NotAllowed(\"Accessor not found\")\n if target_member != accessor:\n raise errors.NotAllowed(\"Access not allowed for permission '%s'\" % permission)\n return True", "def canBeAccessed(self):\n \n try:\n self._client.log(self._repositoryUri)\n return True\n except ClientError, error:\n _logger.debug(error.args[0])\n for _, errorCode in error.args[1]:\n if errorCode == 160006: # We have no commit in the repository, but its ok.\n return True\n return False" ]
[ "0.7090575", "0.6885205", "0.68405485", "0.650094", "0.63612247", "0.6272654", "0.6271527", "0.6261493", "0.62362427", "0.616833", "0.61552966", "0.611927", "0.6118742", "0.6108415", "0.60745674", "0.60703945", "0.6057438", "0.6057438", "0.6045126", "0.60374135", "0.6017522", "0.60170907", "0.5998715", "0.5953348", "0.591603", "0.58893955", "0.58893955", "0.58849776", "0.5876605", "0.5849653", "0.58414704", "0.58393854", "0.58293754", "0.58213407", "0.5815885", "0.58077526", "0.58076066", "0.58000934", "0.57956827", "0.5770739", "0.57586014", "0.57500273", "0.57381374", "0.57366663", "0.5729214", "0.57228255", "0.5707738", "0.56978846", "0.5691752", "0.56667113", "0.56662697", "0.5649454", "0.56455636", "0.56373596", "0.5626388", "0.5606881", "0.5591546", "0.55836403", "0.5578642", "0.5574008", "0.55548036", "0.55225974", "0.55168474", "0.55007106", "0.5499966", "0.549569", "0.5488996", "0.5482177", "0.54733217", "0.5468266", "0.54658556", "0.5465092", "0.5461603", "0.5450234", "0.54336315", "0.54309255", "0.54309255", "0.54290867", "0.5423642", "0.5422101", "0.541178", "0.5407145", "0.54037267", "0.5389531", "0.5388961", "0.5388648", "0.5388648", "0.5368361", "0.5367741", "0.5364607", "0.53612524", "0.53591895", "0.53518873", "0.5350133", "0.53463817", "0.5343384", "0.53398055", "0.5338882", "0.533672", "0.5334451" ]
0.7249187
0
checking if AccessError is returned as expected if member is not an owner
def test_channel_addowner_not_owner(): clear() auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) with pytest.raises(AccessError): assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensure_access(self, target_member : M, accessor : M, permission : str):\n if not permission: \n return True\n if accessor is None:\n raise errors.NotAllowed(\"Accessor not found\")\n if target_member != accessor:\n raise errors.NotAllowed(\"Access not allowed for permission '%s'\" % permission)\n return True", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n 
self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def is_still_owner(self):\n raise tooz.NotImplemented", "def test_not_member(bot, event):\n _, event_id = event\n expect_error(edit, InputError, bot.username, event_id, False, None, None)", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def isowner(self, o):\n return self._owner is o", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_invalid_member(self):\n entries = {\n 'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n },\n 'cn=agroup,ou=groups,dc=esmgquadrivium,dc=nl': {\n 'cn': ['agroup'],\n 'member': ['uid=wronguid,ou=people,dc=esmgquadrivium,dc=nl'],\n }\n }\n with self.assertRaises(CloneError):\n clone(entries)", "def test_accept_member_with_owner_bad_request(self):\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'lol': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)", "def _check_namespace_access(self, namespace, user):\n if not namespace.owners.filter(id=user.id).count():\n raise exceptions.PermissionDenied(\n 'The namespace listed on your filename must match one of '\n 'the namespaces you have access to.'\n )", "def test_06_self_cannot_upgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))", "def has_perm_or_owns(context, perm, obj, perm_obj, field_name='creator'):\n user = context['request'].user\n if user.is_anonymous():\n return False\n return access.has_perm_or_owns(user, perm, obj, perm_obj, field_name)", "def has_perm_or_owns(context, perm, obj, perm_obj, 
field_name='creator'):\n user = context['request'].user\n if user.is_anonymous():\n return False\n return access.has_perm_or_owns(user, perm, obj, perm_obj, field_name)", "def is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True", "def test_filter_owner_permission(self):\n User = get_user_model()\n user1 = User.objects.create(username=\"test_user1\", email=\"[email protected]\")\n obj = DescriptorSchema.objects.create(contributor=user1)\n obj.set_permission(Permission.VIEW, user1)\n\n data_template = {\n \"users\": {user1.id: \"view\"},\n \"groups\": {1: \"edit\", 2: \"NONE\"},\n }\n\n check_owner_permission(data_template, False, obj)\n\n # Check that only owner can set owner permission.\n data = deepcopy(data_template)\n data[\"users\"][1] = \"owner\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that only owner can rewoke owner permission.\n obj.set_permission(Permission.OWNER, user1)\n data = deepcopy(data_template)\n data[\"users\"][1] = \"edit\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that group can not be owner.\n obj.set_permission(Permission.VIEW, user1)\n data = deepcopy(data_template)\n data[\"groups\"][1] = \"owner\"\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, False, obj)\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, True, obj)", "def test_validate_owner(self):\n with self.assertRaises(ValidationError):\n self.make_assignment(self.category, self.user_bob, self.role_owner)", "def test_is_owner_inherited_and_local(self):\n self.make_assignment(self.project, self.user_alice, self.role_owner)\n self.assertTrue(self.project.is_owner(self.user_alice))", "def test_group_member_access(self, group):\n assert len(group.members) == 2", "def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True", "def test_no_owner_exception(api: API, owners: list):\n api.candlepin.get_owners.return_value = owners\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n with pytest.raises(IndexError):\n account.owner_id", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def test_group_is_not_private_user_is_not_member(self):\n thread = self.create_thread()\n user = self.create_user()\n self.assertTrue(thread.first_message.visible_to_user(user))", "def 
testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def test_func(self):\n member_to_view = self.get_object()\n is_self = self.request.user.rfid == member_to_view.rfid\n view_others = self.request.user.has_permission(\"core.view_member\")\n return view_others or is_self", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def raise_for_ownership(self, resource: Model) -> None:\n\n # pylint: disable=import-outside-toplevel\n from superset import db\n\n if self.is_admin():\n return\n\n # Set of wners that works across ORM models.\n owners: List[User] = []\n\n orig_resource = db.session.query(resource.__class__).get(resource.id)\n\n if orig_resource:\n if hasattr(resource, \"owners\"):\n owners += orig_resource.owners\n\n if hasattr(resource, \"owner\"):\n owners.append(orig_resource.owner)\n\n if hasattr(resource, \"created_by\"):\n owners.append(orig_resource.created_by)\n\n if g.user.is_anonymous or g.user not in owners:\n raise SupersetSecurityException(\n SupersetError(\n error_type=SupersetErrorType.MISSING_OWNERSHIP_ERROR,\n message=f\"You don't have the rights to alter [{resource}]\",\n level=ErrorLevel.ERROR,\n )\n )", "def test_user_not_in_group_cannot_update(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def user_can_edit(self, user):\n return user == self.owner", "def available(self, o):\n return not self.locked() or self.isowner(o)", "def is_owner(self):\n return self._is_owner", "def test_has_access_is_not_in_group(self):\n user, usrmgr_mock = self.__get_test_instance(\n \"@foouser\", 1337, group=\"bargroup\")\n usrmgr_mock.return_value.user_is_in_group.return_value = False\n usrmgr_mock.return_value.verify_user.return_value = False\n with patch.object(user, \"save\"):\n user.has_access(\"foogroup\")", "def owner_or_permissions(**perms):\n original = commands.has_permissions(**perms).predicate\n\n async def extended_check(ctx):\n if ctx.guild is None:\n raise errors.NoPrivateMessage\n 
return ctx.guild.owner_id == ctx.author.id or await original(ctx)\n\n return commands.check(extended_check)", "def CAN_ASSIGN_OWNER(article, user): # pylint: disable=invalid-name\r\n return _is_staff_for_article(article, user)", "async def test_regular_member_cannot_target_another_member(self, constants):\n constants.MODERATION_ROLES = [self.moderator_role.id]\n ctx = helpers.MockContext(author=self.author)\n\n await self.cog.user_info(self.cog, ctx, self.target)\n\n ctx.send.assert_called_once_with(\"You may not use this command on users other than yourself.\")", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def validate_ownership(item, user_id):\n if item.user_id != user_id:\n raise Forbidden('You are not allowed to modify this item.')", "def test_list_members_with_owner_rights(self):\n url = '/api/v1/communities/4/retrieve_members/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(3, data['count'])", "def has_error(self):\n return len(self.unmapped) or len(self.author_denied) \\\n or len(self.pusher_denied) or len(self.foruser_denied) \\\n or len(self.fusion_denied)", "def test_inconsistent_membership(self):\n # Has qMemberStart+qMemberEnd, so should not be in current members group\n entries = {\n 'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qMemberStart': [datetime(2010, 2, 2)],\n 'qMemberEnd': [datetime(2010, 5, 2)],\n },\n 'cn=huidige leden,ou=groups,dc=esmgquadrivium,dc=nl': {\n 'cn': ['Huidige leden'],\n 'member': ['uid=TEst,ou=people,dc=esmgquadrivium,dc=nl'],\n }\n }\n with self.assertRaises(CloneError):\n clone(entries)\n\n # Has no qMemberStart+qMemberEnd, so should not be in current members group\n entries = {\n 'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n },\n 'cn=huidige leden,ou=groups,dc=esmgquadrivium,dc=nl': {\n 'cn': ['Huidige leden'],\n 'member': ['uid=test,ou=people,dc=esmgquadrivium,dc=nl'],\n }\n }\n with self.assertRaises(CloneError):\n clone(entries)\n\n # Has only qMemberStart, so should be in current members group\n entries = {\n 'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qMemberStart': [datetime(2010, 2, 2)],\n },\n 'cn=huidige leden,ou=groups,dc=esmgquadrivium,dc=nl': {\n 'cn': ['Huidige leden'],\n 'member': [],\n }\n }\n with self.assertRaises(CloneError):\n clone(entries)\n\n # Has only qMemberEnd, should raise error\n entries = {\n 'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qMemberEnd': [datetime(2010, 2, 2)],\n },\n }\n with self.assertRaises(CloneError):\n clone(entries)", "def assertNotIn(self, member, container, msg=None):\r\n if member in container:\r\n standardMsg = '%s unexpectedly found in %s' % (safe_repr(member), \r\n safe_repr(container))\r\n self.fail(self._formatMessage(msg, standardMsg))", "def assert_same_owner(path):\n try:\n assert find_owner(path) == getuser(), f\"{path} must be owned by {getuser()}\"\n except 
AssertionError as error:\n raise click.UsageError(str(error))\n except FileNotFoundError:\n pass", "def test_user_does_not_have_access(self):\n self.assertRaises(\n ObjectDoesNotExist,\n Thread.public.get_by_user,\n **{'thread_id': self.thread.pk, 'user': self.user}\n )", "def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))", "def verify_user(self):\n if self.username == \"root\":\n print \"Error: Please do not run this script as root.\"\n sys.exit(1)\n\n members = grp.getgrnam(self.groupowner)[3]\n if not self.username in members:\n print \"Error: The user who runs this script must belong to the group: \" + self.groupowner\n sys.exit(1)", "def test_can_info_does_not_exist(self):\n fake_user = User(username='Fake', password='')\n self.assertFalse(send_rotate_to_can(fake_user, self.BIN_NUM))", "def is_member(self) -> bool:\n if self._is_member is _missing:\n return False\n return self._is_member", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up", "def test_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)", "def test_user_not_in_group_cannot_update_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_not_creator_cannot_update_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_request\n )", "def check_access(circle, member_required=False):\n member = db.session.query(Member).filter(db.and_(Member.circle == circle, Member.user == g.user)).first()\n if member:\n return member\n elif member_required:\n abort(404)\n else:\n if str(circle.id) in g.invitations:\n return None\n else:\n abort(404)", "def is_member(cls, attr):\n return cls._attributes[attr].kind == ResourceAttributeKinds.MEMBER", "def test_user_can_change_not_author(self):\n 
self.assertFalse(self.story.user_can_change(self.user2))", "def has_permission(self, request, view):\n return request.user.group != 'patient'", "def test_cant_ban_user_from_community_if_member(self):\n user = make_user()\n headers = make_authentication_headers_for_user(user)\n\n other_user = make_user()\n community = make_community(creator=other_user, type='P')\n community_name = community.name\n\n user.join_community_with_name(community_name)\n\n user_to_ban = make_user()\n\n url = self._get_url(community_name=community.name)\n response = self.client.post(url, {\n 'username': user_to_ban.username\n }, **headers)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.assertFalse(user_to_ban.is_banned_from_community_with_name(community.name))", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def testPersonIsUser(self):\n member = self.portal.portal_membership.getMemberById('abc123')\n self.failUnless(member,\"%s\" % member)", "def validate_access(self, view, rights, prefix, scope_path, field):\n\n access_level = self.cleaned_data[field]\n\n if not has_access(rights, access_level, scope_path, prefix):\n self._errors[field] = ErrorList([DEF_NO_RIGHTS_FOR_ACL_MSG])\n del self.cleaned_data[field]", "def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def test_list_members_with_mod_rights_not_accepted(self):\n url = '/api/v1/communities/3/retrieve_members/'\n\n # Test before acceptation\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)", "def is_private():", "def test_get_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user1_template.id, request=mock_request\n )", "def test_cant_unban_user_from_community_if_member(self):\n user = make_user()\n headers = make_authentication_headers_for_user(user)\n\n other_user = make_user()\n community = make_community(creator=other_user, type='P')\n community_name = community.name\n\n user.join_community_with_name(community_name)\n\n user_to_unban = make_user()\n\n other_user.ban_user_with_username_from_community_with_name(username=user_to_unban.username,\n community_name=community_name)\n\n url = self._get_url(community_name=community.name)\n response = self.client.post(url, {\n 'username': user_to_unban.username\n }, **headers)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n self.assertTrue(user_to_unban.is_banned_from_community_with_name(community.name))", "def _checkPlayer(self):\r\n pawn = self.startCell.getPawn()\r\n if(not pawn.owner == self.player):\r\n message = (\"Player (%r) is not allowed to move that pawn (%r)\" %\r\n (self.player, 
pawn))\r\n raise IllegalMoveException(message)", "def check_sane(self):\n st = os.stat(self.path)\n if st.st_uid != os.getuid():\n raise Exception('Auth dir %s not owned by user %d.' % (\n self.path, os.getuid()))\n # Mode 16832 is equal to (stat.S_IFDIR | stat.S_IRWXU)\n # In other words, a directory with mode bits rwx------\n if st.st_mode != 16832:\n raise Exception('Auth dir %s not a dir or wrong permissions.' % self.path)", "def test_call_bad_perms(self):\r\n self.assertRaises(ValueError, self.cs_overview, -1)", "def has_object_permission(self, request, view, obj):\n if request.user.is_manager or request.user == obj.child.family:\n return True\n return False", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def test_anon_private_owned(self):\n self.do_visible(False, 'pattieblack', False)", "def validate_member_user(self, member):\n if TeamMember.objects.filter(team=self.team, member=member).exists():\n raise forms.ValidationError(_('User is already a team member'),)\n return member", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_detail_not_contributor_forbidden(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c2.pk))\n self.assert403(resp)", "def test_get_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_group_is_private_user_is_not_member(self):\n thread = self.create_thread()\n thread.group.private = True\n thread.save()\n message = thread.first_message\n user = self.create_user()\n self.assertFalse(message.visible_to_user(user))", "def has_object_permission(self, request, view, obj):\n return not obj.permission == \"author\"", "def has_ownership(self):\n user = self.request.user\n object = self.get_object()\n if object.owned_by(user):\n return True\n else:\n return False", "def test_upsert_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user2_template, request=mock_request\n )", "def test_has_perm_or_owns_thread_edit(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n self.context['request'].user = me\n perm = 'forums_forum.thread_edit_forum'\n allowed = has_perm_or_owns(self.context, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = has_perm_or_owns(self.context, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def 
cog_check(self, ctx):\n return ctx.author.guild_permissions.administrator", "def is_owner_or_privileged_user(obj_user, request):\n return (\n obj_user == request.user or request.user.is_superuser or is_admin_user(request)\n )", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def test_not_creator_cannot_delete(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def test_get_other_users_template_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.get_by_id(\n self.fixture.user2_template.id, request=mock_request\n )", "def test_logged_in_owner(self):\n self.make_logged_in_owner()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u, album.display_album, ALBUM_PRIVATE)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PRIVATE)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u, album.display_photo, ALBUM_PRIVATE)", "def check_owner_permission(payload: dict, allow_user_owner: bool, obj: models.Model):\n for entity_type in [\"users\", \"groups\"]:\n for user_identification, permission in payload.get(entity_type, {}).items():\n if permission == \"owner\":\n if entity_type == \"users\" and not allow_user_owner:\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )\n\n if entity_type == \"groups\":\n raise exceptions.ParseError(\n \"Owner permission cannot be assigned to a group\"\n )\n # Here we have to check if owner permission is being revoked.\n # Unfortunately there is no way to do this without hitting the\n # database.\n elif entity_type == \"users\":\n if not allow_user_owner:\n user = fetch_user(str(user_identification))\n if obj.is_owner(user):\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False" ]
[ "0.71381223", "0.679959", "0.67456234", "0.65781903", "0.6487132", "0.64113057", "0.6391973", "0.6355338", "0.6317414", "0.6261393", "0.6230763", "0.6224077", "0.62140906", "0.6203331", "0.6188333", "0.6188333", "0.6179087", "0.6173048", "0.6162917", "0.6162814", "0.6100645", "0.60996383", "0.6093759", "0.6087477", "0.60468113", "0.6038829", "0.6022544", "0.60149544", "0.60100067", "0.59882796", "0.59799045", "0.5957047", "0.5956374", "0.59485966", "0.59475946", "0.5925814", "0.59243894", "0.59224606", "0.5921394", "0.5908442", "0.5901203", "0.58678705", "0.58678705", "0.58641124", "0.58618563", "0.5861148", "0.5847537", "0.58384424", "0.5829558", "0.5829281", "0.58269864", "0.5818585", "0.58097005", "0.58073354", "0.5798779", "0.57983667", "0.5788674", "0.5787339", "0.57871455", "0.57825613", "0.5777521", "0.57749903", "0.5772771", "0.5771869", "0.5766166", "0.5752832", "0.57401043", "0.57334757", "0.5730369", "0.57248574", "0.57227147", "0.57173127", "0.57102305", "0.5706701", "0.5706344", "0.57035476", "0.5694764", "0.5693543", "0.5687308", "0.5687308", "0.5684941", "0.5673647", "0.56702316", "0.5667523", "0.56661046", "0.5644445", "0.56430453", "0.5633598", "0.5633534", "0.56290805", "0.5627972", "0.56252575", "0.56246704", "0.56222147", "0.56125665", "0.5611432", "0.560789", "0.5606447", "0.56035", "0.56027496" ]
0.60019386
29
checking if able to remove an owner who is an owner with authorised token is successful
def test_channel_removeowner_standard_input(): clear() auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen') channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == { 'name' : 'Random Channel', 'owner_members': [ { 'u_id': 2, 'name_first': 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' } ], 'all_members': [ { 'u_id': 2, 'name_first': 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' }, { 'u_id': 3, 'name_first' : 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' } ] })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], 
register_second_result['u_id'])", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def post(self, request):\n if 'person_id' in self.request.POST:\n user = User.objects.get(person__id=self.request.POST['person_id'])\n if AccessToken.objects.filter(user=user).exists():\n tokens = AccessToken.objects.filter(user=user)\n for token in tokens:\n token.revoke()\n logout(request)\n return Response({'status': True})\n return Response({'status': False})", "def test_destroy_not_owner(self):\n\n self.assertEqual(first=1, second=Post.objects.all().count())\n url = reverse('post-detail', args=(self.post.id,))\n self.client.credentials(HTTP_AUTHORIZATION=self.token_1)\n response = self.client.delete(path=url)\n self.assertEqual(first=403, second=response.status_code)\n self.assertEqual(first=1, second=Post.objects.all().count())", "def test_delete_saved_filter_not_owner(self):\n filter_id = self.filter_1.pk\n url = reverse('xds_api:saved-filter', args=(filter_id,))\n _, token = AuthToken.objects.create(self.user_2)\n response = \\\n self.client.delete(url,\n HTTP_AUTHORIZATION='Token {}'.format(token))\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_delete_owner(self):\n self.assertEqual(RoleAssignment.objects.count(), 3)\n url = reverse(\n 'projectroles:api_role_destroy',\n kwargs={'roleassignment': self.owner_as.sodar_uuid},\n )\n response = self.request_knox(url, method='DELETE')\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 3)", "def test_destroy_owner(self):\n\n self.assertEqual(first=1, second=Post.objects.all().count())\n url = reverse('post-detail', args=(self.post.id,))\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n response = self.client.delete(path=url)\n self.assertEqual(first=204, second=response.status_code)\n self.assertEqual(first=0, second=Post.objects.all().count())", "def test_remove_already_not_subbed(self) -> None:\n result = 
self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)", "def test_delete_request_by_non_owner(self):\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION=self.test_user2_token)\n response = client.post('/api/places/', self.restaurant_data, format='json')\n url = f\"/api/places/{response.data['id']}/\"\n\n client.credentials(HTTP_AUTHORIZATION=self.test_user1_token)\n response = client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_assigned_resource_by_non_admin(self):\n CommonTestCases.user_token_assert_in(\n self,\n delete_assigned_resource_mutation,\n \"You are not authorized to perform this action\"\n )", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_delete_request_by_owner(self):\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION=self.test_user2_token)\n response = client.post('/api/places/', self.restaurant_data, format='json')\n url = f\"/api/places/{response.data['id']}/\"\n\n response = client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_delete_non_owner(self):\n another_user = CustomUser.objects.create(id=134, email='[email protected]', is_active=True)\n another_user.set_password('qwerty12345')\n another_user.save()\n\n self.client.login(email='[email protected]', password='qwerty12345')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 87876})\n\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 403)", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_delete_author_unlogged(self):\n request = self.client.delete(self.epoint)\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)", "def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 
'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def test_realm_admin_remove_others_from_public_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=16,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_order_cannot_be_deleted_if_not_owner(self):\n\n\t\tres = self.login_user()\n\t\tress = self.login_admin_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\t\ta_access_token = json.loads(ress.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/1',\n\t\t\theaders={\"x-access-token\": a_access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 401)\n\t\tself.assertEqual(result[\"message\"], \n\t\t\t\"Not authorized to perform this function!\")", "def check_owner_permission(payload: dict, allow_user_owner: bool, obj: models.Model):\n for entity_type in [\"users\", \"groups\"]:\n for user_identification, permission in payload.get(entity_type, {}).items():\n if permission == \"owner\":\n if entity_type == \"users\" and not allow_user_owner:\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )\n\n if entity_type == \"groups\":\n raise exceptions.ParseError(\n \"Owner permission cannot be assigned to a group\"\n )\n # Here we have to check if owner permission is being revoked.\n # Unfortunately there is no way to do this without hitting the\n # database.\n elif entity_type == \"users\":\n if not allow_user_owner:\n user = fetch_user(str(user_identification))\n if obj.is_owner(user):\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def test_remove_from_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=['Team_foo'])\n req = User.create(name='requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)", "def delete_user():", "def channel_removeowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id not in channel[\"owners\"]:\n raise ValueError(\"user is not an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise 
AccessError(\"You do not have permission to remove owners\")\n\n channels.remove(channel_id, \"owners\", u_id)", "async def claim(self, ctx: \"IceTeaContext\", otag: TagConverter):\n tag: models.Tag = otag\n author = ctx.guild.get_member(tag.author)\n if not author:\n tag.author = ctx.author.id\n await tag.save()\n await ctx.send(f\"You have sucessfully claimed {tag.id}\")\n else:\n await ctx.send(\"The Tag owner is still in the server\")", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_order_can_be_deleted_by_owner(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/1',\n\t\t\theaders={\"x-access-token\": access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertEqual(result[\"message\"], \"Order deleted succesfully\")", "def delete_entity_owner(self, username):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_DELETE_ENTITY_OWNER + ':' + username)", "def test_kyc_delete_legal_share_holder_natural(self):\n pass", "def handle_owner_delete(owner_id):\n\n owner = Owner.find_by_id(owner_id)\n # flash error message if owner does not exist\n if not owner:\n flash(f'Owner does not exist!', 'danger')\n return 'not deleted', 404\n # flash error message if owner still has existing content\n elif owner.contents:\n flash(f'{owner.owner_name} still has existing content!', 'danger')\n return 'not deleted', 400\n\n # owner is deleted and user is redirected (redirect code in owners.js)\n # deleting owner errors handled\n try:\n owner.delete_owner()\n except HTTPException:\n return \"Server cannot delete the owner at this time\", 500\n\n flash(f'{owner.owner_name} has been deleted!', 'success')\n return 'deleted', 202", "def test_remove_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Invalid Requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = 
auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def collection_special_author_cancel(user_id, author_id):\n\n another_user_id = author_id\n if (user_id == another_user_id):\n return \"self\"\n query = db_session.query(Collection_User).filter_by(\n user_id=user_id, another_user_id=another_user_id).all()\n if len(query) == 1:\n db_session.delete(query[0])\n db_session.commit()\n update_collection_num(user_id, another_user_id, False)\n else:\n return \"already\"\n return \"success\"", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_remove_self_from_team_success(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n user.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(user),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")", "def test_delete_author_logged(self):\n self.client.force_authenticate(user=self.user)\n\n request = self.client.delete(self.epoint)\n self.assertEqual(request.status_code, status.HTTP_204_NO_CONTENT)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if instance.role_type.lower() == 'owner':\n return Response({'message': 'Owner cannot be deleted'}, status=status.HTTP_400_BAD_REQUEST)\n else:\n self.perform_destroy(instance)\n return Response(status=status.HTTP_204_NO_CONTENT)", "def testDeleteAccessDenied(self):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(None, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(user, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_403()", "def test_handle_remove_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"web\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email 
protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def check_delete_permission(self):\n if getSecurityManager().checkPermission(\"Delete objects\", self):\n username = getSecurityManager().getUser().getUserName()\n if username == self.getOwner().getId():\n return True\n return False", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def clean(self):\n super().clean()\n if self.user2:\n self.orig_cloud.delete_user(self.user2.id)", "def destroy(self, request, *args, **kwargs):\n instance = Group.objects.get(pk=kwargs['pk'])\n\n if instance.owner_id != request.user.id and not request.user.is_superuser:\n return not_allowed_to_do()\n\n return super().destroy(request, args, kwargs)", "def test_remove_fellow_from_organization_success(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Valid Requestor', email='[email protected]',\n user_type='user',\n owned_organizations=['Organization_foo'])\n user.put()\n req.put()\n\n # Successful removal.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n )\n self.assertEqual(json.loads(response.body)['owned_organizations'], [])\n\n # Changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(fetched_user.owned_organizations, [])\n self.assertEqual(user.user_type, fetched_user.user_type)", "def testPostAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n response = self.runPost(user, data=self.post_data)\n self.response_201(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertIn(\"sodar_uuid\", data)\n SequencingMachine.objects.filter(sodar_uuid=data[\"sodar_uuid\"]).delete()", "async def remove(self, ctx, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n namesp = None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for 
x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(x.display_name))\n elif x.id in self.db[server.id]:\n del self.db[server.id][x.id]\n self.save_db()\n await self.bot.say(\"{} has been removed from the list.\".format(x.display_name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(name.display_name))\n return\n elif name.id in self.db[server.id]:\n del self.db[server.id][name.id]\n self.save_db()\n await self.bot.say(\"{} has been deleted from the list.\".format(name.display_name))", "async def _ad_remove(self, ctx, member):\n member_object = discord.utils.find(\n lambda x: x.name == member or str(x) == member or (member.isnumeric() and x.id == int(member)),\n ctx.guild.members\n )\n if member_object is not None:\n member = member_object.id\n elif member.isnumeric():\n member = int(member)\n\n admin = list(filter(lambda x: x.user_id == member, self.database.get_admins(ctx.guild.id)))\n if admin:\n self.database.remove_item(admin[0])\n if member_object:\n await ctx.send(f\"Removed admin from {member_object.name}\")\n else:\n await ctx.send(\"Removed admin from invalid user\")\n else:\n await ctx.send(\"That person isn't an admin!\")", "def has_remove_permissions(self, obj):\n return True", "def removeToken(self, token):\n self.__require_privilaged_access()\n with DBSession(self.__config_db) as session:\n # Check if the given token is a personal access token so it can be\n # removed.\n user = self.getLoggedInUser()\n num_of_removed = session.query(Session) \\\n .filter(Session.user_name == user) \\\n .filter(Session.token == token) \\\n .filter(Session.can_expire.is_(False)) \\\n .delete(synchronize_session=False)\n session.commit()\n\n if not num_of_removed:\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.DATABASE,\n \"Personal access token {0} was not found in the \"\n \"database.\".format(token))\n\n # Invalidate the local session by token.\n self.__manager.invalidate_local_session(token)\n\n LOG.info(\"Personal access token '%s...' 
has been removed by '%s'.\",\n token[:5], self.getLoggedInUser())\n\n return True", "async def ticket_remove(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n if user.id not in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is not added.\")\n return\n\n removing_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if removing_is_admin:\n await ctx.send(\"You cannot remove a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n\n try:\n await channel.set_permissions(user, send_messages=False, read_messages=False)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].remove(user.id)\n\n await ctx.send(f\"{user.mention} has been removed from the ticket.\")", "def test_remove_user(self):\n pass", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def remove(self, token):\n self.rpc.call(MsfRpcMethod.AuthTokenRemove, [token])", "def test_permission_remove_one_action_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous TICKET_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_captain_removes_teammate_success(self):\n team = Team.create(name='foo', program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n captain = User.create(name='captain', email='[email protected]',\n user_type='user', owned_teams=[team.uid])\n team.captain_id = captain.uid\n user.put()\n captain.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(captain),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def delete_principal_data( user_email ):\n \n sp = get_principal_data( user_email )\n if sp is not None:\n sp.delete()\n \n return True", "async def __remove(self, ctx, name: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n if name is None:\n name = author\n if server.id not in self.db:\n self.db[server.id] = {}\n if \"bookkeeper\" not in self.db[server.id]:\n self.db[server.id][\"bookkeeper\"] = []\n await self.bot.say(\"Bookkeeper list is currently empty, add new bookkeepers using points keeper add\"\n \" <Discord name or nickname>\")\n self.save_db()\n return\n if name.id not in self.db[server.id][\"bookkeeper\"]:\n await self.bot.say(\"Keeper is not registered, please make sure the name or nickname is correctly spelled. \"\n \"You can check using points keeper list\")\n return\n self.db[server.id][\"bookkeeper\"].remove(name.id)\n self.save_db()", "async def remove(message, client, extra_args):\n\n if await funnypts_transaction(message, client, extra_args, \"remove\"):\n await message.channel.send(\"BRUH, THAT WAS CRINGE. 
SOMEONE JUST REVOKED YOUR FUNNYPOINT\")", "def test_owner_delete_assessment(self):\n response = self.user_01.delete(self.assessment_custom_url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n response = self.user_01.get(self.assessment_custom_url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def remove_member(self, request, pk):\n farm = self.get_object()\n user = request.data.get('user')\n farm.remove_member(user)\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def _should_delete(self, msg, ctx):\n # Do not remove the user's call\n if msg.id == ctx.message.id:\n return False\n # Remove command calls\n if msg.content.startswith(ctx.prefix):\n return True\n # Remove our bot's messages\n if msg.author == self.bot.user:\n return True\n return False", "def delete_user():\n #TODO user delete\n pass", "def test_cannot_delete_user_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/users/2',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def destroy_master_token(user, repo, config, name):\n tokens = get_master_tokens(user, repo, config)\n\n for token in tokens:\n if token['name'] == name:\n print(\"Found token with name: {}\".format(name))\n try:\n url = \"{}{}\".format(config['domain_base'],\n token['paths']['self'])\n resp = (api_call(url, 'delete', config['debug']))\n except ValueError as ex:\n abort(\"Unexpected response from packagecloud API: \"\n \"{}\".format(ex.message))\n if resp.status_code == 204:\n print(\"Token destroyed, name: {}\".format(name))\n print(\"Result: {}\" % resp)\n else:\n eprint(\"ERROR: Destroying token {} failed\".format(name))\n eprint(\"Result: {}\".format(resp))\n\n return True", "def delete_leader(self):", "def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','bigfirms'):\n abort(403)", "def delete_volumeaccessright_record( vac ):\n \n principal_id = vac.owner_id.email \n volume_name = vac.volume.name \n \n try:\n observer_core.ensure_volume_access_right_absent( principal_id, volume_name )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to revoke access from %s to %s\" % (principal_id, volume_name))\n raise e\n \n return True", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def 
test_delete_assigned_resource_by_admin(self):\n CommonTestCases.admin_token_assert_equal(\n self,\n assign_resource_mutation,\n assign_resource_mutation_response,\n )\n\n CommonTestCases.admin_token_assert_equal(\n self,\n delete_assigned_resource_mutation,\n delete_assigned_resource_response\n )", "async def remove(ctx, pkmn_id: int):\n res = database.remove_from_party(ctx.message.author, pkmn_id)\n if not res:\n ctx.send(\"**Oak**: Make sure you actually have that pokemon or if your party is not full ya scrub.\")\n return await show_party(ctx.message.author)", "def test_permission_remove_unknown_user(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove joe TICKET_VIEW')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_delete_user_with_valid_input_using_token(self):\n # setup\n user = self.generate_username_password()\n resp1 = self.create_user(user)\n try:\n assert resp1.status_code == 201\n assert resp1.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp1.request)\n self.pprint_response(resp1)\n resp_body1 = resp1.json()\n uuid_ = resp_body1[\"userID\"]\n resp2 = self.generate_token(user)\n try:\n assert resp2.status_code == 200\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp2.request)\n self.pprint_response(resp2)\n resp_body2 = resp2.json()\n token = resp_body2[\"token\"]\n\n # test\n resp3 = self.delete_user_token(uuid_, token)\n try:\n assert resp3.status_code == 204\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp3.request)\n self.pprint_response(resp3)\n\n # teardown: none", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def deltoken(confirm, name):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n mph.wallet.removeTokenFromPublicName(name)\n set_shared_morphene_instance(stm)", "def test_kyc_delete_legal_board_member(self):\n pass", "def can_delete(self, user):\n raise Return(False)", "def test_delete_o_auth_authorize_token(self):\n pass", "def unorphaned(self):\n return self.new_owner == self.user", "async def remove_player(ctx, group_name: str, player_name: str, owner: str=None):\n\n if owner and owner != ctx.message.author.name:\n if ctx.message.author.id != bot.owner_id:\n await ctx.send(\"Sorry, you don't have permission to modify that group. 
Nerd.\")\n else:\n owner = ctx.message.author.name\n \n if owner in bg_bot.manager.groups:\n for group in bg_bot.manager.groups[owner]['groups']:\n if group.name == group_name:\n if group.remove_member(player_name):\n response = f'Removed {player_name} from {group_name} successfully!'\n break\n else:\n response = \"Error removing player!\"\n break\n\n else:\n response = \"No groups exist that match the input criteria.\"\n \n await ctx.send(response)", "def test_remove_coach_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))", "def test_remove_coach_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))", "def test_delete_collection_o_auth_authorize_token(self):\n pass", "def _user_delete(sender, instance, using, **kwargs):\n Booking.objects.filter(requester=instance).update(\n requester=get_sentinel_user(instance.group)\n )", "def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete', 'advisors'):\n abort(403)", "def delete_self_ownership(self):\n current_ownership_list = self.msg.get_ownershipList()\n self.old_ownership_list = current_ownership_list\n for comp in self.deleted_comp_list:\n if comp in current_ownership_list:\n current_ownership_list.remove(comp)\n self.logger.debug(\"After removing transfer component ownership, \\\n new ownership: %s\" % current_ownership_list)\n self.msg.set_ownershipList(current_ownership_list)", "def test_is_team_owner_rank_permission(self):\n\n weak = RankFactory(name='weak soul', team=self.team)\n middle = RankFactory(name='middle soul', team=self.team)\n non_owner = AnotherUserFactory()\n params = {'pk': weak.id}\n edited_weak_name_name = 'small weak soul'\n edited_middle_name_name = 'edited middle soul'\n data = {'name': edited_weak_name_name}\n response = self.client.patch(reverse('api:ranks-detail', kwargs=params), data=data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('name'), edited_weak_name_name)\n\n token = Token.objects.get(user=non_owner)\n self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}')\n data = {'name': edited_middle_name_name}\n params = {'pk': middle.id}\n response = self.client.patch(reverse('api:ranks-detail', kwargs=params), data=data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','admins'):\n abort(403)", "async def approve(self, ctx, user: discord.Member):\n server = ctx.message.server\n if user.id in self.norole[server.id]:\n if self.norole[server.id][user.id]['Role'] == True:\n self.norole[server.id][user.id] = {'Role': False}\n dataIO.save_json(self.warninglist, self.norole)\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n await self.bot.remove_roles(user,nobnl)\n msg = await self.bot.say (\"Role removed!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg) \n await self.bot.delete_message(ctx.message)\n else:\n msg = await self.bot.say(\"There is no role to remove!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg)\n await self.bot.delete_message(ctx.message)", "def test_owner_delete_blogpost(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n owner = 
UserFactory.create_batch(2)[1]\r\n app = AppFactory.create()\r\n blogpost = BlogpostFactory.create(app=app, owner=owner)\r\n\r\n assert self.mock_authenticated.id == blogpost.owner.id\r\n assert_not_raises(Exception, getattr(require, 'blogpost').delete, blogpost)", "def test_delete(client):\n rv = delete(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def _check_token_is_revoked(self, jti: str) -> None:\n redis = self._conn_redis()\n entry = redis.get(jti)\n if entry and entry == 'true':\n raise HTTPException(status_code=401,detail=\"Token has been revoked\")" ]
[ "0.6920005", "0.6687383", "0.66869026", "0.652796", "0.65141463", "0.6513035", "0.6434651", "0.6367297", "0.6212406", "0.6208368", "0.62047887", "0.61992234", "0.61753887", "0.61734676", "0.61203825", "0.6116047", "0.61136854", "0.60691315", "0.60652006", "0.6058773", "0.6047488", "0.60381395", "0.6035085", "0.6028748", "0.60180974", "0.60172087", "0.6012856", "0.59962857", "0.5986537", "0.5975413", "0.5925387", "0.5920951", "0.5902252", "0.5891231", "0.58884406", "0.5882365", "0.58794755", "0.58789486", "0.58765477", "0.58696705", "0.58305216", "0.5816738", "0.5814086", "0.5812053", "0.58112574", "0.5788053", "0.57726544", "0.57704055", "0.5767898", "0.57616407", "0.57611793", "0.57599527", "0.5759245", "0.5743253", "0.570711", "0.5706308", "0.56981814", "0.56816167", "0.56721", "0.56349605", "0.5634201", "0.56304675", "0.5626892", "0.56242245", "0.56224465", "0.5606735", "0.5599548", "0.5593126", "0.55926144", "0.5592319", "0.55829513", "0.5580463", "0.5579149", "0.55678946", "0.55537957", "0.5550643", "0.55457395", "0.55440396", "0.5537756", "0.553769", "0.55300456", "0.55299014", "0.55269057", "0.55250126", "0.55025923", "0.55016327", "0.5499288", "0.5487914", "0.54843456", "0.5482303", "0.546856", "0.5460393", "0.5454654", "0.5451238", "0.54437053", "0.5437805", "0.54374236", "0.5435445", "0.5431889", "0.542544" ]
0.627882
8
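The float list, document_score, and document_rank above are relevance scores attached to this row. The dump does not say how they were produced; a minimal sketch of one plausible scheme, assuming they are cosine similarities between a query embedding and each candidate embedding (the `embed` helper and its model are hypothetical stand-ins):

# Hypothetical sketch: score each negative candidate against the query by
# cosine similarity of their embeddings. `embed` is an assumed helper that
# maps a string to a 1-D numpy vector; the dataset does not document its
# actual scoring pipeline.
import numpy as np

def cosine(a: np.ndarray, b: np.ndarray) -> float:
    # Standard cosine similarity between two dense vectors.
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def score_candidates(embed, query: str, candidates: list[str]) -> list[float]:
    # One score per candidate, in the same order as the input list,
    # mirroring the shape of the negative_scores field above.
    q = embed(query)
    return [cosine(q, embed(c)) for c in candidates]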
checking if InputError is raised as expected when attempting to use an invalid Channel ID
def test_channel_removeowner_invalid_channel_id():
    clear()
    auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    with pytest.raises(InputError):
        assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])
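The document above exercises a common pytest pattern: `pytest.raises` succeeds only if the wrapped call raises the named exception. A self-contained sketch of that pattern follows; `InputError` and `channel_removeowner` here are stand-in stubs for illustration, not the real project code.

# Minimal runnable illustration of the pytest.raises pattern used above.
# The exception class and the function under test are hypothetical stubs.
import pytest

class InputError(Exception):
    """Raised when a caller supplies an invalid argument."""

VALID_CHANNEL_IDS = {1, 2, 3}

def channel_removeowner(token: str, channel_id, u_id: int) -> None:
    # Stub: reject any channel id that is not known.
    if channel_id not in VALID_CHANNEL_IDS:
        raise InputError(f"{channel_id!r} is not a valid channel id")

def test_invalid_channel_id_raises_input_error():
    # Passes only if channel_removeowner raises InputError.
    with pytest.raises(InputError):
        channel_removeowner("token", "INVALIDID", 42)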
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "def test_react_invalid_message_id_in_channel():\n clear()\n user_a = register_n_users(1)\n channels_create(user_a[\"token\"], \"channel_a\", True)\n invalid_channel_id = -1\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], invalid_channel_id, 1)", "def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)", "def _check_channel_input(self, channel):\n # da `.get` `None` zurueckgibt wenn der Schluessel `channel` nicht existiert,\n # wird auch bei fehlender Konfiguration der Fehler geworfen\n if self.channels.get(channel) != GPIO.IN:\n raise RuntimeError(\"You must setup() the GPIO channel as an input first\")", "def test__validate_channels__type_error(input_value):\n validate_channels(input_value)", "def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "async def ticker_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def isInputValid(self, input):\r\n pass", "def test_validate_input_rejection(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('000011')", "def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')", "def test_channel_leave_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_leave(leaver['token'], invalid_id)", "def test__validate_channels__passing(input_value):\n return validate_channels(input_value)", "def validateDevChannel( self, dev, devChannel ):\n d = self.dcDict\n if devChannel not in d[dev]['devChannels'].keys(): raise DCBoxError( 0 )", "def test_dccChatMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_CHAT, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC CHAT request: ['foo']\")", "async def convert_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 
'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "async def channel_manage_error(self, ctx: commands.context, error):\n if isinstance(error, commands.ChannelNotFound):\n await ctx.send(\"That channel was not found, make sure the channel exists.\")\n else:\n logging.warning(error)", "def test_react_invalid_message_id_in_different_channel():\n clear()\n user_a, user_b = register_n_users(2)\n # user_a create a channel\n channels_create(user_a[\"token\"], \"public_channel_a\", True)[\"channel_id\"]\n # user_b create a channel and send message in his own channel\n public_channel_id_b = channels_create(user_b[\"token\"], \"public_channel_b\", True)[\n \"channel_id\"\n ]\n message_id_b = message_send(\n user_b[\"token\"], public_channel_id_b, \"I am in channel_b\"\n )[\"message_id\"]\n # user_a should not be able to react the the message in the public_channel_b\n with pytest.raises(InputError):\n message_react(user_a[\"token\"], message_id_b, 1)", "def test_ap_csa_invalid(dev, apdev):\n csa_supported(dev[0])\n ap = connect(dev[0], apdev)\n\n vals = [ 2461, 4900, 4901, 5181, 5746, 5699, 5895, 5899 ]\n for val in vals:\n if \"FAIL\" not in ap.request(\"CHAN_SWITCH 1 %d\" % val):\n raise Exception(\"Invalid channel accepted: %d\" % val)", "def _check_validconnectioninput(self):\n # Check if name is valid\n if self._check_name(self.symbol):\n second_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '.' is used:\n if self._is_period(self.symbol):\n self.symbol = self.scanner.get_symbol()\n # Check if device input begins with 'I'\n if self.names.get_name_string(self.symbol.id)[0] == \"I\":\n # Check if input number is a positive number\n try:\n inputno = int(\n self.names.get_name_string(\n self.symbol.id)[\n 1:])\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n except BaseException:\n # Input number is not valid\n self._display_syntax_error(\"number\")\n self._semicolon_skipper()\n return None, None\n # OR if DType input\n elif self._check_validdtypeinput(self.symbol):\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n else:\n # Input is not valid\n self._display_syntax_error(\"input\")\n self._semicolon_skipper()\n return None, None\n else:\n # No '.'\n self._display_syntax_error(\"period\")\n self._semicolon_skipper()\n return None, None\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n return None, None", "def test_bad_input():\n\n for arg in ['5', 'ch']:\n rv, out = getstatusoutput('{} {}'.format(prg, arg))\n assert rv == 0\n expected = 'I do not know \"{}\".'.format(arg)\n assert out.strip() == expected", "def _check(self,err):\r\n if err < 0:\r\n buf_size = 128\r\n buf = create_string_buffer('\\000' * buf_size)\r\n self.nidaq.DAQmxGetErrorString(err,byref(buf),buf_size)\r\n raise RuntimeError('NI-DAQ call failed with error %d: %s'%(err,repr(buf.value)))", "def test_dccAcceptMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_ACCEPT, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND ACCEPT request: ['foo']\")", "def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()", "def 
test_invalid_config_options_output():\n\n with pytest.raises(InputError):\n _check_input_config({\"unknown_key_1\": 1})", "def test_channel_join_except_channel():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channels_create_v2(auth_token1, \"Chill Soc\", True)\n invalid_channel = 50\n \n with pytest.raises(InputError):\n channel_join_v2(auth_token2, invalid_channel)", "def test_invalid_event(bot):\n expect_error(edit, InputError, bot.username, 1, False, None, None)", "def test_init(self):\n nt.assert_raises(Exception, CisInterface.CisInput, 'error')", "async def handle_user_input_error(self, ctx: Context, e: errors.UserInputError) -> None:\n if isinstance(e, errors.MissingRequiredArgument):\n embed = self._get_error_embed(\"Missing required argument\", e.param.name)\n self.bot.stats.incr(\"errors.missing_required_argument\")\n elif isinstance(e, errors.TooManyArguments):\n embed = self._get_error_embed(\"Too many arguments\", str(e))\n self.bot.stats.incr(\"errors.too_many_arguments\")\n elif isinstance(e, errors.BadArgument):\n embed = self._get_error_embed(\"Bad argument\", str(e))\n self.bot.stats.incr(\"errors.bad_argument\")\n elif isinstance(e, errors.BadUnionArgument):\n embed = self._get_error_embed(\"Bad argument\", f\"{e}\\n{e.errors[-1]}\")\n self.bot.stats.incr(\"errors.bad_union_argument\")\n elif isinstance(e, errors.ArgumentParsingError):\n embed = self._get_error_embed(\"Argument parsing error\", str(e))\n await ctx.send(embed=embed)\n self.bot.stats.incr(\"errors.argument_parsing_error\")\n return\n else:\n embed = self._get_error_embed(\n \"Input error\",\n \"Something about your input seems off. 
Check the arguments and try again.\"\n )\n self.bot.stats.incr(\"errors.other_user_input_error\")\n\n await ctx.send(embed=embed)\n await self.send_command_help(ctx)", "def validate_channel_value(value: int) -> None:\n if 0 <= value <= 255:\n pass\n else:\n raise ValueError(\"Color channel has to be in range [0; 255]\")", "def validate_input(self, *args):\n return", "def test_check_response_length_invalid(input):\r\n cmd = ShdlcCmdGetErrorState(clear=False)\r\n with pytest.raises(ShdlcResponseError):\r\n cmd.check_response_length(input)", "def test_dccSendMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_SEND, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND request: ['foo']\")", "def test_id_nonexistent(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"e-dad\"\n self.assertRaises(InvalidInputError, self.command.run)", "def error_check(command):\r\n\r\n # TODO\r", "def input_error(self, errCode):\n errMsg = ''\n if 'A' in errCode: errMsg = errMsg + 'X column is not specified.\\n'\n if 'B' in errCode: errMsg = errMsg + 'X Column is not numeric.\\n'\n if 'C' in errCode: errMsg = errMsg + 'Y column is not specified.\\n'\n if 'D' in errCode: errMsg = errMsg + 'Y Column is not numeric.\\n'\n if 'E' in errCode: errMsg = errMsg + 'Z Column is not numeric.\\n'\n if 'F' in errCode: errMsg = errMsg + 'Calibration point 1 row is out of range.\\n'\n if 'G' in errCode: errMsg = errMsg + 'Calibration point 2 row is out of range.\\n'\n if 'H' in errCode: errMsg = errMsg + 'First row is not specified.\\n'\n if 'I' in errCode: errMsg = errMsg + 'Last row is not specified.\\n'\n if 'J' in errCode: errMsg = errMsg + 'First row is out of range.\\n'\n if 'K' in errCode: errMsg = errMsg + 'Last row is out of range.\\n'\n if 'L' in errCode: errMsg = errMsg + 'First and last rows are not compatible.\\n'\n self.wait_window(InputError(self, errMsg.rstrip('\\n')))", "def test_invalid_channel(self, mock_get, mock_subscribe):\n mock_get.return_value = {'XXX': False}\n token = jwt.encode({'room': '123', 'uuid': 'XXX'}, 'XXXX').decode('utf-8')\n ws = yield self.ws_connect('/socket?token={}&channel=ABC'.format(token))\n self.assertSocketError(ws, 4300, 'Invalid channel.')\n self.assertTrue(mock_get.called)\n self.assertFalse(mock_subscribe.called)", "def test_should_raise_value_error_for_missing_parameters(self):\n\n assert_raises(ValueError, TCPControlBits)", "def add_badchannel(self):\n text = 'Channel number: \\n(e.g.: 3, 5, 8-12)'\n uinp, ok = QInputDialog.getText(None, 'Add as bad channel', text)\n if ok:\n uinp = uinp.replace(' ', '') # removes blank spaces\n ch_str = uinp.split(',') # splits csv\n try:\n ch_list = []\n for elem in ch_str:\n if '-' in elem: # if given a range e.g. 
7-12\n elem_lims = elem.split('-')\n seq = range(int(elem_lims[0]), int(elem_lims[1]) + 1)\n ch_list.extend(seq)\n else: # if given a single value\n ch_list.append(int(elem))\n self.model.BadChannelAdd(ch_list=ch_list)\n except Exception as ex:\n print(str(ex))", "def check_input(the_user_entry):\n try:\n for z in range(length_of_bad_input):\n if bad_input[z] == the_user_entry:\n messagebox.showwarning(title=\"Invalid input!\",\n message=\"The following characters are forbidden:\\n\"\n \"~`!@#$%^&*()_-+={[}]|\\\\:;\\\"\\'<,>.?/1234567890\")\n clear_box()\n raise ValueError\n except ValueError:\n print(\"The user entered an invalid character in the entry box\\n\"\n \"potentially one of the following:\\n\"\n \"~`!@#$%^&*()_-+={[}]|\\\\:;\\\"\\'<,>.?/1234567890\")", "def _validate_call_id(self, call_id):\n\n self._validate_required_data(call_id, self.CALL_ID)\n\n query = CallRecord.objects.filter(call_id=call_id)\n\n if query.exists():\n raise NotAcceptable(\n detail='Call id is already in use. Please, choose another')", "def __check_errors(self):\n if not(\"input\" in self.passedArgs or \"source\" in self.passedArgs):\n raise ArgError(\"Program did not receive any of mandatory arguments! (--source=file, --input=file)\")", "def test_request_channel_is_none(self):\n CanInfo.objects.filter(can_id=self.UUID).update(channel_name=None)\n self.assertFalse(send_rotate_to_can(self.USER, self.BIN_NUM))", "def check_input(input_array):\n if len(input_array) != 3:\n print(responses.ERROR_INVALID_INPUT)\n return False\n\n if not valid_port(input_array):\n return False\n\n return True", "def _CHK(self,_err):\n if _err < 0:\n buf_size = 100\n buf = ctypes.create_string_buffer('\\000' * buf_size)\n nidaq.DAQmxGetErrorString(_err,ctypes.byref(buf),buf_size)\n raise RuntimeError(\"nidaq call failed with error %d: %s\"%(_err,repr(buf.value)))\n if _err > 0:\n buf_size = 100\n buf = ctypes.create_string_buffer('\\000' * buf_size)\n nidaq.DAQmxGetErrorString(_err,ctypes.byref(buf),buf_size)\n raise RuntimeError(\"nidaq generated warning %d: %s\"%(_err,repr(buf.value)))", "def test_dccResumeMalformedRequest(self):\n result = self.assertRaises(\n irc.IRCBadMessage, self.client.dcc_RESUME, self.user, self.channel, \"foo\"\n )\n self.assertEqual(str(result), \"malformed DCC SEND RESUME request: ['foo']\")", "def getValidation(myInput):\r\n if myInput == \"\":\r\n print('You did not enter the number of bugs collected.')\r\n return -1\r\n elif myInput.isnumeric() == False:\r\n print('You entered a negative or a text value, please enter numerical digits only.')\r\n return -1\r\n elif myInput.isnumeric() == True:\r\n return int(myInput)\r\n else:\r\n print('There has been a read error, please reenter your number')\r\n return -1", "def check_input(naming):\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')", "def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return False\n\n else:\n return True", "def validate_args(self, in_args, cmd_call):\n valid_1, valid_2 = None, None\n\n if len(in_args) > 0 and type(in_args) is not list:\n args = in_args.split()\n valid_1 = args[0]\n elif type(in_args) is list and len(in_args) > 0:\n args = in_args\n valid_1 = args[0]\n else:\n args = []\n\n if cmd_call in ['default']:\n # Default : Returns a valid cui type for an input cui\n # checks to see if there is more than 2 arguments\n # if so, arg[0] may be a valid code\n # arg[1] may be a valid code type\n # if not ask the user what type of code type arg[0] is\n # valid_1 = valid cui 
type\n # valid_2 = None\n while True:\n if len(args) >= 2 and len(args) <= 3:\n input_type = args[1].upper()\n else:\n input_type = input(\"What type of id is '{0}'? [LOCAL/RXCUI/NDC/SNOMED]\".format(args[0])).upper()\n\n # Confirm it's a valid code type\n valid_type = self.validate_id_type(input_type)\n # Valid type is a boolean of True\n if isinstance(valid_type, str) or valid_type is None:\n return None\n elif valid_type:\n break\n elif not valid_type:\n print('Invalid Option, Please Try Again')\n continue\n valid_1 = input_type\n\n elif cmd_call in self.cmd_config_default:\n # valid_1 : Valid Cui , valid_2 : Valid Cui Type\n valid_2, _ = self.validate_args(args, 'default')\n valid_1 = args[0]\n\n elif cmd_call == 'code_lookup':\n # args[0] : Initial CUI, args[1] : Initial CUI Type, args[2] : Target CUI Type\n # valid_1 : valid cui, valid_2 : list valid source and target\n _dict_opts = util.OPTIONS_CUI_TYPES.copy()\n _avail = list(set(smores.get_dict_sources()) & set(_dict_opts))\n if len(_avail) == 0 and len(args) < 2:\n print('There are no available starting cui types that can be crosswalked.\\n'\n 'Please load a file containing valid cui types: {0}'.format(_dict_opts))\n return False, None\n\n if len(args) >= 2:\n if len(args) == 3:\n # provided cui, cui source, and target\n valid_2, _ = self.validate_args(args, 'default')\n source, target = args[1].upper(), args[2].upper()\n else:\n source, target = args[0].upper(), args[1].upper()\n valid_1 = simple_input(\"Is {0} the correct starting source? \".format(source), ['YES', 'NO', 'exit'])\n if valid_1 == 'exit':\n return False, None\n # TODO need path for valid_2\n else:\n valid_1 = simple_input(\"Which code set do you want to start with?\", _avail)\n if valid_1 != 'exit':\n _dict_opts.remove(valid_1) # Don't lookup what we've already got\n valid_2 = simple_input(\"Which code set do you want to get results for?\", _dict_opts)\n if valid_2 == 'exit':\n return False, None\n else:\n return False, None\n\n elif cmd_call == 'errors':\n _current_err = list(self.errors.keys())\n if len(args) > 1:\n smores_error('#Cx001.7', console_p=True)\n return\n elif len(args) == 1 and args[0].lower() in _current_err:\n valid_1 = args[0]\n elif len(args) == 1:\n print('There are currently no errors logged for that command.')\n return\n else:\n valid_1 = simple_input(\"Please choose a command from the list to see errors: \", _current_err)\n\n elif cmd_call in ['csv', 'remap', 'fhir', 'json']:\n # Format: [File] [Output]\n if not self.inputs['loaded']:\n print(\"No Files Loaded!\\nYou Must load a file containing local medications first\")\n else:\n _file_opts = list(self.inputs['files'].keys()) + ['All']\n _dict_opts = list(smores.get_dict_sources(True)) #+ ['All']\n _file_or_dict = None\n\n if cmd_call in ['csv', 'json']:\n if len(args) == 0:\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n elif args[0] not in _file_opts and args[0] not in _dict_opts:\n print('That option was not recognized as a valid source.')\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n else:\n valid_1 = args[0]\n\n if _file_or_dict.upper() == 'FILE':\n valid_1 = 'FILE|' + simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n elif _file_or_dict.upper() == 'DICTIONARY':\n valid_1 = 'DICT|' + simple_input(\"Please choose a code dictionary to output\", _dict_opts, True)\n elif _file_or_dict.upper() 
== 'EXIT':\n return None, None\n\n else:\n valid_1 = simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n if cmd_call in ['csv', 'json', 'fhir']:\n if len(args) == 2 and len(args[1]) > 0:\n valid_2 = args[1]\n else:\n valid_2= input(\"Please provide an output file name:\").strip()\n\n if len(valid_2) > 0:\n if \".\" in valid_2:\n valid_2, ext = valid_2.split(\".\")\n else:\n valid_2 = ''\n print('Empty file name provided, using default.')\n else:\n valid_2 = args[0]\n\n elif cmd_call == 'file':\n re_use = False\n if self.inputs['loaded'] and len(in_args) == 0:\n print(\"The following file(s) have already been loaded: \\n\" + str(self.inputs['files']))\n _load_more = simple_input(\"Would you like to load an additional file?\", ['Y', 'N', 'exit'])\n if _load_more == 'Y':\n pass\n elif _load_more == 'N':\n _re_use = simple_input(\"Would you like to re-use a loaded file?\", ['Y', 'N', 'exit'])\n if _re_use == 'Y':\n re_use = True\n else:\n return False, None\n else:\n return False, None\n\n if in_args is not None and len(in_args) > 0:\n valid_1 = in_args\n else:\n valid_1 = input(\"Please enter the name of the file to load: \") if not re_use else simple_input(\n 'Select the file to be used: ', list(self.inputs['files'].keys()), index=True)\n\n while True:\n if valid_1 in self.inputs['files']:\n if not re_use:\n print(\"It looks like you've already loaded that file. Please try a different file.\")\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n elif len(valid_1) == 0:\n smores_error('#Cx001.7', logger=smoresLog)\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n\n if not resolve_target_path(valid_1):\n valid_1, valid_2 = self.validate_args('', 'file')\n\n elif '.smr' in valid_1:\n if len(self.inputs['files']) > 0:\n print(\n 'It looks like you are trying to load a session, this will replace the current session and '\n 'all previous work.')\n _save = simple_input('Do you want to save the current session first?', ['Y', 'N', 'EXIT'])\n if _save == 'Y':\n smores.save_session(self.__version__)\n elif _save == 'EXIT':\n return False, None\n valid_2 = 'session'\n else:\n valid_2 = 'file'\n\n smoresLog.debug('Args: {0}, Validated as: {1}'.format(valid_1, valid_2))\n return valid_1, valid_2", "def _raise_if_invalid(self):\n if self._stack_result == -1 and self._recm_data == -1:\n error_message = 'Worker result for request ID {} does not exist yet'.format(\n self.external_request_id)\n logger.exception(error_message)\n raise SARBRequestInvalidException(error_message)", "def _check(error: int) -> None:\n if error < 0:\n raise RuntimeError(ffi.string(lib.TCOD_get_error()).decode())", "def checkUIDValidity(self, uid):\r\n if uid not in self._pendingContainer:\r\n raise CredentialError('Invalid environment ID.')", "def test_qubits_not_on_device(self, valkmusa, qubit):\n\n with pytest.raises(ValueError, match='Qubit not on device'):\n valkmusa.validate_operation(cirq.X(qubit))", "def test_wrong_input():\n dwd = DwdWeatherWarningsAPI(None)\n assert not dwd.data_valid\n assert dwd.warncell_id is None\n assert dwd.warncell_name is None\n assert dwd.last_update is None\n assert dwd.current_warning_level is None\n assert dwd.expected_warning_level is None\n assert dwd.current_warnings is None\n assert dwd.expected_warnings is None", "def check_channel_shell_request(self, channel):\n return False", "def test_authentication_errors(input, output, reason):\n with pytest.raises(output) as e:\n Dexcom(input[0], 
input[1])\n assert reason == str(e.value)", "def __connect_failed__(self):\n # Ask the user what to do with the error\n choice = input(\"[A]bort, [C]hange address and port, or [R]etry?\")\n if (choice.lower() == \"a\"):\n exit()\n elif (choice.lower() == \"c\"):\n address = input(\"Please enter the address:\")\n port_number = input(\"Please enter the port:\")", "def CHK(err):\n if err < 0:\n buf_size = 100\n buf = ctypes.create_string_buffer('\\000' * buf_size)\n nidaq.DAQmxGetErrorString(err,ctypes.byref(buf),buf_size)\n raise RuntimeError('nidaq call failed with error %d: %s'%(err,repr(buf.value)))", "def non_existing_recipe_error_test(self):\n client = TestClient()\n error = client.run(\"upload Pkg/0.1@user/channel\", ignore_error=True)\n self.assertTrue(error)\n self.assertIn(\"ERROR: There is no local conanfile exported as Pkg/0.1@user/channel\",\n client.user_io.out)", "def check_errors(self) -> None:", "def test_invalid_username():\n expect_error(edit, InputError, \"aaa\", 1, True, None, None)", "def test_empty_username():\n expect_error(register, InputError, \"\", \"abcdef\", \"A\", \"A\", \"A\")", "def check_channel_exec_request(self, channel, command):\n return False", "def test_invalid_course_key(self):\n errstring = \"Unparsable course_id\"\n with self.assertRaisesRegex(CommandError, errstring):\n call_command('export_olx', 'InvalidCourseID')", "def _check_for_incomplete_input(self):\n pass", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def _validate_input(self):\n\n if is_empty(self.message) == True:\n raise ValidationException(\"Message cannont be empty.\")", "def testIdNonUniqueIdOnInit(self):\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n cdl_convert.ColorCorrection,\n 'uniqueId',\n 'file'\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n try:\n cc = cdl_convert.ColorCorrection('uniqueId', 'file')\n except ValueError:\n self.fail(\"Non-unique ID was not accepted!\")\n\n self.assertEqual(\n 'uniqueId001',\n cc.id\n )", "def test_wrong_input(self):\r\n self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),\r\n 'valid', input=T.dmatrix())\r\n self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),\r\n 'valid', filters=T.dvector())\r\n self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),\r\n 'valid', input=T.dtensor3())", "def CHK(err):\n if err < 0:\n buf_size = 100\n buf = create_string_buffer(b'\\000' * buf_size)\n ni.DAQmxGetErrorString(err,byref(buf),buf_size)\n raise RuntimeError('nidaq call failed with error %d: %s'%(err,repr(buf.value)))\n if err > 0:\n buf_size = 100\n buf = create_string_buffer(b'\\000' * buf_size)\n ni.DAQmxGetErrorString(err,byref(buf),buf_size)\n raise RuntimeError('nidaq generated warning %d: %s'%(err,repr(buf.value)))", "def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 
'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])", "def _is_valid_input(self, parameter_name):\n raise NotImplementedError()", "def __input_validator(msg):\n\n\t\tstatus = msg[\"status\"]\n\n\t\tif status == 1:\n\t\t\treturn status\n\t\telif status == 0:\n\t\t\tprint(msg[\"body\"])\n\t\telif status == -1:\n\t\t\tprint(\"Please enter something!\")\n\t\telif status == -2:\n\t\t\tprint(\"Your command {} is invalid\".format(msg[\"verb\"]))\n\t\telif status == -3:\n\t\t\tprint(\"No argument given after {}\".format(msg[\"verb\"]))", "def test_api_invalid_stream_id(self) -> None:\n user = self.example_user(\"hamlet\")\n self.login_user(user)\n result = self.api_patch(\n user,\n \"/api/v1/users/me/subscriptions/121\",\n {\"property\": \"is_muted\", \"value\": \"somevalue\"},\n )\n self.assert_json_error(result, \"Invalid stream ID\")", "def validate_inputs(name, country, catches):\n while not name:\n name = input('Player name cannot be empty: ')\n\n while not country:\n country = input('Enter a valid country name: ')\n\n while not catches:\n catches = input('Now enter number of catches record: ')\n try: # Once user has input data, try to cast it to integer to verify is not string\n int(catches)\n except ValueError: # if input data is not an integer, print message and clear catches value to keep asking user to enter data\n print('Data given is not a number')\n catches = ''\n\n return name, country, catches", "def test_invalid_arguments(self):\n # More than two arguments should report an error.\n exit_code, output = run_cli('a', 'b', 'c')\n assert exit_code != 0\n assert \"Error\" in output\n # Invalid `ionice' values should report an error.\n exit_code, output = run_cli('--ionice=foo')\n assert exit_code != 0\n assert \"Error\" in output", "def test_dccChatIndecipherablePort(self):\n result = self.assertRaises(\n irc.IRCBadMessage,\n self.client.dcc_CHAT,\n self.user,\n self.channel,\n \"foo.txt 127.0.0.1 sd@d\",\n )\n self.assertEqual(str(result), \"Indecipherable port 'sd@d'\")", "def is_valid_channel_name(channel):\n if not is_channel_name(channel):\n return False\n\n test_section = channel[1:]\n\n if not MIN_CHANNEL_NAME_LEN < len(channel) < MAX_CHANNEL_NAME_LEN:\n return False\n\n valid_symbols = '#\\\\|^`[]{}_'\n valid_chars = string.ascii_letters + string.digits + valid_symbols\n\n for char in channel:\n if char not in valid_chars:\n return False", "def error_handler(num, err):\n print(\"Error in input {}\".format(num))\n err = err.decode()\n raise Exception(err)", "def failure(self, input: str) -> enumFail:\n pass", "def test_is_gene_continuously_amplified_wrong_input(self):\n self.assertEqual(\"Wrong input data\", is_gene_continuously_amplified(13))", "def del_badchannel(self):\n text = 'Channel number: \\n(e.g.: 3, 5, 8-12)'\n uinp, ok = QInputDialog.getText(None, 'Delete bad channel', text)\n if ok:\n uinp = uinp.replace(' ', '') # removes blank spaces\n ch_str = uinp.split(',') # splits csv\n try:\n ch_list = []\n for elem in ch_str:\n if '-' in elem: # if given a range e.g. 
7-12\n elem_lims = elem.split('-')\n seq = range(int(elem_lims[0]), int(elem_lims[1]) + 1)\n ch_list.extend(seq)\n else: # if given a single value\n ch_list.append(int(elem))\n self.model.BadChannelDel(ch_list=ch_list)\n except Exception as ex:\n print(str(ex))", "def test_get_flow_request_by_channel_id_wrong_channel_id(self):\n headers = self._get_oauth_header(client_name=DISPATCHER_NAME)\n res = self.client.get('/v1/flow_requests/search/?channel_id=unknown', **headers)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(res.json(), {'errors': ['not_found']})", "def on_badchannelkey(self, conn, event) -> None:\n channel_name = event.arguments[0]\n logger.warning('Cannot join channel %s (bad key).', channel_name)", "def _error_check(self, command_response):\n error_list = command_response.find(\"./clierror\")\n command_obj = command_response.find(\"./input\")\n if error_list is not None:\n command = command_obj.text if command_obj is not None else \"Unknown command\"\n msg = etree.tostring(error_list).decode()\n raise NXAPICommandError(command, msg)", "def check_input_socketname(self, socket_name):\n socket = self.__inputs.get(socket_name)\n if not socket: raise InvalidSocketError('input: '+str(socket_name))", "def test_ibmq_no_token_error(self):\n with pytest.raises(ValueError, match=\"Please pass a valid IBMQX token\"):\n dev = qml.device(\"orquestra.ibmq\", wires=2, analytic=False)", "def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def _check_error(self, ipi):\n\n ipi_error = ipi.communicate(timeout=120)[1].decode(\"ascii\")\n assert \"\" == ipi_error, \"IPI ERROR OCCURED: {}\".format(ipi_error)", "def validate_input(self, argin):\n try:\n configuration_dict = json.loads(argin)\n _ = configuration_dict[\"id\"]\n except (KeyError, JSONDecodeError) as err:\n msg = f\"Validate configuration failed with error:{err}\"\n self.logger.error(msg)\n return (None, ResultCode.FAILED, msg)\n except Exception as other_errs:\n msg = f\"Validate configuration failed with unknown error:{other_errs}\"\n self.logger.error(msg)\n return (None, ResultCode.FAILED, msg)\n\n return (\n configuration_dict,\n ResultCode.OK,\n \"ConfigureScan arguments validation successful\",\n )", "def non_existing_package_error_test(self):\n client = TestClient()\n error = client.run(\"upload Pkg/0.1@user/channel -p hash1\", ignore_error=True)\n self.assertTrue(error)\n self.assertIn(\"ERROR: There is no local conanfile exported as Pkg/0.1@user/channel\",\n client.user_io.out)", "def get_input(msg):#function which catches all user input which is invalid (not numbers) for all the shapes\n value = None\n while not value:\n value = input(msg)\n if not value.isnumeric():#if not a valid number print the following message \n print(\"Please enter a valid number\")\n value = None\n else:\n return int(value)#once a correct number is entered the number is returned and program contiues ", "def test_arg_atomMalformedAtom(self):\n self.assertRaises(imap4.IllegalClientResponse,\n self.server.arg_atom, b\" not an atom \")", "def test_invalid_qasmname_cr(self):\n self.assertRaises(QISKitError, ClassicalRegister, size=3, name='Cr')", "async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> None:\n client = MatterClient(data[CONF_URL], aiohttp_client.async_get_clientsession(hass))\n await client.connect()", "def _check_error(self):\n\n if self.error_code_test != 0:\n return False\n else:\n return True", 
"def test_index_hostid_notnum(self):\n self.check_response(\n '/attributes?h=xyz',\n ('Please enter an integer value for Host ID'))", "def test_invalid_type_cr_spec(self):\n QPS_SPECS_NONAMES = {\n \"circuits\": [{\n \"quantum_registers\": [{\n \"size\": 3}],\n \"classical_registers\": [{\n \"name\": 1,\n \"size\": 3}]\n }]\n }\n\n self.assertRaises(QISKitError, QuantumProgram, specs=QPS_SPECS_NONAMES)" ]
[ "0.698628", "0.6813436", "0.6781097", "0.6761938", "0.67249954", "0.6659239", "0.64130294", "0.6412095", "0.63696516", "0.628801", "0.62493896", "0.6245839", "0.6161329", "0.61323416", "0.60636854", "0.59984154", "0.59828895", "0.59685725", "0.5941027", "0.5924498", "0.591388", "0.5908196", "0.59041107", "0.58775896", "0.58704627", "0.58546156", "0.5852013", "0.584429", "0.5833557", "0.58304834", "0.5828934", "0.58248836", "0.5809644", "0.58083147", "0.5805674", "0.57967436", "0.5790822", "0.57763803", "0.5759516", "0.5758326", "0.57290745", "0.57259494", "0.5695417", "0.5684839", "0.5672049", "0.5663323", "0.5662899", "0.565782", "0.56574774", "0.5631846", "0.5630056", "0.56273353", "0.562598", "0.5623274", "0.5612913", "0.5610106", "0.5591538", "0.55873275", "0.5587148", "0.5583307", "0.5578275", "0.55638087", "0.5556583", "0.5547497", "0.55462354", "0.5543899", "0.55398965", "0.55319536", "0.55157065", "0.550955", "0.5507368", "0.55059195", "0.55017257", "0.5501309", "0.54926026", "0.54901516", "0.54899365", "0.5487838", "0.5483475", "0.54786086", "0.54766905", "0.5472567", "0.545739", "0.54573643", "0.5450957", "0.5440276", "0.5435278", "0.5431034", "0.54261094", "0.54247034", "0.5420641", "0.5417659", "0.54134923", "0.541273", "0.5408707", "0.5403121", "0.54020256", "0.53968835", "0.53947", "0.5389762" ]
0.63203907
9
checking if removing an owner with an invalid user ID raises an InputError
def test_channel_removeowner_invalid_user_id():
    clear()
    auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
    auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    with pytest.raises(InputError):
        assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], "[email protected]")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def _remove_user(self):\n name = False\n while not name: #While name not set\n name = input(\"Please enter the username of the user you would like to remove: \").lower()\n userID = self._get_user_id(name)\n if not userID:\n name = False\n command = \"remove_user {0}\\r\\n\".format(userID)\n return(command)", "def remove():\r\n ch = input('You are about to REMOVE an entry. If NO, you may choose another option.\\n').lower()\r\n\r\n if y_n(ch):\r\n print('Enter info for the following fields...\\n')\r\n xln = re.sub(r'\\s', '', str(input('Last name?\\n'))).lower().capitalize()\r\n xfn = re.sub(r'\\s', '', str(input('First name?\\n'))).lower().capitalize()\r\n\r\n if not search2(xln, xfn):\r\n print('No entry exists for', xfn, xln, end='. Please enter another entry.\\n')\r\n return remove()\r\n\r\n ch2 = input('Are you sure you wish to remove this individual from the database? YES or NO?\\n')\r\n if y_n(ch2):\r\n print(xfn, xln, 'has been removed from the database.')\r\n with conn:\r\n c.execute(\"\"\"DELETE from personnel WHERE first=:first COLLATE NOCASE and last=:last COLLATE NOCASE\"\"\",\r\n {'first': xfn, 'last': xln})\r\n\r\n start()\r\n else:\r\n print('Your remove action has been cancelled.')\r\n start()\r\n else:\r\n start()", "def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an owner'),\n params={'username': username},\n )\n return owner", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def delete_user():", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert 
channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def test_delete_user_by_id_mismatch(client: FlaskClient) -> None:\n username = create_random_username()\n other_username = create_random_username()\n auth_token = create_auth_token(other_username)\n response = delete_user(client, username, auth_token.signed)\n assert_error_response(response, HTTPStatus.FORBIDDEN)", "def handle_owner_delete(owner_id):\n\n owner = Owner.find_by_id(owner_id)\n # flash error message if owner does not exist\n if not owner:\n flash(f'Owner does not exist!', 'danger')\n return 'not deleted', 404\n # flash error message if owner still has existing content\n elif owner.contents:\n flash(f'{owner.owner_name} still has existing content!', 'danger')\n return 'not deleted', 400\n\n # owner is deleted and user is redirected (redirect code in owners.js)\n # deleting owner errors handled\n try:\n owner.delete_owner()\n except HTTPException:\n return \"Server cannot delete the owner at this time\", 500\n\n flash(f'{owner.owner_name} has been deleted!', 'success')\n return 'deleted', 202", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with 
pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def test_not_member(bot, event):\n _, event_id = event\n expect_error(edit, InputError, bot.username, event_id, False, None, None)", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_remove_user(self):\n db = database.Database()\n db.remove_user('nick')\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'DELETE FROM quota_violations WHERE username LIKE (%s)'\n\n self.assertEqual(sql, expected_sql)", "def collection_special_author_cancel(user_id, author_id):\n\n another_user_id = author_id\n if (user_id == another_user_id):\n return \"self\"\n query = db_session.query(Collection_User).filter_by(\n user_id=user_id, another_user_id=another_user_id).all()\n if len(query) == 1:\n db_session.delete(query[0])\n db_session.commit()\n update_collection_num(user_id, another_user_id, False)\n else:\n return \"already\"\n return \"success\"", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def _delete_user(self, user):\n if User.delete_user(user):\n self.session.output({'deleted': 'user {} and their related accounts'.format(user)})\n return True\n else:\n self.session.output({'invalid_user': 'please enter valid user ID!\\n'}, '[ Fail to delete user ]')\n return False", "def validate_ownership(item, user_id):\n if item.user_id != user_id:\n raise Forbidden('You are not allowed to modify this item.')", "def clean(self):\n super().clean()\n if self.user2:\n self.orig_cloud.delete_user(self.user2.id)", "def test_remove_user(self):\n pass", "def test_handle_remove_github_error(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n team = Team(\"BRS\", \"brs\", \"web\")\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user]\n self.db.query.return_value = [team]\n self.gh.has_team_member.side_effect = GithubAPIException(\"error\")\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (\"User removed unsuccessfully with the \"\n \"following error: error\", 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n 
#register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_user_id_delete(self):\n pass", "def delete_user():\n #TODO user delete\n pass", "def delete_user(id):\n pass", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_delete_nonexist_username(self):\n print('(' + self.test_delete_nonexist_username.__name__+')',\n self.test_delete_nonexist_username.__doc__)\n with self.assertRaises(ValueError):\n self.connection.delete_user(NON_EXIST_PATIENT_USERNAME)", "def test_handle_remove_lookup_error(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n self.db.retrieve.side_effect = LookupError\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (self.testcommand.lookup_error, 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def del_usr (conn, id):\n\n try:\n csr = conn.cursor()\n\n cmd = \"DELETE FROM {tbl} WHERE {col1} = {val1};\".\\\n format(tbl = _tbl_users,\n col1 = _tbl_users_col1, val1 = id)\n print(cmd)\n\n csr.execute(cmd)\n csr.close()\n\n except Exception as ex:\n print(\"Error - del_usr: {0}\".format(ex))\n rc_err = 
ex.args[0]\n return rc_err\n\n return rc_ok", "def test_handle_remove_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"web\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def test_delete_user_by_id_non_admin(client: FlaskClient, db_session) -> None:\n username = create_random_username()\n populate_database_with_users(db_session, username)\n auth_token = create_auth_token(username)\n response = delete_user(client, username, auth_token.signed)\n assert response.status_code == HTTPStatus.NO_CONTENT\n assert response.content_length is None\n assert GifSyncUser.get_by_username(username) is None", "def test_username_not_unique(bot):\n expect_error(register, InputError, bot.username, \"abcdef\", \"a\", \"a\", \"a\")", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def test_remove_invalid_user(self) -> None:\n admin = self.example_user(\"iago\")\n self.login_user(admin)\n self.assertTrue(admin.is_realm_admin)\n\n stream_name = \"hümbüǵ\"\n self.make_stream(stream_name)\n\n result = self.client_delete(\n \"/json/users/me/subscriptions\",\n {\n \"subscriptions\": orjson.dumps([stream_name]).decode(),\n \"principals\": orjson.dumps([99]).decode(),\n },\n )\n self.assert_json_error(\n result, \"User not authorized to execute queries on behalf of '99'\", status_code=403\n )", "def remove_user(self, username):\n if(self.isBlank(username) or self.isValidLen(username)):\n return False\n safe_input = (username,)\n #this method should be secured ie. 
need more than just username to call it \n self.cur.execute(\"DELETE FROM Users WHERE Username = ?\" , safe_input)\n self.con.commit()\n logging.info('%s was removed', username)\n return True", "def delete_user(change):\n return change()", "def test_delete_user_by_id_non_existent(client: FlaskClient) -> None:\n username = create_random_username()\n auth_token = create_auth_token(username)\n response = delete_user(client, username, auth_token.signed)\n assert_error_response(response, HTTPStatus.NOT_FOUND)", "def test_permission_remove_unknown_user(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove joe TICKET_VIEW')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def __ui_remove_person(self):\n remove_person_id = int(input(\"Introduce the ID of the person you want to remove: \"))\n self.__person_service.service_remove_person(remove_person_id)\n print(\"Person successfully removed from your agenda!\\n\")", "def delete_user():\r\n raise NotImplementedError()", "def test_admin_cannot_delete_user_with_vague_user_id(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/users/kk',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'The user id should be a number!')\n self.assertEqual(resp.status_code, 400)", "def _validate_ip_owner(ip, mac, row_number):\n mac = MACAddressField.normalize(mac)\n try:\n dev = Device.admin_objects.get(ethernet__mac=mac)\n except Device.DoesNotExist:\n if ip_address_exists(ip):\n raise forms.ValidationError(\n \"Row %s: IP address already exists.\" % row_number\n )\n else:\n # Does another device have this IPAddress?\n if(Device.objects.filter(\n ipaddress__number=int(ipaddr.IPAddress(ip)),\n ).exclude(\n pk=dev.id,\n ).exists()):\n raise forms.ValidationError(\n \"Row %s: IP address used by another device.\" % row_number\n )", "def delete_student(user_inputs):\r\n no_space = (remove_space(user_inputs))\r\n first_last = no_space.split(\",\")\r\n if len(first_last) != 2:\r\n print(\"Invalid number of arguments, please only enter first and last name\")\r\n else:\r\n first_n, last_n = first_last[0], first_last[1]\r\n original_count = len(StudentRoster)\r\n for student in StudentRoster:\r\n if student.first == first_n:\r\n if student.last == last_n:\r\n StudentRoster.remove(student)\r\n if original_count == len(StudentRoster):\r\n print(\"Error! 
No student with that name was found in the roster.\")", "def removeuser(jenni, input):\n line = input.group()\n if line:\n line = line.lstrip().rstrip()\n scores.rmuser(jenni, input, line)", "def _validate_delete_command(args):\n return _check_entry_name(args)", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "def deleterecord(phones,username,phonenum):\r\n if username in phones:\r\n del phones[username]\r\n else:\r\n raise ValueError(\"This username are not exist\")", "def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)", "def test_is_valid_user_id_valid(self):\n ids = (\n \"NDcyMjY1OTQzMDYyNDEzMzMy\",\n \"NDc1MDczNjI5Mzk5NTQ3OTA0\",\n \"NDY3MjIzMjMwNjUwNzc3NjQx\",\n )\n\n for user_id in ids:\n with self.subTest(user_id=user_id):\n result = TokenRemover.is_valid_user_id(user_id)\n self.assertTrue(result)", "def delete_entity_owner(self, username):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_DELETE_ENTITY_OWNER + ':' + username)", "def test_users_username_delete(self):\n pass", "def check_user(msg):\n if \"Error\" in msg:\n raise ValueError('User already exists.')", "def rm_person():\n # get person name from user\n responses = accept_inputs([\"Person name\"])\n person_name = responses[\"Person name\"]\n # check for existence of person\n results = query_with_results(\"select id from person where name = ?\", [person_name])\n if len(results) == 0:\n print(\"No person found with name '%s' that we could remove.\" % person_name)\n return\n # the person exists, so remove it\n query_no_results(\"delete from person where name = ?\", [person_name])\n # remove all associations with tasks\n query_no_results(\"delete from task_person_pair where person = ?\", [results[0][0]])\n print(\"Person with name '%s' removed.\" % person_name)", "def test__validate_owner__0():\n user = User.precreate(202211270021)\n team = Team.precreate(202211270022)\n \n for input_value, expected_output in (\n (None, ZEROUSER),\n (user, user),\n (team, team),\n ):\n owner = validate_owner(input_value)\n vampytest.assert_is(owner, expected_output)", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def 
block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "def test_delete_non_owner(self):\n another_user = CustomUser.objects.create(id=134, email='[email protected]', is_active=True)\n another_user.set_password('qwerty12345')\n another_user.save()\n\n self.client.login(email='[email protected]', password='qwerty12345')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 87876})\n\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 403)", "def test_del_user(self, api):\n self.builder.add_user(api.get_user())\n resp = api.del_user(api.get_user())\n assert resp.status_code == 204\n with pytest.raises(ObjectDeletedError):\n assert self.builder.check_user(api.get_user()) is False", "def test_delete_no_username(self):\n\n self.portal.portal_properties.site_properties.use_email_as_login = True\n\n # This should fail either an username or user object should be given\n self.assertRaises(ValueError, api.user.delete)\n self.assertRaises(ValueError, api.user.delete,\n username='[email protected]', user=mock.Mock())\n\n api.user.create(email='[email protected]', password='secret')\n api.user.delete(username='[email protected]')\n\n user = api.user.create(email='[email protected]', password='secret')\n api.user.delete(user=user)", "def del_user(self, username):\n pass", "def remove(self, user_id):\n pass", "def validate_user_id(self, value):\n if not User.objects.filter(id=value).exists():\n raise serializers.ValidationError('User with this id does not exist.')\n return value", "async def cog_command_error(self, ctx:utils.Context, error:commands.CheckFailure):\n\n # Throw errors properly for me\n if ctx.author.id in self.bot.config['owners']:\n text = f'```py\\n{error}```'\n await ctx.send(text)\n raise error\n\n elif isinstance(error, commands.NotOwner):\n await ctx.send(\"You need to be registered as an owner to run this command.\")\n return", "def testDeleteUserWithUnknownUsername(self):\n self.store.commit()\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n deferred = self.facade.deleteUser(session, u'unknown')\n error = yield self.assertFailure(deferred, TNoSuchUser)\n self.assertEqual(u'unknown', error.name)", "def delete_inventory():\r\n strIDDel = input('Which ID would you like to delete?: ').strip()\r\n while ValueError:\r\n try: \r\n int(strIDDel)\r\n break\r\n except ValueError:\r\n strIDDel = input('Error: ID must be numeric. 
Enter ID: ').strip()\r\n return strIDDel", "def test_remove_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Invalid Requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_delete_fail(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'id': people[2].id}\n self.delete('user', 403, params=p)", "def delete_user(username, user_id):\r\n global sql_cursor\r\n global database\r\n\r\n print(\"Are you absolutely sure that you want to delete your account.\")\r\n conf_del = input(\"(y/n) : \").lower()\r\n\r\n if conf_del == \"y\":\r\n\r\n print(\"Deleting...\")\r\n\r\n sql_cursor.execute(f\"DELETE FROM passwords WHERE user_id={user_id};\")\r\n sql_cursor.execute(f'DELETE FROM users WHERE username=\"{username}\";')\r\n database.commit()\r\n\r\n print(\"Account successfully deleted\")\r\n print(\"You need to start the program again\")\r\n print(\"Exiting now\")\r\n sleep(5)\r\n quit()\r\n\r\n else:\r\n print(\"Cancelling deletion ...\")\r\n return", "def check_deletion():\n\n if newrev == zero:\n ERROR(\"[POLICY] Refusing to delete this ref\")\n sys.exit(1)", "def test_no_uid_causes_error():\n empty = create_db()\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n empty,\n \"some_uid\",\n INP\n )", "def test_deluser(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.run_function(\"group.adduser\", [self._group, self._user])\n self.assertTrue(self.run_function(\"group.deluser\", [self._group, self._user]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertNotIn(self._user, str(group_info[\"members\"]))", "async def _ad_remove(self, ctx, member):\n member_object = discord.utils.find(\n lambda x: x.name == member or str(x) == member or (member.isnumeric() and x.id == int(member)),\n ctx.guild.members\n )\n if member_object is not None:\n member = member_object.id\n elif member.isnumeric():\n member = int(member)\n\n admin = list(filter(lambda x: x.user_id == member, self.database.get_admins(ctx.guild.id)))\n if admin:\n self.database.remove_item(admin[0])\n if member_object:\n await ctx.send(f\"Removed admin from {member_object.name}\")\n else:\n await ctx.send(\"Removed admin from invalid user\")\n else:\n await ctx.send(\"That person isn't an admin!\")", "def _validate_author_id(cls, item):\n if (\n item.author_id and\n not user_services.is_user_or_pseudonymous_id(item.author_id)\n ):\n cls._add_error(\n 'final %s' % (\n base_model_validators.ERROR_CATEGORY_AUTHOR_CHECK),\n 'Entity id %s: Author ID %s is in a wrong format. 
'\n 'It should be either pid_<32 chars> or uid_<32 chars>.'\n % (item.id, item.author_id))", "def DelteUser(database):\n firstname=str(input(\"what is the name of the user you want to delete : \"))\n delusr,find =getByName(database,firstname)\n if not find:\n return\n del database[delusr.key]\n for key,usr in database.items():\n if delusr.key in usr.folow:\n usr.folow.remove(delusr.key)\n if delusr.key in usr.folowed:\n usr.folowed.remove(delusr.key)\n \n os.remove(f\"Users/{delusr.key}\")", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def test_no_owner_exception(api: API, owners: list):\n api.candlepin.get_owners.return_value = owners\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n with pytest.raises(IndexError):\n account.owner_id", "def validate_owner(json_data: dict, manufacturer: dict):\n error_msg = ''\n if not json_data.get('ownerGroups'):\n return ''\n if len(json_data.get('ownerGroups')) != 1:\n error_msg = OWNER_GROUP_COUNT_INVALID\n group = json_data['ownerGroups'][0]\n group_man = manufacturer['ownerGroups'][0]\n owner_man = group_man['owners'][0]\n if group.get('type', '') != group_man.get('type'):\n error_msg += OWNER_GROUP_TYPE_INVALID\n if not group.get('owners'):\n return error_msg\n if len(group.get('owners')) != 1:\n error_msg += OWNER_COUNT_INVALID\n owner = group['owners'][0]\n if owner.get('organizationName', '') != owner_man.get('organizationName') or \\\n owner.get('address') != owner_man['address']:\n error_msg += OWNER_MISMATCH\n return error_msg", "def delete():\n id_num = int(input('Enter the ID number of the item you wish to delete\\n'))\n db_actions.remove(id_num)", "def clean_role():", "def test_invalid_update_request_with_taken_username(self):\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(self.author.get_key()))\n response: Response = self.client.patch(BASE_URL + '/update/', data={\n 'username': self.temporary_author.username\n })\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT, msg=data)\n self.assertEqual(data, {'detail': f\"User '{self.temporary_author.username}' already exists.\"})", "def testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_delete_request_by_non_owner(self):\n client = APIClient()\n 
client.credentials(HTTP_AUTHORIZATION=self.test_user2_token)\n response = client.post('/api/places/', self.restaurant_data, format='json')\n url = f\"/api/places/{response.data['id']}/\"\n\n client.credentials(HTTP_AUTHORIZATION=self.test_user1_token)\n response = client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def do_delete_users(self, value=None):\n if not value:\n key = input(\"Provide key through which you want to delete user :\\n\")\n value = input(f\"Provide {key} to delete :\")\n else:\n key = 'id'\n value = value\n self.connection_obj.delete_user(key, value)\n print(\"Deletion Successful\")", "def delete_item(self, id: str, user: User) -> bool:", "def validate_username(self, attrs, source):\n phone_no = attrs[source]\n if not phoneCleaner(phone_no):\n raise serializers.ValidationError(\"Please check your phone no. the format is incorrect\")\n\n try:\n us = User.objects.get(username__iexact=phone_no)\n except User.DoesNotExist:\n raise serializers.ValidationError(\"Phone number must already be registered before doing this\")\n\n if us.hierarchy != 'master':\n raise serializers.ValidationError(\"Phone number must not be a slave to another user\")\n\n return attrs", "def test_admin_cannot_delete_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/users/5',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This attendant does not exist!\")\n self.assertEqual(resp.status_code, 404)", "def do_deluser(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\ttry:\n\t\t\t\tself.cl.del_contact(line)\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"Wrong syntax! 
Type 'help delete'\")\n\t\telse:\n\t\t\tprint(\"To delete contacts you need to open or create a book.\")", "def del_user(user):\n\ttry:\n\t\tmyfile = open('tracked_users', 'r')\n\t\tlines = myfile.readlines()\n\t\tmyfile.close()\n\t\tmyfile = open('tracked_users', 'w')\n\t\tfor line in lines:\n\t\t\tif line.lower() != user.lower()+'\\n':\n\t\t\t\tmyfile.write(line.lower())\n\t\tmyfile.close()\n\t\tos.remove('data/'+user.lower())\n\t\treturn 1\n\texcept Exception as e:\n\t\tfd = open('tracked_users', 'r')\n\t\tprint(fd.read())\n\t\tfd.close()\n\t\tprint(e)\n\t\treturn -1", "def remove_friend():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n friend_to_remove = get_id_from_username(request.form['remove_user'])\n if not friend_to_remove or friend_to_remove==user_id:\n return redirect(url_for('message.converse'))\n remove_friend_db(user_id, friend_to_remove)\n return redirect(url_for('message.converse'))", "def test_delete_username(self):\n\n api.user.create(username='unwanted', password='secret',\n email='[email protected]')\n api.user.delete(username='unwanted')\n\n user = api.user.create(username='steven', password='secret',\n email='[email protected]')\n api.user.delete(user=user)", "def test_validate_owner(self):\n with self.assertRaises(ValidationError):\n self.make_assignment(self.category, self.user_bob, self.role_owner)", "def test_delete_user_by_id_admin(client: FlaskClient, db_session) -> None:\n username = create_random_username()\n admin_username = create_random_username()\n populate_database_with_users(db_session, username)\n auth_token = create_auth_token(admin_username, admin=True)\n response = delete_user(client, username, auth_token.signed)\n assert response.status_code == HTTPStatus.NO_CONTENT\n assert response.content_length is None\n assert GifSyncUser.get_by_username(username) is None", "def _validate_original_author_id(cls, item):\n if (\n item.original_author_id and\n not user_services.is_user_or_pseudonymous_id(\n item.original_author_id)\n ):\n cls._add_error(\n 'final %s' % (\n base_model_validators.ERROR_CATEGORY_AUTHOR_CHECK),\n 'Entity id %s: Original author ID %s is in a wrong format. '\n 'It should be either pid_<32 chars> or uid_<32 chars>.'\n % (item.id, item.original_author_id))", "def remove_user(self, username):\n\n row = self.c.execute(\"SELECT * FROM profiles WHERE name =?\",\n (username,))\n for i in row:\n user = i[1]\n print(user)\n if user == username:\n self.c.execute(\"SELECT id FROM profiles WHERE name=?\",\n (username,))\n i_d = self.c.fetchone()[0]\n self.c.execute(\"DELETE FROM events WHERE user_id=?\", (i_d,))\n self.c.execute(\"DELETE FROM profiles WHERE name=?\", (username,))\n self.conn.commit()\n return True\n else:\n print ('User not found.')" ]
[ "0.6941911", "0.67129594", "0.6252531", "0.62405235", "0.6239856", "0.62261367", "0.62119555", "0.62015164", "0.61586416", "0.6044805", "0.60105914", "0.6007393", "0.59954685", "0.59929734", "0.59919494", "0.5990558", "0.598856", "0.598853", "0.598045", "0.594045", "0.5923282", "0.59194237", "0.59108675", "0.5897748", "0.5883618", "0.5880244", "0.58571076", "0.5854038", "0.58468896", "0.5845754", "0.58002496", "0.5776815", "0.57754946", "0.5773961", "0.576732", "0.57634014", "0.5761556", "0.5761412", "0.5750839", "0.57498676", "0.574586", "0.573489", "0.5718739", "0.57121634", "0.57070965", "0.5705962", "0.5699123", "0.5682769", "0.56616807", "0.5646288", "0.5635406", "0.5630867", "0.56227887", "0.5616607", "0.5614742", "0.560457", "0.5601277", "0.5599854", "0.5592847", "0.55795246", "0.5573138", "0.5566447", "0.55630314", "0.5560468", "0.55547893", "0.5552242", "0.5551827", "0.55429775", "0.5541708", "0.55366844", "0.55322903", "0.551569", "0.550635", "0.5505278", "0.55017984", "0.5501316", "0.5500953", "0.5499071", "0.54920405", "0.54864144", "0.547719", "0.5469192", "0.54686874", "0.54590434", "0.5456989", "0.5450396", "0.54446584", "0.5442704", "0.5442341", "0.54423225", "0.5437274", "0.54356486", "0.542654", "0.542616", "0.5425132", "0.54212505", "0.54141396", "0.5413172", "0.54130095", "0.5407615" ]
0.72741085
0
checking whether removing an owner after the user has logged out raises an AccessError as expected
def test_channel_removeowner_invalid_token_after_logout():
    clear()
    auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
    channels_create(register_third_result['token'], 'Random Channel 2', True)
    channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
    auth_logout(register_second_result['token'])
    with pytest.raises(AccessError):
        assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_destroy_not_owner(self):\n\n self.assertEqual(first=1, second=Post.objects.all().count())\n url = reverse('post-detail', args=(self.post.id,))\n self.client.credentials(HTTP_AUTHORIZATION=self.token_1)\n response = self.client.delete(path=url)\n self.assertEqual(first=403, second=response.status_code)\n self.assertEqual(first=1, second=Post.objects.all().count())", "def test_delete_author_unlogged(self):\n request = self.client.delete(self.epoint)\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def testDeleteAccessDenied(self):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(None, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(user, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_403()", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n 
target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")", "def test_delete_non_owner(self):\n another_user = CustomUser.objects.create(id=134, email='[email protected]', is_active=True)\n another_user.set_password('qwerty12345')\n another_user.save()\n\n self.client.login(email='[email protected]', password='qwerty12345')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 87876})\n\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 403)", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_remove_user(self):\n pass", "def test_permission_remove_unknown_user(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove joe TICKET_VIEW')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def delete_user():", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_remove_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Invalid Requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_handle_remove_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"web\")\n self.db.retrieve.return_value = 
test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def test_destroy_owner(self):\n\n self.assertEqual(first=1, second=Post.objects.all().count())\n url = reverse('post-detail', args=(self.post.id,))\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n response = self.client.delete(path=url)\n self.assertEqual(first=204, second=response.status_code)\n self.assertEqual(first=0, second=Post.objects.all().count())", "def teardown_volume_access( user_email, volume_name ):\n client = connect_syndicate()\n \n # block the user from creating more gateways, and delete the gateways\n try:\n rc = client.remove_user_from_volume( user_email, volume_name )\n assert rc is True, \"Failed to remove access right for %s in %s\" % (user_email, volume_name)\n \n except Exception, e:\n logger.exception(e)\n return False\n \n return True", "def delete_volumeaccessright_record( vac ):\n \n principal_id = vac.owner_id.email \n volume_name = vac.volume.name \n \n try:\n observer_core.ensure_volume_access_right_absent( principal_id, volume_name )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to revoke access from %s to %s\" % (principal_id, volume_name))\n raise e\n \n return True", "def ensure_principal_absent( user_email ):\n \n ensure_user_absent( user_email )\n delete_principal_data( user_email )\n return True", "def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def check_delete_permission(self):\n if getSecurityManager().checkPermission(\"Delete objects\", self):\n username = getSecurityManager().getUser().getUserName()\n if username == self.getOwner().getId():\n return True\n return False", "def has_remove_permissions(self, obj):\n return True", "def testDeleteIsAllowed(self):\n UserAPI().create([(u'user', u'secret', u'User', u'[email protected]')])\n namespaces = SecureNamespaceAPI(self.system.users['fluiddb'])\n namespaces.delete([u'user/private'])\n self.users.delete([u'user'])\n self.assertIdentical(None, getUser(u'user'))", "def test_realm_admin_remove_others_from_public_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=16,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 
randChannel_id['channel_id'], \"[email protected]\")", "def test_delete_owner(self):\n self.assertEqual(RoleAssignment.objects.count(), 3)\n url = reverse(\n 'projectroles:api_role_destroy',\n kwargs={'roleassignment': self.owner_as.sodar_uuid},\n )\n response = self.request_knox(url, method='DELETE')\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 3)", "def test_remove_last_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_organizations=[org.uid])\n user.put()\n\n self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(user),\n )\n\n # not changed in the db\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "def delete_user():\n #TODO user delete\n pass", "def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_delete_author_logged(self):\n self.client.force_authenticate(user=self.user)\n\n request = self.client.delete(self.epoint)\n self.assertEqual(request.status_code, status.HTTP_204_NO_CONTENT)", "def test_order_cannot_be_deleted_if_not_owner(self):\n\n\t\tres = self.login_user()\n\t\tress = self.login_admin_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\t\ta_access_token = json.loads(ress.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/1',\n\t\t\theaders={\"x-access-token\": a_access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 401)\n\t\tself.assertEqual(result[\"message\"], \n\t\t\t\"Not authorized to perform this function!\")", "def test_delete_root_forbidden(self, mapp):\n mapp.login_root()\n mapp.delete_user(user=\"root\", code=403)", "def unorphaned(self):\n return self.new_owner == self.user", "def test_deluser(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.run_function(\"group.adduser\", [self._group, self._user])\n self.assertTrue(self.run_function(\"group.deluser\", [self._group, self._user]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertNotIn(self._user, str(group_info[\"members\"]))", "def __check_removed_permissions(self) 
-> None:\n for permission in Permission.objects.all():\n if not self.__is_permission_allowed_to_delete(permission):\n continue\n\n if self.__is_permission_in_groups(permission.codename):\n raise PermissionInUse(f'Permission {permission.codename} is used in groups. Delete it first.')\n\n permission.delete()\n\n self.stdout.write(f'Removed {permission.codename} permission')", "def test_not_creator_cannot_delete(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_owner_delete_assessment(self):\n response = self.user_01.delete(self.assessment_custom_url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n response = self.user_01.get(self.assessment_custom_url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_not_logged_cannot_delete(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def delete_entity_owner(self, username):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_DELETE_ENTITY_OWNER + ':' + username)", "def testDeleteAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n SequencingMachine.objects.all().delete()\n machine = self.make_machine()\n self.assertEqual(SequencingMachine.objects.count(), 1)\n response = self.runDelete(user, sequencer=machine.sodar_uuid)\n self.response_204(response)\n self.assertEqual(SequencingMachine.objects.count(), 0)", "def test_permission_remove_one_action_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous TICKET_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_user_deletion(self):\n User.objects.filter(username=self.user.username).delete()\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_WARNING)", "def destroy(self, request, *args, **kwargs):\n instance = Group.objects.get(pk=kwargs['pk'])\n\n if instance.owner_id != request.user.id and not request.user.is_superuser:\n return not_allowed_to_do()\n\n return super().destroy(request, args, kwargs)", "def test_delete_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_del_user(self, api):\n self.builder.add_user(api.get_user())\n resp = api.del_user(api.get_user())\n assert resp.status_code == 204\n with pytest.raises(ObjectDeletedError):\n assert self.builder.check_user(api.get_user()) is False", "def remove_user(self):\n self.currentuser = None\n 
self.carlocked = False", "def test_admin_cannot_delete_non_existant_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n \n resp = self.client.delete(\n '/api/v1/users/5',\n content_type='application/json',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], \"This attendant does not exist!\")\n self.assertEqual(resp.status_code, 404)", "def test_delete_assigned_resource_by_non_admin(self):\n CommonTestCases.user_token_assert_in(\n self,\n delete_assigned_resource_mutation,\n \"You are not authorized to perform this action\"\n )", "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_permission_remove_action_not_granted(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove anonymous TICKET_CREATE')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def delete_user():\r\n raise NotImplementedError()", "def test_remove_authz_wrong(self):\n self.test_add_authz()\n self.app.delete(\"/config/authorize?operation=config\", status=400)\n self.app.delete(\"/config/authorize?dn=/DN=a.test.user\", status=204)", "def test_resource_user_resource_remove_user_from_user_groups_delete(self):\n pass", "def test_handle_delete_not_admin(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n test_user = User(\"userid\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()", "def test_remove_facility_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_facility'))", "def test_negative_conditions(self):\r\n outline_url = reverse_course_url('course_handler', self.course.id)\r\n # register a non-staff member and try to delete the course branch\r\n non_staff_client, _ = 
self.create_non_staff_authed_user_client()\r\n response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')\r\n self.assertEqual(response.status_code, 403)", "def test_cannot_delete_usage(self):\n p = Permission.objects.get(name='Can delete usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.delete(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))", "def test_delete_saved_filter_not_owner(self):\n filter_id = self.filter_1.pk\n url = reverse('xds_api:saved-filter', args=(filter_id,))\n _, token = AuthToken.objects.create(self.user_2)\n response = \\\n self.client.delete(url,\n HTTP_AUTHORIZATION='Token {}'.format(token))\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def onUserDeletion(event):\n client = getUtility(IAdminClient)\n xmpp_users = getUtility(IXMPPUsers)\n storage = getUtility(IPubSubStorage)\n\n principal_id = event.principal\n principal_jid = xmpp_users.getUserJID(principal_id)\n\n if principal_id in storage.leaf_nodes:\n storage.leaf_nodes.remove(principal_id)\n if principal_id in storage.publishers:\n del storage.publishers[principal_id]\n if principal_id in storage.node_items:\n del storage.node_items[principal_id]\n if principal_id in storage.collections['people']:\n storage.collections['people'].remove(principal_id)\n\n pass_storage = getUtility(IXMPPPasswordStorage)\n pass_storage.remove(principal_id)\n\n d = deletePrincipal(client, principal_jid)\n return d", "def test_delete_request_by_non_owner(self):\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION=self.test_user2_token)\n response = client.post('/api/places/', self.restaurant_data, format='json')\n url = f\"/api/places/{response.data['id']}/\"\n\n client.credentials(HTTP_AUTHORIZATION=self.test_user1_token)\n response = client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def testDeleteUserIsDenied(self):\n [(objectID, username)] = UserAPI().create(\n [(u'user', u'secret', u'User', u'[email protected]')])\n self.store.commit()\n with login(u'user', objectID, self.transact) as session:\n deferred = self.facade.deleteUser(session, u'doomed')\n error = yield self.assertFailure(deferred, TPathPermissionDenied)\n self.assertEqual(u'doomed', error.path)", "def test_delete_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user1_template, request=mock_request\n )", "def test_destroy_container_privilege(self):\n pass", "def test_blogpost_is_not_deleted_after_owner_deletion(self):\r\n self.configure_fixtures()\r\n owner = User(\r\n email_addr=\"[email protected]\",\r\n name=\"johndoe2\",\r\n fullname=\"John Doe2\",\r\n locale=\"en\")\r\n blogpost = Blogpost(title='title', body=\"body\", app=self.app, owner=owner)\r\n db.session.add(blogpost)\r\n db.session.commit()\r\n\r\n assert owner in db.session\r\n assert blogpost in db.session\r\n\r\n db.session.delete(owner)\r\n db.session.commit()\r\n assert owner not in db.session\r\n assert blogpost in db.session\r\n assert blogpost.owner == None, blogpost.owner", "def test_01_self_unshare_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n 
cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n dog.uaccess.unshare_resource_with_user(holes, dog)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertFalse(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_resource_unshare_users(holes)))", "def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def test_non_owner_authenticated_user_delete_blogpost(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n blogpost = BlogpostFactory.create()\r\n\r\n assert self.mock_authenticated.id != blogpost.owner.id\r\n assert not self.mock_authenticated.admin\r\n assert_raises(Forbidden, getattr(require, 'blogpost').delete, blogpost)", "def test_remove_user(self):\n db = database.Database()\n db.remove_user('nick')\n\n the_args, _ = db._cursor.execute.call_args\n sql = the_args[0]\n expected_sql = 'DELETE FROM quota_violations WHERE username LIKE (%s)'\n\n self.assertEqual(sql, expected_sql)", "def test_remove_from_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=['Team_foo'])\n req = User.create(name='requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)", "def test_delete_user_by_id_non_admin(client: FlaskClient, db_session) -> None:\n username = create_random_username()\n populate_database_with_users(db_session, username)\n auth_token = create_auth_token(username)\n response = delete_user(client, username, auth_token.signed)\n assert response.status_code == HTTPStatus.NO_CONTENT\n assert response.content_length is None\n assert GifSyncUser.get_by_username(username) is None", "def handle_owner_delete(owner_id):\n\n owner = Owner.find_by_id(owner_id)\n # flash error message if owner does not exist\n if not owner:\n flash(f'Owner does not exist!', 
'danger')\n return 'not deleted', 404\n # flash error message if owner still has existing content\n elif owner.contents:\n flash(f'{owner.owner_name} still has existing content!', 'danger')\n return 'not deleted', 400\n\n # owner is deleted and user is redirected (redirect code in owners.js)\n # deleting owner errors handled\n try:\n owner.delete_owner()\n except HTTPException:\n return \"Server cannot delete the owner at this time\", 500\n\n flash(f'{owner.owner_name} has been deleted!', 'success')\n return 'deleted', 202", "def delete_principal_data( user_email ):\n \n sp = get_principal_data( user_email )\n if sp is not None:\n sp.delete()\n \n return True", "def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','admins'):\n abort(403)", "def test_remove_fellow_from_organization_success(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Valid Requestor', email='[email protected]',\n user_type='user',\n owned_organizations=['Organization_foo'])\n user.put()\n req.put()\n\n # Successful removal.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n )\n self.assertEqual(json.loads(response.body)['owned_organizations'], [])\n\n # Changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(fetched_user.owned_organizations, [])\n self.assertEqual(user.user_type, fetched_user.user_type)", "def test_delete_collection_user(self):\n pass", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def test_delete_shelf_unauthorized(self, *_):\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ):\n rat = models.User.objects.create_user(\n \"[email protected]\",\n \"[email protected]\",\n \"password\",\n local=True,\n localname=\"rat\",\n )\n request = self.factory.post(\"\")\n request.user = rat\n\n with self.assertRaises(PermissionDenied):\n views.delete_shelf(request, self.shelf.id)\n\n self.assertTrue(models.Shelf.objects.filter(id=self.shelf.id).exists())", "def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete', 'advisors'):\n abort(403)", "def can_delete(self, user):\n raise Return(False)", "def _check_remove_last_super(user_obj):\n if not user_obj.is_superuser:\n return\n\n # Is there any other active superuser left?\n all_active_su = User.objects.filter(is_superuser__exact = True,\n is_active__exact = True)\n num_active_su = all_active_su.count()\n assert num_active_su >= 1, _(\"No active superuser configured.\")\n if num_active_su == 1:\n raise PopupException(_(\"You cannot remove the last active superuser from the configuration.\"), error_code=401)", "def clean(self):\n 
super().clean()\n if self.user2:\n self.orig_cloud.delete_user(self.user2.id)", "def test_delete_other_users_template_as_staff_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.staff_user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def test_jenkins_user_delete(self):\n ju = JenkinsUser.objects.get(username=\"user_1\")\n self.assertRaises(django.db.models.deletion.ProtectedError, ju.delete)", "def test_15_admin_user_add_del_authenticated(self):\r\n self.register()\r\n self.signout()\r\n self.register(fullname=\"Juan Jose\", name=\"juan\",\r\n email=\"[email protected]\", password=\"juan\")\r\n self.signout()\r\n self.register(fullname=\"Juan Jose2\", name=\"juan2\",\r\n email=\"[email protected]\", password=\"juan2\")\r\n self.signout()\r\n self.signin(email=\"[email protected]\", password=\"juan2\")\r\n # Add user.id=2 to admin group\r\n res = self.app.get(\"/admin/users/add/2\", follow_redirects=True)\r\n assert res.status == \"403 FORBIDDEN\",\\\r\n \"This action should be forbidden, not enought privileges\"\r\n # Remove user.id=2 from admin group\r\n res = self.app.get(\"/admin/users/del/2\", follow_redirects=True)\r\n assert res.status == \"403 FORBIDDEN\",\\\r\n \"This action should be forbidden, not enought privileges\"", "def test_remove_facility_pt3(self):\n self.assertFalse(self.learner1.has_perm('auth.remove_facility'))", "def test_remove_facility_pt1(self):\n self.assertFalse(self.admin.has_perm('auth.remove_facility'))", "def test_delete_fail(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'id': people[2].id}\n self.delete('user', 403, params=p)", "def admins_remove(request):\n if len(models.User.admins()) > 1:\n username = request.params['remove']\n user = models.User.get_by_username(username)\n user.admin = False\n return httpexceptions.HTTPSeeOther(\n location=request.route_url('admin_admins'))", "def test_remove_virt_realm(self):\n pass", "def test_delete_global_template_as_user_raises_access_control_error(self):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.global_template, request=mock_request\n )", "def test_remove_classroom_optional_object_error(self):\n with self.assertRaises(InvalidPermission):\n self.admin.has_perm('auth.remove_classroom', {})", "def test_user_not_in_group_cannot_delete(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def test_not_logged_in(event_member):\n admin, member, event_id = event_member\n log_out(member.username)\n expect_error(edit, AuthError, member.username, event_id, True, None, None)" ]
[ "0.70658934", "0.7061854", "0.673374", "0.66135883", "0.65965587", "0.6513615", "0.64951664", "0.6487198", "0.6482636", "0.6462151", "0.6452183", "0.64459676", "0.6433408", "0.6396297", "0.63769233", "0.63756007", "0.6364545", "0.63642037", "0.6347555", "0.6339679", "0.63275635", "0.6316628", "0.631383", "0.63045657", "0.62708694", "0.62668663", "0.626224", "0.62565", "0.6251059", "0.62223214", "0.6219419", "0.6210365", "0.61948174", "0.61878085", "0.61863023", "0.61750823", "0.6170981", "0.61624116", "0.6145366", "0.61378294", "0.612772", "0.61261004", "0.6124922", "0.61114687", "0.60864913", "0.6074987", "0.60685706", "0.60637414", "0.60630995", "0.6038172", "0.603817", "0.6032518", "0.60259783", "0.6013138", "0.60012466", "0.5985652", "0.59845823", "0.5973029", "0.5970969", "0.59526736", "0.5946303", "0.5945341", "0.5940751", "0.5932438", "0.59289145", "0.59287286", "0.59251153", "0.5924545", "0.5919912", "0.5905274", "0.5903968", "0.59018034", "0.5895302", "0.5886605", "0.5883497", "0.5873959", "0.5873144", "0.5871227", "0.5867036", "0.585611", "0.5854674", "0.58536285", "0.5852031", "0.5847918", "0.5845277", "0.58432883", "0.5838747", "0.58363354", "0.5826595", "0.5823642", "0.5820788", "0.5813229", "0.5798491", "0.5788122", "0.57880944", "0.578632", "0.57790303", "0.5769289", "0.57657105", "0.5764971" ]
0.70261496
2
Checking if removing an owner without owner permissions raises an AccessError.
def test_channel_removeowner_not_owner_permissions():
    clear()
    auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
    with pytest.raises(AccessError):
        assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def has_remove_permissions(self, obj):\n return True", "def __check_removed_permissions(self) -> None:\n for permission in Permission.objects.all():\n if not self.__is_permission_allowed_to_delete(permission):\n continue\n\n if self.__is_permission_in_groups(permission.codename):\n raise PermissionInUse(f'Permission {permission.codename} is used in groups. 
Delete it first.')\n\n permission.delete()\n\n self.stdout.write(f'Removed {permission.codename} permission')", "def test_permission_remove_unknown_user(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove joe TICKET_VIEW')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def test_permission_remove_one_action_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous TICKET_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def check_delete_permission(self):\n if getSecurityManager().checkPermission(\"Delete objects\", self):\n username = getSecurityManager().getUser().getUserName()\n if username == self.getOwner().getId():\n return True\n return False", "def test_permission_remove_action_not_granted(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission remove anonymous TICKET_CREATE')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, 
self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def test_remove_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Invalid Requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_cannot_delete_usage(self):\n p = Permission.objects.get(name='Can delete usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.delete(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))", "def delete_volumeaccessright_record( vac ):\n \n principal_id = vac.owner_id.email \n volume_name = vac.volume.name \n \n try:\n observer_core.ensure_volume_access_right_absent( principal_id, volume_name )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to revoke access from %s to %s\" % (principal_id, volume_name))\n raise e\n \n 
return True", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def owner_or_permissions(**perms):\n original = commands.has_permissions(**perms).predicate\n\n async def extended_check(ctx):\n if ctx.guild is None:\n raise errors.NoPrivateMessage\n return ctx.guild.owner_id == ctx.author.id or await original(ctx)\n\n return commands.check(extended_check)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_destroy_not_owner(self):\n\n self.assertEqual(first=1, second=Post.objects.all().count())\n url = reverse('post-detail', args=(self.post.id,))\n self.client.credentials(HTTP_AUTHORIZATION=self.token_1)\n response = self.client.delete(path=url)\n self.assertEqual(first=403, second=response.status_code)\n self.assertEqual(first=1, second=Post.objects.all().count())", "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def test_channel_removeowner_invalid_user_id():\n 
clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def test_filter_owner_permission(self):\n User = get_user_model()\n user1 = User.objects.create(username=\"test_user1\", email=\"[email protected]\")\n obj = DescriptorSchema.objects.create(contributor=user1)\n obj.set_permission(Permission.VIEW, user1)\n\n data_template = {\n \"users\": {user1.id: \"view\"},\n \"groups\": {1: \"edit\", 2: \"NONE\"},\n }\n\n check_owner_permission(data_template, False, obj)\n\n # Check that only owner can set owner permission.\n data = deepcopy(data_template)\n data[\"users\"][1] = \"owner\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that only owner can rewoke owner permission.\n obj.set_permission(Permission.OWNER, user1)\n data = deepcopy(data_template)\n data[\"users\"][1] = \"edit\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that group can not be owner.\n obj.set_permission(Permission.VIEW, user1)\n data = deepcopy(data_template)\n data[\"groups\"][1] = \"owner\"\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, False, obj)\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, True, obj)", "def _try_delete_and_return_permissions_error(component_url):\n try:\n delete_object_task.DeleteObjectTask(component_url, verbose=False).execute()\n except api_errors.CloudApiError as e:\n status = getattr(e, 'status_code', None)\n if status == 403:\n return e\n raise", "def _check_namespace_access(self, namespace, user):\n if not namespace.owners.filter(id=user.id).count():\n raise exceptions.PermissionDenied(\n 'The namespace listed on your filename must match one of '\n 'the namespaces you have access to.'\n )", "def testDeleteAccessDenied(self):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(None, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.runDelete(user, sequencer=self.hiseq2000.sodar_uuid)\n self.assertEqual(SequencingMachine.objects.count(), 1)\n self.response_403()", "def test_01_self_unshare_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n dog.uaccess.unshare_resource_with_user(holes, dog)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertFalse(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_resource_unshare_users(holes)))", "def is_still_owner(self):\n raise tooz.NotImplemented", "def test_delete_owner(self):\n 
self.assertEqual(RoleAssignment.objects.count(), 3)\n url = reverse(\n 'projectroles:api_role_destroy',\n kwargs={'roleassignment': self.owner_as.sodar_uuid},\n )\n response = self.request_knox(url, method='DELETE')\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(RoleAssignment.objects.count(), 3)", "def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)", "def delete_entity_owner(self, username):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_DELETE_ENTITY_OWNER + ':' + username)", "def test_order_cannot_be_deleted_if_not_owner(self):\n\n\t\tres = self.login_user()\n\t\tress = self.login_admin_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\t\ta_access_token = json.loads(ress.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/1',\n\t\t\theaders={\"x-access-token\": a_access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 401)\n\t\tself.assertEqual(result[\"message\"], \n\t\t\t\"Not authorized to perform this function!\")", "def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True", "def check_owner_permission(payload: dict, allow_user_owner: bool, obj: models.Model):\n for entity_type in [\"users\", \"groups\"]:\n for user_identification, permission in payload.get(entity_type, {}).items():\n if permission == \"owner\":\n if entity_type == \"users\" and not allow_user_owner:\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )\n\n if entity_type == \"groups\":\n raise exceptions.ParseError(\n \"Owner permission cannot be assigned to a group\"\n )\n # Here we have to check if owner permission is being revoked.\n # Unfortunately there is no way to do this without hitting the\n # database.\n elif entity_type == \"users\":\n if not allow_user_owner:\n user = fetch_user(str(user_identification))\n if obj.is_owner(user):\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )", "def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))", "def 
is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True", "def test_permission_remove_multiple_actions_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous WIKI_CREATE WIKI_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_permission_remove_action_for_all_users(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission add anonymous TICKET_CREATE')\n self._execute('permission remove * TICKET_CREATE')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_realm_admin_remove_others_from_public_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=16,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_delete_request_by_non_owner(self):\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION=self.test_user2_token)\n response = client.post('/api/places/', self.restaurant_data, format='json')\n url = f\"/api/places/{response.data['id']}/\"\n\n client.credentials(HTTP_AUTHORIZATION=self.test_user1_token)\n response = client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def del_ro(action, name, exc):\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def test_remove_classroom_optional_object_error(self):\n with self.assertRaises(InvalidPermission):\n self.admin.has_perm('auth.remove_classroom', {})", "def test_delete_author_unlogged(self):\n request = self.client.delete(self.epoint)\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)", "def 
channel_removeowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id not in channel[\"owners\"]:\n raise ValueError(\"user is not an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to remove owners\")\n\n channels.remove(channel_id, \"owners\", u_id)", "def test_kyc_delete_legal_share_holder_natural(self):\n pass", "def renounceOwnership():\n\n assert msg.sender == self.owner, \"Access is denied.\"\n\n log.OwnershipRenounced(msg.sender)\n self.owner = ZERO_ADDRESS", "def test_delete_non_owner(self):\n another_user = CustomUser.objects.create(id=134, email='[email protected]', is_active=True)\n another_user.set_password('qwerty12345')\n another_user.save()\n\n self.client.login(email='[email protected]', password='qwerty12345')\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 87876})\n\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 403)", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def assert_same_owner(path):\n try:\n assert find_owner(path) == getuser(), f\"{path} must be owned by {getuser()}\"\n except AssertionError as error:\n raise click.UsageError(str(error))\n except FileNotFoundError:\n pass", "def test_handle_remove_not_admin(self):\n test_user = User(\"userid\")\n team = Team(\"BRS\", \"brs\", \"web\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n with self.app.app_context():\n self.assertTupleEqual(self.testcommand.handle(\"team remove\"\n \" brs ID\", user),\n (self.testcommand.permission_error, 200))\n self.db.store.assert_not_called()\n self.gh.remove_team_member.assert_not_called()", "def test_delete_assigned_resource_by_non_admin(self):\n CommonTestCases.user_token_assert_in(\n self,\n delete_assigned_resource_mutation,\n \"You are not authorized to perform this action\"\n )", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def test_remove_facility_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_facility'))", "def permission_absent(self):\n # fetch object at the path\n path = self.module.params[\"object\"]\n username = self.module.params[\"subject\"]\n recursive = self.module.params[\"recursive\"]\n subject_zone = self.module.params[\"subject_zone\"]\n if not subject_zone:\n subject_zone = self.module.params[\"zone\"]\n\n # get existing 
permission of the object\n obj_type = self._object_type(path)\n if not obj_type:\n self._fail(\"Object does not exist\")\n permission = self._get_permission(obj_type, path, username)\n self.result[\"perm_before\"] = permission\n\n if recursive and obj_type != Collection:\n self._fail(\"recursive option can only be used on collection\")\n\n # remove permission if exists\n if recursive and not self._check_null_permission_recursive(path, username):\n self._set_permission(path, \"null\", username, recursive=True, user_zone=subject_zone)\n self.result[\"changed\"] = True\n elif not recursive and permission:\n self._set_permission(path, \"null\", username, user_zone=subject_zone)\n self.result[\"changed\"] = True\n self.result[\"perm_after\"] = self._get_permission(obj_type, path, username)\n self._success(\"permission is absent\")", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def test_remove_facility_pt1(self):\n self.assertFalse(self.admin.has_perm('auth.remove_facility'))", "def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def raise_for_ownership(self, resource: Model) -> None:\n\n # pylint: disable=import-outside-toplevel\n from superset import db\n\n if self.is_admin():\n return\n\n # Set of wners that works across ORM models.\n owners: List[User] = []\n\n orig_resource = db.session.query(resource.__class__).get(resource.id)\n\n if orig_resource:\n if hasattr(resource, \"owners\"):\n owners += orig_resource.owners\n\n if hasattr(resource, \"owner\"):\n owners.append(orig_resource.owner)\n\n if hasattr(resource, \"created_by\"):\n owners.append(orig_resource.created_by)\n\n if g.user.is_anonymous or g.user not in owners:\n raise SupersetSecurityException(\n SupersetError(\n error_type=SupersetErrorType.MISSING_OWNERSHIP_ERROR,\n message=f\"You don't have the rights to alter [{resource}]\",\n level=ErrorLevel.ERROR,\n )\n )", "def test_remove_facility_pt4(self):\n with self.assertRaises(InvalidPermission):\n self.assertFalse(self.learner1.has_perm('auth.remove_facility', obj=[]))", "def test_delete_permission(self):\r\n self.assertFalse(self.creator_admin.has_delete_permission(self.request))", "def test_remove_facility_pt3(self):\n self.assertFalse(self.learner1.has_perm('auth.remove_facility'))", "def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "def isowner(self, o):\n return self._owner is o", "def has_delete_permission(self, request, obj=None):\n has_perm = super(ShortURLAdmin, self).has_delete_permission(request, obj)\n if not has_perm:\n return False\n if obj is not None and not request.user.has_perm('deflect.list_all') and request.user.id != obj.creator.id:\n return False\n return True", "def 
test_delete_root_forbidden(self, mapp):\n mapp.login_root()\n mapp.delete_user(user=\"root\", code=403)", "def check_exists(name):\n if arcpy.Exists(name):\n arcpy.Delete_management(name)\n return", "def remove_owner(urn: str, owner_urn: str) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.remove_owner(owner=_get_owner_urn(owner_urn))\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"remove owners\")\n for mcp in dataproduct_patcher.build():\n print(json.dumps(mcp.to_obj()))\n graph.emit(mcp)", "def destroy(self, request, *args, **kwargs):\n instance = Group.objects.get(pk=kwargs['pk'])\n\n if instance.owner_id != request.user.id and not request.user.is_superuser:\n return not_allowed_to_do()\n\n return super().destroy(request, args, kwargs)", "def ensure_principal_absent( user_email ):\n \n ensure_user_absent( user_email )\n delete_principal_data( user_email )\n return True", "def testDeleteAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n SequencingMachine.objects.all().delete()\n machine = self.make_machine()\n self.assertEqual(SequencingMachine.objects.count(), 1)\n response = self.runDelete(user, sequencer=machine.sodar_uuid)\n self.response_204(response)\n self.assertEqual(SequencingMachine.objects.count(), 0)", "def testDeleteIsAllowed(self):\n UserAPI().create([(u'user', u'secret', u'User', u'[email protected]')])\n namespaces = SecureNamespaceAPI(self.system.users['fluiddb'])\n namespaces.delete([u'user/private'])\n self.users.delete([u'user'])\n self.assertIdentical(None, getUser(u'user'))", "def can_delete(self, user):\n raise Return(False)", "def remove_permission(self, label):\r\n return self.connection.remove_permission(self, label)", "def changeOwnership(self, document):\n document.changeOwnership(getSecurityManager().getUser(), False)", "def test_owner_delete_assessment(self):\n response = self.user_01.delete(self.assessment_custom_url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n response = self.user_01.get(self.assessment_custom_url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_remove_user(self):\n pass", "def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up", "def test_not_creator_cannot_delete(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], 
randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def unorphaned(self):\n return self.new_owner == self.user", "def test_permission_remove_all_actions_for_user(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous *')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def testAssistantOwnershipAfterEdit(self):\n self.failUnless(self._testAssistantOwnershipAfter(task='edit'), \"designated assistant is not listed as an owner\")", "def validate_ownership(item, user_id):\n if item.user_id != user_id:\n raise Forbidden('You are not allowed to modify this item.')", "def allowed_topology_access_delete(user, topology):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return topology.owner == user or user.has_perm(\"vnswww.topology_delete_any\") or (user.has_perm(\"vnswww.topology_delete_org\") and up.org == topology.org)", "def canRemove(self, p_int): # real signature unknown; restored from __doc__\n return False", "def test_remove_last_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_organizations=[org.uid])\n user.put()\n\n self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(user),\n )\n\n # not changed in the db\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_IDeleteCapability(self):\n self.assertFalse(self.ldap.allowDeletePrincipal(\"uid0\"))\n self.assertFalse(self.ldap.allowDeletePrincipal(\"unknownuser\"))", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def delPermission(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"admin_username\",\"perm_name\")\n request.getAuthNameObj().canDo(\"CHANGE ADMIN PERMISSIONS\")\n perm_actions.getActionManager().deletePermission(request[\"admin_username\"],request[\"perm_name\"])", "def test_delete_other_users_template_as_user_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.user1)\n with self.assertRaises(AccessControlError):\n template_api.delete(\n self.fixture.user2_template, request=mock_request\n )", "def testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n 
self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def check_deletion(oc_name, org):\n duplicate_name = org['name']\n\n distance = org_tools.getDistance(oc_name, duplicate_name)\n\n if distance <= 0.35:\n org['can_delete'] = 1\n else:\n org['can_delete'] = 0\n\n return org" ]
[ "0.6874314", "0.6790394", "0.6770987", "0.67394656", "0.6579873", "0.6498488", "0.6473038", "0.642164", "0.63643247", "0.63571066", "0.6270544", "0.62456524", "0.62216616", "0.62143797", "0.6190839", "0.6165569", "0.6157032", "0.6144713", "0.61358136", "0.6118853", "0.6115285", "0.6112563", "0.6102568", "0.6087466", "0.60833216", "0.60736954", "0.60509664", "0.60501486", "0.60377055", "0.6037685", "0.6036524", "0.60215205", "0.6011772", "0.60115564", "0.59639615", "0.59603006", "0.5956188", "0.5947204", "0.59389544", "0.5936835", "0.5925014", "0.59114265", "0.5904474", "0.59001637", "0.58939314", "0.58845323", "0.5883615", "0.5877721", "0.5874746", "0.5865921", "0.58649445", "0.5860425", "0.58542955", "0.58536893", "0.58520734", "0.5845838", "0.5819925", "0.58185226", "0.58166", "0.5814731", "0.58055645", "0.5800836", "0.5798492", "0.57979035", "0.579251", "0.57920694", "0.5786705", "0.57661563", "0.57656807", "0.57567155", "0.57525086", "0.5751839", "0.5749649", "0.5740742", "0.5724203", "0.5703603", "0.57026255", "0.57016385", "0.5701083", "0.56820005", "0.56774634", "0.56758255", "0.56748515", "0.56747234", "0.5670815", "0.5669922", "0.566111", "0.56600046", "0.56513035", "0.5651064", "0.5650878", "0.5645175", "0.56277794", "0.5624552", "0.56235206", "0.561573", "0.5612549", "0.5611774", "0.561075", "0.56078166" ]
0.7237803
0
checking if able to remove an owner who is the last owner of the channel
def test_channel_removeowner_last_owner():\n    clear()\n    register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n    register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n    randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n    channel_join(register_first_result['token'], randChannel_id['channel_id'])\n    #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n    #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n    # removing the second user, who is the channel's only owner\n    channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess 
you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def channel_removeowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id not in channel[\"owners\"]:\n raise ValueError(\"user is not an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to remove owners\")\n\n channels.remove(channel_id, \"owners\", u_id)", "def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as 
owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def unorphaned(self):\n return self.new_owner == self.user", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)", "def 
test_realm_admin_remove_others_from_public_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=16,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def owner_or_permissions(**perms):\n original = commands.has_permissions(**perms).predicate\n\n async def extended_check(ctx):\n if ctx.guild is None:\n raise errors.NoPrivateMessage\n return ctx.guild.owner_id == ctx.author.id or await original(ctx)\n\n return commands.check(extended_check)", "def delete_self_ownership(self):\n current_ownership_list = self.msg.get_ownershipList()\n self.old_ownership_list = current_ownership_list\n for comp in self.deleted_comp_list:\n if comp in current_ownership_list:\n current_ownership_list.remove(comp)\n self.logger.debug(\"After removing transfer component ownership, \\\n new ownership: %s\" % current_ownership_list)\n self.msg.set_ownershipList(current_ownership_list)", "def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up", "def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 
'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")", "def channel_addowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id in channel[\"owners\"]:\n raise ValueError(\"user is already an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to add owners\")\n\n channels.set(channel_id, \"owners\", u_id)", "def is_owned_by(self, user):\n return user and user.id == self.user_id", "async def ticket_remove(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. 
\"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n if user.id not in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is not added.\")\n return\n\n removing_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if removing_is_admin:\n await ctx.send(\"You cannot remove a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n\n try:\n await channel.set_permissions(user, send_messages=False, read_messages=False)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. \"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].remove(user.id)\n\n await ctx.send(f\"{user.mention} has been removed from the ticket.\")", "def is_still_owner(self):\n raise tooz.NotImplemented", "def isowner(self, o):\n return self._owner is o", "def test_remove_last_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_organizations=[org.uid])\n user.put()\n\n self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(user),\n )\n\n # not changed in the db\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def renounceOwnership():\n\n assert msg.sender == self.owner, \"Access is denied.\"\n\n log.OwnershipRenounced(msg.sender)\n self.owner = ZERO_ADDRESS", "def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "async def remove(self, ctx, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n namesp = None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in 
your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(x.display_name))\n elif x.id in self.db[server.id]:\n del self.db[server.id][x.id]\n self.save_db()\n await self.bot.say(\"{} has been removed from the list.\".format(x.display_name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(name.display_name))\n return\n elif name.id in self.db[server.id]:\n del self.db[server.id][name.id]\n self.save_db()\n await self.bot.say(\"{} has been deleted from the list.\".format(name.display_name))", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def is_participant(self, message: discord.Message):\n if message.author in self.participants:\n self.participants.remove(message.author)\n return True\n\n return False", "def leave_union(self):\n if self.union is None:\n return f'{self.username} is not a member of any guild'\n\n if self.union.has_member(self):\n union_name = self.union.name\n self.union = None\n self.save()\n return f'{self.username} has been removed from {union_name}'", "def is_owner(self):\n return self._is_owner", "def check_user_has_owner_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n return userName in owners", "async def remove_player(ctx, group_name: str, player_name: str, owner: str=None):\n\n if owner and owner != ctx.message.author.name:\n if ctx.message.author.id != bot.owner_id:\n await ctx.send(\"Sorry, you don't have permission to modify that group. 
Nerd.\")\n else:\n owner = ctx.message.author.name\n \n if owner in bg_bot.manager.groups:\n for group in bg_bot.manager.groups[owner]['groups']:\n if group.name == group_name:\n if group.remove_member(player_name):\n response = f'Removed {player_name} from {group_name} successfully!'\n break\n else:\n response = \"Error removing player!\"\n break\n\n else:\n response = \"No groups exist that match the input criteria.\"\n \n await ctx.send(response)", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def __updater_get_new_ownership(self):\n if self._transfer_cmp_event.is_set() and not self.put_queue_flag:\n self.logger.info(\"Received transfer/accept request event in updater\")\n for comp_tuple in self._updater_map.keys():\n if int(comp_tuple[0]) not in self.msg.get_ownershipList():\n del self._updater_map[comp_tuple]\n self.msg.put_into_Queue()\n self.put_queue_flag = True\n elif not self._transfer_cmp_event.is_set():\n self.put_queue_flag = False", "def test_channel_join_private_owner():\n clear()\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "async def _remove(self, ctx, points: int, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(\n lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(\n lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(\n lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(\n lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, 
please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await self.bot.say(\"{} was not found. Please add them first using points member add\"\n \" <discord name or Nickname>\".format(x.display_name))\n else:\n self.db[server.id][x.id][\"Lifetime Loss\"] += points\n self.db[server.id][x.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, x.name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please register first using points member add\"\n \" <Discord name or nickname>\".format(namea))\n return\n self.db[server.id][name.id][\"Lifetime Loss\"] += points\n self.db[server.id][name.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, name.name))\n self.save_db()", "async def on_member_remove(member):\r\n pass", "async def __remove(self, ctx, name: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n if name is None:\n name = author\n if server.id not in self.db:\n self.db[server.id] = {}\n if \"bookkeeper\" not in self.db[server.id]:\n self.db[server.id][\"bookkeeper\"] = []\n await self.bot.say(\"Bookkeeper list is currently empty, add new bookkeepers using points keeper add\"\n \" <Discord name or nickname>\")\n self.save_db()\n return\n if name.id not in self.db[server.id][\"bookkeeper\"]:\n await self.bot.say(\"Keeper is not registered, please make sure the name or nickname is correctly spelled. \"\n \"You can check using points keeper list\")\n return\n self.db[server.id][\"bookkeeper\"].remove(name.id)\n self.save_db()", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False", "def delete_entity_owner(self, username):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_DELETE_ENTITY_OWNER + ':' + username)", "def test_channel_addowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def has_remove_permissions(self, obj):\n return True", "def _should_delete(self, msg, ctx):\n # Do not remove the user's call\n if msg.id == ctx.message.id:\n return False\n # Remove command calls\n if msg.content.startswith(ctx.prefix):\n 
return True\n # Remove our bot's messages\n if msg.author == self.bot.user:\n return True\n return False", "def check_owner_permission(payload: dict, allow_user_owner: bool, obj: models.Model):\n for entity_type in [\"users\", \"groups\"]:\n for user_identification, permission in payload.get(entity_type, {}).items():\n if permission == \"owner\":\n if entity_type == \"users\" and not allow_user_owner:\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )\n\n if entity_type == \"groups\":\n raise exceptions.ParseError(\n \"Owner permission cannot be assigned to a group\"\n )\n # Here we have to check if owner permission is being revoked.\n # Unfortunately there is no way to do this without hitting the\n # database.\n elif entity_type == \"users\":\n if not allow_user_owner:\n user = fetch_user(str(user_identification))\n if obj.is_owner(user):\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def untether(self) -> None:\n if self.msg.sender != self.owner:\n revert(f'Only the owner can call the untether method.')\n pass", "async def omartrifacta(self, ctx):\n user_member1 = await ctx.guild.fetch_member(\"142084729674399745\")\n user_member2 = await ctx.guild.fetch_member(\"197784087476305921\")\n user_member3 = await ctx.guild.fetch_member(\"219969018369409024\")\n if user_member1 is not None and user_member2 is not None and user_member3 is not None:\n kick_channel = await ctx.guild.create_voice_channel(\"kicked\")\n await user_member1.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await user_member2.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await user_member3.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await kick_channel.delete()\n else:\n print(\"user invalid for omar()\")", "def _handleBusOwnerChanged(self, new_owner):\n if new_owner == '':\n logger.warn('No owner anymore for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n raise Exception('LostDhcpSlave')\n else:\n pass # Owner exists", "def collection_special_author_cancel(user_id, author_id):\n\n another_user_id = author_id\n if (user_id == another_user_id):\n return \"self\"\n query = db_session.query(Collection_User).filter_by(\n user_id=user_id, another_user_id=another_user_id).all()\n if len(query) == 1:\n db_session.delete(query[0])\n db_session.commit()\n update_collection_num(user_id, another_user_id, False)\n else:\n return \"already\"\n return \"success\"", "def _check_remove_last_super(user_obj):\n if not user_obj.is_superuser:\n return\n\n # Is there any other active superuser left?\n all_active_su = User.objects.filter(is_superuser__exact = True,\n is_active__exact = True)\n num_active_su = all_active_su.count()\n assert num_active_su >= 1, _(\"No active superuser configured.\")\n if num_active_su == 1:\n raise PopupException(_(\"You cannot remove the last active superuser from the configuration.\"), error_code=401)", "def test_remove_coach_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))", "def remove_user_collection_self(args):\n is_parameter_exists([\n constants.ID\n ], 
args)\n\n collection_id = int(args[constants.ID])\n\n request_user = args[constants.USER]\n\n # Check Collection Id\n if not Collection.objects.filter(id=collection_id).exists():\n raise ApiError(constants.NOT_EXIST_OBJECT)\n\n try:\n collection_user = CollectionUser.objects.get(collection_id=collection_id, user_id=request_user.id)\n except ObjectDoesNotExist:\n raise ApiError(constants.NOT_EXIST_OBJECT)\n\n # if request_user is not member, then raise AUTH_ERROR\n # the owner cannot delete himself or herself\n # if the owner want to leave a collection, he or she must transfer it to other user\n # or deleting the collection would be a solution\n if collection_user.type != COLLECTION_USER_TYPE[1]:\n raise ApiError(constants.AUTH_ERROR)\n\n collection_user.delete()\n\n # Get the number of Members(including owner) Of Collections\n user_counts = __get_collection_user_count([collection_id], 'collection_id')\n return {constants.USERS: user_counts[collection_id] if collection_id in user_counts else 0}", "async def done(self, ctx, member: discord.Member):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n if member.id in lst:\r\n lst.remove(member.id)\r\n await self.config.guild(ctx.guild).neededlist.set(lst)\r\n await self.config.member(member).clear()\r\n await ctx.send(\"Removed member from pending list\")\r\n\r\n else:\r\n await ctx.send(\"Member not in the pending list\")\r\n\r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "def early_return(bot:Bot, ctx:Context):\n return ctx.message.author.bot or ctx.message.author.id == bot.user.id", "async def _ad_remove(self, ctx, member):\n member_object = discord.utils.find(\n lambda x: x.name == member or str(x) == member or (member.isnumeric() and x.id == int(member)),\n ctx.guild.members\n )\n if member_object is not None:\n member = member_object.id\n elif member.isnumeric():\n member = int(member)\n\n admin = list(filter(lambda x: x.user_id == member, self.database.get_admins(ctx.guild.id)))\n if admin:\n self.database.remove_item(admin[0])\n if member_object:\n await ctx.send(f\"Removed admin from {member_object.name}\")\n else:\n await ctx.send(\"Removed admin from invalid user\")\n else:\n await ctx.send(\"That person isn't an admin!\")", "def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an owner'),\n params={'username': username},\n )\n return owner", "def test_realm_admin_remove_multiple_users_from_stream(self) -> None:\n target_users = [\n self.example_user(name)\n for name in [\"cordelia\", \"prospero\", \"iago\", \"hamlet\", \"outgoing_webhook_bot\"]\n ]\n result = self.attempt_unsubscribe_of_principal(\n query_count=27,\n cache_count=8,\n target_users=target_users,\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 5)\n 
self.assert_length(json[\"not_removed\"], 0)", "def test_remove_coach_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))", "async def removeuser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! Users can only be removed from a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await ctx.message.delete()", "def isOwner(id, userId):\n db = core.connect()\n return db[id][\"createdBy\"] == userId", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def test_permission_remove_one_action_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous TICKET_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_04_self_unshare_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.unshare_group_with_user(meowers, dog)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertFalse(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_group_unshare_users(meowers)))", "async def omar(self, ctx, user):\n user = user.replace(\"<\",\"\").replace(\">\",\"\").replace(\"@\",\"\").replace(\"!\",\"\")\n print(user)\n user_member = await ctx.guild.fetch_member(user)\n if user_member is not None:\n kick_channel = await ctx.guild.create_voice_channel(\"kicked\")\n await user_member.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await kick_channel.delete()\n else:\n print(\"user invalid for omar()\")", "def test_remove_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Invalid Requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "def get_owner_object(self):\n return False", "def test_remove_fellow_from_organization_success(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Valid Requestor', email='[email protected]',\n user_type='user',\n owned_organizations=['Organization_foo'])\n user.put()\n req.put()\n\n # Successful removal.\n response = self.testapp.put_json(\n 
'/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n )\n self.assertEqual(json.loads(response.body)['owned_organizations'], [])\n\n # Changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(fetched_user.owned_organizations, [])\n self.assertEqual(user.user_type, fetched_user.user_type)", "def test_remove_self_from_team_success(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[team.uid])\n user.put()\n team.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(user),\n )\n\n # User is removed from team.\n self.assertEqual(json.loads(response.body)['owned_teams'], [])", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False", "async def remove(message, client, extra_args):\n\n if await funnypts_transaction(message, client, extra_args, \"remove\"):\n await message.channel.send(\"BRUH, THAT WAS CRINGE. SOMEONE JUST REVOKED YOUR FUNNYPOINT\")", "async def clean(self, ctx, user: discord.Member):\n self.data_check(ctx)\n author = ctx.message.author\n server = author.server\n colour = server.me.colour\n channel = ctx.message.channel\n can_role = channel.permissions_for(server.me).manage_roles\n count = self.riceCog[server.id][user.id][\"Count\"]\n muterole = await self.get_role(user.server)\n\n if server.id not in self.riceCog:\n self.riceCog[server.id] = {}\n dataIO.save_json(self.profile,\n self.riceCog)\n if user.id not in self.riceCog[server.id]:\n self.riceCog[server.id][user.id] = {}\n dataIO.save_json(self.profile,\n self.riceCog)\n else:\n pass\n else:\n if user.id not in self.riceCog[server.id]:\n self.riceCog[server.id][user.id] = {}\n dataIO.save_json(self.profile,\n self.riceCog)\n else:\n pass\n await self.bot.delete_message(ctx.message)\n if \"Count\" in self.riceCog[server.id][user.id]:\n count = self.riceCog[server.id][user.id][\"Count\"]\n else:\n count = 0\n await self.bot.say(\"**The following punishments for {} have been removed:**\".format(user))\n if count != 0:\n count = 0\n self.riceCog[server.id][user.id].update({\"Count\": count})\n dataIO.save_json(self.profile,\n self.riceCog)\n\n self.bot.remove_roles(user, muterole)\n msg = await self.bot.say(\"Mute Role\")\n if 'poop' in self.riceCog2[server.id] and can_role:\n if self.riceCog2[server.id]['poop'] == True:\n try:\n role = role = list(filter(lambda r: r.name.startswith('Warning \\U0001f528'), server.roles))\n await self.bot.remove_roles(user, *role)\n msg = await self.bot.say(\"Warning Roles\")\n except discord.errors.Forbidden:\n await self.bot.say(\"No permission to add roles\") \n\n if user.id in self.norole[server.id] and 'Role' == True:\n self.norole[server.id][user.id] = {'Role': False}\n dataIO.save_json(self.warninglist, self.norole)\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n await self.bot.remove_roles(user,nobnl)\n msg = await self.bot.say(\"NoBNL Role\")\n\n else:\n msg = await self.bot.say(\"No more punishments to remove!\")", "def available(self, o):\n return not self.locked() or self.isowner(o)", "async def nogroup(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n author = ctx.message.author\n roles = author.roles\n for role in roles:\n if role.name.lower() in changeable_groups:\n 
roles.remove(role)\n await amor_manager.replace_roles(author, *roles)\n await amor_manager.say('{0} removed from color groups'.format(author.name))", "def cog_check(self, ctx):\n return ctx.author.guild_permissions.administrator", "def check_delete_permission(self):\n if getSecurityManager().checkPermission(\"Delete objects\", self):\n username = getSecurityManager().getUser().getUserName()\n if username == self.getOwner().getId():\n return True\n return False", "def test_stream_stale_follows(self):\n self.user2.delete()\n self.assertNotIn('Two', str(user_stream(self.user1)))", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "async def deluser(self, ctx, member: discord.Member):\r\n for k, v in player.items():\r\n if k == member.name:\r\n del player[k]\r\n cur.execute(\"DELETE FROM players WHERE name=%s\", [k])\r\n conn.commit()\r\n await ctx.send(k + ' has been removed from the player-base')\r\n break", "def is_user_is_owner(self):\n return self._tag == 'user_is_owner'", "def is_add_remove_member(string, nickname):\n if string.startswith(f\"{nickname} added \") and string.endswith(\" to the group.\"):\n return True\n \n if string.startswith(f\"{nickname} removed \") and string.endswith(\" from the group.\"):\n return True\n return False", "def remove_owner(urn: str, owner_urn: str) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.remove_owner(owner=_get_owner_urn(owner_urn))\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"remove owners\")\n for mcp in dataproduct_patcher.build():\n print(json.dumps(mcp.to_obj()))\n graph.emit(mcp)", "def __remove_request_from_queue(self, sender):\n with self.__queue.mutex:\n for x in self.__queue.queue:\n if x[1] == sender:\n self.__queue.queue.remove(x)\n return True\n return False", "def _remove(users, room_name):\n global users_removed\n users_removed = []\n\n try:\n\n for word in users['message']['text'].split():\n\n if word == 'myself':\n user = users['message']['sender']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n \n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append('Not found ->> ' + \"<\" + user + \">\")\n\n check_continue = 1\n text = '```User removed: %s ```' % (','.join(users_removed))\n\n for _item in range(len(users['message']['text'].split())):\n\n _item = _item + 1\n\n try:\n _type = users['message']['annotations'][_item]['userMention']['user']['type']\n user = users['message']['annotations'][_item]['userMention']['user']['name']\n \n if _type == 'BOT':\n\n if check_continue == 1:\n continue\n else:\n text = 'Please add user with @'\n continue\n \n user = users['message']['annotations'][_item]['userMention']['user']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n\n except:\n pass\n\n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append(\"Not found ->> \" + \"<\" + user + \">\")\n text = \"```Removed users: %s ```\" % (','.join(list(set(users_removed))))\n return text\n except:\n\n text = 'Please add user with @'\n return text" ]
[ "0.73305464", "0.7195283", "0.7129515", "0.712579", "0.70183945", "0.6805983", "0.67637455", "0.66506666", "0.65761715", "0.6486065", "0.6440695", "0.6412174", "0.6344784", "0.6314831", "0.6306158", "0.62849295", "0.62610173", "0.6227882", "0.6078866", "0.607737", "0.60758984", "0.6071704", "0.6057858", "0.60495794", "0.60468155", "0.60443354", "0.6019361", "0.5957048", "0.59558165", "0.5955704", "0.5946238", "0.5904482", "0.5873481", "0.58659786", "0.5835943", "0.58330727", "0.5812923", "0.5801583", "0.5785584", "0.5770477", "0.5715459", "0.5647765", "0.5643723", "0.5637824", "0.56305987", "0.5614075", "0.56082344", "0.55831194", "0.55465615", "0.552955", "0.5525876", "0.5523647", "0.55091256", "0.5498746", "0.54857", "0.54847574", "0.5481278", "0.5477339", "0.5472002", "0.5467103", "0.54640377", "0.5456026", "0.5443782", "0.5440099", "0.5410624", "0.5403102", "0.5396589", "0.5395369", "0.5378188", "0.5375786", "0.5374069", "0.53505665", "0.5349108", "0.53486305", "0.5347586", "0.5326596", "0.5326596", "0.5315503", "0.53004265", "0.52897006", "0.5286954", "0.52825254", "0.5275198", "0.5261925", "0.5254594", "0.5253892", "0.52488095", "0.52307135", "0.52233696", "0.52147174", "0.5214509", "0.5212742", "0.5210931", "0.52102274", "0.51989985", "0.51903576", "0.5189627", "0.51881254", "0.5186195", "0.51827216" ]
0.7552469
0
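
The record below follows the `triplet` objective declared in each row's metadata: one query, one positive document, and a list of hard negatives. A minimal sketch of unpacking such a row for contrastive training follows, assuming the bare numeric lines are per-negative scores, a document score, and a document rank (those field names are inferred, not printed in the rows), with snippet bodies abbreviated:

# A minimal sketch of consuming one triplet row for contrastive training.
# Field names "query", "document", "negatives" come from the rows' metadata
# objective; "negative_scores", "document_score", "document_rank" are assumed
# names for the bare numeric lines. Snippet bodies are abbreviated ("...").
row = {
    "query": "checking if owner of the flockr who is not the channel owner can remove owner",
    "document": "def test_channel_removeowner_owner_flockr(): ...",
    "negatives": ["def test_channel_addowner_owner_flockr_not_member(): ..."],
    "negative_scores": [0.7614531],
    "document_score": 0.7702833,
    "document_rank": 0,
}

# One positive pair plus one pair per hard negative.
pairs = [(row["query"], row["document"], 1)]
pairs += [(row["query"], neg, 0) for neg in row["negatives"]]
print(len(pairs), pairs[0][2])  # 2 1
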
checking if owner of the flockr who is not the channel owner can remove owner
def test_channel_removeowner_owner_flockr():
    clear()
    register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
    channel_join(register_first_result['token'], randChannel_id['channel_id'])
    channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n 
channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def channel_removeowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id not in channel[\"owners\"]:\n raise ValueError(\"user is not an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to remove owners\")\n\n channels.remove(channel_id, \"owners\", u_id)", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = 
database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def 
test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def delete_self_ownership(self):\n current_ownership_list = self.msg.get_ownershipList()\n self.old_ownership_list = current_ownership_list\n for comp in self.deleted_comp_list:\n if comp in current_ownership_list:\n current_ownership_list.remove(comp)\n self.logger.debug(\"After removing transfer component ownership, \\\n new ownership: %s\" % current_ownership_list)\n self.msg.set_ownershipList(current_ownership_list)", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=False,\n target_users_subbed=False,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 0)\n self.assert_length(json[\"not_removed\"], 1)", "def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def renounceOwnership():\n\n assert msg.sender == self.owner, \"Access is denied.\"\n\n log.OwnershipRenounced(msg.sender)\n self.owner = ZERO_ADDRESS", "def test_cant_remove_other_users_from_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=8,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n self.assert_json_error(result, \"Insufficient 
permission\")", "async def ticket_remove(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n if user.id not in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is not added.\")\n return\n\n removing_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if removing_is_admin:\n await ctx.send(\"You cannot remove a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n\n try:\n await channel.set_permissions(user, send_messages=False, read_messages=False)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].remove(user.id)\n\n await ctx.send(f\"{user.mention} has been removed from the ticket.\")", "def channel_addowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id in channel[\"owners\"]:\n raise ValueError(\"user is already an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to add owners\")\n\n channels.set(channel_id, \"owners\", u_id)", "def test_realm_admin_remove_others_from_unsubbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=False,\n invite_only=True,\n target_users_subbed=True,\n other_sub_users=[self.example_user(\"othello\")],\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def test_realm_admin_remove_others_from_subbed_private_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=17,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=True,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up", "def is_still_owner(self):\n raise tooz.NotImplemented", "def unorphaned(self):\n return self.new_owner == self.user", "def test_realm_admin_remove_others_from_public_stream(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=16,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)", "def isowner(self, o):\n return self._owner is o", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def run(self):\n # Determine if this filter doesn't 
apply.\n if (self.owner == None \\\n or (self.sense and self.user != self.owner) \\\n or ((not self.sense) and self.user == self.owner)):\n return 0\n\n # Perform the child actions.\n self.context.tokens['Owner'] = self.owner\n return super(FilterLockOwner, self).run()", "def owner_or_permissions(**perms):\n original = commands.has_permissions(**perms).predicate\n\n async def extended_check(ctx):\n if ctx.guild is None:\n raise errors.NoPrivateMessage\n return ctx.guild.owner_id == ctx.author.id or await original(ctx)\n\n return commands.check(extended_check)", "def untether(self) -> None:\n if self.msg.sender != self.owner:\n revert(f'Only the owner can call the untether method.')\n pass", "def has_remove_permissions(self, obj):\n return True", "def is_owned_by(self, user):\n return user and user.id == self.user_id", "async def removeuser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! Users can only be removed from a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await ctx.message.delete()", "async def remove(message, client, extra_args):\n\n if await funnypts_transaction(message, client, extra_args, \"remove\"):\n await message.channel.send(\"BRUH, THAT WAS CRINGE. SOMEONE JUST REVOKED YOUR FUNNYPOINT\")", "async def remove_player(ctx, group_name: str, player_name: str, owner: str=None):\n\n if owner and owner != ctx.message.author.name:\n if ctx.message.author.id != bot.owner_id:\n await ctx.send(\"Sorry, you don't have permission to modify that group. Nerd.\")\n else:\n owner = ctx.message.author.name\n \n if owner in bg_bot.manager.groups:\n for group in bg_bot.manager.groups[owner]['groups']:\n if group.name == group_name:\n if group.remove_member(player_name):\n response = f'Removed {player_name} from {group_name} successfully!'\n break\n else:\n response = \"Error removing player!\"\n break\n\n else:\n response = \"No groups exist that match the input criteria.\"\n \n await ctx.send(response)", "def remove_owner(urn: str, owner_urn: str) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.remove_owner(owner=_get_owner_urn(owner_urn))\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"remove owners\")\n for mcp in dataproduct_patcher.build():\n print(json.dumps(mcp.to_obj()))\n graph.emit(mcp)", "def test_channel_join_private_owner():\n clear()\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def __updater_get_new_ownership(self):\n if self._transfer_cmp_event.is_set() and not self.put_queue_flag:\n self.logger.info(\"Received transfer/accept request event in updater\")\n for comp_tuple in self._updater_map.keys():\n if int(comp_tuple[0]) not in 
self.msg.get_ownershipList():\n del self._updater_map[comp_tuple]\n self.msg.put_into_Queue()\n self.put_queue_flag = True\n elif not self._transfer_cmp_event.is_set():\n self.put_queue_flag = False", "def is_owner(self):\n return self._is_owner", "async def __remove(self, ctx, name: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n if name is None:\n name = author\n if server.id not in self.db:\n self.db[server.id] = {}\n if \"bookkeeper\" not in self.db[server.id]:\n self.db[server.id][\"bookkeeper\"] = []\n await self.bot.say(\"Bookkeeper list is currently empty, add new bookkeepers using points keeper add\"\n \" <Discord name or nickname>\")\n self.save_db()\n return\n if name.id not in self.db[server.id][\"bookkeeper\"]:\n await self.bot.say(\"Keeper is not registered, please make sure the name or nickname is correctly spelled. \"\n \"You can check using points keeper list\")\n return\n self.db[server.id][\"bookkeeper\"].remove(name.id)\n self.save_db()", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "async def remove(self, ctx, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n namesp = None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n \"the list.\".format(x.display_name))\n elif x.id in self.db[server.id]:\n del self.db[server.id][x.id]\n self.save_db()\n await self.bot.say(\"{} has been removed from the list.\".format(x.display_name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please make sure they have been added first to \"\n 
\"the list.\".format(name.display_name))\n return\n elif name.id in self.db[server.id]:\n del self.db[server.id][name.id]\n self.save_db()\n await self.bot.say(\"{} has been deleted from the list.\".format(name.display_name))", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def test_remove_coach_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))", "def test_channel_addowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def delete_entity_owner(self, username):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_DELETE_ENTITY_OWNER + ':' + username)", "async def on_member_remove(member):\r\n pass", "def check_owner_permission(payload: dict, allow_user_owner: bool, obj: models.Model):\n for entity_type in [\"users\", \"groups\"]:\n for user_identification, permission in payload.get(entity_type, {}).items():\n if permission == \"owner\":\n if entity_type == \"users\" and not allow_user_owner:\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )\n\n if entity_type == \"groups\":\n raise exceptions.ParseError(\n \"Owner permission cannot be assigned to a group\"\n )\n # Here we have to check if owner permission is being revoked.\n # Unfortunately there is no way to do this without hitting the\n # database.\n elif entity_type == \"users\":\n if not allow_user_owner:\n user = fetch_user(str(user_identification))\n if obj.is_owner(user):\n raise exceptions.PermissionDenied(\n \"Only owners can grant/revoke owner permission\"\n )", "def test_component_chown_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component chown component2 changed_owner')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "async def unlock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n if (channel.category.name == CATEGORY_SO or channel.category.name == CATEGORY_GENERAL):\n await ctx.send(\"Synced permissions with channel category.\")\n return await channel.edit(sync_permissions=True)\n\n member_role = discord.utils.get(member.guild.roles, 
name=ROLE_MR)\n if (channel.category.name != CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True, read_messages=True)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bRole = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(aRole, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bRole, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Unlocked the channel to Member access. Please check if permissions need to be synced.\")", "def test_remove_coach_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))", "async def omartrifacta(self, ctx):\n user_member1 = await ctx.guild.fetch_member(\"142084729674399745\")\n user_member2 = await ctx.guild.fetch_member(\"197784087476305921\")\n user_member3 = await ctx.guild.fetch_member(\"219969018369409024\")\n if user_member1 is not None and user_member2 is not None and user_member3 is not None:\n kick_channel = await ctx.guild.create_voice_channel(\"kicked\")\n await user_member1.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await user_member2.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await user_member3.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await kick_channel.delete()\n else:\n print(\"user invalid for omar()\")", "def is_participant(self, message: discord.Message):\n if message.author in self.participants:\n self.participants.remove(message.author)\n return True\n\n return False", "def __remove_request_from_queue(self, sender):\n with self.__queue.mutex:\n for x in self.__queue.queue:\n if x[1] == sender:\n self.__queue.queue.remove(x)\n return True\n return False", "def test_transfer_old_inherited_owner_demote(self):\n self.owner_as_cat.user = self.user_owner\n self.owner_as_cat.save()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_guest.username,\n 'old_owner_role': PROJECT_ROLE_DELEGATE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_owner)\n self.assertEqual(\n self.project.get_role(self.user_guest).role, self.role_guest\n )", "def check_user_has_owner_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n return userName in owners", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def _remove_player(self, player, player_party, other_party):\n\n party = vars(self)[player_party][:]\n 
party.remove(player)\n vars(self)[player_party].remove(player)\n for other in vars(self)[other_party]:\n if player in other.prefs:\n other.forget(player)", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "async def remove_from(self, target: discord.Member) -> None:\n role = await self.get_role(target.guild)\n if role:\n await target.remove_roles(role)\n\n if not role.members:\n await role.delete()", "def test_remove_facility_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_facility'))", "def _handleBusOwnerChanged(self, new_owner):\n if new_owner == '':\n logger.warn('No owner anymore for bus name ' + RemoteDhcpClientControl.DBUS_NAME)\n raise Exception('LostDhcpSlave')\n else:\n pass # Owner exists", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def available(self, o):\n return not self.locked() or self.isowner(o)", "def user_deletable(self):\n source_module_id = getattr(self, 'source_module_id', False)\n if not source_module_id:\n return True\n\n root_module_id = getattr(self, 'root_module_id', False)\n if not root_module_id:\n return True\n\n app = self.get_app()\n parent_module = app.get_module_by_unique_id(root_module_id)\n\n if parent_module.module_type == 'shadow':\n return False\n\n return True", "def allow_sudo(message):\n if message.author.id == Guard.AUTHOR and message.channel.type == discord.ChannelType.private:\n return True\n if message.author.id in Guard.SUDO_IDS and message.channel.id in Guard.SUDO_CHANNELS:\n return True\n return False", "def test_04_self_unshare_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.unshare_group_with_user(meowers, dog)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertFalse(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_group_unshare_users(meowers)))", "async def ccallow(self, ctx, channel: discord.TextChannel):\n channel_list = await self.config.guild(ctx.guild).channel_deny()\n if channel.id in channel_list:\n channel_list.remove(channel.id)\n else:\n return await ctx.send(\"Channel is not on the deny list.\")\n await self.config.guild(ctx.guild).channel_deny.set(channel_list)\n await ctx.send(f\"{channel.mention} will be allowed for chatchart use.\")", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "def set_owner_allowed(self, data):\n self._owner_allowed = self._uni(data)", "def requireOwn(func):\n def wrappedFunc(self, unit,*args):\n if unit.owner != self.playerID is not self:\n return \"You do not own %s\" % unit.id\n else:\n return func(self, unit, *args)\n return wrappedFunc", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n 
if member.id == ctx.author.id:\n return True\n\n return False", "async def done(self, ctx, member: discord.Member):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n if member.id in lst:\r\n lst.remove(member.id)\r\n await self.config.guild(ctx.guild).neededlist.set(lst)\r\n await self.config.member(member).clear()\r\n await ctx.send(\"Removed member from pending list\")\r\n\r\n else:\r\n await ctx.send(\"Member not in the pending list\")\r\n\r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "def test_permission_remove_one_action_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous TICKET_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "async def canceltorment(self, ctx):\r\n\t\t\r\n\t\tchannel = ctx.message.channel\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\r\n\t\t# Only allow owner to change server stats\r\n\t\tisOwner = self.settings.isOwner(ctx.author)\r\n\t\tif isOwner == None:\r\n\t\t\treturn\r\n\t\telif isOwner == False:\r\n\t\t\treturn\r\n\t\t\t\r\n\t\tif not self.toTorment:\r\n\t\t\tawait ctx.message.author.send('Not currently tormenting.')\r\n\t\t\treturn\r\n\t\t# Cancel it!\r\n\t\tself.toTorment = False\r\n\t\tawait ctx.message.author.send('Tormenting cancelled.')", "def _should_delete(self, msg, ctx):\n # Do not remove the user's call\n if msg.id == ctx.message.id:\n return False\n # Remove command calls\n if msg.content.startswith(ctx.prefix):\n return True\n # Remove our bot's messages\n if msg.author == self.bot.user:\n return True\n return False", "def __reader_get_new_ownership(self):\n if self._transfer_cmp_event.is_set() and not self.put_queue_flag:\n self.logger.info(\"Received transfer/accept request event in reader\") \n for key in self._reader_map.keys():\n if int(key) not in self.msg.get_ownershipList():\n del self._reader_map[key]\n self.msg.put_into_Queue()\n self.put_queue_flag = True\n elif not self._transfer_cmp_event.is_set():\n self.put_queue_flag = False", "def __call__(self,camp):\n if self.npc in camp.party:\n camp.assign_pilot_to_mecha(self.npc,None)\n camp.party.remove(self.npc)\n for mek in list(camp.party):\n if hasattr(mek,\"owner\") and mek.owner is self.npc:\n camp.party.remove(mek)", "def test_01_self_unshare_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n dog.uaccess.unshare_resource_with_user(holes, dog)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertFalse(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_resource_unshare_users(holes)))", "def test_remove_from_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]', user_type='user',\n 
owned_teams=['Team_foo'])\n req = User.create(name='requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)", "async def omar(self, ctx, user):\n user = user.replace(\"<\",\"\").replace(\">\",\"\").replace(\"@\",\"\").replace(\"!\",\"\")\n print(user)\n user_member = await ctx.guild.fetch_member(user)\n if user_member is not None:\n kick_channel = await ctx.guild.create_voice_channel(\"kicked\")\n await user_member.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await kick_channel.delete()\n else:\n print(\"user invalid for omar()\")", "def delete_volumeaccessright_record( vac ):\n \n principal_id = vac.owner_id.email \n volume_name = vac.volume.name \n \n try:\n observer_core.ensure_volume_access_right_absent( principal_id, volume_name )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to revoke access from %s to %s\" % (principal_id, volume_name))\n raise e\n \n return True", "async def blacklist_remove(self, ctx: commands.Context, target):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n\r\n if isinstance(target, discord.User):\r\n check = await self.check_user(target.id, table)\r\n target = target.id\r\n else:\r\n check = await self.check_user(int(target), table)\r\n target = int(target)\r\n\r\n if check[0]:\r\n await self.remove_blacklist(target, table)\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is not blacklisted.\")", "async def _ad_remove(self, ctx, member):\n member_object = discord.utils.find(\n lambda x: x.name == member or str(x) == member or (member.isnumeric() and x.id == int(member)),\n ctx.guild.members\n )\n if member_object is not None:\n member = member_object.id\n elif member.isnumeric():\n member = int(member)\n\n admin = list(filter(lambda x: x.user_id == member, self.database.get_admins(ctx.guild.id)))\n if admin:\n self.database.remove_item(admin[0])\n if member_object:\n await ctx.send(f\"Removed admin from {member_object.name}\")\n else:\n await ctx.send(\"Removed admin from invalid user\")\n else:\n await ctx.send(\"That person isn't an admin!\")", "def _should_remove(self, mac, obj):\n ret = False\n if getattr(obj, self.toggle_val) == self.toggle_check\\\n and self.toggle.state == 'down':\n ret = True\n return ret", "async def handler(event):\n\n con = event.pattern_match.group(1).lower()\n del_u = 0\n del_status = \"`No deleted accounts found, this group is clean asf.`\"\n\n if con != \"clean\":\n await event.edit(\"`Searching for ghost/deleted/zombie accounts...`\")\n async for user in event.client.iter_participants(event.chat_id):\n\n if user.deleted:\n del_u += 1\n await sleep(1)\n if del_u > 0:\n del_status = f\"`Found` **{del_u}** `ghost/deleted/zombie account(s) in this group,\\\n \\nclean them by using .zombies clean`\"\n await event.edit(del_status)\n return\n\n # Here laying the sanity check\n chat = await event.get_chat()\n admin = chat.admin_rights\n creator = chat.creator\n\n # Well\n if not admin and not creator:\n await event.edit(\"`I am not an admin here!`\")\n return\n\n await event.edit(\"`Cleaning 
up this mess..`\")\n del_u = 0\n del_a = 0\n\n async for user in event.client.iter_participants(event.chat_id):\n if user.deleted:\n try:\n await event.client(\n EditBannedRequest(event.chat_id, user.id, BANNED_RIGHTS))\n except ChatAdminRequiredError:\n await event.edit(\"`I don't have ban rights in this group!`\")\n return\n except UserAdminInvalidError:\n del_u -= 1\n del_a += 1\n await event.client(\n EditBannedRequest(event.chat_id, user.id, UNBAN_RIGHTS))\n del_u += 1\n \n if del_u > 0:\n del_status = f\"Cleaned **{del_u}** deleted account(s)\"\n if del_a > 0:\n del_status = f\"Cleaned **{del_u}** deleted account(s) \\\n \\n**{del_a}** deleted admin account(s) could not be removed.\"\n \n await event.edit(del_status)\n await sleep(2)\n await event.delete()", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "async def _remove(self, ctx, points: int, *, name=None):\n server = ctx.message.server\n author = ctx.message.author\n names = None\n if not self.permcheck(ctx):\n return\n if name is None:\n name = author\n elif \",\" in str(name):\n if \", \" in name:\n names = name.split(\", \")\n elif \",\" in name:\n names = name.split(\",\")\n namesp = names.copy()\n for i in range(len(names)):\n names[i] = discord.utils.find(\n lambda m: m.display_name == names[i], server.members)\n if names[i] is None:\n names[i] = discord.utils.find(\n lambda m: m.name == names[i], server.members)\n name = None\n else:\n namea = name[:]\n name = discord.utils.find(\n lambda m: m.display_name == name, server.members)\n if name is None:\n name = discord.utils.find(\n lambda m: m.name == name, server.members)\n if name is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namea))\n return\n if server.id not in self.db:\n self.db[server.id] = {}\n if not name:\n counter = -1\n for x in names:\n counter += 1\n if x is None:\n await self.bot.say(\"{} was not found, please check the spelling and also make \"\n \"sure that the member name being entered is a member in your Discord and \"\n \"that its the same as their Discord name / nickname.\".format(namesp[counter]))\n await asyncio.sleep(1)\n continue\n elif x.id not in self.db[server.id]:\n await self.bot.say(\"{} was not found. Please add them first using points member add\"\n \" <discord name or Nickname>\".format(x.display_name))\n else:\n self.db[server.id][x.id][\"Lifetime Loss\"] += points\n self.db[server.id][x.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, x.name))\n await asyncio.sleep(1)\n else:\n if name.id not in self.db[server.id]:\n await self.bot.say(\"{} is not in the list, please register first using points member add\"\n \" <Discord name or nickname>\".format(namea))\n return\n self.db[server.id][name.id][\"Lifetime Loss\"] += points\n self.db[server.id][name.id][\"Balance\"] -= points\n await self.bot.say(\"{} points substracted from {}\".format(points, name.name))\n self.save_db()", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False" ]
[ "0.7614531", "0.74451256", "0.7375203", "0.70898795", "0.695579", "0.68203926", "0.68009984", "0.6557617", "0.6531711", "0.651857", "0.64480776", "0.639784", "0.6383161", "0.6346579", "0.63352793", "0.62960595", "0.618517", "0.6165083", "0.6161233", "0.61429024", "0.61311483", "0.60626054", "0.6053295", "0.5988746", "0.5937176", "0.59287184", "0.59270394", "0.5915345", "0.59145075", "0.590622", "0.5901539", "0.5889973", "0.58665353", "0.5860181", "0.5858806", "0.5830739", "0.5783144", "0.5737263", "0.57199544", "0.57133806", "0.56769425", "0.56587124", "0.5646099", "0.56428266", "0.56384575", "0.56285787", "0.5598595", "0.5581564", "0.5581449", "0.5565346", "0.5560572", "0.5541832", "0.55342215", "0.5529007", "0.5476704", "0.5475378", "0.54721195", "0.5457662", "0.5453234", "0.54495305", "0.5445951", "0.54342455", "0.54263896", "0.5412538", "0.54118264", "0.5396617", "0.5381387", "0.53803325", "0.5379416", "0.53699785", "0.5369338", "0.53677154", "0.53510153", "0.5332046", "0.5315907", "0.5315168", "0.5304112", "0.5304074", "0.53003746", "0.5295843", "0.5287946", "0.5285264", "0.52803755", "0.52724904", "0.52705824", "0.52685815", "0.5247455", "0.5244625", "0.524008", "0.5238052", "0.5226514", "0.52242804", "0.5218453", "0.521704", "0.52048373", "0.52040845", "0.52019215", "0.5180923", "0.5174848", "0.5173084" ]
0.7702833
0
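
Taken together, this record and the next probe the same ownership rule from two sides: a flockr owner who has joined a channel may remove that channel's owner, while a flockr owner who is not a member may not (AccessError). A compact restatement of that check, using the member_ids / owner_ids keys and the permission_id == 1 convention from the channel_removeowner implementation quoted among the negatives above — a paraphrase for readability, not the canonical flockr source:

# Restates the rule the two test documents encode. Channel keys and the
# permission-id convention (1 == flockr owner) follow the channel_removeowner
# negative quoted above; the membership requirement follows the tests here.
def can_remove_owner(caller_id, channel, permission_id):
    is_member = caller_id in channel["member_ids"]
    is_flockr_owner = permission_id == 1
    is_channel_owner = caller_id in channel["owner_ids"]
    return is_member and (is_flockr_owner or is_channel_owner)

# Mirrors the two records: a joined flockr owner succeeds, a non-member fails.
channel = {"member_ids": [1, 2], "owner_ids": [2]}
assert can_remove_owner(1, channel, permission_id=1)      # this record's test
assert not can_remove_owner(3, channel, permission_id=1)  # next record's test
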
checking if AccessError is returned as expected if the owner of flockr is not a member of the channel
def test_channel_removeowner_owner_flockr_not_member():
    clear()
    register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')
    randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)
    channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
    with pytest.raises(AccessError):
        assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def test_channel_removeowner_not_owner_permissions():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_util_has_perm_or_owns_sanity(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n perm = 'forums_forum.thread_edit_forum'\n allowed = access.has_perm_or_owns(me, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = access.has_perm_or_owns(me, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. 
Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_addowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n assert(auth_logout(register_second_result['token'])[\"is_success\"] is True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_channel_removeowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])", "def test_channel_join_private_owner():\n clear()\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def test_channel_join_except_private():\n # Clear the data structure\n 
clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", False)\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])", "def test_not_owner(self):\n creating_user = create_user()\n creating_user.save()\n festival = create_festival('test', creating_user)\n festival.save()\n\n concert = create_concert(festival, 'test')\n concert.save()\n\n login(self.client)\n\n client = create_client('test')\n client.delete_access = True\n client.save()\n\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': concert.pk})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_channel_removeowner_invalid_token_after_logout():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channels_create(register_third_result['token'], 'Random Channel 2', True)\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n auth_logout(register_second_result['token'])\n with pytest.raises(AccessError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_channel_leave_normal_case_owner():\n \n clear()\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last') \n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n channel_join(leaver['token'], userchannel_id['channel_id'])\n channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id'])\n channel_leave(leaver['token'], userchannel_id['channel_id'])\n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['owner_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])", "def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and 
self.ispm):\r\n return True\r\n return False", "async def lock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name == CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False, read_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n admin_role = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bot_role = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(admin_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bot_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Locked the channel to Member access.\")", "def test_channel_leave_invalid_user():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n leaver = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_leave(leaver['token'], userchannel_id['channel_id'])", "async def __local_check(self, ctx):\n if not isinstance(ctx.channel, discord.TextChannel):\n raise InvalidChannelCheck(ctx.command)\n me = ctx.me.guild_permissions\n perms = (me.manage_messages, me.manage_nicknames, me.ban_members, me.kick_members)\n if not all(perms):\n raise BotPermissionsCheck(ctx.command)\n else:\n return True", "def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def available(self, o):\n return not self.locked() or self.isowner(o)", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "def get_everyone_denied(self):", "def test_component_chown_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component chown component2 changed_owner')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_03_self_cannot_upgrade_resource(self):\n holes = self.holes\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_resource_with_user(holes, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in holes.raccess.edit_users)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_resource_unshare_users(holes)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_resource_with_user(\n holes, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in holes.raccess.view_users)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n 
dog.uaccess.get_resource_unshare_users(holes)))", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def test_has_perm_or_owns_thread_edit(self):\n me = User.objects.get(pk=118533)\n my_t = Thread.objects.filter(creator=me)[0]\n other_t = Thread.objects.exclude(creator=me)[0]\n self.context['request'].user = me\n perm = 'forums_forum.thread_edit_forum'\n allowed = has_perm_or_owns(self.context, perm, my_t, self.forum_1)\n eq_(allowed, True)\n allowed = 
has_perm_or_owns(self.context, perm, other_t, self.forum_1)\n eq_(allowed, False)", "def test_channel_addowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def test_channel_removeowner_invalid_user_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], \"[email protected]\")", "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n with pytest.raises(InputError):\n assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])", "def owner_or_permissions(**perms):\n original = commands.has_permissions(**perms).predicate\n\n async def extended_check(ctx):\n if ctx.guild is None:\n raise errors.NoPrivateMessage\n return ctx.guild.owner_id == ctx.author.id or await original(ctx)\n\n return commands.check(extended_check)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"User %s does not own study %d\" %\n (user.id, study.id))", "def cog_check(self, ctx):\n return ctx.author.guild_permissions.administrator", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False", "def test_component_chown_error_bad_component(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('component chown bad_component changed_owner')\n self.assertEqual(2, rv)\n # We currently trigger a deprecation warning with py26 so we\n # can currrently only verify that the end of the output string is\n # correct\n self.assertEqual(output.endswith(self.expected_results[test_name]), True)", "def test_channel_join_except_invalid_auth():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", True)\n\n # Create invalid token for the test\n invalid_user = 999\n invalid_token = generate_token(invalid_user)\n\n with pytest.raises(AccessError):\n channel_join_v2(invalid_token, channel_id1[\"channel_id\"])", "def ownercheck(self, 
userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False", "async def owner(c, m):\n if not m.id in ids:\n await c.send('You must be an owner to use this command.')\n raise Exception()\n return True", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "async def unlock(ctx):\n member = ctx.message.author\n channel = ctx.message.channel\n\n if (channel.category.name in [\"beta\", \"staff\", \"Pi-Bot\"]):\n return await ctx.send(\"This command is not suitable for this channel because of its category.\")\n\n if (channel.category.name == CATEGORY_SO or channel.category.name == CATEGORY_GENERAL):\n await ctx.send(\"Synced permissions with channel category.\")\n return await channel.edit(sync_permissions=True)\n\n member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)\n if (channel.category.name != CATEGORY_STATES):\n await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True, read_messages=True)\n else:\n await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True)\n\n wiki_role = discord.utils.get(member.guild.roles, name=ROLE_WM)\n gm_role = discord.utils.get(member.guild.roles, name=ROLE_GM)\n aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)\n bRole = discord.utils.get(member.guild.roles, name=ROLE_BT)\n await ctx.channel.set_permissions(wiki_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(gm_role, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(aRole, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.channel.set_permissions(bRole, add_reactions=True, send_messages=True, read_messages=True)\n await ctx.send(\"Unlocked the channel to Member access. 
Please check if permissions need to be synced.\")", "def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"[email protected]\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )", "def test_can_info_does_not_exist(self):\n fake_user = 
User(username='Fake', password='')\n self.assertFalse(send_rotate_to_can(fake_user, self.BIN_NUM))", "def test_channel_join_except_repetitive():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", True)\n\n\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])", "def test_06_self_cannot_upgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))", "async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner", "def testLockDenied(t, env):\n c = env.c1\n c.init_connection()\n # Create a file and lock it\n fh, stateid = c.create_confirm(t.code)\n res1 = c.lock_file(t.code, fh, stateid, 20, 100)\n check(res1, msg=\"Locking file %s for first owner\" % t.code)\n res2 = c.lock_file(t.code, fh, stateid, 0, 10)\n check(res2, msg=\"Locking file %s for second owner\" % t.code)\n # Create and replay LOCK ops\n ops = c.use_obj(fh)\n lock_owner = exist_lock_owner4(res1.lockid, 1)\n locker = locker4(FALSE, lock_owner=lock_owner)\n ops += [c.lock_op(WRITE_LT, FALSE, 0, 10, locker)]\n _replay(c, ops, NFS4ERR_DENIED)", "def test_missing_authorize_proof(self):\n node, other = self.create_nodes(2)\n node.send_identity(other)\n\n # permit NODE\n authorize = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"permit\"),\n (node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"authorize\")])\n node.give_message(authorize, self._mm)\n\n # OTHER wants the proof that OWNER is allowed to grant authorization to NODE\n node.give_message(other.create_missing_proof(authorize.authentication.member, authorize.distribution.global_time), other)\n\n # NODE sends dispersy-authorize containing authorize(MASTER, OWNER) to OTHER\n _, authorize = other.receive_message(names=[u\"dispersy-authorize\"]).next()\n\n permission_triplet = (self._mm.my_member.mid, u\"protected-full-sync-text\", u\"permit\")\n authorize_permission_triplets = [(triplet[0].mid, triplet[1].name, triplet[2]) for triplet in authorize.payload.permission_triplets]\n self.assertIn(permission_triplet, authorize_permission_triplets)", "def channel_addowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = 
database.get_permission_dict(curr_id)\n\n # check if user u_id is already an owner of the channel and raise InputError if so\n # also checks to see if current auth user is a owner of channel\n\n # a counter to check if user is a member of the channel\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n raise error.InputError(description=\"user u_id is already an owner of this channel\")\n # checks if curr_id is an owner of channel\n if curr_id == owner_id:\n is_curr_owner = True\n\n # checks if the user u_id is a member of the channel already\n is_u_member = False\n for member_id in curr_channel[\"member_ids\"]:\n if u_id == member_id:\n is_u_member = True\n\n\n # if the auth user is an owner of the slackr, allow him to add u_id as owner of channel\n if is_u_member is True:\n if user_perms[\"permission_id\"] == 1:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # if the auth user is an owner of the channel, allow him to add u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].append(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"current user is not an owner of the channel,\n or of the slackr\"\"\")", "def DeniedPermissions(self) -> _n_6_t_0:", "def test_group_is_not_private_user_is_not_member(self):\n thread = self.create_thread()\n user = self.create_user()\n self.assertTrue(thread.first_message.visible_to_user(user))", "def is_still_owner(self):\n raise tooz.NotImplemented", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def test_locked_asset_not_registered(self):\r\n self.client.login(username=self.usr, password=self.pwd)\r\n resp = self.client.get(self.url_locked)\r\n self.assertEqual(resp.status_code, 403) # pylint: disable=E1103\r", "def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)", "def test_channel_leave_invalid_token():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'First', 'Last')\n userchannel_id = channels_create(user['token'], 'userchannel', True)\n auth_logout(user['token'])\n with pytest.raises(AccessError):\n channel_leave(user['token'], userchannel_id['channel_id'])", "def is_private(event):\n channel = event.get('channel')\n return channel.startswith('D')", "def renounceOwnership():\n\n assert msg.sender == self.owner, \"Access is denied.\"\n\n log.OwnershipRenounced(msg.sender)\n self.owner = ZERO_ADDRESS", "def test_user_does_not_have_access(self):\n self.assertRaises(\n ObjectDoesNotExist,\n Thread.public.get_by_user,\n **{'thread_id': self.thread.pk, 'user': self.user}\n )", "def test_auth_sharable_cannot_share(self):\n self.do_sharable(False, 'pattieblack', FakeMembership(False),\n tenant='froggy')", "def slack_access(s, level=READ):\n try: slack_access_level = settings.SLACK_USERS[s.slack_uid]\n except: return False\n return (slack_access_level & level) != 0", "def is_permission_err(exc):\n assert isinstance(exc, OSError), exc\n # On Python 2 OSError doesn't always have 'winerror'. 
Sometimes\n # it does, in which case the original exception was WindowsError\n # (which is a subclass of OSError).\n return exc.errno in (errno.EPERM, errno.EACCES) or \\\n getattr(exc, \"winerror\", -1) in (cext.ERROR_ACCESS_DENIED,\n cext.ERROR_PRIVILEGE_NOT_HELD)", "def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)", "def assert_same_owner(path):\n try:\n assert find_owner(path) == getuser(), f\"{path} must be owned by {getuser()}\"\n except AssertionError as error:\n raise click.UsageError(str(error))\n except FileNotFoundError:\n pass", "async def permission_valid_check(cls):\n pass", "def test_group_is_private_user_is_not_member(self):\n thread = self.create_thread()\n thread.group.private = True\n thread.save()\n message = thread.first_message\n user = self.create_user()\n self.assertFalse(message.visible_to_user(user))", "def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True", "def cog_check(self, ctx):\n if ctx.guild is None:\n raise commands.NoPrivateMessage()\n return True", "def test_not_member(bot, event):\n _, event_id = event\n expect_error(edit, InputError, bot.username, event_id, False, None, None)", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def test_channel_join_except_channel():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channels_create_v2(auth_token1, \"Chill Soc\", True)\n invalid_channel = 50\n \n with pytest.raises(InputError):\n channel_join_v2(auth_token2, invalid_channel)", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", False)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def owners_only(command):\n @wraps(command)\n def wrapped_up(bot):\n if bot.message.nick not in conf.get('owners', []):\n return irc.Response('Sorry, you are not an owner thus not authorised to use this command', pm_user=True)\n return command(bot)\n wrapped_up.owner_only = True\n return wrapped_up", "def test_error_on_unauthorized_read(self):\n hooks = setup_hooks(verbose=True)\n\n result = hooks.act_on_cloned_repo(UNAUTHORIZED_READ_FILE_REPO)\n\n assert result.status == Status.WARNING\n assert (\n \"java.security.AccessControlException: access denied\" in result.msg\n )", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = 
channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])", "def check_sane(self):\n st = os.stat(self.path)\n if st.st_uid != os.getuid():\n raise Exception('Auth dir %s not owned by user %d.' % (\n self.path, os.getuid()))\n # Mode 16832 is equal to (stat.S_IFDIR | stat.S_IRWXU)\n # In other words, a directory with mode bits rwx------\n if st.st_mode != 16832:\n raise Exception('Auth dir %s not a dir or wrong permissions.' % self.path)", "def _check_access_priv(required_privilege_level):\n auth_user, prog_name, user, host, uuid = _get_client_info()\n priv_level = _get_priv_level(auth_user)\n if (PRIVILEGE_LEVELS.index(priv_level) <\n PRIVILEGE_LEVELS.index(required_privilege_level)):\n err = CONNECT_DENIED_PRIV_TMPL % (\n priv_level, required_privilege_level,\n user, host, prog_name, uuid)\n #LOG.warning(err)\n # Raise an exception to be sent back to the client.\n raise InvalidUsage(err, status_code=403)\n return True", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", True)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def has_perm_or_owns(context, perm, obj, perm_obj, field_name='creator'):\n user = context['request'].user\n if user.is_anonymous():\n return False\n return access.has_perm_or_owns(user, perm, obj, perm_obj, field_name)", "def has_perm_or_owns(context, perm, obj, perm_obj, field_name='creator'):\n user = context['request'].user\n if user.is_anonymous():\n return False\n return access.has_perm_or_owns(user, perm, obj, perm_obj, field_name)", "def is_owner(self, author):\n return not self.server or author == self.server.owner", "def _check_stream_writable(self, fe_commit):\n if not self._current_branch.stream_name:\n return\n prefix = self._current_branch.writable_stream_name + '/'\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to_depot()\n if depot_path.startswith(prefix):\n continue\n\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths not in stream '{stream}' are read-only for branch '{b}'.\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , stream = self._current_branch.writable_stream_name\n , b = self._current_branch.branch_id ))\n raise PreflightException(human_msg)", "def run(self):\n # Determine if this filter doesn't apply.\n if (self.owner == None \\\n or (self.sense and self.user != self.owner) \\\n or ((not self.sense) and self.user == self.owner)):\n return 0\n\n # Perform the child actions.\n self.context.tokens['Owner'] = self.owner\n return super(FilterLockOwner, self).run()", "def _check_caller_authority(caller, role):\r\n if not (caller.is_authenticated() and caller.is_active):\r\n raise PermissionDenied\r\n # superuser\r\n if GlobalStaff().has_user(caller):\r\n return\r\n\r\n if isinstance(role, (GlobalStaff, CourseCreatorRole)):\r\n raise PermissionDenied\r\n elif isinstance(role, CourseRole): # instructors can 
change the roles w/in their course\r\n if not has_access(caller, CourseInstructorRole(role.course_key)):\r\n raise PermissionDenied", "def test_channel_removeowner_standard_input():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == {\n 'name' : 'Random Channel',\n 'owner_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ],\n 'all_members': [\n {\n 'u_id': 2,\n 'name_first': 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }, \n {\n 'u_id': 3,\n 'name_first' : 'Jane',\n 'name_last': 'Citizen',\n 'profile_img_url': ''\n }\n ]\n })", "def is_locked(self):\r\n pass", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def allow_sudo(message):\n if message.author.id == Guard.AUTHOR and message.channel.type == discord.ChannelType.private:\n return True\n if message.author.id in Guard.SUDO_IDS and message.channel.id in Guard.SUDO_CHANNELS:\n return True\n return False", "def test_has_perm_per_object(self):\n user = User.objects.get(pk=47963)\n perm = 'forums_forum.thread_edit_forum'\n assert access.has_perm(user, perm, self.forum_1)\n assert not access.has_perm(user, perm, self.forum_2)", "async def test_regular_member_cannot_target_another_member(self, constants):\n constants.MODERATION_ROLES = [self.moderator_role.id]\n ctx = helpers.MockContext(author=self.author)\n\n await self.cog.user_info(self.cog, ctx, self.target)\n\n ctx.send.assert_called_once_with(\"You may not use this command on users other than yourself.\")", "def test_accept_member_with_owner_bad_request(self):\n url = '/api/v1/communities/3/accept_member/'\n data = {\n 'lol': 5\n }\n\n response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)", "def ensure_access(self, target_member : M, accessor : M, permission : str):\n if not permission: \n return True\n if accessor is None:\n raise errors.NotAllowed(\"Accessor not found\")\n if target_member != accessor:\n raise errors.NotAllowed(\"Access not allowed for permission '%s'\" % permission)\n return True", "def canBeAccessed(self):\n \n try:\n self._client.log(self._repositoryUri)\n return True\n except ClientError, error:\n _logger.debug(error.args[0])\n for _, errorCode in error.args[1]:\n if errorCode == 160006: # We have no commit in the repository, but its ok.\n return True\n return False" ]
[ "0.72484523", "0.688483", "0.68403906", "0.65012413", "0.6361957", "0.62719554", "0.6271346", "0.626074", "0.6235811", "0.6169581", "0.6156016", "0.6120171", "0.61183786", "0.6108167", "0.6076352", "0.6069219", "0.6057893", "0.6057893", "0.60458887", "0.6039936", "0.6019325", "0.601874", "0.6000169", "0.59550685", "0.59170294", "0.5891027", "0.5891027", "0.58853716", "0.5876037", "0.58495075", "0.584116", "0.5839991", "0.5828554", "0.5820725", "0.58150935", "0.58073115", "0.5806312", "0.5801726", "0.5797243", "0.57722807", "0.5757997", "0.575072", "0.57384247", "0.57367694", "0.57295495", "0.5725292", "0.5706979", "0.56990093", "0.5692146", "0.56674284", "0.5665795", "0.564881", "0.5645295", "0.56370103", "0.5627267", "0.56083465", "0.55908734", "0.55843556", "0.55793977", "0.5574745", "0.5556105", "0.5523981", "0.55162525", "0.5500793", "0.55001736", "0.54974914", "0.5488615", "0.548437", "0.5471759", "0.54692805", "0.54675627", "0.5465972", "0.54617566", "0.54514194", "0.5433567", "0.5432058", "0.5432058", "0.54298824", "0.54246604", "0.54217595", "0.54108834", "0.5407013", "0.54033846", "0.53906983", "0.5389986", "0.53889203", "0.53889203", "0.53679436", "0.53661823", "0.53637046", "0.5361167", "0.53589946", "0.5354014", "0.5350761", "0.53479403", "0.5343993", "0.53399235", "0.533853", "0.53365606", "0.5334444" ]
0.7089838
1
Sets a system Hamiltonian to the Hubbard Hamiltonian. Does exactly this. If the system Hamiltonian already has some other terms in it, they are not touched. So be sure to use this function only in newly created `System` objects.
def set_hamiltonian(self, system): system.clear_hamiltonian() if 'bh' in system.left_block.operators.keys(): system.add_to_hamiltonian(left_block_op='bh') if 'bh' in system.right_block.operators.keys(): system.add_to_hamiltonian(right_block_op='bh') system.add_to_hamiltonian('dimer', 'id', 'id', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'dimer', 'id', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'id', 'dimer', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'id', 'id', 'dimer', -(1. - self.U)) # system.add_to_hamiltonian('dimer', 'id', 'id', 'id', self.U) # system.add_to_hamiltonian('id', 'dimer', 'id', 'id', self.U) # system.add_to_hamiltonian('id', 'id', 'dimer', 'id', self.U) # system.add_to_hamiltonian('id', 'id', 'id', 'dimer', self.U) system.add_to_hamiltonian('rprm_up_minus_dag', 'rprm_up_plus', 'id', 'id', -(1. + self.U)/2.) system.add_to_hamiltonian('rprm_down_minus_dag', 'rprm_down_plus', 'id', 'id', -(1. + self.U)/2.) system.add_to_hamiltonian('rprm_up_minus', 'rprm_up_plus_dag', 'id', 'id', (1. + self.U)/2.) system.add_to_hamiltonian('rprm_down_minus', 'rprm_down_plus_dag', 'id', 'id', (1. + self.U)/2.) system.add_to_hamiltonian('id', 'rprm_up_minus_dag', 'rprm_up_plus', 'id', -(1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_down_minus_dag', 'rprm_down_plus', 'id', -(1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_up_minus', 'rprm_up_plus_dag', 'id', (1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_down_minus', 'rprm_down_plus_dag', 'id', (1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('c_up', 'c_up_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_up_dag', 'c_up', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down', 'c_down_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down_dag', 'c_down', 'id', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up', 'c_up_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up_dag', 'c_up', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down', 'c_down_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down_dag', 'c_down', 'id', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up', 'c_up_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up_dag', 'c_up', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down', 'c_down_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down_dag', 'c_down', -1.)\n system.add_to_hamiltonian('u', 'id', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'u', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'u', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'id', 'u', self.U)", "def set_block_hamiltonian(self, system):\n # If you have a block hamiltonian in your block, add it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian('bh', 'id')\n system.add_to_block_hamiltonian('c_up', 'c_up_dag', -1.)\n system.add_to_block_hamiltonian('c_up_dag', 'c_up', -1.)\n system.add_to_block_hamiltonian('c_down', 'c_down_dag', -1.)\n system.add_to_block_hamiltonian('c_down_dag', 'c_down', -1.)\n system.add_to_block_hamiltonian('id', 'u', self.U)\n system.add_to_block_hamiltonian('u', 'id', self.U)", "def set_hamiltonian_to_AF_Heisenberg(system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('id', 'id', 's_z', 's_z')\n system.add_to_hamiltonian('id', 'id', 's_p', 's_m', .5)\n system.add_to_hamiltonian('id', 'id', 's_m', 's_p', .5)\n system.add_to_hamiltonian('id', 's_z', 's_z', 'id')\n system.add_to_hamiltonian('id', 's_p', 's_m', 'id', .5)\n system.add_to_hamiltonian('id', 's_m', 's_p', 'id', .5)\n system.add_to_hamiltonian('s_z', 's_z', 'id', 'id')\n system.add_to_hamiltonian('s_p', 's_m', 'id', 'id', .5)\n system.add_to_hamiltonian('s_m', 's_p', 'id', 'id', .5)", "def set_block_hamiltonian(self, tmp_matrix_for_bh, system):\n # If you have a block hamiltonian in your block, add it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', -(1. - self.U))\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', -(1. 
- self.U))\n# system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', self.U)\n# system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', self.U)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)", "def set_block_hamiltonian_to_AF_Heisenberg(system):\n tmp_matrix_size = None\n if system.growing_side == 'left':\n tmp_matrix_size = system.get_left_dim()\n else: \n tmp_matrix_size = system.get_right_dim()\n tmp_matrix_for_bh = np.zeros((tmp_matrix_size, tmp_matrix_size))\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_z', 's_z')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_p', 's_m', .5)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_m', 's_p', .5)\n system.operators_to_add_to_block['bh'] = tmp_matrix_for_bh", "def Hamiltonian(self):\n return None", "def generate_hamiltonian(self):\n ham = total_hamiltonian(self.cluster, self.magnetic_field, self.zfs, others=self.others,\n other_states=self.other_states, central_gyro=self.gyro, central_spin=self.spin)\n\n if self.pulses is not None:\n self.pulses.generate_pulses(dimensions=ham.dimensions, bath=self.cluster, vectors=ham.vectors)\n\n return ham", "def __init__(self, hamiltonian):\n self.ham = hamiltonian", "def hamiltonian(self):\n hamiltonian = self.bare_hamiltonian()\n for interaction_term in self.interaction_list:\n hamiltonian += interaction_term.hamiltonian()\n return hamiltonian", "def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE_HUBBARD\n return HAM_SPINLESS_RI_CORE_HUBBARD(self)", "def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE_HUBBARD\n return HAM_SPINLESS_RI_CORE_HUBBARD(self)", "def set_hbond(self) -> None:\n ...", "def hubbard_hamiltonian_MF(H_no_Hubbard, ns_up, ns_dn, U): \n n_orb = H_no_Hubbard.shape[0]\n ns = [ns_up, ns_dn]\n H = []\n for i in [0, 1]:\n Hi = copy.deepcopy(H_no_Hubbard)\n Hi = Hi + U*ns[1-i]*np.identity(n_orb)\n H.append(Hi)\n return H", "def get_hamiltonian(self):\n return self.hamiltonian()", "def get_hamiltonian(self):\n assert (self._integrator == 'HMC' and self._metric == 'Euclidean') or self._integrator == 'RMHMC', 'Parameter dependent metrics require the RMHMC integrator'\n if self._integrator == 'RMHMC':# and self._metric != 'Euclidean':\n self.potential_ = self.get_potential()\n self.metric_ = self.get_metric()\n self.inverse_ = self.metric_.inverse()\n self.capacitor_ = self.get_capacitor()\n self.kinetic_ = self.get_kinetic()\n ham = self.potential_ + self.capacitor_ + self.kinetic_\n else:\n self.potential_ = self.get_potential()\n self.kinetic_ = self.get_kinetic()\n ham = self.potential_ + self.kinetic_\n self.hamiltonian_ = ham\n return ham", "def set_operators_to_update_to_AF_Heisenberg(system):\n system.add_to_operators_to_update('s_z', site_op='s_z')\n system.add_to_operators_to_update('s_p', site_op='s_p')\n system.add_to_operators_to_update('s_m', site_op='s_m')", "def reset_hessian_and_bias(self):\n # reset_shared_var(self.t_H)\n t = self.QUAD_REG\n if len(t.shape) == 1:\n 
self.t_H.set_value(np.diag(self.QUAD_REG))\n elif len(t.shape) == 2:\n self.t_H.set_value(self.QUAD_REG)\n else:\n raise ValueError('Invalid quad_reg shape')\n\n reset_shared_var(self.t_B)", "def get_bare_hamiltonian(self):\n warnings.warn('bare_hamiltonian() is deprecated, use bare_hamiltonian() instead', FutureWarning)\n return self.bare_hamiltonian()", "def test_hamiltonian(model):\n h = model.hamiltonian\n assert isinstance(h, csr_matrix)\n assert h.dtype == np.float32\n assert h.shape == (2, 2)\n assert pytest.fuzzy_equal(h.data, [graphene.t] * 2)\n assert pytest.fuzzy_equal(h.indices, [1, 0])\n assert pytest.fuzzy_equal(h.indptr, [0, 1, 2])\n\n assert h.data.flags['OWNDATA'] is False\n assert h.data.flags['WRITEABLE'] is False\n\n with pytest.raises(ValueError) as excinfo:\n h.data += 1\n assert \"read-only\" in str(excinfo.value)\n\n h2 = model.hamiltonian\n assert h2.data is not h.data\n assert point_to_same_memory(h2.data, h.data)", "def bare_hamiltonian(self):\n bare_hamiltonian = 0\n for subsys in self:\n evals = subsys.eigenvals(evals_count=subsys.truncated_dim)\n bare_hamiltonian += self.diag_hamiltonian(subsys, evals)\n return bare_hamiltonian", "def _hamiltonian(\n self,\n y: phase_space.PhaseSpace,\n params: utils.Params,\n **kwargs: Any\n ) -> jnp.ndarray:", "def test_set_hs(self):\n s = State(substance=\"water\")\n s.hs = Q_(1061602.391543017, \"J/kg\"), Q_(3028.9867985920914, \"J/(kg*K)\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.hs[0], Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.hs[1], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore", "def display_hamiltonian(H):\n terms = split_hamiltonian(H)\n\n def label(s):\n if s == 'H0':\n return r'\\hat{H}_0'\n elif s == 'Hint':\n return r'\\hat{H}_{\\text{int}}'\n else:\n try:\n prefix, ind = s.split('_')\n except ValueError:\n print(s)\n raise\n return r'\\hat{H}_{\\Omega_%s}' % ind\n\n lines = []\n lines.append(r'\\begin{align}')\n lines.append(r' \\hat{H} &= %s\\\\' % \" + \".join([label(name) for name in terms.keys()]))\n for name, H in terms.items():\n lines.append(r' %s &= %s\\\\' % (label(name), tex(H)))\n lines.append(r'\\end{align}')\n display(Latex(\"\\n\".join(lines)))", "def _ctrl_hum_set(self, osrs_h):\n data = osrs_h & 0x7\n self._bus.write_byte_data(self.addr, self.CTRL_HUM,\n data)", "def Hamiltonian(self):\n U = self.U.flatten()\n Vmat = sparse.spdiags([U], [0], len(U), len(U))\n Kmat = sparse.kron(-self.KEy * Schrodinger.D2mat(len(self.y), self.y[1] - self.y[0], self.periodic_y, self.qy),\n sparse.identity(len(self.x))) + \\\n sparse.kron(sparse.identity(len(self.y)),\n -self.KEx * Schrodinger.D2mat(len(self.x), self.x[1] - self.x[0], self.periodic_x, self.qx))\n return Kmat + Vmat", "def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE\n return HAM_SPINLESS_RI_CORE(self)", "def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE\n return HAM_SPINLESS_RI_CORE(self)", "def Hamiltonian(self):\n Vmat = sparse.spdiags([self.U], 
[0], len(self.U), len(self.U))\n Kmat = -self.KE * Schrodinger.D2mat(numpts=len(self.x), delta=self.x[1] - self.x[0], periodic=self.periodic,\n q=self.q)\n return Kmat + Vmat", "def set_state(state):\n global HMC_MOM\n assert type(state) == dict, 'state has to be a state dictionary'\n assert state.has_key('randstate'), 'state does not contain randstate'\n assert state.has_key('mom'), 'state does not contain momentum'\n np.random.set_state(state['randstate'])\n HMC_MOM = state['mom']", "def set_operators_to_update(self, system):\n # If you have a block hamiltonian in your block, update it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_operators_to_update('bh', block_op='bh')\n system.add_to_operators_to_update('c_up', site_op='c_up')\n system.add_to_operators_to_update('c_up_dag', site_op='c_up_dag')\n system.add_to_operators_to_downdate('c_down', site_op='c_down')\n system.add_to_operators_to_downdate('c_down_dag', site_op='c_down_dag')\n system.add_to_operators_to_update('u', site_op='u')", "def HamiltonianMatrix(self):\n self.Inter = sp.Matrix([[0,self.t],[self.t,0]])\n self.Intra1 = sp.Matrix([[0,v],[w,0]])\n self.Intra2 = sp.Matrix([[0,w],[v,0]])\n H = sp.Matrix([])\n for i in range(1, self.N+1):\n fila = sp.Matrix([])\n for j in range(1, self.N+1):\n if j==i:\n fila = fila.row_join(self.Inter)\n elif j==i+1:\n fila = fila.row_join(self.Intra1)\n elif j==i-1:\n fila = fila.row_join(self.Intra2)\n else:\n fila = fila.row_join(sp.Matrix([[0,0],[0,0]]))\n H = H.col_join(fila) \n H.simplify()\n #printer = StrPrinter()\n #print(H.table(printer,align='center'))\n self.H = H", "def set_H0(self):\n self.slot.H0 = self.lf_H0.value()\n self.w_out.comp_output()\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()", "def test_multiorbital_hamiltonian():\n def lattice():\n lat = pb.Lattice([1])\n lat.add_sublattices((\"A\", [0], [[1, 3j],\n [0, 2]]))\n lat.register_hopping_energies({\n \"t22\": [[0, 1],\n [2, 3]],\n \"t11\": 1, # incompatible hopping - it's never used so it shouldn't raise any errors\n })\n lat.add_hoppings(([1], \"A\", \"A\", \"t22\"))\n return lat\n\n model = pb.Model(lattice(), pb.primitive(3))\n h = model.hamiltonian.toarray()\n\n assert model.system.num_sites == 3\n assert h.shape[0] == 6\n assert pytest.fuzzy_equal(h, h.T.conjugate())\n assert pytest.fuzzy_equal(h[:2, :2], h[-2:, -2:])\n assert pytest.fuzzy_equal(h[:2, :2], [[ 1, 3j],\n [-3j, 2]])\n assert pytest.fuzzy_equal(h[:2, 2:4], [[0, 1],\n [2, 3]])\n\n @pb.onsite_energy_modifier\n def onsite(energy, x, sub_id):\n return 3 * energy + sub_id.eye * 0 * x\n\n @pb.hopping_energy_modifier\n def hopping(energy):\n return 2 * energy\n\n model = pb.Model(lattice(), pb.primitive(3), onsite, hopping)\n h = model.hamiltonian.toarray()\n\n assert model.system.num_sites == 3\n assert h.shape[0] == 6\n assert pytest.fuzzy_equal(h, h.T.conjugate())\n assert pytest.fuzzy_equal(h[:2, :2], h[-2:, -2:])\n assert pytest.fuzzy_equal(h[:2, :2], [[ 3, 9j],\n [-9j, 6]])\n assert pytest.fuzzy_equal(h[:2, 2:4], [[0, 2],\n [4, 6]])\n assert pytest.fuzzy_equal(h[2:4, 4:6], [[0, 2],\n [4, 6]])\n\n def lattice_with_zero_diagonal():\n lat = pb.Lattice([1])\n lat.add_sublattices((\"A\", [0], [[0, 3j],\n [0, 0]]))\n return lat\n\n model = pb.Model(lattice_with_zero_diagonal(), pb.primitive(3))\n h = model.hamiltonian.toarray()\n\n assert model.system.num_sites == 3\n assert h.shape[0] == 6\n assert pytest.fuzzy_equal(h, h.T.conjugate())\n assert pytest.fuzzy_equal(h[:2, :2], h[-2:, -2:])\n assert 
pytest.fuzzy_equal(h[:2, :2], [[0, 3j],\n [-3j, 0]])", "def set_heading(self, heading):\n self._kernel.set_heading(float(heading))", "def SetSolarSystem(self, ss):\n return _gmat_py.PowerSystem_SetSolarSystem(self, ss)", "def setHBin(self, hbin):\n with self.lock:\n self.hbin = hbin", "def _set_bmus(\n self, X: np.ndarray, som_array: Optional[np.ndarray] = None\n ) -> None:\n self.bmus_ = self.get_bmus(X=X, som_array=som_array)", "def HI_mass(mhalo,aa):\n zp1 = 1.0/aa\n zz = zp1-1\n # Set the parameters of the HOD, using the \"simple\" form.\n # MHI ~ M0 x^alpha Exp[-1/x] x=Mh/Mmin\n # from the Appendix of https://arxiv.org/pdf/1804.09180.pdf, Table 6.\n # Fits valid for 1<z<6:\n mcut= 1e10*(6.11-1.99*zp1+0.165*zp1**2)\n alp = (1+2*zz)/(2+2*zz)\n # Work out the HI mass/weight per halo -- ignore prefactor.\n xx = mhalo/mcut+1e-10\n mHI = xx**alp * np.exp(-1/xx)\n # Scale to some standard number in the right ball-park.\n mHI*= 2e9*np.exp(-1.9*zp1+0.07*zp1**2)\n # Return the HI masses.\n return(mHI)\n #", "def set_mass_flow(self):\n self.exh.mdot_exp = self.exh.flow_array * self.exh.rho_array\n self.exh.C = self.exh.mdot_exp * self.exh.c_p_air\n self.exh.enthalpy_flow = self.exh.C * self.exh.T_inlet_array", "def set_hedra(self) -> Tuple[bool, Hedron, Hedron]:\n ...", "def run_test0():\r\n \r\n ndia, nadi, nnucl, ntraj = 1, 1, 2, 500\r\n\r\n # ======= Hierarchy of Hamiltonians =======\r\n ham = nHamiltonian(ndia, nadi, nnucl)\r\n ham.init_all(2)\r\n print \"id=\", ham.id, \" level=\", ham.level\r\n\r\n ham1 = [] \r\n for tr in xrange(ntraj):\r\n ham1.append( nHamiltonian(ndia, nadi, nnucl) ) \r\n print ham1[tr].id, ham1[tr].level\r\n ham1[tr].init_all(2)\r\n ham.add_child(ham1[tr])\r\n print Cpp2Py(ham1[tr].get_full_id())\r\n\r\n # Set up the models and compute internal variables\r\n # Initialization\r\n # Model parameters \r\n params = { \"model\":1 }\r\n\r\n # Simulation parameters\r\n dt = 1.0\r\n\r\n # Dynamical variables and system-specific properties\r\n mean_q = MATRIX(nnucl,1); \r\n sigma_q = MATRIX(nnucl,1); \r\n mean_p = MATRIX(nnucl,1); \r\n sigma_p = MATRIX(nnucl,1); \r\n iM = MATRIX(nnucl,1);\r\n\r\n for i in xrange(nnucl):\r\n mean_q.set(i,0, -1.0) \r\n sigma_q.set(i,0, 0.05) \r\n mean_p.set(i,0, 0.0) \r\n sigma_p.set(i,0, 0.0)\r\n iM.set(i,0, 1.0/2000.0)\r\n\r\n rnd = Random()\r\n q = MATRIX(nnucl,ntraj); aux_functs.sample(q, mean_q, sigma_q, rnd)\r\n p = MATRIX(nnucl,ntraj); aux_functs.sample(p, mean_p, sigma_p, rnd) \r\n\r\n # Initial calculations\r\n q.show_matrix()\r\n\r\n # Compute Initial trajectory probability distributions for all dof\r\n #bin(q, -2.0, 2.0, 0.01)\r\n\r\n ham.compute_diabatic(compute_model, q, params, 1)\r\n ham.compute_adiabatic(1, 1);\r\n ham.add_ethd_adi(q, iM, 1)\r\n\r\n os.system(\"mkdir _2D_dist\")\r\n out1 = open(\"_output.txt\", \"w\"); out1.close() \r\n\r\n # Do the propagation\r\n for i in xrange(100):\r\n\r\n aux_functs.bin2(q, -2.0, 2.0, 0.1, -2.0, 2.0, 0.1, \"_2D_dist/_2D_distrib_\"+str(i)+\"_.txt\")\r\n\r\n Verlet1(dt, q, p, iM, ham, compute_model, params, 1)\r\n\r\n #=========== Properties ==========\r\n\r\n Ekin, Epot, Etot = aux_functs.compute_etot(ham, p, iM)\r\n\r\n # Print the ensemble average - kinetic, potential, and total energies\r\n # Print the tunneling information. 
Here, we count each trajectory across the barrier.\r\n out1 = open(\"_output.txt\", \"a\")\r\n out1.write( \" %8.5f %8.5f %8.5f %8.5f\\n\" % ( i*dt, Ekin, Epot, Etot ) )\r\n out1.close()", "def MAH_Hearin_2021(halo_mass_t0, cosmic_t):\r\n\r\n #U_a_early = 2.5\r\n #U_a_early_late = 0.3\r\n #log10tau_c = 1.25\r\n\r\n k = 3.5\r\n\r\n a_late_early = 2.5-0.3 #np.log( np.power(np.e, U_a_early_late) + 1. )\r\n a_early = 2.5 #np.log( np.power(np.e, U_a_early) + 1. )\r\n tau_c = 1.25 #np.power(10., log10tau_c)\r\n alpha = a_early + a_late_early / (1. + np.exp(-k*(cosmic_t - tau_c)) )\r\n\r\n MAH = np.log10( 10.**halo_mass_t0 * np.power(cosmic_t / Cosmo.age(0), alpha) )\r\n\r\n return MAH", "def test_set_hT(self):\n s = State(substance=\"water\")\n s.hT = Q_(2730301.3859201893, \"J/kg\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.hT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.hT[0], Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def simple_harmonic_motion(self):\n\n if abs(self.shm['val']) == 8:\n self.shm['dir'] *= -1\n if self.shm['dir'] == 1:\n self.shm['val'] += 1\n else:\n self.shm['val'] -= 1", "def rm_hamiltonian(params, momentum, log_prob_func, jitter, normalizing_const, softabs_const=1e6, sampler=Sampler.HMC, integrator=Integrator.EXPLICIT, metric=Metric.HESSIAN):\n\n log_prob = log_prob_func(params)\n ndim = params.nelement()\n pi_term = ndim * torch.log(2.*torch.tensor(pi))\n\n fish, abs_eigenvalues = fisher(params, log_prob_func, jitter=jitter, normalizing_const=normalizing_const, softabs_const=softabs_const, metric=metric)\n\n if abs_eigenvalues is not None:\n if util.has_nan_or_inf(fish) or util.has_nan_or_inf(abs_eigenvalues):\n print('Invalid Fisher: {} , abs_eigenvalues: {}, params: {}'.format(fish, abs_eigenvalues, params))\n raise util.LogProbError()\n else:\n if util.has_nan_or_inf(fish):\n print('Invalid Fisher: {}, params: {}'.format(fish, params))\n raise util.LogProbError()\n\n if metric == Metric.SOFTABS:\n log_det_abs = abs_eigenvalues.log().sum()\n else:\n log_det_abs = torch.slogdet(fish)[1]\n fish_inverse_momentum = cholesky_inverse(fish, momentum)\n quadratic_term = torch.matmul(momentum.view(1, -1), fish_inverse_momentum)\n hamiltonian = - log_prob + 0.5 * pi_term + 0.5 * log_det_abs + 0.5 * quadratic_term\n if util.has_nan_or_inf(hamiltonian):\n print('Invalid hamiltonian, log_prob: {}, params: {}, momentum: {}'.format(log_prob, params, momentum))\n raise util.LogProbError()\n\n return hamiltonian", "def reset_phis ( self ) :\n for f in self.__phis : f.setVal(0)", "def set_mist(self, humidity):\n if humidity > 100:\n humidity = 100\n elif humidity < 0:\n humidity = 0\n # could set acknolage\n self.__hum = humidity", "def single_mol_system(self):\n molecule = self.build_molecule(5)\n system = vermouth.system.System()\n system.molecules = [molecule]\n return system", "def setHeadway(self, new_headway: int):\n self.headway 
= new_headway", "def system(self, system):\n\n self._system = system", "def system(self, system):\n\n self._system = system", "def get_qubit_hamiltonian(mol):\n m_ham = mol.get_molecular_hamiltonian()\n int_ham = InteractionOperator(*(m_ham.n_body_tensors.values()))\n f_ham = get_fermion_operator(int_ham)\n q_ham = Transform(f_ham).jordan_wigner()\n return q_ham", "def sinh(self):\n return type(self)(self.parent(),\n self._simplify(self._express.sinh()))", "def set_H2(self):\n self.slot.H2 = self.lf_H2.value()\n self.w_out.comp_output()\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()", "def compute_Hamiltonian(self, xHat, pxHat, yHat, pyHat):\n\n hamiltonian = 0.5*(pxHat**2 + pyHat**2) + 0.5 *(xHat**2 + yHat**2)\n\n return hamiltonian", "def initial_shear_modulus(self):\n pass", "def update_hubbard_settings(self, key, value):\n\n if self._hubbard_settings:\n if key in self._hubbard_settings:\n self._hubbard_settings[key] = value\n else:\n print(\"key does not exist!! keys include: {ldau, ldatype, ldaul, dlauu, ldauj, lmaxmix}\")\n else:\n print(\"hybrid settings not present!\")", "def setUp(self):\n problem = setup_house_L(size=(40, 40))\n\n env = MetroLayoutEnv()\n\n costfn = objectives.ConstraintsHeur(problem,\n wmap={'AspectConstraint':0.1,\n 'AreaConstraint': 2\n },\n default=1.)\n\n model = algo.MetropolisHastings(env, costfn)\n\n self.exp = SimpleMH(\n env,\n problem,\n model=model,\n cost_fn=costfn,\n num_iter=1000,\n initializer=PointsInBound(problem, env, size=3, seed=69)\n )", "def sinh(x):\n raise NotImplementedError", "def homozygotie(self):\n if self.allele[1] == 0.0:\n self.homozygote = True", "def compute_Hamiltonian(self, xHat, pxHat, yHat, pyHat):\n\n quadratic = 0.5 * (pxHat**2 + pyHat**2)\n\n hamiltonian = quadratic + self.compute_potential(xHat, yHat)\n\n return hamiltonian", "def _corresponding_simu(self):\n return SimuHawkes()", "def test_hilbert_schmidt_bell():\n\n rho = bell(0) * bell(0).conj().T\n sigma = bell(3) * bell(3).conj().T\n\n res = hilbert_schmidt(rho, sigma)\n\n np.testing.assert_equal(np.isclose(res, 1), True)", "def test_trotter_hamiltonian_three_qubit_term(backend):\n from scipy.linalg import expm\n from qibo.core.terms import HamiltonianTerm\n m1 = random_hermitian(3)\n m2 = random_hermitian(2)\n m3 = random_hermitian(1)\n\n terms = [HamiltonianTerm(m1, 0, 1, 2), HamiltonianTerm(m2, 2, 3),\n HamiltonianTerm(m3, 1)]\n ham = hamiltonians.SymbolicHamiltonian()\n ham.terms = terms\n\n # Test that the `TrotterHamiltonian` dense matrix is correct\n eye = np.eye(2, dtype=m1.dtype)\n mm1 = np.kron(m1, eye)\n mm2 = np.kron(np.kron(eye, eye), m2)\n mm3 = np.kron(np.kron(eye, m3), np.kron(eye, eye))\n target_ham = hamiltonians.Hamiltonian(4, mm1 + mm2 + mm3)\n K.assert_allclose(ham.matrix, target_ham.matrix)\n\n dt = 1e-2\n initial_state = random_state(4)\n if K.op is not None:\n with pytest.raises(NotImplementedError):\n circuit = ham.circuit(dt=dt)\n else:\n circuit = ham.circuit(dt=dt)\n final_state = circuit(np.copy(initial_state))\n u = [expm(-0.5j * dt * (mm1 + mm3)), expm(-0.5j * dt * mm2)]\n target_state = u[1].dot(u[0].dot(initial_state))\n target_state = u[0].dot(u[1].dot(target_state))\n K.assert_allclose(final_state, target_state)", "def coupled_transmons_hamiltonian(w_q0, w_q1, alpha_q0, alpha_q1, J, w_bus):\n\n raise NotImplementedError(\"Old way of handling the hamiltonian H_0. 
Use calc_hamiltonian\")\n\n eps=0\n delta_q1=w_q1-w_bus\n delta_q0_interactionpoint=(w_q1-alpha_q0)-w_bus\n delta_q0=(w_q0+eps)-w_bus\n\n J_new = J / ((delta_q1+delta_q0_interactionpoint)/(delta_q1*delta_q0_interactionpoint)) * (delta_q1+delta_q0)/(delta_q1*delta_q0)\n\n H_0 = w_q0 * n_q0 + w_q1 * n_q1 + \\\n 1/2*alpha_q0*(a.dag()*a.dag()*a*a) + 1/2*alpha_q1*(b.dag()*b.dag()*b*b) +\\\n J_new * (a.dag() + a) * (b + b.dag())\n return H_0", "def __init__(self, graph: Graph, energies=(1, 1), code=qubit, IS_subspace=False):\n if energies == (1, 1) and IS_subspace:\n energies = (1,)\n self.code = code\n self.graph = graph\n self.n = self.graph.n\n self.energies = energies\n self.IS_subspace = IS_subspace\n self.optimization = 'max'\n if not self.IS_subspace:\n # Store node and edge terms separately so the Hamiltonian can be dynamically updated when energies\n # are changed\n\n if tools.is_diagonal(self.code.Q):\n self._hamiltonian_edge_terms = np.zeros([1, (self.code.d ** self.code.n) ** self.n])\n self._hamiltonian_node_terms = np.zeros([1, (self.code.d ** self.code.n) ** self.n])\n self._is_diagonal = True\n\n Q = np.expand_dims(np.diagonal(self.code.Q), axis=0)\n\n def my_eye(n):\n return np.ones(np.asarray(self.code.d ** self.code.n) ** n)\n else:\n # TODO: generate a sparse matrix instead\n self._hamiltonian_edge_terms = np.zeros([(self.code.d ** self.code.n) ** self.n,\n (self.code.d ** self.code.n) ** self.n])\n self._hamiltonian_node_terms = np.zeros([(self.code.d ** self.code.n) ** self.n,\n (self.code.d ** self.code.n) ** self.n])\n\n Q = np.expand_dims(np.diagonal(qubit.Q), axis=0)\n\n def my_eye(n):\n return np.ones(np.asarray(qubit.d ** qubit.n) ** n)\n\n self._optimum_edge_terms = np.zeros([(qubit.d ** qubit.n) ** self.n,\n (qubit.d ** qubit.n) ** self.n])\n self._optimum_node_terms = np.zeros([(qubit.d ** qubit.n) ** self.n,\n (qubit.d ** qubit.n) ** self.n])\n\n for i, j in graph.graph.edges:\n if j < i:\n i, j = j, i\n self._optimum_edge_terms = self._optimum_edge_terms + graph.graph.edges[(i, j)]['weight'] * \\\n tools.tensor_product(\n [my_eye(i), Q, my_eye(j - i - 1), Q, my_eye(self.n - j - 1)])\n\n for i in graph.graph.nodes:\n self._optimum_node_terms = self._optimum_node_terms + graph.graph.nodes[i]['weight'] * \\\n tools.tensor_product([my_eye(i), Q, my_eye(self.n - i - 1)])\n\n self._is_diagonal = False\n Q = self.code.Q\n\n def my_eye(n):\n return np.identity(np.asarray(self.code.d ** self.code.n) ** n)\n for i, j in graph.graph.edges:\n if j < i:\n i, j = j, i\n self._hamiltonian_edge_terms = self._hamiltonian_edge_terms + graph.graph.edges[(i, j)]['weight'] * \\\n tools.tensor_product(\n [my_eye(i), Q, my_eye(j - i - 1), Q, my_eye(self.n - j - 1)])\n for i in graph.graph.nodes:\n self._hamiltonian_node_terms = self._hamiltonian_node_terms + graph.graph.nodes[i]['weight'] * \\\n tools.tensor_product([my_eye(i), Q, my_eye(self.n - i - 1)])\n self._hamiltonian_node_terms = self._hamiltonian_node_terms.T\n self._hamiltonian_edge_terms = self._hamiltonian_edge_terms.T\n if self._is_diagonal:\n self._optimum_edge_terms = self._hamiltonian_edge_terms\n self._optimum_node_terms = self._hamiltonian_node_terms\n self._diagonal_hamiltonian_edge_terms = self._hamiltonian_edge_terms.copy()\n self._diagonal_hamiltonian_node_terms = self._hamiltonian_node_terms.copy()\n self._hamiltonian_node_terms = sparse.csr_matrix(\n (self._hamiltonian_node_terms.flatten(), (np.arange(self.code.d ** (self.code.n * self.n)),\n np.arange(self.code.d ** (self.code.n * self.n)))),\n 
shape=(self.code.d ** (self.code.n * self.n),\n self.code.d ** (self.code.n * self.n)))\n self._hamiltonian_edge_terms = sparse.csr_matrix(\n (self._hamiltonian_edge_terms.flatten(), (np.arange(self.code.d ** (self.code.n * self.n)),\n np.arange(self.code.d ** (self.code.n * self.n)))),\n shape=(self.code.d ** (self.code.n * self.n),\n self.code.d ** (self.code.n * self.n)))\n # TODO: what happens to _optimum_node_terms if not _is_diagonal\n else:\n self._diagonal_hamiltonian_edge_terms = self._hamiltonian_edge_terms.copy()\n self._diagonal_hamiltonian_node_terms = self._hamiltonian_node_terms.copy()\n self._hamiltonian_edge_terms = sparse.csr_matrix(self._hamiltonian_edge_terms)\n self._hamiltonian_node_terms = sparse.csr_matrix(self._hamiltonian_node_terms)\n self._left_acting_hamiltonian_edge_terms = None\n self._right_acting_hamiltonian_edge_terms = None\n\n else:\n self._is_diagonal = True\n if not (self.code == qubit or self.code == rydberg):\n raise NotImplementedError(\"IS subspace only implemented for qubit and Rydberg codes.\")\n # Don't generate anything that depends on the entire Hilbert space as to save space\n\n # These are your independent sets of the original graphs, ordered by node and size\n if self.code == qubit:\n node_weights = np.asarray([self.graph.graph.nodes[i]['weight'] for i in range(self.graph.n)])\n independent_sets = enumerate_independent_sets(self.graph.graph)\n # Generate a list of integers corresponding to the independent sets in binary\n # All ones\n k = self.graph.num_independent_sets - 2\n self.mis_size = 0\n C = np.zeros(self.graph.num_independent_sets, dtype=float)\n C[-1] = 0\n for i in independent_sets:\n C[k] = np.sum([node_weights[j] for j in i])\n k -= 1\n self._hamiltonian = sparse.csr_matrix((C, (np.arange(self.graph.num_independent_sets),\n np.arange(self.graph.num_independent_sets))))\n\n C = np.expand_dims(C, axis=0).T\n\n # Otherwise, we need to include the possibility that we are in one of many ground space states\n elif self.code == rydberg:\n # TODO: fix this to reflect the new way of notating independent sets!\n # Count the number of elements in the ground space and map to their representation in ternary\n # Determine how large to make the array\n independent_sets, num_IS = self.graph.generate_independent_sets_qudit(self.code)\n # Generate Hamiltonian from independent sets\n node_weights = np.asarray([self.graph.graph.nodes[i]['weight'] for i in range(self.graph.n)])\n C = np.zeros((num_IS, 1), dtype=np.complex128)\n for k in independent_sets:\n C[k, 0] = np.sum((independent_sets[k][2] == 0) * node_weights)\n self._diagonal_hamiltonian_node_terms = C\n C = C.flatten()\n\n self._hamiltonian_node_terms = sparse.csr_matrix((\n C, (np.arange(len(C)), np.arange(len(C)))), shape=(len(C), len(C)))\n\n self._left_acting_hamiltonian_node_terms = None\n self._right_acting_hamiltonian_node_terms = None", "def set_ham(self,constant,displacement,pcoef,xcoef,*,real=True):\n self.constant = constant\n self.p0 = np.imag(displacement)\n self.x0 = np.real(displacement)\n self.pcoef = pcoef\n self.xcoef = xcoef\n extra_size = max(len(pcoef),len(xcoef))\n # Calculates powers of x and p via matrix powers. 
Must use an increased\n # base truncation size in order to correctly resolve x^n and p^n\n self.calculation_size = self.size + extra_size\n self.set_x_and_p()\n ham = np.diag(np.ones(self.calculation_size,dtype=complex)*constant)\n for n in range(0,len(pcoef)):\n p = self.p - np.diag(np.ones(self.calculation_size))*self.p0\n ham += pcoef[n] * np.linalg.matrix_power(p,n+2)\n for m in range(0,len(xcoef)):\n x = self.x - np.diag(np.ones(self.calculation_size))*self.x0\n ham += xcoef[m] * np.linalg.matrix_power(x,m+2)\n self.ham = ham[:self.size,:self.size]\n if real:\n self.ham = np.real(self.ham)", "def construct_one_body_propagator(self, system, dt):\n H1 = system.h1e_mod\n # No spin dependence for the moment.\n if (system.diagH1):\n self.BH1 = numpy.array([numpy.diag(numpy.exp(-0.5*dt*numpy.diag(H1[0]))),\n numpy.diag(numpy.exp(-0.5*dt*numpy.diag(H1[1])))])\n else:\n self.BH1 = numpy.array([scipy.linalg.expm(-0.5*dt*H1[0]),\n scipy.linalg.expm(-0.5*dt*H1[1])])", "def evaluate_hamiltonian(self, ham_sig_vals: Array) -> Array:\n if self.empty_dissipators:\n signal_values = ham_sig_vals\n else:\n zero_padding = np.zeros(self.num_operators - len(ham_sig_vals))\n signal_values = np.append(ham_sig_vals, zero_padding, axis=0)\n return 1j * super().evaluate(signal_values)", "def evaluate_hamiltonian(self, ham_sig_vals: Array) -> Array:\n if self.empty_dissipators:\n signal_values = ham_sig_vals\n else:\n zero_padding = np.zeros(self.num_operators - len(ham_sig_vals))\n signal_values = np.append(ham_sig_vals, zero_padding, axis=0)\n return 1j * super().evaluate(signal_values)", "def __init__(self, logp, start, mass=None, step_size=1, n_steps=5, **kwargs):\n\n super(Hamiltonian, self).__init__(logp, start, **kwargs)\n\n if mass is None:\n self.mass = default_mass(self.state, self.scale, self.conditional)\n else:\n assert np.all(mass.T - mass <= 1e-6), 'mass matrix is asymmetric'\n assert self.state.tovector().size == mass.shape[0], \\\n 'mass matrix dimensionality does not match states'\n self.mass = mass\n\n self.dim = self.mass.shape[0]\n self.mass_chol = la.cholesky(self.mass, lower=True)\n self.step_size = step_size / self.dim**(1/4)\n self.n_steps = n_steps", "def setSimulation(self, simulation):\r\n raise NotImplementedError()", "def generate_from_halton(self):\n halton_variables = [\n v for v in self.list if v.kind.lower() not in EXCLUDE_FROM_HALTON\n ]\n if halton_variables:\n nd_halton_seq = halton((self.samples, len(halton_variables)))\n for idx, v in enumerate(halton_variables):\n v.generate_values(nd_halton_seq[:, idx])", "def start_hanchan(self):\n raise NotImplemented()", "def reset_hll(self):\n self.hll = HyperLogLog(250)\n self.hll64 = HyperLogLog64(2**17)", "def update_h(x):\n cell.modify_mech_param('soma', 'h', 'ghbar', x[0])\n cell.modify_mech_param('trunk', 'h', 'ghbar', origin='soma', slope=x[1], tau=x[2], xhalf=x[3])\n cell.modify_mech_param('basal', 'h', 'ghbar', origin='soma')\n for sec_type in ['apical', 'tuft']:\n cell.modify_mech_param(sec_type, 'h', 'ghbar', origin='trunk')", "def test_H_hat(self):\n\t\tposition = [0.0, 1.57079, 3.14159, 4.71238, 6.28318, 7.85398, 9.42477]\n\t\tpotential = [0.0, 6.0, 0.0, -6.0, 0.0, 6.0, 0.0]\n\t\tc = 1\n\t\tposition = tf.constant(position, shape = [1, len(position)], dtype = tf.float32)\n\t\tpotential = tf.constant(potential, shape = [1, len(potential)], dtype = tf.float32)\n\t\tbasis = schrodinger.create_basis(5)\n\t\tv = schrodinger.v0(position, potential, basis)\n\t\tcoeff = schrodinger.coefficient(position, basis)\n\t\tv0_hat = 
tf.linalg.solve(coeff, v)\n\t\tH = schrodinger.H_hat(c, len(basis), v0_hat)\n\t\tself.assertEqual(coeff.get_shape(), [len(basis), len(basis)])", "def make_Hamiltonian(skf_dir, atom_types, disp, kpts, write_band=False, use_omp=False):\n if disp == 'D3': #from dftb manual\n dispersion = '''DftD3{\n Damping = BeckeJohnson{\n a1 = 0.5719\n a2 = 3.6017\n }\n s6 = 1.0\n s8 = 0.5883\n }\n '''\n\n elif disp == 'D30': #zero dampling\n dispersion = '''DftD3{\n Damping = ZeroDamping{\n sr6 = 0.746\n alpah6 = 4.191\n }\n s6 = 1.0\n s8 = 3.209\n }\n '''\n\n elif disp == 'D4':\n dispersion = '''DftD4{\n s6 = 1\n s8 = 0.6635015\n s9 = 1\n a1 = 0.5523240\n a2 = 4.3537076\n }\n '''\n\n elif disp == 'MBD': #1.0 from J. Phys. Chem. Lett. 2018, 9, 399−405\n dispersion = 'MBD{\\n\\tKGrid = ' + str(kpts)[1:-1] + '\\n\\tBeta = 1.0}\\n'\n\n elif disp == 'TS': #1.05 from J. Phys. Chem. Lett. 2018, 9, 399−405\n dispersion = '''TS{\n Damping = 20.0\n RangeSeparation = 1.0\n }\n '''\n\n elif disp == 'LJ':\n dispersion = 'LennardJones{Parameters = UFFParameters{}}'\n else:\n dispersion = None\n\n\n kwargs = {'Hamiltonian_SCC': 'yes',\n 'Hamiltonian_SCCTolerance': 1e-06,\n 'Hamiltonian_MaxSCCIterations': 1000,\n #'Hamiltonian_Mixer': 'DIIS{}', #Default is Broyden\n #'Hamiltonian_Dispersion': dispersion,\n 'slako_dir': skf_dir,\n 'Analysis_': '',\n 'Analysis_WriteBandOut': 'No',\n 'Analysis_MullikenAnalysis': 'No',\n 'Analysis_CalculateForces': 'Yes',\n }\n if write_band: \n kwargs['Analysis_WriteBandOut'] = 'Yes'\n if use_omp:\n kwargs['Parallel_'] = ''\n kwargs['Parallel_UseOmpThreads'] = 'Yes'\n if dispersion is not None:\n kwargs['Hamiltonian_Dispersion'] = dispersion\n\n if skf_dir.find('3ob') > 0: \n calc_type = '3ob'\n elif skf_dir.find('mio') > 0: \n calc_type = 'mio'\n elif skf_dir.find('pbc') > 0:\n calc_type = 'pbc'\n elif skf_dir.find('matsci') > 0:\n calc_type = 'matsci'\n\n #https://dftb.org/parameters/download/3ob/3ob-3-1-cc\n if calc_type == '3ob':\n kwargs['Hamiltonian_ThirdOrderFull'] = 'Yes'\n kwargs['Hamiltonian_HCorrection'] = 'Damping {\\n\\tExponent = 4.00\\n\\t}'\n HD = {\"Br\": -0.0573,\n \"C\": -0.1492,\n \"N\": -0.1535,\n \"Ca\": -0.0340, \n \"Na\": -0.0454,\n \"Cl\": -0.0697, \n \"Zn\": -0.03,\n \"O\": -0.1575,\n \"F\": -0.1623,\n \"P\": -0.14,\n \"H\": -0.1857, \n \"S\": -0.11,\n \"I\": -0.0433, \n \"K\": -0.0339,\n }\n strs = '{'\n for ele in atom_types:\n if ele == 'H':\n kwargs['Hamiltonian_MaxAngularMomentum_H']='s'\n elif ele in ['Mg', 'C', 'N', 'Ca', 'Na', 'O', 'F', 'K']:\n kwargs['Hamiltonian_MaxAngularMomentum_'+ele]='p'\n elif ele in ['Br', 'Cl', 'P', 'S', 'I', 'Zn']:\n kwargs['Hamiltonian_MaxAngularMomentum_'+ele]='d'\n else:\n raise RuntimeError(\"3-ob-1 doesnot support\", ele)\n strs +='\\n\\t'+ele+' = '+str(HD[ele])\n strs += '\\n\\t}'\n kwargs['Hamiltonian_HubbardDerivs'] = strs\n elif calc_type == 'pbc':\n #https://dftb.org/parameters/download/pbc/pbc-0-3-cc\n for ele in atom_types:\n if ele == 'H':\n kwargs['Hamiltonian_MaxAngularMomentum_H']='s'\n elif ele in ['C', 'O', 'N', 'F']:\n kwargs['Hamiltonian_MaxAngularMomentum_'+ele]='p'\n elif ele in ['Si', 'Fe']:\n kwargs['Hamiltonian_MaxAngularMomentum_'+ele]='d'\n else:\n raise RuntimeError(\"pbc-0-3 doesnot support\", ele)\n elif calc_type in ['matsci', 'mio']:\n #https://dftb.org/parameters/download/pbc/pbc-0-3-cc\n for ele in atom_types:\n if ele == 'H':\n kwargs['Hamiltonian_MaxAngularMomentum_H']='s'\n elif ele in ['B', 'O', 'C', 'N']:\n kwargs['Hamiltonian_MaxAngularMomentum_'+ele]='p'\n elif ele in ['Si']:\n 
kwargs['Hamiltonian_MaxAngularMomentum_'+ele]='d'\n else:\n raise RuntimeError(calc_type, \"doesnot support\", ele)\n \n #DFTB2\n\n #pbc-0-3\n #matsci\n #ob2\n #pbc\n #print(calc_type, kwargs)\n return kwargs", "def _update_hessian(self) -> None:\n assert self._species and self._coords is not None and self._method\n\n species = self._species.new_species(\n name=f\"{self._species.name}_opt_{self.iteration}\"\n )\n species.coordinates = self._coords.to(\"cartesian\")\n\n species.calc_hessian(\n method=self._method,\n keywords=self._method.keywords.hess,\n n_cores=self._n_cores,\n )\n assert species.hessian is not None, \"Failed to calculate H\"\n\n self._species.hessian = species.hessian.copy()\n self._coords.update_h_from_cart_h(self._species.hessian.to(\"Ha Å^-2\"))", "def get_symmetric_system(self):\n W = DynamicalSystem(lambda x:1.0-self.f1(1.0-x),\n lambda x:1.0-self.f0(1.0-x))\n W.set_rho(1.0-self.rho)\n return W", "def hamming_sim(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity score.\n return measure.get_sim_score(s1, s2)", "def setZeroTheory(self):\n\t\tself.theo = np.zeros((self.totalBins), dtype = complex)", "def set_cosmology(self, cosmo):\n self.cosmo = cosmo\n self.h70 = cosmo['h'] # Hubble parameter, H0 = 100h km/s/Mpc\n self.Om = cosmo['omega_M_0'] # Omega_matter\n self.Ol = cosmo['omega_lambda_0'] # Omega_Lambda", "def do_rotate_basis_quadratic_hamiltonian(self, real):\n n_qubits = 5\n\n # Initialize a particle-number-conserving quadratic Hamiltonian\n # and compute its orbital energies\n quad_ham = random_quadratic_hamiltonian(n_qubits, True, real=real)\n orbital_energies, constant = quad_ham.orbital_energies()\n\n # Rotate a basis where the Hamiltonian is diagonal\n _, diagonalizing_unitary, _ = (\n quad_ham.diagonalizing_bogoliubov_transform())\n quad_ham.rotate_basis(diagonalizing_unitary.T)\n\n # Check that the rotated Hamiltonian is diagonal with the correct\n # orbital energies\n D = numpy.zeros((n_qubits, n_qubits), dtype=complex)\n D[numpy.diag_indices(n_qubits)] = orbital_energies\n self.assertTrue(numpy.allclose(quad_ham.combined_hermitian_part, D))\n\n # Check that the new Hamiltonian still conserves particle number\n self.assertTrue(quad_ham.conserves_particle_number)\n\n # Check that the orbital energies and constant are the same\n new_orbital_energies, new_constant = quad_ham.orbital_energies()\n self.assertTrue(numpy.allclose(orbital_energies, new_orbital_energies))\n self.assertAlmostEqual(constant, new_constant)", "def _build_ham(self):\n path = self._solverpath.long_tail()\n print(path)\n current, k = self.snake.head(), 0\n for direc in path:\n self.information[current.x][current.y].idx = k\n self.information[current.x][current.y].direc = direc\n current = current.adj(direc)\n k += 1\n # Process snake bodies\n current = self.snake.tail()\n for _ in range(self.snake.len() - 1):\n self.information[current.x][current.y].idx = k\n self.information[current.x][current.y].direc = self.snake.direc\n current = current.adj(self.snake.direc)\n k += 1", "def test_set_sh(self):\n s = State(substance=\"water\")\n s.sh = Q_(3028.9867985920914, \"J/(kg*K)\"), Q_(1061602.391543017, \"J/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert 
np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.sh[0], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.sh[1], Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore", "def update_H(self):\n self.grid.H[self.loc] -= (\n self.grid.courant_number\n * self.grid.inverse_permeability[self.loc]\n * self.phi_H\n )", "def simulateForHeight(rocket,H_1):\n rocket.setLandingBurnStartHeight(H_1)\n \n # Simulate the dynamics\n t_f = 100.\n x_0 = np.array([rocket.H_0,rocket.v_0,rocket.m_0, # Ideal system\n rocket.H_0,rocket.v_0,rocket.m_0 # Real system\n ])\n \n state = solve_ivp(fun = rocket.dynamics,\n t_span = (0,t_f),\n y0 = x_0,\n events = [rocket.burnEvent,rocket.ascentEvent],\n max_step = 0.01)\n \n # Extract simulation results\n t = state.t\n h = state.y[3]\n m = state.y[5]\n \n fuel_used = m[0]-m[-1]\n burn_thrust = rocket.T\n burn_time = rocket.t_b\n time_history = t\n height_history = h\n \n return fuel_used,burn_thrust,burn_time,time_history,height_history", "def setup_simulation(system, pdb, integrator):\n #platform = Platform.getPlatformByName('CPU')\n platform = Platform.getPlatformByName('OpenCL')\n prop = {'OpenCLPrecision':'single'}\n\n simulation = Simulation(pdb.topology, system, integrator, platform, prop)\n simulation.context.setPositions(pdb.positions)\n simulation.minimizeEnergy()\n simulation.context.setVelocitiesToTemperature(300*kelvin)\n print('Created simulation')\n return simulation", "def test_set_ph(self):\n s = State(substance=\"water\")\n s.ph = Q_(101325.0, \"Pa\"), Q_(1061602.391543017, \"J/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ph[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ph[1], Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore\n s.ph = Q_(101325.0, \"Pa\"), Q_(3336406.139862406, \"J/kg\")\n assert np.isclose(s.T, Q_(700.9882316847855, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ph[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ph[1], Q_(3336406.139862406, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(3013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(8623.283568815832, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(3.189303132125469, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(3336406.139862406, \"J/kg\")) # type: ignore\n assert s.x is None", "def setharmonic(self, theta0, kb):\n\n if isinstance(theta0, float):\n self.theta0 = theta0\n else:\n print \"1st arg should be float\"\n raise TypeError\n\n if isinstance(kb, float):\n self.kb = kb\n else:\n print \"2nd arg should be 
float\"\n raise TypeError", "def test_set_vh(self):\n s = State(substance=\"water\")\n s.vh = Q_(0.4772010021515822, \"m**3/kg\"), Q_(1061602.391543017, \"J/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.vh[0], Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.vh[1], Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore", "def test_set_hp(self):\n s = State(substance=\"water\")\n s.hp = Q_(1061602.391543017, \"J/kg\"), Q_(101325.0, \"Pa\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.hp[0], Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.hp[1], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore\n s.hp = Q_(3336406.139862406, \"J/kg\"), Q_(101325.0, \"Pa\")\n assert np.isclose(s.T, Q_(700.9882316847855, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.hp[0], Q_(3336406.139862406, \"J/kg\")) # type: ignore\n assert np.isclose(s.hp[1], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(3013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(8623.283568815832, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(3.189303132125469, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(3336406.139862406, \"J/kg\")) # type: ignore\n assert s.x is None", "def h5_mhe(simulated_array, observed_array, replace_nan=None, replace_inf=None, remove_neg=False,\n remove_zero=False):\n\n # Treats data\n simulated_array, observed_array = treat_values(\n simulated_array,\n observed_array,\n replace_nan=replace_nan,\n replace_inf=replace_inf,\n remove_neg=remove_neg,\n remove_zero=remove_zero\n )\n\n top = (simulated_array - observed_array)\n bot = np.reciprocal(0.5 * (np.reciprocal(observed_array) + np.reciprocal(simulated_array)))\n h = top / bot\n return np.mean(h)", "def setStoichiometryMath(self, *args):\n return _libsbml.SpeciesReference_setStoichiometryMath(self, *args)", "def huber(t=1.345):\n return sm.robust.norms.HuberT(t)", "def h(self, h):\n\n self._h = h", "def setHsl ( self, h, s = 0.0, l = 0.0 ):\n self.setHsla( h, s, l )", "def setup_simulation(system, pdb, integrator):\n #platform = Platform.getPlatformByName('CPU')\n platform = Platform.getPlatformByName('OpenCL')\n prop = {'OpenCLPrecision':'single'}\n \n simulation = Simulation(pdb.topology, system, integrator, platform, prop)\n simulation.context.setPositions(pdb.positions)\n simulation.minimizeEnergy()\n simulation.context.setVelocitiesToTemperature(310*kelvin)\n print('Created 
simulation')\n return simulation", "def test_hky_uniformization(self):\n distribution = {'A':.2,'C':.3,'G':.3,'T':.2}\n kappa = 2\n rate_matrix_object = RateMatrix.get_unscaled_hky85_rate_matrix(distribution, kappa)\n rate_matrix_object.normalize()\n rate_matrix = rate_matrix_object.get_dictionary_rate_matrix()\n path_length = 2\n initial_state = 'A'\n terminal_state = 'C'\n states = 'ACGT'\n iterations = 200\n # get the modified rejection sampling changes, where each change is the number of events on a sampled path\n nielsen_changes = []\n i = 0\n while i < iterations:\n nielsen_events = get_nielsen_sample(initial_state, terminal_state, states, path_length, rate_matrix)\n if nielsen_events is not None:\n nielsen_changes.append(len(nielsen_events))\n i += 1\n # get the uniformization changes, where each change is the number of events on a sampled path\n uniformization_changes = []\n for i in range(iterations):\n uniformization_events = get_uniformization_sample(initial_state, terminal_state, states, path_length, rate_matrix)\n uniformization_changes.append(len(uniformization_events))\n # see if there is a statistically significant difference between the sampled path lengths\n #print sum(nielsen_changes)\n #print sum(uniformization_changes)\n t, p = scipy.stats.mannwhitneyu(uniformization_changes, nielsen_changes)\n self.failIf(p < .001, p)" ]
[ "0.77002096", "0.7557386", "0.74360436", "0.7119834", "0.7010251", "0.6295535", "0.60164326", "0.6015403", "0.59823936", "0.58867145", "0.58867145", "0.5827976", "0.57418686", "0.56327134", "0.56063366", "0.55363756", "0.5516487", "0.5502509", "0.54257786", "0.53823394", "0.5379448", "0.5376192", "0.5376085", "0.5345973", "0.53369564", "0.5248801", "0.5248801", "0.5248193", "0.5241568", "0.5221934", "0.5211156", "0.51879174", "0.5164523", "0.51256204", "0.5078332", "0.5066466", "0.503613", "0.500563", "0.5001846", "0.49978516", "0.4961445", "0.49301708", "0.49283308", "0.49248713", "0.49047318", "0.48990774", "0.48828593", "0.48743707", "0.48521197", "0.4842834", "0.4842834", "0.48419476", "0.48367816", "0.48341662", "0.48273665", "0.48224008", "0.48220247", "0.48179296", "0.48155832", "0.47930318", "0.4791855", "0.47912648", "0.47800174", "0.47724798", "0.47709683", "0.4764673", "0.47627378", "0.47626662", "0.47464812", "0.47464812", "0.47424188", "0.47355542", "0.47343197", "0.4733096", "0.4731394", "0.47303534", "0.47258043", "0.4715176", "0.4711825", "0.47117049", "0.47015554", "0.46989515", "0.46926543", "0.46919817", "0.46861413", "0.46769446", "0.46756625", "0.46677268", "0.46664822", "0.4664266", "0.4660809", "0.46531087", "0.46521828", "0.46451524", "0.46373537", "0.46370864", "0.46365574", "0.463532", "0.46213683", "0.46172377" ]
0.7601821
1
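The quoted floats in the long list above line up one-to-one with the snippets in the preceding negatives cell, giving each negative a relevance score against its row's query; the two scalar values just above appear to score and rank the positive document itself. A minimal sketch of ranking negatives by that per-negative score, assuming both cells have been parsed into equal-length Python lists (the function and argument names are illustrative placeholders, not part of the dataset):

def hardest_negatives(negatives, negative_scores, k=3):
    # Pair each negative snippet with its score, highest-scoring first;
    # the top-k are the negatives most easily confused with the positive.
    scored = sorted(zip(map(float, negative_scores), negatives),
                    key=lambda pair: pair[0], reverse=True)
    return [snippet for _, snippet in scored[:k]]

# Usage: hardest_negatives(row["negatives"], row["negative_scores"])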
Sets the block Hamiltonian to the Hubbard model block Hamiltonian.
def set_block_hamiltonian(self, tmp_matrix_for_bh, system): # If you have a block hamiltonian in your block, add it if 'bh' in system.growing_block.operators.keys(): system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id') system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', -(1. - self.U)) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', -(1. - self.U)) # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', self.U) # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', self.U) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)
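The document above grows a Hubbard-model block Hamiltonian by accumulating two-operator terms, each a block operator paired with a site operator and scaled by a coupling derived from `U`, into a preallocated matrix. A minimal sketch of that accumulation pattern, assuming dense NumPy operators and a hypothetical free-standing helper; the operator names echo the snippet, but the 2x2 matrices are toy placeholders, not the library's actual operators:

import numpy as np

def add_to_block_hamiltonian(h_block, block_op, site_op, param=1.0):
    # Accumulate param * (block_op tensor site_op) into the block Hamiltonian.
    h_block += param * np.kron(block_op, site_op)

identity = np.eye(2)           # toy 2x2 stand-ins for the real operators
dimer = np.diag([0.0, 1.0])

U = 0.5
h_block = np.zeros((4, 4))
add_to_block_hamiltonian(h_block, identity, dimer, -(1.0 - U))
add_to_block_hamiltonian(h_block, dimer, identity, -(1.0 - U))
print(h_block)                 # both terms land on the diagonal here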
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_block_hamiltonian(self, system):\n # If you have a block hamiltonian in your block, add it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian('bh', 'id')\n system.add_to_block_hamiltonian('c_up', 'c_up_dag', -1.)\n system.add_to_block_hamiltonian('c_up_dag', 'c_up', -1.)\n system.add_to_block_hamiltonian('c_down', 'c_down_dag', -1.)\n system.add_to_block_hamiltonian('c_down_dag', 'c_down', -1.)\n system.add_to_block_hamiltonian('id', 'u', self.U)\n system.add_to_block_hamiltonian('u', 'id', self.U)", "def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('c_up', 'c_up_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_up_dag', 'c_up', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down', 'c_down_dag', 'id', 'id', -1.)\n system.add_to_hamiltonian('c_down_dag', 'c_down', 'id', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up', 'c_up_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_up_dag', 'c_up', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down', 'c_down_dag', 'id', -1.)\n system.add_to_hamiltonian('id', 'c_down_dag', 'c_down', 'id', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up', 'c_up_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_up_dag', 'c_up', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down', 'c_down_dag', -1.)\n system.add_to_hamiltonian('id', 'id', 'c_down_dag', 'c_down', -1.)\n system.add_to_hamiltonian('u', 'id', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'u', 'id', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'u', 'id', self.U)\n system.add_to_hamiltonian('id', 'id', 'id', 'u', self.U)", "def set_block_hamiltonian_to_AF_Heisenberg(system):\n tmp_matrix_size = None\n if system.growing_side == 'left':\n tmp_matrix_size = system.get_left_dim()\n else: \n tmp_matrix_size = system.get_right_dim()\n tmp_matrix_for_bh = np.zeros((tmp_matrix_size, tmp_matrix_size))\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_z', 's_z')\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_p', 's_m', .5)\n system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_m', 's_p', .5)\n system.operators_to_add_to_block['bh'] = tmp_matrix_for_bh", "def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('dimer', 'id', 'id', 'id', -(1. - self.U))\n system.add_to_hamiltonian('id', 'dimer', 'id', 'id', -(1. - self.U))\n system.add_to_hamiltonian('id', 'id', 'dimer', 'id', -(1. - self.U))\n system.add_to_hamiltonian('id', 'id', 'id', 'dimer', -(1. - self.U))\n \n# system.add_to_hamiltonian('dimer', 'id', 'id', 'id', self.U)\n# system.add_to_hamiltonian('id', 'dimer', 'id', 'id', self.U)\n# system.add_to_hamiltonian('id', 'id', 'dimer', 'id', self.U)\n# system.add_to_hamiltonian('id', 'id', 'id', 'dimer', self.U)\n\n system.add_to_hamiltonian('rprm_up_minus_dag', 'rprm_up_plus', 'id', 'id', -(1. + self.U)/2.)\n system.add_to_hamiltonian('rprm_down_minus_dag', 'rprm_down_plus', 'id', 'id', -(1. 
+ self.U)/2.)\n system.add_to_hamiltonian('rprm_up_minus', 'rprm_up_plus_dag', 'id', 'id', (1. + self.U)/2.)\n system.add_to_hamiltonian('rprm_down_minus', 'rprm_down_plus_dag', 'id', 'id', (1. + self.U)/2.)\n \n system.add_to_hamiltonian('id', 'rprm_up_minus_dag', 'rprm_up_plus', 'id', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_down_minus_dag', 'rprm_down_plus', 'id', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_up_minus', 'rprm_up_plus_dag', 'id', (1.+self.U)/2.)\n system.add_to_hamiltonian('id', 'rprm_down_minus', 'rprm_down_plus_dag', 'id', (1.+self.U)/2.)\n\n system.add_to_hamiltonian('id','id', 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.)\n system.add_to_hamiltonian('id','id', 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)", "def set_hamiltonian_to_AF_Heisenberg(system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n system.add_to_hamiltonian('id', 'id', 's_z', 's_z')\n system.add_to_hamiltonian('id', 'id', 's_p', 's_m', .5)\n system.add_to_hamiltonian('id', 'id', 's_m', 's_p', .5)\n system.add_to_hamiltonian('id', 's_z', 's_z', 'id')\n system.add_to_hamiltonian('id', 's_p', 's_m', 'id', .5)\n system.add_to_hamiltonian('id', 's_m', 's_p', 'id', .5)\n system.add_to_hamiltonian('s_z', 's_z', 'id', 'id')\n system.add_to_hamiltonian('s_p', 's_m', 'id', 'id', .5)\n system.add_to_hamiltonian('s_m', 's_p', 'id', 'id', .5)", "def set_hbond(self) -> None:\n ...", "def Hamiltonian(self):\n return None", "def __init__(self, hamiltonian):\n self.ham = hamiltonian", "def setHBin(self, hbin):\n with self.lock:\n self.hbin = hbin", "def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE_HUBBARD\n return HAM_SPINLESS_RI_CORE_HUBBARD(self)", "def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE_HUBBARD\n return HAM_SPINLESS_RI_CORE_HUBBARD(self)", "def hamiltonian(self):\n hamiltonian = self.bare_hamiltonian()\n for interaction_term in self.interaction_list:\n hamiltonian += interaction_term.hamiltonian()\n return hamiltonian", "def generate_hamiltonian(self):\n ham = total_hamiltonian(self.cluster, self.magnetic_field, self.zfs, others=self.others,\n other_states=self.other_states, central_gyro=self.gyro, central_spin=self.spin)\n\n if self.pulses is not None:\n self.pulses.generate_pulses(dimensions=ham.dimensions, bath=self.cluster, vectors=ham.vectors)\n\n return ham", "def set_mass_flow(self):\n self.exh.mdot_exp = self.exh.flow_array * self.exh.rho_array\n self.exh.C = self.exh.mdot_exp * self.exh.c_p_air\n self.exh.enthalpy_flow = self.exh.C * self.exh.T_inlet_array", "def get_hamiltonian(self):\n return self.hamiltonian()", "def get_hamiltonian(self):\n assert (self._integrator == 'HMC' and self._metric == 'Euclidean') or self._integrator == 'RMHMC', 'Parameter dependent metrics require the RMHMC integrator'\n if self._integrator == 'RMHMC':# and self._metric != 'Euclidean':\n self.potential_ = self.get_potential()\n self.metric_ = self.get_metric()\n self.inverse_ = self.metric_.inverse()\n self.capacitor_ = self.get_capacitor()\n self.kinetic_ = self.get_kinetic()\n ham = self.potential_ + self.capacitor_ + self.kinetic_\n else:\n self.potential_ 
= self.get_potential()\n self.kinetic_ = self.get_kinetic()\n ham = self.potential_ + self.kinetic_\n self.hamiltonian_ = ham\n return ham", "def hubbard_hamiltonian_MF(H_no_Hubbard, ns_up, ns_dn, U): \n n_orb = H_no_Hubbard.shape[0]\n ns = [ns_up, ns_dn]\n H = []\n for i in [0, 1]:\n Hi = copy.deepcopy(H_no_Hubbard)\n Hi = Hi + U*ns[1-i]*np.identity(n_orb)\n H.append(Hi)\n return H", "def set_H0(self):\n self.slot.H0 = self.lf_H0.value()\n self.w_out.comp_output()\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()", "def h(self, h):\n\n self._h = h", "def __init__(self, breite, höhe,\n block_größe=None, lebendig=set()):\n if block_größe is None:\n block_größe = min(9, 800//min(breite, höhe)) or 1\n self.breite = breite\n self.höhe = höhe\n self.block = int(block_größe)\n self.lebendig = lebendig\n try:\n self.sense = SenseHat()\n except OSError:\n self.sense = None\n super().__init__()", "def get_bare_hamiltonian(self):\n warnings.warn('bare_hamiltonian() is deprecated, use bare_hamiltonian() instead', FutureWarning)\n return self.bare_hamiltonian()", "def set_heading(self, heading):\n self._kernel.set_heading(float(heading))", "def reset_hessian_and_bias(self):\n # reset_shared_var(self.t_H)\n t = self.QUAD_REG\n if len(t.shape) == 1:\n self.t_H.set_value(np.diag(self.QUAD_REG))\n elif len(t.shape) == 2:\n self.t_H.set_value(self.QUAD_REG)\n else:\n raise ValueError('Invalid quad_reg shape')\n\n reset_shared_var(self.t_B)", "def block(self, block):\n\n self._block = block", "def block(self, block):\n\n self._block = block", "def __init__(self, d_model, n_heads, use_cos, kernel, dropout,\n ffn_ratio, ln_eps, denom_eps, bias):\n super(MHA_block_rezero, self).__init__()\n self.mha = MHA(\n d_model, n_heads, use_cos, kernel, dropout, denom_eps, bias)\n self.ffn = FFN(d_model, ffn_ratio, dropout, bias)\n self.alpha = nn.Parameter(torch.Tensor([0]))", "def set_channel_h_unit(self , channel_h_unit:float):\n self.__channel_h_unit = channel_h_unit", "def penblock(self, block):\n self.block = block", "def test_hamiltonian(model):\n h = model.hamiltonian\n assert isinstance(h, csr_matrix)\n assert h.dtype == np.float32\n assert h.shape == (2, 2)\n assert pytest.fuzzy_equal(h.data, [graphene.t] * 2)\n assert pytest.fuzzy_equal(h.indices, [1, 0])\n assert pytest.fuzzy_equal(h.indptr, [0, 1, 2])\n\n assert h.data.flags['OWNDATA'] is False\n assert h.data.flags['WRITEABLE'] is False\n\n with pytest.raises(ValueError) as excinfo:\n h.data += 1\n assert \"read-only\" in str(excinfo.value)\n\n h2 = model.hamiltonian\n assert h2.data is not h.data\n assert point_to_same_memory(h2.data, h.data)", "def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE\n return HAM_SPINLESS_RI_CORE(self)", "def create_ham(self):\n from tcc.interaction import HAM_SPINLESS_RI_CORE\n return HAM_SPINLESS_RI_CORE(self)", "def _hamiltonian(\n self,\n y: phase_space.PhaseSpace,\n params: utils.Params,\n **kwargs: Any\n ) -> jnp.ndarray:", "def setHeadway(self, new_headway: int):\n self.headway = new_headway", "def setUp(self):\n problem = setup_house_L(size=(40, 40))\n\n env = MetroLayoutEnv()\n\n costfn = objectives.ConstraintsHeur(problem,\n wmap={'AspectConstraint':0.1,\n 'AreaConstraint': 2\n },\n default=1.)\n\n model = algo.MetropolisHastings(env, costfn)\n\n self.exp = SimpleMH(\n env,\n problem,\n model=model,\n cost_fn=costfn,\n num_iter=1000,\n initializer=PointsInBound(problem, env, size=3, seed=69)\n )", "def _set_block(self, pos, block_):\n raise NotImplementedError", 
"def HamiltonianMatrix(self):\n self.Inter = sp.Matrix([[0,self.t],[self.t,0]])\n self.Intra1 = sp.Matrix([[0,v],[w,0]])\n self.Intra2 = sp.Matrix([[0,w],[v,0]])\n H = sp.Matrix([])\n for i in range(1, self.N+1):\n fila = sp.Matrix([])\n for j in range(1, self.N+1):\n if j==i:\n fila = fila.row_join(self.Inter)\n elif j==i+1:\n fila = fila.row_join(self.Intra1)\n elif j==i-1:\n fila = fila.row_join(self.Intra2)\n else:\n fila = fila.row_join(sp.Matrix([[0,0],[0,0]]))\n H = H.col_join(fila) \n H.simplify()\n #printer = StrPrinter()\n #print(H.table(printer,align='center'))\n self.H = H", "def setBlockMassParams(self):\n for b in self.getBlocks():\n b.p.kgHM = b.getHMMass() / units.G_PER_KG\n b.p.kgFis = b.getFissileMass() / units.G_PER_KG\n b.p.puFrac = (\n b.getPuMoles() / b.p.molesHmBOL if b.p.molesHmBOL > 0.0 else 0.0\n )", "def __init__(self, dm, h, hidden, drop_rate=0.1):\r\n super(EncoderBlock, self).__init__()\r\n self.mha = MultiHeadAttention(dm, h)\r\n self.dense_hidden = tf.keras.layers.Dense(units=hidden,\r\n activation='relu')\r\n self.dense_output = tf.keras.layers.Dense(units=dm)\r\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\r\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\r\n self.dropout1 = tf.keras.layers.Dropout(drop_rate)\r\n self.dropout2 = tf.keras.layers.Dropout(drop_rate)", "def test_init_hebbian_2(self):\n v_one = [1, -1, -1, -1, 1, -1, -1, -1, 1]\n v_two = [-1, -1, -1, 1, 1, 1, -1, -1, -1]\n network = HopfieldNetwork([v_one, v_two])\n expected = np.array([\n [0, 0, 0, -2, 0, -2, 0, 0, 2],\n [0, 0, 2, 0, -2, 0, 2, 2, 0],\n [0, 2, 0, 0, -2, 0, 2, 2, 0],\n [-2, 0, 0, 0, 0, 2, 0, 0, -2],\n [0, -2, -2, 0, 0, 0, -2, -2, 0],\n [-2, 0, 0, 2, 0, 0, 0, 0, -2],\n [0, 2, 2, 0, -2, 0, 0, 2, 0],\n [0, 2, 2, 0, -2, 0, 2, 0, 0],\n [2, 0, 0, -2, 0, -2, 0, 0, 0]\n ], np.int64)\n npt.assert_equal(network.weight_matrix, expected)", "def initial_shear_modulus(self):\n pass", "def set_hand(self, hand):\n self.hand = hand", "def __init__(self, d_model, n_heads, use_cos, kernel, dropout,\n ffn_ratio, ln_eps, denom_eps, bias):\n super(MHA_block, self).__init__()\n self.ln1 = nn.LayerNorm(d_model, eps=ln_eps)\n self.ln2 = nn.LayerNorm(d_model, eps=ln_eps)\n self.mha = MHA(\n d_model, n_heads, use_cos, kernel, dropout, denom_eps, bias)\n self.ffn = FFN(d_model, ffn_ratio, dropout, bias)", "def hobby(self, hobby):\n\n self._hobby = hobby", "def set_eht(self, target_eht):\n self.target_eht = round(target_eht, 2)\n # Setting SEM to target EHT must be implemented in child class!", "def _make_block(self, model):\n # TODO Make base class\n assert model is not None, 'Top level model must be initialized first'\n self.model = model\n # If block is already present, remove it\n if self.model.component(self.name) is not None:\n self.model.del_component(self.name)\n self.model.add_component(self.name, Block())\n self.block = self.model.__getattribute__(self.name)\n\n self.logger.info(\n 'Optimization block initialized for {}'.format(self.name))", "def Hamiltonian(self):\n U = self.U.flatten()\n Vmat = sparse.spdiags([U], [0], len(U), len(U))\n Kmat = sparse.kron(-self.KEy * Schrodinger.D2mat(len(self.y), self.y[1] - self.y[0], self.periodic_y, self.qy),\n sparse.identity(len(self.x))) + \\\n sparse.kron(sparse.identity(len(self.y)),\n -self.KEx * Schrodinger.D2mat(len(self.x), self.x[1] - self.x[0], self.periodic_x, self.qx))\n return Kmat + Vmat", "def _build_ham(self):\n path = self._solverpath.long_tail()\n print(path)\n current, k = self.snake.head(), 0\n 
for direc in path:\n self.information[current.x][current.y].idx = k\n self.information[current.x][current.y].direc = direc\n current = current.adj(direc)\n k += 1\n # Process snake bodies\n current = self.snake.tail()\n for _ in range(self.snake.len() - 1):\n self.information[current.x][current.y].idx = k\n self.information[current.x][current.y].direc = self.snake.direc\n current = current.adj(self.snake.direc)\n k += 1", "def bare_hamiltonian(self):\n bare_hamiltonian = 0\n for subsys in self:\n evals = subsys.eigenvals(evals_count=subsys.truncated_dim)\n bare_hamiltonian += self.diag_hamiltonian(subsys, evals)\n return bare_hamiltonian", "def set_state(state):\n global HMC_MOM\n assert type(state) == dict, 'state has to be a state dictionary'\n assert state.has_key('randstate'), 'state does not contain randstate'\n assert state.has_key('mom'), 'state does not contain momentum'\n np.random.set_state(state['randstate'])\n HMC_MOM = state['mom']", "def bias(self, value):\n self.mbmod.bias = value", "def setModel(self, block, /):\n self._model = block\n self.updateView()", "def set_smearing(self, smearing_Ha):\n self.smearing = smearing_Ha\n self.qptanalyzer.smearing = smearing_Ha", "def _set_block(self, pos, block_):\n _get_mc().setBlock(pos, block_)", "def blackbody(self, nu, T):\n x = self.h*nu/(self.kB*T)\n result = 2.*self.h*nu**3 /self.c**2\n result /= np.exp(x) - 1.\n return result", "def set_hedra(self) -> Tuple[bool, Hedron, Hedron]:\n ...", "def test_init_hebbian_3(self):\n v_one = [1, -1, -1, -1, 1, -1, -1, -1, 1]\n v_two = [-1, -1, -1, 1, 1, 1, -1, -1, -1]\n v_three = [-1, -1, 1, -1, -1, 1, -1, -1, 1]\n network = HopfieldNetwork([v_one, v_two, v_three])\n expected = np.array([\n [0, 1, -1, -1, 1, -3, 1, 1, 1],\n [1, 0, 1, 1, -1, -1, 3, 3, -1],\n [-1, 1, 0, -1, -3, 1, 1, 1, 1],\n [-1, 1, -1, 0, 1, 1, 1, 1, -3],\n [1, -1, -3, 1, 0, -1, -1, -1, -1],\n [-3, -1, 1, 1, -1, 0, -1, -1, -1],\n [1, 3, 1, 1, -1, -1, 0, 3, -1],\n [1, 3, 1, 1, -1, -1, 3, 0, -1],\n [1, -1, 1, -3, -1, -1, -1, -1, 0]\n ])\n npt.assert_equal(network.weight_matrix, expected)", "def test_set_hT(self):\n s = State(substance=\"water\")\n s.hT = Q_(2730301.3859201893, \"J/kg\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.hT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.hT[0], Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def from_blockheader(cls, bh, txns):\n self = cls.__new__(cls)\n self.block_header = bh\n self.transactions = txns\n\n self.merkle_tree = None\n self.invalidate()\n\n return self", "def update_H(self):\n self.grid.H[self.loc] -= (\n self.grid.courant_number\n * self.grid.inverse_permeability[self.loc]\n * self.phi_H\n )", "def _set_bmus(\n self, X: np.ndarray, som_array: Optional[np.ndarray] = None\n ) -> None:\n self.bmus_ = self.get_bmus(X=X, som_array=som_array)", "def set_bomb(self):\n self.bomba = True", "def set_haiku(self, haiku):\n self.haiku = 
haiku", "def set_operators_to_update(self, system):\n # If you have a block hamiltonian in your block, update it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_operators_to_update('bh', block_op='bh')\n system.add_to_operators_to_update('c_up', site_op='c_up')\n system.add_to_operators_to_update('c_up_dag', site_op='c_up_dag')\n system.add_to_operators_to_downdate('c_down', site_op='c_down')\n system.add_to_operators_to_downdate('c_down_dag', site_op='c_down_dag')\n system.add_to_operators_to_update('u', site_op='u')", "def display_hamiltonian(H):\n terms = split_hamiltonian(H)\n\n def label(s):\n if s == 'H0':\n return r'\\hat{H}_0'\n elif s == 'Hint':\n return r'\\hat{H}_{\\text{int}}'\n else:\n try:\n prefix, ind = s.split('_')\n except ValueError:\n print(s)\n raise\n return r'\\hat{H}_{\\Omega_%s}' % ind\n\n lines = []\n lines.append(r'\\begin{align}')\n lines.append(r' \\hat{H} &= %s\\\\' % \" + \".join([label(name) for name in terms.keys()]))\n for name, H in terms.items():\n lines.append(r' %s &= %s\\\\' % (label(name), tex(H)))\n lines.append(r'\\end{align}')\n display(Latex(\"\\n\".join(lines)))", "def __init__(self, logp, start, mass=None, step_size=1, n_steps=5, **kwargs):\n\n super(Hamiltonian, self).__init__(logp, start, **kwargs)\n\n if mass is None:\n self.mass = default_mass(self.state, self.scale, self.conditional)\n else:\n assert np.all(mass.T - mass <= 1e-6), 'mass matrix is asymmetric'\n assert self.state.tovector().size == mass.shape[0], \\\n 'mass matrix dimensionality does not match states'\n self.mass = mass\n\n self.dim = self.mass.shape[0]\n self.mass_chol = la.cholesky(self.mass, lower=True)\n self.step_size = step_size / self.dim**(1/4)\n self.n_steps = n_steps", "def set_H2(self):\n self.slot.H2 = self.lf_H2.value()\n self.w_out.comp_output()\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()", "def b(self, b):\n\n self._b = b", "def set_bunit(self, bunit):\n self.bunit = bunit", "def change_hands(self, new_hand=None):\n self.hand = new_hand", "def H_layer(self, nqubits):\n for idx in range(nqubits):\n qml.Hadamard(wires=idx)", "def set_from_block(self, M, n):\n self.support[n] = 1\n self.W[n] = self._block_matrix_to_mpo(M, self.dout[n], self.din[n])", "def test_set_hs(self):\n s = State(substance=\"water\")\n s.hs = Q_(1061602.391543017, \"J/kg\"), Q_(3028.9867985920914, \"J/(kg*K)\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.hs[0], Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.hs[1], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore", "def H(self, qubit_expr):\n self.apply_gate_operation(cirq.ops.H, qubit_expr)", "def setbearing(self, bearing):\n diff = self.bearing - bearing\n self.b_change = diff\n self.bearing = bearing\n self._add_point()\n self.b_change = 0", "def Hamiltonian(self):\n Vmat = sparse.spdiags([self.U], [0], len(self.U), len(self.U))\n Kmat = -self.KE * Schrodinger.D2mat(numpts=len(self.x), delta=self.x[1] - self.x[0], periodic=self.periodic,\n 
q=self.q)\n return Kmat + Vmat", "def set_mist(self, humidity):\n if humidity > 100:\n humidity = 100\n elif humidity < 0:\n humidity = 0\n # could set acknolage\n self.__hum = humidity", "def hw_model(self, hw_model):\n self._hw_model = hw_model", "def __init__(self, FeHe):\n\n bhgrid = np.loadtxt(get_data(\"sevtables/bhifmr.dat\"))\n wdgrid = np.loadtxt(get_data(\"sevtables/wdifmr.dat\"))\n self._bhgrid = bhgrid\n self._wdgrid = wdgrid\n self.FeHe = FeHe\n self.FeHe_WD = self.FeHe_BH = self.FeHe\n\n # Make sure chonse metallicity is withing the valid range or fall back\n # to best possible value\n self._check_feh_bounds()\n\n # Interpolate coefficients to chosen metallicity\n BHconstants = []\n for loop in range(1, len(bhgrid[0])):\n BHconstants.append(np.interp(FeHe, bhgrid[:, 0], bhgrid[:, loop]))\n BHconstants = np.array(BHconstants)\n\n WDconstants = []\n # dont interpolate, but get the closest model\n j = np.argmin(np.abs(self.FeHe_WD - wdgrid[:, 0]))\n WDconstants = wdgrid[j, :]\n\n self.m_min, self.B, self.C = BHconstants[:3]\n self.p1 = np.poly1d(BHconstants[3:5])\n self.p2 = np.poly1d(BHconstants[5:7])\n self.p3 = np.poly1d(BHconstants[7:])\n self.mBH_min = self.predict(self.m_min)\n\n self.wd_m_max = WDconstants[1]\n self.p4 = np.poly1d(WDconstants[2:])", "def update_H(self):\n self.grid.H[:, -1, :, :] = self.grid.H[:, 0, :, :]", "def asthma(self, asthma):\n\n self.logger.debug(\"In 'asthma' setter.\")\n\n self._asthma = asthma", "def __init__(self, inplace=False):\n super(Hardsigmoid, self).__init__()\n self.inplace = inplace", "def em_update_h(self):\n with self.elbo_check('h'):\n self.update_h()", "def test_H_hat(self):\n\t\tposition = [0.0, 1.57079, 3.14159, 4.71238, 6.28318, 7.85398, 9.42477]\n\t\tpotential = [0.0, 6.0, 0.0, -6.0, 0.0, 6.0, 0.0]\n\t\tc = 1\n\t\tposition = tf.constant(position, shape = [1, len(position)], dtype = tf.float32)\n\t\tpotential = tf.constant(potential, shape = [1, len(potential)], dtype = tf.float32)\n\t\tbasis = schrodinger.create_basis(5)\n\t\tv = schrodinger.v0(position, potential, basis)\n\t\tcoeff = schrodinger.coefficient(position, basis)\n\t\tv0_hat = tf.linalg.solve(coeff, v)\n\t\tH = schrodinger.H_hat(c, len(basis), v0_hat)\n\t\tself.assertEqual(coeff.get_shape(), [len(basis), len(basis)])", "def test_multiorbital_hamiltonian():\n def lattice():\n lat = pb.Lattice([1])\n lat.add_sublattices((\"A\", [0], [[1, 3j],\n [0, 2]]))\n lat.register_hopping_energies({\n \"t22\": [[0, 1],\n [2, 3]],\n \"t11\": 1, # incompatible hopping - it's never used so it shouldn't raise any errors\n })\n lat.add_hoppings(([1], \"A\", \"A\", \"t22\"))\n return lat\n\n model = pb.Model(lattice(), pb.primitive(3))\n h = model.hamiltonian.toarray()\n\n assert model.system.num_sites == 3\n assert h.shape[0] == 6\n assert pytest.fuzzy_equal(h, h.T.conjugate())\n assert pytest.fuzzy_equal(h[:2, :2], h[-2:, -2:])\n assert pytest.fuzzy_equal(h[:2, :2], [[ 1, 3j],\n [-3j, 2]])\n assert pytest.fuzzy_equal(h[:2, 2:4], [[0, 1],\n [2, 3]])\n\n @pb.onsite_energy_modifier\n def onsite(energy, x, sub_id):\n return 3 * energy + sub_id.eye * 0 * x\n\n @pb.hopping_energy_modifier\n def hopping(energy):\n return 2 * energy\n\n model = pb.Model(lattice(), pb.primitive(3), onsite, hopping)\n h = model.hamiltonian.toarray()\n\n assert model.system.num_sites == 3\n assert h.shape[0] == 6\n assert pytest.fuzzy_equal(h, h.T.conjugate())\n assert pytest.fuzzy_equal(h[:2, :2], h[-2:, -2:])\n assert pytest.fuzzy_equal(h[:2, :2], [[ 3, 9j],\n [-9j, 6]])\n assert pytest.fuzzy_equal(h[:2, 
2:4], [[0, 2],\n [4, 6]])\n assert pytest.fuzzy_equal(h[2:4, 4:6], [[0, 2],\n [4, 6]])\n\n def lattice_with_zero_diagonal():\n lat = pb.Lattice([1])\n lat.add_sublattices((\"A\", [0], [[0, 3j],\n [0, 0]]))\n return lat\n\n model = pb.Model(lattice_with_zero_diagonal(), pb.primitive(3))\n h = model.hamiltonian.toarray()\n\n assert model.system.num_sites == 3\n assert h.shape[0] == 6\n assert pytest.fuzzy_equal(h, h.T.conjugate())\n assert pytest.fuzzy_equal(h[:2, :2], h[-2:, -2:])\n assert pytest.fuzzy_equal(h[:2, :2], [[0, 3j],\n [-3j, 0]])", "async def refresh(self):\n block = await self.blockchain.rpc.get_block_header(self.identifier)\n if not block:\n raise BlockDoesNotExistsException\n await super(BlockHeader, self).__init__(\n block, blockchain_instance=self.blockchain, use_cache=self._use_cache\n )", "def simulateForHeight(rocket,H_1):\n rocket.setLandingBurnStartHeight(H_1)\n \n # Simulate the dynamics\n t_f = 100.\n x_0 = np.array([rocket.H_0,rocket.v_0,rocket.m_0, # Ideal system\n rocket.H_0,rocket.v_0,rocket.m_0 # Real system\n ])\n \n state = solve_ivp(fun = rocket.dynamics,\n t_span = (0,t_f),\n y0 = x_0,\n events = [rocket.burnEvent,rocket.ascentEvent],\n max_step = 0.01)\n \n # Extract simulation results\n t = state.t\n h = state.y[3]\n m = state.y[5]\n \n fuel_used = m[0]-m[-1]\n burn_thrust = rocket.T\n burn_time = rocket.t_b\n time_history = t\n height_history = h\n \n return fuel_used,burn_thrust,burn_time,time_history,height_history", "def update_H(self):\n self.grid.H[:, :, -1, :] = self.grid.H[:, :, 0, :]", "def update_hubbard_settings(self, key, value):\n\n if self._hubbard_settings:\n if key in self._hubbard_settings:\n self._hubbard_settings[key] = value\n else:\n print(\"key does not exist!! keys include: {ldau, ldatype, ldaul, dlauu, ldauj, lmaxmix}\")\n else:\n print(\"hybrid settings not present!\")", "def setB(self, b):\n\t\tself.b = int(b)", "def set_bearing(self, bearing):\n self._set_sub_text('bearing', text=str(bearing))\n return self", "def set_block(self, chunk, coords, value):\n\n chunk.set_block(coords, value)", "def set_hv(self):\n raise NotImplementedError", "def __init__(self, bandit):\n self.bandit = bandit", "def set_ham(self,constant,displacement,pcoef,xcoef,*,real=True):\n self.constant = constant\n self.p0 = np.imag(displacement)\n self.x0 = np.real(displacement)\n self.pcoef = pcoef\n self.xcoef = xcoef\n extra_size = max(len(pcoef),len(xcoef))\n # Calculates powers of x and p via matrix powers. 
Must use an increased\n # base truncation size in order to correctly resolve x^n and p^n\n self.calculation_size = self.size + extra_size\n self.set_x_and_p()\n ham = np.diag(np.ones(self.calculation_size,dtype=complex)*constant)\n for n in range(0,len(pcoef)):\n p = self.p - np.diag(np.ones(self.calculation_size))*self.p0\n ham += pcoef[n] * np.linalg.matrix_power(p,n+2)\n for m in range(0,len(xcoef)):\n x = self.x - np.diag(np.ones(self.calculation_size))*self.x0\n ham += xcoef[m] * np.linalg.matrix_power(x,m+2)\n self.ham = ham[:self.size,:self.size]\n if real:\n self.ham = np.real(self.ham)", "def compute_Hamiltonian(self, xHat, pxHat, yHat, pyHat):\n\n hamiltonian = 0.5*(pxHat**2 + pyHat**2) + 0.5 *(xHat**2 + yHat**2)\n\n return hamiltonian", "def set_bajayf(self):\n return self.write({'state': 'Baja'})", "def set_height(self,c, h):\r\n self.h = h\r\n self.T1 = [[-self.R * np.sqrt(3) / (2*self.h), self.R / (2*self.h), 1],[0,-self.R/(self.h),1],[self.R * np.sqrt(3) / (2*self.h), self.R / (2*self.h), 1]]\r\n return self.h", "def update_Heff(self):\r\n dim = self.A.shape[0] * self.A.shape[1]\r\n\r\n A = self.A.reshape(dim, -1)\r\n tH = A.T @ self.HA.reshape(dim, dim) @ A\r\n self.HA = np.kron(tH, np.eye(self.p))\r\n self.HA = self.HA.reshape(self.bond, self.p, self.bond, self.p)\r\n\r\n A = self.A.reshape(-1, self.p * self.bond)\r\n B = (A.T @ A).reshape(self.p, self.bond, self.p, self.bond)\r\n self.HA += np.einsum('ibjc,ikjl->bkcl', B, self.NN_interaction)", "def ssBIRCH(self, n_clusters):\n self.classifier = \"Spectral-Spatial-BIRCH\"\n print \"TODO\"", "def hash(self, hash):\n\n self._hash = hash" ]
[ "0.7961094", "0.72680366", "0.70075583", "0.69372654", "0.6821623", "0.6276145", "0.61712486", "0.60667217", "0.59148175", "0.5814612", "0.5814612", "0.55943906", "0.55346644", "0.5519819", "0.54950064", "0.5393633", "0.5378804", "0.53554547", "0.5317289", "0.5309732", "0.5309382", "0.520792", "0.5193114", "0.5176767", "0.5176767", "0.51747555", "0.51619595", "0.5158015", "0.5138185", "0.5113407", "0.5113407", "0.5090658", "0.5062749", "0.5048362", "0.50429493", "0.5036306", "0.5016341", "0.50125873", "0.50071037", "0.5002545", "0.49877796", "0.4966233", "0.495491", "0.49525234", "0.49496338", "0.4936737", "0.49335286", "0.491468", "0.49137327", "0.49128887", "0.48903304", "0.4887399", "0.4881816", "0.48576027", "0.4852943", "0.48501432", "0.4847982", "0.48462477", "0.48347092", "0.4829712", "0.48274466", "0.48174012", "0.48098966", "0.48094574", "0.48069435", "0.47922698", "0.47920677", "0.47919634", "0.47915652", "0.4786922", "0.47761825", "0.47672486", "0.4766396", "0.4764079", "0.4742182", "0.4740643", "0.47391373", "0.4735709", "0.47304216", "0.4726249", "0.47199357", "0.46974233", "0.46929708", "0.46912414", "0.4689555", "0.46683165", "0.46658993", "0.4664161", "0.46615684", "0.4660259", "0.46585613", "0.46584988", "0.46581343", "0.46580058", "0.4653368", "0.465033", "0.46469516", "0.463774", "0.4635008", "0.4633884" ]
0.77650434
1
Sets the operators to update to the ones for the Hubbard model.
def set_operators_to_update(self, system): system.add_to_operators_to_update('rprm_up_plus_dag', site_op='rprm_up_plus_dag') system.add_to_operators_to_update('rprm_down_plus_dag', site_op='rprm_down_plus_dag') system.add_to_operators_to_update('rprm_up_minus_dag', site_op='rprm_up_minus_dag') system.add_to_operators_to_update('rprm_down_minus_dag', site_op='rprm_down_minus_dag') system.add_to_operators_to_update('rprm_up_plus', site_op='rprm_up_plus') system.add_to_operators_to_update('rprm_down_plus', site_op='rprm_down_plus') system.add_to_operators_to_update('rprm_up_minus', site_op='rprm_up_minus') system.add_to_operators_to_update('rprm_down_minus', site_op='rprm_down_minus') system.add_to_operators_to_update('dimer', site_op='dimer') #system.add_to_operators_to_update('u', site_op='u')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_operators_to_update(self, system):\n # If you have a block hamiltonian in your block, update it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_operators_to_update('bh', block_op='bh')\n system.add_to_operators_to_update('c_up', site_op='c_up')\n system.add_to_operators_to_update('c_up_dag', site_op='c_up_dag')\n system.add_to_operators_to_downdate('c_down', site_op='c_down')\n system.add_to_operators_to_downdate('c_down_dag', site_op='c_down_dag')\n system.add_to_operators_to_update('u', site_op='u')", "def operators(self, operators=None) -> None:\n operators = _validate_operators(operators)\n self._invalidate()\n self._operators = operators", "def set_operator(self, op):\n self.operator = op", "def operators(self, operators):\n if operators is None:\n raise ValueError(\"Invalid value for `operators`, must not be `None`\") # noqa: E501\n\n self._operators = operators", "def make_operators(self):\n self.relationship_operator = Operators.RelationshipOperator(self)\n self.infection_operator = Operators.InfectionOperator(self)\n self.time_operator = Operators.TimeOperator(self)", "def SetOperator(self, op):\n return _hypre.HypreAMS_SetOperator(self, op)", "def SetOperator(self, op):\n return _hypre.HypreBoomerAMG_SetOperator(self, op)", "def operator(self, operator):\n\n self._operator = operator", "def SetOperator(self, op):\n return _hypre.HypreADS_SetOperator(self, op)", "def SetOperator(self, op):\n return _hypre.HypreILU_SetOperator(self, op)", "def SetOperator(self, op):\n return _hypre.HypreSolver_SetOperator(self, op)", "def SetOperator(self, op):\n return _hypre.HyprePCG_SetOperator(self, op)", "def operator(self, operator: str):\n\n self._operator = operator", "def set_operators_to_update_to_AF_Heisenberg(system):\n system.add_to_operators_to_update('s_z', site_op='s_z')\n system.add_to_operators_to_update('s_p', site_op='s_p')\n system.add_to_operators_to_update('s_m', site_op='s_m')", "def SetOperator(self, op):\n return _hypre.HypreGMRES_SetOperator(self, op)", "def operators(self):\n return self._operators", "def SetOperator(self, A):\n return _hypre.HypreAME_SetOperator(self, A)", "def SetOperator(self, A):\n return _hypre.HypreLOBPCG_SetOperator(self, A)", "def setOp(self, op):\n self.__op = op", "def setOp(self, op):\n self.__op = op", "def change_ops_state(self, state):\n for op_button in self.operators.values():\n op_button['state'] = state", "def SetOperator(self, op):\n return _hypre.HypreParaSails_SetOperator(self, op)", "def setOp(self, value):\n raise UnsupportedOperationException(\"Cannot change operator status of a block\")", "def change_operator(self, text):\n self.operator = text\n if self.current_num:\n self.prev_num = self.current_num\n self.current_num = \"\"", "def SetOperator(self, op):\n return _hypre.HypreFGMRES_SetOperator(self, op)", "def test_operator(self):\n\t\tfor op in self.ops:\n\t\t\tself.filter.set_operator(op)\n\t\t\tself.assertEqual(self.filter.operator.value, op)", "def declare_operators(*op_list):\n operators.update({op.__name__:op for op in op_list})\n return operators", "def SetOperator(self, op):\n return _hypre.HypreSmoother_SetOperator(self, op)", "def set_operator(self, operator):\n\n self['dimensionFilterClauses']['operator'] = operator.upper()\n\n return self", "def assign_operator(cls, quad):\n\t\tvalue = cls.get_address_value(quad.left_operand)\n\t\tif quad.right_operand :\n\t\t\tcls.set_arr_value(quad.result, quad.right_operand, value)\n\t\telse:\n\t\t\tcls.set_address_value(quad.result, value)", "def 
operators(self):\n return self.domain.operators.keys()", "def test_operator_set(self, test_dag):\n # Unpack the fixture\n dag, (op1, op2, op3, op4) = test_dag\n # Arrange the operators with a Label in the middle\n op1.set_downstream(op2, Label(\"Label 1\"))\n op3.set_upstream(op2, Label(\"Label 2\"))\n op4.set_upstream(op2)\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Label 1\"}\n assert dag.get_edge_info(op2.task_id, op3.task_id) == {\"label\": \"Label 2\"}\n assert dag.get_edge_info(op2.task_id, op4.task_id) == {}", "def update(self, operation, operand0, operand1, operand2):\n self.operation = operation\n self.operand0 = operand0\n self.operand1 = operand1\n self.operand2 = operand2", "def add_operators(self, run):\n if self.rate_variation:\n # UpDownOperator to scale the Gamma distribution for this model's\n # feature rates\n updown = ET.SubElement(run, \"operator\", {\"id\":\"featureClockRateGammaUpDown:%s\" % self.name, \"spec\":\"UpDownOperator\", \"scaleFactor\":\"0.5\",\"weight\":\"0.3\"})\n ET.SubElement(updown, \"parameter\", {\"idref\":\"featureClockRateGammaShape:%s\" % self.name, \"name\":\"up\"})\n ET.SubElement(updown, \"parameter\", {\"idref\":\"featureClockRateGammaScale:%s\" % self.name, \"name\":\"down\"})", "def set_equations(self, *args, **kwargs):\n pass", "def set_eq(self, eq):\n if self.eq is not None:\n for existing_eq in self.eq: \n existing_eq.remove()\n existing_eq.release() \n self.eq = []\n for bands in eq['bands']:\n eqdsp = system.create_dsp_by_type(FMOD_DSP_TYPE_PARAMEQ)\n eqdsp.set_param(0, bands[0]) # centre\n eqdsp.set_param(1, bands[1]) # octaves\n eqdsp.set_param(2, from_dB(bands[2])) # gain \n system.add_dsp(eqdsp) \n self.eq.append(eqdsp)\n logging.debug(\"Equaliser %s active\" % eq['name'])", "def update_with_operator(self, operator: Operator):\n if not isinstance(operator, Operator):\n raise TypeError(\n f\"operator expected to be of type `Operator` but got type \"\n f\"{type(operator)}\"\n )\n\n for prefix in operator.rates.keys():\n self._update_prefix(prefix=prefix, operator=operator)", "def SetOperator(self, op):\n return _hypre.HypreDiagScale_SetOperator(self, op)", "def SetOperateMode(self):\n handler = self.get_command_object(\"SetOperateMode\")\n handler()", "def __init__(self, generators: List[FormulaGenerator], operators: List[Collection] = list()):\n super().__init__(generators)\n self.operators = operators\n self.randomize_order = False", "def apply_operator_set(model, operator_set):\n field_part = []\n for operator in operator_set:\n field_part.append(apply_const_shift_operator(model, operator))\n field_part = torch.cat(field_part)\n return field_part", "def _append_operator(self, operator):", "def set_rhs(self):\n pass", "def setup(self, operator_config):\n raise NotImplementedError()", "def set_powers(self, power_1, power_2):\n pass", "def set_default_operator(self, operator):\n return self.set_param(\"default_operator\", operator)", "def SetOperator(self, op):\n return _hypre.HypreEuclid_SetOperator(self, op)", "def _OverloadAllOperators(): # pylint: disable=invalid-name\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n ZfitBaseVariable._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(ZfitBaseVariable, \"__getitem__\", array_ops._SliceHelperVar)", "def test_update_operator_with_empty_success(self):\n project_id = util.MOCK_UUID_1\n 
experiment_id = util.MOCK_UUID_1\n deployment_id = util.MOCK_UUID_1\n operator_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.patch(\n f\"/projects/{project_id}/deployments/{deployment_id}/operators/{operator_id}\",\n json={},\n )\n result = rv.json()\n expected = {\n \"uuid\": operator_id,\n \"name\": util.MOCK_TASK_NAME_1,\n \"taskId\": util.MOCK_UUID_1,\n \"task\": {\n \"name\": util.MOCK_TASK_NAME_1,\n \"tags\": [],\n \"parameters\": [],\n },\n \"dependencies\": [],\n \"parameters\": {\"dataset\": util.IRIS_DATASET_NAME},\n \"experimentId\": experiment_id,\n \"deploymentId\": None,\n \"positionX\": 0,\n \"positionY\": 0,\n \"createdAt\": util.MOCK_CREATED_AT_1.isoformat(),\n \"updatedAt\": mock.ANY,\n \"status\": \"Unset\",\n \"statusMessage\": None,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def set_simulation(self, param, operators, mesh):\n self.param = param\n self.operators = operators\n self.mesh = mesh\n\n # Set simulation for the components\n self.electrolyte.set_simulation(param, operators, mesh)\n self.interface.set_simulation(param, mesh)", "def add_operator(self, operator: Callable) -> None:\n self.operators.append(operator)", "def repair_operators(self) -> List[Tuple[str, _OperatorType]]:\n return list(self._r_ops.items())", "def _create_weight_update_ops(self):\n with tf.name_scope(\"Weight_Update_Operators\"):\n self.weight_vars_assign_ops = []\n for weight_matrix, grad in zip(self._train_vars, self.step_direction_variables):\n self.weight_vars_assign_ops.append(\n tf.assign_add(weight_matrix, self._step_on_line_plh * -grad / self.norm_of_gradient_var).op)", "def tenant_operators(self, operator_id, data, tenant_id=None, api_version=\"v2.2\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/operators/{}\".format(api_version,\n tenant_id,\n operator_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)", "def operator(self) -> str:\n return pulumi.get(self, \"operator\")", "def operator(self) -> str:\n return pulumi.get(self, \"operator\")", "def append_operator(cls, operator):\n for context in cls._active_contexts:\n context._append_operator(operator) # pylint: disable=protected-access", "def edit( self, value=None, operator=None ):\n if value is None:\n self._clear()\n else:\n if type( value ) == type( '' ):\n value = value.split('\\n')\n self.value = tuple( value )\n\n if not operator:\n operator = None\n\n self.operator = operator", "def base_operator(self):\n raise NotImplementedError()", "def _OverloadAllOperators(): # pylint: disable=invalid-name\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n ComposedVariable._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(ComposedVariable, \"__getitem__\", array_ops._SliceHelperVar)", "def _override_operator(class_object, operator, func):\n existing = getattr(class_object, operator, None)\n if existing is not None:\n # Check to see if this is a default method-wrapper or slot wrapper which\n # will be true for the comparison operators.\n if not isinstance(existing, type(object.__lt__)) and not isinstance(existing, 
type(object.__repr__)):\n raise ValueError(\"operator %s cannot be overwritten again on class %s.\" %(operator, class_object))\n setattr(class_object, operator, func)", "def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")", "def my_operator(self):\n return self._my_operator", "def operator(self):\n return self.__operator", "def __init__(self, orbital_operators, orbital_labels, op_type, prefactor=1.0):\n\n self.orbital_operators = np.array(orbital_operators, dtype=str)\n self.orbital_labels = np.array(orbital_labels, dtype=int)\n self.op_type = op_type\n\n if len(self.orbital_operators) != len(self.orbital_labels):\n ValueError('The number of orbital operators and labels is inconsistent for the OperatorString: {} {}'.format(len(self.orbital_operators), len(self.orbital_labels)))\n\n self.prefactor = prefactor\n\n # Stored for use in computing commutators.\n # A dictionary of the labels to their index in the operator string.\n self._indices_orbital_labels = dict()\n for ind_orbital in range(len(self.orbital_labels)):\n self._indices_orbital_labels[self.orbital_labels[ind_orbital]] = ind_orbital\n \n # Compute the prefactor automatically if a Majorana operator.\n if self.op_type == 'Majorana':\n # Stored for use in computing commutators.\n # The labels of orbital operators that are 'A' or 'B'.\n self._labels_ab_operators = np.array([self.orbital_labels[ind] for ind in range(len(self.orbital_labels)) if self.orbital_operators[ind] in ['A', 'B']], dtype=int)\n num_ab = len(self._labels_ab_operators)\n\n # The prefactor is 1 or 1j, depending\n # on whether reversing the order of operators creates\n # a +1 or -1 sign due to anti-commutation operators.\n num_swaps_to_reorder = (num_ab*(num_ab-1))/2\n if num_swaps_to_reorder % 2 == 1:\n self.prefactor = 1j\n\n if (self.op_type == 'Pauli' and self.prefactor != 1) \\\n or (self.op_type == 'Majorana' and self.prefactor not in [1, 1j]) \\\n or (self.op_type == 'Fermion' and self.prefactor not in [1, 1j]):\n raise ValueError('Invalid prefactor {} for operator string of op_type {}'.format(self.prefactor, self.op_type))\n \n name_list = [str(self.prefactor),' ']\n for (op, la) in zip(self.orbital_operators, self.orbital_labels):\n name_list.extend([op, ' ', str(la), ' '])\n\n self.name = ''.join(name_list)", "def _remove_operator(self, operator):", "def assemble_operator(self, parameters, space_group='default'):\n operator = super(RWGDominantSystem, self).assemble_operator(parameters, space_group)\n return operator.weak_form()", "def operator(self):\n return self.data.get('operator', 'and')", "def operator(self):\n return self.data.get('operator', 'and')", "def set_eval(self):\n for m in self.models.values():\n m.eval()", "def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2", "def __init__(self, op, op_param_list, op_reg_list):\n self. 
operation = {\n 'op': op,\n 'op_param_list': op_param_list,\n 'op_reg_list': op_reg_list\n }", "def replace_operators(self, instr):\n # change ++, -- to add(1), sub(1)\n instr = re.sub(r\"\\+\\+\", \".add(1)\", instr)\n instr = re.sub(r\"--\", \".sub(1)\", instr)\n\n m1 = re.search(r\"[+\\-*/]=\", instr)\n result = \"\"\n if m1:\n # handle the string with +=, -=, *=. /=\n v = instr[: m1.start()].rstrip(\" \")\n v1 = v.strip(\" \")\n expressions = [v1, m1.group()[: 1], \"(\", instr[m1.end():].strip().strip(\";\"), \");\"]\n instr = v + \"= \" + \" \".join(expressions)\n\n # split by !, &&, ||\n equations = re.split(r\"(!|&&|\\|\\||)\", instr)\n for equation in equations:\n # split by <=, >=, ==, !=, =\n expressions = re.split(r\"([<>=!]*=)\", equation)\n if len(expressions) == 1:\n result += equation\n else:\n for expression in expressions:\n if re.search(r\"[+\\-*/]\", expression):\n # with math operators\n # 0.exclude ;\n rc = \"\"\n pos = expression.find(';')\n if pos != -1:\n rc = expression[pos:]\n expression = expression[:pos]\n\n # 1.exclude independent ( or )\n lbc = expression.count(\"(\")\n rbc = expression.count(\")\")\n lc = \"\"\n if lbc > rbc:\n # ( is more than )\n pos = expression.replace('(', 'X', lbc - rbc - 1).find('(')\n lc = expression[: pos + 1]\n expression = expression[pos + 1:]\n else:\n if lbc < rbc:\n # ( is less than )\n pos = 'X'.join(expression.rsplit(')', rbc - lbc - 1)).rfind(')')\n rc = expression[pos:] + rc\n expression = expression[:pos]\n\n # 2.change normal notation to RPN, in order to change math operators to SafeMath operators\n # 3.change RPN to normal notation\n result += lc + self.rpn_to_nn(self.nn_to_rpn(expression)) + rc\n else:\n result += expression\n\n return result", "def operartors(self) -> List[Operator]:\n return list(self.__ops.keys())", "def _basic_operators_init():\n global BASIC_OPERATORS\n\n BASIC_OPERATORS = {\n \"angle_between\": {\n \"node\": \"angleBetween\",\n \"inputs\": [\n [\"vector1X\", \"vector1Y\", \"vector1Z\"],\n [\"vector2X\", \"vector2Y\", \"vector2Z\"],\n ],\n \"outputs\": [\n [\"angle\"],\n ],\n },\n\n \"average\": {\n \"node\": \"plusMinusAverage\",\n \"inputs\": [\n [\n \"input3D[{array}].input3Dx\",\n \"input3D[{array}].input3Dy\",\n \"input3D[{array}].input3Dz\"\n ],\n ],\n \"outputs\": [\n [\"output3Dx\", \"output3Dy\", \"output3Dz\"],\n ],\n \"operation\": 3,\n },\n\n \"blend\": {\n \"node\": \"blendColors\",\n \"inputs\": [\n [\"color1R\", \"color1G\", \"color1B\"],\n [\"color2R\", \"color2G\", \"color2B\"],\n [\"blender\"],\n ],\n \"outputs\": [\n [\"outputR\", \"outputG\", \"outputB\"],\n ],\n },\n\n \"choice\": {\n \"node\": \"choice\",\n \"inputs\": [\n [\"input[{array}]\"],\n [\"selector\"],\n ],\n \"outputs\": [\n [\"output\"],\n ],\n },\n\n \"clamp\": {\n \"node\": \"clamp\",\n \"inputs\": [\n [\"inputR\", \"inputG\", \"inputB\"],\n [\"minR\", \"minG\", \"minB\"],\n [\"maxR\", \"maxG\", \"maxB\"],\n ],\n \"outputs\": [\n [\"outputR\", \"outputG\", \"outputB\"],\n ],\n },\n\n \"compose_matrix\": {\n \"node\": \"composeMatrix\",\n \"inputs\": [\n [\"inputTranslateX\", \"inputTranslateY\", \"inputTranslateZ\"],\n [\"inputRotateX\", \"inputRotateY\", \"inputRotateZ\"],\n [\"inputScaleX\", \"inputScaleY\", \"inputScaleZ\"],\n [\"inputShearX\", \"inputShearY\", \"inputShearZ\"],\n [\"inputRotateOrder\"],\n [\"useEulerRotation\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n\n \"decompose_matrix\": {\n \"node\": \"decomposeMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n 
[\"outputTranslateX\", \"outputTranslateY\", \"outputTranslateZ\"],\n [\"outputRotateX\", \"outputRotateY\", \"outputRotateZ\"],\n [\"outputScaleX\", \"outputScaleY\", \"outputScaleZ\"],\n [\"outputShearX\", \"outputShearY\", \"outputShearZ\"],\n ],\n \"output_is_predetermined\": True,\n },\n\n \"inverse_matrix\": {\n \"node\": \"inverseMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n\n \"length\": {\n \"node\": \"distanceBetween\",\n \"inputs\": [\n [\"point1X\", \"point1Y\", \"point1Z\"],\n [\"point2X\", \"point2Y\", \"point2Z\"],\n ],\n \"outputs\": [\n [\"distance\"],\n ],\n },\n\n \"matrix_distance\": {\n \"node\": \"distanceBetween\",\n \"inputs\": [\n [\"inMatrix1\"],\n [\"inMatrix2\"],\n ],\n \"outputs\": [\n [\"distance\"],\n ],\n },\n\n \"mult_matrix\": {\n \"node\": \"multMatrix\",\n \"inputs\": [\n [\n \"matrixIn[{array}]\"\n ],\n ],\n \"outputs\": [\n [\"matrixSum\"],\n ],\n },\n\n \"normalize_vector\": {\n \"node\": \"vectorProduct\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"normalizeOutput\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": 0,\n },\n\n \"pair_blend\": {\n \"node\": \"pairBlend\",\n \"inputs\": [\n [\"inTranslateX1\", \"inTranslateY1\", \"inTranslateZ1\"],\n [\"inRotateX1\", \"inRotateY1\", \"inRotateZ1\"],\n [\"inTranslateX2\", \"inTranslateY2\", \"inTranslateZ2\"],\n [\"inRotateX2\", \"inRotateY2\", \"inRotateZ2\"],\n [\"weight\"],\n [\"rotInterpolation\"],\n ],\n \"outputs\": [\n [\"outTranslateX\", \"outTranslateY\", \"outTranslateZ\"],\n [\"outRotateX\", \"outRotateY\", \"outRotateZ\"],\n ],\n \"output_is_predetermined\": True,\n },\n\n \"point_matrix_mult\": {\n \"node\": \"pointMatrixMult\",\n \"inputs\": [\n [\"inPointX\", \"inPointY\", \"inPointZ\"],\n [\"inMatrix\"],\n [\"vectorMultiply\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n },\n\n \"remap_value\": {\n \"node\": \"remapValue\",\n \"inputs\": [\n [\"inputValue\"],\n [\"outputMin\"],\n [\"outputMax\"],\n [\"inputMin\"],\n [\"inputMax\"],\n ],\n \"outputs\": [\n [\"outValue\"],\n ],\n },\n\n \"set_range\": {\n \"node\": \"setRange\",\n \"inputs\": [\n [\"valueX\", \"valueY\", \"valueZ\"],\n [\"minX\", \"minY\", \"minZ\"],\n [\"maxX\", \"maxY\", \"maxZ\"],\n [\"oldMinX\", \"oldMinY\", \"oldMinZ\"],\n [\"oldMaxX\", \"oldMaxY\", \"oldMaxZ\"],\n ],\n \"outputs\": [\n [\"outValueX\", \"outValueY\", \"outValueZ\"],\n ],\n },\n\n \"transpose_matrix\": {\n \"node\": \"transposeMatrix\",\n \"inputs\": [\n [\"inputMatrix\"],\n ],\n \"outputs\": [\n [\"outputMatrix\"],\n ],\n },\n }\n\n # Fill BASIC_OPERATORS with condition operations\n cond_operators = [\"eq\", \"ne\", \"gt\", \"ge\", \"lt\", \"le\"]\n for i, condition_operator in enumerate(cond_operators):\n BASIC_OPERATORS[condition_operator] = {\n \"node\": \"condition\",\n \"inputs\": [\n [\"firstTerm\"],\n [\"secondTerm\"],\n ],\n # The condition node is a special case! 
It gets created during\n # the magic-method-comparison and fully connected after being\n # passed on to the condition()-method in this OperatorMetaClass\n \"outputs\": [\n [None],\n ],\n \"operation\": i,\n }\n\n # Fill BASIC_OPERATORS with +,- operations\n for i, add_sub_operator in enumerate([\"add\", \"sub\"]):\n BASIC_OPERATORS[add_sub_operator] = {\n \"node\": \"plusMinusAverage\",\n \"inputs\": [\n [\n \"input3D[{array}].input3Dx\",\n \"input3D[{array}].input3Dy\",\n \"input3D[{array}].input3Dz\"\n ],\n ],\n \"outputs\": [\n [\"output3Dx\", \"output3Dy\", \"output3Dz\"],\n ],\n \"operation\": i + 1,\n }\n\n # Fill BASIC_OPERATORS with *,/,** operations\n for i, mult_div_operator in enumerate([\"mul\", \"div\", \"pow\"]):\n BASIC_OPERATORS[mult_div_operator] = {\n \"node\": \"multiplyDivide\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"input2X\", \"input2Y\", \"input2Z\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": i + 1,\n }\n\n # Fill BASIC_OPERATORS with vectorProduct operations\n for i, vector_product_operator in enumerate([\"dot\", \"cross\"]):\n BASIC_OPERATORS[vector_product_operator] = {\n \"node\": \"vectorProduct\",\n \"inputs\": [\n [\"input1X\", \"input1Y\", \"input1Z\"],\n [\"input2X\", \"input2Y\", \"input2Z\"],\n [\"normalizeOutput\"],\n ],\n \"outputs\": [\n [\"outputX\", \"outputY\", \"outputZ\"],\n ],\n \"operation\": i + 1,\n }", "def SetOperatorSymmetry(self, is_sym):\n return _hypre.HypreSmoother_SetOperatorSymmetry(self, is_sym)", "def set_model_params(self, w1, b1, w2, b2, w3, b3, w4, b4, w5, b5, w6, b6):\n self.w1 = w1\n self.w2 = w2\n self.w3 = w3\n self.w4 = w4\n self.w5 = w5\n self.w6 = w6\n\n self.b1 = b1\n self.b2 = b2\n self.b3 = b3\n self.b4 = b4\n self.b5 = b5\n self.b6 = b6\n\n return", "def operatorNames(self):\r\n return [\"moveUp\", \"moveDown\",\r\n \"moveLeft\", \"moveRight\"]", "def operator(self) -> str:\n return self._operator", "def and_or_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\t# TODO: The next set of lines will fail at a specific case\n\t\tif quad.operator == 10 :\n\t\t\tcls.set_address_value(quad.result, (left_op and right_op))\n\t\telif quad.operator == 11 :\n\t\t\tcls.set_address_value(quad.result, (left_op or right_op))", "def setUp(self):\n self.OR_Neuron = Neuron([12, 12], Sigmoid().activate, bias=-6)", "def set_7band_eq_mode(self, preset_index):\n params = [('presetindex', int(preset_index))]\n\n self.get(COMMAND_UIC, 'Set7bandEQMode', params)", "def applyOperators(self):\n sendList = [self.sendTwoM, self.sendTwoC, self.sendMC, self.sendM, self.sendC]\n bringList = [self.bringTwoM, self.bringTwoC, self.bringMC, self.bringM, self.bringC]\n result = []\n if self.boatLocation() == 1: # now boat is on destination side\n for operation in bringList:\n toAdd = operation()\n if toAdd is not None and toAdd.isValidState():\n result.append(toAdd)\n elif self.boatLocation() == 0: #now boat is on start side\n for operation in sendList:\n toAdd = operation()\n if toAdd is not None and toAdd.isValidState():\n result.append(toAdd)\n else:\n raise Exception\n return result", "def connector_operators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FlowTaskConnectorOperatorArgs']]]]:\n return pulumi.get(self, \"connector_operators\")", "def applyOperator(self, operator, operand):\n if self.currentTotal == None:\n self.currentTotal = operand\n elif operator == \"=\":\n 
self.equalsOp(operand)\n elif self.previousOperand:\n self.previousOperand = None\n else:\n self.computeTotal(operator, operand)\n if operator != \"=\":\n self.previousOperator = operator", "def get_operator(self):\n\n Operator = []\n\n '''\n print('Create H - 150 & 220 GHz')\n ope=[]\n for i in range(self.nfreqs):\n ope.append(self.H150.operands[i])\n for i in range(self.nfreqs):\n ope.append(self.H220.operands[i])\n self.Hboth = BlockRowOperator(ope, new_axisin=0)\n self.H=self.Hboth\n '''\n\n\n\n H_qubic = self.qubic.get_operator()\n R_qubic = ReshapeOperator(H_qubic.shapeout, H_qubic.shape[0])\n Operator.append(R_qubic(H_qubic))\n\n H_planck = self.planck.get_operator()\n R_planck = ReshapeOperator(H_planck.shapeout, H_planck.shape[0])\n Operator.append(R_planck(H_planck))\n return BlockColumnOperator(Operator, axisout=0)", "def get_operator_to_make_TOD(self):\n if len(self) == 1:\n return self.get_operator()\n op = self._get_array_of_operators()\n return BlockRowOperator(op, new_axisin=0)", "def __init__(self):\n super(OperatorCodegen, self).__init__()", "def get_operator(self):\n if len(self) == 1:\n return self[0].get_operator()\n op = np.array(self._get_array_of_operators())\n return np.sum(op, axis=0)", "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "def add_operator(self, operator: Soldier) -> None:\n if isinstance(operator, Soldier):\n if len(self.__operators) < self.MAX_OPERATORS:\n self.__operators.append(operator)\n self.__is_alive = True\n else:\n raise TypeError(\"argument must be a Soldier\")", "def _set_controls(self, control_operations: dict):\n control_index = 1\n for id, operations in control_operations.items():\n link = self.pumps[id] if id in self.pumps else self.valves[id] if id in self.valves else self.pipes[id]\n for op in operations:\n epamodule.ENsetcontrol(control_index,\n epamodule.EN_TIMER,\n link.en_index,\n op[0], # operation setting\n 0,\n op[1]) # operation time\n control = epamodule.ENgetcontrol(control_index)\n epanet_control_time = int(control[4])\n link.add_control_operation(epanet_control_time, op[0])\n\n control_index += 1", "def test_operator_rendering(self):\n self.assertEqual(\"=\", six.text_type(EqualsOperator()))\n self.assertEqual(\"IN\", six.text_type(InOperator()))\n self.assertEqual(\">\", six.text_type(GreaterThanOperator()))\n self.assertEqual(\">=\", six.text_type(GreaterThanOrEqualOperator()))\n self.assertEqual(\"<\", six.text_type(LessThanOperator()))\n self.assertEqual(\"<=\", six.text_type(LessThanOrEqualOperator()))", "def get_operators(self):\n url = self.config['links']['accountAPI'] + 'operators'\n params = {\n 'client': self.client,\n 'country_code': self.locale_suffix\n }\n data = self.make_request(url, 'get', params=params)\n\n return data['data']['operators']", "def set(self, **kwargs):\n for key in kwargs:\n if key in self.bool_params:\n self.bool_params[key] = kwargs[key]\n elif key in self.int_params:\n self.int_params[key] = kwargs[key]\n elif key in self.str_params:\n self.str_params[key] = kwargs[key]\n elif key in self.float_params:\n self.float_params[key] = kwargs[key]\n else:\n raise RuntimeError('MOPAC calculator: unknown keyword: ' + key)", "def y_operators(self) -> List[PauliTerm]:\n # Y = iXZ\n y_operators = [1j * x_op * z_op\n for x_op, z_op in zip(self.x_operators(), self.z_operators())]\n for y_op in 
y_operators:\n assert y_op.coefficient == 1\n return y_operators", "def update(self):\n self.cursor.execute(\"\"\"SELECT * FROM sensors_powersensor\"\"\")\n list = self.cursor.fetchall()\n for sensor in list:\n self.add(sensor[2], sensor[1])", "def __init__(self, opToken, leftOper, rightOper):\n self.operator = opToken\n self.leftOperand = leftOper\n self.rightOperand = rightOper" ]
[ "0.7131101", "0.71062386", "0.68636316", "0.64068896", "0.62529534", "0.6246277", "0.62326676", "0.6152808", "0.61323714", "0.6125695", "0.61225945", "0.6067522", "0.60651207", "0.59958494", "0.59198344", "0.58076245", "0.5751448", "0.57447195", "0.5720005", "0.5720005", "0.56862736", "0.567975", "0.5674847", "0.5653581", "0.5612472", "0.55647415", "0.5531227", "0.55220425", "0.5518408", "0.55156755", "0.54887384", "0.54460746", "0.5401515", "0.53796947", "0.5376856", "0.53433377", "0.53310233", "0.53119177", "0.5303282", "0.5292136", "0.5284716", "0.5206073", "0.5204703", "0.5202409", "0.5198274", "0.5184555", "0.51640224", "0.5097408", "0.50872725", "0.50760347", "0.5025925", "0.5010071", "0.49937797", "0.4966685", "0.49383652", "0.49383652", "0.4914625", "0.48947564", "0.48942426", "0.4851543", "0.48481917", "0.483581", "0.4829904", "0.4827191", "0.48262566", "0.4818175", "0.48030034", "0.47978836", "0.47978836", "0.47929186", "0.47890106", "0.478575", "0.4783736", "0.47835544", "0.4766583", "0.47536993", "0.475047", "0.4744144", "0.47436753", "0.473864", "0.47241032", "0.47239295", "0.4723681", "0.47181705", "0.47016954", "0.46935248", "0.46904638", "0.4688279", "0.4679457", "0.4676646", "0.4676646", "0.4676646", "0.4672047", "0.46683604", "0.46601272", "0.4646507", "0.46426108", "0.46399048", "0.46270007", "0.46251777" ]
0.6803695
3
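Note: a minimal sketch of how the Hubbard document above could be exercised. The MinimalSystem stub is made up for illustration; only the method name add_to_operators_to_update and the operator names come from the document itself.

# Hypothetical stand-in for the DMRG-style 'system' argument; the real
# class is not shown in this record, so this stub is an assumption.
class MinimalSystem:
    def __init__(self):
        self.operators_to_update = {}

    def add_to_operators_to_update(self, key, site_op=None, block_op=None):
        # Record which site (or block) operator backs each updated name,
        # mirroring the calls made by the Hubbard setter above.
        self.operators_to_update[key] = {'site_op': site_op, 'block_op': block_op}

system = MinimalSystem()
for name in ('rprm_up_plus_dag', 'rprm_down_plus_dag', 'rprm_up_minus_dag',
             'rprm_down_minus_dag', 'rprm_up_plus', 'rprm_down_plus',
             'rprm_up_minus', 'rprm_down_minus', 'dimer'):
    system.add_to_operators_to_update(name, site_op=name)
assert len(system.operators_to_update) == 9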
Returns the unit vector of the vector.
def unit_vector(vector): return vector / np.linalg.norm(vector)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unit_vector(vector):\n #print 'unit_vector'\n #print vector\n #print type(vector)\n #npvector = np.array(vector)\n return vector / np.linalg.norm(vector)", "def _get_unit_vector(self, v):\n return v / np.linalg.norm(v)", "def get_unit_vector(self, vector):\n return vector / la.norm(vector)", "def unit(self):\r\n return Vector(self.x/self.length(), self.y/self.length())", "def unit_vector(self,vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit():\n return Vec2d(0, 1)", "def getUnitVector(self):\n return Vector.createFromPolar(1, self.angle)", "def unit_vector(self, vector):\n return vector / np.linalg.norm(vector)", "def _unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def cal_unit_vec(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def unit_vector(vector):\n return vector / np.linalg.norm(vector)", "def to_unit(self):\n if self.is_zero():\n return Vector(0,0,0)\n else:\n magnitude = self.l2_norm()\n return Vector(self.x/magnitude, self.y/magnitude, self.z/magnitude)", "def unit_vector(vector):\n assert(vector != [0,0])\n return vector / np.linalg.norm(vector)", "def vec_unit( vec ):\r\n return np.divide( vec , np.linalg.norm( vec ) )", "def unit(vector: np.array) -> np.array:\n return np.array([*vector]) / np.sqrt((vector * vector).sum(axis=0))", "def unit_vec(v):\n vlen = np.linalg.norm(v)\n if np.isclose(vlen, 0):\n raise ValueError('Cannot make unit vector from zero vector.')\n else:\n return v / vlen", "def unit_vector(vector):\n return vector / max(np.linalg.norm(vector), 1e-10)", "def as_unit(self):\n new_vec = self.copy()\n new_vec.normalize()\n return new_vec", "def getNormalVector(self):\n vector = self.unit_vector\n vector.rotate(math.pi / 2)\n return vector", "def unit_vector(vector):\n unit_vector = np.zeros((len(vector), vector.shape[1]))\n norm = np.linalg.norm(vector, axis=1)\n ndim = vector.ndim\n\n if ndim == 1: # Handling of 1-dimensional array\n unit_vector = vector / norm\n elif ndim == 2: # Handling of 2-dimensional array\n for i in range(0, vector.shape[1]):\n unit_vector[:, i] = vector[:, i] / norm\n else:\n log.fatal(f\"Dimension of vector should be either 1- or 2-dimensional and not {ndim}-dimensional.\")\n\n return unit_vector", "def unit_vector(v):\n h = ((v[0]**2)+(v[1]**2))**0.5\n if h == 0:\n h = 0.000000000000001\n ua = v[0] / h\n ub = v[1] / h\n return (ua, ub)", "def unit_vector(vector):\n return 0 if vector[0] == 0 else vector[0]/abs(vector[0]), 0 if vector[1] == 0 else vector[1]/abs(vector[1])", "def unit_vector(vector):\n if not np.all((vector == 0)):\n return vector / np.linalg.norm(vector)\n else:\n return vector", "def unit_vector(vector):\n vector = np.array(vector)\n if np.linalg.norm(vector) <= 0.00010:\n normv = 1.0\n else:\n normv = np.linalg.norm(vector)\n 
return vector / normv", "def _unit_vector(pt0, pt1):\n dis_0_to_1 = sqrt((pt0[0] - pt1[0])**2 + (pt0[1] - pt1[1])**2)\n return (pt1[0] - pt0[0]) / dis_0_to_1, \\\n (pt1[1] - pt0[1]) / dis_0_to_1", "def tangeant_unit_vector(self, t):\n a = self.a0 + t * self.da\n ca = cos(a)\n sa = sin(a)\n v = Vector((sa, -ca))\n if self.da > 0:\n v = -v\n return v", "def uv(vec):\n return vec / sqrt(dot(vec, vec))", "def uw(self):\n return sm.unitvec(self.w)", "def getNormalizedVector(self):\n return self.scalarMultiplication(self.norm() ** -1.0)", "def unit_vector(vec_in):\n if vec_in.ndim == 1:\n out = _unit_vector_single(vec_in)\n elif vec_in.ndim == 2:\n out = _unit_vector_multi(vec_in)\n else:\n raise ValueError(\n \"incorrect arg shape; must be 1-d or 2-d, yours is %d-d\"\n % (vec_in.ndim)\n )\n return out", "def unit_vector(i, j):\n magnitude = np.sqrt(i ** 2 + j ** 2)\n unit_i = i / magnitude\n unit_j = j / magnitude\n\n return unit_i, unit_j", "def unit_vector(self,vector):\n\t\tunit_vector_query=0;\n\t\tfor word in vector:\n\t\t\tunit_vector_query += vector[word]*vector[word];\n\t\tunit_vector_query = math.sqrt(unit_vector_query);\n\t\treturn unit_vector_query", "def unit(vector):\r\n result = [[0] for row in range(len(vector))]\r\n # creates the initial value for result of this function, which is a vector full of 0s with the same lenght of a given vector \r\n for z in range(len(vector)):\r\n # for loop which continues as long as there are more elements in the vector \r\n result[z] = vector[z]/norm(vector)\r\n # the new result being each element in the list being divided by the norm \r\n return result", "def _unitVector(self, data: numpy.array, axis: Optional[int] = None, out: Optional[numpy.array] = None) -> numpy.array:\n if out is None:\n data = numpy.array(data, dtype = numpy.float64, copy = True)\n if data.ndim == 1:\n data /= math.sqrt(numpy.dot(data, data))\n return data\n else:\n if out is not data:\n out[:] = numpy.array(data, copy = False)\n data = out\n length = numpy.atleast_1d(numpy.sum(data*data, axis))\n numpy.sqrt(length, length)\n if axis is not None:\n length = numpy.expand_dims(length, axis)\n data /= length\n if out is None:\n return data", "def vector(self) -> Vector:\n return self._normal * self._distance_from_origin", "def unit_vector(data, axis=None, out=None):\r\n if out is None:\r\n data = numpy.array(data, dtype=numpy.float64, copy=True)\r\n if data.ndim == 1:\r\n data /= math.sqrt(numpy.dot(data, data))\r\n return data\r\n else:\r\n if out is not data:\r\n out[:] = numpy.array(data, copy=False)\r\n data = out\r\n length = numpy.atleast_1d(numpy.sum(data*data, axis))\r\n numpy.sqrt(length, length)\r\n if axis is not None:\r\n length = numpy.expand_dims(length, axis)\r\n data /= length\r\n if out is None:\r\n return data", "def unit_vector(data, axis=None, out=None):\r\n if out is None:\r\n data = np.array(data, dtype=np.float64, copy=True)\r\n if data.ndim == 1:\r\n data /= math.sqrt(np.dot(data, data))\r\n return data\r\n else:\r\n if out is not data:\r\n out[:] = np.array(data, copy=False)\r\n data = out\r\n length = np.atleast_1d(np.sum(data*data, axis))\r\n np.sqrt(length, length)\r\n if axis is not None:\r\n length = np.expand_dims(length, axis)\r\n data /= length\r\n if out is None:\r\n return data", "def unit_vector(data, axis=None, out=None):\n if out is None:\n data = np.array(data, dtype=np.float64, copy=True)\n if data.ndim == 1:\n data /= math.sqrt(np.dot(data, data))\n return data\n else:\n if out is not data:\n out[:] = np.array(data, copy=False)\n data = 
out\n length = np.atleast_1d(np.sum(data*data, axis))\n np.sqrt(length, length)\n if axis is not None:\n length = np.expand_dims(length, axis)\n data /= length\n if out is None:\n return data", "def normalized(self):\n try:\n m = abs(self)\n return self / m\n except ZeroDivisionError as e:\n raise Exception(\"Attempted to normalize a zero vector, return a unit vector at zero degrees\") from e\n # return Vector(1, 0)", "def unit(direction):\r\n return Vector(0, -1).rotate(direction)", "def vector(self):\n return self.__vector", "def unitize_vector(vector):\n # Section 1: Ensure that a vector was given\n if len(vector) > 1 and len(vector[0]) > 1:\n raise ArithmeticError(\n 'Vector must be a row or column vector.')\n\n # Section 2: Determine vector magnitude\n rows = len(vector); cols = len(vector[0])\n mag = 0\n for row in vector:\n for value in row:\n mag += value ** 2\n mag = mag ** 0.5\n\n # Section 3: Make a copy of vector\n new = copy_matrix(vector)\n\n # Section 4: Unitize the copied vector\n for i in range(rows):\n for j in range(cols):\n new[i][j] = new[i][j] / mag\n\n return new", "def Normal(self):\n return Vector(self.normal)", "def normalized(self):\n len = self.length\n return Vector(self.x / len, self.y / len)", "def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d", "def unit_vector(a, b):\n tmp = _np.zeros(b)\n tmp[a] = 1\n return tmp", "def unit_vectors(x):\n xnew = x.copy()\n for v in range(x.shape[-1]):\n xnew[:, v] = x[:, v] / np.linalg.norm(x[:, v])\n return xnew", "def sphere_to_unit(v):\n sin_theta = math.sin(v[0])\n cos_theta = math.cos(v[0])\n return (sin_theta * math.cos(v[1]),\n sin_theta * math.sin(v[1]),\n cos_theta)", "def get_normalized_vector(vector):\n # WARN: Zero length may cause problems!\n vector_lenght = get_vector_length(vector)\n if vector_lenght != 0:\n return np.divide(vector, get_vector_length(vector))\n else:\n return [0, 0]", "def vector(self):\n \n v_list = Householder.triangle_operation(self)[1]\n \n return(v_list)", "def vector(self):\n return self.q[1:4]", "def __call__(self):\n return self._vector", "def v(self):\n return Vector2(self.position)", "def direction(self):\n len = self.length()\n if len == 0.0:\n uvec = pos.Pos(np.transpose(np.array([0, 0, 0])))\n else:\n uvec = pos.Pos(np.transpose(np.array([(self.end.x - self.start.x) / len,\n (self.end.y - self.start.y) / len,\n (self.end.z - self.start.z) / len])))\n return uvec", "def normal(self) -> Vector:\n return self._normal", "def normal_at(self, u, v, world=True):\n u = u * pi\n v = v * PI2\n x = cos(u) * sin(v)\n y = sin(u) * sin(v)\n z = cos(v)\n normal = Vector(x, y, z)\n if world:\n normal.transform(self.transformation)\n return normal", "def normalized(self):\n length = self.length\n if length != 0:\n return self/length\n return Vec2d(self)", "def normalize(self):\n return Vector(self.args + []) / self.magnitude()", "def unit_sun_r(sun_pos):\n return sun_pos / vector_magnitude(sun_pos[0], sun_pos[1], sun_pos[2])", "def __truediv__(self, factor):\n if type(factor) == Vector:\n raise NotImplementedError\n else:\n return Vector([c / factor for c in self.components])", "def magni(vector):\n return(np.linalg.norm(vector))", "def __rmul__(self, el2):\n if type(el2) is float or type(el2) is int:\n return vector(el2 * self.x, el2 * self.y, el2 * self.z)\n elif type(el2) is vector:\n return 
vector(el2.y * self.z - el2.z * self.y,\n el2.z * self.x - el2.x * self.z,\n el2.x * self.y - el2.y * self.x)\n else:\n raise TypeError('Cannot multiply a vector with something'\n 'that is neither a vector, a float or an int')", "def norm(vec):\n vel = numpy.sqrt(numpy.dot(vec,vec))\n return vel", "def normalized(first):\n if isinstance(first,FreeCAD.Vector):\n l=length(first)\n return FreeCAD.Vector(first.x/l, first.y/l, first.z/l)", "def vector(self, base_ring=None):\n if (base_ring is None) or (base_ring is self._base_ring):\n return self._vector\n else:\n return vector(base_ring, self._vector)", "def unit(self):\n # type: () -> PositionUnit\n return self._unit", "def get_channel_v_unit(self)->float:\n return self.__channel_v_unit", "def AsVector(self) -> ngsolve.la.BaseVector:", "def _get_unit_factor(self, unit: str) -> np.ndarray:\n\n unit_factors = {\n 'vx': np.array((1, 1, 1)),\n 'nm': np.array(self.parameters.scale),\n 'um': np.array(self.parameters.scale)/1000\n }\n assert unit in unit_factors.keys(), 'Invalid unit'\n unit_factor = unit_factors[unit]\n\n return unit_factor", "def _normal_vector(o, p0_3d, p1_3d):\n # The vector between middle point of v1-v2 and object center location\n # is the normal vector I'm looking for\n vn = p0_3d.lerp(p1_3d, 0.5) - o.matrix_world.translation\n # normalize so I can to length computation on it\n vn.normalize()\n return vn", "def normalize(self): # Function is fucked TODO\n l = self.length()\n for i in range(0, len(self.coords)):\n self.coords[i] /= l\n return self\n # return Vector(list([0 for i in range(len(v.coords))]))\n\n # if round(self.length() == 0):\n # s = 1 / self.length()\n # return self * s\n # else:\n # return Vector(list([0 for i in range(len(v.coords))]))", "def as_vector(self):\n return self.pdm.as_vector()", "def unit(x):\n\tl = sum([i**2 for i in x])**0.5\n\treturn [xi/l for xi in x]", "def __abs__(self):\n return Vector.createFromPoint(self).norm", "def vector_perp(v):\n assert len(v) == 2\n x, y = v\n return Vector(-y, x)", "def getVector(self):\n return Vector.createFromTwoPoints(self.p1, self.p2)", "def normal(self) -> Vec:\n return abs(self.up_axis.cross(self.forward()))", "def random_vector():\n\n import numpy as np\n\n zeta = np.random.rand(2) # Two uniformly sampled random numbers in range (0,1)\n c = 2.0*zeta[0] - 1.0 # Random cos(theta) uniformly sampled in range (-1,+1)\n if c >= 1.0: # Guard against very small chance of roundoff error\n s = 0.0 # Set sin(theta) to zero\n else:\n s = np.sqrt(1.0-c**2) # Calculate sin(theta) from cos(theta), always positive\n\n phi = zeta[1] * 2.0*np.pi # Random angle uniformly sampled in range (0,2*pi)\n\n return np.array ( ( s*np.cos(phi), s*np.sin(phi), c ), dtype=np.float_ ) # Random unit vector", "def vector_component(u, v):\n x = dot_vectors(u, v) / length_vector_sqrd(v)\n return scale_vector(v, x)", "def AsVector(self) -> BaseVector:", "def vector(x, y, z):\n return point_or_vector(x,y,z,0.0)", "def get_ucm_vec(p0=None, p1=None):\n if p0 is None:\n p0 = np.array([25, 100])\n if p1 is None:\n p1 = np.array([100, 25])\n parallel = p1 - p0\n parallel = parallel / np.linalg.norm(parallel) # Normalize.\n return parallel", "def magnitude(self): # @todo @caution check: something wrong?\n\n return (math.sqrt(reduce(lambda x, y: x+y,\n [x**2 for x in self.vector])))", "def normalize(self, vec):\n length = math.sqrt( vec[0,0]*vec[0,0] + vec[0,1]*vec[0,1] + vec[0,2]*vec[0,2] )\n vnorm = vec / length\n return vnorm", "def __rmul__(self,a):\n return Vector(self.x*a,self.y*a)\n pass", "def 
dirVector(self,p1,p2):\n v=p2-p1\n l=v.Length\n return self.toMatrix(v)/l", "def unit_vector_stream(stream):\n return stream.map(lambda x: x / np.linalg.norm(x, axis=-1)[...,None])" ]
[ "0.8355314", "0.8344175", "0.83402044", "0.83041203", "0.8288785", "0.82710177", "0.8263843", "0.8237869", "0.8219826", "0.8203201", "0.8157419", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.80363405", "0.8024346", "0.7914348", "0.7900733", "0.78349787", "0.7770651", "0.77661633", "0.77234215", "0.77207124", "0.76531506", "0.7631576", "0.7595943", "0.7595422", "0.7572932", "0.7451418", "0.7346349", "0.7336356", "0.72942096", "0.7267235", "0.7236896", "0.7228997", "0.7191048", "0.718393", "0.7158418", "0.71065384", "0.7056104", "0.7035316", "0.7014858", "0.69491696", "0.6938983", "0.685306", "0.68301356", "0.67822766", "0.6776589", "0.66083777", "0.65908873", "0.6557686", "0.6555904", "0.653139", "0.6499029", "0.64957076", "0.64857525", "0.6430134", "0.64204764", "0.6407004", "0.6403999", "0.6399956", "0.63776267", "0.6363924", "0.63528776", "0.6321036", "0.62962717", "0.62949187", "0.6268904", "0.6238375", "0.6219153", "0.6213419", "0.6211758", "0.6211182", "0.6210066", "0.61896944", "0.618916", "0.61679196", "0.6164343", "0.6158671", "0.61572766", "0.6149799", "0.61477876", "0.6145891", "0.61415035", "0.6137127", "0.6131029", "0.61207145", "0.611959", "0.6116146", "0.61125845" ]
0.8180198
12
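Note: a short usage sketch for the unit_vector one-liner above. The zero-vector guard is an addition, following the pattern several of this record's negatives use, since the bare one-liner divides by a zero norm for the zero vector.

import numpy as np

def unit_vector(vector):
    # Normalize to unit length; return the zero vector unchanged instead
    # of dividing by a zero norm (which would produce NaNs).
    norm = np.linalg.norm(vector)
    return vector if norm == 0 else vector / norm

v = np.array([3.0, 4.0])
u = unit_vector(v)                      # array([0.6, 0.8])
assert np.isclose(np.linalg.norm(u), 1.0)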
Reduces a series of points to a simplified version that loses detail, but maintains the general shape of the series.
def rdp(points, epsilon): dmax = 0.0 index = 0 for i in range(1, len(points) - 1): d = point_line_distance(points[i], points[0], points[-1]) if d > dmax: index = i dmax = d if dmax >= epsilon: results = rdp(points[:index+1], epsilon)[:-1] + rdp(points[index:], epsilon) else: results = [points[0], points[-1]] return results
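Note: a self-contained sketch of the Ramer-Douglas-Peucker document above. The point_line_distance helper is referenced but not shown in this record, so the perpendicular-distance implementation here is an assumption; the rdp body is restated from the document so the sketch runs on its own.

import math

def point_line_distance(point, start, end):
    # Assumed helper: perpendicular distance from point to the line
    # through start and end (degenerates to point distance if they match).
    if start == end:
        return math.hypot(point[0] - start[0], point[1] - start[1])
    num = abs((end[0] - start[0]) * (start[1] - point[1])
              - (start[0] - point[0]) * (end[1] - start[1]))
    den = math.hypot(end[0] - start[0], end[1] - start[1])
    return num / den

def rdp(points, epsilon):
    # Keep the point farthest from the chord; recurse on both halves.
    dmax, index = 0.0, 0
    for i in range(1, len(points) - 1):
        d = point_line_distance(points[i], points[0], points[-1])
        if d > dmax:
            index, dmax = i, d
    if dmax >= epsilon:
        return rdp(points[:index + 1], epsilon)[:-1] + rdp(points[index:], epsilon)
    return [points[0], points[-1]]

points = [(0.0, 0.0), (1.0, 0.1), (2.0, -0.1), (3.0, 5.0), (4.0, 6.0), (5.0, 7.0)]
print(rdp(points, 1.0))  # drops the near-collinear interior points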
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_real_series(self, data: pd.Series) -> pd.Series:\n ...", "def rescale(self, points, inplace=True):\n if inplace == False:\n points = points.copy()\n points *= self.scale_factor\n points += self.origin\n return points", "def reprocessSeries(self, tiltseriesdata):\n\t\treturn None", "def normalize_series(series):\n return (series - series.mean()) / (series.max() - series.min())", "def clean_series(y,smooth = False,p = 6.25,logsmooth = True):\n\n # Remove null values in the middle of the series using interpolate\n # First null values are not interpolated but later filled by 0.0\n y = y.replace(0.0,np.NaN).interpolate().fillna(0.0)\n\n # Smooth using Hodrick Prescott filter with parameter p\n if smooth:\n y = smooth_series(y,p)\n y.loc[(y < 1) & (y > 0)] = 1\n\n if logsmooth:\n y = y.map(lambda x : np.log(1+x))\n y = smooth_series(y,p)\n y = y.map(lambda x : np.exp(x) - 1)\n y.loc[(y < 1) & (y > 0)] = 1\n y.loc[y < 0] = 0\n\n return y", "def scale(self, points, inplace=True):\n points = np.array(points).astype(float)\n if inplace==False:\n points = points.copy()\n # if len(points.shape) == 1:\n # points = points[None,:]\n # if len(points.shape) != 2:\n # logger.error(\"cannot scale array of dimensions\".format(len(points.shape)))\n points -= self.origin\n points /= self.scale_factor\n return points", "def simplify_line_vw(points, small_area=100):\r\n while len(points) > 3:\r\n \r\n # For each coordinate that forms the apex of a two-segment\r\n # triangle, find the area of that triangle and put it into a list\r\n # along with the index, ordered from smallest to largest.\r\n \r\n popped, preserved = set(), set()\r\n \r\n triples = zip(points[:-2], points[1:-1], points[2:])\r\n triangles = [Polygon((p1, p2, p3)) for (p1, p2, p3) in triples]\r\n areas = [(triangle.area, index) for (index, triangle) in enumerate(triangles)]\r\n \r\n # Reduce any segments that makes a triangle whose area is below\r\n # the minimum threshold, starting with the smallest and working up.\r\n # Mark segments to be preserved until the next iteration.\r\n\r\n for (area, index) in sorted(areas):\r\n if area > small_area:\r\n # nothing more can be removed on this iteration\r\n break\r\n \r\n if (index + 1) in preserved:\r\n # current index is too close to a previously-preserved one\r\n continue\r\n \r\n preserved.add(index)\r\n popped.add(index + 1)\r\n preserved.add(index + 2)\r\n \r\n if not popped:\r\n # nothing was removed so we are done\r\n break\r\n \r\n # reduce the line, then try again\r\n points = [point for (index, point) in enumerate(points) if index not in popped]\r\n \r\n return list(points)", "def _force_rescale(self, setpoint_x, setpoint_y):", "def uniformize(self):\n\n self.len = len(self.x)\n\n if self.len > 1:\n # comput length of the shape:\n shape_length, scale = self.euclidian_length()\n\n # find new points:\n new_shape = Stroke()\n new_shape.x = []\n new_shape.y = []\n step = shape_length / float(self.len)\n biggest_smoller_point = 0\n new_shape.append(self.x[0], self.y[0])\n for i in 1 + np.array(range(len(self.x) - 1)):\n try:\n while i * step > scale[biggest_smoller_point]:\n biggest_smoller_point += 1\n\n biggest_smoller_point -= 1\n x0 = self.x[biggest_smoller_point]\n y0 = self.y[biggest_smoller_point]\n x1 = self.x[biggest_smoller_point + 1]\n y1 = self.y[biggest_smoller_point + 1]\n diff = float(i * step - scale[biggest_smoller_point])\n dist = float(scale[biggest_smoller_point + 1] - scale[biggest_smoller_point])\n new_x = x0 + diff * (x1 - x0) / dist\n new_y = y0 + diff * (y1 - y0) / 
dist\n new_shape.append(new_x, new_y)\n\n except IndexError:\n print i * step\n print biggest_smoller_point\n print scale\n # new_shape.append(self.x[-1], self.y[-1])\n\n\n self.x = new_shape.x\n self.y = new_shape.y\n self.len = new_shape.len", "def normalise_series(to_normalise: pd.Series) -> pd.Series:\n \n # return (to_normalise - to_normalise.mean()) / to_normalise.std() # 0 mean and unit standard deviation\n return to_normalise / to_normalise.std() # positive and unit standard deviation", "def lift(point):\n return gs.copy(point)", "def transform_series(obj):\n vals = obj.values\n return transform_array(vals)", "def remove_units_series(series):\n res = copy(series)\n for label, value in res.items():\n res[label] = magnitude(value)\n return res", "def clearLineshape(self):\n self.x = np.arange(self.start,self.stop,round(self.step,4))\n self.lineshape = np.zeros(len(self.x))", "def fit_transform(self,\n series: Union[TimeSeries, Sequence[TimeSeries]],\n *args,\n **kwargs) -> Union[TimeSeries, List[TimeSeries]]:\n return self.fit(series).transform(series, *args, **kwargs)", "def l1(self, points):\n new_points = []\n sum = []\n for point in points:\n for i in range(len(point.coordinates)):\n if (i < len(sum)):\n sum[i] += abs(point.coordinates[i])\n else:\n sum.append(abs(point.coordinates[i]))\n for point in points:\n new_coordinates = point.coordinates\n new_coordinates = [(new_coordinates[i]/ sum[i]) for i in range(len(point.coordinates))]\n new_points.append(Point(point.name, new_coordinates, point.label))\n return new_points", "def _prepPointsForSegments(points):\n while 1:\n point = points[-1]\n if point.segmentType:\n break\n else:\n point = points.pop()\n points.insert(0, point)\n continue\n break", "def _reversePoints(points):\n # copy the points\n points = _copyPoints(points)\n # find the first on curve type and recycle\n # it for the last on curve type\n firstOnCurve = None\n for index, point in enumerate(points):\n if point.segmentType is not None:\n firstOnCurve = index\n break\n lastSegmentType = points[firstOnCurve].segmentType\n # reverse the points\n points = reversed(points)\n # work through the reversed remaining points\n final = []\n for point in points:\n segmentType = point.segmentType\n if segmentType is not None:\n point.segmentType = lastSegmentType\n lastSegmentType = segmentType\n final.append(point)\n # move any offcurves at the end of the points\n # to the start of the points\n _prepPointsForSegments(final)\n # done\n return final", "def transform(self,points):\n new_points = []\n for p in points:\n new_coordinates=p.coordinates\n new_coordinates = [(new_coordinates[i] - self.min_coordinate[i]) /\n (self.max_coordinate[i]-self.min_coordinate[i]) for i in range(len(p.coordinates))]\n new_points.append(Point(p.name, new_coordinates, p.label))\n return new_points", "def toPointwise_withLinearXYs( self, **kwargs ) :\n\n kwargs['removeOverAdjustedPoints'] = True\n xys = regionsModule.Regions1d.toPointwise_withLinearXYs( self, **kwargs )\n return( XYs1d( data = xys, axes = xys.axes ) )", "def _standardize(self):\n deviation = np.std(self.series)\n self.series = (self.series - np.mean(self.series)) / (deviation if deviation != 0 else 1)", "def simplify(self):\n\n from podpac.core.coordinates.uniform_coordinates1d import UniformCoordinates1d\n\n if self.is_uniform:\n return UniformCoordinates1d(self.start, self.stop, self.step, **self.properties)\n\n return self", "def to_series(func):\n\n @wraps(func)\n def add_series(center, home_center):\n normed_center = 
func(center.x, center.y, home_center)\n return pd.Series(normed_center, index=[\"x_normed\", \"y_normed\"])\n\n return add_series", "def downsampleShape(self, numDesiredPoints):\n\n if len(self.x) > 2:\n t_current_x = np.linspace(0, 1, len(self.x))\n t_current_y = np.linspace(0, 1, len(self.y))\n t_desired_x = np.linspace(0, 1, numDesiredPoints)\n t_desired_y = np.linspace(0, 1, numDesiredPoints)\n f = interpolate.interp1d(t_current_x, self.x, kind='linear')\n self.x = f(t_desired_x).tolist()\n f = interpolate.interp1d(t_current_y, self.y, kind='linear')\n self.y = f(t_desired_y).tolist()\n\n self.len = numDesiredPoints", "def to_work_series(self, data: pd.Series) -> pd.Series:\n ...", "def onChartRemoveSeries(self):\n self.chart().removeAllSeries()\n self.series = {}\n self.yaxis = {}\n self.pen = {}\n self.ymin = {}\n self.ymax = {}", "def SweepSeries(*args, **kwargs):\n if args or kwargs:\n underride(kwargs, dtype=float)\n series = pd.Series(*args, **kwargs)\n else:\n series = pd.Series([], dtype=np.float64)\n\n series.index.name = 'Parameter'\n if 'name' not in kwargs:\n series.name = 'Metric'\n return series", "def remove_point(x): # 'lin' 'log'\n dif = np.diff(x) / x[1:]\n idx = np.argmin(dif)\n xn = np.delete(x, idx + 1)\n if idx+2 == len(x):\n logger.debug(' Remove right point {} {:.8e} [dx= {:.8e}] '.format(idx+1, x[idx + 1], dif[idx]))\n else:\n logger.debug(' Remove point {} {:.8e} [dx= {:.8e}] next: {:.8e}'.format(idx+1, x[idx + 1], dif[idx], x[idx + 2])) \n return xn", "def squeeze(self):\n remove_axes = []\n for axes_ix, axes_value in enumerate(self.coords):\n if len(axes_value) == 1:\n remove_axes.append(axes_ix)\n\n reverse_remove_axes = remove_axes[::-1]\n for index_ix, index_value in enumerate(reverse_remove_axes):\n self.coords.pop(index_value)\n self.dims.pop(index_value)\n self.values = np.squeeze(self.values)", "def series(self, series):\n\n self._series = series", "def apply_remove_point_rules(self, coords):\n return True", "def _copyPoints(points):\n copied = [point.copy() for point in points]\n return copied", "def applyToPoints(self, points):\n return [point + self for point in points]", "def inverse_compress(self, pieces, start):\n \n time_series = [start]\n # stitch linear piece onto last\n for j in range(0, len(pieces)):\n x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]\n #print(x)\n y = time_series[-1] + x\n time_series = time_series + y[1:].tolist()\n\n return time_series", "def inverse_compress(self, pieces, start):\n \n time_series = [start]\n # stitch linear piece onto last\n for j in range(0, len(pieces)):\n x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]\n #print(x)\n y = time_series[-1] + x\n time_series = time_series + y[1:].tolist()\n\n return time_series", "def _create_squeeze(cls, onnx_node, inputs, opset_version):\n axes = onnx_node.getattr(\"axes\")\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axes)", "def mirror_points_point(points, mirror):\n return [mirror_point_point(point, mirror) for point in points]", "def reconstructXY_NoiseFree(self, inputs):\n return (self.reconstructX(inputs),\n self.reconstructY(inputs))", "def _initial_conversion(X: Any) -> TimeSeriesInstances:\n if isinstance(X, np.ndarray) and X.ndim == 2:\n X = X.reshape(X.shape[0], 1, X.shape[1])\n return X", "def fix(points, min_required_distance=1.0):\n # TODO (4) could probably be simpler and better\n parr = np.array(points).tolist()\n points_fixed = []\n while len(parr) > 0:\n p = parr.pop(0)\n # 
Search from most recently added points\n inserted = False\n for pf in points_fixed[::-1]:\n # If one is close to the current point, we remove it and insert their average\n if not inserted and distance(p, pf) < min_required_distance:\n i = points_fixed.index(pf)\n points_fixed.remove(pf)\n points_fixed.insert(i, np.average([p, pf], 0).tolist())\n inserted = True\n # If we haven't removed any point (new point is far away from old ones) we append it\n if not inserted:\n points_fixed.append(p)\n return np.asarray(points_fixed)", "def get_adjusted_points(self) -> Sequence[Point]:\n if not self._adjusted_cache_dirty:\n return self._adjusted_points # type: ignore\n\n def _adjust_point(point) -> Point:\n x, y = point\n\n x *= self.scale[0]\n y *= self.scale[1]\n\n return (x + self.position[0], y + self.position[1])\n\n self._adjusted_points = [_adjust_point(point) for point in self.points]\n self._adjusted_cache_dirty = False\n return self._adjusted_points # type: ignore [return-value]", "def data_preprocessing(points):\n mean_coords = points.mean(0)\n points -= mean_coords\n \n max_norm = np.max(np.linalg.norm(points, axis = 1))\n points /= max_norm\n\n return points, mean_coords, max_norm", "def ravel(self, order='C'):\n result = super(self.__class__, self).ravel(order)\n result.nsamples = None\n return result", "def resize_points(x, n: int, mode: str = 'lin'):\n n_old = len(x)\n xn = np.copy(x)\n if n == n_old: # nothing to do, return the copy\n return xn\n\n if n > n_old:\n f = lambda xxx: add_point(xxx, mode=mode)\n else:\n f = lambda xxx: remove_point(xxx)\n\n for i in range(abs(n - n_old)):\n xn = f(xn)\n return xn", "def _downcast_numeric(series, allow_categorical=allow_categorical):\r\n if pd.api.types.is_sparse(series.dtype) is True:\r\n return series\r\n elif pd.api.types.is_numeric_dtype(series.dtype) is False:\r\n if pd.api.types.is_datetime64_any_dtype(series.dtype):\r\n return series\r\n else:\r\n if allow_categorical:\r\n return series\r\n else:\r\n codes, uniques = series.factorize()\r\n series = pd.Series(data=codes, index=series.index)\r\n series = _downcast_numeric(series)\r\n return series\r\n else:\r\n series = pd.to_numeric(series, downcast=\"integer\")\r\n if pd.api.types.is_float_dtype(series.dtype):\r\n series = series.astype(float_dtype)\r\n return series", "def toset(series: pd.Series) -> Set:\n\n return set(series.tolist())", "def testEmptyPointsStillCreatesSeries(self):\n chart = self.GetChart()\n self.assertEqual(0, len(chart.data))\n data = []\n chart = self.GetChart(data)\n self.assertEqual(1, len(chart.data))\n self.assertEqual(0, len(chart.data[0].data))\n # This is the use case we are trying to serve: adding points later.\n data.append(0)\n self.assertEqual(1, len(chart.data[0].data))", "def clean(point):\r\n tmp = []\r\n for pts in point:\r\n tmp.append(float('%.5f' % pts))\r\n return (tmp[0], tmp[1], tmp[2])", "def spread(self, n=2):\n for point in self.points:\n point *= n", "def fit_transform(self, series):\n series = np.array(series).astype(np.float64)\n pieces = np.array(compress(ts=series, tol=self.tol, max_len=self.max_len))\n strings = self.digitize(pieces[:,0:2])\n self.compression_rate = pieces.shape[0] / series.shape[0]\n self.digitization_rate = self.centers.shape[0] / pieces.shape[0]\n if self.verbose in [1, 2]:\n print(\"\"\"Compression: Reduced series of length {0} to {1} segments.\"\"\".format(series.shape[0], pieces.shape[0]),\n \"\"\"Digitization: Reduced {} pieces\"\"\".format(len(strings)), \"to\", self.centers.shape[0], \"symbols.\") \n # 
strings = ''.join(strings)\n return strings", "def fit_transform(self, series):\n series = np.array(series).astype(np.float64)\n pieces = np.array(compress(ts=series, tol=self.tol, max_len=self.max_len))\n strings = self.digitize(pieces[:,0:2])\n self.compression_rate = pieces.shape[0] / series.shape[0]\n self.digitization_rate = self.centers.shape[0] / pieces.shape[0]\n if self.verbose in [1, 2]:\n print(\"\"\"Compression: Reduced series of length {0} to {1} segments.\"\"\".format(series.shape[0], pieces.shape[0]),\n \"\"\"Digitization: Reduced {} pieces\"\"\".format(len(strings)), \"to\", self.centers.shape[0], \"symbols.\") \n # strings = ''.join(strings)\n return strings", "def close(points: np.matrix):\n return append(points, points[0])", "def expand_series(ser, columns):\n return ser.to_frame(columns[0]).reindex(columns=columns).ffill(axis=1)", "def stage_one_preprocessing(data: pd.Series) -> pd.Series:\n data_ = data.dropna()\n print('ascii')\n data_ = remove_non_ascii(data)\n print('lower')\n data_ = to_lowercase(data_)\n print('slash')\n data_ = underscore_and_slash_to_space(data_)\n print('ellipse')\n data_ = remove_ellipses(data_)\n print('white')\n data_ = shrink_whitespace(data_)\n #print('contracts')\n #data_ = remove_contractions(data_)\n return data_", "def rescale(self):\n # forecast on real data, don't need this anymore\n pass", "def _lines_from_points(points, points_form_closed_loop=False):\n if points_form_closed_loop:\n return list(zip(points[:-1],points[1:]))\n return list(zip(points, tuple(points[1:])+(points[0],)))", "def normalize_wrt_x(self):\n\n x_min = min(self.x)\n x_max = max(self.x)\n y_min = min(self.y)\n\n x_range = x_max - x_min\n\n x = np.array(self.x)\n y = np.array(self.y)\n x -= x_min\n y -= y_min\n x = x / float(x_range)\n y = y / float(x_range)\n\n self.x = x.tolist()\n self.y = y.tolist()", "def series_view(self, **kwargs): # noqa: PR02\n return SeriesDefault.register(pandas.Series.view)(self, **kwargs)", "def reset_to_origin(self):\n if len(self.segments) == 0:\n return\n self.segments[0].reset_transforms()\n if len(self.segments) > 1:\n prev_pos = self.segments[0].point_at(self.segments[0].end_time)\n prev_rot = self.segments[0].tangent_rotation_matrix_at(self.segments[0].end_time)\n for segment in self.segments[1:-1]:\n end_time = segment.end_time\n if end_time == -1:\n # this should never arise since we chopped off the last segment in the loop\n # and only the last segment can be unbounded in a valid path\n raise Exception(\"Badly formatted piecewise path with unbounded length that isn't last segment\")\n segment.reset_transforms()\n segment.set_start_orientation_matrix(prev_rot)\n segment.set_start_position(prev_pos)\n\n prev_pos = segment.point_at(end_time)\n prev_rot = segment.tangent_rotation_matrix_at(end_time)\n\n self.segments[-1].reset_transforms()\n self.segments[-1].set_start_orientation_matrix(prev_rot)\n self.segments[-1].set_start_position(prev_pos)", "def apply(self, points):\n pshape = numpy.shape(points)\n homogeneous = 1\n if len(pshape) == 1:\n if pshape[0] == 3:\n points = numpy.array(numpy.concatenate((points, numpy.ones(1, 'f')), 1))\n homogeneous = 0\n elif len(pshape) == 2:\n if pshape[1] == 3:\n points = numpy.array(numpy.concatenate(\n (numpy.array(points), numpy.ones((pshape[0], 1), 'f')), 1))\n homogeneous = 0\n mtx = self.getMatrix((4, 4), transpose=1)\n newpoints = numpy.dot(points, mtx)\n if homogeneous:\n return newpoints\n else:\n # strip the final one off the coordinates\n if len(pshape) == 1:\n return 
newpoints[:3]\n else:\n newpoints = [x[:3] for x in newpoints]\n return newpoints", "def transform(self, X):\n\n X = X.copy()\n\n X[pd.isnull(X)] = self.fill\n\n return np.asarray(X)", "def _filter_nonmonotone_data_points(xs, ys):\n diffs = np.diff(xs)\n xs_new = [xs[0]]\n ys_new = [ys[0]]\n middle = int(len(xs)/2)\n sgn = 1 if xs[0] < xs[-1] else -1\n for i, dx in enumerate(diffs):\n if sgn*dx > 0.0:\n xs_new.append(xs[i+1])\n ys_new.append(ys[i+1])\n elif i > middle:\n xs_new[-1] = xs[i+1]\n ys_new[-1] = ys[i+1]\n return xs_new, ys_new", "def _extract_data_points_from_series(series: dict) -> List[dict]:\n data_points = series[\"generic:Obs\"]\n if type(data_points) != list:\n data_points = [data_points]\n return data_points", "def normalize(self):\n total = 0.0\n for i in range(0,self.npoints):\n total+=self.y[i]*self._dx\n for i in range(0,self.npoints):\n self.y[i]/=total\n return", "def float_series() -> pd.Series:\n series = pd.Series([(n/1000) for n in range(1001)])\n return series", "def regularize(self, point):\n regularized_point = self._iterate_over_factors(\"regularize\", {\"point\": point})\n return regularized_point", "def series_to_supervised(data, n_in=1, n_out=1, dropnan=True, stride=None, dates=False, leaks=True):\n df = pd.DataFrame(data)\n \n time = None\n if 'date' in df.columns:\n time = 'date'\n elif 'time' in df.columns:\n time = 'time'\n if time != None:\n df = df.drop([time], axis=1)\n \n if 'leak' in df.columns:\n df = df.drop(['leak'], axis=1) \n n_vars = df.shape[1]\n times_column = list()\n if dates and time != None:\n times_column = data[time]\n del data\n \n cols, names, pivots = list(), list(), list()\n \n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n\t# forecast sequence (t, t+1, ... 
t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n\t# put it all together\n agg = pd.concat(cols, axis=1)\n \n agg.columns = names\n\n #stride - delete windows\n if stride != None:\n indexes_to_drop = list()\n for i in range(stride, agg.shape[0], stride):\n print(\"index\", i)\n pivots += [i]\n \n onset = 0\n offset = pivots[0]\n for i in range(0, len(pivots)):\n print(\"onset\", onset)\n print(\"offset\", offset)\n to_drop = [ x for x in range(onset,offset)]\n indexes_to_drop += to_drop\n try:\n onset = pivots[i] + 1\n offset = pivots[i+1]\n \n except IndexError:\n onset = pivots[i] + 1\n offset = agg.shape[0]\n to_drop = [ x for x in range(onset,offset)]\n indexes_to_drop += to_drop\n \n \n \n print(\"indexes_to_drop\", indexes_to_drop)\n \n agg.drop(df.index[indexes_to_drop], inplace=True)\n \"\"\"\n if dates and time!=None:\n agg[time] = times_column\n \"\"\" \n # drop rows with NaN values \n if dropnan:\n agg.dropna(inplace=True)\n \n\n return agg", "def __repr__(self):\n\t\treturn repr([self.series[0],self.series[1],self.series[2],\"...\"])", "def remove(self, i):\n assert self.apply_remove_point_rules((self._ys[i], self._xs[i])), 'Removal rules are not satisfied'\n\n if len(self.get_raw_xs()) > 5:\n if self.is_settable:\n self._remove_xs(i)\n self._remove_ys(i)\n self.is_changed = True\n else:\n raise ValueError('graph '+str(self.name)+' is not is_settable')\n elif not self.is_raw_data:\n raise ValueError('Must be at least 5 points for interpolation.')", "def assignPointsToShapes(self):\n pointsCopy = self.mission['points'].copy()\n\n while len(pointsCopy):\n shape = []\n self.recursiveAddPointToShape(pointsCopy, [pointsCopy[0]], shape)\n shape.append(shape[0])\n self.mission['shapes'].append(shape)", "def denormalize(self, x):\n raise NotImplementedError", "def reset_S(self):\n self.S = [self._one_S(self.D[n]) for n in range(self.L + 1)]", "def transform_points(Points,R,t):\r\n return [transform_point(p,R,t) for p in Points]", "def change(self):\n return _n.reshape(self.next_x - self.x, self.original_shape)", "def simplify_basic(drawing, process=False, **kwargs):\n\n if any(entity.__class__.__name__ != 'Line'\n for entity in drawing.entities):\n log.debug('Skipping path containing entities other than `Line`')\n return drawing\n\n # we are going to do a bookkeeping to avoid having\n # to recompute literally everything when simplification is ran\n cache = copy.deepcopy(drawing._cache)\n\n # store new values\n vertices_new = collections.deque()\n entities_new = collections.deque()\n\n # avoid thrashing cache in loop\n scale = drawing.scale\n\n # loop through (n, 2) closed paths\n for discrete in drawing.discrete:\n # check to see if the closed entity is a circle\n circle = is_circle(discrete,\n scale=scale)\n if circle is not None:\n # the points are circular enough for our high standards\n # so replace them with a closed Arc entity\n entities_new.append(entities.Arc(points=np.arange(3) +\n len(vertices_new),\n closed=True))\n vertices_new.extend(circle)\n else:\n # not a circle, so clean up colinear segments\n # then save it as a single line entity\n points = merge_colinear(discrete, scale=scale)\n # references for new vertices\n indexes = np.arange(len(points)) + len(vertices_new)\n # discrete curves are always closed\n indexes[-1] = indexes[0]\n # append new vertices and entity\n entities_new.append(entities.Line(points=indexes))\n 
vertices_new.extend(points)\n\n # create the new drawing object\n simplified = type(drawing)(\n entities=entities_new,\n vertices=vertices_new,\n metadata=copy.deepcopy(drawing.metadata),\n process=process)\n # we have changed every path to a single closed entity\n # either a closed arc, or a closed line\n # so all closed paths are now represented by a single entity\n cache.cache.update({\n 'paths': np.arange(len(entities_new)).reshape((-1, 1)),\n 'path_valid': np.ones(len(entities_new), dtype=bool),\n 'dangling': np.array([])})\n\n # force recompute of exact bounds\n if 'bounds' in cache.cache:\n cache.cache.pop('bounds')\n\n simplified._cache = cache\n # set the cache ID so it won't dump when a value is requested\n simplified._cache.id_set()\n\n return simplified", "def toComplex(self):\n return (self._points[0].value(),\n self._points[1].value(),\n self._points[2].value())", "def smooth_curve(points, factor=0.8):\n\n smoothed_points = []\n for point in points:\n if smoothed_points:\n previous = smoothed_points[-1]\n smoothed_points.append(previous * factor + point * (1 - factor))\n else:\n smoothed_points.append(point)\n return smoothed_points", "def basic_series() -> pd.Series:\n series = pd.Series(range(1,6), name=\"Fred\")\n return series", "def detrend(self, polyorder=1, break_tolerance=10):\n lc = self.copy()\n half = lc.time.shape[0] // 2\n if half % 2 == 0:\n # add 1 if even\n half += 1\n return lc.flatten(\n window_length=half,\n polyorder=polyorder,\n break_tolerance=break_tolerance,\n )", "def to_series(self) -> pd.Series:\n df = self.to_dataframe(\"* values *\")\n dims = self.dims_list\n if len(dims) == 1:\n dims = dims[0]\n return df.set_index(dims)[\"* values *\"]", "def simplify_polyline_epsilon(points, epsilon):\n raise NotImplementedError(\"TODO: test this code\")\n\n if len(points) < 3:\n return points\n\n begin, end = points[0], points[-1]\n dist_sq = [segment_point_distance_sq(\n begin[0], begin[1], end[0], end[1], p[0], p[1])\n for p in points[1:-1]]\n\n maxdist = max(dist_sq)\n if maxdist < epsilon ** 2:\n return [begin, end]\n\n pos = dist_sq.index(maxdist)\n return (simplify_polyline_epsilon(points[:pos + 2], epsilon) +\n simplify_polyline_epsilon(points[pos + 1:], epsilon)[1:])", "def rapoint(rpoint):\r\n return [rpoint[0]*gv[\"globalscale\"]*(gv[\"fixedUR\"][0]-gv[\"fixedLL\"][0]),\r\n rpoint[1]*gv[\"globalscale\"]*(gv[\"fixedUR\"][1]-gv[\"fixedLL\"][1])]", "def clean(self):\n if self.reloading:\n self.cleaned = pd.concat(\n [self.raw[0: self.brkIdx1+1],\n self.raw[self.brkIdx3+1: self.brkIdx4+1]])\n else:\n self.cleaned = self.raw[0: self.brkIdx1+1]\n self.cleaned.reset_index(drop=True, inplace=True) # update idx\n # -- Cubic spline that passes through the data\n sigmaLog = np.log10(self.cleaned['stress'][1:])\n cs = CubicSpline(x=sigmaLog, y=self.cleaned['e'][1:])\n self.eSigmaV = float(cs(np.log10(self.sigmaV))) # void ratio at sigmaV\n return", "def invert( self ) :\n\n series_ = self.copy( )\n for l in xrange( 1, len( series_ ), 2 ) : series_.coefficients[l] *= -1\n return( series_ )", "def densify_line(points, distance):\r\n coords = [points[0]]\r\n \r\n for curr_coord in list(points)[1:]:\r\n prev_coord = coords[-1]\r\n \r\n dx, dy = curr_coord[0] - prev_coord[0], curr_coord[1] - prev_coord[1]\r\n steps = ceil(hypot(dx, dy) / distance)\r\n count = int(steps)\r\n \r\n while count:\r\n prev_coord = prev_coord[0] + dx/steps, prev_coord[1] + dy/steps\r\n coords.append(prev_coord)\r\n count -= 1\r\n \r\n return coords", "def x_nondim(self, x):\n x[0:4] /= 
self.r_scale\n return x", "def force_dtype(series: pd.Series, dtype: str) -> TYPE_ROW:\n\n conv_funcs = {\"bool\": bool,\n \"int\": int,\n \"float\": float,\n \"str\": str,\n \"datetime\": lambda x: pd.to_datetime(x).to_pydatetime()}\n\n conv_func = conv_funcs[dtype]\n\n return [conv_func(x) if not isinstance(x, NullValue) else NULL\n for x in series]", "def toPointwise_withLinearXYs( self, **kwargs ) :\n\n return( XYs1d( data = [ [ self.domainMin, self.value ], [ self.domainMax, self.value ] ], axes = self.axes ) )", "def reNumber(oldSeries, startNb):\n oldNb = oldSeries.unique().tolist()\n newNb = numpy.arange(startNb, startNb + len(oldNb)).tolist()\n newSeries = pandas.Series(numpy.repeat(numpy.nan, oldSeries.shape[0]))\n for i in range(0, len(oldNb)):\n newSeries[oldSeries == oldNb[i]] = newNb[i]\n changes = dict(zip(oldNb, newNb))\n return newSeries.astype('int64'), changes", "def reformat(x):\n x = x.permute(0, 2, 3, 1)\n N, D1, D2, Feat = x.size()\n x = x.view(N, D1 * D2, Feat)\n return x", "def reformat(x):\n x = x.permute(0, 2, 3, 1)\n N, D1, D2, Feat = x.size()\n x = x.view(N, D1 * D2, Feat)\n return x", "def embed_to_product(self, points):\n for point, factor in zip(points, self.factors):\n geomstats.errors.check_point_shape(point, factor)\n\n if self.default_point_type == \"vector\":\n points_ = []\n for point, factor in zip(points, self.factors):\n if gs.ndim(point) > len(factor.shape):\n batch_shape = get_batch_shape(factor, point)\n point = gs.reshape(point, batch_shape + (-1,))\n else:\n point = gs.flatten(point)\n\n points_.append(point)\n return gs.concatenate(points_, axis=-1)\n stacking_axis = -1 * len(self.shape)\n return gs.stack(points, axis=stacking_axis)", "def sgi(series: Series) -> Series:\n series = series.copy() # Create a copy to ensure series is untouched.\n\n # Loop over the months\n for month in range(1, 13):\n data = series[series.index.month == month]\n n = data.size # Number of observations\n pmin = 1 / (2 * n)\n pmax = 1 - pmin\n sgi_values = norm.ppf(linspace(pmin, pmax, n))\n series.loc[data.sort_values().index] = sgi_values\n return series", "def plot_scatter_points(self):\n self.plot(1)", "def _fit_iterator(self, series: Sequence[TimeSeries]) -> Iterator[Tuple[TimeSeries]]:\n return zip(series)", "def sgn_inplace(a):", "def set_involved_series(self, y):\n y = self.check_consistent_y(y)\n\n if self.indices is not None:\n self.series = []\n for index in self.indices:\n try:\n self.series.append(y[index])\n except IndexError:\n warnings.warn(\"'%d' index out of 'y' range. Max: '%d'. Ignoring this index...\"\n % (index, y.shape[0]-1))\n else:\n self.series = y", "def fix_point_arithmetic(self):\n\n return self._fixpntar", "def Shape(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_UnifySameDomain_Shape(self, *args)", "def as_series(self, arraylike: Iterable) -> pd.Series:\n return pd.Series(arraylike, index=self.data.index)", "def full_S(self):\n return kron_list([R.T.dot(R) for R in self.Rs])" ]
[ "0.6333831", "0.6145088", "0.5858357", "0.5856983", "0.55326074", "0.54662484", "0.5412836", "0.5344801", "0.5280518", "0.5280117", "0.5262521", "0.5240614", "0.5206454", "0.5202019", "0.5178632", "0.51716375", "0.51524824", "0.5152478", "0.5152375", "0.51393616", "0.5135507", "0.50821596", "0.50681764", "0.5053853", "0.5036855", "0.50331324", "0.5025736", "0.50207764", "0.49982825", "0.49873838", "0.4978671", "0.49763834", "0.49659926", "0.4963042", "0.4963042", "0.4924425", "0.49068776", "0.4898011", "0.4894841", "0.48856458", "0.4849617", "0.48483065", "0.48454958", "0.484245", "0.4835076", "0.48338574", "0.4826548", "0.48109773", "0.48081267", "0.48036283", "0.48036283", "0.4803623", "0.47920483", "0.4791728", "0.4777298", "0.4773197", "0.4771775", "0.4769226", "0.47685903", "0.47684622", "0.47606736", "0.47591007", "0.47528353", "0.4736441", "0.47344595", "0.47260028", "0.471728", "0.4715969", "0.4711902", "0.47018963", "0.4700703", "0.47005105", "0.4697216", "0.46761507", "0.4663943", "0.4662411", "0.46549168", "0.46471232", "0.46470952", "0.46467903", "0.46402287", "0.46391085", "0.46365958", "0.46348438", "0.46329793", "0.4627936", "0.4627267", "0.46257564", "0.46143976", "0.46066782", "0.46066782", "0.460621", "0.46049783", "0.460315", "0.45895115", "0.45862183", "0.4577261", "0.456667", "0.4565427", "0.45602196", "0.45582223" ]
0.0
-1
Test ExponentialFamily class initialization.
def test_exponential_family_init():
    D = 4
    N = 100
    exp_fam = ExponentialFamily(D)
    assert exp_fam.D == D
    assert exp_fam.support_layer is None
    assert exp_fam.D_eta == D

    with raises(TypeError):
        exp_fam = ExponentialFamily('foo')
    with raises(ValueError):
        exp_fam = ExponentialFamily(0)
    with raises(TypeError):
        exp_fam = ExponentialFamily(4, int)

    with raises(NotImplementedError):
        exp_fam.sample_eta(N)
    mu = np.zeros((D,))
    with raises(NotImplementedError):
        exp_fam.mu_to_eta(mu)
    eta = np.zeros((D,))
    with raises(NotImplementedError):
        exp_fam.eta_to_mu(eta)
    z = np.zeros((D,))
    with raises(NotImplementedError):
        exp_fam.T(z)
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test___init__(self):\n f0 = 5 * (np.random.rand(10, 5) - 0.5)\n ga = population.Evolver(f0, eval_one_max)\n self.assertTrue(hasattr(ga, 'register'))\n\n # should have called evalute\n self.assertEqual(ga.generations[-1].new, 0)\n\n # should have registered a default ranking function\n self.assertEqual(np.round(np.sum(ga.rank())), len(f0))", "def setUp(self):\n # Values copied from head of factors.py file, simulating initial import.\n factors._soe_prime_cache = [2, 3]\n factors._soe_not_prime_map = {9: 3}\n factors._soe_c = 5", "def __init__(self, expected, test_func):\n self._f = test_func\n self._exp = expected", "def test_epsf_build_invalid_fitter(self):\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=EPSFFitter, maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter(), maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter, maxiters=3)", "def setUp(self):\n self.employee = Employee('John', 'Doe', 50000)\n self.raise_amount = 20000", "def setUp(self):\n self.frequency = -2017.96\n self.E0_reac = -295.563\n self.E0_TS = -12.7411\n self.E0_prod = (-10.2664) + (-253.48)\n self.tunneling = Eckart(\n frequency=(self.frequency, \"cm^-1\"),\n E0_reac=(self.E0_reac, \"kJ/mol\"),\n E0_TS=(self.E0_TS, \"kJ/mol\"),\n E0_prod=(self.E0_prod, \"kJ/mol\"),\n )", "def test_init(self):\r\n c = AlphaDiversityCalc(observed_otus)\r\n self.assertEqual(c.Metric, observed_otus)\r\n self.assertEqual(c.Params, {})", "def test_sphere_init():\n Sphere(5)", "def __init__(self, alpha=1.0, epsilon=0.05, gamma=0.8, numTraining = 10):\n self.alpha = float(alpha)\n self.epsilon = float(epsilon)\n self.discount = float(gamma)\n self.numTraining = int(numTraining)", "def __init__(self, alpha=1.0, epsilon=0.05, gamma=0.8, numTraining=10):\n self.alpha = float(alpha)\n self.epsilon = float(epsilon)\n self.discount = float(gamma)\n self.numTraining = int(numTraining)", "def test_constructor(self):\n pass", "def setUpClass(cls):\n super(CephFSTests, cls).setUpClass()", "def test_init_error_handling(self):\n with pytest.raises(ValueError) as err:\n hll = HyperLogLog(2)\n assert err.value.message == \"k=2 should be in range [16, 65536]\"\n with pytest.raises(ValueError) as err:\n hll = HyperLogLog(2**17)\n assert err.value.message == \"k=131072 should be in range [16, 65536]\"\n hll = HyperLogLog(2**16)\n assert hll.k == 2**16\n hll = HyperLogLog64(2**17)\n assert hll.k == 2**17", "def setUpClass(cls):\n np.random.seed(2019)\n # So the 1 st row of the first random number array, random.rand(500, 3)\n # will be [0.90348221, 0.39308051, 0.62396996]\n # Accordingly, the first row of\n # coordinates = (0.5 - np.random.rand(500, 3)) * box_length\n # should be [-3.31690899, 0.87895379, -1.01912071]\n cls.sys_obj = monte_carlo.SystemSetup()\n cls.energy = energy.Energy()\n cls.parser = monte_carlo.initialize()\n cls.sim = monte_carlo.MonteCarlo(\n cls.sys_obj, cls.energy, cls.parser)\n np.random.seed()", "def setUpClass(cls):\n test_family = 'H_Abstraction'\n\n # set-up RMG object\n rmg = RMG()\n\n # load kinetic database and forbidden structures\n rmg.database = RMGDatabase()\n path = os.path.join(settings['test_data.directory'], 'testing_database')\n\n # kinetics family loading\n rmg.database.load_kinetics(os.path.join(path, 'kinetics'),\n kinetics_families=[test_family],\n reaction_libraries=[]\n )\n # load empty forbidden structures to avoid any dependence on forbidden structures\n # for these tests\n for family in rmg.database.kinetics.families.values():\n 
family.forbidden = ForbiddenStructures()\n rmg.database.forbidden_structures = ForbiddenStructures()", "def setUp(self):\n self.m = m = random.randint(1, 100)\n self.n = n = random.randint(1, 100)\n self.sig = sig = Signature(\"name\", Dim(\"m\"), Dim(\"n\"),\n sData(\"A\", \"ldA * n\"), Ld(\"ldA\", \"m\"),\n dData(\"B\", \"ldB * m\"), Ld(\"ldB\", \"m\"),\n cData(\"C\", \"ldC * n\"), Ld(\"ldC\", \"n\"))\n self.ex = ex = Experiment()\n ex.calls = [sig(m, n, \"X\", None, \"Y\", None, \"Z\", None)]\n ex.infer_lds()\n self.i = Symbol(\"i\")\n self.j = Symbol(\"j\")", "def setUpClass(cls):\n cls.nhf = nhflux.NhfluxStream.readBinary(SIMPLE_HEXZ_NHFLUX)", "def test_hmf_init(self):\n spec = np.random.random((20, 100))\n invvar = np.random.random((20, 100))\n hmf = HMF(spec, invvar)\n assert hmf.K == 4\n assert log.level == 20 # INFO\n hmf = HMF(spec, invvar, K=6, verbose=True)\n assert hmf.K == 6\n assert log.level == 10 # DEBUG", "def __init__(self, n=0, e=0):\r\n raise NotImplementedError()", "def setUpClass(cls):\n cls.nhf = nhflux.NhfluxStreamVariant.readBinary(SIMPLE_HEXZ_NHFLUX_VARIANT)", "def test___init__(self):\n copula = GammaUnivariate()\n assert copula.a is None\n assert copula.loc is None\n assert copula.scale is None", "def test_01_Setup(self):\n # print(PrettyFormatAny.form(VALID_FAMILIES, 'A1-01-A - Valid'))\n self.assertEqual(len(VALID_FAMILIES), len(self.m_pyhouse_obj._Families))\n self.assertEqual(VALID_FAMILIES[0], TESTING_FAMILY_NAME_0) # Null\n self.assertEqual(VALID_FAMILIES[1], TESTING_FAMILY_NAME_1) # Insteon\n self.assertEqual(VALID_FAMILIES[2], TESTING_FAMILY_NAME_2) # UPB\n self.assertEqual(VALID_FAMILIES[3], TESTING_FAMILY_NAME_3) # X-10\n self.assertEqual(VALID_FAMILIES[4], TESTING_FAMILY_NAME_4) # Hue", "def test_constructor(self, name, num_petals, price):\n with pytest.raises(AssertionError):\n chap2.Flower(name, num_petals, price)", "def setUpClass(cls):\n celltype_analyse = \"Adipocyte - breast\"\n data_type = \"promoters\"\n sample_type = \"primary cells\"\n parsed = False\n files_path = \"test\"\n cls.element_list = ('chr10:100027943..100027958,-', 'chr10:100174900..100174956,-',\n 'chr10:100204220..100204230,-', 'chr10:100206642..100206717,-')\n expression_obj = iext.CheckElementExpression(inputs=cv.test_promoter_file_name,\n element_list=cls.element_list,\n cell_type=celltype_analyse,\n data_type=data_type, sample_type=sample_type,\n parsed=parsed, files_path=files_path)\n cls.expression = expression_obj.export_expression_data(method=\"return\")", "def __init__(self):\n self.expvalue = np.zeros(10)\n self.iter = np.zeros(10)\n self.epsilon = 0.1", "def test_2X_constructor(self):\n path_to_config = os.path.join(CONST.ROOT, 'CONSTANTS.py')\n with self.assertRaises(Exception):\n FeatureExtractor(path_to_config)", "def test_init(self):\n res = computer.Computer(1)\n exp = computer.Computer\n self.assertIsInstance(res, exp)", "def setUpClass(cls):\n cls.inputs = cv.expression_data1\n cls.celltype_analyse = \"celltypetarget\"\n cls.replicate_suffix = \"_donor\"\n cls.algorithm = \"heuristic\"\n cls.k = 4\n cls.thresholds = (0.5, 0, 0) # act, inact, and sparseness, respectively\n cls.files_path = \"test\"", "def setUp(self):\n self.cashFlowDate=Date(1,October,2018)\n self.fixingDate=Date(1,November,2018)\n self.foreignAmount=1000.0\n self.familyName=\"ECB\"\n self.fixingDays=2\n self.sourceCurrency=USDCurrency()\n self.targetCurrency=EURCurrency()\n self.fixingCalendar=UnitedStates()\n self.todayDate=Date(11, November, 2018)\n self.tsDayCounter=Actual360()\n 
self.flatForwardUSD=FlatForward(self.todayDate, 0.005, self.tsDayCounter)\n self.sourceYts=RelinkableYieldTermStructureHandle(self.flatForwardUSD)\n self.flatForwardEUR=FlatForward(self.todayDate, 0.03, self.tsDayCounter);\n self.targetYts=RelinkableYieldTermStructureHandle(self.flatForwardEUR)\n self.fxindex=FxIndex(self.familyName,self.fixingDays,self.sourceCurrency,self.targetCurrency,self.fixingCalendar,self.sourceYts,self.targetYts)\n self.fxlinkedcashflow=FXLinkedCashFlow(self.cashFlowDate,self.fixingDate,self.foreignAmount,self.fxindex)", "def setUp(self):\n self.employee = Employee('Lucas', 'Guerra', 45000)", "def setUp(self):\n\n self.male_years = HeightCurveMaleYears().make()\n self.male_months = HeightCurveMaleMonths().make()\n self.female_years = HeightCurveFemaleYears().make()\n self.female_months = HeightCurveFemaleMonths().make()", "def test_01_Init(self):\n pass", "def setUpClass(cls):\n super().setUpClass()\n cls.validate_with = val.get_data_to_validate(\"A549_singlecell\", file_name=\"A549_singlecell.csv\",\n files_path=cls.files_path, positions=(0, 0, 0, 1),\n splits=(\":\", \"-\"))\n cls.vencode_obj = iext.GetVencodeFantomExternalData(validate_with=cls.validate_with,\n inputs=cv.test_enhancer_file_name,\n files_path=cls.files_path,\n cell_type=\"A549_singlecell\",\n algorithm=cls.algorithm,\n n_regulatory_elements=cls.k,\n number_vencodes=2,\n parsed=False,\n thresholds=cls.thresholds, n_samples=10000,\n data_type=\"enhancers\", sample_type=\"cell lines\",\n merge={\"exclude_target\": True})\n cls.vencodes = cls.vencode_obj.coordinates", "def test_perfectModelEnsemble_init(PM_ds_initialized_1d):\n pm = PerfectModelEnsemble(PM_ds_initialized_1d)\n print(PerfectModelEnsemble)\n assert pm", "def setUp(self):\n\t\tself.mason = Employee(\"mason\",\"karsevar\",10000)", "def setUp(self):\n\t\tfirst_name = 'Gerson'\n\t\tlast_name = 'Santos'\n\t\tannual_salary = 5000\n\t\tself.gerson = Employee(first_name, last_name, annual_salary)", "def test_init(self):\n ex = Experiment(note=\"Test\")\n self.assertEqual(ex.note, \"Test\")", "def setUp(self):\n self.family = Family()\n self.decoder = Decoder()\n self.data1 = ['Atya', 'Sister-In-Law']\n self.data2 = ['Satya', 'Ketu', 'Male']", "def __init__(self,\n step_size=0.1, n_epochs=100, batch_size=1000,\n n_factors=0, alpha=0.00, random_state=20190415):\n self.n_factors = int(n_factors)\n self.alpha = float(alpha)\n self.step_size = step_size\n self.n_epochs = n_epochs\n self.batch_size = batch_size\n if isinstance(random_state, int):\n self.random_state = np.random.RandomState(random_state)\n else:\n self.random_state = random_state", "def testInitialization(self):\n\n self.rdfvalue_class(\"C.00aaeccbb45f33a3\")\n\n # Initialize from another instance.\n sample = self.GenerateSample()\n\n self.CheckRDFValue(self.rdfvalue_class(sample), sample)", "def setUpClass(self):\n super(TestExpedition, self).setUpClass()", "def setUpClass(cls):\n cls.config.setup_toolbox('ENVI', 'qa_envitaskengine_datatype_sarscapedataarray',\n 'test_datatype_sarscapedataarray')", "def __init__(self, testing_level=1, verbosity=1):\r\n for i in [self.test_set(),\r\n self.test__cross(),\r\n self.test_eval(),\r\n self.test_run(),\r\n self.random_test()]:\r\n if i != 1:\r\n print(\"WARNING: AN ERROR HAS OCCURRED IN INITIAL TESTING, THIS CLASS IS UNSTABLE.\")\r\n self.testing_level = testing_level\r\n self.verbosity = verbosity", "def test_exp_square():\n\timport odelab.scheme.exponential as E\n\tfor name in dir(E):\n\t\tcls = getattr(E, name)\n\t\tif hasattr(cls, 
'general_linear_z'):\n\t\t\tobj = cls()\n\t\t\ta,b = obj.general_linear_z(np.eye(2))\n\t\t\tnb_stages = len(a)\n\t\t\ttail_length = obj.tail_length\n\t\t\tyield CheckSquare(name),name, a,b, nb_stages, tail_length", "def __init__(self, alpha=1., beta=1., offset=0., seed=None):\n super().__init__(ndim=1, seed=seed)\n\n alpha, beta = np.atleast_1d(alpha), np.atleast_1d(beta)\n assert alpha.ndim == 1, 'alpha must be a 1-d array'\n assert alpha.size == beta.size, 'alpha and beta must match in size'\n assert np.all(alpha > 0.), 'Should be greater than zero.'\n assert np.all(beta > 0.), 'Should be greater than zero.'\n self.alpha = alpha\n self.beta = beta\n self.offset = offset\n self._gamma = gamma(a=alpha, scale=1. / beta)", "def __init__(self, gamma=None, x0=None):\n if gamma is not None and x0 is not None:\n self._initialize(gamma, x0)", "def __init__(self, epsilon=1e-7):\n super().__init__()\n self.epsilon = epsilon", "def setUpClass(cls):\n super().setUpClass()\n add_celltype_file = cv.test_enhancer_file_name\n add_celltype_ = [add_celltype_file, \"hIPS\", {\"sample_types\": \"time courses\",\n \"files_path\": cls.files_path}]\n\n cls.validate_with = val.get_data_to_validate(\"hIPS\", optional=False, files_path=cls.files_path)\n cls.vencode_obj = iext.GetVencodesFantom(validate_with=cls.validate_with,\n inputs=cv.test_enhancer_file_name,\n files_path=cls.files_path,\n cell_type=\"hIPS\",\n algorithm=cls.algorithm,\n n_regulatory_elements=cls.k,\n number_vencodes=2,\n parsed=False,\n thresholds=cls.thresholds, n_samples=10000,\n data_type=\"enhancers\", sample_type=cls.sample_type,\n add_celltype=add_celltype_,\n merge={\"exclude_target\": True})\n cls.vencodes = cls.vencode_obj.coordinates", "def setUpClass(cls):\n sys.stdout.write('(' + os.path.basename(__file__).split('.')[0] +\n '.' 
+ cls.__name__ + ') ...')\n cls._test_name = 'hint_time'\n cls._report_path = 'test_{}.report'.format(cls._test_name)\n cls._image_path = 'test_{}.png'.format(cls._test_name)\n cls._skip_launch = not util.do_launch()\n cls._agent_conf_path = 'test_' + cls._test_name + '-agent-config.json'\n # Clear out exception record for python 2 support\n geopmpy.error.exc_clear()\n machine_file_name = 'test_{}.machine'.format(cls._test_name)\n cls._machine = machine.Machine()\n try:\n cls._machine.load()\n except RuntimeError:\n cls._machine.save()\n\n if not cls._skip_launch:\n # Set the job size parameters\n cls._num_node = 1\n num_rank = 1\n time_limit = 60\n # Configure the test application\n app_conf = AppConf()\n agent_conf = geopmpy.agent.AgentConf(cls._agent_conf_path)\n\n # Create the test launcher with the above configuration\n launcher = geopm_test_launcher.TestLauncher(app_conf,\n agent_conf,\n cls._report_path,\n time_limit=time_limit)\n launcher.set_num_node(cls._num_node)\n launcher.set_num_rank(num_rank)\n # Run the test application\n launcher.run('test_' + cls._test_name)\n cls._report = geopmpy.io.RawReport(cls._report_path)", "def setUp(self):\n self.my_employee = Employee('knight', 'lee', 10000)", "def test_omegaconf(self):\n with pytest.raises(OmegaConfBaseException):\n exp_manager(None, {\"unused\": 1})", "def test_create_instance(self):\n with self.assertRaises(exceptions.NoInitiation):\n Config()", "def __init__(self, dim, rn, gammak, sine=False, feature_generator=None):\n\n\t\tself.dim = dim\n\t\tself.rn = rn\n\t\tself.gammak = gammak\n\n\t\tif feature_generator is None:\n\t\t\tself.feature_generator = GaussianRandomFeatures(self.dim, self.rn, self.gammak, sine=sine)\n\t\telse: self.feature_generator = feature_generator", "def test_init_default(self):\n self._test_init_default()", "def test_init(self):\n likelihoods.Gaussian()\n self._standard_likelihood()", "def experiment_init(self):\n pass", "def test_heip_e(self):\n c = array([1,2,3,1])\n h = shannon(c, base=e)\n expected = exp(h-1)/3\n self.assertEqual(heip_e(c), expected)", "def setUp(self):\n self.salary = 40000\n self.custom_rise = 7500\n self.employee = Employee(\"Carlos\", \"Zapata\", self.salary)", "def __init__(self,\n num_factors=40,\n regularization=0.01,\n alpha=1.0,\n iterations=15,\n use_native=True,\n num_threads=0,\n dtype=np.float64):\n self.num_factors = num_factors\n self.regularization = regularization\n self.alpha = alpha\n self.iterations = iterations\n self.use_native = use_native\n self.num_threads = num_threads\n self.dtype = dtype", "def test_initialized() -> None:\n MapieClassifier()", "def __init__(self,\n environment_spec: specs.EnvironmentSpec,\n ):\n # Create the actor\n actor = delta_hedge_actor.DeltaHedgeActor(environment_spec.actions)\n learner = fake_learner.FakeLeaner()\n\n super().__init__(\n actor=actor,\n learner=learner,\n min_observations=100,\n observations_per_step=1e9)", "def test_1st_prime_e(self):\n self.assertEqual(main(sym.exp(1), 200, 10), 7427466391)", "def __init__(self, space, exponent):\n self.exponent = float(exponent)\n super().__init__(space=space, linear=False, grad_lipschitz=np.nan)", "def test_init_with_fire_villan(self):\n pass", "def setUp(cls):\n cls.directory = os.path.join(os.path.dirname(os.path.dirname(rmgpy.__file__)), 'examples', 'arkane')\n cls.level_of_theory = LevelOfTheory(\"cbs-qb3\")\n cls.frequencyScaleFactor = 0.99\n cls.useHinderedRotors = False\n cls.useBondCorrections = True", "def __init__(self, num_instances: int, seed: float = 42):\n 
self._num_instances = num_instances\n self._seed = seed\n # apply seed\n random.seed(self._seed)\n np.random.seed(self._seed)", "def setUp(self):\n self.kerno = _make_eko().kerno", "def setUpClass(cls):\n cls.celltype_analyse = \"Adipocyte - Breast\"\n cls.data_type = \"promoters\"\n cls.sample_type = \"primary cells\"\n cls.algorithm = \"heuristic\"\n cls.k = 4\n cls.thresholds = (0.5, 0, 0) # act, inact, and sparseness, respectively\n cls.parsed = True\n cls.files_path = \"test\"", "def __init__(self, maxEpochs=100, initAlpha=0.01, power=1.0):\n self.maxEpochs = maxEpochs\n self.initAlpha = initAlpha\n self.power = power\n pass", "def __init__(self, epsilon=1e-14):\n self.epsilon = epsilon", "def __init__(\r\n self,\r\n centre=30.0, # <- **PyAutoFit** recognises these constructor arguments\r\n normalization=1.0, # <- are the Exponential`s model parameters.\r\n rate=0.01,\r\n ):\r\n self.centre = centre\r\n self.normalization = normalization\r\n self.rate = rate", "def gff_init():\n pass", "def __init__(self, e=.1, y=.99, n_epochs=2000):\n self.env = gym.make('FrozenLake-v0')\n # params\n self.y = y\n self.e = e\n self.n_epochs = n_epochs", "def setUp(self):\n\tself.emp = Employee('Lin',10000)\n\tself.emp2 = Employee('Jun',20000)", "def testCtor(self):\n try: pykd.DiaSymbol()\n except RuntimeError: pass", "def fake_init():\n return Faker()", "def test_big_family(self):\n\n self.taxon_tester('Staphylinidae')", "def setUp(self):\n problem = setup_house_L(size=(40, 40))\n\n env = MetroLayoutEnv()\n\n costfn = objectives.ConstraintsHeur(problem,\n wmap={'AspectConstraint':0.1,\n 'AreaConstraint': 2\n },\n default=1.)\n\n model = algo.MetropolisHastings(env, costfn)\n\n self.exp = SimpleMH(\n env,\n problem,\n model=model,\n cost_fn=costfn,\n num_iter=1000,\n initializer=PointsInBound(problem, env, size=3, seed=69)\n )", "def setUpModule():\n print(\"In setUpModule()...\")\n global math_obj\n math_obj = mymathlib()", "def setUpClass(cls):\n cls.device = DeviceFactory.create()", "def test_01_GetFamilyObj0(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_0\n l_obj = FamUtil._get_family_obj(self.m_pyhouse_obj, self.m_device_obj)\n # print(PrettyFormatAny.form(l_obj, 'B2-01-A - Family object'))\n self.assertEqual(l_obj.Name, TESTING_FAMILY_NAME_0)\n self.assertEqual(l_obj.Active, True)\n self.assertEqual(l_obj.Key, 0)\n self.assertEqual(l_obj.FamilyDevice_ModuleName, 'Null_device')\n self.assertEqual(l_obj.FamilyPackageName, 'Modules.Families.Null')\n self.assertEqual(l_obj.FamilyXml_ModuleName, 'Null_xml')", "def test_setup_errors(self):\n with self.assertRaises(ValueError):\n _ = RandomForest(n_estimators=0, max_depth=1, criterion='entropy')\n\n with self.assertRaises(ValueError):\n _ = RandomForest(n_estimators=1, max_depth=0, criterion='entropy')\n\n with self.assertRaises(ValueError):\n _ = RandomForest(n_estimators=1, max_depth=1, criterion='test')", "def test_init(self):\n es = elasticsearch.ElasticSearch(server='8.8.8.8',\n user='alice',\n password='iLoveDogs',\n doc_type='someLogCategory')\n\n self.assertTrue(isinstance(es, elasticsearch.ElasticSearch))", "def __init__(self,\r\n gibbs_e=None,\r\n internal_e=None,\r\n enthalpy_e=None,\r\n helmholtz_e=None,\r\n electronic_e=None,\r\n zero_point_e=None,\r\n Cv_trans_term=None,\r\n Cv_rot_term=None,\r\n Cv_vib_term=None,\r\n Cv_to_Cp=None,\r\n entropy_term=None,\r\n PV_term=None,\r\n\r\n # main_energy=\"gibbs\",\r\n ):\r\n #| - __init__\r\n self.gibbs_e = gibbs_e\r\n self.internal_e = internal_e\r\n self.enthalpy_e = 
enthalpy_e\r\n self.helmholtz_e = helmholtz_e\r\n\r\n self.electronic_e = electronic_e\r\n self.zero_point_e = zero_point_e\r\n\r\n self.Cv_trans_term = Cv_trans_term\r\n self.Cv_rot_term = Cv_rot_term\r\n self.Cv_vib_term = Cv_vib_term\r\n self.Cv_to_Cp = Cv_to_Cp\r\n\r\n self.entropy_term = entropy_term\r\n self.PV_term = PV_term\r\n\r\n if self.internal_e is None:\r\n self.internal_e = self.calc_internal_energy()\r\n\r\n if self.enthalpy_e is None:\r\n self.enthalpy_e = self.calc_enthalpy_energy()\r\n\r\n if self.gibbs_e is None:\r\n self.gibbs_e = self.calc_gibbs_free_energy()\r\n #__|\r", "def test_init():\n radius = 10\n c = Circle(radius)\n assert isinstance(c, Circle)\n assert c.radius == radius", "def test_invalid_data_construction(self):\n with self.assertRaises(Exception):\n LongDecimalEuler(term=-1)\n with self.assertRaises(Exception):\n LongDecimalEuler(term=\"aaa\")\n with self.assertRaises(Exception):\n LongDecimalEuler(nodecimals=-1)\n with self.assertRaises(Exception):\n LongDecimalEuler(nodecimals=\"aaa\")", "def __init__(\n self,\n test_factory: tf.TestFactory,\n test_case_factory: tcf.TestCaseFactory,\n fitness_functions: OrderedSet[ff.TestCaseFitnessFunction],\n ) -> None:\n self._test_factory = test_factory\n self._test_case_factory = test_case_factory\n self._fitness_functions = fitness_functions", "def __init__(self, source, testing_level=1, verbosity=1, test_functions=False):\r\n if test_functions:\r\n for i in [self.test_set(),\r\n self.test__cross(),\r\n self.test_run(),\r\n self.random_test(source)]:\r\n if i != 1:\r\n print(\"WARNING: AN ERROR HAS OCCURRED IN INITIAL TESTING, THIS CLASS IS UNSTABLE.\")\r\n self.testing_level = testing_level\r\n self.verbosity = verbosity", "def test_wrong_init(self):\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with self.assertRaises(TypeError):\n knxipframe.init(23)", "def test_Eg(self):\n self.setUp()\n tmp = np.arange(1, 49).reshape(3, 2, 4, 2)\n g1 = np.broadcast_to(tmp[..., None], tmp.shape + (2,)).swapaxes(1, -1)\n f = .02 * np.arange(1, 25).reshape(3, 2, 4)\n n_samples, n_MC, K = self.n_samples, self.n_MC, 2\n Lambda_1 = self.E_func.Lambda_g(np.ones(shape=(n_samples, K, n_MC)), f)\n pi_xi = 1 / (1 + np.exp(np.array([-3, -4, -6])))\n Eg1 = self.E_func.Eg(g1, Lambda_1, pi_xi, f)\n Eg1_ = np.array([4.396, 12.396])\n np.testing.assert_almost_equal(Eg1[0, 0], Eg1_, 3)", "def test_init(self):\n DummyCryptographicObject()", "def test_initialization(self):\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node.name, f'{self.TEST_PKG}.{self.TEST_CLS}')\n self.assertEqual(test_node.package, self.TEST_PKG)\n self.assertEqual(test_node.class_name, self.TEST_CLS)", "def setUp(self):\n TestExperiment.setUp(self)\n i = self.i\n self.ex = Experiment(\n sampler=self.sampler,\n range=[i, range(100, 2001, 100)],\n nreps=10,\n calls=[\n Signature(\n \"dgemm\",\n Trans(\"transA\"), Trans(\"transB\"),\n Dim(\"m\"), Dim(\"n\"), Dim(\"k\"),\n dScalar(),\n dData(\"A\", \"ldA * (k if transA == 'N' else m)\"),\n Ld(\"ldA\", \"m if transA == 'N' else k\"),\n dData(\"B\", \"ldB * (n if transB == 'N' else k)\"),\n Ld(\"ldB\", \"k if transB == 'N' else n\"),\n dScalar(\"beta\"),\n sData(\"C\", \"ldC * n\"), Ld(\"ldC\", \"m\"),\n flops=\"2 * m * n * k\"\n )(\"N\", \"N\", i, i, i, 1, \"A\", i, \"B\", i, 1, \"C\", i)\n ]\n )", "def start_prime_test():", "def test_creation(self):\n nfa = NondeterministicFiniteAutomaton()\n self.assertIsNotNone(nfa)\n states = [State(x) for x in range(10)]\n nfa = 
NondeterministicFiniteAutomaton(start_state=states)\n self.assertIsNotNone(nfa)", "def __init__(self, multiplier=1e-1):\r\n self.multiplier = multiplier", "def __init__(self, value=None):\r\n if value is None:\r\n value = (self.significand_type(None), self.exponent_type(None))\r\n else:\r\n if isinstance(value, (int, float)):\r\n e = math.ceil(math.log(abs(value), 2)) if value else 0\r\n s = value / 2**e\r\n assert s == 0 or 0.5 <= abs(s) <= 1, (value, s, e)\r\n value = (self.significand_type(s, integral=False), self.exponent_type(e))\r\n elif isinstance(value, tuple):\r\n if len(value) != 2 or \\\r\n not isinstance(value[0], self.significand_type) or \\\r\n not isinstance(value[1], self.exponent_type):\r\n raise TypeError('Significand/exponent pair required')\r\n\r\n else:\r\n raise TypeError('None, int, float, or significand/exponent pair required')\r\n\r\n super().__init__(value)", "def __init__(self):\n super().__init__()\n self.lambdaVar = 1.0\n self.low = 0.0\n self.type = 'Exponential'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def test_02_GetFamilyObj1(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_1\n l_obj = FamUtil._get_family_obj(self.m_pyhouse_obj, self.m_device_obj)\n # print(PrettyFormatAny.form(l_obj, 'B2-02-A - Family'))\n self.assertEqual(l_obj.Name, TESTING_FAMILY_NAME_1)\n self.assertEqual(l_obj.Active, True)\n self.assertEqual(l_obj.Key, 1)\n self.assertEqual(l_obj.FamilyDevice_ModuleName, 'Insteon_device')\n self.assertEqual(l_obj.FamilyPackageName, 'Modules.Families.Insteon')\n self.assertEqual(l_obj.FamilyXml_ModuleName, 'Insteon_xml')", "def test_epsf_build(self):\n\n size = 25\n oversampling = 4.\n stars = extract_stars(self.nddata, self.init_stars, size=size)\n epsf_builder = EPSFBuilder(oversampling=oversampling, maxiters=20,\n progress_bar=False)\n epsf, fitted_stars = epsf_builder(stars)\n\n ref_size = (size * oversampling) + 1\n assert epsf.data.shape == (ref_size, ref_size)\n\n y0 = int((ref_size - 1) / 2)\n z = epsf.data[y0, :]\n ampl, peak, sigma = gaussian1d_moments(z)\n assert_allclose(ampl, 0.002487, rtol=1e-4)\n assert_allclose(peak, y0, rtol=1e-3)\n assert_allclose(sigma, oversampling * self.stddev, rtol=1e-5)" ]
[ "0.64224786", "0.60437495", "0.6004442", "0.5973904", "0.5874628", "0.5863269", "0.5862954", "0.5851496", "0.5843871", "0.58273435", "0.58007246", "0.5793445", "0.5762569", "0.57617235", "0.57417697", "0.571784", "0.5715807", "0.5695528", "0.5693813", "0.5684522", "0.5677164", "0.5675944", "0.564229", "0.5635543", "0.56344175", "0.56259227", "0.5617797", "0.5597922", "0.55806696", "0.5571718", "0.5569801", "0.55629814", "0.5562798", "0.55600953", "0.5554828", "0.552676", "0.5522589", "0.5521966", "0.55200785", "0.55190057", "0.5506512", "0.5498908", "0.54934376", "0.5490776", "0.54883826", "0.5487451", "0.5484885", "0.5467655", "0.5467031", "0.5458317", "0.5452141", "0.5451994", "0.54490376", "0.5445871", "0.54420525", "0.5437691", "0.54332465", "0.54315007", "0.5426439", "0.54243493", "0.54144984", "0.5413884", "0.54068273", "0.5406235", "0.540436", "0.5402774", "0.54002917", "0.53991026", "0.53932", "0.5390277", "0.5388678", "0.5382681", "0.5378889", "0.53737265", "0.53707284", "0.5368378", "0.536823", "0.5361681", "0.535516", "0.5350143", "0.5348886", "0.5344342", "0.53433025", "0.5342475", "0.5330887", "0.533037", "0.5330169", "0.5322708", "0.5318219", "0.5317086", "0.5316768", "0.53148866", "0.5310536", "0.5307861", "0.53039646", "0.53013754", "0.5298412", "0.52972", "0.52955824", "0.5291215" ]
0.83874583
0
Read a raw file into a list of Sentences.
def read_raw(f):
    if type(f) is str:
        f = file(f, "r")
    for (li, line) in enumerate(f):
        sent = process_sgml_line(line)
        mark = sent.getmark()
        if mark:
            tag, attrs = mark
            attrs = attrs_to_dict(attrs)
            if False and tag == "seg" and "id" in attrs:
                sent.id = attrs["id"]
            else:
                sent.id = str(li)
        else:
            sent.id = str(li)
        yield sent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_file(input_file):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n sentences = f.read().splitlines()\n return sentences", "def _read_sentences(filename):\n with tf.gfile.GFile(filename, \"r\") as f:\n return [sentence.split() for sentence in f.read().split('\\n')]", "def sentences_from_file(this_class, filename):\n # Note that the native method below leaks. We work around this\n # by acquiring its pointer in __init__\n sentReps = parser.sentRepsFromFile(filename)\n return list(map(this_class, sentReps))", "def load_sentences(path):\n sentences = []\n sentence = []\n num = 0\n with codecs.open(path, 'r', 'utf8') as fread:\n # n_lines = len(fread)\n print(\"Read from {:s}\".format(path))\n # pbar = progressbar.ProgressBar(max_value=n_lines)\n for line_idx, line in enumerate(fread):\n assert line_idx==num,'ER'\n num += 1\n\n line = line.rstrip()\n # print(list(line))\n if not line: #Update: only deal with space between sentences\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:# remove the DOCstart\n sentences.append(sentence)\n sentence = []\n else:\n if line[0] == \" \":#Update: this part is never used in Chinese ner!\n line = \"$\" + line[1:]\n word = line.split()\n # word[0] = \" \"\n else:\n word= line.split()\n assert len(word) >= 2, ([word[0]])\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n\n return sentences", "def read_sentences(f):\n with open(f, 'r') as conll_file:\n s = [ROOT]\n for line in conll_file:\n if line.strip() and not line.startswith('#'):\n s.append(read_token(line))\n elif len(s) != 1:\n yield s\n s = [ROOT]\n if len(s) != 1: # file ended without a new line at the end\n yield s", "def read_sents_from_file(f):\r\n sent = []\r\n for line in f:\r\n line = line.strip()\r\n if line == \"\":\r\n if sent != []:\r\n yield 'SENT', Sentence(sent)\r\n sent = []\r\n elif line.startswith('#'):\r\n yield 'COMMENT', line\r\n else:\r\n sent.append(line)\r\n if sent != []:\r\n yield 'SENT', Sentence(sent)\r\n f.close()", "def extract_sentences(file_path):\n\n with open(file_path, \"r\") as file:\n\n lines = list()\n\n for line in file:\n line_stripped = line.strip()\n\n if line_stripped == \"\":\n continue\n\n lines.append(line_stripped)\n\n text = \" \".join(lines)\n sentences = token_to_sentence(text)\n\n return sentences", "def read_file(filename):\n with open(filename, encoding='utf-8') as src:\n return [line.strip() for line in src.readlines()]", "def read_txt(cls, input_file):\n return open(input_file, \"r\", encoding=\"utf-8\").readlines()", "def read_txt(cls, input_file):\n return open(input_file, \"r\", encoding=\"utf-8\").readlines()", "def read_raw_text(self, raw_path: str = None):\n\n if raw_path.rsplit(\".\")[-1] == \"json\":\n self.import_from_json(raw_path)\n return\n\n if raw_path is not None:\n self.raw_path = raw_path\n\n if self.raw_path is None:\n raise Exception(\"Found no file to read\")\n\n file = open(raw_path, \"r\")\n raw = file.read()\n file.close()\n\n self.sentences += get_sentences(raw, self.cM.use_spacy)\n\n self.loaded(False)", "def load_sentences(path, lower, zeros=True):\n sentences = []\n sentence = []\n for line in codecs.open(path, 'r', 'utf8'):\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n assert len(word) >= 2\n sentence.append(word)\n if len(sentence) > 0:\n if 
'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences", "def read_article_2(filename):\n file = open(filename, \"r\")\n filedata = file.readlines()\n sentences = sent_tokenize(filedata[0])\n return sentences", "def extract_sentences_from_file(path: str) -> list:\n sentences = list()\n with io.open(file=path, mode=\"r\", encoding=\"utf-8\") as input_file:\n content = json.load(input_file)\n\n data = content[\"rasa_nlu_data\"]\n\n # Obtain the list of sentences\n common_examples = data[\"common_examples\"]\n\n for example in common_examples:\n sentences.append(example[\"text\"])\n\n return unique(sentences)", "def read_sentences():\r\n f = open(\"data.txt\", \"r\")\r\n gram = f.read().splitlines()\r\n gram = [sentence for sentence in gram if sentence != \"\"]\r\n return gram", "def _read(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding='utf-8') as f:\n lines = []\n for line in f:\n lines.append(line.strip())\n return lines", "def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs", "def read_file(filename):\n\n all_documents = []\n document = []\n with tf.gfile.GFile(filename, \"r\") as reader:\n for line in reader:\n line = line.strip()\n line = tokenization.convert_to_unicode(line)\n line = line.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n sents = split_line_by_sentences(line)\n for sent_line in sents:\n if not sent_line or len(sent_line) < 4: # Arbitrary min length for line\n continue\n if sent_line.lower()[:7] == \"chapter\":\n if document:\n all_documents.append(document)\n document = []\n else:\n document.append(sent_line)\n if len(document) == FLAGS.max_para_length:\n all_documents.append(document)\n document = []\n if document:\n all_documents.append(document)\n\n # Remove small documents\n all_documents = [x for x in all_documents if len(x) >= 8]\n\n return all_documents", "def readWhole(self):\n try:\n if os.path.isfile(self.filename) == False:\n raise dse.DocumentStreamError(\"Not a file!\")\n except dse.DocumentStreamError as E:\n print(E.data)\n exit()\n\n f = open(self.filename, 'r')\n\n fileString = f.read()\n f.close()\n\n #fileString = [c for c in fileString if c not in ['\\n', '\\t']] # Remove all returns in the string\n\n sentenceList = []\n sent = ''\n spaceState = False\n\n\n ### If char is .!?; or new line, append sentence to sentenceList\n ### and reset sentence to empty string.\n\n for char in fileString:\n if char in ['\\n', '\\t']:\n char = ' '\n\n if char == ' ':\n if spaceState == True and sent != '':\n sentenceList.append(sentence.Sentence(sent))\n sent = ''\n elif spaceState == False:\n sent += char\n spaceState = True\n else:\n spaceState = False\n sent += char\n if char in '.!?;' and sent != '':\n sentenceList.append(sentence.Sentence(sent))\n sent = ''\n\n if sent != '':\n sentenceList.append(sentence.Sentence(sent))\n\n ### Handles the case that a sentence begins or ends with a space character.\n '''\n for i in sentenceList:\n if i.sentence[0] == ' ':\n i = sentence.Sentence(i.sentence[1:])\n if i.sentence[-1] == ' ':\n i = sentence.Sentence(i.sentence[:-1])\n '''\n\n return sentenceList", "def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n # Replace end of line tokens\n if self.eol is not None and not self.split_by_sentence:\n decoded_text = decoded_text.replace('\\n', self.eol)\n\n 
# Split by sentence or unroll\n if self.split_by_sentence:\n nltk.download('punkt', quiet=True)\n text = [(sent.strip(),) for sent in nltk.tokenize.sent_tokenize(decoded_text)]\n else:\n text = [(decoded_text,)]\n\n return text", "def read_file(self,filename):\n\n f = open(filename,'r')\n lines = f.readlines()\n f.close()\n\n sequences = [l.strip() for l in lines if l.strip() != \"\"]\n\n self.load_sequences(sequences)", "def listfromfilelines(file):\r\n with open(file, 'r') as f:\r\n list = [line.strip().decode('utf-8') for line in f]\r\n return list", "def load_sentences(path, zeros):\n sentences = []\n sentence = []\n for line in codecs.open(path, 'r', 'utf8'):\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences", "def read_file_unlabeled(filename):\n\n sentences = open(filename).read().strip().split(\"\\n\\n\") #separate tweets\n ret = []\n for sent in sentences:\n lines = sent.split(\"\\n\") #each word in the tweet\n ret.append( (lines) )\n return ret", "def read(filename):\n with open(filename, 'r') as fRead:\n samples = list(map(lambda line: line.strip(), fRead))\n return samples", "def read_file(filename, tokenizer, is_cased):\n sents = []\n with open(filename) as f:\n for line in f:\n sents.append(tokenizer(line, is_cased))\n return sents", "def read_file(self, file_name: str):\n file_text = []\n with open(file_name, encoding='utf-8', errors='ignore') as file:\n for line in file:\n line = line.strip()\n file_text.append(line)\n return file_text", "def read_list(file_name):\n with open(file_name, 'r') as f:\n text = f.read().splitlines()\n return text", "def read_file(path):\n with open(path, \"r\") as IN:\n file_seqs = [line.strip() for line in IN]\n return file_seqs", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n students = f.read().splitlines()\n return students", "def get_sentence_list_for_word_file(file_path: str) -> List[str]:\n # get file data\n with open(file_path, 'r') as review_file:\n file_text = review_file.read().splitlines()\n return file_text", "def get_enron_sentences(self):\n helper._print_subheader('Reading ' + directories.ENRON_TRAIN_SENTENCES_TXT_PATH + '...')\n with open(directories.ENRON_TRAIN_SENTENCES_TXT_PATH, 'r', encoding='utf-8') as txt_file:\n for index, line in enumerate(txt_file):\n if index % 1000000 == 0 and index != 0:\n helper._print(f'{index} sentences read')\n break\n preproccesed_line = simple_preprocess(line)\n if preproccesed_line != []:\n yield preproccesed_line\n helper._print(f'{index} sentences read')\n helper._print_subheader('Done reading Enron email data!')", "def load_input(filepath: str) -> list:\n lines = []\n with open(filepath, \"r\", encoding=\"utf-8\") as file:\n for line in file.readlines():\n lines.append(line.strip())\n return lines", "def load_file(filename):\n with open(filename, \"r\") as f:\n return f.readlines()", "def getlistfromtext(self,filename):\n l=[]\n\n if self.encoding:\n f = codecs.open(filename,\"r\",encoding=self.encoding)\n for line in f:\n l.append(line.rstrip())\n f.close()\n\n else:\n f = open(filename,\"r\")\n for line in f:\n l.append(line.rstrip())\n f.close()\n return l", "def read_lines(file_name: str) -> List[str]:\n try:\n return open(file_name, encoding='utf8').readlines()\n except 
UnicodeDecodeError:\n return open(file_name, encoding='cp1252').readlines()", "def open_text_file(filepath):\n sentences = []\n sentencemanager = nmea.NMEASentenceManager()\n for line in open_file_generator(filepath):\n sentencemanager.process_sentence(line)\n sentences.append(line)\n return sentencemanager, sentences", "def load_file(file_name):\n with open(file_name,\"r\") as f:\n return f.readlines()", "def read_messages(given_file):\n \n message_list = given_file.readlines()\n for i in range(len(message_list)):\n message_list[i] = message_list[i].strip('\\n')\n return message_list", "def read_lines_from_file(fname):\n return []", "def readlines(filename, encoding='utf-8'):\r\n text, encoding = read(filename, encoding)\r\n return text.split(os.linesep), encoding", "def read_file(self, file_path): \n logging.info('Lendo arquivo de {0}'.format(file_path))\n file_with_tags = open(file_path, \"r\", encoding='utf-8')\n return file_with_tags.readlines()", "def read_file_into_list(filename):\n with open(filename) as file:\n return file.readlines()", "def get_file_lines(filename):\n\n with open(filename, \"r\") as lines:\n lines = lines.readlines() # Saves list of each poem line in lines\n\n for _ in range(len(lines)):\n lines[_] = lines[_].rstrip() # Removes newline char from right-side end of each poem line\n\n return lines", "def read_txt(filename):\n file_object = open(filename, 'r')\n file_as_string = file_object.read()\n return create_word_list(file_as_string)", "def file_to_list(file_name):\r\n fr = open(file_name, encoding = 'utf-8')\r\n l = [line.strip() for line in fr]\r\n fr.close()\r\n return l", "def read_text_file(file_name):\n target_file = open(file_name)\n lines = target_file.readlines()\n\n target_file.close()\n return lines", "def read_file_in_lines(filename):\r\n\twith open(filename) as infile:\r\n\t\tlines = infile.readlines()\r\n\treturn [line.strip() for line in lines]", "def read_file(file_path):\n with open(file_path, mode=\"r\", encoding=\"utf-8\") as f:\n data = f.read()\n return data.split(\"\\n\")", "def load(file_name):\n file_data = []\n with io.open(file_name, \"r\", encoding=\"utf-8\") as f:\n file_data = [line.rstrip('\\n') for line in f]\n return file_data", "def read_conll_file(file_name):\n data = []\n current_words = []\n current_tags = []\n\n for line in codecs.open(file_name, encoding='utf-8'):\n line = line.strip()\n \n if line:\n if line[0] == '#':\n continue # skip comments\n tok = line.split('\\t')\n if '-' in tok[0] or '.' 
in tok[0]:\n continue # skip special tokenized words\n word = tok[1]\n tag = tok[3]\n \n current_words.append(word)\n current_tags.append(tag)\n else:\n if current_words: # skip empty lines\n data.append((current_words, current_tags))\n current_words = []\n current_tags = []\n\n # check for last one\n if current_tags != [] and not raw:\n data.append((current_words, current_tags))\n return data", "def load_data_sentences(dirname):\n sentence_list = []\n for fname in os.listdir(dirname):\n with open(os.path.join(dirname, fname)) as file:\n #sentence_list.append(gensim.models.word2vec.LineSentence(file))\n sentence_list.append(file)\n return sentence_list", "def __read_file(self, filename):\n with open(filename) as f:\n content = f.readlines()\n \n return content", "def readfile(input_file):\n lines = []\n with open(input_file, 'r') as file:\n for line in file:\n lines.append(line.rstrip())\n return lines", "def read_data(max_size=None, max_sentence_size=None, min_sentence_size=10):\n sentences = []\n with tf.gfile.GFile('data_WMT/sentences/sentences.txt', mode=\"r\") as source_file:\n source = source_file.readline()\n print (source)\n counter = 0\n while source and (not max_size or counter < max_size):\n source_ids = [int(x) for x in source]\n if len(source_ids) < max_sentence_size and len(source_ids) > min_sentence_size:\n sentences.append(source_ids)\n ratings.append(rating)\n counter += 1\n if counter % 10000 == 0 and counter != 0:\n print(\" reading data line %d\" % counter)\n sys.stdout.flush()\n source = source_file.readline()\n return sentences", "def read_file(path, tok=False):\n with open_file(path) as f:\n for line in f.readlines():\n words = split_sentence(line.strip(), tok)\n yield words", "def get_file_contents_as_list(file_name):\n with open(file_name) as file:\n data = file.read().splitlines()\n return data", "def file_reader(filePath):\n try:\n word_file = open(filePath, \"rt\")\n word_list = word_file.read().splitlines()\n word_file.close()\n return word_list\n except Exception:\n print(f\"An error has occured when reading the file.\")\n\n return", "def read_from_file(filename):\n with open(filename, \"r\") as f:\n f.readlines()", "def load_file_sentences(filepath, filename):\n # Read file as string first\n f = open(filepath, 'r')\n text = f.read()\n f.close()\n # Strip the newlines\n text = filter(lambda x: x != '\\n', text)\n # Now use nltks method to read the sentences\n sentences = sent_tokenize(text)\n # convert everything to lower case\n sentences = map(str.lower, sentences)\n \"\"\"sentences = [(s.lower(), filename) for s in sentences]\"\"\"\n # Create segments by clustering. 
Let's say 3 segments per text.\n # Similarity metric shall be cosine.\n fs = create_feature_space(sentences)\n vectors = [vectorize(fs, sent) for sent in sentences]\n compute_similarity_matrix(vectors, cosine_similarity, filename+\".similarities\")\n segments = cluster_sentences(filename+\".similarities\", __cluto_bin, 3)\n # Stitch it all together\n return zip(sentences, [filename]*len(sentences), segments)", "def readFile(fileName):\n\n textFile = open(fileName, 'r')\n encryptedMessages = []\n for line in textFile:\n encryptedMessages.append(line.rstrip())\n textFile.close()\n return encryptedMessages", "def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data", "def get_list(file_name):\n with open(file_name, \"r\", encoding=\"latin-1\") as file:\n text = file.read()\n text = text.lower() # Make everything lowercase\n text = text.split(\"\\n\")\n return text", "def read_file(filename) -> List[Todo]:\n with pathlib.Path(filename).expanduser().open('r') as fp:\n return [Todo(_id, line) for _id, line in enumerate(fp)]", "def open_and_read_file(filename):\n \n file_class = open(filename)\n file_list = file_class.read().split(\"\\n\")\n file_class.close()\n \n return file_list", "def __read_data__(self):\n with open(self.file, 'r') as data:\n sentence = []\n tags = []\n for line in data:\n terms = line.rstrip().split(WHITESPACE)\n for term in terms:\n word_tag = tuple(term.split(TAGCHAR))\n word = word_tag[0]\n tag = word_tag[1]\n self.word_tag_dict[word_tag] += 1\n self.tag_dict[tag] += 1\n self.__add_to_word_dict__(word, tag)\n if self.isNumberWord(word):\n self.numbers += 1\n if word[0].isupper() and len(sentence) > 0:\n self.cap_no_start += 1\n sentence.append(word)\n tags.append(tag)\n if tag == ENDOFSENTENCE:\n self.sentences.append(tuple(sentence))\n self.tags.append(tuple(tags))\n sentence = []\n tags = []", "def sents(infile):\n with io.open(infile, 'r', encoding='utf8') as fin:\n for line in fin:\n yield line.strip()", "def process_corpus(self):\n sentences = []\n sentence = []\n with open(str(self.file), encoding=self.encoding) as f:\n\n line = f.readline()\n\n while line:\n\n if line.startswith(\"#\"):\n line = f.readline()\n continue\n\n if line.strip().replace(\"\", \"\") == \"\":\n if len(sentence) > 0:\n self.infer_space_after(sentence)\n if self.tagging_scheme is not None:\n self.convert_tag_scheme(\n sentence, target_scheme=\"iobes\"\n )\n\n sentences.append(sentence)\n sentence = []\n\n else:\n fields = re.split(r\"\\s+\", line)\n token = fields[0] # text column\n token_tags = {\n v: fields[k]\n for k, v in self.columns.items()\n if v != \"text\"\n }\n sentence.append({\"name\": token, \"tags\": token_tags})\n\n line = f.readline()\n\n return sentences", "def read_file(path):\n with open(path) as _file:\n _list = _file.readlines()\n return _list", "def read_data(cls, input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n lines = []\n for line in f:\n line = line.strip()\n if line.startswith('-DOCSTART-'):\n continue\n else:\n word_labels = line.split('-seq-')\n assert len(word_labels) == 2\n\n words = word_labels[0]\n labels 
= word_labels[1]\n lines.append([words, labels])\n\n return lines", "def load_txt(filename, **kwargs):\n with sys_open(filename, 'r', **kwargs) as f:\n return f.readlines()", "def get_parsed_paragraphs_from_file(self, processed_path):\n with open(processed_path, \"r\") as f:\n sent_len = json.loads(f.readline())['sentence_lens']\n paragraphs = list()\n line_no = 1\n para_idx = 0\n while para_idx < len(sent_len):\n paragraph = list()\n end_no = sent_len[para_idx]\n while line_no < end_no:\n sent = json.loads(f.readline())\n sent[\"sid\"] = self.generate_sid(sent, processed_path, line_no)\n paragraph.append(sent)\n line_no += 1\n para_idx += 1\n paragraphs.append(paragraph)\n return paragraphs", "def load_file(file):\r\n\r\n try:\r\n with open(Path(file), \"r\", encoding=\"utf-8\", newline=\"\") as f:\r\n txt_file = f.read()\r\n except:\r\n sys.exit(\"IO_Tools: ERROR: \"+str(file)+\" not found!\")\r\n \r\n lines = txt_file.split(\"\\n\")\r\n\r\n return lines", "def file_to_list(filename, dir=\"../resources\"):\n os.chdir(dir)\n vocabulary = []\n f = open(filename, \"r\")\n lines = f.readlines()\n for line in lines:\n vocabulary.append(line.replace(\"\\n\", \"\"))\n return vocabulary", "def load_text_file(file_path: str):\n with open(file_path) as f:\n content = f.readlines()\n return content", "def parse_file(self, fpath):\n sdir = os.path.abspath(os.path.join(os.path.dirname(salt.__file__), os.pardir))\n with open(os.path.join(sdir, fpath), \"rb\") as f:\n return f.readlines()", "def read_file(path: str) -> list:\n list_of_lines = []\n with open(path, 'r') as f:\n for line in f:\n list_of_lines.append(line.strip())\n return list_of_lines", "def __read_lines__(self):\r\n fd = open(self.input_file, \"r\")\r\n lines = fd.readlines()\r\n fd.close()\r\n return lines", "def read_file_as_list(filename):\n with FileUtils.open_file_by_type(filename) as f:\n return [l for l in (line.strip() for line in f) if l]", "def read_file(filename):\n\n infile = open(filename, 'r')\n lines = infile.readlines()\n infile.close()\n\n return lines", "def read_filename(self, filename):\r\n self.text_lines = task3.read_text_file(filename)", "def load_lines(filename):\r\n lines = []\r\n f = open(filename)\r\n for line in f.readlines():\r\n line = line.strip()\r\n lines.append(line)\r\n return lines", "def read( self, song_file_name ):\n song_file = open( song_file_name )\n content = song_file.read()\n return self.split( content )", "def read(file, connfd):\n resultat = []\n file = open(file, 'r')\n lines = file.readlines()\n for line in lines:\n connfd.send(line.encode())\n resultat.append(line.strip(\"\\n\"))\n return resultat", "def readfile(file):\n with open(file, 'r') as f:\n data = f.read().splitlines()\n return data", "def read_lines(filename, verbose=True):\n with open(filename, 'r') as fp:\n lines = fp.readlines()\n if verbose:\n print(\"Done reading file\", filename)\n \n return [line.strip() for line in lines]", "def _read_conll(cls, input_file):\n #def read_conll(input_file):\n sents = []\n sent, labels = [], []\n for line in open(input_file):\n if line.startswith(\"# sent_id\"):\n current_id = line.strip().split(\" = \")[1]\n elif line.strip() == \"\":\n if len(sent) > 0:\n sents.append((current_id, sent, labels))\n sent, labels = [], []\n else:\n token, label = line.strip().split(\"\\t\")\n sent.append(token)\n labels.append(label)\n return sents", "def read_file(filename):\r\n\r\n print(\"Reading TextFile \" + filename)\r\n text = []\r\n with open(filename, encoding=\"utf8\") as file:\r\n lines = 
file.readlines()\r\n for line in lines:\r\n line = line.strip()\r\n text.append(line)\r\n return text", "def read_txt_file(relative_path_to_txt_file: str):\n with open(file=relative_path_to_txt_file) as f:\n lines = f.read()\n return lines", "def read_text_file(filename):\n try:\n file = open(filename, 'r')\n except:\n print('Cannot read file ' + filename + '. Please check the path', file=sys.stderr)\n sys.exit(1)\n output = []\n \n for line in file:\n line = line.strip().lower()\n output.append(line)\n return output", "def _read_words(filename):\n with tf.gfile.GFile(filename, \"r\") as f:\n return f.read().replace(\"\\n\", \"<eos>\").split()", "def read_article(file_name):\n file = open(file_name, \"r\")\n filedata = file.readlines()\n sentences = [] \n for sentence in filedata:\n print(\"\\n{} text: \\n{}\".format(file_name,sentence))\n sentences.append(sentence.replace(\"[^a-zA-Z]\", \" \").split(\" \")) # filter charachter only\n \n return sentences", "def _read_data(cls, input_file): # 这里是对文件的处理\r\n with open(input_file, encoding='utf-8') as f:\r\n lines = []\r\n\r\n for line in f:\r\n line = json.loads(line)\r\n words = ' '.join(list(line['natural']))\r\n labels = ' '.join(line['tag_seq'])\r\n poss = line['pos_seq']\r\n dps = line['dp_seq']\r\n head = line['head_seq']\r\n lines.append([labels, words, poss, dps, head])\r\n\r\n return lines", "def read(fname):\n with open(fname) as fh:\n for line in fh:\n line = line.strip().split()\n sent = [tuple(x.rsplit(\"|\",1)) for x in line]\n yield sent", "def readlines(filename):\n with open(filename, 'r') as f:\n lines = f.read().splitlines()\n return lines", "def loadFile(fileName) :\n\tlines = []\n\twith open(fileName, 'r') as inFile :\n\t\tfor line in inFile :\n\t\t\tlines.append(line)\n\treturn lines", "def read_lines(filename):\n # Absolute dir the script is in\n script_dir = os.path.dirname(__file__)\n # Join the relative path to the input file\n resource_path = os.path.join(script_dir, '../resources/' + filename)\n # Open the file\n with open(resource_path) as f:\n # Read all lines from the file. Per default we have now strings\n return [line.rstrip() for line in f]", "def load_words_raw(file_path: str) -> List[Word]:\n def parse_line(line: str, frequency: int) -> Word:\n tokens = line.split()\n word = tokens[0]\n vector = v.normalize([float(x) for x in tokens[1:]])\n return Word(word, vector, frequency)", "def read_sentences(snt_file):\n\n sentences = open(snt_file).readlines()\n em = []\n en = []\n i = 0\n\n for line in sentences:\n line = line.split()\n if line[0] != '1':\n em_snt = []\n en_snt = []\n if i % 3 == 1:\n for word in line:\n en_snt += [int(word)]\n en.append(en_snt)\n elif i % 3 == 2:\n for word in line:\n em_snt += [int(word)]\n em.append(em_snt)\n i += 1\n return em, en", "def load_file(self, filename):\n path = os.path.join(self.path_to_sentences, filename)\n log.info('Reading file %s', path)\n _, int_sentence = scipy.io.wavfile.read(path)\n sent = int_sentence.T / np.iinfo(int_sentence.dtype).min\n if self.force_mono and sent.ndim == 2:\n return sent[1]\n else:\n return sent", "def read_lines_from_file(filename):\n with open(filename) as f:\n content = f.readlines()\n\n content = [x.strip() for x in content]\n return content" ]
[ "0.77226895", "0.7688293", "0.7130299", "0.7058475", "0.6907598", "0.67035824", "0.66702956", "0.6655785", "0.6560921", "0.6560921", "0.6558752", "0.65419894", "0.6525789", "0.652241", "0.6511044", "0.65064627", "0.64996123", "0.64872473", "0.64834857", "0.6471241", "0.6470556", "0.64635634", "0.64209294", "0.64094573", "0.64040214", "0.63998556", "0.638316", "0.63501716", "0.63283473", "0.630949", "0.63085157", "0.6307341", "0.63018006", "0.62890375", "0.62804145", "0.6275411", "0.6270963", "0.6255494", "0.625062", "0.6242082", "0.6241248", "0.62323636", "0.62286866", "0.62110305", "0.61959517", "0.6193819", "0.6193288", "0.61842275", "0.61816925", "0.61801946", "0.6173734", "0.6172331", "0.6165649", "0.61431515", "0.612174", "0.6121634", "0.61160505", "0.6115581", "0.61101913", "0.6107708", "0.6104182", "0.61038", "0.60881746", "0.6071393", "0.6069437", "0.60669696", "0.606692", "0.6066894", "0.60659117", "0.6062572", "0.6059186", "0.6046116", "0.60458773", "0.60458475", "0.60419726", "0.6038575", "0.6033454", "0.60251176", "0.60196364", "0.6019043", "0.6017383", "0.60163426", "0.6013195", "0.6008165", "0.6006398", "0.60030085", "0.5982823", "0.5978093", "0.5975966", "0.5961862", "0.5958096", "0.59517586", "0.5946551", "0.5945748", "0.5942077", "0.59386563", "0.5936337", "0.5929756", "0.59262216", "0.5924132", "0.59206617" ]
0.0
-1
Builds a menu entry with its callback function to call when finished
def __init__(self, callback):
        self.callback = callback
        self.selected = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_menu():", "def _createDisplayMenu(ned, menu):\n pass", "def create_menus( self ):", "def main_menu(self):\n menu_string = \"Main menu\\n\"\n menu_string += \"\\t1. Modify a list\\n\"\n menu_string += \"\\t2. Grade submenu\\n\"\n menu_string += \"\\t3. Search for something\\n\"\n menu_string += \"\\t4. Get a statistic\\n\"\n menu_string += \"\\t5. Undo/Redo\\n\"\n menu_string += \"\\t0. Exit\\n\"\n stop = False\n\n while not stop:\n command_list = \\\n {'0': self.__no_command,\n '1': self.__modify_submenu,\n '2': self.__grade_submenu,\n '3': self.__search_submenu,\n '4': self.__statistics_submenu,\n '5': self.__undo_submenu\n }\n command = self.__ui_read_command(menu_string)\n\n if command in command_list.keys():\n if command == '0':\n return\n else:\n command_list[command]()\n\n else:\n print(\"Invalid command!\")", "def main_menu(choice=None):\n while True:\n clear()\n print(dedent(\"\"\"\n WORK log\n What would you like to do? Enter a, b or c.\n a) Add new entry\n b) Search in existing entries\n c) Quit program\"\"\"))\n choice = input(\"> \")\n\n if choice == \"a\":\n work_log.entry_data()\n elif choice == \"b\":\n search()\n elif choice == \"c\":\n print(\"Thanks for using WORK LOG!\")\n # return None\n sys.exit()\n else:\n print(\"Please enter a, b or c.\")\n time.sleep(3)", "def createMenuItem(parent, menu, label, status=None, handler=None, id=-1, kind=wx.ITEM_NORMAL):\n\n if not label:\n menu.AppendSeparator()\n else:\n item = menu.Append(id, label, status, kind)\n if handler:\n parent.Bind(wx.EVT_MENU, handler, item)", "def create_menu(self, root):\n menubar = Menu(root)\n root['menu'] = menubar\n\n menu_file = Menu(menubar)\n menu_run = Menu(menubar)\n menu_folders = Menu(menubar)\n menu_links = Menu(menubar)\n menu_help = Menu(menubar)\n menu_beta = Menu(menubar)\n menubar.add_cascade(menu=menu_file, label='File')\n menubar.add_cascade(menu=menu_run, label='Run')\n menubar.add_cascade(menu=menu_folders, label='Folders')\n menubar.add_cascade(menu=menu_links, label='Links')\n menubar.add_cascade(menu=menu_help, label='Help')\n menubar.add_cascade(menu=menu_beta, label='Experimental')\n\n menu_file.add_command(\n label='Re-load param set', command=self.load_params,\n accelerator='Ctrl+L')\n menu_file.add_command(\n label='Re-save param set', command=self.save_params,\n accelerator='Ctrl+S')\n menu_file.add_command(\n label='Output log', command=lambda: LogWindow(self.root))\n if sys.platform != 'darwin':\n menu_file.add_command(\n label='Exit', command=self.exit_program, accelerator='Alt+F4')\n root.bind_all('<Control-l>', lambda e: self.load_params())\n root.bind_all('<Control-s>', lambda e: self.save_params())\n\n menu_run.add_command(\n label='Dwarf Fortress', command=self.lnp.run_df,\n accelerator='Ctrl+R')\n menu_run.add_command(\n label='Init Editor', command=self.run_init, accelerator='Ctrl+I')\n root.bind_all('<Control-r>', lambda e: self.lnp.run_df())\n root.bind_all('<Control-i>', lambda e: self.run_init())\n\n menu_folders.add_command(\n label='Savegame Folder', command=self.lnp.open_savegames)\n menu_folders.add_command(\n label='Utilities Folder', command=self.lnp.open_utils)\n menu_folders.add_command(\n label='Graphics Folder', command=self.lnp.open_graphics)\n menu_folders.add_separator()\n menu_folders.add_command(\n label='Main Folder', command=self.lnp.open_main_folder)\n menu_folders.add_command(\n label='LNP Folder', command=self.lnp.open_lnp_folder)\n menu_folders.add_command(\n label='Dwarf Fortress Folder', command=self.lnp.open_df_folder)\n 
menu_folders.add_command(\n label='Init Folder', command=self.lnp.open_init_folder)\n\n menu_links.add_command(\n label=\"DF Homepage\", command=self.lnp.open_df_web)\n menu_links.add_command(label=\"DF Wiki\", command=self.lnp.open_wiki)\n menu_links.add_command(label=\"DF Forums\", command=self.lnp.open_forums)\n\n menu_help.add_command(\n label=\"Help\", command=self.show_help, accelerator='F1')\n menu_help.add_command(\n label=\"About\", command=self.show_about, accelerator='Alt+F1')\n root.bind_all('<F1>', lambda e: self.show_help())\n root.bind_all('<Alt-F1>', lambda e: self.show_about())\n root.createcommand('tkAboutDialog', self.show_about)\n\n menu_beta.add_command(\n label='Toggle graphics pack patching', command=self.toggle_patching)", "def __init__(self, gui, text, callback=None, shortcut=None):\r\n menu_text = pi3d.String(font=gui.font, string=text, is_3d=False,\r\n camera=gui.camera, justify='L')\r\n menu_text.set_shader(gui.shader)\r\n super(MenuItem, self).__init__(gui, [menu_text], 0, 0,\r\n callback=callback, shortcut=shortcut)\r\n self.child_menu = None\r\n self.own_menu = None", "def DebugMenuProviderMixin_on_buildUI(self):\n self._DebugMenuProviderMixin_build_menus()\n self._DebugMenuProviderMixin_build_actions() # the actions actually depend on the existance of the menus for this dynamic menu case", "def create_menu_item(menu, label, func):\n item = wx.MenuItem(menu, -1, label)\n menu.Bind(wx.EVT_MENU, func, id=item.GetId())\n menu.Append(item)\n return item", "def _create_menu(self):\n\n self.quit_item.connect('activate', gtk.main_quit, gtk)\n\n self.menu.append(self.quit_item)\n self.status_icon.connect('popup-menu', show_menu, self.menu)", "def create_menu(list_recipes):\n\n title = 'PyVegan - List of Recipes'\n menu = CursesMenu(title, 'Select one and press enter')\n msg = 'This search isn\\'t a valid one'\n\n for recipe in list_recipes:\n recipe_title = clean_title(recipe['post_title'])\n\n if 'post_link' in recipe:\n item = FunctionItem(\n recipe_title,\n url_open,\n args=[recipe['post_link']]\n )\n else:\n item = FunctionItem(recipe_title, lambda x: print(x), args=[msg])\n menu.append_item(item)\n\n return menu", "def main():\n # Add your main code here\n display_menu()\n pass", "def __init__(self, contents, i, o, name=\"Menu\", entry_height=1, default_state=False, append_exit=True):\n self.i = i\n self.o = o\n self.entry_height = entry_height\n self.append_exit = append_exit\n self.name = name\n self.set_contents(contents)\n self.set_display_callback(o.display_data)\n self.generate_keymap()", "def buildKeyframeMenu(*args, **kwargs)->None:\n pass", "def add_specific_menu(self, menu, event, lat, lon): \n add_item = Gtk.MenuItem()\n add_item.show()\n menu.append(add_item)\n add_item = Gtk.MenuItem(label=_(\"Choose and bookmark the new reference family\"))\n add_item.connect(\"activate\", self.selectFamily)\n add_item.show()\n menu.append(add_item)\n return", "def main_menu(self):\n\n clear_screen()\n print('\\nWork Log With Database\\n')\n\n options = {'1': 'Add a new task', '2': 'Find a task', '3': 'Quit'}\n\n for k, v in options.items():\n print(k + \". 
\" + v)\n\n while True:\n print()\n user_choice = input(\"Please enter the number of choice: \").lower().strip()\n\n if user_choice == '1':\n task = self.get_task_info()\n self.task.add_task(task)\n print('Task successfully added')\n self.main_menu()\n elif user_choice == '2':\n search_method_choice = self.search_method_menu()\n self.search_tasks(search_method_choice)\n elif user_choice == '3':\n print(\"\\nExiting Work Logger\")\n exit()\n else:\n print(\"\\nInvalid choice, please try again.\")", "def build_menu(menu, win):\n\tmenubar = Gtk.MenuBar()\n\tfor (name, items) in menu:\n\t\ttop_item = Gtk.MenuItem(name)\n\t\tmenubar.append(top_item)\n\t\ttop_menu = Gtk.Menu()\n\t\ttop_item.set_submenu(top_menu)\n\t\tfor item in items:\n\t\t\tif isinstance(item, base.AbstractAction):\n\t\t\t\tmake_menu_action(item, top_menu, win)\n\t\t\t\tcontinue\n\t\t\telif isinstance(item, base.AbstractVar):\n\t\t\t\tif item.type.is_enum():\n\t\t\t\t\tmake_enum_menu(item, top_menu)\n\t\t\t\t\tcontinue\n\t\t\t\telif item.type.is_standard(bool):\n\t\t\t\t\tmake_checked_menu(item, top_menu)\n\t\t\t\t\tcontinue\n\t\t\terror(\"don't known how to make a menu item with %s\" % item)\t\n\treturn menubar", "def test_add_entry_returns_main_menu(self):\n example_inputs = [\n 'Example Employee',\n '2018-05-01',\n 'Example Task',\n 100,\n 'Example Note'\n ]\n with patch('builtins.input', side_effect=example_inputs):\n returned_menu = self.menu.add_entry()\n\n self.assertEqual(returned_menu, self.menu.main_menu)", "def do_menu(parser, token):\n bits = token.split_contents()\n return RenderMenuNode()", "def init_menu(self):\n\n def select_file():\n \"\"\"Show file explorer to select json file\"\"\"\n filename = filedialog.askopenfilename(\n initialdir=os.getcwd(), title=\"Select Backup file...\",\n filetypes=((\"JSON Files\", \"*.json\"),\n (\"Text Files\", \"*.txt\"),\n (\"All Files\", \"*.*\")))\n self.init_data(filename)\n menu = tk.Menu(self)\n self.config(menu=menu)\n # file_menu = tk.Menu(menu)\n # menu.add_cascade(label=\"File\", menu=file_menu)\n # file_menu.add_command(label=\"Open...\", command=select_file)\n menu.add_command(label=\"Open...\", command=select_file)\n\n def show_submenu():\n subwindow = NewSnapshotScreen(self)\n subwindow.mainloop()\n if subwindow.finished:\n self.init_data(subwindow.target_file.get())\n\n menu.add_command(label=\"New Snapshot...\", command=show_submenu)", "def create_menu_item(\n self,\n menu_name: str,\n item_name: str,\n callback: Callable | None = None,\n shortcut: str | None = None,\n ) -> None:\n self._widget._mgui_create_menu_item(menu_name, item_name, callback, shortcut)", "def main_menu():\n\tprint(\n\"\"\"\nUsage :-\n$ ./todo add \"todo item\" # Add a new todo\n$ ./todo ls # Show remaining todos\n$ ./todo del NUMBER # Delete a todo\n$ ./todo done NUMBER # Complete a todo\n$ ./todo help # Show usage\n$ ./todo report # Statistics\"\"\")", "def OutputMenuItems():\r\n print('''\r\n Menu of Options\r\n 1) Show current data\r\n 2) Add a new item.\r\n 3) Save Data to File\r\n 4) Exit Program\r\n ''')\r\n print() # Add an extra line for looks\r", "def menu(self) -> None:\n\t\tMenuOption = namedtuple(\"MenuOption\",[\"name\",\"function\",\"args\"])\n\t\tmenu_options = []\n\t\tmenu_options.append(MenuOption(\"Create New Character\",self.createNewCharacter,[]))\n\t\tmenu_options.append(MenuOption(\"Load Character from File\",self.loadCharacter,[\"path to input character data file\"]))\n\t\tmenu_options.append(MenuOption(\"Save Character\",self.saveCharacter,[\"path to output 
character data file\"]))\n\t\tmenu_options.append(MenuOption(\"Edit Character (COMING SOON)\",self.editCharacter,[]))\n\n\t\tmenu_options.append(MenuOption(\"Output Character Sheet to PDF\",self.printCharacter,[\"path to output PDF file\"]))\n\t\tmenu_options.append(MenuOption(\"Exit (or type 'exit')\",sys.exit,[]))\n\n\t\tcurrentName = self.sheet.choice_names['Name'] if len(self.sheet.choice_names['Name']) > 0 else \"None\"\n\t\tprint(f\"\\nMAIN MENU\\n---------\\nCurrent character: {currentName}\\nChoose an option:\")\n\t\tselection = chooseOne(menu_options,exit_message=\"Exit character creator?\")[1]\n\t\tif selection == False:\n\t\t\tsys.exit()\n\t\targs = []\n\t\tfor a in selection.args:\n\t\t\targs.append(input(f\"Enter {a}:\\n{PROMPT}\"))\n\t\ttry:\n\t\t\tif not selection.function(*args):\n\t\t\t\tlog.error(f\"Failed to {selection.name}\")\n\t\texcept SystemExit:\n\t\t\tsys.exit()\n\t\texcept Exception as e:\n\t\t\tlog.exception(f\"Failed to {selection.name}\")\n\t\t\tlog.debug(e)\n\t\tself.menu()", "def add_menus_and_status(self):\r\n# Adding the statusbar and menubar\r\n self.statusBar().showMessage(\"\")\r\n menubar = self.menuBar()\r\n# Adding the first menu to the menubar\r\n menu = menubar.addMenu(\"Menu\")\r\n# Adding the first \"choise\" in the menu\r\n contact_action = QAction(\"Contact\", self)\r\n contact_action.setStatusTip(\"Contact PyCrypt\")\r\n contact_action.triggered.connect(self.email)\r\n menu.addAction(contact_action)\r\n# Adding a separator line to the menu\r\n menu.addSeparator()\r\n# Adding the second \"choise\" to the menu\r\n exit_action = QAction(\"Exit\", self) # Create an exit action\r\n exit_action.setStatusTip(\"Click to exit the application\")\r\n exit_action.triggered.connect(self.close) # Close application when clicked\r\n exit_action.setShortcut(\"Ctrl+Q\") # Keyboard shortcut to exit app\r\n menu.addAction(exit_action)", "def Build(self, context, contextCallback=None, parent=None):\n # type: (MenuContext, Optional[Callable], Optional[QtWidgets.QWidget]) -> Optional[QtWidgets.QMenu]\n menu = QtWidgets.QMenu(self.name, parent)\n for action in self.actions:\n action.AddToMenu(menu, context, contextCallback=contextCallback)\n if not menu.isEmpty():\n return menu", "def popUpMenu(callingClassObject,menuRequestingtObject,PopupPoint,menuListString,funcToInvoke,additionalArguments='',iconList = []):\r\n if menuListString == []:\r\n return 0;\r\n Rmnu = QtGui.QMenu(callingClassObject)\r\n for i, itm in enumerate(menuListString):\r\n\r\n newmenuitem = QtGui.QAction(itm, callingClassObject)\r\n\r\n if len(itm)>1 and itm[0]=='|':\r\n itm = itm[1:len(itm)]\r\n newmenuitem.setEnabled(False)\r\n newmenuitem.setText(itm)\r\n\r\n if itm != '':\r\n if len(iconList)>1 and len(iconList)>i:\r\n if iconList[i]<>None:\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(iconList[i]), QtGui.QIcon.Normal, QtGui.QIcon.On)\r\n newmenuitem.setIcon(icon)\r\n\r\n callingClassObject.connect(newmenuitem, QtCore.SIGNAL(\"triggered()\"), lambda passarg=(itm,i,additionalArguments,newmenuitem): funcToInvoke(passarg))\r\n\r\n if itm=='':\r\n Rmnu.addSeparator()\r\n else:\r\n Rmnu.addAction(newmenuitem)\r\n\r\n\r\n PopupPoint.setY(PopupPoint.y() + 30)\r\n PopupPoint.setX(PopupPoint.x() + 5)\r\n Rmnu.exec_(menuRequestingtObject.mapToGlobal(PopupPoint))\r\n del(Rmnu)", "def _build(cls, menu_type):\n if menu_type == MenuType.GAME:\n \"\"\"Build the game menu here\"\"\"\n return MenuModel(\n \"DiamondQuest\",\n TextItem(\"Existing Miner\"),\n ButtonItem(\"\", 
button_type=ButtonType.SCROLL),\n TextItem(\"New Miner\"),\n ButtonItem(\"Enter Name\", button_type=ButtonType.INPUT),\n ButtonItem(\"Music: 10\", button_type=ButtonType.SCROLL),\n ButtonItem(\"Sound: 10\", button_type=ButtonType.SCROLL),\n ButtonItem(\"QUIT\")\n )\n elif menu_type == MenuType.DEV:\n \"\"\"Build the dev menu here\"\"\"\n else:\n return None", "def create_menu_par(self, name, trig_func, menu, shrt_cut):\n\n createdAction = QAction(name, self)\n createdAction.setShortcut(shrt_cut)\n createdAction.triggered.connect(trig_func)\n menu.addAction(createdAction)\n return createdAction", "def create_menu(self, parent):\n menu = QtGui.QMenu(parent=parent)\n return menu.menuAction()", "def get_menus():\n\n pass", "def get_menu(menu_name):\n\n pass", "def callMenu():\n print(\"Menu: \\\n \\n Area of a triangle (enter 'triangleArea') \\\n \\n Area of a square (enter 'squareArea') \\\n \\n Area of a parallelogram (enter 'paraArea') \\\n \\n Area of an ellipse (enter 'ellipseArea')\\\n \\n Area of a circle (enter 'circleArea')\\\n \\n Circumference of a circle (enter 'circleCirc')\\\n \\n Enter 'quit' to quit.\\\n \\n Enter 'menu' to show the menu again.\")", "def build(self, name, opened, entry):\n raise NotImplementedError()", "def add_menu():\n\n def _(*args, **kwargs):\n args = (cast_str(i) if isinstance(i, six.text_type) else i for i in args)\n kwargs = tuple(\n {\n k: cast_str(v) if isinstance(v, six.text_type) else v\n for k, v in kwargs.items()\n }.items()\n )\n return (args, kwargs)\n\n def _auto_comp():\n try:\n comp.Comp().create_nodes()\n except comp.FootageError:\n nuke.message(cast_str(\"请先导入素材\"))\n\n all_menu = [\n {\n _(\"工具\"): [\n {\n _(\"按素材名组装\"): [\n _(\"对当前工程执行\", _auto_comp, icon=\"autocomp.png\"),\n _(\n \"批量执行\",\n lambda: comp.panels.BatchCompPanel().showModalDialog(),\n icon=\"autocomp.png\",\n ),\n _(\n \"设置\",\n lambda: comp.panels.CompConfigPanel().showModalDialog(),\n icon=\"autocomp.png\",\n ),\n ],\n },\n {\n _(\"转换为序列工程\"): [\n _(\"对当前工程执行\", edit.script_use_seq.execute),\n _(\n \"批量执行\",\n lambda: edit.script_use_seq.panels.BatchPanel().showModalDialog(),\n ),\n _(\n \"设置\",\n lambda: edit.script_use_seq.panels.ConfigPanel().showModalDialog(),\n ),\n ]\n },\n ]\n }\n ]\n\n # Add all menu.\n def _add_menu(menu, parent=nuke.menu(cast_str(\"Nuke\"))):\n # type: (..., nuke.Menu) -> None\n assert isinstance(menu, dict)\n\n for k, v in menu.items():\n m = parent.addMenu(*k[0], **dict(k[1]))\n for i in v:\n if i is None:\n _ = m.addSeparator()\n elif isinstance(i, dict):\n _add_menu(i, m)\n elif isinstance(i, tuple):\n _ = m.addCommand(*i[0], **dict(i[1]))\n\n for menu in all_menu:\n _add_menu(menu)", "def main():\n while True:\n clear()\n print('MAIN MENU')\n print('-'*9)\n print(\"\\n-- Options --\")\n for key, value in menu.items():\n print('{}) {}'.format(key, value.__doc__))\n print('Q) QUIT')\n choice = input('\\nAction: ').upper().strip()\n\n if choice == \"A\":\n return add_entry()\n elif choice == \"S\":\n return search_menu()\n elif choice == \"Q\":\n clear()\n return sys.exit()", "def main(self):\n while self.leave_main_menu:\n print(fr.FR[4], fr.FR[5], fr.FR[6], fr.FR[7])\n self.choice_menu = input(fr.FR[8])\n self.main_menu_input()", "def make_menu_action(action, menu, win):\n\n\t# create the item\n\tif action.icon != \"\":\n\t\titem = Gtk.ImageMenuItem(win.ui.get_icon(action.icon),\n\t\t\tlabel=action.label, always_show_image = True, use_stock = True)\n\telse:\n\t\titem = Gtk.MenuItem(action.label)\n\tif action.help != 
\"\":\n\t\titem.set_tooltip_text(action.help)\n\tmenu.append(item)\n\t\n\t# connect the item\n\tobs = MenuObserver(item, action, win)\n\tfor dep in action.get_deps():\n\t\tdep.add_observer(obs)\n\titem.connect(\"activate\", obs.activate)", "def add_to_menu ( self, menu_item ):\r\n pass", "def makeMenu(self):\n\t\tself.fileMenu = self.menuBar().addMenu(self.tr(\"&Arquivo\"))\n\t\tself.fileMenu.addAction(self.newAct)\n\t\tself.fileMenu.addAction(self.openAct)\n\t\tself.fileMenu.addAction(self.saveAct)\n\t\tself.fileMenu.addAction(self.exportAct)\n\t\tself.fileMenu.addSeparator() \n\t\tself.fileMenu.addAction(self.exitAct)\n\n\t\tself.editMenu = self.menuBar().addMenu(self.tr(\"&Editar\"))\n\t\t\n\t\tself.helpMenu = self.menuBar().addMenu(self.tr(\"&Ajuda\"))\n\t\tself.helpMenu.addAction(self.aboutAct)", "def main(self):\n\n while True:\n print('Main Menu:')\n user_input = input('What would you like to do? (C)reate new record or (L)ookup existing? ').lower().strip()\n self.check_input(user_input)\n\n if user_input == 'c':\n print('Great! Let\\'s create a new log entry!\\n')\n self.create_entry()\n elif user_input == 'l':\n print('Awesome! Let\\'s look up some entries!\\n')\n self.lookup_entry()", "def build_entry(var, win):\n\tif var.type.is_range():\n\t\tentry = build_range_entry(var, win)\n\telif var.type.is_enum():\n\t\tentry = build_enum_entry(var, win)\n\telse:\n\t\tentry = Gtk.Label(var.label)\n\tif var.help != \"\":\n\t\tentry.set_tooltip_text(var.help)\n\treturn entry", "def _create_context_menu(self):\n self.menu = Gtk.Menu()\n delete_menu = Gtk.MenuItem(\"Delete Task\")\n self.menu.append(delete_menu)", "def create_menu_item(menu, label, func, id=None, help=\"\", kind=wx.ITEM_NORMAL, bind_to=None):\n if id is None:\n id = wx.ID_ANY\n item = wx.MenuItem(menu, id, label, help, kind)\n if bind_to == None: # bind to the menu by default\n menu.Bind(wx.EVT_MENU, func, id=item.GetId())\n else:\n bind_to.Bind(wx.EVT_MENU, func, id=item.GetId())\n menu.Append(item)\n return item", "def _add_color_menu(self):\n print 'adding color menu'\n self.menuBar.addcascademenu('Color', 'Color Atoms'); \n c_lambda = lambda: self.color_wireframe('cpk');\n self.menuBar.addmenuitem('Color Atoms','command','Color wireframes cpk', command=c_lambda, label='cpk')\n c_lambda = lambda: self.color_wireframe('type');\n self.menuBar.addmenuitem('Color Atoms','command','Color wireframes by type', command=c_lambda, label='type')\n c_lambda = lambda: self.color_wireframe('chain');\n self.menuBar.addmenuitem('Color Atoms','command','color wireframes by chain', command=c_lambda, label='chain')\n c_lambda = lambda: self.color_wireframe('hydrogen_type');\n self.menuBar.addmenuitem('Color Atoms','command','color wireframes by H type', command=c_lambda, label='H Type')\n \n self.menuBar.addcascademenu('Color', 'Color Trace')\n self.menuBar.addmenuitem('Color Trace','command','Color tubes by secondary', command=self.color_trace_by_secondary,label='secondary')\n self.menuBar.addmenuitem('Color Trace','command','Color tubes by type', command=self.color_tubes_type,label='type')\n self.menuBar.addmenuitem('Color Trace','command','Color tubes by chain', command=self.color_tubes_chain,label='chain')\n\n self.menuBar.addcascademenu('Color', 'Color Volumes')\n self.menuBar.addmenuitem('Color Volumes','command','Color volumes cpk', command=self.color_volumes_cpk,label='cpk')\n self.menuBar.addmenuitem('Color Volumes','command','Color volumes by type', command=self.color_volumes_type,label='type')\n self.menuBar.addmenuitem('Color 
Volumes','command','Color volumes by chain', command=self.color_volumes_chain,label='chain')\n\n # create menu items for .features keys for atoms and residues\n if self.system != 'None' and self.system != None:\n key_store = {}\n key_store['atom'] = self.system.ProteinList[0].atoms[0].features.keys()\n key_store['residue'] = self.system.ProteinList[0].residues[0].features.keys()\n for run_type in ['atom', 'residue']:\n broken = 0\n for key in key_store[run_type]:\n for pol in self.system.ProteinList:\n if key == 'domain':\n self.print_domain_info(pol)\n normalized = 1\n # if the feature includes non-digits, pass. if it is all digits, see if \n # it is normalized\n if run_type == 'atom':\n item_list = pol.atoms\n elif run_type == 'residue':\n item_list = pol.residues\n same_val_count = 0\n try:\n item_list[0].features[key]\n except KeyError:\n continue\n else:\n first_val = item_list[0].features[key]\n for item in item_list:\n try:\n feature = item.features[key]\n except KeyError:\n print 'key error on %s, breaking'%(key)\n broken = 1\n break\n try:\n int(feature)\n except ValueError:\n print '%s not digit, breaking'%(feature)\n broken = 1\n break\n else:\n if feature != -1 and (feature < 0.0 or feature > 1.0):\n normalized = 0\n if feature == first_val:\n same_val_count += 1\n if same_val_count == len(item_list):\n print '%s all the same value; breaking'%(key)\n broken = 1\n break\n if key == 'domain':\n if item.features[key] == 0.0:\n item.features[key] = -1\n else:\n # if not normalized, make a new key called key+'_normalized', and swap the old\n # key with the new key to color by it\n old_key = copy.copy(key)\n if not normalized and (key+'_normalized' not in item.features.keys()):\n min_f = 1000000\n max_f = -1000000\n for item2 in item_list:\n feature = item2.features[key]\n if feature != -1:\n if feature < min_f:\n min_f = feature\n if feature > max_f:\n max_f = feature\n key = key + '_normalized'\n for item2 in item_list:\n if item2.features[old_key] != -1.0:\n d = (item2.features[old_key]-min_f) / (max_f-min_f+0.0)\n item2.features[key] = d\n else:\n item2.features[key] = -1.0\n if run_type == 'residue':\n c_lambda1 = lambda p=pol, k=key: self.color_trace_by_residue_feature(p, k)\n self.menuBar.addmenuitem('Color Trace','command','Color trace by res '+key, command=c_lambda1, label='%s %s'%(pol.chain_name, key))\n c_lambda2 = lambda p=pol, k=key: self.color_volume_by_residue_feature(p, k)\n self.menuBar.addmenuitem('Color Volumes','command','Color volumes by res '+key, command=c_lambda2, label='%s %s'%(pol.chain_name, key))\n c_lambda3 = lambda p=pol, k=key: self.color_atoms_by_residue_feature(p, k)\n self.menuBar.addmenuitem('Color Atoms','command','Color atoms by res '+key, command=c_lambda3, label='%s %s'%(pol.chain_name, key))\n elif run_type == 'atom':\n c_lambda1 = lambda p=pol, k=key: self.color_trace_by_atom_feature(p, k)\n self.menuBar.addmenuitem('Color Trace','command','Color trace by atom '+key, command=c_lambda1, label='%s %s'%(pol.chain_name, key))\n c_lambda2 = lambda p=pol, k=key: self.color_volume_by_atom_feature(p, k)\n self.menuBar.addmenuitem('Color Volumes','command','Color volumes by atom '+key, command=c_lambda2, label='%s %s'%(pol.chain_name, key))\n c_lambda3 = lambda p=pol, k=key: self.color_atoms_by_atom_feature(p, k)\n self.menuBar.addmenuitem('Color Atoms','command','Color atoms by atom '+key, command=c_lambda3, label='%s %s'%(pol.chain_name, key))\n key = old_key\n #broken = 1\n #break\n if broken:\n break", "def run():\n objMenu = 
MainMenu(os.path.join(RESOURCES, 'main_menu.json'))\n print objMenu.run()", "def add_menu(menu_name, parent_menu=None, tear_off=True, icon='', **kwargs):\n\n pass", "def AppendToMenu(self,menu,window,data):\r\n menu.AppendSeparator()", "def buildBookmarkMenu(*args, editor: AnyStr=\"\", type: AnyStr=\"\", **kwargs)->None:\n pass", "def _build_menu_command(self, cmd):\n if COMMAND_CHAR[cmd]:\n return COMMAND_CHAR[cmd]+self._newline\n else:\n raise InstrumentProtocolException(\"Unknown command character for %s\" % cmd)", "def initMenu(self, mainMenu):\n menu = QMenu(self.menuTitle(), mainMenu)\n menu.setIcon(UI.PixmapCache.getIcon(\"vcsUpdate.png\"))\n menu.setTearOffEnabled(True)\n \n menu.addAction(self.hgFetchAct)\n \n menu.aboutToShow.connect(self.__aboutToShowMenu)\n \n return menu", "def __init__(self, root, menu_callback):\n\n def setup_callback():\n \"\"\"\n callback to pass in to executive for calling to re-show setup window after game end\n \"\"\"\n self.setup_window.deiconify()\n\n # creates executive instance\n self.root = root\n self.exec = Executive(root, setup_callback)\n self.menu_callback = menu_callback\n\n # creates setup window and centers it\n self.setup_window = Tk.Toplevel(root)\n self.setup_window.geometry(\"200x200\")\n center_window(self.setup_window)\n\n bg = \"indianred2\"\n\n # configures setup window title and title\n self.setup_window.title(\"Setup Board\")\n self.setup_window.configure(bg=bg, bd=10, relief=\"ridge\", pady=32)\n self.setup_window.resizable(width=False, height=False)\n\n # create function to call in order to move back to main menu\n def return_to_menu():\n self.setup_window.destroy()\n self.menu_callback()\n\n # define window close, escape and return key bindings\n self.setup_window.protocol(\"WM_DELETE_WINDOW\", return_to_menu)\n self.setup_window.bind(\"<Escape>\", lambda _: return_to_menu())\n self.setup_window.bind(\"<Return>\", lambda _: self.validate())\n\n # create width text & entry\n width_text = Tk.Label(self.setup_window, text=\"Width\", bg=bg)\n self.width_input = Tk.Entry(self.setup_window, width=12, highlightbackground=bg) #, textvariable=self.width_val)\n\n width_text.grid(row=0, column=0, sticky=Tk.E)\n self.width_input.grid(row=0, column=1)\n\n # create height text & entry\n height_text = Tk.Label(self.setup_window, text=\"Height\", bg=bg)\n self.height_input = Tk.Entry(self.setup_window, width=12, highlightbackground=bg) #, textvariable=self.height_val)\n\n height_text.grid(row=1, column=0, sticky=Tk.E)\n self.height_input.grid(row=1, column=1)\n\n # create mines text & entry\n mines_text = Tk.Label(self.setup_window, text=\"Mines\", bg=bg)\n self.mines_input = Tk.Entry(self.setup_window, width=12, highlightbackground=bg) #, textvariable=self.mines_val)\n\n mines_text.grid(row=2, column=0, sticky=Tk.E)\n self.mines_input.grid(row=2, column=1)\n\n # button that starts the game, or displays an error if the dimensions are invalid\n begin = Tk.Button(self.setup_window, text=\"Begin Game\", command=self.validate, highlightbackground=bg)\n begin.grid(row=3, column=0, columnspan=2)", "def main_menu ( self ):\n\t\tif self.style == 'qt':\n\t\t\tp = Process( target=self.qt_main_menu )\n\t\t\tp.start()\n\t\t\tself.menus.append( p )", "def menu(update, context):\n\n update_message_text = update.callback_query.edit_message_text if update.callback_query else update.message.reply_text\n update_message_text(\n text='Please choose an option.',\n reply_markup=InlineKeyboardMarkup([\n [\n InlineKeyboardButton('Author Details', 
callback_data='details'),\n InlineKeyboardButton('Help', callback_data='help'),\n ],\n [\n InlineKeyboardButton('Linkedin Profile', url=Config.OWNER_WEBSITE),\n InlineKeyboardButton('Github repo', url=Config.GITHUB_REPO_URL),\n ],\n [\n InlineKeyboardButton('Download CV', url=Config.DOWNLOAD_CV_URL)\n ]\n ]),\n )", "def menu(*args, allowOptionBoxes: bool=True, defineTemplate: AnyStr=\"\", deleteAllItems:\n bool=True, docTag: Union[AnyStr, bool]=\"\", enable: bool=True, exists: bool=True,\n familyImage: Union[AnyStr, bool]=\"\", helpMenu: bool=True, itemArray: bool=True, label:\n Union[AnyStr, bool]=\"\", ltVersion: Union[AnyStr, bool]=\"\", mnemonic: Union[AnyStr,\n bool]=\"\", numberOfItems: bool=True, parent: AnyStr=\"\", postMenuCommand: Script=None,\n postMenuCommandOnce: bool=True, scrollable: bool=True, tearOff: bool=True,\n useTemplate: AnyStr=\"\", version: Union[AnyStr, bool]=\"\", visible: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def buildMenuBar(parent, menus):\n\n menubar = wx.MenuBar()\n\n for menu_data in menus:\n label, data = menu_data\n menubar.Append(createMenu(parent, data), label)\n\n return menubar", "def init_filemenu(self):\n self.menubar[\"filemenu\"] = Menu(self.menubar[\"menubar\"], tearoff=0)\n self.menubar[\"filemenu\"].add_command(label=\"New\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Open\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Save\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Save as...\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Close\", command=todo)\n self.menubar[\"filemenu\"].add_separator()\n self.menubar[\"menubar\"].add_cascade(\n label=\"File\", menu=self.menubar[\"filemenu\"])", "def popUpMenu(self, menuRequestingtObject, PopupPoint, menuListString, funcToInvoke, additionalArguments='', iconList = []):\r\n if menuListString == []:\r\n return 0;\r\n Rmnu = QtWidgets.QMenu(self.CallingUI)\r\n for i, itm in enumerate(menuListString):\r\n\r\n newmenuitem = QtWidgets.QAction(itm, self.CallingUI)\r\n #newmenuitem\r\n\r\n if len(itm)>1 and itm[0]=='|':\r\n itm = itm[1:len(itm)]\r\n newmenuitem.setEnabled(False)\r\n newmenuitem.setText(itm)\r\n #var = QtCore.QVariant()\r\n\r\n\r\n\r\n if itm != '':\r\n if len(iconList)>1 and len(iconList)>i:\r\n if iconList[i]!=None:\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(iconList[i]), QtGui.QIcon.Normal, QtGui.QIcon.On)\r\n newmenuitem.setIcon(icon)\r\n\r\n #self.CallingUI.connect(newmenuitem, QtCore.SIGNAL(\"triggered()\"), lambda passarg=(itm,i,additionalArguments,newmenuitem): funcToInvoke(passarg))\r\n newmenuitem.triggered.connect(lambda passarg=([itm,i,additionalArguments,newmenuitem]): funcToInvoke(passarg))\r\n newmenuitem.setData(PopupPoint)\r\n\r\n if itm=='':\r\n Rmnu.addSeparator()\r\n else:\r\n Rmnu.addAction(newmenuitem)\r\n\r\n\r\n PopupPoint.setY(PopupPoint.y())\r\n PopupPoint.setX(PopupPoint.x())\r\n Rmnu.exec_(menuRequestingtObject.mapToGlobal(PopupPoint))\r\n del(Rmnu)", "def menuItem(*args, allowOptionBoxes: bool=True, annotation: Union[AnyStr, bool]=\"\", boldFont:\n bool=False, checkBox: bool=True, collection: Union[AnyStr, bool]=\"\", command:\n Union[Script, bool]=None, data: Union[int, bool]=0, defineTemplate: AnyStr=\"\",\n divider: bool=True, dividerLabel: Union[AnyStr, bool]=\"\", docTag: Union[AnyStr,\n bool]=\"\", dragDoubleClickCommand: Union[Script, bool]=None, dragMenuCommand:\n Union[Script, bool]=None, echoCommand: bool=True, 
enable: bool=True,\n enableCommandRepeat: bool=True, exists: bool=True, familyImage: Union[AnyStr,\n bool]=\"\", image: Union[AnyStr, bool]=\"\", imageOverlayLabel: Union[AnyStr, bool]=\"\",\n insertAfter: AnyStr=\"\", isCheckBox: bool=True, isOptionBox: bool=True,\n isRadioButton: bool=True, italicized: bool=False, label: Union[AnyStr, bool]=\"\",\n longDivider: bool=True, ltVersion: Union[AnyStr, bool]=\"\", optionBox: bool=True,\n optionBoxIcon: Union[AnyStr, bool]=\"\", parent: AnyStr=\"\", postMenuCommand:\n Union[Script, bool]=None, postMenuCommandOnce: bool=True, radialPosition:\n Union[AnyStr, bool]=\"\", radioButton: bool=True, runTimeCommand: AnyStr=\"\",\n sourceType: Union[AnyStr, bool]=\"\", subMenu: bool=True, tearOff: bool=True,\n useTemplate: AnyStr=\"\", version: Union[AnyStr, bool]=\"\", visible: bool=True,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def menuEditor(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, cellHeight: Union[int, bool]=0, cellWidth: Union[int,\n bool]=0, cellWidthHeight: List[int, int]=None, checkBoxPresent: List[bool,\n AnyStr, int]=None, checkBoxState: List[bool, AnyStr, int]=None, childArray:\n bool=True, command: Union[List[AnyStr, AnyStr, int], bool]=None, defineTemplate:\n AnyStr=\"\", delete: List[AnyStr, int]=None, docTag: Union[AnyStr, bool]=\"\",\n dragCallback: Script=None, dropCallback: Script=None, enable: bool=True,\n enableBackground: bool=True, enableKeyboardFocus: bool=True, exists: bool=True,\n fullPathName: bool=True, height: Union[int, bool]=0, highlightColor:\n Union[List[float, float, float], bool]=None, iconMenuCallback: AnyStr=\"\", image:\n Union[List[AnyStr, AnyStr, int], bool]=None, isObscured: bool=True, label:\n Union[List[AnyStr, AnyStr, int], bool]=None, manage: bool=True, menuItemTypes:\n bool=True, noBackground: bool=True, numberOfChildren: bool=True,\n numberOfPopupMenus: bool=True, optionBoxCommand: Union[List[AnyStr, AnyStr, int],\n bool]=None, optionBoxPresent: List[bool, AnyStr, int]=None, parent: Union[AnyStr,\n bool]=\"\", popupMenuArray: bool=True, preventOverride: bool=True,\n radioButtonPresent: List[bool, AnyStr, int]=None, radioButtonState: List[bool,\n AnyStr, int]=None, separator: Union[List[AnyStr, int], bool]=None,\n statusBarMessage: AnyStr=\"\", style: Union[AnyStr, bool]=\"\", subMenuAt:\n List[AnyStr, int]=None, subMenuEditorWindow: AnyStr=\"\", subMenuEditorsOpen:\n bool=True, subMenuOf: List[AnyStr, AnyStr, int]=None, topLevelMenu: Union[AnyStr,\n bool]=\"\", useTemplate: AnyStr=\"\", visible: bool=True, visibleChangeCommand:\n Union[Script, bool]=None, width: Union[int, bool]=0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def _addMenu(self):\n self.action = QAction(QIcon(), 'WakaTime', self)\n self.action.triggered.connect(self._promptForApiKey)\n fileMenu = e5App().getObject('UserInterface').getMenu('file')\n fileMenu.addAction(self.action)", "def initMenu(self, menu):\n menu.clear()\n \n self.subMenus = []\n \n adminMenu = QMenu(self.tr(\"Administration\"), menu)\n adminMenu.setTearOffEnabled(True)\n adminMenu.addAction(self.gitShowConfigAct)\n adminMenu.addAction(self.gitRepoConfigAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitReflogBrowserAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitCreateIgnoreAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitCreateArchiveAct)\n adminMenu.addSeparator()\n 
adminMenu.addAction(self.gitStatisticsAct)\n adminMenu.addAction(self.gitVerifyAct)\n adminMenu.addAction(self.gitHouseKeepingAct)\n self.subMenus.append(adminMenu)\n \n bundleMenu = QMenu(self.tr(\"Bundle Management\"), menu)\n bundleMenu.setTearOffEnabled(True)\n bundleMenu.addAction(self.gitBundleAct)\n bundleMenu.addSeparator()\n bundleMenu.addAction(self.gitBundleVerifyAct)\n bundleMenu.addAction(self.gitBundleListHeadsAct)\n bundleMenu.addSeparator()\n bundleMenu.addAction(self.gitBundleApplyFetchAct)\n bundleMenu.addAction(self.gitBundleApplyPullAct)\n self.subMenus.append(bundleMenu)\n \n patchMenu = QMenu(self.tr(\"Patch Management\"), menu)\n patchMenu.setTearOffEnabled(True)\n patchMenu.addAction(self.gitCheckPatchesAct)\n patchMenu.addAction(self.gitApplyPatchesAct)\n patchMenu.addSeparator()\n patchMenu.addAction(self.gitShowPatcheStatisticsAct)\n self.subMenus.append(patchMenu)\n \n bisectMenu = QMenu(self.tr(\"Bisect\"), menu)\n bisectMenu.setTearOffEnabled(True)\n bisectMenu.addAction(self.gitBisectStartAct)\n bisectMenu.addAction(self.gitBisectStartExtendedAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectGoodAct)\n bisectMenu.addAction(self.gitBisectBadAct)\n bisectMenu.addAction(self.gitBisectSkipAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectResetAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectLogBrowserAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectCreateReplayAct)\n bisectMenu.addAction(self.gitBisectEditReplayAct)\n bisectMenu.addAction(self.gitBisectReplayAct)\n self.subMenus.append(bisectMenu)\n \n tagsMenu = QMenu(self.tr(\"Tags\"), menu)\n tagsMenu.setIcon(UI.PixmapCache.getIcon(\"vcsTag.png\"))\n tagsMenu.setTearOffEnabled(True)\n tagsMenu.addAction(self.vcsTagAct)\n tagsMenu.addAction(self.gitTagListAct)\n tagsMenu.addAction(self.gitDescribeTagAct)\n self.subMenus.append(tagsMenu)\n \n branchesMenu = QMenu(self.tr(\"Branches\"), menu)\n branchesMenu.setIcon(UI.PixmapCache.getIcon(\"vcsBranch.png\"))\n branchesMenu.setTearOffEnabled(True)\n branchesMenu.addAction(self.gitBranchAct)\n branchesMenu.addSeparator()\n branchesMenu.addAction(self.gitBranchListAct)\n branchesMenu.addAction(self.gitMergedBranchListAct)\n branchesMenu.addAction(self.gitNotMergedBranchListAct)\n branchesMenu.addAction(self.gitShowBranchAct)\n branchesMenu.addSeparator()\n branchesMenu.addAction(self.gitDeleteRemoteBranchAct)\n self.subMenus.append(branchesMenu)\n \n changesMenu = QMenu(self.tr(\"Manage Changes\"), menu)\n changesMenu.setTearOffEnabled(True)\n changesMenu.addAction(self.gitUnstageAct)\n changesMenu.addAction(self.vcsRevertAct)\n changesMenu.addAction(self.vcsMergeAct)\n changesMenu.addAction(self.gitCommitMergeAct)\n changesMenu.addAction(self.gitCancelMergeAct)\n \n remotesMenu = QMenu(self.tr(\"Remote Repositories\"), menu)\n remotesMenu.setTearOffEnabled(True)\n remotesMenu.addAction(self.gitRemotesShowAct)\n remotesMenu.addAction(self.gitRemoteShowAct)\n remotesMenu.addSeparator()\n remotesMenu.addAction(self.gitRemoteAddAct)\n remotesMenu.addAction(self.gitRemoteRenameAct)\n remotesMenu.addAction(self.gitRemoteChangeUrlAct)\n remotesMenu.addAction(self.gitRemoteCredentialsAct)\n remotesMenu.addAction(self.gitRemoteRemoveAct)\n remotesMenu.addAction(self.gitRemotePruneAct)\n \n cherrypickMenu = QMenu(self.tr(\"Cherry-pick\"), menu)\n cherrypickMenu.setIcon(UI.PixmapCache.getIcon(\"vcsGraft.png\"))\n cherrypickMenu.setTearOffEnabled(True)\n 
cherrypickMenu.addAction(self.gitCherryPickAct)\n cherrypickMenu.addAction(self.gitCherryPickContinueAct)\n cherrypickMenu.addAction(self.gitCherryPickQuitAct)\n cherrypickMenu.addAction(self.gitCherryPickAbortAct)\n \n stashMenu = QMenu(self.tr(\"Stash\"), menu)\n stashMenu.setTearOffEnabled(True)\n stashMenu.addAction(self.gitStashAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashBrowserAct)\n stashMenu.addAction(self.gitStashShowAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashApplyAct)\n stashMenu.addAction(self.gitStashPopAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashBranchAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashDropAct)\n stashMenu.addAction(self.gitStashClearAct)\n \n submodulesMenu = QMenu(self.tr(\"Submodules\"), menu)\n submodulesMenu.setTearOffEnabled(True)\n submodulesMenu.addAction(self.gitSubmoduleAddAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesInitAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateInitAct)\n submodulesMenu.addAction(self.gitSubmodulesDeinitAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesUpdateAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateRemoteAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateOptionsAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesSyncAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesListAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesStatusAct)\n submodulesMenu.addAction(self.gitSubmodulesSummaryAct)\n \n act = menu.addAction(\n UI.PixmapCache.getIcon(\n os.path.join(\"VcsPlugins\", \"vcsGit\", \"icons\", \"git.png\")),\n self.vcs.vcsName(), self._vcsInfoDisplay)\n font = act.font()\n font.setBold(True)\n act.setFont(font)\n menu.addSeparator()\n \n menu.addAction(self.gitFetchAct)\n menu.addAction(self.gitPullAct)\n menu.addSeparator()\n menu.addAction(self.vcsCommitAct)\n menu.addAction(self.gitPushAct)\n menu.addSeparator()\n menu.addMenu(changesMenu)\n menu.addMenu(stashMenu)\n menu.addSeparator()\n menu.addMenu(cherrypickMenu)\n menu.addSeparator()\n menu.addMenu(bundleMenu)\n menu.addMenu(patchMenu)\n menu.addSeparator()\n menu.addMenu(remotesMenu)\n menu.addMenu(submodulesMenu)\n menu.addSeparator()\n menu.addMenu(tagsMenu)\n menu.addMenu(branchesMenu)\n menu.addSeparator()\n menu.addAction(self.gitLogBrowserAct)\n menu.addSeparator()\n menu.addAction(self.vcsStatusAct)\n menu.addSeparator()\n menu.addAction(self.vcsDiffAct)\n menu.addAction(self.gitExtDiffAct)\n menu.addSeparator()\n menu.addAction(self.vcsSwitchAct)\n menu.addSeparator()\n menu.addMenu(bisectMenu)\n menu.addSeparator()\n menu.addAction(self.vcsCleanupAct)\n menu.addSeparator()\n menu.addAction(self.vcsCommandAct)\n menu.addSeparator()\n menu.addMenu(adminMenu)\n menu.addSeparator()\n menu.addAction(self.gitEditUserConfigAct)\n menu.addAction(self.gitConfigAct)\n menu.addSeparator()\n menu.addAction(self.vcsNewAct)\n menu.addAction(self.vcsExportAct)", "def create_menu(self):\n about = gtk.ImageMenuItem(gtk.STOCK_ABOUT)\n about.connect_object('activate', self.about, 'about')\n about.show()\n\n# prefs = gtk.ImageMenuItem(gtk.STOCK_PREFERENCES)\n# prefs.connect_object('activate', self.prefs, 'prefs')\n# prefs.show()\n\n quit = gtk.ImageMenuItem(gtk.STOCK_QUIT)\n quit.connect_object('activate', self.exit, 'quit')\n quit.show()\n\n menu = gtk.Menu()\n menu.append(about)\n# menu.append(prefs)\n 
menu.append(quit)\n return menu", "def menuItem(*args):\n\toptionsWindow()", "def create_menu(self: object) -> None:\n menubar = Menu(self)\n menuFile = Menu(menubar, tearoff=0)\n menubar.add_cascade(label=\"Menu\", menu=menuFile)\n menuFile.add_command(label=\"Choose a file\", command=self.open_file,\n accelerator=\"Ctrl+o\")\n menuFile.add_command(label=\"About\", command=self.about)\n self.bind_all(\"<Control-o>\", lambda e: self.open_file())\n self.config(menu=menubar)", "def build_item_menu(self, items):\n\n menu = QtWidgets.QMenu(self)\n\n # update to latest version\n def _on_update_to_latest(items):\n for item in items:\n api.update(item, -1)\n self.data_changed.emit()\n\n update_icon = qta.icon(\"fa.angle-double-up\", color=DEFAULT_COLOR)\n updatetolatest_action = QtWidgets.QAction(update_icon,\n \"Update to latest\",\n menu)\n updatetolatest_action.triggered.connect(\n lambda: _on_update_to_latest(items))\n\n # set version\n set_version_icon = qta.icon(\"fa.hashtag\", color=DEFAULT_COLOR)\n set_version_action = QtWidgets.QAction(set_version_icon,\n \"Set version\",\n menu)\n set_version_action.triggered.connect(\n lambda: self.show_version_dialog(items))\n\n # switch asset\n switch_asset_icon = qta.icon(\"fa.sitemap\", color=DEFAULT_COLOR)\n switch_asset_action = QtWidgets.QAction(switch_asset_icon,\n \"Switch Asset\",\n menu)\n switch_asset_action.triggered.connect(\n lambda: self.show_switch_dialog(items))\n\n # remove\n remove_icon = qta.icon(\"fa.remove\", color=DEFAULT_COLOR)\n remove_action = QtWidgets.QAction(remove_icon, \"Remove items\", menu)\n remove_action.triggered.connect(\n lambda: self.show_remove_warning_dialog(items))\n\n # go back to flat view\n if self._hierarchy_view:\n back_to_flat_icon = qta.icon(\"fa.list\", color=DEFAULT_COLOR)\n back_to_flat_action = QtWidgets.QAction(back_to_flat_icon,\n \"Back to Full-View\",\n menu)\n back_to_flat_action.triggered.connect(self.leave_hierarchy)\n\n # send items to hierarchy view\n enter_hierarchy_icon = qta.icon(\"fa.indent\", color=\"#d8d8d8\")\n enter_hierarchy_action = QtWidgets.QAction(enter_hierarchy_icon,\n \"Cherry-Pick (Hierarchy)\",\n menu)\n enter_hierarchy_action.triggered.connect(\n lambda: self.enter_hierarchy(items))\n\n # expand all items\n expandall_action = QtWidgets.QAction(menu, text=\"Expand all items\")\n expandall_action.triggered.connect(self.expandAll)\n\n # collapse all items\n collapse_action = QtWidgets.QAction(menu, text=\"Collapse all items\")\n collapse_action.triggered.connect(self.collapseAll)\n\n # add the actions\n has_selection = len(items)\n\n if has_selection:\n menu.addAction(updatetolatest_action)\n menu.addAction(set_version_action)\n menu.addAction(switch_asset_action)\n\n menu.addSeparator()\n menu.addAction(remove_action)\n\n menu.addSeparator()\n\n # These two actions should be able to work without selection\n menu.addAction(expandall_action)\n menu.addAction(collapse_action)\n\n custom_actions = self.get_custom_actions(containers=items)\n if custom_actions:\n submenu = QtWidgets.QMenu(\"Actions\", self)\n for action in custom_actions:\n\n color = action.color or DEFAULT_COLOR\n icon = qta.icon(\"fa.%s\" % action.icon, color=color)\n action_item = QtWidgets.QAction(icon, action.label, submenu)\n action_item.triggered.connect(\n partial(self.process_custom_action, action, items))\n\n submenu.addAction(action_item)\n\n menu.addMenu(submenu)\n\n if has_selection:\n menu.addAction(enter_hierarchy_action)\n\n if self._hierarchy_view:\n menu.addAction(back_to_flat_action)\n\n return 
menu", "def build_menuable_items(self):\n cols = []\n for bundle in app.bundles:\n bundle_metadata = bundle['Meta']['bundle-metadata']\n try:\n conjure_data = bundle['Meta']['extra-info/conjure-up']\n name = conjure_data.get('friendly-name',\n bundle['Meta']['id']['Name'])\n except KeyError:\n name = bundle['Meta']['id']['Name']\n self.fname_id_map[name] = bundle\n cols.append(\n Columns(\n [\n (\"weight\", 0.2, Color.body(\n menu_btn(label=name,\n on_press=self.done),\n focus_map=\"menu_button focus\")),\n (\"weight\", 0.3, Text(\n bundle_metadata.get('Description',\n 'Needs a description'),\n align=\"left\"))\n ],\n dividechars=1\n )\n )\n cols.append(Padding.line_break(\"\"))\n return Pile(cols)", "def create_menu():\n MenuData = [\n (\"&Draw Variables\",drawable.ask),\n (\"&Show Variables\",printall),\n (\"&Print Variables\",printval),\n (\"&Edit Variable\",edit),\n (\"&Rename Variable\",rename),\n (\"&Forget Variables\",forget),\n (\"---\",None),\n (\"&Create Plane\",\n [(\"Coordinates\", \n [(\"Point and normal\", createPlaneCoordsPointNormal),\n (\"Three points\", createPlaneCoords3Points),\n ]), \n (\"Visually\", \n [(\"Three points\", createPlaneVisual3Points),\n ]),\n ]),\n (\"&Select Plane\",planes.ask),\n (\"&Draw Selection\",planes.draw),\n (\"&Forget Selection\",planes.forget),\n (\"---\",None),\n (\"&Pick Actors\",pick_actors),\n (\"&Pick Elements\",pick_elements),\n (\"&Pick Points\",pick_points),\n (\"&Pick Edges\",pick_edges),\n (\"---\",None),\n ('&Selection',\n [('&Create Report',report_selection),\n ('&Set Property',setprop_selection),\n ('&Grow',grow_selection),\n ('&Partition',partition_selection),\n ('&Get Partition',get_partition),\n ('&Export',export_selection),\n ]),\n (\"---\",None),\n ('&Query',\n [('&Actors',query_actors),\n ('&Elements',query_elements),\n ('&Points',query_points),\n ('&Edges',query_edges),\n ('&Distances',query_distances),\n ]),\n (\"---\",None),\n (\"&Close\",close_menu),\n ]\n return widgets.Menu('Tools',items=MenuData,parent=GD.gui.menu,before='help')", "def display_menu(self):\n print(\"\\n{}\".format(self.message))\n for i, h in enumerate(self.menu_handlers):\n # iterate through handlers and display menu text\n print(\"\\t{}. {}\".format(i+1, h.get_display_text()))\n # add option for exiting the program\n print(\"\\t{}. {}\".format(0, \"Exit\"))", "def main_menu():\n choice = None\n\n while choice != 'q':\n print(\"Enter 'q' to quit.\")\n for key, value in menu.items():\n print(\"{}) {}\".format(key, value.__doc__))\n choice = input(\"Action: \").lower().strip()\n clear_screen()\n\n if choice in menu:\n menu[choice]()\n\n print(\"Good Bye!\")", "def submenu2():\n \n j = ''\n while j == '':\n print('\\nS U B M E N U 2')\n print('1. SEQRES sequence')\n print('2. Coordinate sequence')\n print('3. Alignment sequence')\n print('b. Back')\n print('q. Quit')\n option = input('Select an option: ')\n if option.lower() == 'q':\n sys.exit()\n elif option.lower() == 'b':\n return Write_menu()\n elif option == '1':\n j = write_SEQRES_fasta()\n elif option == '2':\n j = write_coord_seq()\n elif option == '3':\n j = write_align_seq()\n else:\n print ('Invalid selection!')\n return j", "def __admin_menu(self):\n log.debug(\"Displaying __admin_menu\")\n self.menu = TelegramMenu(\"config/comunda_admin_menu.bpmn\", self, \"MenuStart\")\n self.menu.admin_menu(\"MenuStart\", \"menu_admin_main_txt\")\n return", "def Write_menu():\n \n import sys\n d = ''\n msg = '' \n while d == '':\n print('\\nW R I T E M E N U')\n print('1. 
Write out coordinate file')\n print('2. Write out sequence(Fasta format)')\n print('q. Quit')\n option = input('Select an option: ')\n if option.lower() == 'q':\n sys.exit()\n elif option == '1':\n msg = 'Option 1'\n d = submenu1()\n elif option == '2':\n msg = 'Option 2'\n d = submenu2()\n else:\n print ('Invalid selection!')\n return msg, d", "def _addMenuChilds(self, menu, menuConfig):\n # Helper function to create the main menu.\n for sub in menuConfig:\n menuLabel = sub.text.get()\n if not menuLabel: # empty or None label means a separator\n menu.add_separator()\n elif len(sub) > 0: # sub-menu\n submenu = tk.Menu(self.root, tearoff=0)\n menu.add_cascade(label=menuLabel, menu=submenu)\n self._addMenuChilds(submenu, sub) # recursive filling\n else: # menu option\n # If there is an entry called \"Browse files\", when clicked it\n # will call the method onBrowseFiles() (it has to be defined!)\n def callback(name):\n \"\"\"Return a callback function named \"on<Name>\".\"\"\"\n f = \"on%s\" % \"\".join(x.capitalize() for x in name.split())\n return lambda: getattr(self, f)()\n menu.add_command(label=menuLabel, compound=tk.LEFT,\n image=self.getImage(sub.icon.get()),\n command=callback(name=sub.text.get()))", "def build_menu(category):\n menu = FilterSprite._build_menu(category)\n if menu is None:\n return None\n\n # add title\n menu.insert(0, (category.title() + \" Codes\", \"Code: \"))\n\n # append an option to clear the filter\n menu.append((\"Clear Filter\", FilterSprite.CLEAR))\n\n return menu", "def menu(*args, label: str = \"\", show: bool = True, parent: str = \"\",\n before: str = \"\", enabled: bool = True, id:str='', indent=-1):\n try: \n widget = internal_dpg.add_menu(*args, label=label, show=show, parent=parent,\n before=before, enabled=enabled, id=id, indent=indent)\n internal_dpg.push_container_stack(widget)\n yield widget\n finally:\n internal_dpg.pop_container_stack()", "def show_menu(self, update, context):\n msg_file = 'menu_msg.txt'\n self.send_textfile(msg_file, update, context)", "def initMenu(self):\n self.fileMenu = self.menuBar().addMenu(self.tr(\"&File\"))\n self.fileMenu.addAction(self.createProjectAction)\n self.fileMenu.addAction(self.openProjectAction)\n\n #TODO : problem displaying submenu\n #self.recentMenu = self.fileMenu.addMenu(self.tr(\"Open &recent\"))\n #for recentProject in self._controller.getSession().recentProjects():\n #recentAction = QtGui.QAction(self.tr(str(recentProject.getPath())), self)\n #self.connect(recentAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"openRecent(recentProject.getPath())\"))\n #self.recentMenu.addAction(recentAction)\n\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.importVideoAction)\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.saveProjectAction)\n\n self.helpMenu = self.menuBar().addMenu(self.tr(\"&Help\"))\n self.helpMenu.addAction(self.aboutAction)", "def init_menu(self):\r\n # generate password\r\n gen_pwd_action = QtWidgets.QAction('Generate Password', self) \r\n gen_pwd_action.triggered.connect(self.create_password)\r\n\r\n # generate key file\r\n gen_key_action = QtWidgets.QAction('Generate Key File', self) \r\n gen_key_action.triggered.connect(self.create_key)\r\n\r\n # exit action, closes the program\r\n exit_action = QtWidgets.QAction('Exit', self) \r\n exit_action.setShortcut('Ctrl+Q')\r\n exit_action.setStatusTip('Exit application')\r\n exit_action.triggered.connect(app.quit)\r\n\r\n # Theme menus\r\n light_theme_action = QtWidgets.QAction('Light theme', self) \r\n 
light_theme_action.triggered.connect(self.light_theme)\r\n dark_theme_action = QtWidgets.QAction('Dark theme', self) \r\n dark_theme_action.triggered.connect(self.dark_theme)\r\n ubuntu_theme_action = QtWidgets.QAction('Ubuntu theme', self) \r\n ubuntu_theme_action.triggered.connect(self.ubuntu_theme)\r\n solaris_theme_action = QtWidgets.QAction('Solaris theme', self) \r\n solaris_theme_action.triggered.connect(self.solaris_theme)\r\n\r\n # Create menu bar and add action\r\n menuBar = self.menuBar()\r\n fileMenu = menuBar.addMenu('File')\r\n fileMenu.addAction(gen_pwd_action)\r\n fileMenu.addAction(gen_key_action)\r\n fileMenu.addSeparator()\r\n fileMenu.addAction(exit_action)\r\n themeMenu = menuBar.addMenu('Theme')\r\n themeMenu.addAction(light_theme_action)\r\n themeMenu.addAction(dark_theme_action)\r\n themeMenu.addAction(ubuntu_theme_action)\r\n themeMenu.addAction(solaris_theme_action)", "def init_editmenu(self):\n self.menubar[\"editmenu\"] = Menu(self.menubar[\"menubar\"], tearoff=0)\n self.menubar[\"editmenu\"].add_command(label=\"Undo\", command=todo)\n self.menubar[\"editmenu\"].add_separator()\n self.menubar[\"editmenu\"].add_command(label=\"Cut\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Copy\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Paste\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Delete\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Select All\", command=todo)\n self.menubar[\"menubar\"].add_cascade(\n label=\"Edit\", menu=self.menubar[\"editmenu\"])", "def create_menu_set(name):\n\t\t\tmenu = getattr(self, name + \"_menu\")\n\t\t\ttv = getattr(self, \"tv_\" + name)\n\t\t\tcid_index = getattr(self, \"cid_\" + name)\n\n\t\t\t# bind menu helper\n\t\t\tdef bind_menu(label):\n\t\t\t\tdef bind_menu_inner(func):\n\t\t\t\t\tmenu.add_command(label=label, command=func)\n\t\t\t\t\treturn func\n\t\t\t\treturn bind_menu_inner\n\n\t\t\t# add commands...\n\t\t\t@bind_menu(\"刪除\")\n\t\t\tdef tvdelete():\n\t\t\t\tif messagebox.askyesno(\"Comic Crawler\", \"確定刪除?\"):\n\t\t\t\t\tselected = tv.selection()\n\t\t\t\t\tself.remove(name, *[cid_index[cid] for cid in selected])\n\n\t\t\t@bind_menu(\"移至頂部\")\n\t\t\tdef tvlift():\n\t\t\t\tselected = tv.selection()\n\t\t\t\tself.downloader.mission_manager.lift(name, *[cid_index[cid] for cid in selected])\n\n\t\t\t@bind_menu(\"移至底部\")\n\t\t\tdef tvdrop():\n\t\t\t\tselected = tv.selection()\n\t\t\t\tself.downloader.mission_manager.drop(name, *[cid_index[cid] for cid in selected])\n\n\t\t\t@bind_menu(\"改名\")\n\t\t\tdef tvchangetitle():\n\t\t\t\tselected = tv.selection()\n\t\t\t\tmission = cid_index[selected[0]]\n\t\t\t\tselect_title(self.root, mission)\n\n\t\t\t@bind_menu(\"重新選擇集數\")\n\t\t\tdef tvReselectEP():\n\t\t\t\ts = tv.selection()\n\t\t\t\tmissions = [ cid_index[i] for i in s ]\n\t\t\t\tfor mission in missions:\n\t\t\t\t\treselect_episodes(self.root, mission)\n\n\t\t\t@bind_menu(\"開啟資料夾\")\n\t\t\tdef tvOpen():\n\t\t\t\ts = tv.selection()\n\t\t\t\tmissions = [ cid_index[i] for i in s ]\n\t\t\t\tsavepath = setting[\"savepath\"]\n\t\t\t\tfor mission in missions:\n\t\t\t\t\tfolder = os.path.join(savepath, safefilepath(mission.title))\n\t\t\t\t\tos.startfile(os.path.expanduser(folder))\n\n\t\t\t@bind_menu(\"開啟網頁\")\n\t\t\tdef tvOpenBrowser():\n\t\t\t\ts = tv.selection()\n\t\t\t\tmissions = [ cid_index[i] for i in s ]\n\t\t\t\tfor mission in missions:\n\t\t\t\t\twebbrowser.open(mission.url)\n\n\t\t\tif name == \"view\":\n\t\t\t\t@bind_menu(\"加入圖書館\")\n\t\t\t\tdef 
tvAddToLib():\n\t\t\t\t\ts = tv.selection()\n\t\t\t\t\tmissions = [ cid_index[i] for i in s ]\n\t\t\t\t\ttitles = [ m.title for m in missions ]\n\t\t\t\t\tself.downloader.mission_manager.add(\"library\", *missions)\n\t\t\t\t\tsafeprint(\"已加入圖書館︰{}\".format(\", \".join(titles)))\n\n\t\t\t# menu call\n\t\t\tdef tvmenucall(event):\n\t\t\t\tmenu.post(event.x_root, event.y_root)\n\t\t\ttv.bind(\"<Button-3>\", tvmenucall)", "def MakeCustomMenu(content): #py:MakeCustomMenu\n RUR._MakeCustomMenu_(content)", "def makeMenuBar(self):\n\n # Make a file menu with Hello and Exit items\n fileMenu = wx.Menu()\n # When using a stock ID we don't need to specify the menu enemy's\n exitItem = fileMenu.Append(wx.ID_EXIT)\n\n toolsMenu = wx.Menu()\n downloadAPK = toolsMenu.Append(wx.ID_ANY, \"Download APK\", \"This will download the APK to C:\\\\ADBScript\\\\APKLibrary\\\\\")\n # App13Packing = toolsMenu.Append(wx.ID_ANY, \"Pack App1 3 Stream\", \"This will pack your custom stream\")\n # gamePacking = toolsMenu.Append(wx.ID_ANY, \"Pack game Stream\", \"Type in your stream and start packing\")\n androidMonitor = toolsMenu.Append(wx.ID_ANY, \"Android Device Monitor\", \"This will open ADM if you have it installed\")\n resetsave = toolsMenu.Append(wx.ID_ANY, \"Reset Save File\", \"Reset save(s) on QA & RC\")\n\n # Now a help menu for the about enemy\n helpMenu = wx.Menu()\n aboutItem = helpMenu.Append(wx.ID_ABOUT)\n\n # Make the menu bar and add the two menus to it. The '&' defines\n # that the next letter is the \"mnemonic\" for the menu enemy. On the\n # platforms that support it those letters are underlined and can be\n # triggered from the keyboard.\n menuBar = wx.MenuBar()\n menuBar.Append(fileMenu, \"&File\")\n menuBar.Append(toolsMenu, \"&Tools\")\n menuBar.Append(helpMenu, \"&Help\")\n\n # Give the menu bar to the frame\n self.SetMenuBar(menuBar)\n\n # Finally, associate a handler function with the EVT_MENU event for\n # each of the menu items. That means that when that menu enemy is\n # activated then the associated handler function will be called.\n\n self.Bind(wx.EVT_MENU, self.OnExit, exitItem)\n self.Bind(wx.EVT_MENU, self.downloadAPK, downloadAPK)\n self.Bind(wx.EVT_MENU, self.startAndroidDeviceMonitor, androidMonitor)\n self.Bind(wx.EVT_MENU, self.OnAbout, aboutItem)\n self.Bind(wx.EVT_MENU, self.resetsave, resetsave)\n self.Bind(wx.EVT_MENU, self.startAndroidDeviceMonitor, androidMonitor)", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. 
Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def right_click_event(self, icon, button, time):\n menu = gtk.Menu()\n\n about = gtk.MenuItem(\"About\")\n quit = gtk.MenuItem(\"Quit\")\n\n about.connect(\"activate\", self.show_about_dialog)\n quit.connect(\"activate\", gtk.main_quit)\n\n menu.append(about)\n menu.append(quit)\n\n menu.show_all()\n\n menu.popup(None, None, gtk.status_icon_position_menu, button, time, self.statusicon)", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"c\": (\"Calibrate\", self.calibrate),\n \"d\": (\"Dance\", self.dance),\n \"h\": (\"Hold position\", self.hold_position),\n \"n\": (\"Navigate\", self.nav),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"q\": (\"Quit\", self.quit),\n \"v\": (\"Veer\", self.slither)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def showMenu():\n print( \"1. Create New User\" )\n print( \"2. Authorize\" )\n print( \"3. Send SMS\" )\n print( \"4. Send Email\" )\n print( \"5. Get Recently Sent Message\" )\n print( \"6. Exit\" )", "def Menu():\n config={'dif':'Medium','puntosJ':0,'puntosIA':0,'time':10,'pal':[],'bolsa':[]}\n menu = Crearmenu()\n while True:\n menu.un_hide()\n event, _ = menu.read()\n print(event)\n if event in (\"inicio\", \"continue\"):\n if(event == 'inicio' and os.path.isfile('Guardado.json')):\n event2, _ = sg.Window('ADVERTENCIA',\n [[sg.T('Si inicias una nueva partida se borrara la guardada, seguro que quieres continuar?')],\n [sg.B('OK'), sg.B('Cancel') ]]).read(close=True)\n if event2 == 'OK':\n remove('Guardado.json')\n else:\n continue \n menu.close()\n config['pal']=setDif(config['dif'])\n Tablero.Jugar(config,event)\n elif event == \"puntos\":\n menu.hide()\n pt()\n elif event == \"config\":\n menu.hide()\n config = ajustes(config)\n #print(config['bolsa'])\n elif event in (None, \"exit\"):\n break", "def execute_factory_menu(cls) -> LibraryItemFactory:\n print(\"Item Loader\")\n print(\"-----------\")\n print(\"What kind of items would you like to load?\")\n print(\"1. Manga\")\n print(\"2. Games\")\n print(\"3. Movies\")\n user_choice = int(input(\"Enter your choice (1-3):\"))\n factory = cls.factory_map[user_choice]\n path = input(\"Enter a path: \")\n return factory(path)", "def display_menu(self):\n print(\"\"\"\nLogistic System Menu\n1. Add Vehicles\n2. Add Item To The Cart\n3. Complete The Order\n4. Track The Order\n5. 
Quit \"\"\")", "def get_menu() -> str:\n date = datetime.date.today()\n urls = generate_urls(date)\n menu_json = fetch_menu(urls)\n menu = extract_menu(menu_json, date)\n\n return menu", "def onPopupMenu(self, pos):\n self.menu = QMenu(self)\n\n # sub menu for adding library\n libsMenu = QMenu(\"Libraries\", self)\n libsMenu.setIcon( QIcon(\":/libraries-add.png\") )\n for i in xrange(len(self.__parent.libraries().model.getData())):\n currentName = self.__parent.libraries().model.getData()[i]['data']['function']\n lib_menu = QMenu(\"%s #%s\" % (currentName, (i+1)), self)\n\n # search the current name of the lib in all libraries to retrieve \n # all associated functions\n libraries = self.__parent.getHelpLibraries()\n if libraries is not None:\n self._load_menu_functions_library(index=i,\n library_name=currentName, \n libraries=libraries, \n menu=lib_menu)\n \n # search the lib in generic branch in case of not found before\n # so becareful of this limitation, \n # the name of the lib must be different between\n # the generic branch and extra\n # if the same name is used then bad functions will be used in the test\n libraries_generic = self.__parent.getHelpLibraries(generic=True)\n if libraries_generic is not None:\n self._load_menu_functions_library(index=i,\n library_name=currentName, \n libraries=libraries_generic, \n menu=lib_menu)\n \n libsMenu.addMenu(lib_menu)\n libsMenu.addSeparator()\n\n # sub menu for adding adapter\n adpsMenu = QMenu(\"Adapters\", self)\n adpsMenu.setIcon( QIcon(\":/adapters-add.png\") )\n for i in xrange(len(self.__parent.adapters().model.getData())):\n currentName = self.__parent.adapters().model.getData()[i]['data']['function']\n \n adpMenu = QMenu(\"%s #%s\" % (currentName, (i+1)), self)\n adpMoreMenu = QMenu(\"More...\", self)\n\n # search the current name of the adapter in all adapters to retrieve \n # all associated functions\n adapters = self.__parent.getHelpAdapters()\n if adapters is not None:\n self._load_menu_functions_adapter(index=i,\n adapter_name=currentName, \n adapters=adapters, \n menu=adpMenu,\n menu_more=adpMoreMenu)\n \n # search the adapter in generic branch in case of not found before\n # so becareful of this limitation, \n # the name of the adapter must be different between\n # the generic branch and extra\n # if the same name is used then bad functions will be used in the test\n adapters_generic = self.__parent.getHelpAdapters(generic=True)\n if adapters_generic is not None:\n self._load_menu_functions_adapter(index=i,\n adapter_name=currentName, \n adapters=adapters_generic, \n menu=adpMenu,\n menu_more=adpMoreMenu)\n \n adpMenu.addSeparator()\n adpMenu.addMenu(adpMoreMenu)\n \n adpsMenu.addMenu(adpMenu)\n adpsMenu.addSeparator()\n \n \n # sub menu for adding step\n stepMenu = QMenu(\"Steps\", self)\n stepMenu.setIcon( QIcon(\":/steps.png\") )\n\n for stp in self.__parent.steps().model.getData():\n stpMenu = QMenu(\"Step #%s\" % stp['id'], self)\n\n functions = self.helper.helpFramework(moduleName='TestExecutor', className='Step')\n if functions is not None:\n for fct in functions:\n if fct['type'] == 'method': \n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n stpMenu.addAction( QtHelper.createAction(self, fct['name'], self.addStep,\n icon=QIcon(\":/methods.png\"), cb_arg=(stp['id'],argsFct) ) )\n stpMenu.addSeparator()\n stepMenu.addMenu(stpMenu)\n stepMenu.addSeparator()\n \n # sub menu for adding cache functions\n cacheMenu = QMenu(\"Cache\", self)\n functions = 
self.helper.helpFramework(moduleName='TestExecutor', className='Cache')\n if functions is not None:\n for fct in functions:\n if fct['type'] == 'method': \n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n if 'default-args' in fct:\n self.addDefaultValues(defaultValues=fct['default-args'], currentFunction=argsFct)\n cacheMenu.addAction( QtHelper.createAction(self, fct['name'], self.addCache, \n icon=QIcon(\":/methods.png\"), cb_arg=argsFct ) )\n cacheMenu.addSeparator()\n \n # sub menu for adding Interact functions\n interactMenu = QMenu(\"Interact\", self)\n functions = self.helper.helpFramework(moduleName='TestExecutor', className='Interact')\n if functions is not None:\n for fct in functions:\n if fct['type'] == 'method': \n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n if 'default-args' in fct:\n self.addDefaultValues(defaultValues=fct['default-args'], currentFunction=argsFct)\n interactMenu.addAction( QtHelper.createAction(self, fct['name'], self.addInteract, \n icon=QIcon(\":/methods.png\"), cb_arg=argsFct ) )\n interactMenu.addSeparator()\n \n # sub menu for adding Interact functions\n traceMenu = QMenu(\"Trace\", self)\n functions = self.helper.helpFramework(moduleName='TestExecutor', className='Trace')\n if functions is not None:\n for fct in functions:\n if fct['type'] == 'method': \n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n if 'default-args' in fct:\n self.addDefaultValues(defaultValues=fct['default-args'], currentFunction=argsFct)\n traceMenu.addAction( QtHelper.createAction(self, fct['name'], self.addTrace, \n icon=QIcon(\":/methods.png\"), cb_arg=argsFct ) )\n traceMenu.addSeparator()\n \n # sub menu for adding time functions\n timeMenu = QMenu(\"Time\", self)\n functions = self.helper.helpFramework(moduleName='TestExecutor', className='Time')\n if functions is not None:\n for fct in functions:\n if fct['type'] == 'method': \n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n if 'default-args' in fct:\n self.addDefaultValues(defaultValues=fct['default-args'], currentFunction=argsFct)\n timeMenu.addAction( QtHelper.createAction(self, fct['name'], self.addTime, \n icon=QIcon(\":/methods.png\"), cb_arg=argsFct ) )\n timeMenu.addSeparator()\n \n # sub menu for adding storage functions\n storageMenu = QMenu(\"Storage\", self)\n privateMenu = QMenu(\"Private\", self)\n publicMenu = QMenu(\"Public\", self)\n storageMenu.addMenu( privateMenu )\n storageMenu.addMenu( publicMenu )\n functions = self.helper.helpFramework(moduleName='TestExecutor', className='Private')\n if functions is not None:\n for fct in functions:\n if fct['type'] == 'method': \n if fct['name'] not in [ '__init__' ]:\n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n if 'default-args' in fct:\n self.addDefaultValues(defaultValues=fct['default-args'], currentFunction=argsFct)\n privateMenu.addAction( QtHelper.createAction(self, fct['name'], self.addPrivate, \n icon=QIcon(\":/methods.png\"), cb_arg=argsFct ) )\n privateMenu.addSeparator()\n functions = self.helper.helpFramework(moduleName='TestExecutor', className='Public')\n if functions is not None:\n for fct in functions:\n if fct['type'] == 'method': \n if fct['name'] not in [ '__init__' ]:\n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n if 'default-args' in fct:\n 
self.addDefaultValues(defaultValues=fct['default-args'], currentFunction=argsFct)\n publicMenu.addAction( QtHelper.createAction(self, fct['name'], self.addPublic, \n icon=QIcon(\":/methods.png\"), cb_arg=argsFct ) )\n publicMenu.addSeparator()\n \n # sub menu for adding testcase function\n tcMenu = QMenu(\"TestCase\", self)\n tcMenu.setIcon( QIcon(\":/main.png\") )\n functions = self.helper.helpFramework(moduleName='TestExecutor', className='TestCase')\n if functions is not None:\n for fct in functions:\n if fct['type'] == 'method': \n if fct['name'] in [ 'addStep', 'condition', 'label' ]:\n pass # skip this function because steps can be added on the other part\n else:\n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n if 'default-args' in fct:\n self.addDefaultValues(defaultValues=fct['default-args'], currentFunction=argsFct)\n tcMenu.addAction( QtHelper.createAction(self, fct['name'], self.addTc, \n icon=QIcon(\":/methods.png\"), cb_arg=argsFct ) )\n tcMenu.addSeparator()\n\n # sub menu for templates\n tplsMenu = QMenu(\"Templates\", self)\n functions = self.helper.helpFramework(moduleName='TestTemplates')\n if functions is not None:\n for valid in functions:\n if valid['name'] in [ \"TemplateLayer\", \"TemplateMessage\" ]:\n continue\n tplMenu = QMenu(valid['name'], self)\n for fct in valid['functions']:\n if fct['name'] == '__init__':\n continue\n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n if 'default-args' in fct:\n self.addDefaultValues(defaultValues=fct['default-args'], currentFunction=argsFct)\n tplMenu.addAction( QtHelper.createAction(self, fct['name'], self.addTpl,\n icon=QIcon(\":/methods.png\"), cb_arg=argsFct ) )\n tplMenu.addSeparator()\n tplsMenu.addMenu(tplMenu)\n tplsMenu.addSeparator()\n\n # condition\n function = self.helper.helpFramework(moduleName='TestExecutor', className='TestCase', functionName='condition')\n if function is None:\n argsFct = {}\n else:\n argsFct = self.parseDocString(docstring=function['desc'])\n argsFct['function'] = function['name']\n if 'default-args' in function:\n self.addDefaultValues(defaultValues=function['default-args'], currentFunction=argsFct)\n condAct = QtHelper.createAction(self, \"Condition\", self.addIf, icon=None, cb_arg=argsFct )\n\n # label\n function = self.helper.helpFramework(moduleName='TestExecutor', className='TestCase', functionName='label')\n if function is None:\n argsFct = {}\n else:\n argsFct = self.parseDocString(docstring=function['desc'])\n argsFct['function'] = function['name']\n if 'default-args' in function:\n self.addDefaultValues(defaultValues=function['default-args'], currentFunction=argsFct)\n labelAct = QtHelper.createAction(self, \"Label\", self.addLabel, icon=None, cb_arg=argsFct )\n\n\n # sub menu for manipulators\n manipsMenu = QMenu(\"Manipulators\", self)\n functions = self.helper.helpFramework(moduleName='TestManipulators')\n if functions is not None:\n for manip in functions:\n manMenu = QMenu(manip['name'], self)\n currentName = manip['name']\n for fct in manip['functions']:\n if fct['name'] == '__init__':\n continue\n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n argsFct['main-name'] = currentName\n if 'default-args' in fct:\n self.addDefaultValues(defaultValues=fct['default-args'], currentFunction=argsFct)\n manMenu.addAction( QtHelper.createAction(self, fct['name'], self.addManipulator,\n icon=QIcon(\":/methods.png\"), cb_arg=argsFct ) )\n manMenu.addSeparator()\n 
manipsMenu.addMenu(manMenu)\n manipsMenu.addSeparator()\n\n # sub menu for validators\n valsMenu = QMenu(\"Validators\", self)\n functions = self.helper.helpFramework(moduleName='TestValidators')\n if functions is not None:\n for valid in functions:\n valMenu = QMenu(valid['name'], self)\n currentName = valid['name']\n for fct in valid['functions']:\n if fct['name'] == '__init__':\n continue\n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n argsFct['main-name'] = currentName\n if 'default-args' in fct:\n self.addDefaultValues(defaultValues=fct['default-args'], currentFunction=argsFct)\n valMenu.addAction( QtHelper.createAction(self, fct['name'], self.addValidator,\n icon=QIcon(\":/methods.png\"), cb_arg=argsFct ) )\n valMenu.addSeparator()\n valsMenu.addMenu(valMenu)\n valsMenu.addSeparator()\n\n self.menu.addMenu( stepMenu )\n self.menu.addSeparator()\n self.menu.addAction( condAct )\n self.menu.addAction( labelAct )\n self.menu.addSeparator()\n self.menu.addMenu( tcMenu )\n self.menu.addMenu( tplsMenu )\n self.menu.addMenu( manipsMenu )\n self.menu.addMenu( valsMenu )\n self.menu.addSeparator()\n self.menu.addAction( self.addBreakpointAction )\n self.menu.addMenu( traceMenu )\n self.menu.addMenu( interactMenu )\n self.menu.addMenu( cacheMenu )\n self.menu.addMenu( timeMenu )\n self.menu.addMenu( storageMenu )\n self.menu.addSeparator()\n self.menu.addMenu( adpsMenu )\n self.menu.addMenu( libsMenu )\n self.menu.addSeparator()\n self.menu.addAction( self.deleteAction )\n self.menu.addAction( self.copyAction )\n self.menu.addAction( self.pasteAction )\n self.menu.addAction( self.reloadAction )\n self.menu.addSeparator()\n \n self.menu.popup( self.mapToGlobal(pos) )", "def _syncDisplayMenu(ned, menu):\n pass", "def create_film_entry(self, film):\n self.list_menu.add_command(\n label=self.get_film_name(film),\n command=partial(self.play_film, film),\n font=self.default_font\n )", "def create_menu(self, menu_items):\n self.menu = Gtk.Menu()\n\n # loop through the menu items\n for k, v in enumerate(menu_items):\n # menu item\n first_item = Gtk.MenuItem(v['title'])\n first_item.connect('activate', self.open_news_url, v['url'])\n self.menu.append(first_item)\n\n # separator item\n separator = Gtk.SeparatorMenuItem()\n self.menu.append(separator)\n\n # settings item\n settings_item = Gtk.MenuItem('Settings')\n settings_item.connect('activate', self.on_settings)\n self.menu.append(settings_item)\n\n # about item\n about_item = Gtk.MenuItem('About')\n about_item.connect('activate', self.on_about)\n self.menu.append(about_item)\n\n # exit item\n exit_item = Gtk.MenuItem('Exit')\n exit_item.connect('activate', self.stop)\n self.menu.append(exit_item)\n\n self.menu.show_all()\n self.indicator.set_menu(self.menu)\n\n return self.menu", "def display_menu():\n print()\n print(\"Commands:\")\n print(\" quit - Quit\")\n print(\" new - Create new account\")\n print(\" display - Display account information\")\n print(\" deposit - Desposit money\")\n print(\" check - Write a check\")", "def makeActionMenu(self):\n\t\tself.newAct = QtGui.QAction(self.tr(\"&Novo\"),self)\n\t\tself.newAct.setShortcut(self.tr(\"Ctrl+N\"))\n\t\tself.newAct.setStatusTip(self.tr(\"Cria uma nova area de desenho em branco\"))\n\t\tself.connect(self.newAct,SIGNAL(\"triggered()\"),self.glwidget.newFile)\n\t\t\n\t\tself.openAct = QtGui.QAction(self.tr(\"&Abrir\"),self)\n\t\tself.openAct.setShortcut(self.tr(\"Ctrl+o\"))\n\t\tself.openAct.setStatusTip(self.tr(\"Abrir arquivo do 
elvis\"))\n\t\tself.connect(self.openAct,SIGNAL(\"triggered()\"),self.glwidget.openElvisfile)\t\t\n\n\t\tself.saveAct = QtGui.QAction(self.tr(\"&Salvar\"),self)\n\t\tself.saveAct.setShortcut(self.tr(\"Ctrl+S\"))\n\t\tself.saveAct.setStatusTip(self.tr(\"Salva a imagem do canvas\"))\n\t\tself.connect(self.saveAct,SIGNAL(\"triggered()\"),self.glwidget.saveElvisfile)\n\t\t\n\t\tself.exportAct = QtGui.QAction(self.tr(\"&Exportar SVG\"),self)\n\t\tself.exportAct.setShortcut(self.tr(\"Ctrl+E\"))\n\t\tself.exportAct.setStatusTip(self.tr(\"Exporta para formato SVG\"))\n\t\tself.connect(self.exportAct,SIGNAL(\"triggered()\"),self.glwidget.ExportSVG)\n\t\t\t\t\n\t\t\n\t\tself.exitAct = QtGui.QAction(self.tr(\"&Sair\"),self)\n\t\tself.exitAct.setStatusTip(self.tr(\"Sair do programa\"))\n\t\tself.connect(self.exitAct,SIGNAL(\"triggered()\"),self.close)\n\t\t\n\t\n\t\tself.aboutAct = QtGui.QAction(self.tr(\"&Sobre\"),self)\n\t\tself.aboutAct.setStatusTip(self.tr(\"Sobre o programa\"))\n\t\tself.connect(self.aboutAct,SIGNAL(\"triggered()\"),self.about)", "def menu(self):\n print('1) Today\\'s tasks')\n print('2) Week\\'s tasks')\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()", "def printMenu():\n # tWelc = PrettyTable(['Welcome to the CLI-of the repository classifier'])\n print('Welcome to the CLI of the repository classifier')\n print(strStopper1)\n t = PrettyTable(['Action', ' Shortcut '])\n t.add_row(['Show Menu', '- m -'])\n t.add_row([' Predict repositories form txt-file ', '- i -'])\n t.add_row(['Input URL', '- u -'])\n t.add_row(['Show Info', '- f -'])\n t.add_row(['Train Model', '- t -'])\n t.add_row(['set GitHub-Token', '- g -'])\n t.add_row(['Help', '- h -'])\n t.add_row(['Quit', '- q -'])\n print(t)\n print('')" ]
[ "0.69971764", "0.6512389", "0.6462643", "0.6236843", "0.621201", "0.61666703", "0.6126377", "0.60749704", "0.6072576", "0.59767383", "0.5965778", "0.5853985", "0.5805146", "0.57861257", "0.5747815", "0.5744853", "0.5727659", "0.5717768", "0.57175213", "0.5701339", "0.5688219", "0.56823295", "0.5679386", "0.5671081", "0.56666774", "0.5654171", "0.56466097", "0.56463754", "0.5643562", "0.5637114", "0.5621619", "0.56146437", "0.56112105", "0.5595103", "0.5590855", "0.55718815", "0.5570581", "0.5568607", "0.5567163", "0.5566252", "0.5561211", "0.55457604", "0.5545226", "0.55435973", "0.55323815", "0.5516714", "0.5514967", "0.5511708", "0.55116886", "0.55108565", "0.5498181", "0.5489791", "0.5483605", "0.5474783", "0.5469835", "0.5462913", "0.5461078", "0.54152423", "0.5412912", "0.5407091", "0.5402383", "0.5398464", "0.5394184", "0.538872", "0.5383866", "0.5381822", "0.53633106", "0.53561324", "0.5344067", "0.53321576", "0.53279406", "0.53220016", "0.53169215", "0.53137326", "0.530196", "0.53018767", "0.53015244", "0.5301455", "0.5300704", "0.5299733", "0.5294103", "0.5293574", "0.5292745", "0.5289227", "0.5287353", "0.5287353", "0.528228", "0.5278572", "0.52780384", "0.52755505", "0.52752614", "0.52655005", "0.52654445", "0.526508", "0.52644485", "0.5262456", "0.526215", "0.5261214", "0.52519846", "0.5241357", "0.5239223" ]
0.0
-1
Return the printable length of the Entry's Text
def getTextLength(self):
        return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def length(self):\n return len(self.text)", "def __len__(self):\n return len(self.spricht())", "def width(self, text):\n return len(text) * (self.font_width + 1)", "def getLength(self):\n return len(self.entries)", "def getLength(self, text):\n\n return len(text[self.table_header[0]])", "def format_length( self, key ) :\r\n\r\n return struct.calcsize( self[key] )", "def LEN(text):\n return len(text)", "def char_size(self):\n return len(self.id2char)", "def _text_length(self, text):\n\n if isinstance(text, dict): # {key: value} case\n return len(next(iter(text.values())))\n elif not hasattr(text, '__len__'): # Object has no len() method\n return 1\n elif len(text) == 0 or isinstance(text[0], int): # Empty string or list of ints\n return len(text)\n else:\n return sum([len(t) for t in text]) # Sum of length of individual strings", "def compute_user_description_text_length(row):\n row[\"user_description_text_length\"] = len(row['user_description'])\n return row[\"user_description_text_length\"]", "def field_length(self,\r\n entrylist=None):\r\n\r\n\r\n if entrylist is None:\r\n entrylist = list(self.default_dict['field'].keys())\r\n maxlength = 0\r\n for i_temp in entrylist:\r\n if len(self.default_dict['field'][i_temp]) > maxlength:\r\n maxlength = len(self.default_dict['field'][i_temp])\r\n return maxlength", "def get_string_length(self):\n return int(self.read('H')[0])", "def _len(item):\n stripped = _strip_ansi(item)\n if wcwidth:\n return wcwidth.wcswidth(stripped)\n else:\n return len(stripped)", "def total_length():\n return", "def size(self):\n return len(self.chars)", "def namelength(self):\n return self[\"namelength\"]", "def visual_len(text) -> int:\n return len(text) if NO_COLOR else len(_remove_regex(\"\\033\\\\[[0-9]*m\", text))", "def printed_length(string):\n # It returns the length of the printed string\n return len(remove_colors(string))", "def title_len(self) -> int:\n return self.__title_len", "def get_text_width(self, text: str) -> float:\n pass", "def length(self):\n return self._info.length # pylint: disable=E1101", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def size(self):\n return _(len(self._))", "def __len__(self) -> int:\n return len(self.contents)", "def __len__(self) -> int:\n return len(self.tab10)", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length", "def get_width(self):\n return \"%s\" % self.width", "def _get_length(self):\n return self._length", "def get_length(self):\n\n return self.length", "def characters_left(self):\r\n return self.max_chars - len(self.variable.get())", "def get_length(self):\n return self._length", "def get_length(self):\n return self._length", "def get_length(self):\n return self.run_command('get_length')[0]", "def getLength(self):\n return self.length", "def get_text_size(self):\n return self.__text_surface.get_width(), self.__text_surface.get_height()", "def get_size(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetSize', self.handle)", "def hstrlen(self, key, field):\n return self._command(b'HSTRLEN', key, field)", "def __len__(self):\n return self.cli.passwords.len()", "def length(self):\n return self.length", "def total_length(self):\n return self.length", "def __len__( self ) :\n\n return( len( self.__entries ) )", "def __len__(self):\n return len(self.contents)", "def __len__(self):\n return len(self.contents)", "def getLength(self):\n return self.n", "def aln_length(self) -> int:\n return len(self)", 
"def getLen(self):\n return self.len", "def sizeof(self):\n\n return self.__format_length__", "def get_width(self):\n return max(map(len, self.get_lines()))", "def visual_len(self) -> int:\n return visual_len(self)", "def get_printable_size(self):\n size = self.size\n prefix = ''\n for (s, l) in [(1024*1024*1024, 'GB'), (1024*1024, 'MB'), (1024, 'KB')]:\n if (size>s):\n size = float(size)/s\n prefix = l\n\n return '%10.2f %s' % (size, prefix)", "def get_length(self):\n\n return self._length", "def __len__(self) -> int:\n return len(self.getvalue())", "def DLEN(self):", "def outputText(self, item, titleMode, internal=False):\n return repr(len(item.descendLevelList(-self.parentLevel)))", "def length(self):\n\t\treturn self.n", "def __len__(self) -> int:\n return len(self.length)", "def _visible_width(s):\n # optional wide-character support\n if wcwidth is not None and WIDE_CHARS_MODE:\n len_fn = wcwidth.wcswidth\n else:\n len_fn = len\n if isinstance(s, (str, bytes)):\n return len_fn(_strip_ansi(s))\n else:\n return len_fn(str(s))", "def _label_width(text):\n width = 0\n for lineno, line in enumerate(text.split(u'\\n')):\n size = [_BIG_FONT, _SMALL_FONT][lineno > 0] # Cool idiom, huh?\n width = max(width, size * len(line))\n return width", "def calc_text_size(self, text, font):\n w = 0\n for c in text:\n o = ord(c)\n if o > 0xff: # Translate Cyrillic Unicode to ASCII\n o -= 848\n if o > 255:\n o = 32\n w += font.char_size(o)[1]\n return(w, font.height())", "def compute_text_length(row):\n derived_series = pd.read_json(json.dumps(row['text_derived']), typ='series')\n derived_series = pd.Series(derived_series)\n row[\"tweet_text_length_derived\"] = derived_series.str.len()\n return row[\"tweet_text_length_derived\"]", "def getLength(self):\n return self.count", "def get_height(self):\n\n return \"%s\" % self.height", "def get_length(self):\n return self.resource.get_size()", "def textSize(self, height=1, width=1):\n if not(0 < height < 17) or type(height) is not int:\n raise ValueError('height must be a int between 1 and 16')\n if not(0 < width < 17) or type(width) is not int:\n raise ValueError('textWidth must be a int between 1 and 16')\n else:\n size = (width - 1) << 4 | (height - 1)\n self._write(self.__class__.__GS + '!' 
+ chr(size))", "def length(self):\n return self._length", "def length(self):\n return self._length", "def size(self) -> int:\r\n return self.da.length()", "def length(self):\n pass", "def get_unstr_length(self):\n if self.unstr_length is None:\n return self.length()\n\n elif isinstance(self.unstr_length, str):\n return self.length() + float(self.unstr_length)\n\n return self.unstr_length", "def __len__(self):\n return len(self.label)", "def length(self) -> 'int':\n return self._info.len", "def length(self):\n return self.__length", "def length(self):\n return self.__length", "def get_field_length_error_text(field_name):\n\n\treturn(\"Value entered for '{0}' exceeds character length limit of {1}\"\n\t\t .format(field_name, str(field_length_limit)))", "def get_length(self):\r\n return len(self.tweets)", "def get_counts(self):\n value = self.text_ctrl.GetValue()\n chars = len(value)\n words = len(re.findall('\\w+', value))\n pub.sendMessage('update_counts', chars=chars, words=words)", "def getAttributeLength(self):\r\n return self.attributeLength", "def __len__(self) -> int:\n return self.length", "def length(self):\n ...", "def str_len():\n strlen_dict = {}\n # Length of ion name\n strlen_dict['ion'] = 6\n # Length of data file name for line source\n strlen_dict['Source'] = 30\n # Return\n return strlen_dict", "def size(self) -> str:\n return self._search_in_description(REGEXP_ATTR_SIZE)", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def nkeytexts(self):\n return len(self.__keytexts)", "def nkeytexts(self):\n return len(self.__keytexts)", "def __len__(self):\n\n return self.length", "def characterSpace(text):\n return len(text)", "def word_count(self):\n return len(self.text)", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def Length(self) -> int:" ]
[ "0.69978696", "0.6772337", "0.6772269", "0.6674462", "0.66585314", "0.6562858", "0.64422786", "0.63689303", "0.63642585", "0.63120514", "0.6311637", "0.6309694", "0.6292428", "0.6272306", "0.6256533", "0.62360454", "0.6224102", "0.62151045", "0.6209286", "0.6154031", "0.6144847", "0.61350745", "0.613423", "0.6101091", "0.609945", "0.6087122", "0.6075593", "0.60414", "0.60293055", "0.60083586", "0.6001801", "0.6001801", "0.59731245", "0.5970789", "0.5942451", "0.59260374", "0.5924618", "0.5914966", "0.59125847", "0.589066", "0.58883035", "0.5887154", "0.5887154", "0.58834296", "0.58807", "0.58793074", "0.5872595", "0.58683825", "0.5866883", "0.5855758", "0.58515024", "0.583801", "0.58339655", "0.58333325", "0.5831363", "0.58295625", "0.5828764", "0.5824767", "0.58183134", "0.5815343", "0.58087367", "0.5797404", "0.5797191", "0.5792156", "0.5787015", "0.5787015", "0.5775134", "0.5771934", "0.5769543", "0.5769303", "0.5768576", "0.57571083", "0.57571083", "0.5749271", "0.5748379", "0.5746987", "0.5744574", "0.5740571", "0.5735564", "0.57326114", "0.57323104", "0.57313174", "0.57313174", "0.57313174", "0.57313174", "0.57313174", "0.57313174", "0.5729241", "0.5729241", "0.57239336", "0.5723391", "0.57225937", "0.57186013", "0.57186013", "0.57186013", "0.57186013", "0.5711449", "0.5711449", "0.5711449", "0.57090783" ]
0.7582059
0
Return the text of the Menu Entry
def getText(self): return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_header_menu_text(self, menu):\n if menu == BasePage.HOME:\n home = self.browser.find_element(*locators.HOME_LINK).text\n return home\n elif menu == BasePage.SERVICE:\n services = self.browser.find_element(*locators.SERVICE_LINK).text\n return services\n elif menu == BasePage.CONTACT_FORM:\n contact_form = self.browser.find_element(*locators.CONTACT_FORM_LINK).text\n return contact_form\n elif menu == BasePage.METALS_COLORS:\n metals_colors = self.browser.find_element(*locators.METALS_COLORS_LINK).text\n return metals_colors", "def getMenuOption():\n return menu_option", "def text(self):\n return self._combo.currentText()", "def getText(self):\n if self.app.children:\n return self.app.childActive.source.GetText()\n else:\n return ''", "def get_menu(menu_name):\n\n pass", "def display_menu(self):\n return ', '.join(menu.name for menu in self.menu.all()[:3])", "def GetItemText(self, item):\r\n\r\n return item.GetText()", "def text(self):\n if hasattr(self,'label'):\n return str(self.label.text())\n else:\n return self.key", "def get_selected_text(self):\r\n return self.selectedText()", "def menu(self):\n return self._menu", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def get_menu ( self, object, row ):\n return self.menu", "def 取现行选中项文本(self): # real signature unknown; restored from __doc__\n return self.GetStringSelection()", "def text(self):\n return self.label.text()", "def mainMenuText():\n print(\"\"\" 1. New Game\n 2. Load Game\n 3. Authors\n 4. Exit\"\"\")\n global choice\n choice = input(\"What to do? [Choose the number]:\")\n return(choice)", "def menu_quit():\n return \"Quit\"", "def getMenuItem(self, event):\n return self.GetMenuBar().FindItemById(event.GetId())", "def cmd(self):\n return self.view.command_input.edit_text", "def get_one_menu_option():", "def show_menu(menulist):\n text = \"0 ... Cancel\\n\"\n for item in menulist:\n text += \"{} ... {}\\n\".format(menulist.index(item)+1, item)\n return text", "def menuTitle(self):\n return self.tr(\"Fetch\")", "def main_menu(self):\n menu_string = \"Main menu\\n\"\n menu_string += \"\\t1. Modify a list\\n\"\n menu_string += \"\\t2. Grade submenu\\n\"\n menu_string += \"\\t3. Search for something\\n\"\n menu_string += \"\\t4. Get a statistic\\n\"\n menu_string += \"\\t5. Undo/Redo\\n\"\n menu_string += \"\\t0. Exit\\n\"\n stop = False\n\n while not stop:\n command_list = \\\n {'0': self.__no_command,\n '1': self.__modify_submenu,\n '2': self.__grade_submenu,\n '3': self.__search_submenu,\n '4': self.__statistics_submenu,\n '5': self.__undo_submenu\n }\n command = self.__ui_read_command(menu_string)\n\n if command in command_list.keys():\n if command == '0':\n return\n else:\n command_list[command]()\n\n else:\n print(\"Invalid command!\")", "def get_menu_item(menu_item_name):\n\n pass", "def get_menu ( self, object ):\n return self.menu", "def get_title_menu(self):\n return _(self.view_label).capitalize()", "def GetMenu(self):\n return self._menu", "def getText(self):", "def cb_cmd_text(data, item, window):\n return cmd_text", "def label(self):\r\n return self._text", "def cmd_get(self):\n return self.text", "def print_menu(self):\n for i,x in enumerate(self.menu):\n print(\"%i. 
%s\"%(i+1,x))\n return self.get_int()", "def main_menu_selection():\n action = input('''\n Pleaes select one:\n\n a - Send a thank you\n b - Create a report\n c - Quit\n >''')\n\n return action.strip()", "def selected_title(self):\r\n try:\r\n return menu_selected[self.name]\r\n except KeyError:\r\n return NavButton.selected_title(self)", "def menu(self):\n print(f\"{str(self)}\")", "def get_text(self):\n\n if self.text: return self.text\n # retrieve from args and return if exists\n text = Settings.get_text() or None\n if text: \n self.text = text\n return text\n # prompt skip\n if not Settings.prompt(\"text\"): return None\n question = {\n 'type': 'input',\n 'name': 'text',\n 'message': 'Text:'\n }\n text = prompt(question)[\"text\"]\n # confirm text\n if not Settings.confirm(text): return self.get_text()\n self.text = text\n return self.text", "def messageEntry(self,message,default=''):\n dlg = wx.TextEntryDialog(self, message,self.app.title, default)\n if dlg.ShowModal() == wx.ID_OK:value=dlg.GetValue()\n else:value=None\n dlg.Destroy()\n return value", "def entry(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"entry\")", "def get_text(self):", "def entry(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"entry\")", "def get_menu_items():\n\n pass", "def OnGetItemText(self, item, col):\n\n return self.get_item_text(item, col)", "def display_menu(self):\n print(\"\\n{}\".format(self.message))\n for i, h in enumerate(self.menu_handlers):\n # iterate through handlers and display menu text\n print(\"\\t{}. {}\".format(i+1, h.get_display_text()))\n # add option for exiting the program\n print(\"\\t{}. {}\".format(0, \"Exit\"))", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. 
Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def get_text(self):\n return self.text", "def back_to_menu_info(cls):\n print(\n \"\"\"\n ________________________________________________\n\n HABITSBOX\n ________________________________________________\n Hint: Press 0 (zero) to return to the main menu\n ------------------------------------------------\"\"\")", "def get_text( self, ):\n a_string = self.a_string_var.get( )\n return a_string", "def get_text( self, ):\n a_string = self.a_string_var.get( )\n return a_string", "def get_text(self):\n return self.row", "def OnGetItemText(self, item, column):\r\n \r\n return \"\"", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"c\": (\"Calibrate\", self.calibrate),\n \"d\": (\"Dance\", self.dance),\n \"h\": (\"Hold position\", self.hold_position),\n \"n\": (\"Navigate\", self.nav),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"q\": (\"Quit\", self.quit),\n \"v\": (\"Veer\", self.slither)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def helptext(self):\n return \"\"", "def getValue(self):\n return self.field.currentText()", "def render(self,session,context):\n #TODO: at some point this should actually get implemented, but to do so\n # would need to convert self.menu_items from list to a dict, where the\n # keys are the items that should be selected in the menu. \n # to do so would need to update MenuItem as well.\n menu = [] if self.title_str == '' else [\" %s\" % self.title_str]\n for idx , item in enumerate(self.menu_items):\n if item.hide_index: menu_text = str(item)\n else:\n menu_text = \"%s. 
%s\"%(item.custom_index, item)\n menu.append( menu_text )\n return \"\\n\".join(menu)", "def selected_title(self):\r\n return self.title", "def _get_prompt_text(self):\n return Blinking_Text(\n self,\n self.settings.font_light_filename,\n 48,\n self.settings.font_color,\n 'Press Enter',\n {'center': self.screen_rect.center},\n 0,\n 50,\n )", "def select_entry(self):\n print(self.contents[self.pointer])", "def primary_message():\n print(Fore.CYAN + \"\\n Select the unit you want to convert from:\\n\" +\n Fore.RESET)\n menu([\n [\"Celsius\", \"\\u00b0C\"],\n [\"Fahrenheit\", \"\\u00b0F\"],\n [\"Kelvin\", \"\\u00b0K\"],\n [\"Rankin\", \"\\u00b0R\"],\n [\"Delisle\", \"\\u00b0De\"],\n [\"Newton\", \"\\u00b0N\"],\n [\"R\\u00e9aumur\", \"\\u00b0R\\u00e9\"],\n [\"R\\u00f8mer\", \"\\u00b0R\\u00f8\"],\n [Fore.RED + \"Exit\\n\" + Fore.RESET,\"\"]\n ])", "def main_window_text(self) -> None:\n tk.Label(text='Название книги:').grid(row=0, column=0, padx=10, pady=10)\n tk.Label(text='Автор:').grid(row=1, column=0, padx=10)\n tk.Label(text='Жанр:').grid(row=2, column=0, padx=10, pady=10)\n entry_title = tk.Entry(width=45)\n entry_title.grid(row=0, column=1, sticky=tk.W)\n entry_author = tk.Entry(width=45)\n entry_author.grid(row=1, column=1, sticky=tk.W)\n entry_genre = tk.Entry(width=45)\n entry_genre.grid(row=2, column=1, sticky=tk.W)", "def entry(self) -> Optional[str]:\n return pulumi.get(self, \"entry\")", "def value(self):\n return str(self.input.currentText())", "def content(self) -> str | MenuData:\n raise NotImplementedError", "def getMenuItemParameter(self):\r\n return self.parameter", "def get_text(title='Enter a label', default=None):\n result, isok = QtWidgets.QInputDialog.getText(None, title, title, text=default)\n if isok:\n return str(result)", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values\n # You may change the menu if you'd like to add an experimental method\n menu = {\"n\": (\"Navigate forward\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"c\": (\"Calibrate\", self.calibrate),\n \"t\": (\"test restore\", self.calibrate),\n \"s\": (\"Check status\", self.status),\n \"q\": (\"Quit\", quit_now)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = raw_input(\"Your selection: \")\n # activate the item selected\n menu.get(ans, [None, error])[1]()", "def get_text(self):\n return self.get_property('text')", "def search_method_menu(self):\n\n print()\n options = {'1': 'Employee Name', '2': 'Keyword', '3': 'Time Spent',\n '4': 'Date', '5': 'Date Range', '6': 'Exit to main menu'}\n\n while True:\n\n for k, v in options.items():\n print(k + \". \" + v)\n\n user_choice = input('\\nPlease enter the number of choice: ').lower().strip()\n\n if user_choice in options.keys():\n return options.get(user_choice)\n else:\n print('\\nInvalid choice! 
Please try again.\\n')", "def restaurantMenuItem(restaurant_id, menu_id):\n\n return \"List menu item \" + str(menu_id) + \" for \" + str(restaurant_id)", "def get_text(self) -> str:\n return self.text", "def text(self):\n\n selected_option = self.select_elem.first_selected_option\n return selected_option.text", "def GetText(self):\r\n \r\n return self._text", "def create_menu():", "def get_menus():\n\n pass", "def askText(parent,message,title='',default=''):\r\n dialog = wx.TextEntryDialog(parent,message,title,default)\r\n if dialog.ShowModal() != wx.ID_OK:\r\n dialog.Destroy()\r\n return None\r\n else:\r\n value = dialog.GetValue()\r\n dialog.Destroy()\r\n return value", "def callMenu():\n print(\"Menu: \\\n \\n Area of a triangle (enter 'triangleArea') \\\n \\n Area of a square (enter 'squareArea') \\\n \\n Area of a parallelogram (enter 'paraArea') \\\n \\n Area of an ellipse (enter 'ellipseArea')\\\n \\n Area of a circle (enter 'circleArea')\\\n \\n Circumference of a circle (enter 'circleCirc')\\\n \\n Enter 'quit' to quit.\\\n \\n Enter 'menu' to show the menu again.\")", "def text(self):\n for attr in ['label', 'text']:\n val = self.attribute_value(attr)\n if val:\n return val\n\n return super(Option, self).text", "def get_text(self):\n text_element = self.page.find(id=self.text_location)\n return text_element.get_text()", "def file_menu(self):\n return self.GetMenu(self.FindMenu(\"File\"))", "def operationMenu(cls) -> int:\n print(\"Text File Operations -->\")\n print(\"1. Remove special characters.\")\n print(\"2. Remove all single characters.\")\n print(\"3. Remove multiple spaces.\")\n print(\"4. Convert the text into lower case.\")\n print(\"5. Expand the contractions in the text.\")\n op = int(input(\"Enter option: \"))\n return op", "def get_row_input_text(self, row_idx):\n return self.row_items[row_idx][1].get()", "def menu(update, context):\n\n update_message_text = update.callback_query.edit_message_text if update.callback_query else update.message.reply_text\n update_message_text(\n text='Please choose an option.',\n reply_markup=InlineKeyboardMarkup([\n [\n InlineKeyboardButton('Author Details', callback_data='details'),\n InlineKeyboardButton('Help', callback_data='help'),\n ],\n [\n InlineKeyboardButton('Linkedin Profile', url=Config.OWNER_WEBSITE),\n InlineKeyboardButton('Github repo', url=Config.GITHUB_REPO_URL),\n ],\n [\n InlineKeyboardButton('Download CV', url=Config.DOWNLOAD_CV_URL)\n ]\n ]),\n )", "def menu(self):\n print('1) Today\\'s tasks')\n print('2) Week\\'s tasks')\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()", "def display(self, prompt, items):\n dmenu = Popen([\"dmenu\"] + self.argv[1:] + [\"-p\", prompt], stdin=PIPE, stdout=PIPE)\n result = dmenu.communicate(input=bytes(\"\\n\".join(items), 'utf-8'))[0]\n if dmenu.returncode != 0:\n sys.exit(1)\n return result.decode('utf-8')[:-1]", "def get_menu() -> str:\n date = datetime.date.today()\n urls = generate_urls(date)\n menu_json = fetch_menu(urls)\n menu = extract_menu(menu_json, date)\n\n return menu", "def OutputMenuItems():\r\n print('''\r\n Menu of Options\r\n 1) Show current data\r\n 2) Add a new item.\r\n 3) Save Data to File\r\n 4) Exit Program\r\n ''')\r\n print() # Add an extra line for looks\r", "def GetItemText(self, item, column=None):\r\n\r\n if self.IsVirtual():\r\n return self._owner.OnGetItemText(item, column)\r\n else:\r\n return item.GetText(column)", "def entry(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"entry\")", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def menu(lst: list) -> str:\n\n # Print all menu items with numbers 1..n\n for z, y in enumerate(lst):\n print(z + 1, ':', y)\n # Create list with menu items\n nums = [str(z + 1) for z in range(len(lst))]\n nums_str = ', '.join(nums)\n # receive correct command\n while True:\n command = input(f'Enter command ({nums_str}): ')\n if command in nums:\n break\n else:\n print('Wrong command.')\n return command", "def menu(node):\n\n global menu_code\n\n menu_code = '\\n'\n root = node\n while root.parent:\n root = root.parent\n menu_(root, node)\n return menu_code", "def print_menu():\r\n clear()\r\n print(\"Ratatouille Server\")\r\n print(\"---------------------------\")\r\n print(\"\")\r\n\r\n for (index, func) in MENU.items():\r\n print(\"%d - %s\" % (index, func.__name__))\r\n\r\n return raw_input(\"Choose an option: \").lstrip()", "def menu():\n print('''\nMenu for deposits:\n 'n' - deposit a nickel\n 'd' - deposit a dime\n 'q' - deposit a quarter\n 'o' - deposit one dollar\n 'f' - deposit five dollars\n 'c' - cancel the purchase\n''')", "def menu(self) -> None:\n\t\tMenuOption = namedtuple(\"MenuOption\",[\"name\",\"function\",\"args\"])\n\t\tmenu_options = []\n\t\tmenu_options.append(MenuOption(\"Create New Character\",self.createNewCharacter,[]))\n\t\tmenu_options.append(MenuOption(\"Load Character from File\",self.loadCharacter,[\"path to input character data file\"]))\n\t\tmenu_options.append(MenuOption(\"Save Character\",self.saveCharacter,[\"path to output character data file\"]))\n\t\tmenu_options.append(MenuOption(\"Edit Character (COMING SOON)\",self.editCharacter,[]))\n\n\t\tmenu_options.append(MenuOption(\"Output Character Sheet to PDF\",self.printCharacter,[\"path to output PDF file\"]))\n\t\tmenu_options.append(MenuOption(\"Exit (or type 'exit')\",sys.exit,[]))\n\n\t\tcurrentName = self.sheet.choice_names['Name'] if len(self.sheet.choice_names['Name']) > 0 else \"None\"\n\t\tprint(f\"\\nMAIN MENU\\n---------\\nCurrent character: {currentName}\\nChoose an option:\")\n\t\tselection = chooseOne(menu_options,exit_message=\"Exit character creator?\")[1]\n\t\tif selection == False:\n\t\t\tsys.exit()\n\t\targs = []\n\t\tfor a in selection.args:\n\t\t\targs.append(input(f\"Enter {a}:\\n{PROMPT}\"))\n\t\ttry:\n\t\t\tif not selection.function(*args):\n\t\t\t\tlog.error(f\"Failed to {selection.name}\")\n\t\texcept SystemExit:\n\t\t\tsys.exit()\n\t\texcept Exception as e:\n\t\t\tlog.exception(f\"Failed to {selection.name}\")\n\t\t\tlog.debug(e)\n\t\tself.menu()", "def getMenuItemID(self):\r\n return self.eventID", "def getText(self):\n return self.text", "def getText(self):\n return self.text" ]
[ "0.6761892", "0.67316175", "0.66230667", "0.6615068", "0.65632343", "0.65293497", "0.65000373", "0.6497349", "0.64363074", "0.64161533", "0.6374737", "0.6374737", "0.6374737", "0.6374737", "0.6374737", "0.63471115", "0.6344143", "0.633642", "0.6325014", "0.62955856", "0.62814355", "0.6226568", "0.6211363", "0.6180822", "0.6177714", "0.6171444", "0.6169672", "0.61689883", "0.61233324", "0.6095661", "0.60895985", "0.6072018", "0.6066325", "0.60545975", "0.6051222", "0.60333574", "0.6031254", "0.6024541", "0.6009648", "0.5991059", "0.59864885", "0.5985945", "0.59858835", "0.5956606", "0.5938827", "0.59168917", "0.59166425", "0.59166425", "0.59110105", "0.5902702", "0.58952546", "0.58952546", "0.58828926", "0.58687514", "0.58639705", "0.58630997", "0.58384126", "0.5832232", "0.5831134", "0.5828583", "0.5818267", "0.5814995", "0.58015645", "0.5791237", "0.57677424", "0.5757852", "0.5756837", "0.57534844", "0.57474697", "0.57459855", "0.57354057", "0.5732328", "0.57317895", "0.5731422", "0.57155967", "0.57098913", "0.5682799", "0.5680652", "0.56792593", "0.56763303", "0.56740636", "0.5661141", "0.56603926", "0.565483", "0.5644639", "0.5640342", "0.5639929", "0.56368077", "0.5633598", "0.5630282", "0.5629605", "0.56135315", "0.5613266", "0.56095755", "0.5606503", "0.56062883", "0.56038874", "0.56024534", "0.5598505", "0.5598505" ]
0.56504595
84
Helper function to generate a jitted Lanczos function used in JaxBackend.eigsh_lanczos. The function `jax_lanczos` returned by this higher-order function has the following
def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable: @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6)) def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho): """ Jitted lanczos routine. Args: matvec: A callable implementing the matrix-vector product of a linear operator. arguments: Arguments to `matvec` additional to an input vector. `matvec` will be called as `matvec(init, *args)`. init: An initial input state to `matvec`. ncv: Number of krylov iterations (i.e. dimension of the Krylov space). neig: Number of eigenvalue-eigenvector pairs to be computed. landelta: Convergence parameter: if the norm of the current Lanczos vector falls below `landelta`, iteration is stopped. reortho: If `True`, reorthogonalize all krylov vectors at each step. This should be used if `neig>1`. Returns: jax.numpy.ndarray: Eigenvalues list: Eigenvectors """ def body_modified_gram_schmidt(i, vals): vector, krylov_vectors = vals v = krylov_vectors[i, :] vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape) return [vector, krylov_vectors] def body_lanczos(vals): current_vector, krylov_vectors, vector_norms = vals[0:3] diagonal_elements, matvec, args, _ = vals[3:7] threshold, i, maxiteration = vals[7:] norm = jax.numpy.linalg.norm(current_vector) normalized_vector = current_vector / norm normalized_vector, krylov_vectors = jax.lax.cond( reortho, True, lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt, [normalized_vector, krylov_vectors]), False, lambda x: [normalized_vector, krylov_vectors]) Av = matvec(normalized_vector, *args) diag_element = jax.numpy.vdot(normalized_vector, Av) res = jax.numpy.reshape( jax.numpy.ravel(Av) - jax.numpy.ravel(normalized_vector) * diag_element - krylov_vectors[i - 1] * norm, Av.shape) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :], jax.numpy.ravel(normalized_vector)) vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1], norm) diagonal_elements = jax.ops.index_update(diagonal_elements, jax.ops.index[i - 1], diag_element) return [ res, krylov_vectors, vector_norms, diagonal_elements, matvec, args, norm, threshold, i + 1, maxiteration ] def cond_fun(vals): _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals def check_thresh(check_vals): val, thresh = check_vals return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x) return jax.lax.cond(iteration <= maxiteration, [norm, threshold], check_thresh, False, lambda x: x) numel = jax.numpy.prod(init.shape) krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype) norms = jax.numpy.zeros(ncv, dtype=init.dtype) diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype) norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0) norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0), dtype=init.dtype)).dtype initvals = [ init, krylov_vecs, norms, diag_elems, matvec, arguments, norms_dtype.type(1.0), landelta, 1, ncv ] output = jax.lax.while_loop(cond_fun, body_lanczos, initvals) final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :], jax.numpy.ravel(final_state)) A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag( norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1) eigvals, U = jax.numpy.linalg.eigh(A_tridiag) eigvals = eigvals.astype(A_tridiag.dtype) def body_vector(i, vals): krv, unitary, states = vals dim = unitary.shape[1] n, m = jax.numpy.divmod(i, dim) states = jax.ops.index_add(states, jax.ops.index[n, :], 
krv[m + 1, :] * unitary[m, n]) return [krv, unitary, states] state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype) _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1), body_vector, [krylov_vecs, U, state_vectors]) return jax.numpy.array(eigvals[0:neig]), [ jax.numpy.reshape(vectors[n, :], init.shape) / jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig) ] return jax_lanczos
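A compact editorial restatement may help clarify what the routine above computes. The sketch below is not part of the dataset record: it re-expresses the same three-term Lanczos recurrence and Ritz-vector reconstruction in plain real-valued NumPy, with `ncv`, `neig`, and `landelta` mirroring the arguments of `jax_lanczos`. The helper name `lanczos_sketch` is hypothetical, the `matvec(v)` signature is simplified (no extra `*args`), and the optional reorthogonalization controlled by `reortho` is omitted.

import numpy as np

def lanczos_sketch(matvec, init, ncv, neig, landelta=1e-8):
    # Hypothetical NumPy restatement of jax_lanczos above; assumes ncv >= neig.
    # Krylov vectors are stored as rows; row 0 stays zero, mirroring the
    # jitted code, so the first recurrence step needs no special case.
    n = init.size
    V = np.zeros((ncv + 1, n))
    alphas = np.zeros(ncv)   # diagonal of the tridiagonal matrix
    betas = np.zeros(ncv)    # betas[0] holds ||init||, not an off-diagonal
    vec = init.astype(float)
    m = 0
    for i in range(1, ncv + 1):
        norm = np.linalg.norm(vec)
        if norm < landelta:  # Krylov space is (numerically) invariant
            break
        v = vec / norm
        V[i] = v
        Av = matvec(v)
        alphas[i - 1] = v @ Av
        betas[i - 1] = norm
        # Three-term recurrence: project out the two most recent vectors.
        vec = Av - alphas[i - 1] * v - norm * V[i - 1]
        m = i
    if m == 0:
        raise ValueError("initial vector norm is below landelta")
    T = (np.diag(alphas[:m]) + np.diag(betas[1:m], 1)
         + np.diag(betas[1:m], -1))
    eigvals, U = np.linalg.eigh(T)   # ascending, so [:neig] are the lowest
    ritz = U[:, :neig].T @ V[1:m + 1]       # Ritz vectors from Krylov basis
    ritz /= np.linalg.norm(ritz, axis=1, keepdims=True)
    return eigvals[:neig], ritz

# Example: the lowest eigenvalue of a small diagonal matrix.
A = np.diag([1.0, 2.0, 3.0, 4.0])
w, _ = lanczos_sketch(lambda x: A @ x, np.random.randn(4), ncv=4, neig=1)
# w[0] approximates 1.0, the smallest eigenvalue of A.

In the original codebase one would instead obtain the jitted routine with `jax_lanczos = _generate_jitted_eigsh_lanczos(jax)` and call it with an explicit matvec plus its extra arguments; the sketch drops the JAX-specific machinery (`jax.ops` indexed updates, `lax` control flow) for readability. Note also that the `jax.ops.index_update` API used above belongs to older JAX releases; current releases express the same updates with the `Array.at[...].set(...)` syntax.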
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. 
Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact", "def lanczos(dx, width, cutoff, /):\n # Coefficients and initial stuff\n # n = (width/dx)//1 # convert window width from 'time units' to 'steps'\n # n = width//2\n # Convert alpha to wavenumber (new units are 'inverse timesteps')\n alpha = 1.0 / (cutoff / dx)\n n = width\n n = (n - 1) // 2 + 1\n tau = np.arange(1, n + 1) # lag time\n C0 = 2 * alpha # integral of cutoff-response function is alpha*pi/pi\n Ck = np.sin(2 * np.pi * alpha * tau) / (np.pi * tau)\n Cktilde = Ck * np.sin(np.pi * tau / n) / (np.pi * tau / n)\n\n # Return filter\n # Example: n = 9 returns 4 + 4 + 1 points\n order = n * 2 - 1\n print(f'Order-{order} Lanczos window')\n window = np.concatenate((np.flipud(Cktilde), np.array([C0]), Cktilde))\n return window[1:-1], 1", "def optimisation_factory_Jzazbz() -> (\n Tuple[NDArrayFloat, Callable, Callable, Callable]\n):\n\n x_0 = as_float_array([1, 0, 0, 1, 0, 0])\n\n def objective_function(\n M: ArrayLike, RGB: ArrayLike, Jab: ArrayLike\n ) -> NDArrayFloat:\n \"\"\":math:`J_za_zb_z` colourspace based objective function.\"\"\"\n\n M = finaliser_function(M)\n\n XYZ_t = vector_dot(\n RGB_COLOURSPACE_ACES2065_1.matrix_RGB_to_XYZ, vector_dot(M, RGB)\n )\n Jab_t = XYZ_to_optimization_colour_model(XYZ_t)\n\n return as_float(np.sum(euclidean_distance(Jab, Jab_t)))\n\n def XYZ_to_optimization_colour_model(XYZ: ArrayLike) -> NDArrayFloat:\n \"\"\"*CIE XYZ* colourspace to :math:`J_za_zb_z` colourspace function.\"\"\"\n\n return XYZ_to_Jzazbz(XYZ)\n\n def finaliser_function(M: ArrayLike) -> NDArrayFloat:\n \"\"\"Finaliser function.\"\"\"\n\n return whitepoint_preserving_matrix(\n np.hstack([np.reshape(M, (3, 2)), zeros((3, 1))])\n )\n\n return (\n x_0,\n objective_function,\n XYZ_to_optimization_colour_model,\n finaliser_function,\n )", "def jit(func):\n return func", "def 
zonal( self, fields, fun ):\n raise NotImplementedError(\"zonal\")", "def test_clz_u4(self):\n compiled = cuda.jit(\"void(int32[:], uint32)\")(simple_clz)\n ary = np.zeros(1, dtype=np.int32)\n compiled[1, 1](ary, 0x00100000)\n self.assertEquals(ary[0], 11)", "def _vzlerchphi(self, z: np.ndarray, a: int) -> np.ndarray:\n return np.array([self._zlerchphi(z_, a) for z_ in z])", "def lherzolite():\n\n rho = 3270.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2\n C[1,0] = C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; C[2,4] = 2.38; C[2,5] = -0.12\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83\n\n return C, rho", "def jacobian_numba(coordinates, points, jac, greens_function):\n east, north, upward = coordinates[:]\n point_east, point_north, point_upward = points[:]\n for i in prange(east.size):\n for j in range(point_east.size):\n jac[i, j] = greens_function(\n east[i],\n north[i],\n upward[i],\n point_east[j],\n point_north[j],\n point_upward[j],\n )", "def _ltz(self):\n raise NotImplementedError(\"_ltz is not implemented\")", "def lanczos_decomp(vector_prod_fn, scalar, n, k):\n Q = tf.zeros([n, 1])\n v = tf.random_uniform([n, 1])\n v = v / tf.norm(v)\n Q = tf.concat([Q, v], axis=1)\n\n # diagonals of the tridiagonal matrix\n beta = tf.constant(0.0, dtype=tf.float32, shape=[1])\n alpha = tf.constant(0.0, dtype=tf.float32, shape=[1])\n\n for i in range(k):\n v = vector_prod_fn(tf.reshape(Q[:, i+1], [n, 1])) - tf.scalar_mul(scalar, tf.reshape(Q[:, i+1], [n, 1]))\n v = tf.reshape(v, [n,])\n curr_alpha = tf.reshape(tf.reduce_sum(v * Q[:, i+1]), [1,])\n alpha = tf.concat([alpha, curr_alpha], axis=0)\n v = v-beta[-1]*Q[:, i]-alpha[-1]*Q[:, i+1]\n curr_beta = tf.reshape(tf.norm(v), [1,])\n beta = tf.concat([beta, curr_beta], axis=0)\n curr_norm = tf.reshape(v/(beta[-1]+1e-8), [n, 1])\n Q = tf.concat([Q, curr_norm], axis=1)\n\n alpha = tf.slice(alpha, begin=[1], size=[-1])\n beta = tf.slice(beta, begin=[1], size=[k-1])\n Q = tf.slice(Q, begin=[0, 1], size=[-1, k])\n return alpha, beta, Q", "def zenazi(scx_l, scx_b, scy_l, scy_b, scz_l, scz_b, src_l, src_b):\n # Zenith is the distance from the optical axis (here z)\n costheta = GreatCircle(scz_l,scz_b,src_l,src_b) \n # Azimuth is the combination of the remaining two\n cosx = GreatCircle(scx_l,scx_b,src_l,src_b)\n cosy = GreatCircle(scy_l,scy_b,src_l,src_b)\n \n # check exceptions\n # maybe not for vectorisation\n \"\"\"\n if costheta.size == 1:\n if (costheta > 1.0):\n costheta = 1.0\n if (costheta < -1.0):\n costheta = -1.0\n else:\n costheta[costheta > 1.0] = 1.0\n costheta[costheta < -1.0] = -1.0\n \"\"\"\n # theta = zenith\n theta = np.rad2deg(np.arccos(costheta))\n # phi = azimuth\n phi = np.rad2deg(np.arctan2(cosy,cosx)) # TS January 14: you sure about that? 
changed y and x\n \n # make azimuth going from 0 to 360 deg\n if phi.size == 1:\n if (phi < 0):\n phi += 360\n else:\n phi[phi < 0] += 360\n \n return theta,phi", "def return_lxx_func(RunningCost='Minimize Input Energy'):\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. '\" + el + \"' not accepted.\"\n\n if \"Minimize Input Energy\" in RunningCost:\n result1 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n else:\n result1 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n if \"Minimize time away from target angle\" in RunningCost:\n result2 = lambda X,U,dt: np.matrix([[k1*1*dt,0],[0,0]])\n else:\n result2 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n if \"Minimize time away from target angular velocity\" in RunningCost:\n result3 = lambda X,U,dt: np.matrix([[0,0],[0,k2*1*dt]])\n else:\n result3 = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt) \\\n + result3(X,U,dt)\n return(result)", "def _implicitly_restarted_arnoldi(jax: types.ModuleType) -> Callable:\n\n arnoldi_fact = _generate_arnoldi_factorization(jax)\n\n # ######################################################\n # ####### NEW SORTING FUCTIONS INSERTED HERE #########\n # ######################################################\n @functools.partial(jax.jit, static_argnums=(1,))\n def LR_sort(evals, p):\n inds = np.argsort(jax.numpy.real(evals), kind='stable')[::-1]\n shifts = evals[inds][-p:]\n return shifts, inds\n\n @functools.partial(jax.jit, static_argnums=(1,))\n def LM_sort(evals, p):\n inds = np.argsort(jax.numpy.abs(evals), kind='stable')[::-1]\n shifts = evals[inds][-p:]\n return shifts, inds\n\n # #######################################################\n # #######################################################\n # #######################################################\n @functools.partial(jax.jit, static_argnums=(4, 5, 6))\n def shifted_QR(Vm, Hm, fm, evals, k, p, which, res_thresh):\n funs = [LR_sort, LM_sort]\n shifts, _ = funs[which](evals, p)\n # compress to k = numeig\n q = jax.numpy.zeros(Hm.shape[0])\n q = jax.ops.index_update(q, jax.ops.index[-1], 1)\n m = Hm.shape[0]\n\n for shift in shifts:\n Qj, _ = jax.numpy.linalg.qr(Hm - shift * jax.numpy.eye(m))\n Hm = Qj.T.conj() @ Hm @ Qj\n Vm = Qj.T @ Vm\n q = q @ Qj\n\n fk = Vm[k, :] * Hm[k, k - 1] + fm * q[k - 1]\n Vk = Vm[0:k, :]\n Hk = Hm[0:k, 0:k]\n H = jax.numpy.zeros((k + p + 1, k + p), dtype=fm.dtype)\n H = jax.ops.index_update(H, jax.ops.index[0:k, 0:k], Hk)\n Z = jax.numpy.linalg.norm(fk)\n v = fk / Z\n krylov_vectors = jax.numpy.zeros((k + p + 1, Vm.shape[1]), dtype=fm.dtype)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[0:k, :],\n Vk)\n krylov_vectors = jax.ops.index_update(krylov_vectors, 
jax.ops.index[k:], v)\n Z = jax.numpy.linalg.norm(fk)\n #if fk is a zero-vector then arnoldi has exactly converged.\n #use small threshold to check this\n return krylov_vectors, H, fk, Z < res_thresh\n\n @functools.partial(jax.jit, static_argnums=(2,))\n def update_data(Vm_tmp, Hm_tmp, numits):\n Vm = Vm_tmp[0:numits, :]\n Hm = Hm_tmp[0:numits, 0:numits]\n fm = Vm_tmp[numits, :] * Hm_tmp[numits, numits - 1]\n return Vm, Hm, fm\n\n @functools.partial(jax.jit, static_argnums=(3,))\n def get_vectors(Vm, unitary, inds, numeig):\n\n def body_vector(i, vals):\n krv, unitary, states, inds = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m, :] * unitary[m, inds[n]])\n return [krv, unitary, states, inds]\n\n state_vectors = jax.numpy.zeros([numeig, Vm.shape[1]], dtype=Vm.dtype)\n _, _, state_vectors, _ = jax.lax.fori_loop(\n 0, numeig * Vm.shape[0], body_vector,\n [Vm, unitary, state_vectors, inds])\n state_norms = jax.numpy.linalg.norm(state_vectors, axis=1)\n state_vectors = state_vectors / state_norms[:, None]\n return state_vectors\n\n\n def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n \"\"\"\n Implicitly restarted arnoldi factorization of `matvec`. The routine\n finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec`\n by alternating between compression and re-expansion of an initial\n `num_krylov_vecs`-step Arnoldi factorization.\n\n Note: The caller has to ensure that the dtype of the return value\n of `matvec` matches the dtype of the initial state. Otherwise jax\n will raise a TypeError.\n\n Args:\n matvec: A callable representing the linear operator.\n args: Arguments to `matvec`. `matvec` is called with\n `matvec(x, *args)` with `x` the input array on which\n `matvec` should act.\n initial_state: An starting vector for the iteration.\n num_krylov_vecs: Number of krylov vectors of the arnoldi factorization.\n numeig: The number of desired eigenvector-eigenvalue pairs.\n which: Which eigenvalues to target. Currently supported: `which = 'LR'`\n or `which = 'LM'`.\n eps: Convergence flag. 
If the norm of a krylov vector drops below `eps`\n the iteration is terminated.\n maxiter: Maximum number of (outer) iteration steps.\n Returns:\n eta, U: Two lists containing eigenvalues and eigenvectors.\n \"\"\"\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]\n\n return implicitly_restarted_arnoldi_method", "def make_vector_laplace(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(1)\n\n laplace_r = make_laplace(bcs.extract_component(0))\n laplace_z = make_laplace(bcs.extract_component(1))\n laplace_phi = make_laplace(bcs.extract_component(2))\n\n @jit_allocate_out(out_shape=(3,) + bcs.grid.shape)\n def vector_laplace(arr, out=None):\n \"\"\"apply gradient operator to array `arr`\"\"\"\n laplace_r(arr[0], out=out[0])\n laplace_z(arr[1], out=out[1])\n laplace_phi(arr[2], out=out[2])\n return out\n\n return vector_laplace # type: ignore", "def Lanczos(A, k, *, sparse=False, dim=None):\n if sparse:\n n = dim\n dtype = torch.float64\n Amap = A\n else:\n n = A.shape[0]\n dtype = A.dtype\n Amap = lambda v: torch.matmul(A, v)\n Qk = torch.zeros((n, k), dtype=dtype)\n alphas = torch.zeros(k, dtype=dtype)\n betas = torch.zeros(k - 1, dtype=dtype)\n q = torch.randn(n, dtype=dtype)\n q = q / torch.norm(q)\n u = Amap(q)\n alpha = torch.matmul(q, u)\n Qk[:, 0] = q\n alphas[0] = alpha\n beta = 0\n qprime = torch.randn(n, dtype=dtype)\n for i in range(1, k):\n r = u - alpha * q - beta * qprime\n\n # The 
simple but expensive full reorthogonalization process\n # in order to recover the orthogonality among the Lanczos vectors caused by\n # rounding error in floating point arithmetic.\n r -= torch.matmul(Qk[:, :i], torch.matmul(Qk[:, :i].T, r))\n\n qprime = q\n beta = torch.norm(r)\n q = r / beta\n u = Amap(q)\n alpha = torch.matmul(q, u)\n alphas[i] = alpha\n betas[i - 1] = beta\n Qk[:, i] = q\n T = torch.diag(alphas) + torch.diag(betas, diagonal=1) + torch.diag(betas, diagonal=-1)\n return Qk, T", "def laplacian_(self, grid, i, j):\n l1 = grid[(i+1+self.N) % self.N][j] + grid[(i-1+self.N) % self.N][j]\n l2 = grid[i][(j+1+self.N) % self.N] + grid[i][(j-1+self.N) % self.N]\n l3 = -4*grid[i][j]\n return (l1 + l2 + l3)/self.dx**2", "def optimisation_factory_Oklab_15() -> (\n Tuple[NDArrayFloat, Callable, Callable, Callable]\n):\n\n x_0 = as_float_array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1])\n\n def objective_function(\n M: ArrayLike, RGB: ArrayLike, Jab: ArrayLike\n ) -> NDArrayFloat:\n \"\"\"*Oklab* colourspace based objective function.\"\"\"\n\n M = finaliser_function(M)\n\n XYZ_t = np.transpose(\n np.dot(\n RGB_COLOURSPACE_ACES2065_1.matrix_RGB_to_XYZ,\n np.dot(\n M,\n np.transpose(\n polynomial_expansion_Finlayson2015(RGB, 2, True)\n ),\n ),\n )\n )\n\n Jab_t = XYZ_to_optimization_colour_model(XYZ_t)\n\n return as_float(np.sum(euclidean_distance(Jab, Jab_t)))\n\n def XYZ_to_optimization_colour_model(XYZ: ArrayLike) -> NDArrayFloat:\n \"\"\"*CIE XYZ* colourspace to *Oklab* colourspace function.\"\"\"\n\n return XYZ_to_Oklab(XYZ)\n\n def finaliser_function(M: ArrayLike) -> NDArrayFloat:\n \"\"\"Finaliser function.\"\"\"\n\n return whitepoint_preserving_matrix(\n np.hstack([np.reshape(M, (3, 5)), zeros((3, 1))])\n )\n\n return (\n x_0,\n objective_function,\n XYZ_to_optimization_colour_model,\n finaliser_function,\n )", "def zzX_zz_LC(f):\n if poly_univariate_p(f):\n return poly_LC(f)\n else:\n return zzX_zz_LC(poly_LC(f))", "def jordan_wigner_ladder_sparse(n_qubits, tensor_factor, ladder_type):\n parities = tensor_factor * [pauli_z_csc]\n identities = [\n scipy.sparse.identity(2**(n_qubits - tensor_factor - 1),\n dtype=complex,\n format='csc')\n ]\n if ladder_type:\n operator = kronecker_operators(parities + [q_raise_csc] + identities)\n else:\n operator = kronecker_operators(parities + [q_lower_csc] + identities)\n return operator", "def _get_jacobian(tris_pts):\n a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])\n b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])\n J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],\n [b[:, 0], b[:, 1]]])\n return J", "def laplacian(f,dx,dy,dz,x=[],y=[],z=[],param=[],dim=[]):\n if not param:\n param = read_param(quiet=True)\n if not dim:\n dim = read_dim()\n if len(x) < 1:\n gd = read_grid(quiet=True)\n x = gd.x\n y = gd.y\n z = gd.z\n\n laplacian = N.empty(f.shape)\n laplacian = xder2(f,dx,x=x,y=y,z=z,param=param,dim=dim) +\\\n yder2(f,dy,x=x,y=y,z=z,param=param,dim=dim) +\\\n zder2(f,dz,x=x,y=y,z=z,param=param,dim=dim)\n\n if param.coord_system == 'cylindric':\n laplacian += xder(f,dx,x=x,y=y,z=z,param=param,dim=dim)/x\n if param.coord_system == 'spherical':\n sin_y = N.sin(y)\n cos_y = N.cos(y)\n i_sin = N.where(N.abs(sin_y) < 1e-5)[0]\n if i_sin.size > 0:\n cos_y[i_sin] = 0.; sin_y[i_sin] = 1\n x_2, cotth = N.meshgrid(1./x**2, cos_y/sin_y)\n laplacian += 2*xder(f,dx,x=x,y=y,z=z,param=param,dim=dim)/x +\\\n yder(f,dy,x=x,y=y,z=z,param=param,dim=dim)*x_2*cotth\n\n return laplacian", "def laplacian_mat(n):\n data = [1, -2, 1]*n\n i = 
flatten([[k,k,k] for k in range(n)])\n j = flatten([[k-1, k, k+1] for k in range(n)])\n return scipy.sparse.coo_matrix((data[1:-1], (i[1:-1], j[1:-1])))", "def blas_header_text():\r\n header = \"\"\"\r\n extern \"C\"\r\n {\r\n\r\n void xerbla_(char*, void *);\r\n\r\n /***********/\r\n /* Level 1 */\r\n /***********/\r\n\r\n /* Single Precision */\r\n\r\n void srot_(const int*, float *, const int*, float *, const int*, const float *, const float *);\r\n void srotg_(float *,float *,float *,float *); \r\n void srotm_( const int*, float *, const int*, float *, const int*, const float *);\r\n void srotmg_(float *,float *,float *,const float *, float *);\r\n void sswap_( const int*, float *, const int*, float *, const int*);\r\n void scopy_( const int*, const float *, const int*, float *, const int*);\r\n void saxpy_( const int*, const float *, const float *, const int*, float *, const int*);\r\n float sdot_(const int*, const float *, const int*, const float *, const int*);\r\n void sdot_sub_(const int*, const float *, const int*, const float *, const int*, float *);\r\n void sdsdot_sub_( const int*, const float *, const float *, const int*, const float *, const int*, float *);\r\n void sscal_( const int*, const float *, float *, const int*);\r\n void snrm2_sub_( const int*, const float *, const int*, float *);\r\n void sasum_sub_( const int*, const float *, const int*, float *);\r\n void isamax_sub_( const int*, const float * , const int*, const int*);\r\n\r\n /* Double Precision */\r\n\r\n void drot_(const int*, double *, const int*, double *, const int*, const double *, const double *);\r\n void drotg_(double *,double *,double *,double *); \r\n void drotm_( const int*, double *, const int*, double *, const int*, const double *);\r\n void drotmg_(double *,double *,double *,const double *, double *);\r\n void dswap_( const int*, double *, const int*, double *, const int*);\r\n void dcopy_( const int*, const double *, const int*, double *, const int*);\r\n void daxpy_( const int*, const double *, const double *, const int*, double *, const int*);\r\n void dswap_( const int*, double *, const int*, double *, const int*);\r\n double ddot_(const int*, const double *, const int*, const double *, const int*);\r\n void dsdot_sub_(const int*, const float *, const int*, const float *, const int*, double *);\r\n void ddot_sub_( const int*, const double *, const int*, const double *, const int*, double *);\r\n void dscal_( const int*, const double *, double *, const int*);\r\n void dnrm2_sub_( const int*, const double *, const int*, double *);\r\n void dasum_sub_( const int*, const double *, const int*, double *);\r\n void idamax_sub_( const int*, const double * , const int*, const int*);\r\n\r\n /* Single Complex Precision */\r\n\r\n void cswap_( const int*, void *, const int*, void *, const int*);\r\n void ccopy_( const int*, const void *, const int*, void *, const int*);\r\n void caxpy_( const int*, const void *, const void *, const int*, void *, const int*);\r\n void cswap_( const int*, void *, const int*, void *, const int*);\r\n void cdotc_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void cdotu_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void cscal_( const int*, const void *, void *, const int*);\r\n void icamax_sub_( const int*, const void *, const int*, const int*);\r\n void csscal_( const int*, const float *, void *, const int*);\r\n void scnrm2_sub_( const int*, const void *, const int*, float *);\r\n void scasum_sub_( 
const int*, const void *, const int*, float *);\r\n\r\n /* Double Complex Precision */\r\n\r\n void zswap_( const int*, void *, const int*, void *, const int*);\r\n void zcopy_( const int*, const void *, const int*, void *, const int*);\r\n void zaxpy_( const int*, const void *, const void *, const int*, void *, const int*);\r\n void zswap_( const int*, void *, const int*, void *, const int*);\r\n void zdotc_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void zdotu_sub_( const int*, const void *, const int*, const void *, const int*, void *);\r\n void zdscal_( const int*, const double *, void *, const int*);\r\n void zscal_( const int*, const void *, void *, const int*);\r\n void dznrm2_sub_( const int*, const void *, const int*, double *);\r\n void dzasum_sub_( const int*, const void *, const int*, double *);\r\n void izamax_sub_( const int*, const void *, const int*, const int*);\r\n\r\n /***********/\r\n /* Level 2 */\r\n /***********/\r\n\r\n /* Single Precision */\r\n\r\n void sgemv_(char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void sgbmv_(char*, const int*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ssymv_(char*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ssbmv_(char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void sspmv_(char*, const int*, const float *, const float *, const float *, const int*, const float *, float *, const int*);\r\n void strmv_( char*, char*, char*, const int*, const float *, const int*, float *, const int*);\r\n void stbmv_( char*, char*, char*, const int*, const int*, const float *, const int*, float *, const int*);\r\n void strsv_( char*, char*, char*, const int*, const float *, const int*, float *, const int*);\r\n void stbsv_( char*, char*, char*, const int*, const int*, const float *, const int*, float *, const int*);\r\n void stpmv_( char*, char*, char*, const int*, const float *, float *, const int*);\r\n void stpsv_( char*, char*, char*, const int*, const float *, float *, const int*);\r\n void sger_( const int*, const int*, const float *, const float *, const int*, const float *, const int*, float *, const int*);\r\n void ssyr_(char*, const int*, const float *, const float *, const int*, float *, const int*);\r\n void sspr_(char*, const int*, const float *, const float *, const int*, float *); \r\n void sspr2_(char*, const int*, const float *, const float *, const int*, const float *, const int*, float *); \r\n void ssyr2_(char*, const int*, const float *, const float *, const int*, const float *, const int*, float *, const int*);\r\n\r\n /* Double Precision */\r\n\r\n void dgemv_(char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dgbmv_(char*, const int*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsymv_(char*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsbmv_(char*, const int*, const int*, const double *, const double *, const int*, const double 
*, const int*, const double *, double *, const int*);\r\n void dspmv_(char*, const int*, const double *, const double *, const double *, const int*, const double *, double *, const int*);\r\n void dtrmv_( char*, char*, char*, const int*, const double *, const int*, double *, const int*);\r\n void dtbmv_( char*, char*, char*, const int*, const int*, const double *, const int*, double *, const int*);\r\n void dtrsv_( char*, char*, char*, const int*, const double *, const int*, double *, const int*);\r\n void dtbsv_( char*, char*, char*, const int*, const int*, const double *, const int*, double *, const int*);\r\n void dtpmv_( char*, char*, char*, const int*, const double *, double *, const int*);\r\n void dtpsv_( char*, char*, char*, const int*, const double *, double *, const int*);\r\n void dger_( const int*, const int*, const double *, const double *, const int*, const double *, const int*, double *, const int*);\r\n void dsyr_(char*, const int*, const double *, const double *, const int*, double *, const int*);\r\n void dspr_(char*, const int*, const double *, const double *, const int*, double *); \r\n void dspr2_(char*, const int*, const double *, const double *, const int*, const double *, const int*, double *); \r\n void dsyr2_(char*, const int*, const double *, const double *, const int*, const double *, const int*, double *, const int*);\r\n\r\n /* Single Complex Precision */\r\n\r\n void cgemv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void cgbmv_(char*, const int*, const int*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void chemv_(char*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void chbmv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void chpmv_(char*, const int*, const void *, const void *, const void *, const int*, const void *, void *, const int*);\r\n void ctrmv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\r\n void ctbmv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ctpmv_( char*, char*, char*, const int*, const void *, void *, const int*);\r\n void ctrsv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\r\n void ctbsv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ctpsv_( char*, char*, char*, const int*, const void *, void *,const int*);\r\n void cgerc_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void cgeru_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void cher_(char*, const int*, const float *, const void *, const int*, void *, const int*);\r\n void cher2_(char*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void chpr_(char*, const int*, const float *, const void *, const int*, void *);\r\n void chpr2_(char*, const int*, const float *, const void *, const int*, const void *, const int*, void *);\r\n\r\n /* Double Complex Precision */\r\n\r\n void zgemv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const 
void *, void *, const int*);\r\n void zgbmv_(char*, const int*, const int*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void zhemv_(char*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void zhbmv_(char*, const int*, const int*, const void *, const void *, const int*, const void *, const int*, const void *, void *, const int*);\r\n void zhpmv_(char*, const int*, const void *, const void *, const void *, const int*, const void *, void *, const int*);\r\n void ztrmv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\r\n void ztbmv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ztpmv_( char*, char*, char*, const int*, const void *, void *, const int*);\r\n void ztrsv_( char*, char*, char*, const int*, const void *, const int*, void *, const int*);\r\n void ztbsv_( char*, char*, char*, const int*, const int*, const void *, const int*, void *, const int*);\r\n void ztpsv_( char*, char*, char*, const int*, const void *, void *,const int*);\r\n void zgerc_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void zgeru_( const int*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void zher_(char*, const int*, const double *, const void *, const int*, void *, const int*);\r\n void zher2_(char*, const int*, const void *, const void *, const int*, const void *, const int*, void *, const int*);\r\n void zhpr_(char*, const int*, const double *, const void *, const int*, void *);\r\n void zhpr2_(char*, const int*, const double *, const void *, const int*, const void *, const int*, void *);\r\n\r\n /***********/\r\n /* Level 3 */\r\n /***********/\r\n\r\n /* Single Precision */\r\n\r\n void sgemm_(char*, char*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ssymm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ssyrk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\r\n void ssyr2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void strmm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n void strsm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n\r\n /* Double Precision */\r\n\r\n void dgemm_(char*, char*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsymm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dsyrk_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, double *, const int*);\r\n void dsyr2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void dtrmm_(char*, char*, 
char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n void dtrsm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n\r\n /* Single Complex Precision */\r\n\r\n void cgemm_(char*, char*, const int*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void csymm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void chemm_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void csyrk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\r\n void cherk_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, float *, const int*);\r\n void csyr2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void cher2k_(char*, char*, const int*, const int*, const float *, const float *, const int*, const float *, const int*, const float *, float *, const int*);\r\n void ctrmm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n void ctrsm_(char*, char*, char*, char*, const int*, const int*, const float *, const float *, const int*, float *, const int*);\r\n\r\n /* Double Complex Precision */\r\n\r\n void zgemm_(char*, char*, const int*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zsymm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zhemm_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zsyrk_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, double *, const int*);\r\n void zherk_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, double *, const int*);\r\n void zsyr2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void zher2k_(char*, char*, const int*, const int*, const double *, const double *, const int*, const double *, const int*, const double *, double *, const int*);\r\n void ztrmm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n void ztrsm_(char*, char*, char*, char*, const int*, const int*, const double *, const double *, const int*, double *, const int*);\r\n\r\n }\r\n \"\"\"\r\n\r\n if detect_macos_sdot_bug():\r\n if detect_macos_sdot_bug.fix_works:\r\n header += textwrap.dedent(\"\"\"\\\r\n extern \"C\" float cblas_sdot(int, float*, int, float*, int);\r\n static float sdot_(int* Nx, float* x, int* Sx, float* y, int* Sy)\r\n {\r\n return cblas_sdot(*Nx, x, *Sx, y, *Sy);\r\n }\r\n \"\"\")\r\n else:\r\n # Make sure the buggy version of sdot_ is never used\r\n header += textwrap.dedent(\"\"\"\\\r\n static float sdot_(int* Nx, float* x, int* Sx, 
float* y, int* Sy)\r\n {\r\n fprintf(stderr,\r\n \"FATAL: The implementation of BLAS SDOT \"\r\n \"routine in your system has a bug that \"\r\n \"makes it return wrong results.\\\\n\"\r\n \"Please contact [email protected].\\\\n\"\r\n \"You can work around this bug by using a \"\r\n \"different BLAS library, or disabling BLAS\\\\n\");\r\n assert(0);\r\n }\r\n \"\"\")\r\n\r\n return header", "def calculate_jacobian(robot_position, landmark_pos):\n\n return None", "def generate_raw_decomposition(alphabeta, lanczos_iterations=None):\n # extract matrix elements\n alpha, beta = alphabeta\n\n # trim vectors\n if (lanczos_iterations is not None):\n (alpha, beta) = (alpha[:lanczos_iterations],beta[:lanczos_iterations-1])\n\n # generate Lanczos decomposition\n eigvals, eigvecs = linalg.eigh_tridiagonal(alpha, beta)\n raw_decomposition = [\n (eigval,eigvecs[0, i]**2)\n for i, eigval in enumerate(eigvals)\n ]\n\n return raw_decomposition", "def isYZPlanar(points=[]):\n return isCardinalPlanar(\"yz\",points)", "def _TODOStepsScipy(z, nstep, refr, Fin):\n\n if Fin._curvature != 0.0:\n raise ValueError('Cannot operate on spherical coords.'\n + 'Use Convert() first')\n Fout = Field.copy(Fin)\n N = Fout.N\n lam = Fout.lam\n size = Fout.siz\n dtype = Fout._dtype\n \n legacy = True\n if legacy:\n Pi = 3.141592654 #to compare Cpp results accurately\n else:\n Pi = _np.pi\n K = 2.*Pi/lam\n z = z/2.\n Pi4lz = 4.*Pi/lam/z\n imPi4lz = 1j * Pi4lz\n \n delta = size/(N-1.) #dx\n delta2 = delta*delta\n \n n = 100\n c = 1\n # n = N\n # c = delta**2\n def f(u, ):\n return u**3\n \n def f_prime(u):\n return 3 * u**2\n \n def fun(u, n, f, f_prime, c, **kwargs):\n v = _np.zeros((n + 2, n + 2))\n u = u.reshape((n, n))\n v[1:-1, 1:-1] = u\n y = v[:-2, 1:-1] + v[2:, 1:-1] + v[1:-1, :-2] + v[1:-1, 2:] - 4 * u + c * f(u)\n return y.ravel()\n\n def compute_jac_indices(n):\n i = _np.arange(n)\n jj, ii = _np.meshgrid(i, i)\n \n ii = ii.ravel()\n jj = jj.ravel()\n \n ij = _np.arange(n**2)\n \n jac_rows = [ij]\n jac_cols = [ij]\n \n mask = ii > 0\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask - n)\n \n mask = ii < n - 1\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask + n)\n \n mask = jj > 0\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask - 1)\n \n mask = jj < n - 1\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask + 1)\n \n return _np.hstack(jac_rows), _np.hstack(jac_cols)\n jac_rows, jac_cols = compute_jac_indices(N)\n # u0 = np.ones(n**2) * 0.5\n u0 = Fin.field.ravel() #initial guess is old field\n \n def jac(u, n, f, f_prime, c, jac_rows=None, jac_cols=None):\n jac_values = _np.ones_like(jac_cols, dtype=float)\n jac_values[:n**2] = -4 + c * f_prime(u)\n return coo_matrix((jac_values, (jac_rows, jac_cols)),\n shape=(n**2, n**2))\n \n res_1 = least_squares(fun, u0.real, jac=jac, gtol=1e-3,\n args=(N, f, f_prime, c),\n kwargs={'jac_rows': jac_rows,\n 'jac_cols': jac_cols},\n verbose=0)\n # print(res_1)\n Fout.field = res_1.x.reshape((N, N))\n Fout._IsGauss=False\n return Fout", "def build_jacobian(l_comp, R_comp, l_vect, R_vect, B_vect):\r\n l_len = numpy.sqrt((l_vect * l_vect).sum(-1))\r\n R_len = numpy.sqrt((R_vect * R_vect).sum(-1))\r\n B_len = numpy.sqrt((B_vect * B_vect).sum(-1))\r\n # Empty 3x3 jacobian matrix\r\n jacob = numpy.zeros((B_vect.shape[-1], B_vect.shape[-1]), B_vect.dtype)\r\n\r\n # This is in the space with a standard basis along the \"l\", \"R\" and \"B\" axes\r\n jacob[1, 2] = -B_len / R_len\r\n jacob[2, 0] = 
l_comp\r\n jacob[2, 1] = R_comp\r\n\r\n # Transform the Jacobian to main space\r\n xform = numpy.stack((\r\n l_vect / l_len,\r\n R_vect / R_len,\r\n B_vect / B_len\r\n )).T\r\n xform_inv = numpy.linalg.inv(xform)\r\n return numpy.matmul(xform, numpy.matmul(jacob.T, xform_inv)).T", "def njit(func):\n return func", "def test_jitable_funcs(self):\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = None", "def calc_lampam_sym(ss, constraints):\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n\n n_plies_in_panels = 2 * np.size(ss) # laminate ply count\n\n cos_sin = np.empty((4, n_plies_in_panels // 2), float)\n for ind in range(n_plies_in_panels // 2):\n cos_sin[:, ind] = constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4, ))\n\n for_the_top = np.arange(n_plies_in_panels // 2)\n z_0 = np.ones(n_plies_in_panels // 2)\n z_2 = ((1 - n_plies_in_panels / 2) * z_0 + for_the_top) ** 3 \\\n - ((1 - n_plies_in_panels / 2) * z_0 + for_the_top - 1) ** 3\n lampam = np.array([\n (2 / n_plies_in_panels)*np.matmul(cos_sin, z_0),\n np.array([0, 0, 0, 0]),\n (8 / n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n return lampam", "def make_laplace(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(0)\n boundary_r, boundary_z = bcs\n\n # calculate preliminary quantities\n dim_r, dim_z = bcs.grid.shape\n dr_2, dz_2 = 1 / bcs.grid.discretization ** 2\n\n value_outer = boundary_r.high.make_virtual_point_evaluator()\n region_z = boundary_z.make_region_evaluator()\n\n # use processing for large enough arrays\n parallel = dim_r * dim_z >= config[\"numba.parallel_threshold\"]\n\n @jit_allocate_out(parallel=parallel, out_shape=(dim_r, dim_z))\n def laplace(arr, out=None):\n \"\"\"apply laplace operator to array `arr`\"\"\"\n for j in nb.prange(0, dim_z): # iterate axial points\n # inner radial boundary condition\n i = 0\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n out[i, j] = (\n 2 * (arr[i + 1, j] - arr_c) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n\n if dim_r == 1:\n continue # deal with singular radial dimension\n\n for i in range(1, dim_r - 1): # iterate radial points\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n arr_r_l, arr_r_h = arr[i - 1, j], arr[i + 1, j]\n out[i, j] = (\n (arr_r_h - 2 * arr_c + arr_r_l) * dr_2\n + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n\n # outer radial boundary condition\n i = 
dim_r - 1\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n arr_r_l, arr_r_h = arr[i - 1, j], value_outer(arr, (i, j))\n out[i, j] = (\n (arr_r_h - 2 * arr_c + arr_r_l) * dr_2\n + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n return out\n\n return laplace # type: ignore", "def lap_mat(self):", "def I_matrix(api_calls, invokes):\n i = len(api_calls)\n j = len(api_calls)\n matrix = lil_matrix((i,j), dtype=np.int8)\n for invoke in invokes:\n matrix[invoke] = 1\n matrix[(invoke[1],invoke[0])] = 1\n return matrix", "def test_compute_jaccard(self):\n pass", "def helmholtz_equation(\n outputs, inputs, parameterization, return_diagnostics=False\n):\n batched = len(inputs.size()) > 1\n jac, lap = jacobian_and_laplacian(\n outputs, inputs, batched=batched, create_graph=True, allow_unused=False\n )\n\n frequency = (2 * np.pi * parameterization[..., 1]).view(outputs.size())\n # r$ \\nabla^2 u = - k^2 u$\n lhs = lap\n rhs = -frequency * frequency * outputs\n\n if return_diagnostics:\n return lhs - rhs, (lhs, rhs, jac)\n else:\n return lhs - rhs", "def nodalLaplacian(self):\n if getattr(self, '_nodalLaplacian', None) is None:\n print('Warning: Laplacian has not been tested rigorously.')\n # The number of cell centers in each direction\n n = self.vnC\n # Compute divergence operator on faces\n if(self.dim == 1):\n D1 = sdiag(1./self.hx) * ddx(self.nCx)\n L = - D1.T*D1\n elif(self.dim == 2):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n L1 = sp.kron(speye(n[1]+1), - D1.T * D1)\n L2 = sp.kron(- D2.T * D2, speye(n[0]+1))\n L = L1 + L2\n elif(self.dim == 3):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n D3 = sdiag(1./self.hz) * ddx(n[2])\n L1 = kron3(speye(n[2]+1), speye(n[1]+1), - D1.T * D1)\n L2 = kron3(speye(n[2]+1), - D2.T * D2, speye(n[0]+1))\n L3 = kron3(- D3.T * D3, speye(n[1]+1), speye(n[0]+1))\n L = L1 + L2 + L3\n self._nodalLaplacian = L\n return self._nodalLaplacian", "def _make_cas_function():\n # Generate IR\n mod = lc.Module.new('generate-cas')\n llint = lc.Type.int()\n llintp = lc.Type.pointer(llint)\n fnty = lc.Type.function(llint, [llintp, llint, llint])\n fn = mod.add_function(fnty, name='.numba.parallel.ufunc.cas')\n ptr, old, repl = fn.args\n bb = fn.append_basic_block('')\n builder = lc.Builder.new(bb)\n outpack = builder.cmpxchg(ptr, old, repl, ordering='monotonic')\n out = builder.extract_value(outpack, 0)\n failed = builder.extract_value(outpack, 1)\n builder.ret(builder.select(failed, old, out))\n\n # Build & Link\n llmod = ll.parse_assembly(str(mod))\n\n target = ll.Target.from_triple(ll.get_process_triple())\n tm = target.create_target_machine()\n engine = ll.create_mcjit_compiler(llmod, tm)\n ptr = engine.get_function_address(fn.name)\n return engine, ptr", "def calc_jacobian(\n model: nn.Module,\n latents: torch.Tensor,\n normalize: bool = False,\n eps: float = 1e-8,\n vectorize=False,\n reverse_ad=True,\n norm_range=True,\n norm_diagonal=False,\n) -> torch.Tensor:\n # set to eval mode but remember original state\n in_training: bool = model.training\n model.eval() # otherwise we will get 0 gradients\n with torch.set_grad_enabled(True):\n jacob = []\n input_vars = latents.clone().requires_grad_(True)\n\n output_vars = model(input_vars)\n if not vectorize:\n for i in range(output_vars.shape[1]):\n jacob.append(\n torch.autograd.grad(\n output_vars[:, i : i + 1],\n input_vars,\n create_graph=True,\n grad_outputs=torch.ones(output_vars[:, i : i + 1].shape).to(\n 
output_vars.device\n ),\n )[0].detach()\n )\n\n jacobian = torch.stack(jacob, 1)\n else:\n from functorch import vmap, jacrev, jacfwd\n\n if reverse_ad is True:\n jac_fn = jacrev\n else:\n jac_fn = jacfwd\n\n sample_jacobian = jac_fn(model.forward, argnums=0)\n jacobian = vmap(\n lambda x: sample_jacobian(torch.unsqueeze(x, 0)), in_dims=0\n )(input_vars).squeeze()\n\n if normalize is True:\n # normalize the Jacobian by making it volume preserving\n # jacobian /= jacobian.det().abs().pow(1 / jacobian.shape[-1]).reshape(-1, 1, 1)\n\n # normalize to make variance to 1\n # norm_factor = (output_vars.std(dim=0) + 1e-8)\n # jacobian /= norm_factor.reshape(1, 1, -1)\n if norm_range is True:\n # normalize range to [0;1]\n dim_range = (\n (output_vars.max(dim=0)[0] - output_vars.min(dim=0)[0])\n .abs()\n .reshape(-1, 1)\n )\n\n jacobian /= dim_range + eps\n elif norm_diagonal is True:\n assert (dim := jacobian.shape[1]) == jacobian.shape[2]\n jacobian /= jacobian[:, (r := torch.arange(dim)), r].unsqueeze(-1) + eps\n\n # set back to original mode\n if in_training is True:\n model.train()\n\n return jacobian", "def laplacian(mesh):\n faces = np.array(mesh.triangles)\n N = np.array(mesh.vertices).shape[0]\n A = np.zeros((N, N))\n for i in range(3):\n for j in range(3):\n if i == j:\n continue\n A[(faces[:, i], faces[:, j])] = 1.0\n A = A + A.T\n diag = A.dot(np.ones(N))\n L = np.diag(diag) - A\n return L", "def test_cels():\n N = 999\n kcc = (np.random.rand(N) - 0.5) * 10\n pp = (np.random.rand(N) - 0.5) * 10\n cc = (np.random.rand(N) - 0.5) * 10\n ss = (np.random.rand(N) - 0.5) * 10\n\n res0 = [cel0(kc, p, c, s) for kc, p, c, s in zip(kcc, pp, cc, ss)]\n res1 = celv(kcc, pp, cc, ss)\n res2 = cel(kcc, pp, cc, ss)\n\n assert np.allclose(res0, res1)\n assert np.allclose(res1, res2)", "def _vlerchphi(self, z: np.ndarray, a: int) -> np.ndarray:\n return np.array([self._lerchphi(z_, a) for z_ in z])", "def return_lx_func(RunningCost='Minimize Input Energy'):\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. 
'\" + el + \"' not accepted.\"\n\n result = lambda X,U,dt: np.matrix([[0],[0]])\n if \"Minimize Input Energy\" in RunningCost:\n result1 = lambda X,U,dt: np.matrix([[0],[0]])\n else:\n result1 = lambda X,U,dt: np.matrix([[0],[0]])\n\n if \"Minimize time away from target angle\" in RunningCost:\n result2 = lambda X,U,dt: np.matrix([[k1*(X[0]-TargetAngle)*dt],[0]])\n else:\n result2 = lambda X,U,dt: np.matrix([[0],[0]])\n\n if \"Minimize time away from target angular velocity\" in RunningCost:\n result3 = lambda X,U,dt: np.matrix([[0],[k2*(X[1]-TargetAngularVelocity)*dt]])\n else:\n result3 = lambda X,U,dt: np.matrix([[0],[0]])\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt) \\\n + result3(X,U,dt)\n return(result)", "def lfun(z, lparams):\n W, b = unpack(lparams)\n return np.tanh(np.dot(z, W) + b)", "def rebuild_the_laplacians():\n local_matrix = InteractomeInterface()\n local_matrix.full_rebuild()\n\n annot_matrix = AnnotomeInterface()\n annot_matrix.full_rebuild()", "def JacobianFunction(p,x,y,z):\n \n n = len(x)\n \n J = np.array([ np.ones((n)),x,x**2,y,y**2,x*y ])\n \n return J", "def get_lz(self):\r\n return self.dz * self.nz - self.oz", "def P_matrix(api_calls, packages):\n i = len(api_calls)\n j = len(api_calls)\n matrix = lil_matrix((i,j), dtype=np.int8)\n for package in packages:\n matrix[package] = 1\n matrix[(package[1],package[0])] = 1\n return matrix", "def bjs(l, c):\n if len(l) == 4:\n l = mbvector(l)\n elif len(l) == 3:\n pass\n else:\n return 0\n v = np.array([1, pi, e])\n r = l / np.linalg.norm(l)\n m = np.cross(r, v)\n n = np.cross(r, m)\n m = m / np.linalg.norm(m)\n n = n / np.linalg.norm(n)\n w = np.arange(0, 2 * pi, 0.001)\n s = len(w)\n\n mm = vect_contract(m, c, m)\n mn = vect_contract(m, c, n)\n nm = vect_contract(n, c, m)\n nn0 = vect_contract(n, c, n)\n nn = np.linalg.inv(nn0)\n\n val1 = mm - np.dot(np.dot(mn, nn), nm)\n R = BB = np.zeros(shape=(3, 3))\n for i in range(1, s):\n t = 1 - cos(w[i])\n CO = cos(w[i])\n SI = sin(w[i])\n R[0, 0] = t * r[0] ** 2 + CO\n R[0, 1] = t * r[0] * r[1] - SI * r[2]\n R[0, 2] = t * r[0] * r[2] + SI * r[1]\n R[1, 0] = t * r[0] * r[1] + SI * r[2]\n R[1, 1] = t * r[1] ** 2 + CO\n R[1, 2] = t * r[1] * r[2] - SI * r[0]\n R[2, 0] = t * r[0] * r[2] - SI * r[1]\n R[2, 1] = t * r[1] * r[2] + SI * r[0]\n R[2, 2] = t * r[2] ** 2 + CO\n\n mr = np.dot(R, np.transpose(m))\n nr = np.dot(R, np.transpose(n))\n\n mm = vect_contract(mr, c, mr)\n mn = vect_contract(mr, c, nr)\n nm = vect_contract(nr, c, mr)\n nn0 = vect_contract(nr, c, nr)\n nn = np.linalg.inv(nn0)\n val2 = mm - np.dot(np.dot(mn, nn), nm)\n BB = BB + 0.5 * (val2 + val1) * (w[i] - w[i - 1])\n val1 = val2\n B = BB / (8 * pi**2)\n return B", "def return_lxu_func(RunningCost='Minimize Input Energy'):\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. 
Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. '\" + el + \"' not accepted.\"\n\n if \"Minimize Input Energy\" in RunningCost:\n result1 = lambda X,U,dt: np.matrix([[0],[0]])\n else:\n result1 = lambda X,U,dt: np.matrix([[0],[0]])\n\n if \"Minimize time away from target angle\" in RunningCost:\n result2 = lambda X,U,dt: np.matrix([[0],[0]])\n else:\n result2 = lambda X,U,dt: np.matrix([[0],[0]])\n if \"Minimize time away from target angular velocity\" in RunningCost:\n result3 = lambda X,U,dt: np.matrix([[0],[0]])\n else:\n result3 = lambda X,U,dt: np.matrix([[0],[0]])\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt) \\\n + result3(X,U,dt)\n return(result)", "def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u1 = zeros((2*n))\n u1[0:n] = u\n \n c = np.concatenate((c, [0], r[-1:0:-1])) \n \n y1 = circulant_multiplication(u1, c)\n \n return y1[0:n]", "def zernikeJac(coeffs, x, y, z):\n\n coeffs = np.ones_like(coeffs)\n coeffs[:2] = 0\n\n jac = np.zeros((len(z),len(coeffs)))\n\n for i in range(2,len(coeffs)):\n cis = np.zeros(36)\n cis[i] = 1.\n jac[i] = zernikePoly(x, y, midPoint(x), midPoint(y), cis)\n\n return jac", "def isXYPlanar(points=[]):\n return isCardinalPlanar(\"xy\",points)", "def calculate_celestial_pole_array(native_reference_x,\n native_reference_cos_lat,\n native_reference_sin_lat,\n reference_x, reference_y,\n reference_cos_lat, reference_sin_lat,\n native_pole_x, native_pole_y,\n select_solution): # pragma: no cover\n native_reference_x = np.atleast_1d(np.asarray(native_reference_x))\n native_reference_cos_lat = np.atleast_1d(\n np.asarray(native_reference_cos_lat))\n native_reference_sin_lat = np.atleast_1d(\n np.asarray(native_reference_sin_lat))\n reference_x = np.atleast_1d(np.asarray(reference_x))\n reference_y = np.atleast_1d(np.asarray(reference_y))\n reference_cos_lat = np.atleast_1d(np.asarray(reference_cos_lat))\n reference_sin_lat = np.atleast_1d(np.asarray(reference_sin_lat))\n native_pole_x = np.atleast_1d(np.asarray(native_pole_x))\n native_pole_y = np.atleast_1d(np.asarray(native_pole_y))\n\n sizes = np.array([native_reference_x.size,\n reference_x.size,\n native_pole_x.size])\n max_array = np.argmax(sizes)\n\n if max_array == 0:\n x = np.empty_like(native_reference_x, dtype=nb.float64)\n y = np.empty_like(native_reference_x, dtype=nb.float64)\n elif max_array == 1:\n x = np.empty_like(reference_x, dtype=nb.float64)\n y = np.empty_like(reference_x, dtype=nb.float64)\n else:\n x = np.empty_like(native_pole_x, dtype=nb.float64)\n y = np.empty_like(native_pole_x, dtype=nb.float64)\n\n n = x.size\n\n flat_x, flat_y = x.flat, y.flat\n flat_native_reference_x = native_reference_x.flat\n flat_native_reference_cos_lat = native_reference_cos_lat.flat\n flat_native_reference_sin_lat = native_reference_sin_lat.flat\n flat_reference_x = reference_x.flat\n flat_reference_y = reference_y.flat\n flat_reference_cos_lat = reference_cos_lat.flat\n flat_reference_sin_lat = reference_sin_lat.flat\n flat_native_pole_x = native_pole_x.flat\n flat_native_pole_y = native_pole_y.flat\n\n singular_native_reference = native_reference_x.size == 1\n singular_reference = reference_x.size == 1\n singular_native_pole = native_pole_x.size == 1\n\n for i in 
range(n):\n natref_i = 0 if singular_native_reference else i\n ref_i = 0 if singular_reference else i\n pole_i = 0 if singular_native_pole else i\n\n flat_x[i], flat_y[i] = calculate_celestial_pole(\n native_reference_x=flat_native_reference_x[natref_i],\n native_reference_cos_lat=flat_native_reference_cos_lat[natref_i],\n native_reference_sin_lat=flat_native_reference_sin_lat[natref_i],\n reference_x=flat_reference_x[ref_i],\n reference_y=flat_reference_y[ref_i],\n reference_cos_lat=flat_reference_cos_lat[ref_i],\n reference_sin_lat=flat_reference_sin_lat[ref_i],\n native_pole_x=flat_native_pole_x[pole_i],\n native_pole_y=flat_native_pole_y[pole_i],\n select_solution=select_solution)\n return x, y", "def symeigLanczos(A, k, extreme=\"both\", *, sparse=False, dim=None):\n Qk, T = Lanczos(A, k, sparse=sparse, dim=dim)\n eigvalsQ, eigvectorsQ = torch.symeig(T, eigenvectors=True)\n eigvectorsQ = torch.matmul(Qk, eigvectorsQ)\n if extreme == \"both\":\n return eigvalsQ[0], eigvectorsQ[:, 0], eigvalsQ[-1], eigvectorsQ[:, -1]\n elif extreme == \"min\":\n return eigvalsQ[0], eigvectorsQ[:, 0]\n elif extreme == \"max\":\n return eigvalsQ[-1], eigvectorsQ[:, -1]", "def nnz(self):", "def isXZPlanar(points=[]):\n return isCardinalPlanar(\"xz\",points)", "def colii_callen(mu,Ti,Te,nev,Zi=None):\n if Zi==None:\n Zi=1.\n return np.sqrt(me/mp/mu)*(Te/Ti)**(1.5)*Zi**2/np.sqrt(2)*cole(Te,nev);", "def calc_jacobian(*args, **kwargs):\n try:\n tag = kwargs[\"tag\"]\n except:\n tag = 0\n\n try:\n sparse = kwargs[\"sparse\"]\n except:\n sparse = True\n\n if sparse:\n try:\n shape = kwargs[\"shape\"]\n except:\n raise ValueError(\"'shape' should be passed to calculate sparse jacobian!\")\n\n \n options = np.array([0,0,0,0],dtype=int)\n result = ad.colpack.sparse_jac_no_repeat(tag, *args, options=options)\n nnz = result[0]\n ridx = result[1]\n cidx = result[2]\n values = result[3]\n assert nnz > 0\n jac = sp.csr_matrix((values, (ridx, cidx)), shape=shape)\n jac = jac.toarray()\n else:\n jac = ad.jacobian(tag, *args)\n return jac", "def vjp_assemble_eval(\n fenics_function: Callable, fenics_templates: FenicsVariable, *args: np.array\n) -> Tuple[np.array, Callable]:\n\n numpy_output, ufl_form, fenics_inputs = assemble_eval(\n fenics_function, fenics_templates, *args\n )\n\n def vjp_fun(g):\n return tuple(\n vjp if vjp is not None else jax.ad_util.zeros_like_jaxval(args[i])\n for i, vjp in enumerate(vjp_assemble_impl(g, ufl_form, fenics_inputs))\n )\n\n return numpy_output, vjp_fun", "def lizardite():\n\n rho = 2610.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 245.; C[0,1] = 50.; C[0,2] = 31.; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 245.; C[1,2] = 31.; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 23.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 11.6; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 11.6; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 97.5\n\n return C, rho", "def get_l_interface(n_v,n_c, neighbours, vs, CV_matrix,L):\n h_j = np.empty((n_v, 3, 2))\n for i in range(3):\n h_j[:, i] = vs\n h_jp1 = np.dstack((roll_reverse(neighbours[:,:,0]),roll_reverse(neighbours[:,:,1])))\n l = np.mod(h_j - h_jp1 + L/2,L) - L/2\n l = np.sqrt(l[:,:,0]**2 + l[:,:,1]**2)\n LI = np.zeros((n_c,n_c),dtype=np.float32)\n for i in range(3):\n LI+= 
np.asfortranarray(l[:,i]*CV_matrix[:,:,i])@np.asfortranarray(CV_matrix[:,:,np.mod(i+2,3)].T)\n return LI", "def computesparsecholesky(self,multithread_,ordermethod_,tolsingular_,anzc_,aptrc_,asubc_,avalc_):\n n_ = None\n if n_ is None:\n n_ = len(anzc_)\n elif n_ != len(anzc_):\n raise IndexError(\"Inconsistent length of array anzc\")\n if n_ is None:\n n_ = len(aptrc_)\n elif n_ != len(aptrc_):\n raise IndexError(\"Inconsistent length of array aptrc\")\n if anzc_ is None:\n raise ValueError(\"Argument anzc cannot be None\")\n if anzc_ is None:\n raise ValueError(\"Argument anzc may not be None\")\n if isinstance(anzc_, numpy.ndarray) and anzc_.dtype is numpy.dtype(numpy.int32) and anzc_.flags.contiguous:\n _anzc_copyarray = False\n _anzc_tmp = ctypes.cast(anzc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif anzc_ is not None:\n _anzc_copyarray = True\n _anzc_np_tmp = numpy.zeros(len(anzc_),numpy.dtype(numpy.int32))\n _anzc_np_tmp[:] = anzc_\n assert _anzc_np_tmp.flags.contiguous\n _anzc_tmp = ctypes.cast(_anzc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _anzc_copyarray = False\n _anzc_tmp = None\n \n if aptrc_ is None:\n raise ValueError(\"Argument aptrc cannot be None\")\n if aptrc_ is None:\n raise ValueError(\"Argument aptrc may not be None\")\n if isinstance(aptrc_, numpy.ndarray) and aptrc_.dtype is numpy.dtype(numpy.int64) and aptrc_.flags.contiguous:\n _aptrc_copyarray = False\n _aptrc_tmp = ctypes.cast(aptrc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif aptrc_ is not None:\n _aptrc_copyarray = True\n _aptrc_np_tmp = numpy.zeros(len(aptrc_),numpy.dtype(numpy.int64))\n _aptrc_np_tmp[:] = aptrc_\n assert _aptrc_np_tmp.flags.contiguous\n _aptrc_tmp = ctypes.cast(_aptrc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _aptrc_copyarray = False\n _aptrc_tmp = None\n \n if asubc_ is None:\n raise ValueError(\"Argument asubc cannot be None\")\n if asubc_ is None:\n raise ValueError(\"Argument asubc may not be None\")\n if isinstance(asubc_, numpy.ndarray) and asubc_.dtype is numpy.dtype(numpy.int32) and asubc_.flags.contiguous:\n _asubc_copyarray = False\n _asubc_tmp = ctypes.cast(asubc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif asubc_ is not None:\n _asubc_copyarray = True\n _asubc_np_tmp = numpy.zeros(len(asubc_),numpy.dtype(numpy.int32))\n _asubc_np_tmp[:] = asubc_\n assert _asubc_np_tmp.flags.contiguous\n _asubc_tmp = ctypes.cast(_asubc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _asubc_copyarray = False\n _asubc_tmp = None\n \n if avalc_ is None:\n raise ValueError(\"Argument avalc cannot be None\")\n if avalc_ is None:\n raise ValueError(\"Argument avalc may not be None\")\n if isinstance(avalc_, numpy.ndarray) and avalc_.dtype is numpy.dtype(numpy.float64) and avalc_.flags.contiguous:\n _avalc_copyarray = False\n _avalc_tmp = ctypes.cast(avalc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif avalc_ is not None:\n _avalc_copyarray = True\n _avalc_np_tmp = numpy.zeros(len(avalc_),numpy.dtype(numpy.float64))\n _avalc_np_tmp[:] = avalc_\n assert _avalc_np_tmp.flags.contiguous\n _avalc_tmp = ctypes.cast(_avalc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _avalc_copyarray = False\n _avalc_tmp = None\n \n perm_ptr = ctypes.POINTER(ctypes.c_int32)()\n diag_ptr = ctypes.POINTER(ctypes.c_double)()\n lnzc_ptr = ctypes.POINTER(ctypes.c_int32)()\n lptrc_ptr = ctypes.POINTER(ctypes.c_int64)()\n lensubnval_ = ctypes.c_int64()\n 
lsubc_ptr = ctypes.POINTER(ctypes.c_int32)()\n lvalc_ptr = ctypes.POINTER(ctypes.c_double)()\n res = __library__.MSK_XX_computesparsecholesky(self.__nativep,multithread_,ordermethod_,tolsingular_,n_,_anzc_tmp,_aptrc_tmp,_asubc_tmp,_avalc_tmp,ctypes.byref(perm_ptr),ctypes.byref(diag_ptr),ctypes.byref(lnzc_ptr),ctypes.byref(lptrc_ptr),ctypes.byref(lensubnval_),ctypes.byref(lsubc_ptr),ctypes.byref(lvalc_ptr))\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n perm_arr = perm_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,perm_ptr)\n diag_arr = diag_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,diag_ptr)\n lnzc_arr = lnzc_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,lnzc_ptr)\n lptrc_arr = lptrc_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,lptrc_ptr)\n lensubnval_ = lensubnval_.value\n _lensubnval_return_value = lensubnval_\n lsubc_arr = lsubc_ptr[0:lensubnval_]\n __library__.MSK_XX_freeenv(self.__nativep,lsubc_ptr)\n lvalc_arr = lvalc_ptr[0:lensubnval_]\n __library__.MSK_XX_freeenv(self.__nativep,lvalc_ptr)\n return (perm_arr,diag_arr,lnzc_arr,lptrc_arr,_lensubnval_return_value,lsubc_arr,lvalc_arr)", "def calc_jacobi_matrix(mesh, Z):\r\n A = lil_matrix((mesh.ntotal, mesh.ntotal))\r\n i, j, k = 0, 0, 0\r\n for i in range(1, mesh.nz-1):\r\n for j in range(1, mesh.ny-1):\r\n for k in range(1, mesh.nx-1):\r\n p = coordinate(i, j, k, mesh)\r\n\r\n if Z[i, j, k] != 0: #0の時はe-10などで近似して、なんとかする\r\n A[p.p, p.p] = 1.0\r\n\r\n else:\r\n A[p.p, p.ip1] = 1/6\r\n A[p.p, p.im1] = 1/6\r\n A[p.p, p.jp1] = 1/6\r\n A[p.p, p.jm1] = 1/6\r\n A[p.p, p.kp1] = 1/6\r\n A[p.p, p.km1] = 1/6\r\n\r\n return A.tocsc()", "def L(order=4):\n dim_sh = dimension(order)\n L = np.zeros((dim_sh, dim_sh))\n for j in range(dim_sh):\n l = sh_degree(j)\n L[j, j] = - (l * (l + 1))\n return L", "def get_l(GW_glitch,i,j):\n\t\t \n\ttemp = np.einsum('nmk,nmk->k', GW_glitch.r_outer_r[:,:,i,j,:], GW_glitch.Hij[:,:,i,j,:])\n\t\t \n\treturn temp", "def intern_J(self):\n if self.Fz is None:\n fz_none = True\n else:\n fx, fy, fu = self.Fz\n fz_none = False\n if self.A is None:\n def J(x,y):\n if self.hx is None or self.gradh is None:\n if fz_none:\n fx, _, _ = self.F(x,y)\n xp, _, _ = minus(x, fx)\n xp, _, _ = operator_P(self.proj, xp)\n xp, _, _ = minus(x, xp)\n return LA.norm(xp),None,None\n else:\n if fz_none:\n fx, fy, _ = self.F(x,y)\n xp, yp, _ = minus(x, fx, y, fy)\n xp, yp, _ = operator_P(self.proj, xp, yp)\n xp, yp, _ = minus(x, xp, y, yp)\n total = np.concatenate((xp, yp))\n return LA.norm(xp)+LA.norm(yp),None,None\n else:\n def J(x,y,u):\n if self.hx is None or self.gradh is None:\n if fz_none:\n fx, _,fu = self.F(x,y,u)\n xp, up, _ = minus(x, fx, u, fu)\n xp, _, up = operator_P(self.proj, xp, None, up)\n xp, up, _ = minus(x, xp, u, up)\n total = np.concatenate((xp, up))\n return LA.norm(xp)+LA.norm(up),None,None\n else:\n if fz_none:\n fx, fy, fu = self.F(x,y,u)\n xp, yp, up = minus(x, fx, y, fy, u, fu)\n xp, yp, up = operator_P(self.proj, xp, yp, up)\n xp, yp, up = minus(x, xp, y, yp, u, up)\n total = np.concatenate((xp, yp, up))\n return LA.norm(xp)+LA.norm(yp)+LA.norm(up),None,None\n return J", "def compute_all_jxy(polygon=None):\n expressions = []\n symmetric = []\n\n # given a 12-gon, we do the following:\n # polygon = Symbolic12Gon()\n # polygon = make_regular()\n if polygon is None:\n polygon = make_any_gon()\n # polygon = make_assumption_gon()\n\n # print(polygon.vertices)\n for i in range(6):\n print(i)\n # translate such that this point is the origin\n# polygon = 
polygon.translate(polygon.vertices[i])\n# print(polygon)\n # shear so that the diagonal we are considering is vertical\n try:\n q = polygon.vertices[i].qx_to_shear_by(polygon.vertices[i+1])\n# print(\"q1:\", q.rational(D=3), q.irrational(D=3))\n except ZeroDivisionError:\n print(\"-------\")\n print(\"division by 0!\")\n print(\"-------\")\n continue\n\n sheared_polygon = polygon.shear_x_zero(q)\n# print(sheared_polygon)\n# print(\"test:\", sheared_polygon.vertices[i] - sheared_polygon.vertices[i+1])\n w, h = sheared_polygon.get_cylinder(i)\n # print(\"h: \",h.full_simplify())\n# print(\"shear 1 w: \",w.full_simplify())\n # print(len(sheared_polygon.vertices))\n# print(sheared_polygon.vertices[i])\n # shear again so that the edge that we consider is horizontal\n try:\n q = sheared_polygon.vertices[i].qy_to_shear_by(sheared_polygon.vertices[(i + 7) % 12])\n# print(sheared_polygon.vertices[i], sheared_polygon.vertices[(i + 7) % 12])\n# print(\"q2:\", q.rational(D=3), q.irrational(D=3))\n except ZeroDivisionError:\n print(\"-------\")\n print(\"division by 0!\")\n print(\"-------\")\n continue\n\n twice_sheared = sheared_polygon.shear_y_zero(q)\n\n # rescale such that the modulus of the vertical cylinder is rational\n w, h = twice_sheared.get_cylinder(i)\n# print(\"shear 2 h: \",h.full_simplify())\n# print(\"shear 2 w: \",w.full_simplify())\n # print(w.y, h.x)\n stretch_factor = w.x/h.y # this should be reciprocated, but we just care it is rational\n # print(stretch_factor)\n stretched_polygon = sheared_polygon.stretch_y(stretch_factor)\n\n # compute Jxy\n jxy = stretched_polygon.jxy()\n expressions.append(jxy)\n symmetric.append((jxy[1], jxy[2]))\n\n return expressions, symmetric", "def Cijkl(C):\n c = np.zeros(shape=(3, 3, 3, 3))\n CC = np.zeros(shape=(9, 9))\n CC[0:6, 0:6] = C[0:6, 0:6]\n CC[6:9, 6:9] = C[3:6, 3:6]\n CC[0:6, 6:9] = C[0:6, 3:6]\n CC[6:9, 0:6] = C[3:6, 0:6]\n\n c[0, 0, 0, 0] = CC[0, 0]\n c[0, 0, 1, 1] = CC[0, 1]\n c[0, 0, 2, 2] = CC[0, 2]\n c[0, 0, 1, 2] = CC[0, 3]\n c[0, 0, 2, 0] = CC[0, 4]\n c[0, 0, 0, 1] = CC[0, 5]\n c[0, 0, 2, 1] = CC[0, 6]\n c[0, 0, 0, 2] = CC[0, 7]\n c[0, 0, 1, 0] = CC[0, 8]\n\n c[1, 1, 0, 0] = CC[1, 0]\n c[1, 1, 1, 1] = CC[1, 1]\n c[1, 1, 2, 2] = CC[1, 2]\n c[1, 1, 1, 2] = CC[1, 3]\n c[1, 1, 2, 0] = CC[1, 4]\n c[1, 1, 0, 1] = CC[1, 5]\n c[1, 1, 2, 1] = CC[1, 6]\n c[1, 1, 0, 2] = CC[1, 7]\n c[1, 1, 1, 0] = CC[1, 8]\n\n c[2, 2, 0, 0] = CC[2, 0]\n c[2, 2, 1, 1] = CC[2, 1]\n c[2, 2, 2, 2] = CC[2, 2]\n c[2, 2, 1, 2] = CC[2, 3]\n c[2, 2, 2, 0] = CC[2, 4]\n c[2, 2, 0, 1] = CC[2, 5]\n c[2, 2, 2, 1] = CC[2, 6]\n c[2, 2, 0, 2] = CC[2, 7]\n c[2, 2, 1, 0] = CC[2, 8]\n\n c[1, 2, 0, 0] = CC[3, 0]\n c[1, 2, 1, 1] = CC[3, 1]\n c[1, 2, 2, 2] = CC[3, 2]\n c[1, 2, 1, 2] = CC[3, 3]\n c[1, 2, 2, 0] = CC[3, 4]\n c[1, 2, 0, 1] = CC[3, 5]\n c[1, 2, 2, 1] = CC[3, 6]\n c[1, 2, 0, 2] = CC[3, 7]\n c[1, 2, 1, 0] = CC[3, 8]\n\n c[2, 0, 0, 0] = CC[4, 0]\n c[2, 0, 1, 1] = CC[4, 1]\n c[2, 0, 2, 2] = CC[4, 2]\n c[2, 0, 1, 2] = CC[4, 3]\n c[2, 0, 2, 0] = CC[4, 4]\n c[2, 0, 0, 1] = CC[4, 5]\n c[2, 0, 2, 1] = CC[4, 6]\n c[2, 0, 0, 2] = CC[4, 7]\n c[2, 0, 1, 0] = CC[4, 8]\n\n c[0, 1, 0, 0] = CC[5, 0]\n c[0, 1, 1, 1] = CC[5, 1]\n c[0, 1, 2, 2] = CC[5, 2]\n c[0, 1, 1, 2] = CC[5, 3]\n c[0, 1, 2, 0] = CC[5, 4]\n c[0, 1, 0, 1] = CC[5, 5]\n c[0, 1, 2, 1] = CC[5, 6]\n c[0, 1, 0, 2] = CC[5, 7]\n c[0, 1, 1, 0] = CC[5, 8]\n\n c[2, 1, 0, 0] = CC[6, 0]\n c[2, 1, 1, 1] = CC[6, 1]\n c[2, 1, 2, 2] = CC[6, 2]\n c[2, 1, 1, 2] = CC[6, 3]\n c[2, 1, 2, 0] = CC[6, 4]\n c[2, 1, 0, 1] = CC[6, 5]\n c[2, 1, 2, 1] 
= CC[6, 6]\n c[2, 1, 0, 2] = CC[6, 7]\n c[2, 1, 1, 0] = CC[6, 8]\n\n c[0, 2, 0, 0] = CC[7, 0]\n c[0, 2, 1, 1] = CC[7, 1]\n c[0, 2, 2, 2] = CC[7, 2]\n c[0, 2, 1, 2] = CC[7, 3]\n c[0, 2, 2, 0] = CC[7, 4]\n c[0, 2, 0, 1] = CC[7, 5]\n c[0, 2, 2, 1] = CC[7, 6]\n c[0, 2, 0, 2] = CC[7, 7]\n c[0, 2, 1, 0] = CC[7, 8]\n\n c[1, 0, 0, 0] = CC[8, 0]\n c[1, 0, 1, 1] = CC[8, 1]\n c[1, 0, 2, 2] = CC[8, 2]\n c[1, 0, 1, 2] = CC[8, 3]\n c[1, 0, 2, 0] = CC[8, 4]\n c[1, 0, 0, 1] = CC[8, 5]\n c[1, 0, 2, 1] = CC[8, 6]\n c[1, 0, 0, 2] = CC[8, 7]\n c[1, 0, 1, 0] = CC[8, 8]\n return c", "def _apply_cz(self, state, axes, **kwargs):\n ndim = self._ndim(state)\n sl_0 = _get_slice(0, axes[0], ndim)\n sl_1 = _get_slice(1, axes[0], ndim)\n\n if axes[1] > axes[0]:\n target_axes = [axes[1] - 1]\n else:\n target_axes = [axes[1]]\n\n state_z = self._apply_z(state[sl_1], axes=target_axes)\n return self._stack([state[sl_0], state_z], axis=axes[0])", "def collatz_eval (a) :\n # <your code>\n return ([i, j, 1] for i, j in a)", "def _get_jacobian(self):\n srcs, recs = self.srcs, self.recs\n if not self.sparse:\n jac = numpy.array(\n [ttime2d.straight([cell], '', srcs, recs, velocity=1.)\n for cell in self.mesh]).T\n else:\n shoot = ttime2d.straight\n nonzero = []\n extend = nonzero.extend\n for j, c in enumerate(self.mesh):\n extend((i, j, tt)\n for i, tt in enumerate(shoot([c], '', srcs, recs,\n velocity=1.))\n if tt != 0)\n row, col, val = numpy.array(nonzero).T\n shape = (self.ndata, self.nparams)\n jac = scipy.sparse.csr_matrix((val, (row, col)), shape)\n return jac", "def make_mat_lp_le(lin_pot_mesh, lin_geo_mesh):\n num_nodes = lin_pot_mesh.get_nodes().shape[0]\n K = np.zeros((3 * num_nodes, 3 * num_nodes))\n add_lp_le_DL_terms(K, lin_pot_mesh, lin_geo_mesh)\n add_lp_le_RBM_terms(K, lin_pot_mesh, lin_geo_mesh)\n return K", "def calc_lampam_2(ss):\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam_2(ss[index])\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam_2(ss[index])\n return lampam\n\n n_plies_in_panels = np.size(ss) # laminate ply count\n\n theta2 = np.deg2rad(2*ss.astype(float))\n theta4 = 2*theta2\n cos_sin = np.concatenate((\n np.cos(theta2),\n np.cos(theta4),\n np.sin(theta2),\n np.sin(theta4))).reshape((4, n_plies_in_panels))\n\n for_the_top = np.arange(n_plies_in_panels)\n z_0 = np.ones(n_plies_in_panels)\n z_2 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**3 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**3\n z_1 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**2 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**2\n\n return np.array([\n (1/n_plies_in_panels)*np.matmul(cos_sin, z_0),\n (2/n_plies_in_panels**2)*np.matmul(cos_sin, z_1),\n (4/n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)", "def mXZ(nxz,P_dot_Dj,P_dot_ej):\n return np.divide(np.multiply(P_dot_ej, np.sum(nxz, axis=0)), P_dot_Dj)", "def jacobian(self,x,y,l,a):\n J = np.zeros([*x.shape,2,2])\n\n J = _jacobian(x,y,l,a,J)\n\n return J", "def linearize_pose_landmark_constraint(x, l, z):\n print(\"you shouldn't be here....\")\n e = np.zeros([2, 1])\n A = np.zeros([2, 3])\n B = np.zeros([2, 2])\n\n Ri = v2t(x)[0:2, 0:2]\n ti = x[0:2]\n\n fi = x[2]\n c = np.cos(fi)\n s = np.sin(fi)\n dR_dteta = np.array([[-s, c], [-c, -s]])\n\n e = Ri.transpose() @ (l - x[0:2]) - z\n\n B = Ri.transpose()\n\n A[0:2, 0:2] = -Ri.transpose()\n A[0:2, 2] = dR_dteta @ (l 
- ti)\n\n return e, A, B", "def get_hkl(self, x, y, z):\n if self.Umat is not None:\n v5 = self.Gvec(x, y, z)\n# v6 = inv(self.Umat) * v5\n# v7 = inv(self.Bmat) * v6\n v7 = inv(self.UBmat) * v5\n return list(np.array(v7.T)[0])\n else:\n return [0.0, 0.0, 0.0]", "def zenith_nadir(x, y):\n if y == 'm':\n bb = []\n cc = []\n for i in range(x.shape[1]):\n bb.append(amax(x[:, i:i + 1]))\n b = array(bb)\n cc.append(amin(x[:, i:i + 1]))\n c = array(cc)\n return (b, c)\n else:\n b = ones(x.shape[1])\n c = zeros(x.shape[1])\n return (b, c)", "def laplacian(degree_vector, weight_matrix):\n return np.diag(degree_vector) - weight_matrix", "def Lorenz(s):\n x = s[0]\n y = s[1]\n z = s[2]\n \n # constants for the equations\n sigma = 10.0\n rho = 28.0\n beta = 8.0/3.0\n \n # Return the state derivatives.\n return [sigma * (y-x), (rho-z)*x -y, x*y - beta*z]", "def calc_lampam(ss, constraints=None):\n if constraints is None:\n return calc_lampam_2(ss)\n\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam(ss[index], constraints)\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam(ss[index], constraints)\n return lampam\n n_plies_in_panels = np.size(ss) # laminate ply count\n\n if not constraints.sym:\n cos_sin = np.empty((4, n_plies_in_panels), float)\n for ind in range(n_plies_in_panels):\n cos_sin[:, ind] = np.copy(constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4, )))\n\n for_the_top = np.arange(n_plies_in_panels)\n z_0 = np.ones(n_plies_in_panels)\n z_2 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**3 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**3\n z_1 = ((1-n_plies_in_panels/2)*z_0+for_the_top)**2 \\\n - ((1-n_plies_in_panels/2)*z_0+for_the_top - 1)**2\n return np.array([\n (1/n_plies_in_panels)*np.matmul(cos_sin, z_0),\n (2/n_plies_in_panels**2)*np.matmul(cos_sin, z_1),\n (4/n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n\n cos_sin = np.empty((4, np.size(ss) // 2), float)\n for ind in range(np.size(ss) // 2):\n cos_sin[:, ind] = constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4,))\n\n for_the_top = np.arange(np.size(ss) // 2)\n z_0 = np.ones(np.size(ss) // 2)\n z_2 = ((1 - n_plies_in_panels / 2) * z_0 + for_the_top) ** 3 \\\n - ((1 - n_plies_in_panels / 2) * z_0 + for_the_top - 1) ** 3\n lampam = np.array([\n (2/n_plies_in_panels)*np.matmul(cos_sin, z_0),\n np.array([0, 0, 0, 0]),\n (8/n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n\n if np.size(ss) % 2:\n cos_sin_mid = constraints.cos_sin[\n constraints.ind_angles_dict[ss[n_plies_in_panels // 2]]]\n lampam += np.array([\n (1/n_plies_in_panels)*cos_sin_mid,\n np.zeros((4,), dtype=float),\n (1/n_plies_in_panels**3)*cos_sin_mid]).reshape(12)\n return lampam", "def zoisite():\n\n rho = 3343.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 279.8; C[0,1] = 94.7; C[0,2] = 88.7; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 249.2; C[1,2] = 27.5; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 209.4; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 51.8; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 81.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.3\n\n return C, 
rho", "def call(self, inputs, training=None):\n with tf.device(\"/device:GPU:0\"):\n return tf.reshape(tf.einsum('bj,jk,bk->b', inputs, self.laplacian, inputs), (-1, 1))", "def collatz_eval (i, j) :\n assert i > 0\n assert j > 0\n \n v = 1\n # Allows the format i, j and j, i (ex: 1 10 and 10 1)\n beg = i\n end = j\n if(i > j):\n\tbeg = j\n\tend = i\n\n for num in range(beg, end+1):\n\t\n\tcycle = collatz_cycle(num)\n\tif(num < cache_size and cycle_table[num] == 0):\n\t cycle_table[num] = cycle\n\tif(cycle > v):\n\t v = cycle\n\n \n assert v > 0\n return v", "def collatz(n, out=None):\n if out is None:\n out = []\n if n in out:\n return out+[n]\n else:\n out.append(n)\n if n%2 == 0:\n return collatz(n//2, out)\n else:\n return collatz(n*3+1, out)", "def computesparsecholesky(self,multithread_,ordermethod_,tolsingular_,anzc,aptrc,asubc,avalc): # 3\n n_ = None\n if n_ is None:\n n_ = len(anzc)\n elif n_ != len(anzc):\n raise IndexError(\"Inconsistent length of array anzc\")\n if n_ is None:\n n_ = len(aptrc)\n elif n_ != len(aptrc):\n raise IndexError(\"Inconsistent length of array aptrc\")\n if n_ is None: n_ = 0\n if anzc is None: raise TypeError(\"Invalid type for argument anzc\")\n if anzc is None:\n anzc_ = None\n else:\n try:\n anzc_ = memoryview(anzc)\n except TypeError:\n try:\n _tmparr_anzc = array.array(\"i\",anzc)\n except TypeError:\n raise TypeError(\"Argument anzc has wrong type\")\n else:\n anzc_ = memoryview(_tmparr_anzc)\n \n else:\n if anzc_.format != \"i\":\n anzc_ = memoryview(array.array(\"i\",anzc))\n \n if aptrc is None: raise TypeError(\"Invalid type for argument aptrc\")\n if aptrc is None:\n aptrc_ = None\n else:\n try:\n aptrc_ = memoryview(aptrc)\n except TypeError:\n try:\n _tmparr_aptrc = array.array(\"q\",aptrc)\n except TypeError:\n raise TypeError(\"Argument aptrc has wrong type\")\n else:\n aptrc_ = memoryview(_tmparr_aptrc)\n \n else:\n if aptrc_.format != \"q\":\n aptrc_ = memoryview(array.array(\"q\",aptrc))\n \n if asubc is None: raise TypeError(\"Invalid type for argument asubc\")\n if asubc is None:\n asubc_ = None\n else:\n try:\n asubc_ = memoryview(asubc)\n except TypeError:\n try:\n _tmparr_asubc = array.array(\"i\",asubc)\n except TypeError:\n raise TypeError(\"Argument asubc has wrong type\")\n else:\n asubc_ = memoryview(_tmparr_asubc)\n \n else:\n if asubc_.format != \"i\":\n asubc_ = memoryview(array.array(\"i\",asubc))\n \n if avalc is None: raise TypeError(\"Invalid type for argument avalc\")\n if avalc is None:\n avalc_ = None\n else:\n try:\n avalc_ = memoryview(avalc)\n except TypeError:\n try:\n _tmparr_avalc = array.array(\"d\",avalc)\n except TypeError:\n raise TypeError(\"Argument avalc has wrong type\")\n else:\n avalc_ = memoryview(_tmparr_avalc)\n \n else:\n if avalc_.format != \"d\":\n avalc_ = memoryview(array.array(\"d\",avalc))\n \n res,resargs = self.__obj.computesparsecholesky(multithread_,ordermethod_,tolsingular_,n_,anzc_,aptrc_,asubc_,avalc_)\n if res != 0:\n raise Error(rescode(res),\"\")\n _perm,_diag,_lnzc,_lptrc,_lensubnval_return_value,_lsubc,_lvalc = resargs\n return _perm,_diag,_lnzc,_lptrc,_lensubnval_return_value,_lsubc,_lvalc", "def lcs_le(x: List, y: List) -> Tuple[Matrix, atrix]:\n m = len(x)\n n = len(y)\n lcs_matrix = [[None]*(n+1) for i in range(m+1)\n # each index is a optimal solution for each subproblem\n direction_matrix = [[None]*n for i in range(m)]\n # if either indecd is 0 then each element is 0\n for i n ranage(1, m+1):\n lcs_matrix[i][0] = 0\n for j in range(n+1):\n lcs_matrix[0][j] = 0\n for i in 
range(m):\n for j in range(n):\n if x[i] == y[j]:\n lcs_matrix[i+1][j+1] = lcs_matrix[i][j]+1\n direction_matrix[i][j] = Direction.UPPER_LEFT\n elif lcs_matrix[i][j+1] >= lcs_matrix[i+1][j]:\n lcs_matrix[i+1][j+1] = lcs_matrix[i][j+1]\n direction_matrix[i][j] = Direction.UP\n else:\n lcs_matrix[i+1][j+1] = lcs_matrix[i+1][j]\n direction_matrix[i][j] = Direction.LEFT\n return lcs_matrix, index_matrix", "def lanczos_resampling(arr, target_r, target_c):\n origin_r = arr.shape[0]\n origin_c = arr.shape[1]\n s_r, s_c = get_sampling_scale(origin_r, origin_c, target_r, target_c)\n # Padding 3 to outside of arr.\n arr_padded = np.pad(arr, ((2, 3), (2, 3), (0, 0)), 'edge')\n output = np.ndarray([target_r, target_c, 3], dtype=np.uint8)\n\n def lanczos_filter(x):\n if x == 0:\n return 1\n elif -3 <= x < 3:\n return (3 * np.sin(np.pi * x) * np.sin(np.pi * x / 3)) / (np.pi * np.pi * x * x)\n else:\n return 0\n\n # Populate pixel in output\n for r in range(target_r):\n for c in range(target_c):\n rf, cf = s_r * r, s_c * c\n rp, cp = math.floor(rf), math.floor(cf)\n # Grab 36 pixels.\n region = arr_padded[rp:(rp + 6), cp:(cp + 6), :]\n coefs = np.ndarray((6, 6), dtype=float)\n for r_t in range(-2, 4):\n for c_t in range(-2, 4):\n coefs[r_t + 2, c_t + 2] = lanczos_filter(np.sqrt((rp + r_t - rf) ** 2 + (cp + c_t - cf) ** 2))\n coefs /= np.sum(coefs)\n this_color = np.zeros((3, ), dtype=float)\n for r_t in range(6):\n for c_t in range(6):\n this_color += region[r_t, c_t, :] * coefs[r_t, c_t]\n this_color = np.minimum(np.maximum(this_color, 0), 255)\n output[r, c, :] = this_color\n print(\"\\rPlease Wait: %d / %d\" % (r, target_r), end='')\n return output", "def conj(z):", "def lcn(x,ishape,size=9):\n # Function borrowed from bengioe_util\n inshape = (x.shape[0],1,ishape[0],ishape[1])\n p = x.reshape(inshape)\n #p = (p-TT.mean(p))/T.std(p)\n g = gaussian(size,1.591/size)\n g/=g.sum()\n g = numpy.float32(g.reshape((1,1,size,size)))\n mean = TT.nnet.conv.conv2d(p,TT.constant(g),\n None,\n (1,1,size,size),\n 'full').reshape(\n (x.shape[0],1)+(ishape[0]+size-1,)*2)\n mean = mean[:,:,\n size/2:ishape[0]+size/2,\n size/2:ishape[1]+size/2]\n v = (p - mean)#.dimshuffle('x','x',0,1)\n var = TT.nnet.conv.conv2d(TT.sqr(v),TT.constant(g),\n None,\n (1,1,size,size),\n 'full').reshape(\n (x.shape[0],1)+(ishape[0]+size-1,)*2)\n var = var[:,:,\n size/2:ishape[0]+size/2,\n size/2:ishape[1]+size/2]\n std = TT.sqrt(var)\n std_mean = TT.mean(TT.mean(std,axis=3),axis=2).dimshuffle(0,1,'x','x')\n out = v / TT.maximum(std,std_mean)\n return (out + 2.5 )/5# - out.min()", "def func_with_jvp(x):\n return mysum(pow2(mysin(x)))", "def _precession_matrix_besselian(epoch1, epoch2):\n # tropical years\n t1 = (epoch1 - 1850.0) / 1000.0\n t2 = (epoch2 - 1850.0) / 1000.0\n dt = t2 - t1\n\n zeta1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1\n zeta2 = 30.240 - 0.27 * t1\n zeta3 = 17.995\n pzeta = (zeta3, zeta2, zeta1, 0)\n zeta = np.polyval(pzeta, dt) / 3600\n\n z1 = 23035.545 + t1 * 139.720 + 0.060 * t1 * t1\n z2 = 109.480 + 0.39 * t1\n z3 = 18.325\n pz = (z3, z2, z1, 0)\n z = np.polyval(pz, dt) / 3600\n\n theta1 = 20051.12 - 85.29 * t1 - 0.37 * t1 * t1\n theta2 = -42.65 - 0.37 * t1\n theta3 = -41.8\n ptheta = (theta3, theta2, theta1, 0)\n theta = np.polyval(ptheta, dt) / 3600\n\n return (\n rotation_matrix(-z, \"z\")\n @ rotation_matrix(theta, \"y\")\n @ rotation_matrix(-zeta, \"z\")\n )", "def zerodegree_pol(dim):\n\n out = zeros(dim)\n out[0] = 1\n\n return out", "def kullback_leibler_iaf(z, logqz_x, beta=1., **args):\n \n logpz = 
-tf.reduce_sum(input_tensor=0.5 * np.log(2*np.pi) + 0.5 * tf.square(z), axis=-1)\n kl = beta * tf.reduce_mean(input_tensor=logqz_x - logpz)\n return kl", "def magmablas_zsymmetrize(uplo, n, A, lda):\n\n uplo = _uplo_conversion[uplo]\n status = _libmagma.magmablas_zsymmetrize(uplo, n, int(A), lda)\n magmaCheckStatus(status)", "def build_matrix(ldpc_code_params):\n n_cnodes = ldpc_code_params['n_cnodes']\n cnode_deg_list = ldpc_code_params['cnode_deg_list']\n cnode_adj_list = ldpc_code_params['cnode_adj_list'].reshape((n_cnodes, ldpc_code_params['max_cnode_deg']))\n\n parity_check_matrix = sp.lil_matrix((n_cnodes, ldpc_code_params['n_vnodes']), dtype=np.int8)\n for cnode_idx in range(n_cnodes):\n parity_check_matrix[cnode_idx, cnode_adj_list[cnode_idx, :cnode_deg_list[cnode_idx]]] = 1\n\n parity_check_matrix = parity_check_matrix.tocsc()\n systematic_part = parity_check_matrix[:, -n_cnodes:]\n parity_part = parity_check_matrix[:, :-n_cnodes]\n\n ldpc_code_params['parity_check_matrix'] = parity_check_matrix\n ldpc_code_params['generator_matrix'] = splg.inv(systematic_part).dot(parity_part).tocsr()", "def lc_triplets():\n return [(a, b, 1000 - (a + b)) for a in xrange(1, 1000) for b in xrange(a, 1000) if a**2 + b**2 == (1000 - (a+b))**2][0]", "def compile_cutils():\r\n\r\n types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',\r\n 'int256', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\r\n 'float16', 'float32', 'float64', 'float80', 'float96', 'float128',\r\n 'float256']]\r\n\r\n complex_types = ['npy_' + t for t in ['complex32', 'complex64',\r\n 'complex128', 'complex160', 'complex192', 'complex512']]\r\n\r\n inplace_map_template = \"\"\"\r\n #if defined(%(typen)s)\r\n static void %(type)s_inplace_add(PyArrayMapIterObject *mit, PyArrayIterObject *it)\r\n {\r\n int index = mit->size;\r\n while (index--) {\r\n %(op)s\r\n\r\n PyArray_MapIterNext(mit);\r\n PyArray_ITER_NEXT(it);\r\n }\r\n }\r\n #endif\r\n \"\"\"\r\n\r\n floatadd = \"((%(type)s*)mit->dataptr)[0] = ((%(type)s*)mit->dataptr)[0] + ((%(type)s*)it->dataptr)[0];\"\r\n complexadd = \"\"\"\r\n ((%(type)s*)mit->dataptr)[0].real = ((%(type)s*)mit->dataptr)[0].real + ((%(type)s*)it->dataptr)[0].real;\r\n ((%(type)s*)mit->dataptr)[0].imag = ((%(type)s*)mit->dataptr)[0].imag + ((%(type)s*)it->dataptr)[0].imag;\r\n \"\"\"\r\n\r\n fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': floatadd % {'type': t}}\r\n for t in types] +\r\n [inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': complexadd % {'type': t}}\r\n for t in complex_types])\r\n\r\n fn_array = (\"static inplace_map_binop addition_funcs[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(type)s_inplace_add,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"\"\"NULL};\r\n \"\"\")\r\n\r\n type_number_array = (\"static int type_numbers[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(typen)s,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"-1000};\")\r\n\r\n code = (\"\"\"\r\n #include <Python.h>\r\n #include \"numpy/arrayobject.h\"\r\n\r\n extern \"C\"{\r\n static PyObject *\r\n run_cthunk(PyObject *self, PyObject *args)\r\n {\r\n PyObject *py_cthunk = NULL;\r\n if(!PyArg_ParseTuple(args,\"O\",&py_cthunk))\r\n return NULL;\r\n\r\n if (!PyCObject_Check(py_cthunk)) {\r\n PyErr_SetString(PyExc_ValueError,\r\n \"Argument to run_cthunk must be a PyCObject.\");\r\n return NULL;\r\n }\r\n 
void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);\r\n int (*fn)(void*) = (int (*)(void*))(ptr_addr);\r\n void* it = PyCObject_GetDesc(py_cthunk);\r\n int failure = fn(it);\r\n\r\n return Py_BuildValue(\"i\", failure);\r\n }\r\n\r\n #if NPY_API_VERSION >= 0x00000008\r\n typedef void (*inplace_map_binop)(PyArrayMapIterObject *, PyArrayIterObject *);\r\n \"\"\" + fns + fn_array + type_number_array +\r\n\r\n\"\"\"\r\nstatic int\r\nmap_increment(PyArrayMapIterObject *mit, PyObject *op, inplace_map_binop add_inplace)\r\n{\r\n PyArrayObject *arr = NULL;\r\n PyArrayIterObject *it;\r\n PyArray_Descr *descr;\r\n if (mit->ait == NULL) {\r\n return -1;\r\n }\r\n descr = PyArray_DESCR(mit->ait->ao);\r\n Py_INCREF(descr);\r\n arr = (PyArrayObject *)PyArray_FromAny(op, descr,\r\n 0, 0, NPY_ARRAY_FORCECAST, NULL);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n if ((mit->subspace != NULL) && (mit->consec)) {\r\n PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n }\r\n it = (PyArrayIterObject*)\r\n PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);\r\n if (it == NULL) {\r\n Py_DECREF(arr);\r\n return -1;\r\n }\r\n\r\n (*add_inplace)(mit, it);\r\n\r\n Py_DECREF(arr);\r\n Py_DECREF(it);\r\n return 0;\r\n}\r\n\r\n\r\nstatic PyObject *\r\ninplace_increment(PyObject *dummy, PyObject *args)\r\n{\r\n PyObject *arg_a = NULL, *index=NULL, *inc=NULL;\r\n PyArrayObject *a;\r\n inplace_map_binop add_inplace = NULL;\r\n int type_number = -1;\r\n int i =0;\r\n PyArrayMapIterObject * mit;\r\n\r\n if (!PyArg_ParseTuple(args, \"OOO\", &arg_a, &index,\r\n &inc)) {\r\n return NULL;\r\n }\r\n if (!PyArray_Check(arg_a)) {\r\n PyErr_SetString(PyExc_ValueError, \"needs an ndarray as first argument\");\r\n return NULL;\r\n }\r\n\r\n a = (PyArrayObject *) arg_a;\r\n\r\n if (PyArray_FailUnlessWriteable(a, \"input/output array\") < 0) {\r\n return NULL;\r\n }\r\n\r\n if (PyArray_NDIM(a) == 0) {\r\n PyErr_SetString(PyExc_IndexError, \"0-d arrays can't be indexed.\");\r\n return NULL;\r\n }\r\n type_number = PyArray_TYPE(a);\r\n\r\n\r\n\r\n while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){\r\n if (type_number == type_numbers[i]) {\r\n add_inplace = addition_funcs[i];\r\n break;\r\n }\r\n i++ ;\r\n }\r\n\r\n if (add_inplace == NULL) {\r\n PyErr_SetString(PyExc_TypeError, \"unsupported type for a\");\r\n return NULL;\r\n }\r\n mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);\r\n if (mit == NULL) {\r\n goto fail;\r\n }\r\n if (map_increment(mit, inc, add_inplace) != 0) {\r\n goto fail;\r\n }\r\n\r\n Py_DECREF(mit);\r\n\r\n Py_INCREF(Py_None);\r\n return Py_None;\r\n\r\nfail:\r\n Py_XDECREF(mit);\r\n\r\n return NULL;\r\n}\r\n #endif\r\n\r\n\r\n static PyMethodDef CutilsExtMethods[] = {\r\n {\"run_cthunk\", run_cthunk, METH_VARARGS|METH_KEYWORDS,\r\n \"Run a theano cthunk.\"},\r\n #if NPY_API_VERSION >= 0x00000008\r\n {\"inplace_increment\", inplace_increment,\r\n METH_VARARGS,\r\n \"increments a numpy array inplace at the passed indexes.\"},\r\n #endif\r\n {NULL, NULL, 0, NULL} /* Sentinel */\r\n };\"\"\")\r\n\r\n if PY3:\r\n # This is not the most efficient code, but it is written this way to\r\n # highlight the changes needed to make 2.x code compile under python 3.\r\n code = code.replace(\"<Python.h>\", '\"numpy/npy_3kcompat.h\"', 1)\r\n code = code.replace(\"PyCObject\", \"NpyCapsule\")\r\n code += \"\"\"\r\n static struct PyModuleDef moduledef = {\r\n PyModuleDef_HEAD_INIT,\r\n \"cutils_ext\",\r\n NULL,\r\n -1,\r\n CutilsExtMethods,\r\n 
};\r\n\r\n PyMODINIT_FUNC\r\n PyInit_cutils_ext(void) {\r\n import_array();\r\n return PyModule_Create(&moduledef);\r\n }\r\n }\r\n \"\"\"\r\n else:\r\n code += \"\"\"\r\n PyMODINIT_FUNC\r\n initcutils_ext(void)\r\n {\r\n import_array();\r\n (void) Py_InitModule(\"cutils_ext\", CutilsExtMethods);\r\n }\r\n } //extern C\r\n \"\"\"\r\n\r\n loc = os.path.join(config.compiledir, 'cutils_ext')\r\n if not os.path.exists(loc):\r\n os.mkdir(loc)\r\n\r\n args = cmodule.GCC_compiler.compile_args()\r\n cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,\r\n preargs=args)" ]
[ "0.61316943", "0.5759136", "0.5484182", "0.53870416", "0.5306601", "0.5300693", "0.52789545", "0.52645034", "0.525278", "0.5248642", "0.52068466", "0.5197051", "0.5172847", "0.5171192", "0.5147542", "0.5092073", "0.5066736", "0.5065881", "0.50415236", "0.5041109", "0.50320095", "0.50298345", "0.5008514", "0.5001094", "0.50006133", "0.49993464", "0.49896526", "0.49810082", "0.49807104", "0.4973545", "0.49564967", "0.49542546", "0.49301755", "0.4922153", "0.49018896", "0.48986796", "0.48968834", "0.48951828", "0.48900825", "0.4881542", "0.48798132", "0.4874661", "0.4871157", "0.48685685", "0.48611447", "0.48598084", "0.4858463", "0.48505008", "0.48451236", "0.48244867", "0.4819192", "0.48040408", "0.48027042", "0.47933966", "0.47921956", "0.47864544", "0.47817436", "0.47808847", "0.47787538", "0.477705", "0.4761165", "0.4760873", "0.47574997", "0.47559267", "0.47518227", "0.4749927", "0.47402433", "0.4740056", "0.47372332", "0.4731873", "0.47300705", "0.47277138", "0.47212964", "0.47121078", "0.4709271", "0.47071734", "0.4701479", "0.46975645", "0.46939394", "0.46836755", "0.46696317", "0.46626556", "0.4662489", "0.46569192", "0.4651079", "0.46444502", "0.46413374", "0.46406677", "0.4634567", "0.46300402", "0.46233985", "0.4622611", "0.4609962", "0.4609559", "0.4607654", "0.4601589", "0.459057", "0.45889598", "0.4588858", "0.45838866" ]
0.8053774
0
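For readers of this dump: the record above exposes `inplace_increment`, an unbuffered in-place add at possibly repeated indexes. A minimal NumPy sketch of the equivalent behavior via `np.add.at` (illustrative only, not part of the record):

```python
import numpy as np

# np.add.at performs unbuffered in-place addition, so repeated indexes
# each contribute their increment.
a = np.zeros(5)
idx = np.array([0, 1, 1, 3])
np.add.at(a, idx, 1.0)
print(a)  # [1. 2. 0. 1. 0.]

# Buffered fancy-index assignment, by contrast, applies each index once:
b = np.zeros(5)
b[idx] += 1.0
print(b)  # [1. 1. 0. 1. 0.]
```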
Helper function to create a jitted arnoldi factorization. The function returns a function `_arnoldi_fact` which performs an m-step arnoldi factorization. `_arnoldi_fact` computes an m-step arnoldi factorization of an input callable `matvec`, with m = min(`it`, `num_krylov_vecs`). `_arnoldi_fact` will do at most `num_krylov_vecs` steps. `_arnoldi_fact` returns arrays `kv` and `H` which satisfy the Arnoldi recurrence relation
```
matrix @ Vm - Vm @ Hm - fm * em = 0
```
with `matrix` the matrix representation of `matvec`, `Vm = jax.numpy.transpose(kv[:it, :])`, `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1], 1)` and `em` a cartesian basis row vector with `em[0, -1] == 1` and 0 elsewhere.
def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:

  @jax.jit
  def modified_gram_schmidt_step_arnoldi(j, vals):
    """
    Single step of a modified gram-schmidt orthogonalization.
    Args:
      j: Integer value denoting the vector to be orthogonalized.
      vals: A list of variables:
        `vector`: The current vector to be orthogonalized
          to all previous ones.
        `krylov_vectors`: jax.array of collected krylov vectors.
        `n`: integer denoting the column-position of the overlap
          <`krylov_vector`|`vector`> within `H`.
        `H`: Matrix of overlaps.
    Returns:
      updated vals.
    """
    vector, krylov_vectors, n, H = vals
    v = krylov_vectors[j, :]
    h = jax.numpy.vdot(v, vector)
    H = jax.ops.index_update(H, jax.ops.index[j, n], h)
    vector = vector - h * jax.numpy.reshape(v, vector.shape)
    return [vector, krylov_vectors, n, H]

  @functools.partial(jax.jit, static_argnums=(5, 6, 7))
  def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,
                    eps):
    """
    Compute an m-step arnoldi factorization of `matvec`, with
    m = min(`it`, `num_krylov_vecs`). The factorization will
    do at most `num_krylov_vecs` steps. The returned arrays
    `kv` and `H` will satisfy the Arnoldi recurrence relation
    ```
    matrix @ Vm - Vm @ Hm - fm * em = 0
    ```
    with `matrix` the matrix representation of `matvec` and
    `Vm = jax.numpy.transpose(kv[:it, :])`,
    `Hm = H[:it, :it]`,
    `fm = np.expand_dims(kv[it, :] * H[it, it - 1], 1)`
    and `em` a cartesian basis row vector with `em[0, -1] == 1`
    and 0 elsewhere.

    Note that the caller is responsible for dtype consistency between
    the inputs, i.e. dtypes between all input arrays have to match.

    Args:
      matvec: The matrix vector product.
      args: List of arguments to `matvec`.
      v0: Initial state to `matvec`.
      krylov_vectors: An array for storing the krylov vectors. The
        individual vectors are stored as rows.
        The shape of `krylov_vectors` has to be
        (num_krylov_vecs + 1, np.ravel(v0).shape[0]).
      H: Matrix of overlaps. The shape has to be
        (num_krylov_vecs + 1, num_krylov_vecs + 1).
      start: Integer denoting the start position where the first
        produced krylov_vector should be inserted into `krylov_vectors`.
      num_krylov_vecs: Number of krylov iterations, should be identical to
        `krylov_vectors.shape[0] - 1`.
      eps: Convergence parameter. Iteration is terminated if the norm of a
        krylov-vector falls below `eps`.
    Returns:
      kv: An array of krylov vectors
      H: A matrix of overlaps
      it: The number of performed iterations.
""" Z = jax.numpy.linalg.norm(v0) v = v0 / Z krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[start, :], jax.numpy.ravel(v)) H = jax.lax.cond( start > 0, start, lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None, lambda x: H) # body of the arnoldi iteration def body(vals): krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals Av = matvec(vector, *args) initial_vals = [Av, krylov_vectors, i, H] Av, krylov_vectors, _, H = jax.lax.fori_loop( 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals) norm = jax.numpy.linalg.norm(Av) Av /= norm H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i + 1, :], jax.numpy.ravel(Av)) return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter] def cond_fun(vals): # Continue loop while iteration < num_krylov_vecs and norm > eps _, _, _, _, norm, _, iteration, _ = vals counter_done = (iteration >= num_krylov_vecs) norm_not_too_small = norm > eps continue_iteration = jax.lax.cond(counter_done, _, lambda x: False, _, lambda x: norm_not_too_small) return continue_iteration initial_norm = v.real.dtype.type(1.0+eps) initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start, num_krylov_vecs] final_values = jax.lax.while_loop(cond_fun, body, initial_values) kvfinal, Hfinal, _, _, norm, _, it, _ = final_values return kvfinal, Hfinal, it, norm < eps return _arnoldi_fact
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps", "def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, 
fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]", "def _implicitly_restarted_arnoldi(jax: types.ModuleType) -> Callable:\n\n arnoldi_fact = _generate_arnoldi_factorization(jax)\n\n # ######################################################\n # ####### NEW SORTING FUCTIONS INSERTED HERE #########\n # ######################################################\n @functools.partial(jax.jit, static_argnums=(1,))\n def LR_sort(evals, p):\n inds = np.argsort(jax.numpy.real(evals), kind='stable')[::-1]\n shifts = evals[inds][-p:]\n return shifts, inds\n\n @functools.partial(jax.jit, static_argnums=(1,))\n def LM_sort(evals, p):\n inds = np.argsort(jax.numpy.abs(evals), kind='stable')[::-1]\n shifts = evals[inds][-p:]\n return shifts, inds\n\n # #######################################################\n # #######################################################\n # #######################################################\n @functools.partial(jax.jit, static_argnums=(4, 5, 6))\n def shifted_QR(Vm, Hm, fm, evals, k, p, which, res_thresh):\n funs = [LR_sort, LM_sort]\n shifts, _ = funs[which](evals, p)\n # compress to k = numeig\n q = jax.numpy.zeros(Hm.shape[0])\n q = jax.ops.index_update(q, jax.ops.index[-1], 1)\n m = Hm.shape[0]\n\n for shift in shifts:\n Qj, _ = jax.numpy.linalg.qr(Hm - shift * jax.numpy.eye(m))\n Hm = Qj.T.conj() @ Hm @ Qj\n Vm = Qj.T @ Vm\n q = q @ Qj\n\n fk = Vm[k, :] * Hm[k, k - 1] + fm * q[k - 1]\n Vk = Vm[0:k, :]\n Hk = Hm[0:k, 0:k]\n H = jax.numpy.zeros((k + p + 1, k + p), dtype=fm.dtype)\n H = jax.ops.index_update(H, jax.ops.index[0:k, 0:k], Hk)\n Z = jax.numpy.linalg.norm(fk)\n v = fk / Z\n krylov_vectors = jax.numpy.zeros((k + p + 1, Vm.shape[1]), dtype=fm.dtype)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[0:k, :],\n Vk)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[k:], v)\n Z = jax.numpy.linalg.norm(fk)\n #if fk is a zero-vector then arnoldi has exactly converged.\n #use small threshold to check this\n return krylov_vectors, H, fk, Z < res_thresh\n\n @functools.partial(jax.jit, static_argnums=(2,))\n def update_data(Vm_tmp, Hm_tmp, numits):\n Vm = Vm_tmp[0:numits, :]\n Hm = Hm_tmp[0:numits, 0:numits]\n fm = Vm_tmp[numits, :] * Hm_tmp[numits, numits - 1]\n return Vm, Hm, fm\n\n @functools.partial(jax.jit, static_argnums=(3,))\n def get_vectors(Vm, unitary, inds, numeig):\n\n def body_vector(i, vals):\n krv, unitary, states, inds = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m, :] * unitary[m, inds[n]])\n return [krv, unitary, states, inds]\n\n state_vectors = jax.numpy.zeros([numeig, Vm.shape[1]], dtype=Vm.dtype)\n _, _, state_vectors, _ = jax.lax.fori_loop(\n 0, numeig * Vm.shape[0], body_vector,\n [Vm, unitary, state_vectors, inds])\n state_norms = jax.numpy.linalg.norm(state_vectors, axis=1)\n state_vectors = state_vectors / state_norms[:, None]\n return state_vectors\n\n\n def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n \"\"\"\n Implicitly restarted arnoldi factorization of `matvec`. 
The routine\n finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec`\n by alternating between compression and re-expansion of an initial\n `num_krylov_vecs`-step Arnoldi factorization.\n\n Note: The caller has to ensure that the dtype of the return value\n of `matvec` matches the dtype of the initial state. Otherwise jax\n will raise a TypeError.\n\n Args:\n matvec: A callable representing the linear operator.\n args: Arguments to `matvec`. `matvec` is called with\n `matvec(x, *args)` with `x` the input array on which\n `matvec` should act.\n initial_state: An starting vector for the iteration.\n num_krylov_vecs: Number of krylov vectors of the arnoldi factorization.\n numeig: The number of desired eigenvector-eigenvalue pairs.\n which: Which eigenvalues to target. Currently supported: `which = 'LR'`\n or `which = 'LM'`.\n eps: Convergence flag. If the norm of a krylov vector drops below `eps`\n the iteration is terminated.\n maxiter: Maximum number of (outer) iteration steps.\n Returns:\n eta, U: Two lists containing eigenvalues and eigenvectors.\n \"\"\"\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]\n\n return implicitly_restarted_arnoldi_method", "def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable:\n\n @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6))\n def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho):\n \"\"\"\n Jitted lanczos routine.\n Args:\n matvec: A callable implementing the 
matrix-vector product of a\n linear operator.\n arguments: Arguments to `matvec` additional to an input vector.\n `matvec` will be called as `matvec(init, *args)`.\n init: An initial input state to `matvec`.\n ncv: Number of krylov iterations (i.e. dimension of the Krylov space).\n neig: Number of eigenvalue-eigenvector pairs to be computed.\n landelta: Convergence parameter: if the norm of the current Lanczos vector\n falls below `landelta`, iteration is stopped.\n reortho: If `True`, reorthogonalize all krylov vectors at each step.\n This should be used if `neig>1`.\n Returns:\n jax.numpy.ndarray: Eigenvalues\n list: Eigenvectors\n \"\"\"\n\n def body_modified_gram_schmidt(i, vals):\n vector, krylov_vectors = vals\n v = krylov_vectors[i, :]\n vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors]\n\n def body_lanczos(vals):\n current_vector, krylov_vectors, vector_norms = vals[0:3]\n diagonal_elements, matvec, args, _ = vals[3:7]\n threshold, i, maxiteration = vals[7:]\n norm = jax.numpy.linalg.norm(current_vector)\n normalized_vector = current_vector / norm\n normalized_vector, krylov_vectors = jax.lax.cond(\n reortho, True,\n lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt,\n [normalized_vector, krylov_vectors]),\n False, lambda x: [normalized_vector, krylov_vectors])\n Av = matvec(normalized_vector, *args)\n\n diag_element = jax.numpy.vdot(normalized_vector, Av)\n\n res = jax.numpy.reshape(\n jax.numpy.ravel(Av) -\n jax.numpy.ravel(normalized_vector) * diag_element -\n krylov_vectors[i - 1] * norm, Av.shape)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :],\n jax.numpy.ravel(normalized_vector))\n\n vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1],\n norm)\n diagonal_elements = jax.ops.index_update(diagonal_elements,\n jax.ops.index[i - 1],\n diag_element)\n\n return [\n res, krylov_vectors, vector_norms, diagonal_elements, matvec, args,\n norm, threshold, i + 1, maxiteration\n ]\n\n def cond_fun(vals):\n _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals\n\n def check_thresh(check_vals):\n val, thresh = check_vals\n return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)\n\n return jax.lax.cond(iteration <= maxiteration, [norm, threshold],\n check_thresh, False, lambda x: x)\n\n numel = jax.numpy.prod(init.shape)\n krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype)\n norms = jax.numpy.zeros(ncv, dtype=init.dtype)\n diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype)\n\n norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0)\n\n norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0),\n dtype=init.dtype)).dtype\n initvals = [\n init, krylov_vecs, norms, diag_elems, matvec, arguments,\n norms_dtype.type(1.0), landelta, 1, ncv\n ]\n output = jax.lax.while_loop(cond_fun, body_lanczos, initvals)\n final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output\n krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :],\n jax.numpy.ravel(final_state))\n\n A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag(\n norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1)\n eigvals, U = jax.numpy.linalg.eigh(A_tridiag)\n eigvals = eigvals.astype(A_tridiag.dtype)\n\n def body_vector(i, vals):\n krv, unitary, states = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m + 1, :] * unitary[m, n])\n return [krv, unitary, states]\n\n state_vectors = 
jax.numpy.zeros([neig, numel], dtype=init.dtype)\n _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1),\n body_vector,\n [krylov_vecs, U, state_vectors])\n\n return jax.numpy.array(eigvals[0:neig]), [\n jax.numpy.reshape(vectors[n, :], init.shape) /\n jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig)\n ]\n\n return jax_lanczos", "def als(matrix, n_factors=8,n_iterations=15, lambda_=10):\r\n\tm, n = matrix.shape\r\n\tQ = matrix\r\n\tW = Q > 0.5\r\n\tW = W.astype(int)\r\n\tprint('X and Y randomly initialzied.')\r\n\tX = 5 * np.random.rand(m, n_factors) \r\n\tY = 5 * np.random.rand(n_factors, n)\r\n\tfor ii in range(n_iterations):\r\n\t\tfor u, Wu in enumerate(W):\r\n\t\t\tX[u] = np.linalg.solve(np.dot(Y, np.dot(np.diag(Wu), Y.T)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(Y, np.dot(np.diag(Wu), Q[u].T))).T\r\n\t\tfor i, Wi in enumerate(W.T):\r\n\t\t\tY[:,i] = np.linalg.solve(np.dot(X.T, np.dot(np.diag(Wi), X)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(X.T, np.dot(np.diag(Wi), Q[:, i])))\r\n\t\tprint('{}th iteration is completed of {}'.format(ii + 1,n_iterations))\r\n\tprediction = np.dot(X,Y)\r\n\tprint('Done.')\r\n\treturn prediction, X, Y", "def svm_admm(X, y, mylambda=1., rho=1., rel_par=1., QUIET = False, MAX_ITER = 200, ABSTOL = 1e-6, RELTOL = 1e-2):\n if not QUIET:\n tic = time.time()\n m, n = X.shape \n y_raveld = y.ravel() \n # A is a matrix given by [-y_j*x_j -y_j]\n A = - np.dot(np.diag(y_raveld), np.concatenate((X, np.ones((m, 1))), axis = 1))\n\n #Data preprocessing\n m, n = A.shape\n \n #ADMM solver\n x = np.zeros((n, N))\n z = np.zeros((n, N))\n u = np.zeros((n, N))\n\n if not QUIET:\n print('\\n%3s\\t%10s\\t%10s\\t%10s\\t%10s\\t%10s' %('iter',\n 'r np.linalg.norm', \n 'eps pri', \n 's np.linalg.norm', \n 'eps dual', \n 'objective'))\n\n # Saving state\n h = {}\n h['objval'] = np.zeros(MAX_ITER)\n h['r_norm'] = np.zeros(MAX_ITER)\n h['s_norm'] = np.zeros(MAX_ITER)\n h['eps_pri'] = np.zeros(MAX_ITER)\n h['eps_dual'] = np.zeros(MAX_ITER)\n\n for k in range(MAX_ITER):\n # x-update \n for i in range(N):\n A_temp = A[i * num_per_batch: (i + 1) * num_per_batch, :]\n y_temp = y[i * num_per_batch: (i + 1) * num_per_batch, :]\n #\n # temp1 = -z[:, i] + u[:, i]\n # fun = lambda x: np.sum(np.maximum(np.dot(A_temp, x.reshape((n, 1))) + 1, np.zeros((num_per_batch, 1)))) + \\\n # rho/2. * np.dot(x + temp1, x + temp1)\n # # np.random.uniform(-1, 1, (n,1))\n # result = scipy.optimize.minimize(fun, 0.1 * np.ones((n, 1)), tol = 1e-8, method = 'Nelder-Mead')\n # x_temp = result.x\n #\n x_var = Variable(n)\n constraints = []\n objective = Minimize(sum_entries(pos( A_temp * x_var + 1)) + rho/2. * sum_squares((x_var - z[:, i] + u[:, i])))\n prob = Problem(objective, constraints)\n result = prob.solve()\n x_temp = x_var.value\n\n x_temp = x_temp.reshape((x_temp.shape[0], 1))\n x[:, i] = x_temp.ravel()\n\n xave = np.mean(x, axis = 1)\n\n # z-update\n zold = np.copy(z)\n x_hat = rel_par * x + (1. 
- rel_par) * zold\n z = N * rho/(1./mylambda + N * rho) * np.mean(x_hat + u, axis = 1)\n z = z.reshape((z.shape[0], 1))\n z = np.dot(z, np.ones((1, N))) # N columns of the same values\n\n # u-update\n u = u + x_hat - z\n\n # diagnostics, reporting, termination checks\n h['objval'][k] = myobjective(A, mylambda, x, z)\n h['r_norm'][k] = np.linalg.norm(x - z)\n h['s_norm'][k] = np.linalg.norm(rho * (z - zold))\n h['eps_pri'][k] = np.sqrt(n) * ABSTOL+ RELTOL * np.maximum(np.linalg.norm(x), np.linalg.norm(-z))\n h['eps_dual'][k] = np.sqrt(n) * ABSTOL + RELTOL * np.linalg.norm(rho * u)\n if not QUIET:\n print('%4d\\t%10.4f\\t%10.4f\\t%10.4f\\t%10.4f\\t%10.2f' %(k + 1,\\\n h['r_norm'][k],\\\n h['eps_pri'][k],\\\n h['s_norm'][k],\\\n h['eps_dual'][k],\\\n h['objval'][k]))\n\n if (h['r_norm'][k] < h['eps_pri'][k]) and (h['s_norm'][k] < h['eps_dual'][k]):\n break\n\n if not QUIET:\n toc = time.time()-tic\n print(\"\\nElapsed time is %.2f seconds\"%toc)\n\n return z, h", "def vec_factored_rolling(decays: jnp.ndarray) -> _InitUpdate:\n return _vmap_accumulator(factored_rolling, decays)", "def lanczos_decomp(vector_prod_fn, scalar, n, k):\n Q = tf.zeros([n, 1])\n v = tf.random_uniform([n, 1])\n v = v / tf.norm(v)\n Q = tf.concat([Q, v], axis=1)\n\n # diagonals of the tridiagonal matrix\n beta = tf.constant(0.0, dtype=tf.float32, shape=[1])\n alpha = tf.constant(0.0, dtype=tf.float32, shape=[1])\n\n for i in range(k):\n v = vector_prod_fn(tf.reshape(Q[:, i+1], [n, 1])) - tf.scalar_mul(scalar, tf.reshape(Q[:, i+1], [n, 1]))\n v = tf.reshape(v, [n,])\n curr_alpha = tf.reshape(tf.reduce_sum(v * Q[:, i+1]), [1,])\n alpha = tf.concat([alpha, curr_alpha], axis=0)\n v = v-beta[-1]*Q[:, i]-alpha[-1]*Q[:, i+1]\n curr_beta = tf.reshape(tf.norm(v), [1,])\n beta = tf.concat([beta, curr_beta], axis=0)\n curr_norm = tf.reshape(v/(beta[-1]+1e-8), [n, 1])\n Q = tf.concat([Q, curr_norm], axis=1)\n\n alpha = tf.slice(alpha, begin=[1], size=[-1])\n beta = tf.slice(beta, begin=[1], size=[k-1])\n Q = tf.slice(Q, begin=[0, 1], size=[-1, k])\n return alpha, beta, Q", "def Avv_func(f):\n\n def Avv(x, v):\n def F(s):\n return f(x + v * s)\n\n return jacfwd(jacfwd(F))(0.0)\n\n return Avv", "def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. 
Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)", "def factor_mat(all_dat, f_num, iterations, regularization):\n\n\t# get # of users and # of items\n\t[u_num, i_num] = all_dat.shape\n\n\t# init user factors and item factors with random values\n\tu_fac = np.matrix(np.random.rand(u_num, f_num))\t# MxF\n\ti_fac = np.matrix(np.random.rand(i_num, f_num))\t# NxF\n\n\t# calculate the preference matrix\n\tpreference = cal_preference(all_dat)\n\n\t# calculate the confidence matrix\n\tconfidence = cal_confidence(all_dat)\n\t\n\t# recalculate the user factors and item factors using the alternating least square method\n\tfor itr in range(iterations):\n\t\tu_fac = alternate_ls(u_num, i_fac, preference, confidence, regularization)\n\t\t#print itr, \"u_fac\"\n\t\ti_fac = alternate_ls(i_num, u_fac, preference.T, confidence.T, regularization)\n\t\t#print itr, \"i_fac\"\n\t\n\t# save the output\n\tdf = pd.DataFrame(u_fac)\n\tdf.to_csv(\"tmp/u_fac.tmp\", index=False, header=False, sep='\\t', encoding='utf-8')\n\tdf = pd.DataFrame(i_fac.T)\n\tdf.to_csv(\"tmp/i_fac.tmp\", index=False, header=False, sep='\\t', encoding='utf-8')\n\n\t# an MxF user factor matrix and an FxN item factor matrix\n\treturn [u_fac, i_fac.T]", "def mylinearsvm(beta, lambd, x, y, step_size_init, eps=0.0000001, max_iter=100):\n theta = beta\n t = step_size_init\n grad_beta = grad(beta, lambd, x, y)\n beta_vals = [beta]\n objs = [obj(beta, lambd, x, y)]\n iter = 0\n while np.linalg.norm(grad_beta) > eps and iter < max_iter: \n # THE CODE BELOW SO IT USES BACKTRACKING LINE SEARCH INSTEAD OF A CONSTANT STEP SIZE\n t = backtracking(beta, lambd=lambd, x=x, y=y, step_size=t)\n # THE CODE BELOW USES UPDATING THETA FOR BETA OPTIMAZATION\n beta = theta - t*grad_beta\n theta = beta + (iter/(iter+3))*(beta - beta_vals[-1])\n obj_val = obj(beta,lambd, x, y)\n beta_vals.append(beta)\n objs.append(obj_val)\n grad_beta = grad(theta, lambd, x, y)\n iter += 1\n \n return np.array(beta_vals), np.array(objs)", "def mylinearsvm(lambdat, eta_init, maxiter, X, y):\n d = np.size(X, 1)\n beta_init = np.zeros(d)\n theta_init = np.zeros(d)\n betas, objs = fast_grad(beta_init, theta_init, lambdat, eta_init, maxiter,X=X,y=y)\n return betas, objs", "def _fd_matrix(step_ratio, parity, nterms):\n _assert(0 <= parity <= 6,\n 'Parity must be 0, 1, 2, 3, 4, 5 or 6! 
({0:d})'.format(parity))\n step = [1, 2, 2, 4, 4, 4, 4][parity]\n inv_sr = 1.0 / step_ratio\n offset = [1, 1, 2, 2, 4, 1, 3][parity]\n c0 = [1.0, 1.0, 1.0, 2.0, 24.0, 1.0, 6.0][parity]\n c = c0 / \\\n special.factorial(np.arange(offset, step * nterms + offset, step))\n [i, j] = np.ogrid[0:nterms, 0:nterms]\n return np.atleast_2d(c[j] * inv_sr ** (i * (step * j + offset)))", "def incremental_svd(A, qr_flg=False):\n\n m = 256\n n = 7291\n\n n0 = 256\n\n if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')\n\n start = time.clock()\n\n A0 = A[:, :n0]\n U, s, V = ln.svd(A0, full_matrices=False)\n\n # NOTE: s is a vector; np.diag(s) will produce a diagonal matrix\n for i in range(n0, n):\n\n # new matrix is just a single vector (i-th column of A)\n A1 = np.matrix(A[:, i]).T\n\n if qr_flg:\n J, K = ln.qr(A1 - np.dot(np.dot(U, U.T), A1))\n U_, s_, V_ = ln.svd(\n np.vstack((\n np.hstack((np.diag(s), np.dot(U.T, A1))),\n np.hstack((np.zeros((K.shape[0], s.shape[0])), K))\n )),\n full_matrices=False)\n\n # update the result of SVD\n U = np.dot(np.hstack((U, J)), U_)\n\n else:\n U_, s_, V_ = ln.svd(np.hstack((np.diag(s), np.dot(U.T, A1))), full_matrices=False)\n U = np.dot(U, U_)\n\n s = s_\n\n # NOTE: V from svd on NumPy is already transposed\n V = np.dot(V_,\n np.vstack((\n np.hstack((V, np.zeros((V.shape[0], i+1-V.shape[1])))),\n np.hstack((np.zeros((V_.shape[1]-V.shape[0], V.shape[1])), np.eye(V_.shape[1]-V.shape[0], i+1-V.shape[1])))\n ))\n )\n\n # for next computation, update A0\n A0 = np.hstack((A0, A1))\n\n elapsed_time = time.clock() - start\n print 'time:', elapsed_time\n\n return U, s, V", "def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n\n def A_cgs_fun(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n\n def A_cgs_fun_init(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n # initialize z and u\n z = compute_init(ATy, ATy)\n u = np.zeros(x_shape)\n\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n Wzu, wbook = wavelet_transform(net_input)\n q = soft_threshold(Wzu, lambda_l1/alpha)\n x = inverse_wavelet_transform(q, wbook, x_shape)\n x = np.reshape(x, x_shape)\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress == True:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n 
fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm))\n\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)", "def projective_factorization(x, max_iterations=1):\n\n n_views = len(x)\n n_points = x[0].shape[1]\n\n iterations = 0\n\n #lambda matrix, approximate depths\n l = np.ones((n_views, n_points))\n\n #normalization matrices\n norm_matrices = []\n\n # normalize coordinates\n xn = np.zeros((3*n_views, n_points))\n for i in range(n_views):\n\n #find normalization matrix for projections i\n x_norm, T = normalize_points(x[i], is_homogeneous=True)\n xn[3*i:3*(i+1), :] = x_norm\n norm_matrices.append(T)\n\n while iterations < max_iterations:\n # normalize the lambda matrix\n lr_norm = norm(l, axis=1)\n ln = l / lr_norm[:, np.newaxis]\n lc_norm = norm(ln, axis=0)\n ln /= lc_norm\n\n # repeat the lambdas\n ln = np.repeat(ln, 3, axis=0)\n\n #build the factorization matrix\n fact_matrix = ln*xn\n\n u, d, vh = svd(fact_matrix)\n\n print(d[3] / d[4])\n d = d[:4]/d[0]\n\n # from the svd decomposition we can find the projections and 3d points\n p_matrices = u[:, :4]\n x_3d = np.dot(np.diag(d), vh[:4, :])\n\n iterations += 1\n if iterations != max_iterations:\n\n w_matrix = np.dot(p_matrices, x_3d)\n\n for i in range(n_views):\n l[i, :] = w_matrix[3*i+2, :]\n\n cameras = []\n\n for i in range(n_views):\n # denormalize camera matrices\n c_matrix = np.dot(inv(norm_matrices[i]), p_matrices[3*i:3*(i+1), :])\n\n cameras.append(c_matrix)\n\n return cameras, x_3d", "def nonnegative_tensor_factorization(X, r, method='anls_bpp',\n tol=1e-4, stop_criterion=1,\n min_iter=20, max_iter=200, max_time=1e6,\n init=None, orderWays=None):\n\n nWay = len(X.shape)\n\n if orderWays is None:\n orderWays = np.arange(nWay)\n\n # set initial values\n if init is not None:\n F_cell = init\n else:\n Finit = [np.random.rand(X.shape[i], r) for i in range(nWay)]\n F_cell = Finit\n\n grad = getGradient(X, F_cell, nWay, r)\n\n nr_X = X.norm()\n nr_grad_all = np.sqrt(np.sum(np.linalg.norm(grad[i], 'fro') ** 2\n for i in range(nWay)))\n\n if method == \"anls_bpp\":\n method = anls_bpp()\n elif method == \"anls_asgroup\":\n method = anls_asgroup()\n else:\n raise Exception(\"Unknown method\")\n\n # Execute initializer\n F_cell, FF_init = method.initializer(X, F_cell, nWay, orderWays)\n\n tStart = time.time()\n\n if stop_criterion == 2:\n F_kten = ktensor(F_cell)\n rel_Error = getRelError(X, ktensor(F_cell), nWay, nr_X)\n\n if stop_criterion == 1:\n pGrad = getProjGradient(X, F_cell, nWay, r)\n SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)\n\n # main iterations\n for 
iteration in range(max_iter):\n cntu = True\n\n F_cell, FF_init = method.iterSolver(X, F_cell,\n FF_init, nWay, r, orderWays)\n F_kten = ktensor(F_cell)\n\n if iteration >= min_iter:\n\n if time.time() - tStart > max_time:\n cntu = False\n\n else:\n\n if stop_criterion == 1:\n pGrad = getProjGradient(X, F_cell, nWay, r)\n SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)\n if SC_PGRAD < tol:\n cntu = False\n\n elif stop_criterion == 2:\n prev_rel_Error = rel_Error\n rel_Error = getRelError(X, F_kten, nWay, nr_X)\n SC_DIFF = np.abs(prev_rel_Error - rel_Error)\n if SC_DIFF < tol:\n cntu = False\n else:\n rel_Error = getRelError(X, F_kten, nWay, nr_X)\n if rel_Error < 1:\n cntu = False\n\n if not cntu:\n break\n\n return F_kten", "def solve_lu(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape, b.dtype)\n return jax.numpy.linalg.solve(A, b)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape, b.dtype) # 4d array (tensor)\n A = A.reshape(-1, b.shape[0] * b.shape[1]) # 2d array (matrix)\n return jax.numpy.linalg.solve(A, b.ravel()).reshape(*b.shape)\n else:\n raise NotImplementedError", "def estimate_ivec(nt, ft, v_matrix, vtv_matrix, eye=None):\n v_dim = v_matrix.shape[1]\n n_gauss = nt.shape[1]\n\n # Construct eye if necessary\n if eye is None:\n eye = Extractor.to_rfpf(np.eye(v_dim, dtype=v_matrix.dtype).T)\n\n it = eye.T.reshape((1, -1))\n vtvt = vtv_matrix.T.reshape((n_gauss, -1))\n\n b = np.dot(ft, v_matrix).T\n lt = np.dot(nt, vtvt) + it\n\n l = lt.reshape((vtv_matrix.shape[1], vtv_matrix.shape[0])).T\n\n out = Extractor.solve(l, b)\n\n return out", "def _get_mult_function_runtime_sparse(k_list, l_list, m_list, mult_table_vals, n_dims):\n @numba.njit\n def mv_mult(value, other_value):\n output = np.zeros(n_dims)\n for ind, k in enumerate(k_list):\n v_val = value[k]\n if v_val != 0.0:\n m = m_list[ind]\n ov_val = other_value[m]\n if ov_val != 0.0:\n l = l_list[ind]\n output[l] += v_val * mult_table_vals[ind] * ov_val\n return output\n\n return mv_mult", "def autovectorized(f):\r\n def wrapper(input):\r\n if N.isscalar(input)==False:\r\n return N.vectorize(f)(input)\r\n return f(input)\r\n return wrapper", "def autovectorized(f):\r\n def wrapper(input):\r\n if N.isscalar(input)==False:\r\n return N.vectorize(f)(input)\r\n return f(input)\r\n return wrapper", "def getLinearizedMatrices(model_type: ModelType, operating_point, Vf_op, Vb_op):\n\n p_op, e_op, lamb_op, dp_op, de_op, dlamb_op = operating_point\n\n # Vf_op, Vb_op = compute_feed_forward_flatness(e_and_derivatives, lambda_and_derivatives)\n Vs_op = Vf_op + Vb_op\n Vd_op = Vf_op - Vb_op\n\n if model_type == ModelType.EASY:\n A = np.array([[0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0],\n [-L3 * Vs_op * sin(p_op) / Je_static, -L2 * sin(e_op) / Je_static, 0, 0, 0, 0],\n [L4 * Vs_op * cos(p_op) * cos(e_op) / Jl_static, 0, 0, 0, 0, 0]])\n elif model_type == ModelType.FRICTION:\n A = np.array([[0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, -mc.d_p / Jp_static, 0, 0],\n [-L3 * Vs_op * sin(p_op) / Je_static, -L2 * sin(e_op) / Je_static, 0, 0, -mc.d_e / Je_static, 0],\n [L4 * Vs_op * cos(p_op) * cos(e_op) / Jl_static, -L4 * Vs_op * sin(p_op) * sin(e_op) / Jl_static, 0, 0, 0, -mc.d_l / Jl_static]])\n elif model_type == ModelType.CENTRIPETAL:\n A = np.array([[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1],\n 
[-(de_op ** 2 - dlamb_op ** 2 * cos(e_op) ** 2) * sin(p_op) ** 2 + (de_op ** 2 - dlamb_op ** 2 * cos(e_op) ** 2) * cos(p_op) ** 2, 2 * dlamb_op ** 2 * sin(p_op) * sin(e_op) * cos(p_op) * cos(e_op), 0, -mc.d_p / Jp_static, 2 * de_op * sin(p_op) * cos(p_op), -2 * dlamb_op * sin(p_op) * cos(p_op) * cos(e_op) ** 2],\n [-L3 * Vs_op * sin(p_op) / Je_static, dlamb_op ** 2 * sin(e_op) ** 2 - dlamb_op ** 2 * cos(e_op) ** 2 - L2 * sin(e_op) / Je_static, 0, 0, -mc.d_e / Je_static, -2 * dlamb_op * sin(e_op) * cos(e_op)],\n [L4 * Vs_op * cos(p_op) * cos(e_op) / Jl_static, -L4 * Vs_op * sin(p_op) * sin(e_op) / Jl_static, 0, 0, 0, -mc.d_l / Jl_static]])\n\n B = np.array([[0, 0],\n [0, 0],\n [0, 0],\n [L1 / Jp_static, -L1 / Jp_static],\n [L3 / Je_static * cos(p_op), L3 / Je_static * cos(p_op)],\n [L4 * sin(p_op) * cos(e_op) / Jl_static, L4 * sin(p_op) * cos(e_op) / Jl_static]])\n\n return A, B, Vf_op, Vb_op", "def force_list_lookup(X, V, lt, iparams, blist, sp):\n N = len(X)\n force_cube = np.zeros((N, N, 3))\n cell = sp.L*np.eye(3)\n inv_cell = np.linalg.pinv(cell)\n for i in range(N):\n for j in range(i):\n if lt[i, j]:\n dr = X[i] - X[j] # rij = ri - rj\n G = np.dot(inv_cell, dr)\n G_n = G - np.round(G)\n dr_n = np.dot(cell, G_n)\n v_ij = V[i] - V[j] # vij = vi - vj\n force_cube[i, j, :] = \\\n F_tot(dr_n, v_ij, iparams[(blist[i], blist[j])], sp)\n \n force_cube -= np.transpose(force_cube, (1, 0, 2))\n return np.sum(force_cube, axis=1)", "def SVM_train(Ktrain,y,lbda_vec):\r\n n = Ktrain.shape[0]\r\n for idx, lbda in enumerate(lbda_vec): \r\n C = 1/(2*lbda*n)\r\n P = matrix(Ktrain, tc=\"d\")\r\n q = - matrix(y,tc=\"d\")\r\n G = matrix( np.concatenate( (np.diagflat(y) , -np.diagflat(y) ), axis=0 ),tc=\"d\" )\r\n h1 = C * np.ones((n,1))\r\n h2 = np.zeros((n,1)) \r\n h = matrix(np.concatenate((h1,h2),axis=0))\r\n\r\n solvers.options['show_progress'] = False\r\n \r\n sol = solvers.qp(P,q,G,h) \r\n a = np.asarray(sol['x'])\r\n\r\n #alpha is sparse\r\n a[np.where(np.abs(a) < 1e-4)] = 0\r\n y_svm = np.dot(Ktrain,a)\r\n\r\n print(\"Précision pour lambda = \" + str(lbda) + \" :\", accuracy(y_svm,y))", "def beta_A_isometric_monte_carlo(self, v, **kwargs):\r\n v = self.np_array(v)\r\n beta_A = np.zeros(v.shape)\r\n for i, v_i in enumerate(v):\r\n self.beta_E = lambda lambda_: self.beta_U_1(lambda_) + \\\r\n self.beta_A_0_abs_isometric(1, lambda_)\r\n\r\n def serial_fun(init_config, **kwargs):\r\n return self.beta_A_isometric_monte_carlo_serial(\r\n v_i, init_config, **kwargs\r\n )\r\n\r\n beta_A[i] = self.parallel_calculation(\r\n serial_fun,\r\n self.minimize_beta_U(v_i)[2][-self.M:, 0],\r\n **kwargs\r\n )\r\n return beta_A", "def Lanczos(A, k, *, sparse=False, dim=None):\n if sparse:\n n = dim\n dtype = torch.float64\n Amap = A\n else:\n n = A.shape[0]\n dtype = A.dtype\n Amap = lambda v: torch.matmul(A, v)\n Qk = torch.zeros((n, k), dtype=dtype)\n alphas = torch.zeros(k, dtype=dtype)\n betas = torch.zeros(k - 1, dtype=dtype)\n q = torch.randn(n, dtype=dtype)\n q = q / torch.norm(q)\n u = Amap(q)\n alpha = torch.matmul(q, u)\n Qk[:, 0] = q\n alphas[0] = alpha\n beta = 0\n qprime = torch.randn(n, dtype=dtype)\n for i in range(1, k):\n r = u - alpha * q - beta * qprime\n\n # The simple but expensive full reorthogonalization process\n # in order to recover the orthogonality among the Lanczos vectors caused by\n # rounding error in floating point arithmetic.\n r -= torch.matmul(Qk[:, :i], torch.matmul(Qk[:, :i].T, r))\n\n qprime = q\n beta = torch.norm(r)\n q = r / beta\n u = Amap(q)\n alpha = torch.matmul(q, u)\n 
alphas[i] = alpha\n betas[i - 1] = beta\n Qk[:, i] = q\n T = torch.diag(alphas) + torch.diag(betas, diagonal=1) + torch.diag(betas, diagonal=-1)\n return Qk, T", "def compute_force(X, V, bl, ip, box, gamma, kT, dt):\n N = len(X)\n F = np.zeros((N, 3))\n Fcube = np.zeros((N, N, 3))\n inv_box = np.zeros((3, 3))\n for i in range(3): inv_box[i, i] = 1.0 / box[i, i]\n g = np.zeros(3)\n rij = np.zeros(3)\n vij = np.zeros(3)\n a = 0.0\n nr = 0.0\n fpair = 0.0\n\n vir = 0.0\n sigma = np.zeros(3)\n volume = np.linalg.det(box)\n\n for i in range(N):\n for j in range(i):\n rij = X[i] - X[j]\n g = matvecmul(inv_box, rij)\n g = g - np.round_(g, 0, np.empty_like(g))\n rij = matvecmul(box, g)\n vij = V[i] - V[j]\n\n a = ip[bl[i]-1, bl[j]-1]\n nr = norm_numba(rij)\n\n fc = a * wr(nr)\n fpair = fc \\\n - gamma * wr(nr)**2 * dot_numba(rij, vij) / nr \\\n + sqrt(2.0*gamma*kT) * wr(nr) * np.random.randn() / sqrt(dt)\n Fcube[i, j, :] = fpair / nr * rij\n Fcube[j, i, :] = -fpair / nr * rij\n\n vir += Fcube[i, j, :] @ rij\n sigma += Fcube[i, j, :] * rij\n\n # kinetic part of stress tensor\n for i in range(N):\n sigma += V[i] * V[i]\n\n sigma = sigma / volume\n F = np.sum(Fcube, 1)\n\n return F, vir, sigma", "def als(user_ids : numpy.ndarray, item_ids : numpy.ndarray,\n ratings : numpy.ndarray, num_item_factors : int,\n num_users: int, num_items : int, min_r_decrease=0.01,\n max_iterations=200, algorithm=1):\n # allocate \"user_factors\" and \"item_factors\"\n num_user_factors = num_item_factors + 1\n user_factors = numpy.random.uniform(-1, 1, num_users * num_user_factors)\n item_factors = numpy.random.uniform(-1, 1, num_items * num_item_factors)\n\n # argument construction\n user_ids_ptr = user_ids.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n item_ids_ptr = item_ids.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n ratings_length = len(ratings)\n ratings_ptr = ratings.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n user_factors_length = len(user_factors)\n user_factors_ptr = user_factors.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n item_factors_length = len(item_factors)\n item_factors_ptr = item_factors.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n iterations = _dll.als_from_python(\n user_ids_ptr, item_ids_ptr, ratings_length, ratings_ptr,\n num_item_factors, user_factors_length, user_factors_ptr,\n item_factors_length, item_factors_ptr, ctypes.c_double(min_r_decrease),\n max_iterations, algorithm)\n\n return user_factors, item_factors, iterations", "def __factor_matrix(self, R, K, alpha, steps, beta, error_limit):\n # Transform regular array to numpy array\n R = numpy.array(R)\n\n # Generate P - N x K\n # Use random values to start. Best performance\n N = len(R)\n M = len(R[0])\n P = numpy.random.rand(N, K)\n\n # Generate Q - M x K\n # Use random values to start. 
Best performance\n Q = numpy.random.rand(M, K)\n Q = Q.T\n\n error = 0\n\n # iterate through max # of steps\n for step in xrange(steps):\n\n # iterate each cell in r\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # get the eij (error) side of the equation\n eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])\n\n for k in xrange(K):\n # (*update_rule) update pik_hat\n P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])\n\n # (*update_rule) update qkj_hat\n Q[k][j] = Q[k][j] + alpha * ( 2 * eij * P[i][k] - beta * Q[k][j] )\n\n # Measure error\n error = self.__error(R, P, Q, K, beta)\n\n # Terminate when we converge\n if error < error_limit:\n break\n\n # track Q, P (learned params)\n # Q = Products x feature strength\n # P = Users x feature strength\n self.Q = Q.T\n self.P = P\n\n self.__print_fit_stats(error, N, M)", "def m_step(rr, votes, i_activations, beta_v, beta_a, inverse_temperature):\n\n rr_prime = rr * i_activations\n\n # rr_prime_sum: sum over all input capsule i\n rr_prime_sum = tf.reduce_sum(rr_prime, axis=-3, keepdims=True, name='rr_prime_sum')\n\n # Mean of the output capsules: o_mean(24, 6, 6, 1, 32, 16)\n o_mean = tf.reduce_sum(\n rr_prime * votes, axis=-3, keepdims=True\n ) / rr_prime_sum\n\n # Standard deviation of the output capsule: o_stdv (24, 6, 6, 1, 32, 16)\n o_stdv = tf.sqrt(\n tf.reduce_sum(\n rr_prime * tf.square(votes - o_mean), axis=-3, keepdims=True\n ) / rr_prime_sum\n )\n\n # o_cost_h: (24, 6, 6, 1, 32, 16)\n o_cost_h = (beta_v + tf.log(o_stdv + epsilon)) * rr_prime_sum\n\n # o_cost: (24, 6, 6, 1, 32, 1)\n # o_activations_cost = (24, 6, 6, 1, 32, 1)\n # For numeric stability.\n o_cost = tf.reduce_sum(o_cost_h, axis=-1, keepdims=True)\n o_cost_mean = tf.reduce_mean(o_cost, axis=-2, keepdims=True)\n o_cost_stdv = tf.sqrt(\n tf.reduce_sum(\n tf.square(o_cost - o_cost_mean), axis=-2, keepdims=True\n ) / o_cost.get_shape().as_list()[-2]\n )\n o_activations_cost = beta_a + (o_cost_mean - o_cost) / (o_cost_stdv + epsilon)\n\n # (24, 6, 6, 1, 32, 1)\n o_activations = tf.sigmoid(\n inverse_temperature * o_activations_cost\n )\n\n return o_mean, o_stdv, o_activations", "def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod", "def get_leftLaInv(k_list, l_list, m_list, mult_table_vals, n_dims, gradeList):\n\n identity = np.zeros((n_dims,))\n identity[gradeList.index(0)] = 1\n\n @numba.njit\n def leftLaInvJIT(value):\n intermed = np.zeros((n_dims, n_dims))\n for test_ind, i in enumerate(k_list):\n j = l_list[test_ind]\n k = m_list[test_ind]\n intermed[i, j] += mult_table_vals[test_ind] * value[k]\n intermed = np.transpose(intermed)\n if abs(linalg.det(intermed)) < _eps:\n raise ValueError(\"multivector has no left-inverse\")\n sol = linalg.solve(intermed, identity)\n return sol\n\n return leftLaInvJIT", "def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x", "def solve_feas(hplanes, idx, method=\"farkas\"):\n if method == \"farkas\":\n cat_hplanes = np.concatenate(\n [hplanes, - hplanes[idx][np.newaxis, :]], axis=0)\n A = - cat_hplanes[:, :-1]\n b = cat_hplanes[:, -1]\n out = solve_farkas(A, b)\n else:\n raise NotImplementedError('method no implemented.')\n return out", "def VFI(method) :\n iteration=0 # Iteration Counter\n converged = 0 # Convergence Flag|\n \n#----- Initial Settings \n v_update = 
zeros(n_grid)\n v_func = empty(n_grid)\n k_next_vec = empty(n_grid)\n run_time = empty(2)\n \n def obj(k_next) :\n \"\"\"\n This function is used in value function iteration.\n It represents the objective function to be maximized for one node (state) of current capitals.\n Resulting value is maximized one corresponding to next period's capital as a maximizer. \n Next period's value is computed by interpolation.\n \n Input : k_next (next period's capital)\n \n Output : value_vec (maximized value resulting from choosing optimal capital in the next period)\n \"\"\" \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec\n\n#----- Value function iteration\n start = time.time() # start time\n while converged==0 :\n index = 0\n for k_current in k_grid :\n k_next = fminbound(obj,k_grid[0],k_grid[-1])\n v_func[index] = (-1) * obj(k_next)\n k_next_vec[index] = k_next\n index = index + 1\n dist = abs(max(v_func - v_update))\n if dist<tol :\n converged = 1\n v_k, g_k = v_func, k_next_vec\n v_update = v_func\n print \"Iteration : \",iteration,\"\",\"Distance : \",dist # convergence process\n iteration = iteration + 1\n v_func = empty(n_grid) \n k_next_vec = empty(n_grid)\n \n end = time.time() # end time\n run_time[0],run_time[1] = runtime_cal(start,end) # total running time\n \n return v_k, g_k, run_time, iteration", "def compute_matrix(*params, **hyperparams):\n phi = params[0]\n d, t = hyperparams[\"dimension\"]\n\n if qml.math.get_interface(phi) == \"tensorflow\":\n p = qml.math.exp(1j * qml.math.cast_like(phi, 1j))\n minus_p = qml.math.exp(-1j * qml.math.cast_like(phi, 1j))\n zeros = qml.math.zeros_like(p)\n\n columns = []\n for i in range(t):\n columns.append(\n [p if j == i else zeros for j in range(t)]\n if i < d\n else [minus_p if j == i else zeros for j in range(t)]\n )\n r = qml.math.stack(columns, like=\"tensorflow\", axis=-2)\n return r\n\n arg = 1j * phi\n prefactors = qml.math.array([1 if index < d else -1 for index in range(t)], like=phi)\n\n if qml.math.ndim(arg) == 0:\n return qml.math.diag(qml.math.exp(arg * prefactors))\n\n diags = qml.math.exp(qml.math.outer(arg, prefactors))\n return qml.math.stack(qml.math.diag(d) for d in diags)", "def v(resistances, r_i, applied_voltages, **kwargs):\n if r_i.word_line > 0 or r_i.bit_line > 0:\n g = fill.g(resistances, r_i)\n i = fill.i(applied_voltages, resistances, r_i)\n\n utils.message('Started solving for v.', **kwargs)\n v_matrix = linalg.spsolve(g.tocsc(), i)\n utils.message('Solved for v.', **kwargs)\n\n # if `num_examples == 1`, it can result in 1D array.\n if v_matrix.ndim == 1:\n v_matrix = v_matrix.reshape(v_matrix.shape[0], 1)\n\n # if one of the interconnect resistances is zero, only half of the\n # matrix_v had to be solved. 
The other half can be filled without\n # solving because the node voltages are known.\n if r_i.word_line == 0:\n new_v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n new_v_matrix[:resistances.size, ] = np.repeat(\n applied_voltages, resistances.shape[1], axis=0)\n new_v_matrix[resistances.size:, ] = v_matrix\n v_matrix = new_v_matrix\n if r_i.bit_line == 0:\n new_v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n new_v_matrix[:resistances.size, ] = v_matrix\n v_matrix = new_v_matrix\n else:\n # if both interconnect resistances are zero, all node voltages are\n # known.\n v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n v_matrix[:resistances.size, ] = np.repeat(\n applied_voltages, resistances.shape[1], axis=0)\n\n return v_matrix", "def fdspring(xy, v, NL, KL, BM, Mm, beta):\n NP, nn = np.shape(NL)\n if np.shape(xy)[1] == 2:\n '''2D version'''\n vecx = np.array([[KL[i, j] * (xy[i, 0] - xy[NL[i, j], 0]) for j in range(nn)] for i in range(NP)])\n vecy = np.array([[KL[i, j] * (xy[i, 1] - xy[NL[i, j], 1]) for j in range(nn)] for i in range(NP)])\n mag = np.sqrt(vecx ** 2 + vecy ** 2)\n # KLnoz = KL.copy() #no zeros\n # KLnoz[KLnoz ==0] = 1. --> same value as mag[mag==0], so that stretch=0 for those\n stretch = mag - BM\n mag[mag == 0.] = 1. # avoid divide by zero error\n # print(stretch)\n dxvec = np.sum(stretch * vecx / mag, axis=-1) / Mm\n dyvec = np.sum(stretch * vecy / mag, axis=-1) / Mm\n # damping term\n damp_dv = np.array([beta / Mm[i] * v[i] for i in range(NP)])\n # add them up\n ftx = -np.hstack((dxvec.reshape(NP, 1), dyvec.reshape(NP, 1))) - damp_dv\n else:\n '''3D version'''\n vecx = np.array([[KL[i, j] * (xy[i, 0] - xy[NL[i, j], 0]) for j in range(nn)] for i in range(NP)])\n vecy = np.array([[KL[i, j] * (xy[i, 1] - xy[NL[i, j], 1]) for j in range(nn)] for i in range(NP)])\n vecz = np.array([[KL[i, j] * (xy[i, 2] - xy[NL[i, j], 2]) for j in range(nn)] for i in range(NP)])\n mag = np.sqrt(vecx ** 2 + vecy ** 2 + vecz ** 2)\n # KLnoz = KL.copy() #no zeros\n # KLnoz[KLnoz ==0] = 1. #same value as mag[mag==0], so that stretch=0 for those\n stretch = mag - BM\n mag[mag == 0.] = 1. 
# avoid divide by zero error\n dxvec = np.sum(stretch * vecx / mag, axis=-1) / Mm\n dyvec = np.sum(stretch * vecy / mag, axis=-1) / Mm\n dzvec = np.sum(stretch * vecz / mag, axis=-1) / Mm\n # damping term\n damp_dv = np.array([beta / Mm[i] * v[i] for i in range(NP)])\n # add them up\n ftx = -np.hstack((dxvec.reshape(NP, 1), dyvec.reshape(NP, 1), dyvec.reshape(NP, 1))) - damp_dv\n return ftx", "def _jvp_isotonic_mag(solution, vector, w, l, eps=1e-4):\n x = solution\n mask = jnp.pad(jnp.absolute(jnp.diff(x)) <= eps, (1, 0))\n ar = jnp.arange(x.size)\n\n inds_start = jnp.where(mask == 0, ar, +jnp.inf).sort()\n u = 1 + l * w\n one_hot_start = jax.nn.one_hot(inds_start, len(vector))\n a = _cumsum_einsum(one_hot_start)\n a = jnp.append(jnp.diff(a[::-1], axis=0)[::-1], a[-1].reshape(1, -1), axis=0)\n return (\n ((a.T * (a @ (vector * u))).T) / ((a * u).sum(1, keepdims=True) + 1e-8)\n ).sum(0)", "def _compile(self, V, cardinality, L, L_offset, y, deps, init_acc, init_deps):\n\n ### Error Checking ###\n\n # Check L_offset is valid\n index = np.flatnonzero(UdfStart == L_offset)\n if len(index) == 0:\n raise ValueError(\"L_offset \" + str(L_offset) + \" does not correspond to a known application\")\n if len(index) > 1:\n raise ValueError(\"L_offset \" + str(L_offset) + \" found multiple times\")\n index = index[0]\n\n # Check L is the right size\n if len(L) != LfCount[index]:\n raise ValueError(\"Wrong number of LFs passed: (\" + str(len(L)) + \" given and \" + str(LfCount[index]) + \" in udf.py)\")\n\n # Check cardinality of each LF is right\n for i in range(len(L)):\n if len(L[i]) != UdfCardinality[UdfCardinalityStart[index] + i]:\n raise ValueError(\"LF \" + str(i) + \" has the wrong cardinality: (\" + str(len(L[i])) + \" given and \" + str(UdfCardinality[UdfCardinalityStart[index] + i]) + \" in udf.py)\")\n\n # Check that there are enough vocab terms\n for i in range(len(L)):\n for j in range(len(L[i])):\n if L[i][j] >= V.shape[1]:\n raise ValueError(\"LF \" + str(i) + \" uses vocab \" + str(L[i][j]) + \" when there are only \" + str(V.shape[1]) + \" terms\")\n\n\n ### Set up factor graph ###\n\n n_data = V.shape[0]\n n_vocab = V.shape[1]\n n_lf = len(L)\n\n n_weights = n_lf + len(deps)\n n_vars = n_data * (n_vocab + 1)\n n_factors = n_data * n_weights\n n_edges = n_data * (sum([len(l) + 1 for l in L]) + 2 * len(deps))\n\n weight = np.zeros(n_weights, Weight)\n variable = np.zeros(n_vars, Variable)\n factor = np.zeros(n_factors, Factor)\n ftv = np.zeros(n_edges, FactorToVar)\n domain_mask = np.zeros(n_vars, np.bool)\n\n #\n # Compiles weight matrix\n #\n for i in range(n_weights):\n weight[i]['isFixed'] = False\n if i < n_lf:\n if type(init_acc) == int or type(init_acc) == float:\n weight[i]['initialValue'] = np.float64(init_acc)\n else:\n weight[i]['initialValue'] = init_acc[i]\n else:\n if type(init_deps) == int or type(init_deps) == float:\n weight[i]['initialValue'] = np.float64(init_deps)\n else:\n weight[i]['initialValue'] = init_deps[i - n_lf]\n\n #\n # Compiles variable matrix\n #\n\n # True Label y\n for i in range(n_data):\n variable[i]['isEvidence'] = False if (y is None) else True\n variable[i]['initialValue'] = self.rng.randrange(0, 2) if (y is None) else (1 if y[i] == 1 else 0)\n variable[i][\"dataType\"] = 0\n variable[i][\"cardinality\"] = 2\n\n # Vocabulary\n for i in range(n_data):\n for j in range(n_vocab):\n variable[n_data + i * n_vocab + j][\"isEvidence\"] = True\n variable[n_data + i * n_vocab + j][\"initialValue\"] = V[i, j]\n variable[n_data + i * n_vocab + 
j][\"dataType\"] = 0\n variable[n_data + i * n_vocab + j][\"cardinality\"] = cardinality[j]\n if V[i, j] >= cardinality[j]:\n raise ValueError(\"Vocab \" + str(j) + \" contains \" + str(V[i, j]) + \" even though it has a cardinality of \" + str(cardinality[j]))\n\n #\n # Compiles factor and ftv matrices\n #\n index = 0\n # Accuracy\n for i in range(n_data):\n for j in range(n_lf):\n factor[i * n_lf + j][\"factorFunction\"] = L_offset + j\n factor[i * n_lf + j][\"weightId\"] = j\n factor[i * n_lf + j][\"featureValue\"] = 1.0\n factor[i * n_lf + j][\"arity\"] = len(L[j]) + 1\n factor[i * n_lf + j][\"ftv_offset\"] = index\n for k in range(len(L[j])):\n ftv[index][\"vid\"] = n_data + i * n_vocab + L[j][k]\n ftv[index][\"dense_equal_to\"] = 0 # not actually used\n index += 1\n ftv[index][\"vid\"] = i\n ftv[index][\"dense_equal_to\"] = 0 # not actually used\n index += 1\n\n # Dependencies\n for i in range(n_data):\n for j in range(len(deps)):\n factor[n_lf * n_data + i * len(deps) + j][\"factorFunction\"] = FUNC_CORAL_GEN_DEP_SIMILAR\n factor[n_lf * n_data + i * len(deps) + j][\"weightId\"] = n_lf + j\n factor[n_lf * n_data + i * len(deps) + j][\"featureValue\"] = 1.0\n factor[n_lf * n_data + i * len(deps) + j][\"arity\"] = 2\n factor[n_lf * n_data + i * len(deps) + j][\"ftv_offset\"] = index\n\n ftv[index + 0][\"vid\"] = n_data + i * n_vocab + deps[j][0]\n ftv[index + 0][\"dense_equal_to\"] = 0 # not actually used\n ftv[index + 1][\"vid\"] = n_data + i * n_vocab + deps[j][1]\n ftv[index + 1][\"dense_equal_to\"] = 0 # not actually used\n index += 2\n\n return weight, variable, factor, ftv, domain_mask, n_edges", "def modified_gram_schmidt_step_arnoldi(j, vals):\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]", "def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1. / tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv", "def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # QRFactorization\n Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')\n\n if np.linalg.norm(R[-1, :], np.inf) < tol:\n warn('Singular Jacobian matrix. 
Using SVD decomposition to ' +\n 'perform the factorizations.')\n return svd_factorization_projections(A, m, n,\n orth_tol,\n max_refin,\n tol)\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v = np.zeros(m)\n v[P] = aux2\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(z)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v[P] = aux2\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n z = np.zeros(m)\n z[P] = aux2\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = Q inv(R.T) P.T x\n aux1 = x[P]\n aux2 = scipy.linalg.solve_triangular(R, aux1,\n lower=False,\n trans='T')\n z = Q.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def transform(fn):\n def _(vec, dt):\n return np.einsum(\n 'ji,i,ki,k...->j...',\n evecs, fn(evals, dt), evecs, vec, optimize=True)\n\n return _", "def solve_VFI(self):\r\n dimC = self.dimA ; dimA = self.dimA ; dimW = self.dimW \r\n C = self.c_grid ; A = self.a_grid ; W = self.W_grid\r\n tol = self.tol ; Niter = self.Niter ; R = self.R\r\n beta = self.beta ; Pi = self.Pi\r\n \r\n V0 = np.zeros((dimA,dimC,dimW))\r\n V1 = np.zeros((dimA,dimC,dimW))\r\n Pol = np.zeros((dimA,dimC,dimW))\r\n U = np.zeros((dimA,dimC,dimW))\r\n \r\n t0 = time()\r\n diff = 1 ; niter = 0\r\n \r\n while diff > tol:\r\n niter += 1\r\n # Value update step\r\n for ia in range(dimA):\r\n for ic in range(dimC):\r\n for iw in range(dimW):\r\n c = W[iw] + R*A[ia] - A\r\n x = C[ic]\r\n \r\n c[c < 0] = np.nan \r\n if x < 0:\r\n x = np.nan\r\n \r\n u = self.u(c,x) \r\n U[:,ic,iw] = u \r\n \r\n Objective = U + beta * V0 @ Pi.T\r\n V1[ia,:,:] = np.nanmax(Objective, axis = 0)\r\n Pol[ia,:,:] = np.nanargmax(Objective, axis = 0)\r\n \r\n # Evaluate distance between the value functions\r\n diff = np.max(np.max(np.abs(V1 - V0))) \r\n V0[:] = V1\r\n \r\n # Break the while loop if too many iterations\r\n #print(\"The current error is \"+str(diff))\r\n if niter > Niter:\r\n print('Ops, no convergence')\r\n break\r\n \r\n t1 = time()\r\n #print('VFI algorithm took {0:0d} iterations and {1:.2f} seconds.'.format(niter, t1 - t0))\r\n \r\n self.V1 = V1 ; self.Pol = Pol", "def _olver_asymptotic_uniform(v, z, output_log_space=False, name=None):\n with tf.name_scope(name or 'olver_asymptotic_uniform'):\n v_abs = tf.math.abs(v)\n w = z / v_abs\n t = tf.math.reciprocal(_sqrt1px2(w))\n n_ufactors = len(_ASYMPTOTIC_OLVER_EXPANSION_COEFFICIENTS)\n\n divisor = v_abs\n ive_sum = 1.\n kve_sum = 1.\n\n # Note the polynomials have properties of oddness and evenness so that\n # could be taken advantage of when doing evaluation. 
For simplicity,\n # we naively sum using Horner's method.\n for i in range(n_ufactors):\n coeff = 0.\n for c in _ASYMPTOTIC_OLVER_EXPANSION_COEFFICIENTS[i]:\n coeff = coeff * t + c\n term = coeff / divisor\n ive_sum = ive_sum + term\n kve_sum = kve_sum + (term if i % 2 == 1 else -term)\n divisor = divisor * v_abs\n\n # This is modified from the original impl to be more numerically stable\n # since we are subtracting off x.\n shared_prefactor = (tf.math.reciprocal(_sqrt1px2(w) + w) + tf.math.log(w)\n - tf.math.log1p(tf.math.reciprocal(t)))\n log_i_prefactor = 0.5 * tf.math.log(\n t / (2 * np.pi * v_abs)) + v_abs * shared_prefactor\n\n # Not the same here since they will have the same sign.\n log_k_prefactor = 0.5 * tf.math.log(\n np.pi * t / (2 * v_abs)) - v_abs * shared_prefactor\n\n log_kve = log_k_prefactor + tf.math.log(kve_sum)\n log_ive = log_i_prefactor + tf.math.log(ive_sum)\n\n # We need to add a correction term for negative v.\n negative_v_correction = log_kve - 2. * z\n n = tf.math.round(v)\n u = v - n\n coeff = 2 / np.pi * tf.math.sin(np.pi * u)\n coeff = (1. - 2. * tf.math.mod(n, 2.)) * coeff\n\n lse, sign = tfp_math.log_sub_exp(\n log_ive,\n negative_v_correction + tf.math.log(tf.math.abs(coeff)),\n return_sign=True)\n sign = tf.where(coeff < 0., sign, 1.)\n\n log_ive_negative_v = tf.where(\n coeff < 0.,\n lse,\n tfp_math.log_add_exp(\n log_ive, negative_v_correction + tf.math.log(tf.math.abs(coeff))))\n\n if output_log_space:\n log_ive = tf.where(v >= 0., log_ive, log_ive_negative_v)\n return log_ive, log_kve\n\n ive = tf.where(\n v >= 0.,\n tf.math.exp(log_ive),\n sign * tf.math.exp(log_ive_negative_v))\n return ive, tf.math.exp(log_kve)", "def _materialize_array(matvec, shape, dtype=None):\n x = jnp.zeros(shape, dtype)\n return jax.jacfwd(matvec)(x)", "def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence,\n V: jax.ShapedArray, H: jax.ShapedArray,\n tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n v = A_mv(V[:, k], *A_args)\n v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T)\n v_norm = jnp.linalg.norm(v_new)\n r_new = v_new / v_norm\n # Normalize v unless it is the zero vector.\n r_new = jax.lax.cond(v_norm > tol,\n lambda x: x[0] / x[1],\n lambda x: 0.*x[0],\n (v_new, v_norm)\n )\n H = jax.ops.index_update(H, jax.ops.index[:, k], H_k)\n H = jax.ops.index_update(H, jax.ops.index[k+1, k], v_norm)\n V = jax.ops.index_update(V, jax.ops.index[:, k+1], r_new)\n return V, H", "def i_matrix_calc(app):\n def permute(lst):\n tups = []\n tup = []\n if len(lst) > 1:\n tup = [(lst[i],lst[j]) for i in range(len(lst)) for j in range(i+1, len(lst))]\n tups.extend(tup)\n return tups\n \n api_calls = defaultdict(int)\n invokes = defaultdict(list)\n api_idx = 0\n print('Calculating I Matrix...')\n for file in tqdm(app):\n parsed_file = parseSmaliFile(open(file))\n for method in parsed_file['Methods']:\n for api_call in method['Android API']:\n api = api_call.replace('\\n','').split(' ')[-1]\n if api not in api_calls:\n api_calls[api] = api_idx\n api_idx += 1\n for method in parsed_file['Methods']:\n for api_call in method['Android API']:\n invoke = api_call.split(',')[0].split('}, ')[0]\n if invoke[-1] != '}': invoke += '}'\n api = api_call.replace('\\n','').split(' ')[-1]\n if invoke not in invokes:\n invokes[invoke] = [api_calls[api]]\n else:\n if api_calls[api] not in invokes[invoke]:\n invokes[invoke].append(api_calls[api])\n \n for val in invokes.keys():\n invokes[val] = permute(invokes[val])\n invokes = [item for sublist in list(invokes.values()) for item in sublist]\n \n 
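# At this point api_calls maps each API name to its matrix index, and\n # invokes is the flat list of API-index pairs sharing an invoke type\n # (the co-occurrence edges of the I matrix).\n 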
return api_calls, invokes", "def idiosyncratic_var_matrix(returns, factor_returns, factor_betas, ann_factor):\r\n residuals_ = returns - pd.DataFrame(np.dot(factor_returns, factor_betas.T), index=returns.index, columns=returns.columns)\r\n return pd.DataFrame(np.diag(residuals_.var(axis=0, ddof=1) * ann_factor),returns.columns,returns.columns)", "def val_get_right_gmt_matrix(x, k_list, l_list, m_list, mult_table_vals, ndims):\n intermed = np.zeros((ndims, ndims))\n test_ind = 0\n for m in m_list:\n j = l_list[test_ind]\n i = k_list[test_ind]\n intermed[j, i] += mult_table_vals[test_ind] * x[m]\n test_ind = test_ind + 1\n return intermed", "def lambda_matrix_kspace(glat, eps=1e-10):\n return lambda kvec: dynamical_matrix_kspace(kvec, glat, eps=eps)", "def matrix_capsules_em_routing(votes, i_activations, beta_v, beta_a, iterations, name):\n\n votes_shape = votes.get_shape().as_list()\n\n with tf.variable_scope(name) as scope:\n\n # Match rr (routing assignment) shape, i_activations shape with votes shape for broadcasting in EM routing\n\n # rr: [3x3x32=288, 32, 1]\n # rr: routing matrix from each input capsule (i) to each output capsule (o)\n rr = tf.constant(\n 1.0 / votes_shape[-2], shape=votes_shape[-3:-1] + [1], dtype=tf.float32\n )\n\n # i_activations: expand_dims to (24, 6, 6, 288, 1, 1)\n i_activations = i_activations[..., tf.newaxis, tf.newaxis]\n\n # beta_v and beta_a: expand_dims to (1, 1, 1, 1, 32, 1]\n beta_v = beta_v[..., tf.newaxis, :, tf.newaxis]\n beta_a = beta_a[..., tf.newaxis, :, tf.newaxis]\n\n def m_step(rr, votes, i_activations, beta_v, beta_a, inverse_temperature):\n \"\"\"The M-Step in EM Routing from input capsules i to output capsule j.\n i: input capsules (32)\n o: output capsules (32)\n h: 4x4 = 16\n output spatial dimension: 6x6\n :param rr: routing assignments. shape = (kh x kw x i, o, 1) =(3x3x32, 32, 1) = (288, 32, 1)\n :param votes. shape = (N, OH, OW, kh x kw x i, o, 4x4) = (24, 6, 6, 288, 32, 16)\n :param i_activations: input capsule activation (at Level L). 
(N, OH, OW, kh x kw x i, 1, 1) = (24, 6, 6, 288, 1, 1)\n with dimensions expanded to match votes for broadcasting.\n :param beta_v: Trainable parameters in computing cost (1, 1, 1, 1, 32, 1)\n :param beta_a: Trainable parameters in computing next level activation (1, 1, 1, 1, 32, 1)\n :param inverse_temperature: lambda, increase over each iteration by the caller.\n\n :return: (o_mean, o_stdv, o_activation)\n \"\"\"\n\n rr_prime = rr * i_activations\n\n # rr_prime_sum: sum over all input capsule i\n rr_prime_sum = tf.reduce_sum(rr_prime, axis=-3, keepdims=True, name='rr_prime_sum')\n\n # Mean of the output capsules: o_mean(24, 6, 6, 1, 32, 16)\n o_mean = tf.reduce_sum(\n rr_prime * votes, axis=-3, keepdims=True\n ) / rr_prime_sum\n\n # Standard deviation of the output capsule: o_stdv (24, 6, 6, 1, 32, 16)\n o_stdv = tf.sqrt(\n tf.reduce_sum(\n rr_prime * tf.square(votes - o_mean), axis=-3, keepdims=True\n ) / rr_prime_sum\n )\n\n # o_cost_h: (24, 6, 6, 1, 32, 16)\n o_cost_h = (beta_v + tf.log(o_stdv + epsilon)) * rr_prime_sum\n\n # o_cost: (24, 6, 6, 1, 32, 1)\n # o_activations_cost = (24, 6, 6, 1, 32, 1)\n # For numeric stability.\n o_cost = tf.reduce_sum(o_cost_h, axis=-1, keepdims=True)\n o_cost_mean = tf.reduce_mean(o_cost, axis=-2, keepdims=True)\n o_cost_stdv = tf.sqrt(\n tf.reduce_sum(\n tf.square(o_cost - o_cost_mean), axis=-2, keepdims=True\n ) / o_cost.get_shape().as_list()[-2]\n )\n o_activations_cost = beta_a + (o_cost_mean - o_cost) / (o_cost_stdv + epsilon)\n\n # (24, 6, 6, 1, 32, 1)\n o_activations = tf.sigmoid(\n inverse_temperature * o_activations_cost\n )\n\n return o_mean, o_stdv, o_activations\n\n def e_step(o_mean, o_stdv, o_activations, votes):\n \"\"\"The E-Step in EM Routing.\n\n :param o_mean: (24, 6, 6, 1, 32, 16)\n :param o_stdv: (24, 6, 6, 1, 32, 16)\n :param o_activations: (24, 6, 6, 1, 32, 1)\n :param votes: (24, 6, 6, 288, 32, 16)\n\n :return: rr\n \"\"\"\n\n o_p_unit0 = - tf.reduce_sum(\n tf.square(votes - o_mean) / (2 * tf.square(o_stdv)), axis=-1, keepdims=True\n )\n\n o_p_unit2 = - tf.reduce_sum(\n tf.log(o_stdv + epsilon), axis=-1, keepdims=True\n )\n\n # o_p is the probability density of the h-th component of the vote from i to j\n # (24, 6, 6, 1, 32, 16)\n o_p = o_p_unit0 + o_p_unit2\n\n # rr: (24, 6, 6, 288, 32, 1)\n zz = tf.log(o_activations + epsilon) + o_p\n rr = tf.nn.softmax(\n zz, axis=len(zz.get_shape().as_list()) - 2\n )\n\n return rr\n\n # inverse_temperature schedule (min, max)\n it_min = 1.0\n it_max = min(iterations, 3.0)\n for it in range(iterations):\n inverse_temperature = it_min + (it_max - it_min) * it / max(1.0, iterations - 1.0)\n o_mean, o_stdv, o_activations = m_step(\n rr, votes, i_activations, beta_v, beta_a, inverse_temperature=inverse_temperature\n )\n if it < iterations - 1:\n rr = e_step(\n o_mean, o_stdv, o_activations, votes\n )\n\n # pose: (N, OH, OW, o 4 x 4) via squeeze o_mean (24, 6, 6, 32, 16)\n poses = tf.squeeze(o_mean, axis=-3)\n\n # activation: (N, OH, OW, o) via squeeze o_activationis [24, 6, 6, 32]\n activations = tf.squeeze(o_activations, axis=[-3, -1])\n\n return poses, activations", "def alternative_iterative_method(x0, n, gamma, b):\n # Parameters:\n MAX_ITER = 1000\n n2 = n**2\n\n # Creating NxN versions of vector for easier indexing during iteration\n b = b.copy().reshape(n, n)\n b_transposed = b.copy().T\n x0 = x0.copy().reshape(n, n)\n x0_transposed = x0.copy().T\n x1 = x0.copy()\n x1_transposed = x0_transposed.copy()\n\n # No need for M, N, only a smaller tridiagonal system:\n H = scipy.sparse.diags((-1, 
2, -1), (-1, 0, 1), shape=(n, n), format=\"csr\")\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n, n), format=\"csr\")\n M1 = gammaI + H # Corresponds to both (gI + M) & (gI + N) in equations\n M2 = gammaI - H # Corresponds to both (gI - M) & (gI - N) in equations\n\n # Preallocating RHS of equations\n RHS7 = np.zeros((n, n), dtype=np.float64)\n RHS8 = np.zeros((n, n), dtype=np.float64)\n\n k = 0\n while k < MAX_ITER:\n for i in range(n): # Loading RHS values for Equation (7):\n RHS7[:, i] = scipy.sparse.csr_matrix.dot(M2, x0_transposed[i]) + b_transposed[i]\n for i in range(n): # Solving N independent tridig mat systems related to Eq(7):\n x1[i] = scipy.sparse.linalg.spsolve(M1, RHS7[i])\n RHS8[i] = scipy.sparse.csr_matrix.dot(M2, x1[i]) + b[i] # Loading RHS values for Equation (8):\n for i in range(n): # Solving N independent tridig mat systems related to Eq(8):\n x1_transposed[i] = scipy.sparse.linalg.spsolve(M1, RHS8[:, i])\n\n k += 1\n if np.allclose(x1_transposed, x0_transposed, rtol=1e-8):\n break\n x0_transposed = x1_transposed.copy()\n\n res = x1_transposed.T.reshape(n2)\n return res, k", "def test_tensor_can_be_canonicalized(free_alg):\n\n dr = free_alg\n p = dr.names\n i, j = p.R_dumms[:2]\n r = p.R\n m = p.m\n h = p.h\n v = p.v\n\n # Anti-symmetric real matrix.\n tensor = (\n dr.sum((i, r), (j, r), m[i, j] * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res == 0\n\n # With wrapping under an even function.\n tensor = (\n dr.sum((i, r), (j, r), m[i, j] ** 2 * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] ** 2 * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res.n_terms == 1\n term = res.local_terms[0]\n assert term.sums == ((i, r), (j, r))\n assert term.amp == 2 * m[i, j] ** 2\n assert term.vecs == (v[i], v[j])\n\n # With wrapping under an odd function.\n tensor = (\n dr.sum((i, r), (j, r), m[i, j] ** 3 * v[i] * v[j]) +\n dr.sum((i, r), (j, r), m[j, i] ** 3 * v[i] * v[j])\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res.n_terms == 0\n\n # Hermitian matrix.\n tensor = dr.einst(\n h[i, j] * v[i] * v[j] + conjugate(h[j, i]) * v[i] * v[j]\n )\n assert tensor.n_terms == 2\n res = tensor.simplify()\n assert res == 0", "def drx_solver_numpy(tol, steps, factor, C, Ct, X, M, k0, l0, f0, ind_c, ind_t, P, S, B, V, refresh,\n beams, inds, indi, indf, EIx, EIy, callback, **kwargs):\n res = 1000 * tol\n ts, Uo = 0, 0\n M = factor * tile(M.reshape((-1, 1)), (1, 3))\n\n while (ts <= steps) and (res > tol):\n\n uvw, l = uvw_lengths(C, X) # noqa: E741\n f = f0 + k0 * (l.ravel() - l0)\n\n if ind_t:\n f[ind_t] *= f[ind_t] > 0\n if ind_c:\n f[ind_c] *= f[ind_c] < 0\n\n if beams:\n S = _beam_shear(S, X, inds, indi, indf, EIx, EIy)\n\n q = f[:, newaxis] / l\n qt = tile(q, (1, 3))\n R = (P - S - Ct.dot(uvw * qt)) * B\n res = mean(normrow(R))\n\n V += R / M\n Un = sum(M * V**2)\n if Un < Uo:\n V *= 0\n Uo = Un\n\n X += V\n\n if refresh:\n if (ts % refresh == 0) or (res < tol):\n print('Step:{0} Residual:{1:.3f}'.format(ts, res))\n if callback:\n callback(X, **kwargs)\n\n ts += 1\n\n return X, f, l", "def dens_matrix_with_trace_opt(left_vector, right_vector):\n size = len(left_vector)\n if len(left_vector) != len(right_vector):\n raise ValueError('Incorrect dimensions')\n\n right_vector_conj = np.conj(right_vector)\n dm = np.zeros((size,) * 4, dtype=complex)\n\n fact_arr = np.array([factorial(x) for x in range(size)])\n tf2 = np.tensordot(fact_arr, 
fact_arr, axes=0)\n\n for p2 in range(size):\n for p2_ in range(size):\n for p4 in range(size):\n for p4_ in range(size):\n prod1 = np.multiply(left_vector[:, p2, :, p4], right_vector_conj[:, p2_, :, p4_])\n prod2 = prod1 * sqrt(factorial(p2) * factorial(p4) * factorial(p2_) * factorial(p4_))\n prod3 = np.multiply(prod2, tf2)\n dm[p2, p4, p2_, p4_] = np.sum(prod3)\n return dm", "def calc_matrix(mat_a, mat_d, case):\n nodes, = get_valdict(case, 'nodes')\n for i in range(0, nodes):\n for j in range(0, nodes):\n if i == j and i == 0:\n aW, aE, aP, Su = equation_nodes('left', case)\n mat_a[i, j] = aP\n mat_a[i, j+1] = -aE\n mat_d[i] = Su\n elif i == j and (i > 0 and i < nodes-1):\n aW, aE, aP, Su = equation_nodes('mid', case)\n mat_a[i, j-1] = -aW\n mat_a[i, j] = aP\n mat_a[i, j+1] = -aE\n mat_d[i] = Su\n elif i == j and (i == nodes-1):\n aW, aE, aP, Su = equation_nodes('right', case)\n mat_a[i, j-1] = -aW\n mat_a[i, j] = aP\n mat_d[i] = Su", "def solve_cholesky(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape)\n return jax.scipy.linalg.solve(A, b, sym_pos=True)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape)\n return jax.scipy.linalg.solve(A, b.ravel(), sym_pos=True).reshape(*b.shape)\n else:\n raise NotImplementedError", "def parafac(factors):\n ndims = len(factors)\n request = ''\n for temp_dim in range(ndims):\n request += lowercase[temp_dim] + 'z,'\n request = request[:-1] + '->' + lowercase[:ndims]\n return np.einsum(request, *factors)", "def create_matrix(array_vec, size_vector=128, size_block=32):\n matrix = []\n for i in array_vec:\n vec = [i[j:j + size_block] for j in range(0, len(i), size_block)]\n\n for j in range(size_block):\n t = []\n for k in vec:\n t += rotate(k, j)\n\n matrix.append(t)\n return matrix", "def k_isometric_monte_carlo(self, v, **kwargs):\r\n v = self.np_array(v)\r\n ensemble_average_fun = np.zeros(v.shape)\r\n for i, v_i in enumerate(v):\r\n self.beta_E = lambda lambda_: self.beta_U_1(lambda_) + \\\r\n self.beta_A_0_abs_isometric(1, lambda_)\r\n\r\n def serial_fun(init_config, **kwargs):\r\n return self.k_isometric_monte_carlo_serial(\r\n v_i, init_config, **kwargs\r\n )\r\n\r\n ensemble_average_fun[i] = self.parallel_calculation(\r\n serial_fun,\r\n self.minimize_beta_U(v_i)[2][-self.M:, 0],\r\n **kwargs\r\n )\r\n ensemble_average_fun_TS = np.zeros(v.shape)\r\n for i, v_i in enumerate(v):\r\n self.beta_E = lambda lambda_: \\\r\n self.beta_U_1(\r\n np.concatenate(([self.lambda_TS], lambda_))\r\n ) + self.beta_A_0_abs_isometric(\r\n 1, np.concatenate(([self.lambda_TS], lambda_))\r\n )\r\n\r\n def serial_fun(init_config, **kwargs):\r\n return self.k_isometric_monte_carlo_serial(\r\n v_i, init_config, **kwargs\r\n )\r\n\r\n ensemble_average_fun_TS[i] = self.parallel_calculation(\r\n serial_fun,\r\n self.minimize_beta_U(\r\n v_i, transition_state=True\r\n )[2][-(self.M - 1):, 0],\r\n **kwargs\r\n )\r\n return ensemble_average_fun_TS/ensemble_average_fun", "def val_get_left_gmt_matrix(x, k_list, l_list, m_list, mult_table_vals, ndims):\n intermed = np.zeros((ndims, ndims))\n test_ind = 0\n for k in k_list:\n j = l_list[test_ind]\n i = m_list[test_ind]\n intermed[j, i] += mult_table_vals[test_ind] * x[k]\n test_ind = test_ind + 1\n return intermed", "def free_alg(spark_ctx):\n\n dr = Drudge(spark_ctx)\n\n r = Range('R')\n dumms = sympify('i, j, k, l, m, n')\n dr.set_dumms(r, dumms)\n\n s = Range('S')\n s_dumms 
= symbols('alpha beta')\n dr.set_dumms(s, s_dumms)\n\n dr.add_resolver_for_dumms()\n\n # For testing the Einstein over multiple ranges.\n a1, a2 = symbols('a1 a2')\n dr.add_resolver({\n a1: (r, s), a2: (r, s)\n })\n dr.set_name(a1, a2)\n\n v = Vec('v')\n dr.set_name(v)\n\n m = IndexedBase('m')\n dr.set_symm(m, Perm([1, 0], NEG))\n\n h = IndexedBase('h')\n dr.set_symm(h, Perm([1, 0], NEG | CONJ))\n\n rho = IndexedBase('rho')\n dr.set_symm(rho, Perm([1, 0, 3, 2]), valence=4)\n\n dr.set_tensor_method('get_one', lambda x: 1)\n\n return dr", "def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):\n x = tf.nn.l2_normalize(x)\n for _ in range(num_steps):\n x = eig_one_step(x, learning_rate, vector_prod_fn)\n return x", "def NMF(model, maxIter=100, beliefs=None, verbose=False):\n if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]\n \n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter 0: \"+str(lnZ))\n\n for t in xrange(1,maxIter+1): # for each iteration:\n # Update all the beliefs via coordinate ascent:\n for Xi in model.X: # for each variable, \n bNew = 0.0 # compute E[ log f ] as a function of Xi:\n for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:\n m = f.log() # E[log f_a] = \\sum \\log f_a \\prod b_v\n for v in f.vars - [Xi]: m *= beliefs[v]\n bNew += m.marginal([Xi]) # sum them up to get E[log f]\n bNew -= bNew.max() # (numerical issues)\n bNew = bNew.exp()\n bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z\n beliefs[Xi] = bNew\n #\n # Compute the lower bound on the partition function:\n # E_b [ log f ] + H(b) = \\sum_a E[log f_a] + \\sum_i H(b_i) for independent beliefs\n lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])\n for f in model.factors:\n m = f.log()\n for v in f.vars: m *= beliefs[v]\n lnZ += m.sum()\n if verbose: print(\"Iter \"+str(t)+\": \"+str(lnZ))\n return lnZ,beliefs", "def get_adp_from_calc(vx, vy, vz):\n ## lx=np.linalg.norm(vx)\n ## ly=np.linalg.norm(vy)\n ## lz=np.linalg.norm(vz)\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0],\n [0, ly, 0],\n [0, 0, lz]])\n\n\n ## Vx=vx/lx\n ## Vy=vy/ly\n ## Vz=vz/lz\n Vx = np.array([1, 0, 0])\n Vy = np.array([0, 1, 0])\n Vz = np.array([0, 0, 1])\n V = np.matrix([[Vx[0], Vy[0], Vz[0]],\n [Vx[1], Vy[1], Vz[1]],\n [Vx[2], Vy[2], Vz[2]]])\n Vinv = np.linalg.inv(V)\n #print V,Vinv\n M = np.dot(np.dot(Vinv, L), V)\n #print M\n return M", "def calculate_posvij_matrices(main_tetrad_ark):\n\n # Import all the possible solutions to the Vij matrices\n vij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n vij_matrices = []\n\n print(\" \")\n print(\" Calculating Vij matrices\")\n print(\" \")\n # for i in range(0, len(main_tetrad_ark)):\n for i in range(0, len(vij_possibilities)):\n tet_i = [x[1] for x in main_tetrad_ark[i]]\n tri_tet = [np.transpose(i) for i in tet_i]\n print(\"# ********************************\")\n # print(\" \")\n print(\"MATRIX i: \", i)\n print(\" \")\n for j in range(0, len(main_tetrad_ark)):\n tet_j = [x[1] for x in main_tetrad_ark[j]]\n trj_tet = [np.transpose(j) for j in tet_j]\n vij_temp = []\n # print(\"# ********************************\")\n print(\" \")\n print(\"MATRIX j: \", j)\n temp_zero = np.zeros((4,4), dtype=int)\n for x in range(0,len(tet_i)):\n test_1half = np.dot(tri_tet[x],tet_j[x])\n test_2half = np.dot(trj_tet[x],tet_i[x])\n test_difs = np.subtract(test_1half, test_2half)\n # print(\" 
\")\n # print(test_difs)\n temp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n vij_temp.append(temp_mat)\n # print(\"\")\n temp_add1 = np.add(vij_temp[0], vij_temp[1])\n temp_add2 = np.add(temp_add1, vij_temp[2])\n tempf = np.add(temp_add2, vij_temp[3])\n # tempf = np.divide(temp_add3, 2)\n for ijx in vij_possibilities:\n if np.array_equal(temp_addf, ijx[0]):\n print(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n print(\"l-solution found:\", ijx[1])\n print(temp_addf)\n print(\"\")\n print(ijx[0])\n if np.array_equal(temp_addf, temp_zero):\n pass\n else:\n vij_matrices.append(temp_addf)\n # print(\"\")\n print(temp_addf)\n # vij_matrices.append(temp_addf)\n vijmats_size = sys.getsizeof(vij_matrices)\n print(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n print(\"Length of Vij Matrices\")\n print(len(vij_matrices))\n print(vij_matrices)\n pass", "def rk_adaptive(accel,m,r,h,v,recur,emin=10**-12,emax=10**-8,hmax=.1,hmin=.01,recurmax=100):\n k1v = accel(m,r)\n k1r = v\n k2v = accel(m,r + 0.25*k1r*h)\n k2r = v + (0.25*k1v)*h\n k3v = accel(m,r + (3/32.*k1r + 9/32.*k2r)*h)\n k3r = v + (3/32.*k1v + 9/32.*k2v)*h\n k4v = accel(m,r + (1932/2197.*k1r - 7200/2197.*k2r + 7296/2197.*k3r)*h)\n k4r = v + (1932/2197.*k1v - 7200/2197.*k2v + 7296/2197.*k3v)*h\n k5v = accel(m,r + (439/216.*k1r - 8*k2r + 3680/513.*k3r - 845/4104.*k4r)*h)\n k5r = v + (439/216.*k1v - 8*k2v + 3680/513.*k3v - 845/4104.*k4v)*h\n k6v = accel(m,r - (8/27.*k1r + 2*k2r - 3544/2565.*k3r + 1859/4104.*k4r - 11/40.*k5r)*h)\n k6r = v - (8/27.*k1v + 2*k2v - 3544/2565.*k3v + 1859/4104.*k4v - 11/40.*k5v)*h\n\n # 4th order calculation\n new_v4 = v + h*(25/216.*k1v + 1408/2565.*k3v + 2197/4104.*k4v - 1/5.*k5v)\n new_r4 = r + h*(25/216.*k1r + 1408/2565.*k3r + 2197/4104.*k4r - 1/5.*k5r)\n \n # 5th order calculation\n new_v5 = v + h*(16/135.*k1v + 6656/12825.*k3v+28561/56430.*k4v - 9/50.*k5v + 2/55.*k6v) \n new_r5 = r + h*(16/135.*k1r + 6656/12825.*k3r+28561/56430.*k4r - 9/50.*k5r + 2/55.*k6r) \n\n # Calculate truncation error between 5th and 4th order\n eps = np.abs( (np.max(np.abs(new_r5)) - np.max(np.abs(new_r4))) / np.max(np.abs(new_r4)))\n \n # Compare eps to emin and emax and update h accordingly\n if np.max(eps) < emin:\n if h*2.0 < hmax:\n h *= 2.0\n new_v = new_v5\n new_r = new_r5 \n \n if np.max(eps) > emax:\n if h/2.0 > hmin:\n h /= 2.0\n print h\n # Error too large, call rk_adaptive again with smaller h\n if recur < recurmax:\n recur += 1\n rk_adaptive(accel,m,r,h,v,recur)\n new_v = new_v5\n new_r = new_r5\n \n else:\n new_v = new_v5\n new_r = new_r5\n \n return new_v, new_r, h", "def lambda_fun_mat(icimeq_funk, x):\n # input must be 1d numpy.ndarray\n assert isinstance(x, np.ndarray) and x.ndim == 1\n\n # result from the lambdified function with array input\n abo = icimeq_funk(x)\n\n # for each row in the array\n for i,a in enumerate(abo):\n # fix the term not properly broadcast in the lambdified funtion\n a_fix = np.broadcast_arrays(*a)\n abo[i] = a_fix\n\n # cleaning to work with a proper array (no array of arrays of arrays !)\n abo_fix = np.array(abo.tolist())\n\n # roll the axis to obtain an array of matrix, and not a matrix of arrays\n abo_fix = np.moveaxis(abo_fix, -1, 0)\n\n return abo_fix", "def _krls_evaluate(self, dAldKRLS):\n \n # Get the needed data from the dictionary with data\n mDict = dAldKRLS['mDict']\n vAlpha = dAldKRLS['vAlpha']\n \n (iRowsDict, _) = mDict.shape # Get the number of rows from the dictionary\n if iRowsDict > 0:\n vX = np.dot(vAlpha.T, 
mDict)\n else:\n vX = np.zeros((iRowsDict,1))\n \n return vX", "def assembly_matrix(Afun, solutions):\n dim = len(solutions)\n if not np.allclose(Afun.N, solutions[0].N):\n Nbar = Afun.N\n sol = []\n for ii in np.arange(dim):\n sol.append(solutions[ii].project(Nbar))\n else:\n sol = solutions\n\n AH = np.zeros([dim, dim])\n for ii in np.arange(dim):\n for jj in np.arange(dim):\n AH[ii, jj] = Afun(sol[ii]) * sol[jj]\n return AH", "def lu_factorization (M) -> list:\n dim = len(M)\n L = np.eye(dim)\n\n #Itero sulle Incognite da Trovare\n for i in range(dim-1):\n\n #Itero sulle righe su cui devo cancellare un elemento\n for j in range(i+1,dim):\n m__j_i = M[j][i] / M[i][i]\n L[j][i] = m__j_i\n \n M[j][i] = 0.0\n\n for k in range (i+1,dim):\n M[j][k] = M[j][k] - m__j_i * M[i][k]\n \n\n return M,L", "def kullback_leibler_iaf(z, logqz_x, beta=1., **args):\n \n logpz = -tf.reduce_sum(input_tensor=0.5 * np.log(2*np.pi) + 0.5 * tf.square(z), axis=-1)\n kl = beta * tf.reduce_mean(input_tensor=logqz_x - logpz)\n return kl", "def __call__(self, alms: dict[str: np.ndarray]):\n ivf_alms = dict()\n for f in self.maps_labels:\n assert f in alms, alms.keys()\n assert alms[f].ndim == 1\n ivf_alms[f] = self._almxflcopy(f, self._fal[f+f] * self.transfs_i[f], alms[f])\n self._build_fal()\n for fg in self._fal:\n assert len(fg) % 2 == 0, fg\n f, g = fg[:len(fg) // 2], fg[len(fg) // 2:]\n if f != g: # off-diagonals, checking explicitly symmetry\n fac = 1 if (g + f) in self._fal else 2\n assert (g + f not in self._fal) or (self._fal[g + f] is self._fal[f + g])\n ivf_alms[f] += self._almxflcopy(f, fac * self._fal[fg] * self.transfs_i[g], alms[g])\n return ivf_alms", "def build_laplacian_nearest_neighbor_graph(\n input_vecs: types.Tensor, k: int = 1\n) -> types.Tensor:\n num_actions = tf.shape(input_vecs)[0]\n pdistance_matrix = compute_pairwise_distances(input_vecs)\n sorted_indices = tf.argsort(values=pdistance_matrix)\n selected_indices = tf.reshape(sorted_indices[:, 1 : k + 1], [-1, 1])\n rng = tf.tile(tf.expand_dims(tf.range(num_actions), axis=-1), [1, k])\n rng = tf.reshape(rng, [-1, 1])\n full_indices = tf.concat([rng, selected_indices], axis=1)\n adjacency_matrix = tf.zeros([num_actions, num_actions], dtype=tf.float32)\n adjacency_matrix = tf.tensor_scatter_nd_update(\n tensor=adjacency_matrix,\n indices=full_indices,\n updates=tf.ones([k * num_actions], dtype=tf.float32),\n )\n # Symmetrize it.\n adjacency_matrix = adjacency_matrix + tf.transpose(adjacency_matrix)\n adjacency_matrix = tf.minimum(\n adjacency_matrix, tf.ones_like(adjacency_matrix)\n )\n degree_matrix = tf.linalg.tensor_diag(tf.reduce_sum(adjacency_matrix, axis=1))\n laplacian_matrix = degree_matrix - adjacency_matrix\n return laplacian_matrix", "def solve_gevp_gen(a, t_0, algorithm, sort_by_vectors=15, **kwargs):\n B = np.matrix(a[t_0])\n try:\n f = algorithm(B=B, **kwargs)\n except TypeError:\n # If the function doesn't do currying, implement that here\n f = lambda A: algorithm(B=B, A=A)\n except LinAlgError:\n return\n\n eigenvectors = None\n count = 0\n\n for j in range(t_0 + 1, 32):\n try:\n eigenvalues, new_eigenvectors = f(np.matrix(a[j]))\n \n if eigenvectors is None:\n eigenvectors = np.zeros_like(new_eigenvectors)\n\n if j < sort_by_vectors:\n # TODO Sortieren nach Eigenwert\n perm = permutation_indices(eigenvalues)\n else:\n perm = reorder_by_ev(new_eigenvectors, eigenvectors, B)\n\n eigenvectors = new_eigenvectors[:,perm]\n eigenvalues = eigenvalues[:,perm]\n \n count += 1\n\n yield eigenvalues, eigenvectors\n\n except 
(LinAlgError, TypeError) as e:\n #import traceback\n #traceback.print_exc()\n return", "def eig_one_step(current_vector, learning_rate, vector_prod_fn):\n grad = 2*vector_prod_fn(current_vector)\n # Current objective = (1/2)*v^T (2*M*v); v = current_vector\n # grad = 2*M*v\n current_objective = tf.reshape(tf.matmul(tf.transpose(current_vector),\n grad) / 2., shape=())\n\n # Project the gradient into the tangent space of the constraint region.\n # This way we do not waste time taking steps that try to change the\n # norm of current_vector\n grad = grad - current_vector*tf.matmul(tf.transpose(current_vector), grad)\n grad_norm = tf.norm(grad)\n grad_norm_sq = tf.square(grad_norm)\n\n # Computing normalized gradient of unit norm\n norm_grad = grad / grad_norm\n\n # Computing directional second derivative (dsd)\n # dsd = 2*g^T M g, where g is normalized gradient\n directional_second_derivative = (\n tf.reshape(2*tf.matmul(tf.transpose(norm_grad),\n vector_prod_fn(norm_grad)),\n shape=()))\n\n # Computing grad^\\top M grad [useful to compute step size later]\n # Just a rescaling of the directional_second_derivative (which uses\n # normalized gradient\n grad_m_grad = directional_second_derivative*grad_norm_sq / 2\n\n # Directional_second_derivative/2 = objective when vector is norm_grad\n # If this is smaller than current objective, simply return that\n if directional_second_derivative / 2. < current_objective:\n return norm_grad\n\n # If curvature is positive, jump to the bottom of the bowl\n if directional_second_derivative > 0.:\n step = -1. * grad_norm / directional_second_derivative\n else:\n # If the gradient is very small, do not move\n if grad_norm_sq <= 1e-16:\n step = 0.0\n else:\n # Make a heuristic guess of the step size\n step = -2. * tf.reduce_sum(current_vector*grad) / grad_norm_sq\n # Computing gain using the gradient and second derivative\n gain = -(2 * tf.reduce_sum(current_vector*grad) +\n (step*step) * grad_m_grad)\n\n # Fall back to pre-determined learning rate if no gain\n if gain < 0.:\n step = -learning_rate * grad_norm\n current_vector = current_vector + step * norm_grad\n return tf.nn.l2_normalize(current_vector)", "def get_vf_matrix(self, geom_dict, view_matrix, obstr_matrix, list_pvrow):\n n_all_surfaces = view_matrix.shape[0]\n view_factors = np.zeros((n_all_surfaces, n_all_surfaces), dtype=float)\n\n # --- First deal with finite surfaces from the registry, and treat only\n # half of the views because symmetry will be used next\n n_finite_surfaces = n_all_surfaces - 1 # no sky\n view_matrix_upper_finite_surfaces = np.triu(\n view_matrix[:n_finite_surfaces, :n_finite_surfaces])\n indices_views_finite = np.where(view_matrix_upper_finite_surfaces)\n\n n_views = len(indices_views_finite[0])\n geometries = list(geom_dict.values())\n for i in range(n_views):\n idx = (indices_views_finite[0][i], indices_views_finite[1][i])\n view = self.mapper.reverse_view[view_matrix[idx]]\n line_i = geometries[idx[0]]\n line_j = geometries[idx[1]]\n obstr_index = obstr_matrix[idx]\n if obstr_index is not None:\n obstructing_pvrow = list_pvrow[obstr_matrix[idx]]\n else:\n obstructing_pvrow = None\n # The following line takes the most time to execute (looped)\n view_factors[idx] = self.mapper.function_mapping[view](\n line_i, line_j, obstructing_pvrow)\n\n # Use the reciprocity property of view factors to speed up the\n # vfactor calculation: A_1 * F_1-2 = A_2 * F_2-1 ==> symmetric matrx\n areas = np.array([surf.length for surf in geometries])\n matrix_areas = np.diag(areas)\n 
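# The inverse-area scaling below converts the symmetrized, area-weighted\n # matrix back into view factors.\n 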
matrix_areas_inv = np.diag(1. / areas)\n\n upper_matrix_reciprocity = np.dot(matrix_areas,\n view_factors[:n_finite_surfaces,\n :n_finite_surfaces])\n\n total_matrix_reciprocity = (upper_matrix_reciprocity +\n upper_matrix_reciprocity.T)\n finite_vf_matrix = np.dot(matrix_areas_inv, total_matrix_reciprocity)\n view_factors[:n_finite_surfaces, :n_finite_surfaces] = finite_vf_matrix\n\n # --- Then do the calculations for the sky, which is the remaining\n # portion of the hemisphere\n view_factors[:-1, -1] = 1. - np.sum(view_factors[:-1, :-1], axis=1)\n return view_factors", "def linear_film_generator(embedding,\n block_sizes,\n filter_sizes,\n enabled_block_layers = None):\n if enabled_block_layers:\n if len(enabled_block_layers) != len(block_sizes):\n raise ValueError(\n 'Got {} bools for enabled_block_layers, expected {}'.format(\n len(enabled_block_layers), len(block_sizes)))\n # FiLM generator - just a linear projection of embedding.\n film_gamma_betas = []\n for i, num_blocks in enumerate(block_sizes):\n if enabled_block_layers and not enabled_block_layers[i]:\n # Do not generate FiLM vectors for this block layer.\n film_gamma_betas.append([None]*num_blocks)\n else:\n num_filters = filter_sizes[i]\n film_output_size = num_blocks * num_filters * 2\n film_gamma_beta = slim.fully_connected(\n embedding,\n film_output_size,\n scope='film{}'.format(i),\n normalizer_fn=None,\n activation_fn=None)\n film_gamma_betas.append(tf.split(film_gamma_beta, num_blocks, axis=-1))\n return film_gamma_betas", "def factored_rolling(decay_rate: float, epsilon: float = 1e-30) -> _InitUpdate:\n\n def init_fn(params: Any) -> FactoredAccum:\n\n def _init_one(param):\n shape = param.shape\n f_dims = factored_dims(shape)\n # If factored, set v_row, v_col. Otherwise set v_full\n if f_dims is not None:\n d1, d0 = f_dims\n vr_shape = onp.delete(shape, d0)\n vc_shape = onp.delete(shape, d1)\n v_row = jnp.zeros(vr_shape, dtype=jnp.float32)\n v_col = jnp.zeros(vc_shape, dtype=jnp.float32)\n return v_row, v_col, jnp.asarray([], dtype=jnp.float32)\n\n else:\n v = jnp.zeros(param.shape, dtype=jnp.float32)\n return jnp.asarray([],\n dtype=jnp.float32), jnp.asarray([],\n dtype=jnp.float32), v\n\n leaves, tree = jax.tree_util.tree_flatten(params)\n v_rows, v_cols, v_fulls = zip(*[_init_one(l) for l in leaves])\n return FactoredAccum(\n v_row=jax.tree_util.tree_unflatten(tree, v_rows),\n v_col=jax.tree_util.tree_unflatten(tree, v_cols),\n v_diag=jax.tree_util.tree_unflatten(tree, v_fulls))\n\n def update_fn(state: FactoredAccum, grad: Any) -> Tuple[FactoredAccum, Any]:\n\n def update_one(v_col: Any, v_row: Any, v_full: Any,\n g: Any) -> Tuple[Any, Any, Any, Any]:\n clip_decay_rate = jnp.clip(decay_rate, 0.0, 1.0)\n mixing_rate = 1.0 - clip_decay_rate\n\n grad_sqr = g * g + epsilon\n f_dims = factored_dims(g.shape)\n\n if f_dims is not None:\n # precondition with factored dimensions.\n d1, d0 = f_dims\n new_v_row = (\n clip_decay_rate * v_row + mixing_rate * jnp.mean(grad_sqr, axis=d0))\n new_v_col = (\n clip_decay_rate * v_col + mixing_rate * jnp.mean(grad_sqr, axis=d1))\n\n reduced_d1 = d1 - 1 if d1 > d0 else d1\n row_col_mean = jnp.mean(new_v_row, axis=reduced_d1, keepdims=True)\n\n row_factor = safe_rsqrt(new_v_row / (row_col_mean + 1e-9))\n col_factor = safe_rsqrt(new_v_col)\n y = (\n g * jnp.expand_dims(row_factor, axis=d0) *\n jnp.expand_dims(col_factor, axis=d1))\n return new_v_col, new_v_row, jnp.asarray([], jnp.float32), y\n\n else:\n # otherwise precondition with diagonal style preconditioner\n new_v = 
clip_decay_rate * v_full + mixing_rate * grad_sqr\n y = g * safe_rsqrt(new_v + 1e-9)\n return jnp.asarray([], jnp.float32), jnp.asarray([],\n jnp.float32), new_v, y\n\n f_v_col, tree = jax.tree_util.tree_flatten(state.v_col)\n f_v_row = jax.tree_util.tree_leaves(state.v_row)\n f_v = jax.tree_util.tree_leaves(state.v_diag)\n f_g = jax.tree_util.tree_leaves(grad)\n assert len(f_g) == len(f_v_col)\n assert len(f_g) == len(f_v)\n assert len(f_g) == len(f_v_row)\n f_v_col, f_v_row, f_v, outs = zip(\n *[update_one(*args) for args in zip(f_v_col, f_v_row, f_v, f_g)])\n\n next_state = FactoredAccum(\n v_col=jax.tree_util.tree_unflatten(tree, f_v_col),\n v_row=jax.tree_util.tree_unflatten(tree, f_v_row),\n v_diag=jax.tree_util.tree_unflatten(tree, f_v))\n\n return next_state, jax.tree_util.tree_unflatten(tree, outs)\n\n return _InitUpdate(init_fn, update_fn)", "def OIT_solver_fisher_rao(I0, I1, niter, eps, lamb, inverse_inertia_op,\n callback=None):\n # Get the space of I0\n domain = I0.space\n \n # Initialize the determinant of Jacobian of inverse deformation\n DPhiJacobian = domain.one()\n\n # Create gradient operator and divergence operator\n grad_op = Gradient(domain, method='forward', pad_mode='symmetric')\n div_op = - grad_op.adjoint\n \n # Create the temporary elements for update\n v = grad_op.range.element()\n\n # Initialize the non-mass-preserving deformed template\n non_mp_deform_I0 = I0\n \n inv_inertia_op = inverse_inertia_op\n\n # Store energy\n E = []\n kE = len(E)\n E = np.hstack((E, np.zeros(niter)))\n \n# print('Chong Chen')\n\n # Begin iteration\n for k in range(niter):\n # Compute the energy of the regularization term\n E[k+kE] = np.asarray(lamb * (np.sqrt(DPhiJacobian) - 1) ** 2).sum()\n\n # Implementation for mass-preserving case\n PhiStarI0 = DPhiJacobian * non_mp_deform_I0\n\n # Show intermediate result\n if callback is not None:\n callback(PhiStarI0)\n\n # For Fisher-Rao distance\n sqrt_mp_I0 = np.sqrt(PhiStarI0)\n sqrt_I1 = np.sqrt(I1)\n grad_sqrt_mp_I0 = grad_op(sqrt_mp_I0)\n grad_sqrt_I1 = grad_op(sqrt_I1)\n \n # Compute the energy of the data fitting term \n E[k+kE] += np.asarray((sqrt_mp_I0 - sqrt_I1)**2).sum()\n\n # Compute the L2 gradient of the data fitting term\n grad_fitting = grad_op.range.zero()\n for i in range(grad_op.range.size):\n grad_fitting[i] = sqrt_I1 * grad_sqrt_mp_I0[i] - \\\n sqrt_mp_I0 * grad_sqrt_I1[i]\n \n # Compute the minus L2 gradient\n u = - lamb * grad_op(np.sqrt(DPhiJacobian)) - grad_fitting\n\n # Compute inverse inertia\n v = inv_inertia_op(u)\n\n # Update the non-mass-preserving deformed template\n non_mp_deform_I0 = domain.element(\n _linear_deform(non_mp_deform_I0, - eps * v))\n\n # Implementation for updating Jacobian determinant\n DPhiJacobian = (1.0 - eps * div_op(v)) * domain.element(\n _linear_deform(DPhiJacobian, - eps * v))\n \n return PhiStarI0, E", "def mr(A, n_iterations, stop=False):\n assert len(A.sizes) == 2\n assert A.sizes[0] == A.sizes[1]\n M = A.same_shape()\n n = A.sizes[0]\n @for_range(n)\n def _(i):\n e = sfix.Array(n)\n e.assign_all(0)\n e[i] = 1\n M[i] = solve_linear(A, e, n_iterations, stop=stop)\n return M.transpose()", "def ilike(init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoef, N):\r\n\tdetD, gfunc, U = np.zeros(N), np.zeros(N), np.zeros(N)\r\n\tfor i in range(N):\r\n\t\tif np.isfinite(vrad_obs[i]):\r\n\t\t\tdetD[i], gfunc[i] = _like4(init_par, alpha[i], delta[i], plx_obs[i],\r\n\t\t\t\t\t\t mualpha_obs[i], mudelta_obs[i], vrad_obs[i], sigma_obs[i,:], 
sigma_vrad[i], ccoef[i, :], i)\r\n\t\t\tU[i] = np.log(detD[i]) + gfunc[i] + 4.*np.log(2.*np.pi)\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tdetD[i], gfunc[i] = _like3(init_par, alpha[i], delta[i], plx_obs[i],\r\n\t\t\t\t\t\t mualpha_obs[i], mudelta_obs[i], sigma_obs[i,:], ccoef[i, :], i)\t\r\n\t\t\tU[i] = np.log(detD[i]) + gfunc[i] + 3.*np.log(2.*np.pi)\r\n\r\n\tL = np.sum(U)\r\n\treturn L, gfunc", "def csr_mulvec_wrap(fn):\n\n @functools.wraps(fn)\n def csr_mul_vector(A, x):\n if A.nnz > 50000 and _NUM_THREAD_WORKERS > 1:\n return par_dot_csr_matvec(A, x)\n else:\n y = fn(A, x)\n if isinstance(x, qarray):\n y = qarray(y)\n return y\n\n return csr_mul_vector", "def computesparsecholesky(self,multithread_,ordermethod_,tolsingular_,anzc,aptrc,asubc,avalc): # 3\n n_ = None\n if n_ is None:\n n_ = len(anzc)\n elif n_ != len(anzc):\n raise IndexError(\"Inconsistent length of array anzc\")\n if n_ is None:\n n_ = len(aptrc)\n elif n_ != len(aptrc):\n raise IndexError(\"Inconsistent length of array aptrc\")\n if n_ is None: n_ = 0\n if anzc is None: raise TypeError(\"Invalid type for argument anzc\")\n if anzc is None:\n anzc_ = None\n else:\n try:\n anzc_ = memoryview(anzc)\n except TypeError:\n try:\n _tmparr_anzc = array.array(\"i\",anzc)\n except TypeError:\n raise TypeError(\"Argument anzc has wrong type\")\n else:\n anzc_ = memoryview(_tmparr_anzc)\n \n else:\n if anzc_.format != \"i\":\n anzc_ = memoryview(array.array(\"i\",anzc))\n \n if aptrc is None: raise TypeError(\"Invalid type for argument aptrc\")\n if aptrc is None:\n aptrc_ = None\n else:\n try:\n aptrc_ = memoryview(aptrc)\n except TypeError:\n try:\n _tmparr_aptrc = array.array(\"q\",aptrc)\n except TypeError:\n raise TypeError(\"Argument aptrc has wrong type\")\n else:\n aptrc_ = memoryview(_tmparr_aptrc)\n \n else:\n if aptrc_.format != \"q\":\n aptrc_ = memoryview(array.array(\"q\",aptrc))\n \n if asubc is None: raise TypeError(\"Invalid type for argument asubc\")\n if asubc is None:\n asubc_ = None\n else:\n try:\n asubc_ = memoryview(asubc)\n except TypeError:\n try:\n _tmparr_asubc = array.array(\"i\",asubc)\n except TypeError:\n raise TypeError(\"Argument asubc has wrong type\")\n else:\n asubc_ = memoryview(_tmparr_asubc)\n \n else:\n if asubc_.format != \"i\":\n asubc_ = memoryview(array.array(\"i\",asubc))\n \n if avalc is None: raise TypeError(\"Invalid type for argument avalc\")\n if avalc is None:\n avalc_ = None\n else:\n try:\n avalc_ = memoryview(avalc)\n except TypeError:\n try:\n _tmparr_avalc = array.array(\"d\",avalc)\n except TypeError:\n raise TypeError(\"Argument avalc has wrong type\")\n else:\n avalc_ = memoryview(_tmparr_avalc)\n \n else:\n if avalc_.format != \"d\":\n avalc_ = memoryview(array.array(\"d\",avalc))\n \n res,resargs = self.__obj.computesparsecholesky(multithread_,ordermethod_,tolsingular_,n_,anzc_,aptrc_,asubc_,avalc_)\n if res != 0:\n raise Error(rescode(res),\"\")\n _perm,_diag,_lnzc,_lptrc,_lensubnval_return_value,_lsubc,_lvalc = resargs\n return _perm,_diag,_lnzc,_lptrc,_lensubnval_return_value,_lsubc,_lvalc", "def graphize_velocity(\n V: np.ndarray,\n X: np.ndarray,\n nbrs_idx: Union[np.ndarray, List[int]] = None,\n dists: np.ndarray = None,\n k: int = 30,\n normalize_v: bool = False,\n scale_by_dist: bool = False,\n E_func: Union[Literal[\"sqrt\", \"exp\"], Callable, None] = None,\n use_sparse: bool = False,\n return_nbrs: bool = False,\n) -> Union[\n Tuple[Union[np.ndarray, sp.lil_matrix], Union[List[int], np.ndarray], np.ndarray],\n Tuple[Union[np.ndarray, sp.lil_matrix], Union[List[int], 
np.ndarray], np.ndarray, NearestNeighbors],\n]:\n\n n = X.shape[0]\n\n if (nbrs_idx is not None) and return_nbrs:\n main_warning(\n \"nbrs_idx argument is ignored and recomputed because nbrs_idx is not None and return_nbrs=True\",\n indent_level=2,\n )\n\n if nbrs_idx is None or return_nbrs:\n main_info(\"calculating neighbor indices...\", indent_level=2)\n nbrs_idx, dists, nbrs = k_nearest_neighbors(X, k, exclude_self=True, return_nbrs=True)\n\n if dists is None:\n dists = nbrs_to_dists(X, nbrs_idx)\n\n if type(E_func) is str:\n if E_func == \"sqrt\":\n E_func = np.sqrt\n elif E_func == \"exp\":\n E_func = np.exp\n else:\n raise NotImplementedError(\"The specified edge function is not implemented.\")\n\n if normalize_v:\n V_norm = np.linalg.norm(V, axis=1)\n V_norm[V_norm == 0] = 1\n V = np.array(V, copy=True)\n V = (V.T / V_norm).T\n\n if use_sparse:\n E = sp.lil_matrix((n, n))\n else:\n E = np.zeros((n, n))\n\n for i in range(n):\n x = flatten(X[i])\n idx = nbrs_idx[i]\n dist = dists[i]\n if len(idx) > 0 and idx[0] == i: # excluding the node itself from the neighbors\n idx = idx[1:]\n dist = dist[1:]\n vi = flatten(V[i])\n\n # normalized differences\n U = X[idx] - x\n dist[dist == 0] = 1\n U /= dist[:, None]\n\n for jj, j in enumerate(idx):\n vj = flatten(V[j])\n u = flatten(U[jj])\n v = np.mean((vi.dot(u), vj.dot(u)))\n if scale_by_dist:\n v /= dist[jj]\n\n if E_func is not None:\n v = np.sign(v) * E_func(np.abs(v))\n E[i, j] = v\n E[j, i] = -v\n\n if return_nbrs:\n return E, nbrs_idx, dists, nbrs\n return E, nbrs_idx, dists", "def solve_pcaw(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, head, invhead, mean, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n \n def vec(x):\n return tf.reshape(x, [-1])\n\n def A_cgs_fun(x):\n x = tf.reshape(x,x_shape)\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = tf.reshape(z, x_shape)\n return z\n\n\n def A_cgs_fun_init(x):\n x = tf.reshape(x, x_shape)\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = tf.reshape(z,x_shape)\n return z\n\n # initialize z and u\n z = tf.reshape(mean,x_shape)\n u = np.zeros(x_shape)\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n \n Wzu = head([net_input])\n q = tfp.math.soft_threshold(Wzu, lambda_l1/alpha)\n x = invhead(q)[0]\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', 
norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)", "def build_graph(mesh, evals, nevals,nfix, step=1.0, params=OptimizationParams()): #smoothing='absolute', numsteps=40000):\n [Xori,TRIV,n, m, Ik, Ih, Ik_k, Ih_k, Tpi, Txi, Tni, iM, Windices, Ael, Bary] = mesh\n\n dtype='float32'\n if(Xori.dtype=='float64'):\n dtype='float64'\n if(Xori.dtype=='float16'):\n dtype='float16'\n print(dtype)\n graph = lambda: None\n \n #model the shape deformation as a displacement vector field\n dX = tf.Variable((0*Xori).astype(dtype) );\n scaleX = tf.Variable(1,dtype=dtype); #not used in shape alignment\n \n graph.input_X = tf.placeholder(shape=dX.shape,dtype=dtype);\n graph.assign_X = tf.assign(dX, graph.input_X-Xori*scaleX).op;\n \n graph.X=Xori*scaleX+dX;\n \n Lx,S,L,Ak = tf_calc_lap(mesh,graph.X)\n\n #Normalized Laplacian\n Si = tf.diag(tf.sqrt(1/S[:,0]))\n Lap = tf.matmul(Si,tf.matmul(Lx,Si));\n\n \n #Spectral decomposition approach\n [s_,v] = tf.self_adjoint_eig( Lap )\n graph.cost_evals_f1 = 1e2*tf.nn.l2_loss( (s_[0:nevals]-evals[0:nevals])* (1/np.asarray(range(1,nevals+1),dtype)) )/nevals # \\\n \n \n #Approach avoiding spectral decomposition - NOT USED\n # [_,EigsOpt,lap] = tfeig(Lap)\n # v = tf.Variable(EigsOpt[:,0:nevals].astype(dtype) );\n # cost_evals_a = 1e3*tf.nn.l2_loss(tf.matmul(tf.transpose(v),v)-tf.eye(nevals,dtype=dtype));\n # cost_evals_b = 1e1*tf.nn.l2_loss( (tf.matmul(Lap,v) - tf.matmul(v,np.diag(evals[0:nevals]).astype(dtype))) )/nevals\n # graph.cost_evals_f2 = cost_evals_a + cost_evals_b\n \n \n meanA, varA = tf.nn.moments(Ak, axes=[0])\n meanL, varL = tf.nn.moments(L, axes=[0])\n\n graph.global_step = tf.Variable(step+1.0, name='global_step',trainable=False, dtype=dtype)\n graph.global_step_val = tf.placeholder(dtype)\n graph.set_global_step = tf.assign(graph.global_step, graph.global_step_val).op \n \n #regularizers decay factor\n cosine_decay = 0.5 * (1 + tf.cos(3.14 * tf.minimum(np.asarray(params.numsteps/2.0,dtype=dtype),graph.global_step) / (params.numsteps/2.0)))\n graph.decay= (1 - params.decay_target) * cosine_decay + params.decay_target\n \n if(params.smoothing=='displacement'): \n graph.vcL = params.curvature_reg*graph.decay * tf.nn.l2_loss( tf.matmul(Bary.astype(dtype),dX)[nfix:,:]);\n graph.vcW = params.smoothness_reg*graph.decay *tf.nn.l2_loss( tf.matmul(Lx,dX)[nfix:,:]) \n if(params.smoothing=='absolute'):\n graph.vcL = params.curvature_reg*graph.decay * tf.nn.l2_loss( tf.matmul(Bary.astype(dtype),S*graph.X)[nfix:,:]);\n graph.vcW = params.smoothness_reg*graph.decay *tf.nn.l2_loss( tf.matmul(Lx,graph.X)[nfix:,:]) \n \n #Volume computation\n T1 = tf.gather(graph.X, TRIV[:,0])\n T2 = tf.gather(graph.X, TRIV[:,1])\n T3 = tf.gather(graph.X, TRIV[:,2])\n XP = tf.cross(T2-T1, T3-T2)\n T_C = 
(T1+T2+T3)/3\n graph.Volume = params.volume_reg*graph.decay*tf.reduce_sum(XP*T_C/2)/3\n\n\n #L2 regularizer on total displacement weighted by area elements\n graph.l2_reg = params.l2_reg*tf.nn.l2_loss(S*dX)\n\n \n graph.cost_spectral = graph.cost_evals_f1 + graph.vcW + graph.vcL - graph.Volume + graph.l2_reg\n\n optimizer = tf.train.AdamOptimizer(params.opt_step)\n \n #gradient clipping \n gvs = optimizer.compute_gradients(graph.cost_spectral)\n capped_gvs = [(tf.clip_by_value(grad, -0.0001, 0.0001), var) for grad, var in gvs if grad!=None]\n graph.train_op_spectral = optimizer.apply_gradients(capped_gvs, global_step=graph.global_step)\n\n [graph.s_,v] = tf.self_adjoint_eig( Lap ) \n return graph", "def compute_limit_matrix(gamma, adjacency, n_states):\n num_states = n_states\n identity = np.eye(num_states)\n return np.linalg.inv(identity - gamma * adjacency / 6)", "def nmf_solve(X, n_clusters, gamma=0.5):\n\n random_state = None\n \n W, H = _initialize_nmf(X, n_components=n_clusters, init='random',\n random_state=random_state)\n \n Ht = check_array(H.T, order='C')\n X = check_array(X, accept_sparse='csr')\n\n # L1 and L2 regularization\n l1_H, l2_H, l1_W, l2_W = 0, 0, 0, 0\n update_H = True\n shuffle = False\n verbose = True\n tol = 1e-4\n \n max_iter = 200\n\n#==============================================================================\n# if regularization in ('both', 'components'):\n# alpha = float(alpha)\n# l1_H = l1_ratio * alpha\n# l2_H = (1. - l1_ratio) * alpha\n# if regularization in ('both', 'transformation'):\n# alpha = float(alpha)\n# l1_W = l1_ratio * alpha\n# l2_W = (1. - l1_ratio) * alpha\n#==============================================================================\n\n rng = check_random_state(random_state)\n\n for n_iter in range(max_iter):\n violation = 0.\n\n # Update W\n violation += _update_coordinate_descent(X, W, Ht, l1_W, l2_W,\n shuffle, rng)\n # Update H\n if update_H:\n violation += _update_coordinate_descent(X.T, Ht, W, l1_H, l2_H,\n shuffle, rng)\n\n if n_iter == 0:\n violation_init = violation\n\n if violation_init == 0:\n break\n\n if verbose:\n print(\"violation:\", violation / violation_init)\n\n if violation / violation_init <= tol:\n if verbose:\n print(\"Converged at iteration\", n_iter + 1)\n break\n\n return W, Ht.T, n_iter", "def _solve_complex_unc_generator(self, d, v, a, F0):\n nt = d.shape[1]\n order = self.order\n\n # need to handle up to 3 types of equations every loop:\n # - rb, el, rf\n unc = self.unc\n rbsize = self.rbsize\n m = self.m\n if rbsize:\n rb = self.rb\n if m is not None:\n imrb = self.imrb\n if unc:\n imrb = imrb.ravel()\n rbforce = imrb * F0[rb]\n else:\n imrb = la.lu_solve(imrb, np.eye(rbsize), check_finite=False)\n rbforce = imrb @ F0[rb]\n else:\n rbforce = F0[rb]\n a[rb, 0] = rbforce\n\n if nt == 1:\n yield\n\n pc = self.pc\n if rbsize:\n G = pc.G\n A = pc.A\n Ap = pc.Ap\n if order == 0:\n A = 1.5 * A\n Ap = 2.0 * Ap\n drb = d[rb]\n vrb = v[rb]\n arb = a[rb]\n\n Force = self._force\n ksize = self.ksize\n rfsize = self.rfsize\n systype = self.systype\n\n if ksize:\n self._delconj()\n Fe = pc.Fe\n Ae = pc.Ae\n Be = pc.Be\n if order == 0:\n Ae = Ae + Be\n ur_d = pc.ur_d\n ur_v = pc.ur_v\n rur_d = pc.rur_d\n iur_d = pc.iur_d\n rur_v = pc.rur_v\n iur_v = pc.iur_v\n ur_inv_v = pc.ur_inv_v\n ur_inv_d = pc.ur_inv_d\n\n kdof = self.kdof\n if m is not None:\n invm = self.invm\n if self.unc:\n invm = invm.ravel()\n else:\n invm = la.lu_solve(invm, np.eye(ksize), check_finite=False)\n D = d[kdof]\n V = v[kdof]\n\n if rfsize:\n rf = 
self.rf\n ikrf = self.ikrf\n if unc:\n ikrf = ikrf.ravel()\n else:\n ikrf = la.lu_solve(ikrf, np.eye(rfsize), check_finite=False)\n drf = d[rf]\n\n while True:\n j, F1 = yield\n if j < 0:\n # add to previous soln\n Force[:, i] += F1\n if rbsize:\n if m is not None:\n if unc:\n F1rb = imrb * F1[rb]\n else:\n F1rb = imrb @ F1[rb]\n else:\n F1rb = F1[rb]\n if order == 1:\n AF = A * 0.5 * F1rb\n AFp = Ap * F1rb\n drb[:, i] += AF\n vrb[:, i] += AFp\n arb[:, i] += F1rb\n\n if order == 1:\n if ksize:\n F1k = F1[kdof]\n if m is not None:\n if unc:\n F1k = invm * F1k\n else:\n F1k = invm @ F1k\n w1 = ur_inv_v @ F1k\n yn = Be * w1\n if systype is float:\n ry = yn.real\n iy = yn.imag\n D[:, i] += rur_d @ ry - iur_d @ iy\n V[:, i] += rur_v @ ry - iur_v @ iy\n else:\n D[:, i] += ur_d @ yn\n V[:, i] += ur_v @ yn\n\n if rfsize:\n if unc:\n drf[:, i] += ikrf * F1[rf]\n else:\n drf[:, i] += ikrf @ F1[rf]\n else:\n i = j\n Force[:, i] = F1\n F0 = Force[:, i - 1]\n if rbsize:\n if m is not None:\n if unc:\n F0rb = imrb * F0[rb]\n F1rb = imrb * F1[rb]\n else:\n F0rb = imrb @ F0[rb]\n F1rb = imrb @ F1[rb]\n else:\n F0rb = F0[rb]\n F1rb = F1[rb]\n if order == 1:\n AF = A * (F0rb + 0.5 * F1rb)\n AFp = Ap * (F0rb + F1rb)\n else:\n AF = A * F0rb\n AFp = Ap * F0rb\n vi = vrb[:, i - 1]\n drb[:, i] = drb[:, i - 1] + G * vi + AF\n vrb[:, i] = vi + AFp\n arb[:, i] = F1rb\n\n if ksize:\n # F0k = Force[kdof, i-1]\n F0k = F0[kdof]\n if order == 1:\n F1k = F1[kdof]\n if m is not None:\n if unc:\n F0k = invm * F0k\n F1k = invm * F1k\n else:\n F0k = invm @ F0k\n F1k = invm @ F1k\n w0 = ur_inv_v @ F0k\n w1 = ur_inv_v @ F1k\n ABF = Ae * w0 + Be * w1\n else:\n if m is not None:\n if unc:\n F0k = invm * F0k\n else:\n F0k = invm @ F0k\n w0 = ur_inv_v @ F0k\n ABF = Ae * w0\n # [V; D] = ur @ y\n # y = ur_inv @ [V; D] =\n # [ur_inv_v, ur_inv_d] @ [V; D]\n y = ur_inv_v @ V[:, i - 1] + ur_inv_d @ D[:, i - 1]\n yn = Fe * y + ABF\n if systype is float:\n # Can do real math for recovery. Note that the\n # imaginary part of 'd' and 'v' would be zero\n # if no modes were deleted of the complex\n # conjugate pairs. 
The real part is correct\n # whether or not modes were deleted, and\n # that's all we need.\n ry = yn.real\n iy = yn.imag\n D[:, i] = rur_d @ ry - iur_d @ iy\n V[:, i] = rur_v @ ry - iur_v @ iy\n else:\n # [V; D] = ur @ y\n D[:, i] = ur_d @ yn\n V[:, i] = ur_v @ yn\n\n if rfsize:\n if unc:\n drf[:, i] = ikrf * F1[rf]\n else:\n drf[:, i] = ikrf @ F1[rf]", "def init_vector(\n iif,\n nbeads,\n momenta=False,\n dimension=\"length\",\n units=\"automatic\",\n cell_units=\"automatic\",\n):\n\n mode = iif.mode\n value = iif.value\n if mode == \"xyz\" or mode == \"pdb\" or mode == \"ase\":\n rq = init_beads(iif, nbeads, dimension, units, cell_units).q\n elif mode == \"chk\":\n if momenta:\n rq = init_beads(iif, nbeads).p\n else:\n rq = init_beads(iif, nbeads).q\n elif mode == \"manual\":\n rq = value\n\n # determines the size of the input data\n if mode == \"manual\":\n if (\n iif.bead >= 0\n ): # if there is a bead specifier then we return a single bead slice\n nbeads = 1\n natoms = len(rq) // (nbeads * 3)\n rq.shape = (nbeads, 3 * natoms)\n\n return rq", "def __init__(self,\n learning_rate: Optional[float] = None,\n factored: bool = True,\n multiply_by_parameter_scale: Union[bool, HParamMap] = True,\n beta1: Optional[float] = None,\n decay_rate: float = 0.8,\n step_offset: int = 0,\n clipping_threshold: Optional[float] = 1.0,\n weight_decay_rate: Optional[float] = None,\n min_dim_size_to_factor: int = 128,\n epsilon1: float = 1e-30,\n epsilon2: float = 1e-3,\n dtype_momentum: Dtype = jnp.float32,\n factor_map: Optional[HParamMap] = None,\n logical_factor_rules: Optional[Mapping[str, FactorDim]] = None,\n weight_decay_rate_lr_exponent: Optional[float] = None,\n global_norm_clip_threshold: Optional[float] = None,\n max_parameter_scale: Optional[float] = None):\n if not factored and factor_map is not None:\n raise ValueError('Adafactor factored is False but factorization rules '\n 'have been provided.')\n if not isinstance(multiply_by_parameter_scale, (bool, HParamMap)):\n raise TypeError(\n '`multiply_by_parameter_scale` must be either bool or `HParamMap` '\n f'type. Got {type(multiply_by_parameter_scale)}')\n\n if not isinstance(factor_map, (type(None), HParamMap)):\n raise TypeError(\n '`factor_map` must be either None or `HParamMap` type. 
Got '\n f'{type(factor_map)}')\n\n hyper_params = _AdafactorHyperParams(\n learning_rate, factored, multiply_by_parameter_scale, beta1, decay_rate,\n step_offset, clipping_threshold, weight_decay_rate,\n min_dim_size_to_factor, epsilon1, epsilon2, factor_map,\n logical_factor_rules, weight_decay_rate_lr_exponent,\n global_norm_clip_threshold, max_parameter_scale)\n self.dtype_momentum = jax.dtypes.canonicalize_dtype(dtype_momentum)\n super().__init__(hyper_params)", "def function(self, lamb, Av=1, Rv=None, Alambda=True, **kwargs):\n _lamb = val_in_unit('lamb', lamb, 'angstrom').magnitude\n\n if isinstance(_lamb, float) or isinstance(_lamb, np.float_):\n _lamb = np.asarray([_lamb])\n else:\n _lamb = _lamb[:]\n\n if Rv is None:\n Rv = self.Rv\n\n c1 = -4.959 / Rv\n c2 = 2.264 / Rv\n c3 = 0.389 / Rv\n c4 = 0.461 / Rv\n x0 = 4.6\n gamma = 1.0\n\n x = 1.e4 / _lamb\n k = np.zeros(np.size(x))\n\n # UV part\n xcutuv = 10000.0 / 2700.\n xspluv = 10000.0 / np.array([2700., 2600.])\n yspluv = 1.0 + c1 + (c2 * xspluv) + c3 * ((xspluv) ** 2) / ( ((xspluv) ** 2 - (x0 ** 2)) ** 2 + (gamma ** 2) * ((xspluv) ** 2 ))\n\n ind = np.where(x >= xcutuv)\n if np.size(ind) > 0:\n k[ind] = 1.0 + c1 + (c2 * x[ind]) + c3 * ((x[ind]) ** 2) / ( ((x[ind]) ** 2 - (x0 ** 2)) ** 2 + (gamma ** 2) * ((x[ind]) ** 2 ))\n\n ind = np.where(x >= 5.9)\n k[ind] += c4 * (0.5392 * ((x[ind] - 5.9) ** 2) + 0.05644 * ((x[ind] - 5.9) ** 3))\n\n # Opt/NIR part\n ind = np.where(x < xcutuv)\n if np.size(ind) > 0:\n xsplopir = np.zeros(9)\n xsplopir[0] = 0.0\n xsplopir[1: 10] = 1.0 / np.array([2.198, 1.65, 1.25, 0.81, 0.65, 0.55, 0.44, 0.37])\n\n # Values directly from Gordon et al. (2003)\n # ysplopir = np.array([0.0,0.016,0.169,0.131,0.567,0.801,1.00,1.374,1.672])\n # K & J values adjusted to provide a smooth, non-negative cubic spline interpolation\n ysplopir = np.array([0.0, 0.11, 0.169, 0.25, 0.567, 0.801, 1.00, 1.374, 1.672])\n\n tck = interpolate.splrep(np.hstack([xsplopir, xspluv]), np.hstack([ysplopir, yspluv]), k=3)\n k[ind] = interpolate.splev(x[ind], tck)\n\n if (Alambda):\n return(k * Av)\n else:\n return(k * Av * (np.log(10.) 
* 0.4 ))", "def test_advanced_manipulations(free_alg):\n dr = free_alg\n p = dr.names\n i, j, k = p.i, p.j, p.k\n\n u = IndexedBase('u')\n v = IndexedBase('v')\n f = Vec('f')\n\n tensor = dr.einst(u[i, j] * f[j] + v[i, j] * f[j])\n assert tensor.n_terms == 2\n\n def has_u(term):\n \"\"\"Test if a term have u tensor.\"\"\"\n return term.amp.has(u)\n\n expect = dr.sum((j, p.R), u[i, j] * f[j])\n for res in [\n tensor.filter(has_u),\n tensor.bind(lambda x: [x] if has_u(x) else [])\n ]:\n assert res.n_terms == 1\n assert res == expect\n\n def subst_i(term):\n \"\"\"Substitute i index in the terms.\"\"\"\n return Term(term.sums, term.amp.xreplace({i: k}), term.vecs)\n\n expect = dr.sum((j, p.R), u[k, j] * f[j] + v[k, j] * f[j])\n for res in [\n tensor.map(subst_i),\n tensor.bind(lambda x: [subst_i(x)]),\n tensor.map2scalars(lambda x: x.xreplace({i: k}))\n ]:\n assert res.n_terms == 2\n assert res == expect\n\n alpha, beta = symbols('alpha beta')\n assert tensor.bind(\n lambda x: [Term(x.sums, x.amp * i_, x.vecs) for i_ in [alpha, beta]]\n ) == (tensor * alpha + tensor * beta)\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k})\n ) == dr.sum((j, p.R), u[i, k] * f[k] + v[i, k] * f[k])\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k}), skip_vecs=True\n ) == dr.sum((j, p.R), u[i, k] * f[j] + v[i, k] * f[j])", "def fit(self, ymeas, mmeas=None, alg='optls'): # alg: {'optls','mine'}\n\n # [ X ]*dm = [ dy ]\n # [ a ] [ 0 ] <-- using built-in Ridge model does this\n #\n # [ X ]*dm = [ dy ]\n # [-a ] [ a*m ] <-- but I want this for iterated nonlin problem\n #\n # [ X ]*dm = [ dy ]\n # [-aL ] [ a*L*m ] <-- and more generally I want this (higher-order Tihk)\n #\n # which can be rewritten:\n # G * dm = D (and then loop that from m0 with m=m+dm...)\n\n # X is the Jacobian matrix of derivs of predicted data points wrt model\n # params m, as given by ypred,X=self.fwd_deriv_code(m)...\n\n\n if alg=='optls':\n # https://docs.scipy.org/doc/scipy/reference/optimize.html\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html\n def fun(m):\n mlen = m.size\n L = create_findiff_mtx(mlen,self.beta)\n ypred,J = self.fwd_deriv_code(m) # m: model params vector, J: derivs matrix\n resids = ymeas-ypred\n modelfunc = self.alpha * np.dot(L,m)\n modelfunc = modelfunc.reshape(len(modelfunc),1)\n f = np.squeeze(np.concatenate((resids,modelfunc),axis=0))\n return f\n\n def jac(m):\n mlen = m.size\n L = create_findiff_mtx(mlen,self.beta)\n ypred,J = self.fwd_deriv_code(m) # m: model params vector, J: derivs matrix\n Jreg = self.alpha * L\n Jout = np.concatenate((J,Jreg))\n return Jout\n\n if self.usefindiff:\n jacfn='2-point'\n else:\n jacfn=jac\n if self.verbose:\n verblevel=2\n else:\n verblevel=0\n res = least_squares(fun, np.squeeze(self.minit), jac=jacfn,\n bounds=(0., 3.5), diff_step=None, verbose=verblevel, max_nfev=self.max_nfev,\n method='trf', ftol=1e-08, xtol=1e-08, gtol=1e-08, x_scale=1.0)\n #ftol=1e-4, xtol=1e-1, gtol=1e-8, x_scale=1.0)\n #ftol=1e0, xtol=1e-01, gtol=1e-01, x_scale=1.0)\n #ftol=1e-08, xtol=1e-08, gtol=1e-08, x_scale=1.0)\n\n if mmeas is not None:\n testMSE = cplxMSE(res.x.reshape(len(res.x),1),mmeas)\n else:\n testMSE = npl.nan\n ypred,J = self.fwd_deriv_code(res.x.reshape(len(res.x),1))\n ypred=np.log10(ypred)\n residnorm = norm(ypred-ymeas)\n print('resid norm',residnorm)\n L = create_findiff_mtx(len(self.minit),self.beta)\n print('maxeig JJ',np.real(np.amax(np.linalg.eigvals(np.dot(J.T,J))))) # J'J has real eigvals but kept cplx type\n 
print('maxeig LL',np.amax(np.linalg.eigvals(np.dot(L.T,L))))\n if self.showplot:\n f, ax = plt.subplots(1, 2, figsize=(11,4))\n # plot the meas and pred data:\n # print('ypred',ypred)\n # print('ymeas',ymeas)\n ax[0].plot(ypred,'r.-')\n ax[0].plot(ymeas,'k.-')\n ax[0].grid()\n #ax[0].set_ylabel('cost')\n #ax[0].set_xlabel('iterations')\n ax[0].set_title('Measured (blk) and predicted (blu) data')\n # plot the init, true, and final model param vectors:\n ax[1].plot(self.minit,'g.-')\n ax[1].plot(res.x,'r.--')\n ax[1].plot(mmeas,'k.--')\n ax[1].grid()\n #ax[1].set_ylabel('model value')\n #ax[1].set_xlabel('indep var')\n ax[1].set_title('Model vectors (true=blk, init=grn, soln=red)')\n\n # return m,cost,misfit,modelnorm,norm(dm),testMSE\n return res.x,res.cost,np.nan,np.nan,np.nan,testMSE\n\n elif alg=='mine':\n cost = []\n m = self.minit\n mlen = len(m)\n if self.verbose:\n print('iter alpha cost norm(dd) norm(dm) dmtol')\n for i in range(self.max_nfev):\n ypred,X = self.fwd_deriv_code(m) # m: model params vector, X: derivs matrix\n if self.usefindiff:\n def tmpfwdcode(m):\n return np.squeeze(self.fwd_deriv_code(m)[0])\n X = jacfindiff(tmpfwdcode,m,dx=1.0e-6) # dx=1.0e-6 is problem dependent!\n L = create_findiff_mtx(mlen,self.beta)\n G = np.concatenate((X, -self.alpha*L),axis=0)\n D = np.concatenate((ymeas-ypred, self.alpha*np.dot(L,m)),axis=0)\n misfit = cplxMSE(ymeas, ypred)\n modelnorm = norm(np.dot(L,m))**2\n current_cost = misfit + pow(self.alpha,2)*modelnorm\n dm,res,rnk,sv = lstsq(G,D)\n m = m + dm\n cost.append(current_cost)\n if self.verbose:\n print('%3d %6.1g %10.3f %10.3f %10.2g %6.3g' %\n (i, self.alpha, current_cost, norm(ymeas-ypred), norm(dm), self.dmtol))\n if norm(dm) < self.dmtol:\n break\n self.G = G\n self.ypred = ypred\n if mmeas is not None:\n testMSE = cplxMSE(m,mmeas)\n else:\n testMSE = npl.nan\n print('maxeig JJ',np.real(np.amax(np.linalg.eigvals(np.dot(X.T,X))))) # X'X has real eigvals but kept cplx type\n print('maxeig LL',np.amax(np.linalg.eigvals(np.dot(L.T,L))))\n if self.showplot:\n f, ax = plt.subplots(1, 2, figsize=(11,4))\n # plot the cost (ie loss) per iterations:\n ax[0].semilogy(cost,'.-') # (last element of cost)\n ax[0].grid()\n ax[0].set_ylabel('cost')\n ax[0].set_xlabel('iterations')\n ax[0].set_title('Cost history (misfit^2 + alpha^2*modelnorm^2)')\n # plot the init, true, final, and evolution of model params:\n #print('m',np.squeeze(m.T))\n ax[1].plot(mmeas,'k')\n ax[1].plot(self.minit,'g')\n ax[1].plot(m,'r')\n ax[1].grid()\n #ax[1].set_ylabel('model value')\n ax[1].set_xlabel('indep var')\n ax[1].set_title('Model vectors')\n\n return m,cost[-1],misfit,modelnorm,norm(dm),testMSE", "def computesparsecholesky(self,multithread_,ordermethod_,tolsingular_,anzc_,aptrc_,asubc_,avalc_):\n n_ = None\n if n_ is None:\n n_ = len(anzc_)\n elif n_ != len(anzc_):\n raise IndexError(\"Inconsistent length of array anzc\")\n if n_ is None:\n n_ = len(aptrc_)\n elif n_ != len(aptrc_):\n raise IndexError(\"Inconsistent length of array aptrc\")\n if anzc_ is None:\n raise ValueError(\"Argument anzc cannot be None\")\n if anzc_ is None:\n raise ValueError(\"Argument anzc may not be None\")\n if isinstance(anzc_, numpy.ndarray) and anzc_.dtype is numpy.dtype(numpy.int32) and anzc_.flags.contiguous:\n _anzc_copyarray = False\n _anzc_tmp = ctypes.cast(anzc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif anzc_ is not None:\n _anzc_copyarray = True\n _anzc_np_tmp = numpy.zeros(len(anzc_),numpy.dtype(numpy.int32))\n _anzc_np_tmp[:] = anzc_\n assert 
_anzc_np_tmp.flags.contiguous\n _anzc_tmp = ctypes.cast(_anzc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _anzc_copyarray = False\n _anzc_tmp = None\n \n if aptrc_ is None:\n raise ValueError(\"Argument aptrc cannot be None\")\n if aptrc_ is None:\n raise ValueError(\"Argument aptrc may not be None\")\n if isinstance(aptrc_, numpy.ndarray) and aptrc_.dtype is numpy.dtype(numpy.int64) and aptrc_.flags.contiguous:\n _aptrc_copyarray = False\n _aptrc_tmp = ctypes.cast(aptrc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif aptrc_ is not None:\n _aptrc_copyarray = True\n _aptrc_np_tmp = numpy.zeros(len(aptrc_),numpy.dtype(numpy.int64))\n _aptrc_np_tmp[:] = aptrc_\n assert _aptrc_np_tmp.flags.contiguous\n _aptrc_tmp = ctypes.cast(_aptrc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _aptrc_copyarray = False\n _aptrc_tmp = None\n \n if asubc_ is None:\n raise ValueError(\"Argument asubc cannot be None\")\n if asubc_ is None:\n raise ValueError(\"Argument asubc may not be None\")\n if isinstance(asubc_, numpy.ndarray) and asubc_.dtype is numpy.dtype(numpy.int32) and asubc_.flags.contiguous:\n _asubc_copyarray = False\n _asubc_tmp = ctypes.cast(asubc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif asubc_ is not None:\n _asubc_copyarray = True\n _asubc_np_tmp = numpy.zeros(len(asubc_),numpy.dtype(numpy.int32))\n _asubc_np_tmp[:] = asubc_\n assert _asubc_np_tmp.flags.contiguous\n _asubc_tmp = ctypes.cast(_asubc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _asubc_copyarray = False\n _asubc_tmp = None\n \n if avalc_ is None:\n raise ValueError(\"Argument avalc cannot be None\")\n if avalc_ is None:\n raise ValueError(\"Argument avalc may not be None\")\n if isinstance(avalc_, numpy.ndarray) and avalc_.dtype is numpy.dtype(numpy.float64) and avalc_.flags.contiguous:\n _avalc_copyarray = False\n _avalc_tmp = ctypes.cast(avalc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif avalc_ is not None:\n _avalc_copyarray = True\n _avalc_np_tmp = numpy.zeros(len(avalc_),numpy.dtype(numpy.float64))\n _avalc_np_tmp[:] = avalc_\n assert _avalc_np_tmp.flags.contiguous\n _avalc_tmp = ctypes.cast(_avalc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _avalc_copyarray = False\n _avalc_tmp = None\n \n perm_ptr = ctypes.POINTER(ctypes.c_int32)()\n diag_ptr = ctypes.POINTER(ctypes.c_double)()\n lnzc_ptr = ctypes.POINTER(ctypes.c_int32)()\n lptrc_ptr = ctypes.POINTER(ctypes.c_int64)()\n lensubnval_ = ctypes.c_int64()\n lsubc_ptr = ctypes.POINTER(ctypes.c_int32)()\n lvalc_ptr = ctypes.POINTER(ctypes.c_double)()\n res = __library__.MSK_XX_computesparsecholesky(self.__nativep,multithread_,ordermethod_,tolsingular_,n_,_anzc_tmp,_aptrc_tmp,_asubc_tmp,_avalc_tmp,ctypes.byref(perm_ptr),ctypes.byref(diag_ptr),ctypes.byref(lnzc_ptr),ctypes.byref(lptrc_ptr),ctypes.byref(lensubnval_),ctypes.byref(lsubc_ptr),ctypes.byref(lvalc_ptr))\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n perm_arr = perm_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,perm_ptr)\n diag_arr = diag_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,diag_ptr)\n lnzc_arr = lnzc_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,lnzc_ptr)\n lptrc_arr = lptrc_ptr[0:n_]\n __library__.MSK_XX_freeenv(self.__nativep,lptrc_ptr)\n lensubnval_ = lensubnval_.value\n _lensubnval_return_value = lensubnval_\n lsubc_arr = lsubc_ptr[0:lensubnval_]\n 
__library__.MSK_XX_freeenv(self.__nativep,lsubc_ptr)\n lvalc_arr = lvalc_ptr[0:lensubnval_]\n __library__.MSK_XX_freeenv(self.__nativep,lvalc_ptr)\n return (perm_arr,diag_arr,lnzc_arr,lptrc_arr,_lensubnval_return_value,lsubc_arr,lvalc_arr)" ]
[ "0.7236", "0.69279444", "0.60326505", "0.5435932", "0.53381103", "0.5186424", "0.50212055", "0.49177843", "0.4913222", "0.48340198", "0.47836974", "0.4736391", "0.47337383", "0.47162333", "0.467649", "0.46651557", "0.46549806", "0.46410066", "0.4637888", "0.4630094", "0.46174133", "0.45679438", "0.45679438", "0.45600614", "0.45520654", "0.4549737", "0.45299256", "0.45068708", "0.4501668", "0.4493616", "0.4469645", "0.44625583", "0.44588217", "0.44382066", "0.44351646", "0.4430605", "0.44292808", "0.4423908", "0.4417532", "0.4414553", "0.44059613", "0.44053024", "0.44030938", "0.4402069", "0.4399933", "0.43969688", "0.43962076", "0.4395625", "0.4393017", "0.4385715", "0.43783727", "0.4377932", "0.43745324", "0.43717527", "0.4368007", "0.4357552", "0.43529788", "0.43523026", "0.43519548", "0.43458998", "0.43379137", "0.43368977", "0.43335164", "0.43305114", "0.43297043", "0.43095607", "0.43010685", "0.4300558", "0.42958686", "0.42869255", "0.4286086", "0.42828646", "0.42801157", "0.42769375", "0.42742848", "0.42722455", "0.42721725", "0.426726", "0.42585286", "0.42580506", "0.4256715", "0.42488575", "0.4248408", "0.42403078", "0.4230145", "0.42287156", "0.42268166", "0.42167306", "0.42140558", "0.42114305", "0.42090496", "0.4208967", "0.4208758", "0.42082116", "0.42029572", "0.42020836", "0.42008224", "0.4198382", "0.41978678", "0.41976938" ]
0.79677343
0
Single step of a modified Gram-Schmidt orthogonalization.
def modified_gram_schmidt_step_arnoldi(j, vals):
    vector, krylov_vectors, n, H = vals
    v = krylov_vectors[j, :]
    h = jax.numpy.vdot(v, vector)
    H = jax.ops.index_update(H, jax.ops.index[j, n], h)
    vector = vector - h * jax.numpy.reshape(v, vector.shape)
    return [vector, krylov_vectors, n, H]
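Editorial aside, not part of the dataset row: the step above is easier to follow in plain NumPy. The sketch below mirrors the logic of `modified_gram_schmidt_step_arnoldi` using in-place array updates in place of `jax.ops.index_update`; the names `mgs_step`, `K`, and `w` are mine, and the driver loop only illustrates how the step is applied repeatedly against a stack of orthonormal Krylov vectors.

```python
import numpy as np

def mgs_step(j, vector, krylov_vectors, n, H):
    # Remove the component of `vector` along the j-th stored Krylov vector
    # and record the overlap <v_j | vector> in column n of H.
    v = krylov_vectors[j, :]
    h = np.vdot(v, vector)              # np.vdot conjugates its first argument
    H[j, n] = h
    return vector - h * v.reshape(vector.shape)

rng = np.random.default_rng(0)
K = np.linalg.qr(rng.standard_normal((5, 3)))[0].T   # 3 orthonormal row vectors
H = np.zeros((4, 4))
w = rng.standard_normal(5)
for j in range(3):                      # orthogonalize w against all rows of K
    w = mgs_step(j, w, K, n=2, H=H)
assert np.allclose(K @ w, 0.0)          # w is now orthogonal to every row
```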
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_orthogonal(self):\n pass", "def orthogonal_component(self, basis: Vector) -> Vector:\n return self - self.parallel_component(basis)", "def orthogonal(v):\n return np.array([-v[1], v[0]])", "def _orthogonal_vector(vector):\n return -1 * vector[1], vector[0]", "def _orthogonal_init(self):\n # if is a conv layer, will need to reshape to fan in matrix,\n # which is of dimension\n # num input feature maps * filter height * filter width\n if(len(self.dims) > 2):\n rv_samp = np.random.randn(self.dims[2],\n self.dims[0] * self.dims[1] * self.dims[3])\n out_sigma = np.sqrt(1.0 / rv_samp.shape[1])\n # otherwise will be a densely connected layer\n else:\n rv_samp = np.random.randn(self.dims[0], self.dims[1])\n out_sigma = np.sqrt(1.0 / rv_samp.shape[0])\n # perform SVD\n U, _, V = np.linalg.svd(rv_samp, full_matrices=False) #pylint: disable=invalid-name\n # both U and V are orthoginal matricies, so will choose the one\n # that is the correct dimensions for our layer\n ortho_matrix = U if U.shape == rv_samp.shape else V\n # rescale so it is unit variance for each vector\n # print(\"std(q) = {}\".format(np.std(q)))\n ortho_norm = (ortho_matrix / np.std(ortho_matrix)) * out_sigma\n #print(\"std(qs) = {}\".format(np.std(qs)))\n #print(q.shape)\n return ortho_norm.reshape(self.dims).astype(np.float32)", "def orthogonality(A, g):\n # Compute vector norms\n norm_g = np.linalg.norm(g)\n # Compute Froebnius norm of the matrix A\n if issparse(A):\n norm_A = scipy.sparse.linalg.norm(A, ord='fro')\n else:\n norm_A = np.linalg.norm(A, ord='fro')\n\n # Check if norms are zero\n if norm_g == 0 or norm_A == 0:\n return 0\n\n norm_A_g = np.linalg.norm(A.dot(g))\n # Orthogonality measure\n orth = norm_A_g / (norm_A*norm_g)\n return orth", "def tanh_inplace(a):", "def glorot(w):\n n_in, n_out = w.size()\n b = math.sqrt(6) / math.sqrt(n_in + n_out)\n return w.uniform_(-b, b)", "def orth_left(self, n):\n self.A[n], self.C, nC, Dr = self._mps_decompose_AC(self.A[n])\n self.normC *= nC\n self.D[n + 1] = Dr\n self.pC = n + 1", "def orthogonal(shape):\n # taken from https://gist.github.com/kastnerkyle/f7464d98fe8ca14f2a1a\n flat_shape = (shape[0], np.prod(shape[1:]))\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n q = u if u.shape == flat_shape else v # pick the one with the correct shape\n q = q.reshape(shape)\n return q[:shape[0], :shape[1]].astype(theano.config.floatX)", "def orthoXY(a):\n\n return [ a[1], -a[0], 0, 1.0 ]", "def gramschmidt(A):\r\n _, k = A.shape\r\n\r\n # first basis vector\r\n Q = A[:, [0]] / np.linalg.norm(A[:, 0])\r\n for j in range(1, k):\r\n # orthogonal projection, loop-free implementation\r\n q = A[:, j] - np.dot(Q, np.dot(Q.T, A[:, j]))\r\n\r\n # check premature termination\r\n nq = np.linalg.norm(q)\r\n if nq < 1e-9 * np.linalg.norm(A[:, j]):\r\n break\r\n # add new basis vector as another column of Q\r\n Q = np.column_stack([Q, q / nq])\r\n return Q", "def orthonormalize(self, mode='left'):\n if len(self.A) == 0:\n return 1\n\n if mode == 'left':\n for i in range(len(self.A) - 1):\n self.A[i], self.A[i+1], self.qD[i+1] = local_orthonormalize_left_qr(self.A[i], self.A[i+1], self.qd, self.qD[i:i+2])\n # last tensor\n self.A[-1], T, self.qD[-1] = local_orthonormalize_left_qr(self.A[-1], np.array([[[1]]]), self.qd, self.qD[-2:])\n # normalization factor (real-valued since diagonal of R matrix is real)\n assert T.shape == (1, 1, 1)\n nrm = T[0, 0, 0].real\n if nrm < 0:\n # flip sign such that normalization factor is always non-negative\n 
self.A[-1] = -self.A[-1]\n nrm = -nrm\n return nrm\n if mode == 'right':\n for i in reversed(range(1, len(self.A))):\n self.A[i], self.A[i-1], self.qD[i] = local_orthonormalize_right_qr(self.A[i], self.A[i-1], self.qd, self.qD[i:i+2])\n # first tensor\n self.A[0], T, self.qD[0] = local_orthonormalize_right_qr(self.A[0], np.array([[[1]]]), self.qd, self.qD[:2])\n # normalization factor (real-valued since diagonal of R matrix is real)\n assert T.shape == (1, 1, 1)\n nrm = T[0, 0, 0].real\n if nrm < 0:\n # flip sign such that normalization factor is always non-negative\n self.A[0] = -self.A[0]\n nrm = -nrm\n return nrm\n raise ValueError(f'mode = {mode} invalid; must be \"left\" or \"right\".')", "def xform_homog( self , xfrmMatx ):\r\n for i in xrange( 0 , len( self.vertices ) , 3 ):\r\n self.vertX[ i : i+4 ] = apply_homog( xfrmMatx , self.vertices[ i : i+4 ] )", "def orth(A):\n u,s,vh = svd(A)\n M,N = A.shape\n tol = max(M,N)*numpy.amax(s)*eps\n num = numpy.sum(s > tol,dtype=int)\n Q = u[:,:num]\n return Q", "def e_ortogonal(self, other):\n if self.pi(other) == 0:\n return True\n else:\n return False", "def _get_orthogonal_states(self, and_state):\n if and_state not in self.and_states:\n states = and_state.get_orthogonal_states()\n states.insert(0, and_state)\n self.and_states[and_state] = states\n return self.and_states[and_state]", "def ortho(self):\r\n\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n\r\n x = _vec3(m11, m21, m31)\r\n y = _vec3(m12, m22, m32)\r\n z = _vec3(m13, m23, m33)\r\n\r\n xl = x.length()\r\n xl*=xl\r\n y = y - ((x*y)/xl)*x\r\n z = z - ((x*z)/xl)*x\r\n\r\n yl = y.length()\r\n yl*=yl\r\n z = z - ((y*z)/yl)*y\r\n\r\n return mat4( x.x, y.x, z.x, m14,\r\n x.y, y.y, z.y, m24,\r\n x.z, y.z, z.z, m34,\r\n m41, m42, m43, m44)", "def apply_homog( homogMat , vec3 ):\r\n return ( np.dot( homogMat , [ vec3[0] , vec3[1] , vec3[2] , 1 ] ) )[:3]", "def glucose_c1(self, g_t, t_G, a_G, d_g_t=0):\n return -(1/t_G)*g_t+(a_G/t_G)*d_g_t", "def orth_right(self, n):\n self.C, self.A[n], nC, Dl = self._mps_decompose_CA(self.A[n])\n self.normC *= nC\n self.D[n] = Dl\n self.pC = n", "def lowdin_orthogonalize(fock, s):\n eva, eve = np.linalg.eigh(s)\n sm12 = eve @ np.diag(1.0/np.sqrt(eva)) @ eve.T\n return sm12 @ fock @ sm12", "def _apply_one_mode_gate(G, T, i):\n\n T[i] *= G\n return T", "def orthogonal_init(tensor, gain=1, random_state=None):\n if tensor.ndimension() < 2:\n raise ValueError(\"Only tensors with 2 or more dimensions are supported\")\n\n rows = tensor.size(0)\n cols = tensor.numel() // rows\n flattened = normal_init(tensor.new(rows, cols),0,1,random_state)\n\n if rows < cols:\n flattened.t_()\n\n # Compute the qr factorization\n q, r = torch.qr(flattened)\n # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf\n d = torch.diag(r, 0)\n ph = d.sign()\n q *= ph\n\n if rows < cols:\n q.t_()\n\n with torch.no_grad():\n tensor.view_as(q).copy_(q)\n tensor.mul_(gain)\n\n return tensor", "def gram_schmidt(basis):\n orthog = np.array([None for _ in basis])\n mu = np.array([[None for _ in basis] for _ in basis])\n \n orthog[0] = basis[0]\n\n for i in range(1, len(basis)):\n for j in range(i):\n mu[i][j] = np.dot(basis[i], orthog[j])/sq_norm(orthog[j])\n orthog[i] = basis[i]\n for j in range(i):\n orthog[i] = orthog[i] - mu[i][j] * orthog[j]\n return orthog", "def gauss_jordan(out):\n\n h, w = out.shape\n\n assert w > h\n\n for y in range(0, h):\n\n maxrow = out[y:, y].argmax() + y\n\n (out[y], out[maxrow]) = (out[maxrow], 
out[y].copy())\n\n if out[y][y] == 0:\n # this will be a problem, see if we can do a row\n # operation to fix it\n for y2 in range(y+1,h):\n if out[y2][y]!=0:\n out[y]+=out[y2]\n break\n\n # no, out of options, must be a singular matrix\n if out[y][y]==0:\n raise np.linalg.linalg.LinAlgError(\"Singular matrix\")\n\n for y2 in range(y + 1, h): # Eliminate column y\n c = out[y2][y] / out[y][y]\n out[y2] -= out[y] * c\n\n for y in range(h - 1, 0 - 1, -1): # Backsubstitute\n c = out[y][y]\n for y2 in range(0, y):\n for x in range(w - 1, y - 1, -1):\n out[y2][x] -= out[y][x] * out[y2][y] / c\n out[y][y] /= c\n for x in range(h, w): # Normalize row y\n out[y][x] /= c\n\n return out", "def orthoPolyPower(x,power):\n y = x**power\n x_normalized = x / np.dot(x,x) ** 0.5\n ortho = y - np.dot(x_normalized,y) * x_normalized\n orthonormal = ortho / np.dot(ortho,ortho)**0.5\n return orthonormal", "def as_homogenous_transformation(self):\n r3 = self.orientation.normalize().unit_quaternion_as_r3_rotation_matrix()\n return matrix.sqr((r3[0],r3[1],r3[2],self.translation[0],\n r3[3],r3[4],r3[5],self.translation[1],\n r3[6],r3[7],r3[8],self.translation[2],\n 0,0,0,1))", "def tan_inplace(a):", "def _sample_orthonormal_to(mu):\n v = np.random.randn(mu.shape[0])\n proj_mu_v = mu * np.dot(mu, v) / np.linalg.norm(mu)\n orthto = v - proj_mu_v\n return orthto / np.linalg.norm(orthto)", "def riemannian_grads(self):\n u = self.feature_embedding.weight.grad\n x = self.feature_embedding.weight.data\n u.narrow(-1, 0, 1).mul_(-1)\n u.addcmul_(ldot(x, u, keepdim=True).expand_as(x), x)\n return u # can be delete?", "def stupid_transform(GLreal):\n\n out = zeros(M)\n\n for i in range(M):\n out[i] += (1./(M-1.))*GLreal[0]\n for j in range(1,M-1):\n out[i] += (2./(M-1.))*GLreal[j]*cos(pi*i*j/(M-1))\n out[i] += (1./(M-1.))*GLreal[M-1]*cos(pi*i)\n del i,j\n\n out[0] = out[0]/2.\n out[M-1] = out[M-1]/2.\n\n return out", "def twobody_acc(sat):\n pos = sat.getpos_sph()\n g_acc = [-G*M_EARTH/pos[0]**2, 0, 0]\n return g_acc", "def near_orthog(m):\n w = np.linalg.svd(m)\n return(w[0].dot(w[2]))", "def orthogonal_initializer(scale) -> Callable:\n\n def orgtho_init(w:tf.Tensor) -> tf.Tensor:\n\n # reshaping image matrix to 2d for enforcing orthogonality\n _,_,_, c = w.shape.as_list()\n w = tf.reshape(w, [-1, c])\n\n # declare identity matrix\n identity = tf.eye(c)\n\n # perform wt*w\n w_transpose = tf.transpose(w)\n w_mul = tf.matmul(w_transpose, w)\n\n reg = tf.subtract(w_mul, identity)\n\n ortho_loss = tf.nn.l2_loss(reg)\n\n return scale * ortho_loss\n\n return orgtho_init", "def get_orthogonality_regularizer(orthogonality_penalty_weight):\n def orthogonality(weight):\n \"\"\"Calculates the layer-wise penalty encouraging orthogonality.\"\"\"\n with tf.name_scope(None, \"orthogonality\", [weight]) as name:\n w2 = tf.matmul(weight, weight, transpose_b=True)\n wn = tf.norm(weight, ord=2, axis=1, keepdims=True) + 1e-32\n correlation_matrix = w2 / tf.matmul(wn, wn, transpose_b=True)\n # print(tf.matmul(wn, wn, transpose_b=True).get_shape()) output:64 * 64\n matrix_size = correlation_matrix.get_shape().as_list()[0]\n base_dtype = weight.dtype.base_dtype\n identity = tf.eye(matrix_size, dtype=base_dtype)\n # print(matrix_size) output: 64\n weight_corr = tf.reduce_mean(\n tf.squared_difference(correlation_matrix, identity))\n # tf.print(weight_corr)\n return tf.multiply(\n tf.cast(orthogonality_penalty_weight, base_dtype),\n weight_corr,\n name=name)\n\n return orthogonality", "def gha(self):\n return np.mod(self.gmst*self.turndeg +\n 
self.turndeg*self.T*self.century +\n self.turndeg/2.0, self.turndeg)", "def orthogonalize(self, ortho_type=\"basic\", beta=0.001):\n if ortho_type == \"basic\":\n W = self.transform.weight.data\n o = ((1 + beta) * W) - (beta * W.mm(W.t().mm(W)))\n W.copy_(o)\n elif ortho_type == \"spectral\":\n self.spectral()\n elif ortho_type == \"forbenius\":\n self.forbenius()\n else:\n raise NotImplementedError(f\"{ortho_type} not found\")", "def orthonormation_method(standardized_methods_cleaned):\n method_standardized_ortho = standardized_methods_cleaned.copy(deep=True)\n\n categories = method_standardized_ortho.columns.tolist()\n\n # Normation of the first category\n method_standardized_ortho[categories[0]] = method_standardized_ortho[categories[0]] / \\\n linalg.norm(method_standardized_ortho[categories[0]])\n\n # Normation of every following categories\n j = 0\n while j < len(categories):\n i = 0\n while i < j:\n # Calculates the orthogonal projection of j on each i and substraction of the projection from j\n method_standardized_ortho[categories[j]] = \\\n method_standardized_ortho[categories[j]] - method_standardized_ortho[categories[i]] * (\n sum(method_standardized_ortho[categories[i]] * method_standardized_ortho[categories[j]]) /\n sum(method_standardized_ortho[categories[i]] * method_standardized_ortho[categories[i]]))\n if linalg.norm(method_standardized_ortho[categories[j]]) == 0:\n # If after the projection, if the j columns is null it is droped (i.e it is linearly dependant with\n # the other columns) and the inner loop stops\n method_standardized_ortho.drop(method_standardized_ortho.columns[j], inplace=True, axis=1)\n categories.remove(categories[j])\n\n break\n else:\n # If the j column is not null, it is normed and the inner while loop keeps going\n method_standardized_ortho[categories[j]] = method_standardized_ortho[categories[j]] / \\\n (linalg.norm(method_standardized_ortho[categories[j]]))\n i += 1\n j += 1\n\n return method_standardized_ortho", "def orthogonal_weight_tensor(shape):\n if len(shape) == 2 :\n if shape[0] == shape[1] :\n M = rng_np.randn(*shape).astype(np.float32)\n Q, R = np.linalg.qr(M)\n Q = Q * np.sign(np.diag(R))\n return Q\n elif shape[1] % shape[0] == 0:\n print \"WARNING: You asked for a orth initialization for a 2D tensor\"+\\\n \" that is not square, but it seems possible to make it orth by blocks\"\n weight_tensor = np.empty(shape, dtype=np.float32)\n blocks_of_orth = shape[1] // shape[0]\n for i in range(blocks_of_orth):\n M = rng_np.randn(shape[0],shape[0]).astype(np.float32)\n Q, R = np.linalg.qr(M)\n Q = Q * np.sign(np.diag(R))\n weight_tensor[:,i*shape[0]:(i+1)*shape[0]] = Q\n return weight_tensor\n else :\n print \"WARNING: You asked for a orth initialization for a 2D tensor\"+\\\n \" that is not square and not square by block. Falling back to norm\"\n return norm_weight_tensor(shape)\n\n elif len(shape) == 3 :\n print \"WARNING: You asked for a orth initialization for 3D tensor\"+\\\n \" it is not implemented. 
Falling back to norm init.\"\n return norm_weight_tensor(shape)\n\n assert shape[2] == shape[3]\n if shape[2] == 1 :\n return norm_weight_tensor(shape)\n\n weight_tensor = np.empty(shape, dtype=np.float32)\n shape_ = shape[2:]\n\n for i in range(shape[0]):\n for j in range(shape[1]) :\n M = rng_np.randn(*shape_).astype(np.float32)\n Q, R = np.linalg.qr(M)\n Q = Q * np.sign(np.diag(R))\n weight_tensor[i,j,:,:] = Q\n\n return weight_tensor", "def _ruth_4th(vel_update, pos_update=update.PositionUpdate()):\n updates = [vel_update, pos_update]*3 + [vel_update]\n ctmp = 2.0**(1.0/3.0)\n coeff = [\n 1.0/(2.0*(2.0-ctmp)),\n 1.0/(2.0-ctmp),\n (1.0-ctmp)/(2.0*(2.0-ctmp)),\n -ctmp/(2.0-ctmp),\n (1.0-ctmp)/(2.0*(2.0-ctmp)),\n 1.0/(2.0-ctmp),\n 1.0/(2.0*(2.0-ctmp)),\n ]\n return ExplicitIntegrator(coeff, updates)", "def optimizer_step(g, H, lambda_=0):\n if lambda_: # LM instead of GN\n D = (H.diagonal(dim1=-2, dim2=-1) + 1e-9).diag_embed()\n H = H + D*lambda_\n try:\n P = torch.inverse(H)\n except RuntimeError as e:\n logging.warning(f'Determinant: {torch.det(H)}')\n raise e\n delta = -(P @ g[..., None])[..., 0]\n return delta", "def renorm(self):\n self.U /= (np.sum(np.abs(self.U)**2)*self.dx)**0.5", "def ortho(A, B, dim=-2, M=None, mright=True):\n if M is None:\n return A - (A * B).sum(dim=dim, keepdim=True) * B\n elif mright:\n return A - (M.mm(A) * B).sum(dim=dim, keepdim=True) * B\n else:\n return A - M.mm((A * B).sum(dim=dim, keepdim=True) * B)", "def tanh(a):", "def g_tensor(self,gpara,gperp):\n gx = gperp\n gy = gperp\n gz = gpara\n\n self.gx = gx\n self.gy = gy\n self.gz = gz\n self.g_grid = np.array([[gx*gx, gx*gy, gx*gz],[gy*gx, gy*gy, gy*gz],[gz*gx, gz*gy, gz*gz]])\n # rotate the crystal coordinates so that I'm now in the coordinate system \n # given by the zeeman tensor's principal axes", "def orthogonal(shape, dtype=tf.float32, partition_info=None):\n # taken from https://github.com/cooijmanstim/recurrent-batch-normalization\n # taken from https://gist.github.com/kastnerkyle/f7464d98fe8ca14f2a1a\n flat_shape = (shape[0], np.prod(shape[1:]))\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n q = u if u.shape == flat_shape else v # pick the one with the correct shape\n q = q.reshape(shape)\n return tf.constant(q[:shape[0], :shape[1]], dtype)", "def is_ortho(self, leg, **kwargs):\n B = self @ self.adj(leg) # Make and contract with the adjoint\n return B.is_unity(**kwargs)", "def orthogonalize_matrix(m):\n U, __, VT = np.linalg.svd(np.matrix(m))\n return np.dot(U, VT)", "def dcintegrand(z,omegalambda,omegam,omegak):\n return 1./adotovera(z,omegalambda,omegam,omegak)", "def orthonormalize_inplace(self):\n Q = np.linalg.qr(self.components.T)[0].T\n self.components[...] 
= Q", "def _apply_two_mode_gate(G, T, i, j):\n (T[i], T[j]) = (G[0, 0] * T[i] + G[0, 1] * T[j], G[1, 0] * T[i] + G[1, 1] * T[j])\n return T", "def orthonormal_1(dim_n=5):\n pb = []\n for i in range(0,dim_n-1):\n pb.append([1.0/(i+1)]*(i+1) + [-1] + [0]*(dim_n-i-2))\n m = matrix(RDF,pb)\n new_m = []\n for i in range(0,dim_n-1):\n new_m.append([RDF(100000*q/norm(m[i])).ceil()/100000 for q in m[i]])\n return matrix(QQ,new_m)", "def orthogonal_to(vector: ModelParameters) -> ModelParameters:\n new_vector = rand_u_like(vector)\n new_vector = new_vector - new_vector.dot(vector) * vector / math.pow(vector.model_norm(2), 2)\n return new_vector", "def g_tensor(self,gpara,gperp,zeta_a):\n gx = gperp\n gy = gperp\n gz = gpara\n\n self.gx = gx\n self.gy = gy\n self.gz = gz\n self.g_grid = np.array([[gx*gx, gx*gy, gx*gz],[gy*gx, gy*gy, gy*gz],[gz*gx, gz*gy, gz*gz]])\n # rotate the crystal coordinates so that I'm now in the coordinate system \n # given by the zeeman tensor's principal axes\n self.a = Ry(zeta_a) @ self.a\n self.b = Ry(zeta_a) @ self.b\n self.c = Ry(zeta_a) @ self.c", "def ortho_weight(ndim):\n W = numpy.random.randn(ndim, ndim)\n u, s, v = numpy.linalg.svd(W)\n return u.astype('float32')", "def step(self, chunk: th.Tensor) -> th.Tensor:\n if chunk.dim() == 2:\n chunk = chunk[:, None]\n if self.proj is not None:\n chunk = tf.relu(self.proj(chunk))\n out, hx = self.rnns(chunk, self.hx)\n self.hx = hx\n if self.outp is not None:\n out = self.outp(out)\n if self.non_linear is not None:\n out = self.non_linear(out)\n return out", "def ortho_weight(ndim):\n W = rng_np.randn(ndim, ndim)\n u, _, _ = np.linalg.svd(W)\n return u.astype('float32')", "def orthopyroxene():\n\n rho = 3304.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 236.9; C[0,1] = 79.6; C[0,2] = 63.2; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 180.5; C[1,2] = 56.8; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.4; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 84.3; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 79.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 80.1\n\n return C, rho", "def orthogonal_matrix(self):\n return Matrix([[self.cell[0], self.cell[1] * cos(self.cell[5]), self.cell[2] * cos(self.cell[4])],\n [0, self.cell[1] * sin(self.cell[5]),\n (self.cell[2] * (cos(self.cell[3]) - cos(self.cell[4]) * cos(self.cell[5])) / sin(\n self.cell[5]))],\n [0, 0, self.cell[6] / (self.cell[0] * self.cell[1] * sin(self.cell[5]))]])", "def train_normal_equation(self):\n\t\tself.theta = np.dot(np.dot(np.linalg.pinv(np.dot(np.transpose(self.X), self.X)), np.transpose(self.X)), self.y)", "def housegen(x):\n a = linalg.norm(x)\n if a == 0:\n u=x; u[0]=sqrt(2); return u, a\n if x[0] == 0:\n r = 1\n else:\n r =x[0]/abs(x[0])\n u = conj(r)*x/a\n u[0]=u[0]+1\n u=u/sqrt(u[0])\n a=-r*a\n return u, a", "def GL(mu, wid, x, m = 0.5): \n return m * gaussian(mu, wid, x) + (1 - m) * lorentzian(mu, wid, x)", "def gauss_jordan(A):\n if sl.det(A) != 0:\n nrowA = np.shape(A)[0]\n invA = np.identity(nrowA)\n ident = np.identity(nrowA)\n for k in range(nrowA):\n a = np.array(A[:, k]/A[k,k]).reshape(nrowA, 1)\n a[k] = 1-1/A[k,k]\n e = np.zeros_like(a)\n e[k,0] = 1\n\n T = ident - np.dot(a, np.transpose(e))\n A = np.dot(T, A)\n invA = np.dot(T, invA)\n return invA \n else:\n print(\"La matriz es singular, elija otro 
metodo.\")", "def orthogonality(matrix):\n norm = jnp.sqrt(jnp.sum(matrix**2, axis=1))\n matrix_normalized = matrix / (norm[:, None] + EPSILON)\n return matrix_normalized @ matrix_normalized.transpose()", "def OAVolterra_direct(p0,wD,dt,Nt):\n # INITIALIZATION ------------------------------------------------------\n pz = np.zeros(Nt) # oa signal at detection point\n K0 = wD # oa propagator: K(0,0) \n K1 = wD*np.exp(-wD*dt) # oa propagator: K(1,0) \n K1_K0 = np.exp(-wD*dt) # quotient: K(i+1)/K(i)\n\n # SOLVE FORWARD PROBLEM VIA RECURRENCE RELATION -----------------------\n I = 0 \n pz[0] = p0[0] \n for i in range(1,Nt):\n I = I*K1_K0 + 0.5*dt*(K1*p0[i-1] + K0*p0[i])\n pz[i] = p0[i] - I\n return pz", "def __proj_onto_nnorthant(M):\n\tP = np.asmatrix(np.zeros((M.shape[0], M.shape[1])))\n\tfor i in range(M.shape[0]):\n\t for j in range(M.shape[1]):\n\t if M[i,j] > 0:\n\t\t P[i,j] = M[i,j]\n\treturn P", "def glucose_c2(self, m_t, g_t, t_G):\n return -(1/t_G)*m_t+(1/t_G)*g_t", "def _one_step(self, gamma, axis, opp_scores):\n opp_exp = opp_scores**gamma\n s = _np.array([])\n for i in range(self.d[axis]):\n s = _np.append(s, _np.take(opp_exp, self._neighb[axis][i]).sum())\n return s/_np.mean(s)", "def doubleStep(self, rotorL, rotorM, rotorR):\n Mpos = rotorM.GetRotorPosition() #Find the current position of the middle rotor\n Mturn = rotorM.GetTurnoverNum() #Find the turnover position of the middle rotor\n \n if (Mpos == Mturn): #If the rotor is at its turnover position\n if self.visuals == 'Y': #And the user wants to visualise encryption\n print('Double Step') #Notify the user that there is a double step\n rotorM.incrementRotor() #Increment the middle rotor\n rotorL.incrementRotor() #And increment the left rotor", "def g(self, X):\n\n return (X[0])**2 - 2*X[0] + X[1]**3 - 2", "def a_realization(self):\n if self.t==1:\n return self.kmonomial()\n else:\n return self.kHallLittlewoodP()", "def get_tetrahedral_set(v: np.ndarray, ortho=None) -> tuple:\n v = np.reshape(v, (3,))\n if ortho is None:\n # default choice of ortho based on `v`\n if v[0] != 0. and v[1] != 0.: # (x, y, z)\n ortho_v1 = np.array([-v[1], v[0], 0.])\n elif v[0] != 0. and v[2] != 0.: # (x, 0, z)\n ortho_v1 = np.array([-v[2], 0., v[0]])\n elif v[1] != 0. 
and v[2] != 0.: # (0, y, z)\n ortho_v1 = np.array([0., -v[2], v[1]])\n elif v[0] != 0.: # (x, 0, 0)\n ortho_v1 = np.array([0., v[0], 0.])\n elif v[1] != 0.: # (0, y, 0)\n ortho_v1 = np.array([0., 0., v[1]])\n elif v[2] != 0.: # (0, 0, z)\n ortho_v1 = np.array([v[2], 0., 0.])\n else:\n raise ValueError(\"can not rotate zero vector\")\n elif abs(np.dot(v, ortho)) <= np.finfo(np.float64).eps:\n ortho_v1 = ortho\n else:\n angle = np.arccos(\n np.dot(v, ortho) / np.linalg.norm(v) * np.linalg.norm(ortho)\n ) * 180 / np.pi\n raise ValueError(\"argument vectors are not orthogonal, \"\n \"angle = %f degrees\" % angle)\n\n # perform rotations\n theta = 1.9106332362490184 # ~109.4˚ in rad\n q1 = Quaternion.rotator(ortho_v1, theta)\n q2 = Quaternion.rotator(v, np.pi / 3)\n v2 = q1.rotate(v)\n v3 = q2.rotate(v2)\n v4 = q2.rotate(v2)\n return v2, v3, v4", "def housegen(x):\n\n a = np.linalg.norm(x)\n if a == 0:\n u = x\n u[0] = np.sqrt(2)\n return u, a\n \n if x[0] == 0:\n r = 1\n else:\n r = x[0] / abs(x[0])\n\n u = np.conj(r) * x / a\n u[0] = u[0] + 1\n u = u / np.sqrt(u[0])\n \n a = -r*a\n\n return u, a", "def relu(self):\n return self * self.ge(0)", "def dihedral_calculator():\n\n\t# Prime with first 3 points\n\tp1 = Vector3((yield None))\n\tp2 = Vector3((yield None))\n\tp3 = Vector3((yield None))\n\n\t# Set up for first angle\n\tlastpoint = p3\n\tlastdisp = p3 - p2\n\tlastnormal = ((p2 - p1) @ lastdisp).normalize()\n\n\tangle = None\n\n\t# For each point starting with the 4th, we can compute a new angle\n\twhile True:\n\n\t\t# Yield the last angle (None the first time), get the next point\n\t\tnextpoint = Vector3((yield angle))\n\n\t\t# Displacement from previous point to current\n\t\tnextdisp = nextpoint - lastpoint\n\n\t\t# Normal vector to plane containing last 3 points\n\t\tnextnormal = (lastdisp @ nextdisp).normalize()\n\n\t\t# This one's complicated... 
see step 3 in source.\n\t\tx = lastnormal * nextnormal\n\t\ty = (lastnormal @ lastdisp.normalize()) * nextnormal\n\t\tangle = -math.atan2(y, x)\n\n\t\t# Current values used as previous in next loop\n\t\tlastpoint = nextpoint\n\t\tlastdisp = nextdisp\n\t\tlastnormal = nextnormal", "def generate_half_space_normals(traj, policy, mdp):\n #TODO I could probably generically write this to find a dictionary of all action: counts\n init_state,init_action = traj[0]\n #print('init_state',init_state, 'init_action',init_action)\n #calculate \\bar{\\mu}_\\pi,s_a\n f_counts = generate_feature_counts(traj,mdp)\n #get feature counts in the order specified by mdp.features\n mu_sa = np.array(f_counts)\n #print('mu_sa',mu_sa)\n\n #get mu_sb for all other actions starting at state s and following policy\n actions = list(mdp.actions(init_state))\n #print(actions)\n actions.remove(init_action)\n \n mu_normals = []\n for a in actions:\n #print('a', a)\n new_start = mdp.go(init_state, a)\n #print('new',new_start)\n demo_b = generate_demonstration(new_start, policy, mdp)\n demo_b = [(init_state, a)] + demo_b\n #print(demo_b)\n mu_sb = np.array(generate_feature_counts(demo_b, mdp))\n #print('mu_sb',mu_sb)\n mu_normals.append(mu_sa - mu_sb)\n\n return mu_normals", "def gouy_phase(w0, lambda0, z, z0=0):\n zR = z_rayleigh(w0, lambda0)\n return -np.arctan2(z-z0, zR)", "def rotate_along(axis: Tensor) -> Tensor:\n W = torch.einsum('ijk,j->ik', levi_civita.to(axis), axis)\n return expm(W)", "def get_orthogonal_vec2d(vec):\n ortho = np.array([-vec[1], vec[0]])\n return ortho", "def homozygotie(self):\n if self.allele[1] == 0.0:\n self.homozygote = True", "def sliding_Lorentz(l_onde,amp,w0,gamma):\n return amp/(w0 - (1e4/l_onde) - 1j*gamma)", "def retarded_gf(h_ao, s_ao, energy, gamma_left, gamma_right):\n return np.linalg.inv(energy*s_ao - h_ao + (1j/2.)*(gamma_left + gamma_right))", "def alg(c):\n return c[0]*G[0] + c[1]*G[1] + c[2]*G[2]", "def orthogonalize(*vlist, orthonormal=False):\n\n if not all(isinstance(vec, Vector) for vec in vlist):\n raise TypeError('Each element must be of Type Vector')\n\n ortho_vlist = []\n for i, term in enumerate(vlist):\n for j in range(i):\n term -= ortho_vlist[j].projection(vlist[i])\n # TODO : The following line introduces a performance issue\n # and needs to be changed once a good solution for issue #10279 is\n # found.\n if simplify(term).equals(Vector.zero):\n raise ValueError(\"Vector set not linearly independent\")\n ortho_vlist.append(term)\n\n if orthonormal:\n ortho_vlist = [vec.normalize() for vec in ortho_vlist]\n\n return ortho_vlist", "def homogenize(self):\n if abs(self._vector[1]) < np.finfo(float).eps:\n self._vector[0] = 1\n else:\n self._vector[0] = self._vector[0] / self._vector[1]\n self._vector[1] = 1\n return self", "def enthalpy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n h = g - temp*g_t\n return h", "def\tfOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw):\n\tSum=Vc1+Vc2+Vc3+Vk+Vrw\n\tOrmGDen=(Vc1*Dc1+Vc2*Dc2+Vc3*Dc3+Vk*Dk+Vrw*Dw)/Sum\n\treturn OrmGDen", "def T(self, *, inplace: bool = False) -> SelfAdjointUnitaryGate:\n if self.power == 1 and self.is_conjugated(\n ) and not self.is_transposed():\n return PowerMatrixGate.conj(self, inplace=inplace)\n else:\n return PowerMatrixGate.T(self, inplace=inplace)", "def rayleigh(v0):\r\n # Need to sample the angle theta from the phase function\r\n loop_condition = True\r\n while loop_condition:\r\n eps = random.random()*np.pi # Sampled x coordinate from 0 to pi\r\n eta = random.random()*(3/4)*2 # 
Sampled y coordinate from 0 to max of Rayleigh phase function for unpolarised light\r\n if eta < 3/4*(1 + (np.cos(eps))**2): # Checks if eta is less than the Rayleigh phase function using the angle eps\r\n loop_condition = False\r\n \r\n # Get a new direction vector for the photon\r\n v = scattering_direction(v0, eps)\r\n return v", "def __truediv__(self, o): \n return self * o.inv()", "def g(self, x):\n return x * (1 - x)", "def Rotation_EQJ_GAL():\n # This rotation matrix was calculated by the following script\n # in this same source code repository:\n # demo/python/galeqj_matrix.py\n return RotationMatrix([\n [-0.0548624779711344, +0.4941095946388765, -0.8676668813529025],\n [-0.8734572784246782, -0.4447938112296831, -0.1980677870294097],\n [-0.4838000529948520, +0.7470034631630423, +0.4559861124470794]\n ])", "def _AffineGrothendieckPolynomial(self, la, m):\n return self._AffineGrothendieck(la.to_core(self.k).to_grassmannian(),m)", "def geglu(x: Tensor) ->Tensor:\n assert x.shape[-1] % 2 == 0\n a, b = x.chunk(2, dim=-1)\n return a * F.gelu(b)", "def gale_transform(self):\n if not self.is_compact(): raise ValueError('Not a polytope.')\n\n A = matrix(self.n_vertices(), \n [ [1]+list(x) for x in self.vertex_generator()])\n A = A.transpose()\n A_ker = A.right_kernel()\n return A_ker.basis_matrix().transpose().rows()", "def timeintegrand(z,omegalambda,omegam,omegak):\n\n return 1./((1+z)*adotovera(z,omegalambda,omegam,omegak))", "def cholesky(tensor, damping):\n identity = tf.eye(tensor.shape.as_list()[0], dtype=tensor.dtype)\n damping = tf.cast(damping, dtype=tensor.dtype)\n return tf.linalg.cholesky(tensor + damping * identity)", "def orthogonalization_matrix(S,type='symmetric'):\n if type == 'Schmidt':\n pass\n elif type == 'symmetric':\n val, vec = np.linalg.eig(S) \n val_minus_half = (np.diag(val**(-0.5))) \n X = np.dot(vec,np.dot(val_minus_half,np.transpose(vec))) \n elif type == 'canonical':\n val, vec = np.linalg.eig(S) \n val_minus_half = (np.diag(val**(-0.5))) \n X = np.dot(vec,val_minus_half) \n return X", "def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )", "def M_g(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating g\", file=self.logfile)\n M_mu1 = np.lib.stride_tricks.as_strided(self.mu_pad,\n shape=[self.P+1, self.L_h],\n strides=[self.mu_pad.strides[-1], self.mu_pad.strides[-1]])\n\n M_mu1 = M_mu1[::-1,:]\n M_mu2 = np.transpose(M_mu1[1:,:])\n M_mu1 = M_mu1*self.e2\n\n M_mu = np.dot(M_mu1, M_mu2)\n v_mu = M_mu[0,:]\n M_mu = M_mu[1:,:]\n\n M_R = np.zeros((self.P,self.P+1))\n for p in range(1,self.P+1):\n for q in range(0,self.P+1):\n M_R[p-1,q] = np.sum(np.diag(self.R, q-p)[:self.L_h-max(p,q)]*self.e2[max(p,q):self.L_h])\n\n v_R = M_R[:,0]\n M_R = M_R[:,1:]\n\n self.alpha_g = np.dot(np.linalg.inv(M_mu + M_R), v_mu+v_R)\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self._propagate_A()" ]
[ "0.61191195", "0.57736254", "0.5773286", "0.5687321", "0.56248266", "0.5618349", "0.5608474", "0.55393374", "0.55151355", "0.541601", "0.53893787", "0.5387959", "0.53768814", "0.53333694", "0.5328658", "0.53105354", "0.52937436", "0.5284233", "0.52705336", "0.5248712", "0.5246222", "0.52310944", "0.52298504", "0.5229155", "0.5214519", "0.52100176", "0.5206829", "0.5199494", "0.51981837", "0.5166135", "0.5165962", "0.514721", "0.5137006", "0.51346284", "0.5132191", "0.51218706", "0.5120947", "0.51165557", "0.5113971", "0.51138234", "0.51049685", "0.5096456", "0.5093516", "0.5085262", "0.50584066", "0.50546545", "0.5051654", "0.5029293", "0.50156015", "0.5012843", "0.5010751", "0.49979025", "0.49971917", "0.49912104", "0.49742904", "0.49735042", "0.49439108", "0.49428526", "0.49425486", "0.4940735", "0.49386245", "0.4935439", "0.49305376", "0.49284378", "0.49068373", "0.49004638", "0.48942852", "0.48919097", "0.48843902", "0.48806843", "0.48787373", "0.48624215", "0.48615226", "0.48396805", "0.48333183", "0.48318577", "0.48309022", "0.48275456", "0.48244902", "0.48243958", "0.4823632", "0.48231316", "0.481755", "0.48166424", "0.4813448", "0.481037", "0.47985733", "0.47958055", "0.47871003", "0.47817615", "0.47790253", "0.4776363", "0.47726437", "0.47651199", "0.47614256", "0.47512007", "0.47417796", "0.4731895", "0.473047", "0.47289422", "0.47239572" ]
0.0
-1
Compute an m-step Arnoldi factorization of `matvec`, with m = min(`it`,`num_krylov_vecs`). The factorization will do at most `num_krylov_vecs` steps. The returned arrays `kv` and `H` will satisfy the Arnoldi recurrence relation ``` matrix @ Vm - Vm @ Hm - fm * em = 0 ``` with `matrix` the matrix representation of `matvec` and `Vm = jax.numpy.transpose(kv[:it, :])`, `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1) and `em` a cartesian basis vector of shape `(1, kv.shape[1])` with `em[0, -1] == 1` and 0 elsewhere.
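Editorial gloss, not part of the dataset row: the reconstructed recurrence above is the standard m-step Arnoldi relation, which in conventional notation reads

\[
A V_m = V_m H_m + f_m e_m^{\top}, \qquad f_m = h_{m+1,m}\, v_{m+1},
\]

so `matrix @ Vm - Vm @ Hm - fm * em` is precisely the residual that vanishes for a consistent factorization.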
def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,
                  eps):
    Z = jax.numpy.linalg.norm(v0)
    v = v0 / Z
    krylov_vectors = jax.ops.index_update(krylov_vectors,
                                          jax.ops.index[start, :],
                                          jax.numpy.ravel(v))
    H = jax.lax.cond(
        start > 0, start,
        lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,
        lambda x: H)

    # body of the arnoldi iteration
    def body(vals):
        krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals
        Av = matvec(vector, *args)
        initial_vals = [Av, krylov_vectors, i, H]
        Av, krylov_vectors, _, H = jax.lax.fori_loop(
            0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)
        norm = jax.numpy.linalg.norm(Av)
        Av /= norm
        H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)
        krylov_vectors = jax.ops.index_update(krylov_vectors,
                                              jax.ops.index[i + 1, :],
                                              jax.numpy.ravel(Av))
        return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]

    def cond_fun(vals):
        # Continue loop while iteration < num_krylov_vecs and norm > eps
        _, _, _, _, norm, _, iteration, _ = vals
        counter_done = (iteration >= num_krylov_vecs)
        norm_not_too_small = norm > eps
        continue_iteration = jax.lax.cond(counter_done,
                                          _, lambda x: False,
                                          _, lambda x: norm_not_too_small)
        return continue_iteration

    initial_norm = v.real.dtype.type(1.0+eps)
    initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,
                      num_krylov_vecs]
    final_values = jax.lax.while_loop(cond_fun, body, initial_values)
    kvfinal, Hfinal, _, _, norm, _, it, _ = final_values
    return kvfinal, Hfinal, it, norm < eps
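To make the recurrence concrete, here is a self-contained NumPy sketch (an editorial illustration under my own naming, not the dataset's JAX code; it omits `_arnoldi_fact`'s jit-compatible control flow and early termination) that builds an m-step factorization and verifies the relation numerically:

```python
import numpy as np

def arnoldi(A, v0, m):
    # Plain m-step Arnoldi with modified Gram-Schmidt; rows of V hold the
    # Krylov vectors, matching the layout used by `_arnoldi_fact` above.
    V = np.zeros((m + 1, v0.size))
    H = np.zeros((m + 1, m))
    V[0] = v0 / np.linalg.norm(v0)
    for i in range(m):
        w = A @ V[i]
        for j in range(i + 1):          # orthogonalize against previous vectors
            H[j, i] = np.vdot(V[j], w)
            w = w - H[j, i] * V[j]
        H[i + 1, i] = np.linalg.norm(w)
        V[i + 1] = w / H[i + 1, i]
    return V, H

rng = np.random.default_rng(1)
A = rng.standard_normal((8, 8))
m = 5
V, H = arnoldi(A, rng.standard_normal(8), m)
Vm, Hm = V[:m].T, H[:m, :m]
fm = (V[m] * H[m, m - 1])[:, None]      # f_m = h_{m+1,m} * v_{m+1}
em = np.zeros((1, m)); em[0, -1] = 1.0  # cartesian basis vector e_m
assert np.allclose(A @ Vm - Vm @ Hm - fm @ em, 0.0, atol=1e-10)
```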
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]", "def estimate_ivec(nt, ft, v_matrix, vtv_matrix, eye=None):\n v_dim = v_matrix.shape[1]\n n_gauss = nt.shape[1]\n\n # Construct eye if necessary\n if eye is None:\n eye = Extractor.to_rfpf(np.eye(v_dim, dtype=v_matrix.dtype).T)\n\n it = eye.T.reshape((1, -1))\n vtvt = vtv_matrix.T.reshape((n_gauss, -1))\n\n b = np.dot(ft, v_matrix).T\n lt = np.dot(nt, vtvt) + it\n\n l = lt.reshape((vtv_matrix.shape[1], vtv_matrix.shape[0])).T\n\n out = Extractor.solve(l, b)\n\n return out", "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, 
vector.shape)\n return [vector, krylov_vectors, n, H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact", "def calc_kmatrix_magnetic_psi(kvec, mlat, eps=1e-11):\n # First use mlat to create (angs, num_neis, bls, tvals, ons)\n #\n # angs : list\n # each row represents a site in the lattice. 
Each entry in the row represents the angles to that site's\n # neighbors\n # num_nei : list or array (num_sites x num_sites)\n # Tells how many neighbors of on each kind of sublattice. For example a honeycomb lattice would be\n # num_nei = [[0,3], [3,0]] because each point has 3 neighbors of the other lattice type.\n # bls : len(angs) x float array or int\n # bondlengths, with dimensions equal to dimensions of angs.\n # default value is an int, -1, indicating that all bond lengths are 1\n # tvals : len(angs) x 1 float array or int\n # dimension equal to number of different kinds of springs in unit cell x 1. represents omega_k\n # ons : array (dimension = num_sites per unit cell)\n # represents omega_g\n xy = mlat.lattice.xy\n NL, KL = mlat.NL, mlat.KL\n num_sites, NN = np.shape(NL)\n Omg, OmK = mlat.Omg, mlat.OmK\n PVx, PVy = mlat.PVx, mlat.PVy\n if PVx is None or PVy is None:\n PVx = np.zeros_like(NL, dtype=float)\n PVy = np.zeros_like(NL, dtype=float)\n\n # num_sites is the total number of particles\n mm = np.zeros([2 * num_sites, 2 * num_sites], dtype='complex128')\n\n # checking\n # print 'np.shape(Omg) = ', np.shape(Omg)\n # print 'np.shape(NL) = ', np.shape(NL)\n # print 'np.shape(PVx) = ', np.shape(PVx)\n\n # Go through each site and fill in rows i and NP + i for that site (psi_L and psi_R)\n kk = 0\n for ii in mlat.inner_indices:\n # grav frequency for this particle (note the difference in indexing is due to inner/outer split)\n omg = Omg[kk]\n\n # pinning/gravitational matrix -- note: will divide later by factor of -2\n mm[ii, ii] += -2. * omg\n mm[num_sites + ii, num_sites + ii] += 2. * omg\n\n for nn in range(NN):\n # the index of the gyroscope i is connected to (particle j)\n ni = NL[ii, nn]\n # true connection?\n k = KL[ii, nn]\n # spring frequency for this connection\n omk = OmK[ii, nn]\n\n if abs(k) > eps:\n # Compute the vector connecting site ii to site ni\n # We index PVx as [i,nn] since it is the same shape as NL (and corresponds to its indexing)\n diffx = xy[ni, 0] - xy[ii, 0] + PVx[ii, nn]\n diffy = xy[ni, 1] - xy[ii, 1] + PVy[ii, nn]\n alphaij = np.arctan2(diffy, diffx)\n\n rij_mag = np.sqrt(diffx ** 2 + diffy ** 2)\n # print 'rij mag', rij_mag\n if rij_mag < eps:\n raise RuntimeError('Distance between connected sites is very near zero (less than epsilon)!')\n rij_mag = 1\n\n # get the magnitude of l, the length of the pendulum, wrt unit length\n als = rij_mag ** 2 * (mlat.lp['aoverl']) ** 2\n\n # These are Nash SI eqn S6, multiplied by (l^2/I\\omega)\n fpara_p = - omk * (1 - (1. / 12.) * als) / rij_mag ** 5\n fpara_q = omk * (1 + (1. / 6.) * als) / rij_mag ** 5\n fperp_p = omk * 0.25 * (1 + (1. / 3.) * als) / rij_mag ** 5\n fperp_q = -omk * 0.25 * (1 + (1. / 3.) * als) / rij_mag ** 5\n\n omk_i_plus = fpara_p + fperp_p\n omk_i_minus = fpara_p - fperp_p\n omk_j_plus = fpara_q + fperp_q\n omk_j_minus = fpara_q - fperp_q\n\n # Form kfactor\n if np.abs(PVx[ii, nn]) > eps or np.abs(PVy[ii, nn]) > eps:\n kfactor = np.exp(1j * (PVx[ii, nn] * kvec[0] + PVy[ii, nn] * kvec[1]))\n else:\n kfactor = 1.0\n\n # Create phase factors\n expi2t = np.exp(1j * 2. * alphaij)\n exp_negi2t = np.exp(-1j * 2. 
* alphaij)\n\n # (psi_L psi_L components)\n # add top left chunk: -/+1/2 Omk, note: will divide by -2 later\n mm[ii, ii] += omk_i_plus\n if ni in mlat.inner_indices:\n mm[ii, ni] += -omk_j_plus * kfactor\n\n # (psi_L psi_R components) top right chunk\n mm[ii, ii + num_sites] += omk_i_minus * expi2t\n if ni in mlat.inner_indices:\n mm[ii, ni + num_sites] += -omk_j_minus * expi2t * kfactor\n\n # (psi_R psi_L components) bottom left chunk\n mm[ii + num_sites, ii] += -omk_i_minus * exp_negi2t\n if ni in mlat.inner_indices:\n mm[ii + num_sites, ni] += omk_j_minus * exp_negi2t * kfactor\n\n # (psi_R psi_R components) bottom right chunk\n mm[ii + num_sites, ii + num_sites] += -omk_i_plus\n if ni in mlat.inner_indices:\n mm[ii + num_sites, ni + num_sites] += omk_j_plus * kfactor\n\n kk += 1\n\n return 0.5 * mm * (-1j)", "def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod", "def sparse_expectation(mat, vec):\n return np.vdot(vec, mat.dot(vec)).real", "def evolve(self, k_vec, Nt,**kwargs):\n \n M_eff = np.eye((self.Nd), dtype=complex) # aux matrix\n T = 1.\n for it in range(Nt):\n \n # update the Hamiltonian for time-inteval\n self.updateH(k_vec, it)\n\n # return eigenenergies and vectors\n E_k, U = lg.eig(self.H_kc) \n\n # U^-1 * exp(H_d) U\n U_inv = lg.inv(U)\n\n # construct a digonal matrix out of a vector\n M1 = (np.exp(-1.j*E_k*T) * U_inv.T).T\n\n #MM = np.dot(U_inv,np.dot(H_M, U))\n MM = np.dot(U,M1)\n M_eff = np.dot(M_eff,MM)\n # end of loop\n Ek, Uk = lg.eig( M_eff )\n idx = (np.log(Ek).imag).argsort()\n Efl_k = np.log(Ek).imag[idx]\n Ufl_k = Uk[idx]\n return Efl_k, Ufl_k", "def posdef_eig_svd(mat):\n evals, evecs, _ = tf.svd(mat)\n\n return evals, evecs", "def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x", "def _matvec(x):\n return _normal_matvec(matvec, x)", "def eigsh(A, M = None, k = 6, sigma = None, which = 'LM', v0=None,\n ncv = None, maxiter = None, tol = 0., return_eigenvectors = True,\n Minv = None, OPinv = None, mode = 'normal'):\n if M is not None:\n raise NotImplementedError(\"M is not currently supported!\")\n if v0 is not None:\n raise NotImplementedError(\"v0 is not currently supported!\")\n if ncv is not None:\n raise NotImplementedError(\"ncv is not currently supported!\")\n if Minv is not None:\n raise NotImplementedError(\"Minv is not currently supported!\")\n if OPinv is not None:\n raise NotImplementedError(\"OPinv is not currently supported!\")\n inp_data = FrovedisFeatureData(A, dense_kind='rowmajor')\n X = inp_data.get()\n x_dtype = inp_data.get_dtype()\n x_itype = inp_data.get_itype()\n dense = inp_data.is_dense()\n nrows = inp_data.numRows()\n ncols = inp_data.numCols()\n\n if nrows != ncols:\n raise ValueError('expected squared symmetric matrix (shape=%s)' % (inp_data.shape,))\n if k <= 0:\n raise ValueError('k must be greater than 0.')\n if k >= nrows:\n raise ValueError('k must be less than or equal to N for N * N square matrix.')\n if sigma is not None and not dense:\n raise ValueError('currently sigma is only supported for dense matrices.')\n if sigma is None:\n sigma = np.finfo(np.float32).max\n\n if which not in ['LM', 'SM', 'LA', 'SA', 'BE']:\n raise ValueError('which must be one of LM, SM, LA, SA, or BE')\n if mode in ['buckling', 'cayley']:\n raise ValueError('currenly normal mode is only supported!')\n if maxiter 
is None:\n maxiter = 10 * nrows\n wantEv = return_eigenvectors\n (host, port) = FrovedisServer.getServerInstance()\n res = rpclib.compute_eigsh(host, port, X.get(),\n k, which.encode('ascii'),\n sigma, maxiter, wantEv,\n tol, x_dtype,\n x_itype, dense)\n excpt = rpclib.check_server_exception()\n if excpt[\"status\"]:\n raise RuntimeError(excpt[\"info\"])\n sptr = res[\"eigenval\"]\n uptr = res[\"eigenvec\"]\n m_m = res['m']\n k_k = res['k']\n eigval = FrovedisVector({'dptr' : sptr, 'size' : k_k},\n dtype = TypeUtil.to_numpy_dtype(x_dtype)).to_numpy_array()\n if wantEv:\n eigvec = FrovedisDenseMatrix('C', {'dptr' : uptr, 'nrow' : m_m, 'ncol' : k_k},\n dtype = TypeUtil.to_numpy_dtype(x_dtype)).to_numpy_array()\n return eigval, eigvec\n else:\n return eigval", "def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence,\n V: jax.ShapedArray, H: jax.ShapedArray,\n tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n v = A_mv(V[:, k], *A_args)\n v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T)\n v_norm = jnp.linalg.norm(v_new)\n r_new = v_new / v_norm\n # Normalize v unless it is the zero vector.\n r_new = jax.lax.cond(v_norm > tol,\n lambda x: x[0] / x[1],\n lambda x: 0.*x[0],\n (v_new, v_norm)\n )\n H = jax.ops.index_update(H, jax.ops.index[:, k], H_k)\n H = jax.ops.index_update(H, jax.ops.index[k+1, k], v_norm)\n V = jax.ops.index_update(V, jax.ops.index[:, k+1], r_new)\n return V, H", "def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):\n x = tf.nn.l2_normalize(x)\n for _ in range(num_steps):\n x = eig_one_step(x, learning_rate, vector_prod_fn)\n return x", "def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable:\n\n @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6))\n def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho):\n \"\"\"\n Jitted lanczos routine.\n Args:\n matvec: A callable implementing the matrix-vector product of a\n linear operator.\n arguments: Arguments to `matvec` additional to an input vector.\n `matvec` will be called as `matvec(init, *args)`.\n init: An initial input state to `matvec`.\n ncv: Number of krylov iterations (i.e. 
dimension of the Krylov space).\n neig: Number of eigenvalue-eigenvector pairs to be computed.\n landelta: Convergence parameter: if the norm of the current Lanczos vector\n falls below `landelta`, iteration is stopped.\n reortho: If `True`, reorthogonalize all krylov vectors at each step.\n This should be used if `neig>1`.\n Returns:\n jax.numpy.ndarray: Eigenvalues\n list: Eigenvectors\n \"\"\"\n\n def body_modified_gram_schmidt(i, vals):\n vector, krylov_vectors = vals\n v = krylov_vectors[i, :]\n vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors]\n\n def body_lanczos(vals):\n current_vector, krylov_vectors, vector_norms = vals[0:3]\n diagonal_elements, matvec, args, _ = vals[3:7]\n threshold, i, maxiteration = vals[7:]\n norm = jax.numpy.linalg.norm(current_vector)\n normalized_vector = current_vector / norm\n normalized_vector, krylov_vectors = jax.lax.cond(\n reortho, True,\n lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt,\n [normalized_vector, krylov_vectors]),\n False, lambda x: [normalized_vector, krylov_vectors])\n Av = matvec(normalized_vector, *args)\n\n diag_element = jax.numpy.vdot(normalized_vector, Av)\n\n res = jax.numpy.reshape(\n jax.numpy.ravel(Av) -\n jax.numpy.ravel(normalized_vector) * diag_element -\n krylov_vectors[i - 1] * norm, Av.shape)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :],\n jax.numpy.ravel(normalized_vector))\n\n vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1],\n norm)\n diagonal_elements = jax.ops.index_update(diagonal_elements,\n jax.ops.index[i - 1],\n diag_element)\n\n return [\n res, krylov_vectors, vector_norms, diagonal_elements, matvec, args,\n norm, threshold, i + 1, maxiteration\n ]\n\n def cond_fun(vals):\n _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals\n\n def check_thresh(check_vals):\n val, thresh = check_vals\n return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)\n\n return jax.lax.cond(iteration <= maxiteration, [norm, threshold],\n check_thresh, False, lambda x: x)\n\n numel = jax.numpy.prod(init.shape)\n krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype)\n norms = jax.numpy.zeros(ncv, dtype=init.dtype)\n diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype)\n\n norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0)\n\n norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0),\n dtype=init.dtype)).dtype\n initvals = [\n init, krylov_vecs, norms, diag_elems, matvec, arguments,\n norms_dtype.type(1.0), landelta, 1, ncv\n ]\n output = jax.lax.while_loop(cond_fun, body_lanczos, initvals)\n final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output\n krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :],\n jax.numpy.ravel(final_state))\n\n A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag(\n norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1)\n eigvals, U = jax.numpy.linalg.eigh(A_tridiag)\n eigvals = eigvals.astype(A_tridiag.dtype)\n\n def body_vector(i, vals):\n krv, unitary, states = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m + 1, :] * unitary[m, n])\n return [krv, unitary, states]\n\n state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype)\n _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1),\n body_vector,\n [krylov_vecs, U, state_vectors])\n\n return jax.numpy.array(eigvals[0:neig]), [\n jax.numpy.reshape(vectors[n, 
:], init.shape) /\n jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig)\n ]\n\n return jax_lanczos", "def get_eigvals_eigvects(\n num_layers,\n numeric_matrices_eV_over_angsquared,\n layer_mass_amu,\n use_banded_algorithm=False,\n):\n # Based on the units in input, and indicating with:\n # - [hbar omega] the numeric value for the frequency in meV => hbar omega = [hbar omega] * meV\n # - [K] the numeric value of K in eV/ang^2\n # - [m] the layer mass in amu\n # we have (we omit the sign, and for units considerations we 'drop' U):\n # omega^2 = K / m =>\n # (hbar omega)^2 = hbar^2 * K / m =>\n # [hbar omega]^2 * meV^2 = hbar^2 * [K] / [m] * eV/ang^2 / amu = [K] / [m] * hbar^2 * eV/ang^2 / amu =>\n # [hbar omega]^2 = = [K] / [m] * ( hbar^2 * eV/ang^2 / amu / meV^2 )\n # so that the conversion factor is the last bracketed term:\n # conversion_factor = hbar^2 * eV / (angstrom^2 * amu * meV^2)\n conversion_factor = 4180.15925\n # NOTE: for simplicity, the conversion is applied at the very end\n\n if use_banded_algorithm:\n # 3 blocks (below, same layer, and above) of size 3 => total width of 9\n # Since we only store the upper part, we only need a width of 4 (diagonal + 3 superdiagonals)\n K_matrix = np.zeros((4, num_layers * 3))\n else:\n K_matrix = np.zeros((num_layers * 3, num_layers * 3))\n\n # Note: I construct -K, actually\n for block_idx in range(num_layers):\n # Interaction with upper layer\n if block_idx < num_layers - 1: # Not in the last layer\n current_block = np.array(\n numeric_matrices_eV_over_angsquared[\n block_idx % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx + 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n # Interaction with lower layer\n if block_idx > 0: # Not in the first layer\n previous_block = np.array(\n numeric_matrices_eV_over_angsquared[\n (block_idx - 1) % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx - 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n\n # We want to get the eigenvalues of omega^2 U = - 1/M_layer K U\n K_matrix /= layer_mass_amu\n\n # Get frequencies (eigvals) and eigenvectors (for mode analysis)\n if use_banded_algorithm:\n eigvals, eigvects = scipy.linalg.eig_banded(K_matrix, lower=False)\n else:\n eigvals, eigvects = np.linalg.eigh(K_matrix)\n\n eigvals *= conversion_factor\n\n ## The first three should be acoustic i.e. almost zero; the rest should be positive\n ## I don't check as depending on the units it's hard to define a correct absolute energy\n # assert np.sum(np.abs(eigvals[:3])) < 1.0e-8\n\n # Remove the first three acoustic modes\n return eigvals[3:], eigvects[:, 3:]", "def FV_moVMF(xx, vmf):\n \n # Attributes of the moVMF.\n #mean_dir = vmf.cluster_centers_ # Shape: (K, d)\n kappa = vmf.concentrations_ # Shape: (K, )\n weights = vmf.weights_ # Shape: (K, )\n n_comps = vmf.n_clusters # Integer scalar\n \n # Encoded document.\n xx = np.atleast_2d(xx) # Shape: (T, d) \n xx = normalize(xx) # Normalize input data\n T = xx.shape[0] # Doc. length\n d = xx.shape[1] # Dimensionality of word/feat. 
vectors\n \n # Array to store the result.\n out = np.zeros((n_comps, d), dtype=np.float32) # Shape: (K, d)\n \n # Posterior probabilities.\n probs = vmf.log_likelihood(xx) # Shape: (T, K)\n \n # Vectorization of the sum over t of `gamma_t(i)*x_t`.\n probs_xx = np.dot(probs, xx) # Shape: (K, d)\n \n # Derivatives with respect to the mean directions.\n d_mean = d * probs_xx # Shape: (K, d)\n \n # Normalization.\n eps = 1e-6 # Avoids dividing by 0\n np.divide(d_mean, (kappa.reshape((n_comps, 1)) + eps), out=d_mean)\n \n out = d_mean / (weights.reshape((n_comps, 1)) + eps)\n \n return out.flatten()", "def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (self.Epsilon / self.Beta) * self.uv_bound\r\n return K", "def SVM_train(Ktrain,y,lbda_vec):\r\n n = Ktrain.shape[0]\r\n for idx, lbda in enumerate(lbda_vec): \r\n C = 1/(2*lbda*n)\r\n P = matrix(Ktrain, tc=\"d\")\r\n q = - matrix(y,tc=\"d\")\r\n G = matrix( np.concatenate( (np.diagflat(y) , -np.diagflat(y) ), axis=0 ),tc=\"d\" )\r\n h1 = C * np.ones((n,1))\r\n h2 = np.zeros((n,1)) \r\n h = matrix(np.concatenate((h1,h2),axis=0))\r\n\r\n solvers.options['show_progress'] = False\r\n \r\n sol = solvers.qp(P,q,G,h) \r\n a = np.asarray(sol['x'])\r\n\r\n #alpha is sparse\r\n a[np.where(np.abs(a) < 1e-4)] = 0\r\n y_svm = np.dot(Ktrain,a)\r\n\r\n print(\"Précision pour lambda = \" + str(lbda) + \" :\", accuracy(y_svm,y))", "def get_vf_matrix(self, geom_dict, view_matrix, obstr_matrix, list_pvrow):\n n_all_surfaces = view_matrix.shape[0]\n view_factors = np.zeros((n_all_surfaces, n_all_surfaces), dtype=float)\n\n # --- First deal with finite surfaces from the registry, and treat only\n # half of the views because symmetry will be used next\n n_finite_surfaces = n_all_surfaces - 1 # no sky\n view_matrix_upper_finite_surfaces = np.triu(\n view_matrix[:n_finite_surfaces, :n_finite_surfaces])\n indices_views_finite = np.where(view_matrix_upper_finite_surfaces)\n\n n_views = len(indices_views_finite[0])\n geometries = list(geom_dict.values())\n for i in range(n_views):\n idx = (indices_views_finite[0][i], indices_views_finite[1][i])\n view = self.mapper.reverse_view[view_matrix[idx]]\n line_i = geometries[idx[0]]\n line_j = geometries[idx[1]]\n obstr_index = obstr_matrix[idx]\n if obstr_index is not None:\n obstructing_pvrow = list_pvrow[obstr_matrix[idx]]\n else:\n obstructing_pvrow = None\n # The following line takes the most time to execute (looped)\n view_factors[idx] = self.mapper.function_mapping[view](\n line_i, line_j, obstructing_pvrow)\n\n # Use the reciprocity property of view factors to speed up the\n # vfactor calculation: A_1 * F_1-2 = A_2 * F_2-1 ==> symmetric matrx\n areas = np.array([surf.length for surf in geometries])\n matrix_areas = np.diag(areas)\n matrix_areas_inv = np.diag(1. / areas)\n\n upper_matrix_reciprocity = np.dot(matrix_areas,\n view_factors[:n_finite_surfaces,\n :n_finite_surfaces])\n\n total_matrix_reciprocity = (upper_matrix_reciprocity +\n upper_matrix_reciprocity.T)\n finite_vf_matrix = np.dot(matrix_areas_inv, total_matrix_reciprocity)\n view_factors[:n_finite_surfaces, :n_finite_surfaces] = finite_vf_matrix\n\n # --- Then do the calculations for the sky, which is the remaining\n # portion of the hemisphere\n view_factors[:-1, -1] = 1. 
- np.sum(view_factors[:-1, :-1], axis=1)\n return view_factors", "def sparse_matlab(i, j, v, m, n):\n return csr_matrix((v, (i, j)), shape=(m, n))", "def _safe_inv22_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n\n # We set delta_inv to 0. in case of a rank deficient matrix ; a\n # rank-deficient input matrix *M* will lead to a null matrix in output\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n if np.all(rank2):\n # Normal 'optimized' flow.\n delta_inv = 1./delta\n else:\n # 'Pathologic' flow.\n delta_inv = np.zeros(M.shape[0])\n delta_inv[rank2] = 1./delta[rank2]\n\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n return M_inv", "def minkowskiArrayDot(X, vec):\n MDP_max = -(1 + 1e-10)\n k = X.shape[1]\n vec = vec.reshape((k, -1))\n mod = np.ones(vec.shape)\n mod[-1] = -1\n MDP = np.matmul(X, vec*mod)\n #MDP[MDP > MDP_max] = MDP_max\n return MDP", "def get_leftLaInv(k_list, l_list, m_list, mult_table_vals, n_dims, gradeList):\n\n identity = np.zeros((n_dims,))\n identity[gradeList.index(0)] = 1\n\n @numba.njit\n def leftLaInvJIT(value):\n intermed = np.zeros((n_dims, n_dims))\n for test_ind, i in enumerate(k_list):\n j = l_list[test_ind]\n k = m_list[test_ind]\n intermed[i, j] += mult_table_vals[test_ind] * value[k]\n intermed = np.transpose(intermed)\n if abs(linalg.det(intermed)) < _eps:\n raise ValueError(\"multivector has no left-inverse\")\n sol = linalg.solve(intermed, identity)\n return sol\n\n return leftLaInvJIT", "def initiateVMatrixes():\n global v, vNew, vExact\n # Initialize the grid to 0\n v = np.zeros((n+1, n+1)) # matrix of v, index are i: row, j:column\n # Set the boundary conditions\n for i in range(1,n):\n v[0,i] = 10\n v[n,i] = 10\n v[i,0] = 10\n v[i,n] = 10\n # Exact solution\n vExact = np.copy(v)\n for i in range(1,n):\n for j in range(1,n):\n vExact[i,j] = 10\n # Initial guess\n for i in range(1,n):\n for j in range(1,n):\n v[i,j] = 0.9*vExact[i,j]\n vNew = np.copy(v)", "def solve_eq(xVec):\n \n PSI = xVec[0:vecLen] \n Cxx = xVec[1*vecLen:2*vecLen] \n Cyy = xVec[2*vecLen:3*vecLen] \n Cxy = xVec[3*vecLen:4*vecLen]\n\n\n # Useful Vectors\n Txx = oneOverWi * Cxx \n Txx[N*M] -= oneOverWi\n Tyy = oneOverWi * Cyy \n Tyy[N*M] -= oneOverWi\n Txy = oneOverWi * Cxy\n\n U = + dot(MDY, PSI)\n V = - dot(MDX, PSI)\n LAPLACPSI = dot(LAPLAC, PSI)\n\n # Useful Operators\n MMU = tsm.c_prod_mat(U)\n MMV = tsm.c_prod_mat(V)\n VGRAD = dot(MMU,MDX) + dot(MMV,MDY)\n MMDXU = tsm.c_prod_mat(dot(MDX, U))\n MMDXV = tsm.c_prod_mat(dot(MDX, V))\n MMDYU = tsm.c_prod_mat(dot(MDY, U))\n MMDYV = tsm.c_prod_mat(dot(MDY, V))\n\n MMDXPSI = tsm.c_prod_mat(dot(MDX, LAPLACPSI))\n MMDXCXX = tsm.c_prod_mat(dot(MDX, Cxx))\n MMDXCYY = tsm.c_prod_mat(dot(MDX, Cyy))\n MMDXCXY = tsm.c_prod_mat(dot(MDX, Cxy))\n\n #######calculate the Residuals########\n\n residualsVec = zeros((4*vecLen), dtype='complex')\n\n #####psi\n residualsVec[0:vecLen] = - Re*dot(MMU, dot(MDX, LAPLACPSI)) \\\n - Re*dot(MMV, dot(MDY, LAPLACPSI)) \\\n + beta*dot(BIHARM, PSI) \\\n - (1.-beta)*(dot(MDXX, Txy) + dot(MDXY, (Tyy - Txx)) \\\n - dot(MDYY, Txy))\n\n #####xx\n residualsVec[vecLen:2*vecLen] = - dot(VGRAD, Cxx) \\\n + 2.*dot(MMDXU, Cxx) \\\n + 2.*dot(MMDYU, Cxy) - Txx\n\n #####yy\n residualsVec[2*vecLen:3*vecLen] = - dot(VGRAD, Cyy) \\\n + 2.*dot(MMDXV, Cxy) \\\n + 2.*dot(MMDYV, Cyy) - Tyy\n\n 
#####xy\n residualsVec[3*vecLen:4*vecLen] = - dot(VGRAD, Cxy) \\\n + dot(MMDXV, Cxx) + dot(MMDYU, Cyy)\\\n - Txy\n\n #####psi0\n residualsVec[N*M:(N+1)*M] = - Re*dot(VGRAD, U)[N*M:(N+1)*M] \\\n + beta*dot(MDYYY, PSI)[N*M:(N+1)*M] \\\n + (1.-beta)*dot(MDY,Txy)[N*M:(N+1)*M]\n # set the pressure gradient (pressure driven flow)\n # residualsVec[N*M] += 2.0\n\n # set the forcing on the zeroth mode for non pressure driven flow.\n residualsVec[N*M:(N+1)*M] += forcingVec\n\n\n ##### Apply boundary conditions to residuals vector\n\n # dxPsi = 0 \n for k in range (2*N+1): \n if k == N: continue # skip the 0th component \n residualsVec[k*M + M-2] = dot((k-N)*kx*BTOP, PSI[k*M:(k+1)*M])\n residualsVec[k*M + M-1] = dot((k-N)*kx*BBOT, PSI[k*M:(k+1)*M])\n del k\n\n # dyPsi(+-1) = 0 \n for k in range (2*N+1):\n if k == N: continue # skip the 0th component \n residualsVec[k*M + M-4] = dot(DERIVTOP, PSI[k*M:(k+1)*M])\n residualsVec[k*M + M-3] = dot(DERIVBOT, PSI[k*M:(k+1)*M])\n del k\n\n # dyPsi0(+-1) = +-1\n residualsVec[N*M + M-3] = dot(DERIVTOP, PSI[N*M:(N+1)*M]) - 1.\n residualsVec[N*M + M-2] = dot(DERIVBOT, PSI[N*M:(N+1)*M]) + 1.\n\n # Psi0(-1) = 0\n residualsVec[N*M + M-1] = dot(BBOT, (PSI[N*M:(N+1)*M]))\n\n return (residualsVec)", "def matTimesVec(M, x):\n return [dot(m, x) for m in M]", "def one_step(self):\r\n assert (self.uv_vol is not None)\r\n assert (self.guv_vol is not None)\r\n assert (self.uv_bound is not None)\r\n assert (self.vf_vect_bound is not None)\r\n assert (self.vF_vect_vol is not None)\r\n # Shape checks\r\n assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])\r\n assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])\r\n assert (self.vF_vect_vol.shape == self.vf_vect_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.uv_vol.shape[1])\r\n assert (self.uv_vol.shape == self.guv_vol.shape)\r\n assert (self.uv_vol.shape == self.uv_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.vF_vect_vol.shape[0])\r\n \r\n if self.step == 0:\r\n self.check_k_matrix_stability()\r\n # print(\"Epsilon is :\"+str(self.Epsilon))\r\n # print(\"Beta is :\"+str(self.Beta))\r\n\r\n # Form \"Stiffness\" matrix:\r\n K = self.make_k_matrix()\r\n # Form \"Force\" vector: \r\n f = self.vF_vect_vol + (self.Epsilon / self.Beta) * self.vf_vect_bound\r\n\r\n # print(\"FORCE VECTOR:\")\r\n # print(f)\r\n # print(\"STIFFNESS MATRIX\")\r\n # print(K)\r\n # print(\"UV_VOL\")\r\n # print(self.uv_vol)\r\n # print(\"EPSILON * GUV_VOL\")\r\n # print(self.Epsilon * self.guv_vol)\r\n # print(\"UV_BOUND * COEFF\")\r\n # print((self.Epsilon / self.Beta) * self.uv_bound)\r\n sol = scipy_sparse_linsolve(K, f)\r\n # print(\"SOLUTION\")\r\n # print(sol)\r\n return sol", "def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = 
M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1. / tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv", "def _matvec(self, x):\n \n x = x.reshape((self.NH,))\n #\n # Compute kinetic energy operator\n #\n tx = self.KEO @ x \n \n # \n # Compute potential energy operator\n #\n xquad = self.basis.fbrToQuad(x,axis = 0) # xquad has shape (Nq,)\n vx = self.basis.quadToFbr(self.V * xquad) # vx has shape (NH,)\n \n return tx + vx", "def knn(self,query_vec,k, stdev=False):\n\n sims = []\n if k > len(self.terms):\n k = len(self.terms)\n sims = np.matmul(self.vectors, query_vec.vector)\n if stdev:\n sims = zscore(sims)\n indices = np.argpartition(sims, -k)[-k:]\n indices = sorted(indices, key=lambda i: sims[i], reverse=True)\n results = []\n for index in indices:\n results.append([sims[index], self.terms[index]])\n return results", "def update_params(self, mat):\n assert mat.shape == self.shape\n vec = mat.reshape(mat.size)\n self.alpha = np.linalg.solve(self.covs_mat.T.dot(self.covs_mat),\n self.covs_mat.T.dot(vec))", "def solve_lu(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape, b.dtype)\n return jax.numpy.linalg.solve(A, b)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape, b.dtype) # 4d array (tensor)\n A = A.reshape(-1, b.shape[0] * b.shape[1]) # 2d array (matrix)\n return jax.numpy.linalg.solve(A, b.ravel()).reshape(*b.shape)\n else:\n raise NotImplementedError", "def _normal_matvec(matvec, x):\n matvec_x, vjp = jax.vjp(matvec, x)\n return vjp(matvec_x)[0]", "def incremental_svd(A, qr_flg=False):\n\n m = 256\n n = 7291\n\n n0 = 256\n\n if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')\n\n start = time.clock()\n\n A0 = A[:, :n0]\n U, s, V = ln.svd(A0, full_matrices=False)\n\n # NOTE: s is a vector; np.diag(s) will produce a diagonal matrix\n for i in range(n0, n):\n\n # new matrix is just a single vector (i-th column of A)\n A1 = np.matrix(A[:, i]).T\n\n if qr_flg:\n J, K = ln.qr(A1 - np.dot(np.dot(U, U.T), A1))\n U_, s_, V_ = ln.svd(\n np.vstack((\n np.hstack((np.diag(s), np.dot(U.T, A1))),\n np.hstack((np.zeros((K.shape[0], s.shape[0])), K))\n )),\n full_matrices=False)\n\n # update the result of SVD\n U = np.dot(np.hstack((U, J)), U_)\n\n else:\n U_, s_, V_ = ln.svd(np.hstack((np.diag(s), np.dot(U.T, A1))), full_matrices=False)\n U = np.dot(U, U_)\n\n s = s_\n\n # NOTE: V from svd on NumPy is already transposed\n V = np.dot(V_,\n np.vstack((\n np.hstack((V, np.zeros((V.shape[0], i+1-V.shape[1])))),\n np.hstack((np.zeros((V_.shape[1]-V.shape[0], V.shape[1])), np.eye(V_.shape[1]-V.shape[0], i+1-V.shape[1])))\n ))\n )\n\n # for next computation, update A0\n A0 = np.hstack((A0, A1))\n\n elapsed_time = time.clock() - start\n print 'time:', elapsed_time\n\n return U, s, V", "def _inv22_vectorized(M):\n assert (M.ndim == 3)\n assert (M.shape[-2:] == (2, 2))\n M_inv = np.empty_like(M)\n delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n return M_inv", "def 
mumps_eigsh(matrix, k, sigma, **kwargs):\n class LuInv(sla.LinearOperator):\n\n def __init__(self, matrix):\n instance = kwant.linalg.mumps.MUMPSContext()\n instance.analyze(matrix, ordering='pord')\n instance.factor(matrix)\n self.solve = instance.solve\n sla.LinearOperator.__init__(self, matrix.dtype, matrix.shape)\n\n def _matvec(self, x):\n return self.solve(x.astype(self.dtype))\n\n opinv = LuInv(matrix - sigma * sp.identity(matrix.shape[0]))\n return sla.eigsh(matrix, k, sigma=sigma, OPinv=opinv, **kwargs)", "def _K(m):\n M = m*(m - 1)/2\n K = np.zeros((M, m**2), dtype=np.int64)\n row = 0\n for j in range(1, m):\n col = (j - 1)*m + j\n s = m - j\n K[row:(row+s), col:(col+s)] = np.eye(s)\n row += s\n return K", "def kinetic_energy(v, Mm=1.):\n speed_squared = v[:, 0] ** 2 + v[:, 1] ** 2\n # timeit.timeit('vt[:,0]**2+vt[:,1]**2', setup='import numpy as np; vt = np.random.rand(10000,2)', number=1000)\n KE = 0.5 * sum(Mm * speed_squared)\n return KE", "def test_set_matrix_vec(self, backend, vecmat, elements, elem_vnode,\n kernel_inc_vec, kernel_set_vec, g, skip_cuda):\n op2.par_loop(kernel_inc_vec, elements(3,3),\n vecmat((elem_vnode[op2.i[0]], elem_vnode[op2.i[1]]), op2.INC),\n g(op2.READ))\n # Check we have ones in the matrix\n assert vecmat.array.sum() == 2*2*3*3*elements.size\n op2.par_loop(kernel_set_vec, elements(3,3),\n vecmat((elem_vnode[op2.i[0]], elem_vnode[op2.i[1]]), op2.WRITE),\n g(op2.READ))\n # Check we have set all values in the matrix to 1\n assert_allclose(vecmat.array, numpy.ones_like(vecmat.array))\n vecmat.zero()", "def k_isometric_monte_carlo(self, v, **kwargs):\r\n v = self.np_array(v)\r\n ensemble_average_fun = np.zeros(v.shape)\r\n for i, v_i in enumerate(v):\r\n self.beta_E = lambda lambda_: self.beta_U_1(lambda_) + \\\r\n self.beta_A_0_abs_isometric(1, lambda_)\r\n\r\n def serial_fun(init_config, **kwargs):\r\n return self.k_isometric_monte_carlo_serial(\r\n v_i, init_config, **kwargs\r\n )\r\n\r\n ensemble_average_fun[i] = self.parallel_calculation(\r\n serial_fun,\r\n self.minimize_beta_U(v_i)[2][-self.M:, 0],\r\n **kwargs\r\n )\r\n ensemble_average_fun_TS = np.zeros(v.shape)\r\n for i, v_i in enumerate(v):\r\n self.beta_E = lambda lambda_: \\\r\n self.beta_U_1(\r\n np.concatenate(([self.lambda_TS], lambda_))\r\n ) + self.beta_A_0_abs_isometric(\r\n 1, np.concatenate(([self.lambda_TS], lambda_))\r\n )\r\n\r\n def serial_fun(init_config, **kwargs):\r\n return self.k_isometric_monte_carlo_serial(\r\n v_i, init_config, **kwargs\r\n )\r\n\r\n ensemble_average_fun_TS[i] = self.parallel_calculation(\r\n serial_fun,\r\n self.minimize_beta_U(\r\n v_i, transition_state=True\r\n )[2][-(self.M - 1):, 0],\r\n **kwargs\r\n )\r\n return ensemble_average_fun_TS/ensemble_average_fun", "def vbmstep(self):\n for k in range(self.k):\n self.beta_k[k] = self.beta_0 + self.counts[k]\n self.m_k[k] = (1 / self.beta_k[k]) * (self.beta_0 * self.m_0 +\n self.counts[k] * self.means[k])\n\n tmp = (self.beta_0 * self.counts[k]) / (self.beta_0 + self.counts[k])\n tmp2 = (self.means[k] - self.m_0)\n tmp = np.linalg.inv(self.W_0) + self.counts[k] * self.covars[k] + tmp * tmp2 @ tmp2.T\n self.w_k[k] = np.linalg.inv(tmp)\n self.nu_k[k] = self.nu_0 + self.counts[k]\n self.alpha_k[k] = self.alpha_0[k] + self.counts[k]", "def matrix_inv(mat):\n\ta = mat[0,0]\n\tb = mat[0,1]\n\tc = mat[0,2]\n\td = mat[1,0]\n\te = mat[1,1]\n\tf = mat[1,2]\n\tg = mat[2,0]\n\th = mat[2,1]\n\ti = mat[2,2]\n\n\tdet = b*f*g + c*d*h + a*e*i - a*f*h - b*d*i - c*e*g\n\n\tinvmat = np.zeros((3,3))\n\tinvmat[0,0] = (e*i - 
f*h) / det\n\tinvmat[0,1] = (c*h - b*i) / det\n\tinvmat[0,2] = (b*f - c*e) / det\n\tinvmat[1,0] = (f*g - d*i) / det\n\tinvmat[1,1] = (a*i - c*g) / det\n\tinvmat[1,2] = (c*d - a*f) / det\n\tinvmat[2,0] = (d*h - e*g) / det\n\tinvmat[2,1] = (b*g - a*h) / det\n\tinvmat[2,2] = (a*e - b*d) / det\n\treturn invmat", "def find_min_norm_element(vecs):\n # Solution lying at the combination of two points\n\n\n vecs_clone = []\n for i in range(len(vecs)):\n # assert len(vecs[i]) == 1\n vecs_task = []\n for k in range(len(vecs[i])):\n vecs_task.append(vecs[i][k].view(-1))\n vecs_clone.append(torch.cat(vecs_task).unsqueeze(0))\n vecs_clone = torch.cat(vecs_clone)\n\n grad_mat = torch.matmul(vecs_clone, vecs_clone.t())\n\n # dps = {}\n init_sol = MinNormSolver._min_norm_2d(grad_mat)\n \n n = len(vecs)\n sol_vec = torch.zeros([n,]).cuda()\n sol_vec[init_sol[0][0]] = init_sol[1]\n sol_vec[init_sol[0][1]] = 1 - init_sol[1]\n# sol_vec = sol_vec.unsqueeze(0)\n\n if n < 3:\n # This is optimal for n=2, so return the solution\n return sol_vec , init_sol[2]\n \n iter_count = 0\n\n # grad_mat = np.zeros((n,n))\n # for i in range(n):\n # for j in range(n):\n # grad_mat[i,j] = dps[(i, j)]\n \n\n while iter_count < MinNormSolver.MAX_ITER:\n grad_dir = -1.0 * torch.matmul(grad_mat, sol_vec)\n# sol_vec = sol_vec.squeeze()\n new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)\n\n v1v1 = torch.sum(sol_vec.unsqueeze(1).repeat(1, n)*sol_vec.unsqueeze(0).repeat(n, 1)*grad_mat)\n v1v2 = torch.sum(sol_vec.unsqueeze(1).repeat(1, n)*new_point.unsqueeze(0).repeat(n, 1)*grad_mat)\n v2v2 = torch.sum(new_point.unsqueeze(1).repeat(1, n)*new_point.unsqueeze(0).repeat(n, 1)*grad_mat)\n\n nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)\n new_sol_vec = nc*sol_vec + (1-nc)*new_point\n change = new_sol_vec - sol_vec\n if torch.sum(torch.abs(change)) < MinNormSolver.STOP_CRIT:\n return sol_vec, nd\n sol_vec = new_sol_vec", "def run_vqe(\n self,\n backend=Aer.get_backend(\"statevector_simulator\"),\n var_form=None,\n optimizer=None,\n reps=None,\n mode=\"min_val\",\n ):\n # N=int(np.ceil(np.log2(len(self.mat))))\n # hk = np.zeros((2**N,2**N),dtype='complex')\n # hk[:self.mat.shape[0], :self.mat.shape[1]] = self.mat\n N = self.n_qubits()\n if mode == \"max_val\":\n Hamil_mat = aqua.operators.MatrixOperator(-1 * self.mat)\n # Hamil_mat = MatrixOperator(-1 * self.mat)\n else:\n Hamil_mat = aqua.operators.MatrixOperator(self.mat)\n # Hamil_mat = MatrixOperator(self.mat)\n Hamil_qop = aqua.operators.op_converter.to_weighted_pauli_operator(\n Hamil_mat\n )\n if var_form is None:\n if reps is None:\n reps = 2\n # reps=5\n from qiskit.circuit.library import EfficientSU2\n\n var_form = EfficientSU2(N, reps=reps)\n if optimizer is None:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form)\n # vqe = VQE(Hamil_qop, var_form)\n else:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form, optimizer)\n # vqe = VQE(Hamil_qop, var_form, optimizer)\n vqe_result = vqe.run(backend)\n en = np.real(vqe_result[\"eigenvalue\"])\n # params=vqe.optimal_params\n # circuit=vqe.construct_circuit(params)\n if mode == \"max_val\":\n en = -1 * en\n # states = np.sort(\n # np.real(\n # vqe.expectation.convert(\n # StateFn(vqe.operator, is_measurement=True)\n # ).to_matrix()\n # )\n # )\n return en, vqe_result, vqe", "def SpMV_viaMKL( A, x ):\n SpMV = mkl.mkl_cspblas_dcsrgemv\n # Dissecting the \"cspblas_dcsrgemv\" name:\n # \"c\" - for \"c-blas\" like interface (as opposed to fortran)\n # Also means expects sparse arrays to use 0-based indexing, which python 
does\n # \"sp\" for sparse\n # \"d\" for double-precision\n # \"csr\" for compressed row format\n # \"ge\" for \"general\", e.g., the matrix has no special structure such as symmetry\n # \"mv\" for \"matrix-vector\" multiply\n\n if not sparse.isspmatrix_csr(A):\n raise Exception(\"Matrix must be in csr format\")\n (m,n) = A.shape\n\n # The data of the matrix\n data = A.data.ctypes.data_as(POINTER(c_double))\n indptr = A.indptr.ctypes.data_as(POINTER(c_int))\n indices = A.indices.ctypes.data_as(POINTER(c_int))\n\n # Allocate output, using same conventions as input\n nVectors = 1\n if x.ndim is 1:\n y = np.empty(m,dtype=np.double,order='F')\n if x.size != n:\n raise Exception(\"x must have n entries. x.size is %d, n is %d\" % (x.size,n))\n elif x.shape[1] is 1:\n y = np.empty((m,1),dtype=np.double,order='F')\n if x.shape[0] != n:\n raise Exception(\"x must have n entries. x.size is %d, n is %d\" % (x.size,n))\n else:\n nVectors = x.shape[1]\n y = np.empty((m,nVectors),dtype=np.double,order='F')\n if x.shape[0] != n:\n raise Exception(\"x must have n entries. x.size is %d, n is %d\" % (x.size,n))\n\n # Check input\n if x.dtype.type is not np.double:\n x = x.astype(np.double,copy=True)\n # Put it in column-major order, otherwise for nVectors > 1 this FAILS completely\n if x.flags['F_CONTIGUOUS'] is not True:\n x = x.copy(order='F')\n\n if nVectors == 1:\n np_x = x.ctypes.data_as(POINTER(c_double))\n np_y = y.ctypes.data_as(POINTER(c_double))\n # now call MKL. This returns the answer in np_y, which links to y\n SpMV(byref(c_char(b\"N\")), byref(c_int(m)),data ,indptr, indices, np_x, np_y ) \n else:\n for columns in range(nVectors):\n xx = x[:,columns]\n yy = y[:,columns]\n np_x = xx.ctypes.data_as(POINTER(c_double))\n np_y = yy.ctypes.data_as(POINTER(c_double))\n SpMV(byref(c_char(b\"N\")), byref(c_int(m)),data,indptr, indices, np_x, np_y ) \n\n return y", "def _get_mult_function_runtime_sparse(k_list, l_list, m_list, mult_table_vals, n_dims):\n @numba.njit\n def mv_mult(value, other_value):\n output = np.zeros(n_dims)\n for ind, k in enumerate(k_list):\n v_val = value[k]\n if v_val != 0.0:\n m = m_list[ind]\n ov_val = other_value[m]\n if ov_val != 0.0:\n l = l_list[ind]\n output[l] += v_val * mult_table_vals[ind] * ov_val\n return output\n\n return mv_mult", "def force_vector_matrix(pop, kbest, G, e=0.0):\n sub = lambda seq1, seq2: [0 if s1 == s2 else 1 for s1, s2 in zip(seq1, seq2)]\n zero = lambda: [0 for _ in range(len(pop[0]))]\n\n def estimate_force(a, b):\n a_string = a.as_vector()\n b_string = b.as_vector()\n\n R = hamming_distances(a_string, b_string)\n ## TODO: here must be a multiplication of a vector and a number\n val = (G*(a.mass*b.mass)/R + e)\n f = [val * d for d in sub(a_string, b_string)]\n return f\n\n mat = [[zero() if p == b else estimate_force(p, b) for b in pop[0:kbest]] for p in pop]\n return mat", "def update_params(self, mat):\n assert mat.shape == self.shape\n a, s, b = np.linalg.svd(mat, full_matrices=False)\n self.u = a[:, :self.rank]*s[:self.rank]\n self.v = b.T[:, :self.rank]", "def vandermonde_matrix(x):\n m = size(x) \n n = m+1\n V = ones((m, n))\n for j in range(1, n):\n for i in range(0, m):\n V[i,j] = pow(x[i],j) \n return V", "def multMatVect(v, A, m1, B, m2):\r\n if multMatVect.dot_modulo is None:\r\n A_sym = tensor.lmatrix('A')\r\n s_sym = tensor.ivector('s')\r\n m_sym = tensor.iscalar('m')\r\n A2_sym = tensor.lmatrix('A2')\r\n s2_sym = tensor.ivector('s2')\r\n m2_sym = tensor.iscalar('m2')\r\n o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, 
m2_sym)\r\n multMatVect.dot_modulo = function(\r\n [A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o)\r\n\r\n # This way of calling the Theano fct is done to bypass Theano overhead.\r\n f = multMatVect.dot_modulo\r\n f.input_storage[0].storage[0] = A\r\n f.input_storage[1].storage[0] = v[:3]\r\n f.input_storage[2].storage[0] = m1\r\n f.input_storage[3].storage[0] = B\r\n f.input_storage[4].storage[0] = v[3:]\r\n f.input_storage[5].storage[0] = m2\r\n f.fn()\r\n r = f.output_storage[0].storage[0]\r\n\r\n return r", "def solve_cholesky(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape)\n return jax.scipy.linalg.solve(A, b, sym_pos=True)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape)\n return jax.scipy.linalg.solve(A, b.ravel(), sym_pos=True).reshape(*b.shape)\n else:\n raise NotImplementedError", "def hybrid_sim(vec, mat, top_k=5, alpha=0.1):\n euc_vec = 0.\n idx = np.array(range(mat.shape[0]))\n\n if alpha > 0:\n # step 1: caculate euclidean distance\n euc_vec = euclid_dist(vec, mat)\n if len(euc_vec) > top_k:\n idx = np.argsort(euc_vec)[:top_k]\n euc_vec = euc_vec[idx]\n mat = mat[idx, :]\n euc_vec = _euc_sim(euc_vec)\n\n cos_vec = cos_sim(vec, mat)\n hyb_vec = (1 - alpha) * (0.5 * cos_vec + 0.5) + alpha * euc_vec\n\n return idx, hyb_vec", "def _jvp_isotonic_mag(solution, vector, w, l, eps=1e-4):\n x = solution\n mask = jnp.pad(jnp.absolute(jnp.diff(x)) <= eps, (1, 0))\n ar = jnp.arange(x.size)\n\n inds_start = jnp.where(mask == 0, ar, +jnp.inf).sort()\n u = 1 + l * w\n one_hot_start = jax.nn.one_hot(inds_start, len(vector))\n a = _cumsum_einsum(one_hot_start)\n a = jnp.append(jnp.diff(a[::-1], axis=0)[::-1], a[-1].reshape(1, -1), axis=0)\n return (\n ((a.T * (a @ (vector * u))).T) / ((a * u).sum(1, keepdims=True) + 1e-8)\n ).sum(0)", "def solve_for_eigenvectors(matrix, num, mode=\"general\"):\n\n # Construct a sparse matrix\n if mode == \"general\":\n return linalg.eigs(matrix, num)\n\n if mode == \"symmetric\":\n return linalg.eigsh(matrix, num)", "def FTLM_static_iteration(O_dict,E,V,Q_T,beta=0):\n\tnv = E.size\n\n\n\tp = _np.exp(-_np.outer(_np.atleast_1d(beta),E))\n\tc = _np.einsum(\"j,aj,...j->a...\",V[0,:],V,p)\n\n\tr,Q_T = _get_first_lv(iter(Q_T))\n\n\tresults_dict = {}\n\n\tAr_dict = {key:A.dot(r) for key,A in iteritems(O_dict)}\n\n\tfor i,lv in enumerate(Q_T): # nv matvecs\n\t\tfor key,A in iteritems(O_dict):\n\t\t\tif key in results_dict:\n\t\t\t\tresults_dict[key] += _np.squeeze(c[i,...] * _np.vdot(lv,Ar_dict[key]))\n\t\t\telse:\n\t\t\t\tresults_dict[key] = _np.squeeze(c[i,...] 
* _np.vdot(lv,Ar_dict[key]))\n\n\treturn results_dict,_np.squeeze(c[0,...])", "def get_embedding_matrix(word_vecs, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+1, k), dtype='float32') \n W[0] = np.zeros(k, dtype='float32')\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def v(resistances, r_i, applied_voltages, **kwargs):\n if r_i.word_line > 0 or r_i.bit_line > 0:\n g = fill.g(resistances, r_i)\n i = fill.i(applied_voltages, resistances, r_i)\n\n utils.message('Started solving for v.', **kwargs)\n v_matrix = linalg.spsolve(g.tocsc(), i)\n utils.message('Solved for v.', **kwargs)\n\n # if `num_examples == 1`, it can result in 1D array.\n if v_matrix.ndim == 1:\n v_matrix = v_matrix.reshape(v_matrix.shape[0], 1)\n\n # if one of the interconnect resistances is zero, only half of the\n # matrix_v had to be solved. The other half can be filled without\n # solving because the node voltages are known.\n if r_i.word_line == 0:\n new_v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n new_v_matrix[:resistances.size, ] = np.repeat(\n applied_voltages, resistances.shape[1], axis=0)\n new_v_matrix[resistances.size:, ] = v_matrix\n v_matrix = new_v_matrix\n if r_i.bit_line == 0:\n new_v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n new_v_matrix[:resistances.size, ] = v_matrix\n v_matrix = new_v_matrix\n else:\n # if both interconnect resistances are zero, all node voltages are\n # known.\n v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n v_matrix[:resistances.size, ] = np.repeat(\n applied_voltages, resistances.shape[1], axis=0)\n\n return v_matrix", "def fetch_top_k(vect, mat, k):\n resultant = np.dot(mat, vect)\n arglist = np.argsort(resultant)\n arglist = arglist[-1:(-1 - k):-1]\n return arglist, resultant", "def matern_kernel(x_1, x_2, l, v):\n\n\tassert l > 0 and v > 0, \"The hyperparameters l and v must be > 0\"\n\tdist = euclidean_distances(x_1.reshape(-1,1), x_2.reshape(-1,1))\n\tdist[dist == 0.0] += 1e-10\n\tz = np.sqrt(2*v) * dist / l\n\treturn (2**(1-v)/gamma(v)) * (z**v) * kv(v, z)", "def test_zero_vector_matrix(self, backend, vecmat):\n vecmat.zero()\n expected_matrix = numpy.zeros((8,8), dtype=valuetype)\n eps=1.e-14\n assert_allclose(vecmat.values, expected_matrix, eps)", "def load_velocity_matrix(self, k_idx):\n k_str = self.kpt_str % k_idx\n if self.exciton_obj.is_complex:\n vel_mat = np.array(self.file_storage[self.mat_str][k_str])\n else:\n vel_re = np.array(self.file_storage[self.mat_str][k_str]['real'])\n vel_im = np.array(self.file_storage[self.mat_str][k_str]['imag'])\n vel_mat = vel_re + 1j*vel_im\n return vel_mat", "def lambda_matrix_kspace(glat, eps=1e-10):\n return lambda kvec: dynamical_matrix_kspace(kvec, glat, eps=eps)", "def Lanczos(A, k, *, sparse=False, dim=None):\n if sparse:\n n = dim\n dtype = torch.float64\n Amap = A\n else:\n n = A.shape[0]\n dtype = A.dtype\n Amap = lambda v: torch.matmul(A, v)\n Qk = torch.zeros((n, k), dtype=dtype)\n alphas = torch.zeros(k, dtype=dtype)\n betas = torch.zeros(k - 1, dtype=dtype)\n q = torch.randn(n, dtype=dtype)\n q = q / torch.norm(q)\n u = Amap(q)\n alpha = torch.matmul(q, u)\n Qk[:, 0] = q\n alphas[0] = alpha\n beta = 0\n qprime = torch.randn(n, dtype=dtype)\n for i in range(1, k):\n r = u - alpha * q - beta * qprime\n\n # The simple but expensive full reorthogonalization process\n # in order to recover the orthogonality among the 
Lanczos vectors caused by\n # rounding error in floating point arithmetic.\n r -= torch.matmul(Qk[:, :i], torch.matmul(Qk[:, :i].T, r))\n\n qprime = q\n beta = torch.norm(r)\n q = r / beta\n u = Amap(q)\n alpha = torch.matmul(q, u)\n alphas[i] = alpha\n betas[i - 1] = beta\n Qk[:, i] = q\n T = torch.diag(alphas) + torch.diag(betas, diagonal=1) + torch.diag(betas, diagonal=-1)\n return Qk, T", "def compute_vtv(v_matrix, n_gauss):\n v_dim = v_matrix.shape[1] # subspace dim\n f_dim = v_matrix.shape[0] / n_gauss # feature dim\n r, c = Extractor.get_rfpf_shape(v_dim)\n\n # Allocate space if necessary\n out = np.zeros((r, c, n_gauss), dtype=v_matrix.dtype, order='F')\n\n # reshape v to convenience\n v3d = v_matrix.reshape((n_gauss, f_dim, v_dim))\n\n for ii in range(n_gauss):\n out[:, :, ii] = Extractor.rank_k_update(v3d[ii, :, :].T, out=out[:, :, ii])\n\n return out", "def get_kmat(phases, mod_depths=(1, 1, 1), amps=(1, 1, 1)):\n\n mat = []\n for p, m, a in zip(phases, mod_depths, amps):\n row_vec = a * np.array([1, 0.5 * m * np.exp(1j * p), 0.5 * m * np.exp(-1j * p)])\n mat.append(row_vec)\n mat = np.asarray(mat)\n\n return mat", "def get_inverse_hvp_lissa(v, sess, v_placeholder, hessian_vector,\r\n batch_size=None,\r\n scale=10, damping=0.0, num_samples=1, recursion_depth=10000): \r\n inverse_hvp = None\r\n print_iter = recursion_depth / 10\r\n\r\n for i in range(num_samples):\r\n # samples = np.random.choice(self.num_train_examples, size=recursion_depth)\r\n \r\n cur_estimate = v\r\n\r\n for j in range(recursion_depth):\r\n \r\n # feed_dict = fill_feed_dict_with_one_ex(\r\n # data_set, \r\n # images_placeholder, \r\n # labels_placeholder, \r\n # samples[j]) \r\n feed_dict = fill_feed_dict_with_batch(x, y_, Test_input, Test_label, batch_size=batch_size)\r\n\r\n feed_dict = update_feed_dict_with_v_placeholder(v_placeholder, feed_dict, cur_estimate)\r\n hessian_vector_val = sess.run(hessian_vector, feed_dict=feed_dict)\r\n cur_estimate = [a + (1-damping) * b - c/scale for (a,b,c) in zip(v, cur_estimate, hessian_vector_val)] \r\n\r\n # Update: v + (I - Hessian_at_x) * cur_estimate\r\n if (j % print_iter == 0) or (j == recursion_depth - 1):\r\n print(\"Recursion at depth %s: norm is %.8lf\" % (j, np.linalg.norm(cur_estimate[0])))\r\n feed_dict = update_feed_dict_with_v_placeholder(v_placeholder, feed_dict, cur_estimate)\r\n\r\n if inverse_hvp is None:\r\n inverse_hvp = [b/scale for b in cur_estimate]\r\n else:\r\n inverse_hvp = [a + b/scale for (a, b) in zip(inverse_hvp, cur_estimate)] \r\n\r\n inverse_hvp = [a/num_samples for a in inverse_hvp]\r\n return inverse_hvp", "def svm_admm(X, y, mylambda=1., rho=1., rel_par=1., QUIET = False, MAX_ITER = 200, ABSTOL = 1e-6, RELTOL = 1e-2):\n if not QUIET:\n tic = time.time()\n m, n = X.shape \n y_raveld = y.ravel() \n # A is a matrix given by [-y_j*x_j -y_j]\n A = - np.dot(np.diag(y_raveld), np.concatenate((X, np.ones((m, 1))), axis = 1))\n\n #Data preprocessing\n m, n = A.shape\n \n #ADMM solver\n x = np.zeros((n, N))\n z = np.zeros((n, N))\n u = np.zeros((n, N))\n\n if not QUIET:\n print('\\n%3s\\t%10s\\t%10s\\t%10s\\t%10s\\t%10s' %('iter',\n 'r np.linalg.norm', \n 'eps pri', \n 's np.linalg.norm', \n 'eps dual', \n 'objective'))\n\n # Saving state\n h = {}\n h['objval'] = np.zeros(MAX_ITER)\n h['r_norm'] = np.zeros(MAX_ITER)\n h['s_norm'] = np.zeros(MAX_ITER)\n h['eps_pri'] = np.zeros(MAX_ITER)\n h['eps_dual'] = np.zeros(MAX_ITER)\n\n for k in range(MAX_ITER):\n # x-update \n for i in range(N):\n A_temp = A[i * num_per_batch: (i + 1) * num_per_batch, 
:]\n y_temp = y[i * num_per_batch: (i + 1) * num_per_batch, :]\n #\n # temp1 = -z[:, i] + u[:, i]\n # fun = lambda x: np.sum(np.maximum(np.dot(A_temp, x.reshape((n, 1))) + 1, np.zeros((num_per_batch, 1)))) + \\\n # rho/2. * np.dot(x + temp1, x + temp1)\n # # np.random.uniform(-1, 1, (n,1))\n # result = scipy.optimize.minimize(fun, 0.1 * np.ones((n, 1)), tol = 1e-8, method = 'Nelder-Mead')\n # x_temp = result.x\n #\n x_var = Variable(n)\n constraints = []\n objective = Minimize(sum_entries(pos( A_temp * x_var + 1)) + rho/2. * sum_squares((x_var - z[:, i] + u[:, i])))\n prob = Problem(objective, constraints)\n result = prob.solve()\n x_temp = x_var.value\n\n x_temp = x_temp.reshape((x_temp.shape[0], 1))\n x[:, i] = x_temp.ravel()\n\n xave = np.mean(x, axis = 1)\n\n # z-update\n zold = np.copy(z)\n x_hat = rel_par * x + (1. - rel_par) * zold\n z = N * rho/(1./mylambda + N * rho) * np.mean(x_hat + u, axis = 1)\n z = z.reshape((z.shape[0], 1))\n z = np.dot(z, np.ones((1, N))) # N columns of the same values\n\n # u-update\n u = u + x_hat - z\n\n # diagnostics, reporting, termination checks\n h['objval'][k] = myobjective(A, mylambda, x, z)\n h['r_norm'][k] = np.linalg.norm(x - z)\n h['s_norm'][k] = np.linalg.norm(rho * (z - zold))\n h['eps_pri'][k] = np.sqrt(n) * ABSTOL+ RELTOL * np.maximum(np.linalg.norm(x), np.linalg.norm(-z))\n h['eps_dual'][k] = np.sqrt(n) * ABSTOL + RELTOL * np.linalg.norm(rho * u)\n if not QUIET:\n print('%4d\\t%10.4f\\t%10.4f\\t%10.4f\\t%10.4f\\t%10.2f' %(k + 1,\\\n h['r_norm'][k],\\\n h['eps_pri'][k],\\\n h['s_norm'][k],\\\n h['eps_dual'][k],\\\n h['objval'][k]))\n\n if (h['r_norm'][k] < h['eps_pri'][k]) and (h['s_norm'][k] < h['eps_dual'][k]):\n break\n\n if not QUIET:\n toc = time.time()-tic\n print(\"\\nElapsed time is %.2f seconds\"%toc)\n\n return z, h", "def _apply_inverse(matrix, V, options=None):\n\n default_options = _options(matrix)\n\n if options is None:\n options = next(iter(default_options.values()))\n elif isinstance(options, str):\n if options == 'least_squares':\n for k, v in default_options.items():\n if k.startswith('least_squares'):\n options = v\n break\n assert not isinstance(options, str)\n else:\n options = default_options[options]\n else:\n assert 'type' in options and options['type'] in default_options \\\n and options.keys() <= default_options[options['type']].keys()\n user_options = options\n options = default_options[user_options['type']]\n options.update(user_options)\n\n promoted_type = np.promote_types(matrix.dtype, V.dtype)\n R = np.empty((len(V), matrix.shape[1]), dtype=promoted_type)\n\n if options['type'] == 'solve':\n for i, VV in enumerate(V):\n try:\n R[i] = np.linalg.solve(matrix, VV)\n except np.linalg.LinAlgError as e:\n raise InversionError('{}: {}'.format(str(type(e)), str(e)))\n elif options['type'] == 'least_squares_lstsq':\n for i, VV in enumerate(V):\n try:\n R[i], _, _, _ = np.linalg.lstsq(matrix, VV, rcond=options['rcond'])\n except np.linalg.LinAlgError as e:\n raise InversionError('{}: {}'.format(str(type(e)), str(e)))\n elif options['type'] == 'bicgstab':\n for i, VV in enumerate(V):\n R[i], info = bicgstab(matrix, VV, tol=options['tol'], maxiter=options['maxiter'])\n if info != 0:\n if info > 0:\n raise InversionError('bicgstab failed to converge after {} iterations'.format(info))\n else:\n raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.\n format(info))\n elif options['type'] == 'bicgstab_spilu':\n # workaround for https://github.com/pymor/pymor/issues/171\n try:\n ilu = 
spilu(matrix, drop_tol=options['spilu_drop_tol'], fill_factor=options['spilu_fill_factor'],\n drop_rule=options['spilu_drop_rule'], permc_spec=options['spilu_permc_spec'])\n except TypeError as t:\n logger = getLogger('pymor.operators.numpy._apply_inverse')\n logger.error(\"ignoring drop_rule in ilu factorization\")\n ilu = spilu(matrix, drop_tol=options['spilu_drop_tol'], fill_factor=options['spilu_fill_factor'],\n permc_spec=options['spilu_permc_spec'])\n precond = LinearOperator(matrix.shape, ilu.solve)\n for i, VV in enumerate(V):\n R[i], info = bicgstab(matrix, VV, tol=options['tol'], maxiter=options['maxiter'], M=precond)\n if info != 0:\n if info > 0:\n raise InversionError('bicgstab failed to converge after {} iterations'.format(info))\n else:\n raise InversionError('bicgstab failed with error code {} (illegal input or breakdown)'.\n format(info))\n elif options['type'] == 'spsolve':\n try:\n # maybe remove unusable factorization:\n if hasattr(matrix, 'factorization'):\n fdtype = matrix.factorizationdtype\n if not np.can_cast(V.dtype, fdtype, casting='safe'):\n del matrix.factorization\n\n if list(map(int, scipy.version.version.split('.'))) >= [0, 14, 0]:\n if hasattr(matrix, 'factorization'):\n # we may use a complex factorization of a real matrix to\n # apply it to a real vector. In that case, we downcast\n # the result here, removing the imaginary part,\n # which should be zero.\n R = matrix.factorization.solve(V.T).T.astype(promoted_type, copy=False)\n elif options['keep_factorization']:\n # the matrix is always converted to the promoted type.\n # if matrix.dtype == promoted_type, this is a no_op\n matrix.factorization = splu(matrix_astype_nocopy(matrix, promoted_type), permc_spec=options['permc_spec'])\n matrix.factorizationdtype = promoted_type\n R = matrix.factorization.solve(V.T).T\n else:\n # the matrix is always converted to the promoted type.\n # if matrix.dtype == promoted_type, this is a no_op\n R = spsolve(matrix_astype_nocopy(matrix, promoted_type), V.T, permc_spec=options['permc_spec']).T\n else:\n # see if-part for documentation\n if hasattr(matrix, 'factorization'):\n for i, VV in enumerate(V):\n R[i] = matrix.factorization.solve(VV).astype(promoted_type, copy=False)\n elif options['keep_factorization']:\n matrix.factorization = splu(matrix_astype_nocopy(matrix, promoted_type), permc_spec=options['permc_spec'])\n matrix.factorizationdtype = promoted_type\n for i, VV in enumerate(V):\n R[i] = matrix.factorization.solve(VV)\n elif len(V) > 1:\n factorization = splu(matrix_astype_nocopy(matrix, promoted_type), permc_spec=options['permc_spec'])\n for i, VV in enumerate(V):\n R[i] = factorization.solve(VV)\n else:\n R = spsolve(matrix_astype_nocopy(matrix, promoted_type), V.T, permc_spec=options['permc_spec']).reshape((1, -1))\n except RuntimeError as e:\n raise InversionError(e)\n elif options['type'] == 'lgmres':\n for i, VV in enumerate(V):\n R[i], info = lgmres(matrix, VV.copy(i),\n tol=options['tol'],\n maxiter=options['maxiter'],\n inner_m=options['inner_m'],\n outer_k=options['outer_k'])\n if info > 0:\n raise InversionError('lgmres failed to converge after {} iterations'.format(info))\n assert info == 0\n elif options['type'] == 'least_squares_lsmr':\n for i, VV in enumerate(V):\n R[i], info, itn, _, _, _, _, _ = lsmr(matrix, VV.copy(i),\n damp=options['damp'],\n atol=options['atol'],\n btol=options['btol'],\n conlim=options['conlim'],\n maxiter=options['maxiter'],\n show=options['show'])\n assert 0 <= info <= 7\n if info == 7:\n raise InversionError('lsmr 
failed to converge after {} iterations'.format(itn))\n elif options['type'] == 'least_squares_lsqr':\n for i, VV in enumerate(V):\n R[i], info, itn, _, _, _, _, _, _, _ = lsqr(matrix, VV.copy(i),\n damp=options['damp'],\n atol=options['atol'],\n btol=options['btol'],\n conlim=options['conlim'],\n iter_lim=options['iter_lim'],\n show=options['show'])\n assert 0 <= info <= 7\n if info == 7:\n raise InversionError('lsmr failed to converge after {} iterations'.format(itn))\n elif options['type'] == 'pyamg':\n if len(V) > 0:\n V_iter = iter(enumerate(V))\n R[0], ml = pyamg.solve(matrix, next(V_iter)[1],\n tol=options['tol'],\n maxiter=options['maxiter'],\n return_solver=True)\n for i, VV in V_iter:\n R[i] = pyamg.solve(matrix, VV,\n tol=options['tol'],\n maxiter=options['maxiter'],\n existing_solver=ml)\n elif options['type'] == 'pyamg-rs':\n ml = pyamg.ruge_stuben_solver(matrix,\n strength=options['strength'],\n CF=options['CF'],\n presmoother=options['presmoother'],\n postsmoother=options['postsmoother'],\n max_levels=options['max_levels'],\n max_coarse=options['max_coarse'],\n coarse_solver=options['coarse_solver'])\n for i, VV in enumerate(V):\n R[i] = ml.solve(VV,\n tol=options['tol'],\n maxiter=options['maxiter'],\n cycle=options['cycle'],\n accel=options['accel'])\n elif options['type'] == 'pyamg-sa':\n ml = pyamg.smoothed_aggregation_solver(matrix,\n symmetry=options['symmetry'],\n strength=options['strength'],\n aggregate=options['aggregate'],\n smooth=options['smooth'],\n presmoother=options['presmoother'],\n postsmoother=options['postsmoother'],\n improve_candidates=options['improve_candidates'],\n max_levels=options['max_levels'],\n max_coarse=options['max_coarse'],\n diagonal_dominance=options['diagonal_dominance'])\n for i, VV in enumerate(V):\n R[i] = ml.solve(VV,\n tol=options['tol'],\n maxiter=options['maxiter'],\n cycle=options['cycle'],\n accel=options['accel'])\n elif options['type'].startswith('generic') or options['type'].startswith('least_squares_generic'):\n logger = getLogger('pymor.operators.numpy._apply_inverse')\n logger.warn('You have selected a (potentially slow) generic solver for a NumPy matrix operator!')\n from pymor.operators.numpy import NumpyMatrixOperator\n from pymor.vectorarrays.numpy import NumpyVectorArray\n return genericsolvers.apply_inverse(NumpyMatrixOperator(matrix),\n NumpyVectorArray(V, copy=False),\n options=options).data\n else:\n raise ValueError('Unknown solver type')\n return R", "def FMatrix(self,xvec,zrun,pars=None):\n n = xvec.Length()\n F = KFMatrixUnitary(n)\n debug('kfmodel.Fmatrix ',F)\n return F", "def cmvn2(vec,in_norm=None, variance_normalization=False,dim=80):\n rows,cols = vec.shape\n if in_norm is None:\n norm = 
[[-3.42167211,-3.19438577,-3.38188171,-3.70518327,-3.95481634,-4.08967972,\n-4.12971735,-4.0177989,-4.05439854,-4.11131907,-4.2040782,-4.20991182,\n-4.25162649,-4.25907564,-4.2473011,-4.2863965,-4.3228898,-4.34782124,\n-4.42950296,-4.39487934,-4.36633348,-4.50143957,-4.48567581,-4.5968647,\n-4.61216831,-4.68406868,-4.68915033,-4.70958185,-4.69221592,-4.70501041,\n-4.70832491,-4.72276783,-4.74502897,-4.77747059,-4.79214573,-4.81906843,\n-4.84250784,-4.8643012,-4.88663578,-4.85466433,-4.90646744,-4.9041872,\n-4.9521184,-4.97165966,-5.01090717,-5.0324893,-5.03520489,-5.03818893,\n-5.04275227,-5.06600761,-5.08489704,-5.11085701,-5.12284422,-5.12537432,\n-5.10954142,-5.08986282,-5.09612083,-5.12694502,-5.16363811,-5.19640732,\n-5.22519541,-5.21797276,-5.21604729,-5.2105999,-5.21371508,-5.21609163,\n-5.2056222,-5.19626617,-5.16277838,-5.13859081,-5.13667679,-5.15312576,\n-5.17222881,-5.1936388,-5.22146034,-5.23832226,-5.24389744,-5.21634912,\n-5.15253687,-5.05822802,1.25118387,0.16807194,0.02456923],\n[0.3435652,0.30806524,0.2948626,0.29855329,0.29850823,0.29500216,\n0.2900461,0.28056651,0.28067291,0.28453702,0.28764045,0.28579083,\n0.28413242,0.28140688,0.27958646,0.28081656,0.28304908,0.28531724,\n0.28741103,0.28793833,0.28851834,0.293441,0.29677734,0.30205214,\n0.30518064,0.30842769,0.31117955,0.31127203,0.31129918,0.31215218,\n0.31162351,0.31246269,0.31293857,0.31346714,0.31359836,0.31413645,\n0.31463048,0.31555009,0.31622899,0.31533957,0.31715053,0.31806079,\n0.31910229,0.31948549,0.31972486,0.3182689,0.31538239,0.31367698,\n0.31298089,0.31383485,0.31637794,0.31893483,0.320057,0.31951809,\n0.31782046,0.31567478,0.31514621,0.31691712,0.3202112,0.32393128,\n0.32680854,0.32837763,0.33002022,0.33165351,0.33369759,0.33539012,\n0.33612099,0.3356232,0.33299479,0.33120826,0.3311016,0.33190542,\n0.33274376,0.33311793,0.33442715,0.33595425,0.33788115,0.34010333,\n0.3433814,0.34954873,2.91277742,2.19889498,4.09453058]]\n else:\n norm = in_norm \n \n norm_vec = numpy.tile(norm[0][:dim],(rows,1))\n \n stdev_vec = numpy.tile(norm[1][:dim],(rows,1))\n \n vec = vec * stdev_vec\n vec += norm_vec\n\n return vec", "def solve_lin(matrix_u,vector_d):\n m_np = np.array(matrix_u)\n v_np = np.array(vector_d)\n\n return np.linalg.solve(m_np, v_np)", "def dynamical_matrix_kspace(kvec, mglat, eps=1e-9, basis=None):\n # Determine if the network has twisted boundary conditions\n if 'theta_twist' in mglat.lp:\n thetatwist = mglat.lp['theta_twist']\n else:\n thetatwist = None\n if 'phi_twist' in mglat.lp:\n phitwist = mglat.lp['phi_twist']\n else:\n phitwist = None\n\n notwist = thetatwist in [None, 0.] 
and phitwist in [None, 0.]\n\n # grab basis from lp if it is a key\n if basis is None and 'basis' in mglat.lp:\n basis = mglat.lp['basis']\n\n if basis in [None, 'XY']:\n '''Compute the dynamical matrix using the xy realspace positions in a simple Euclidean basis'''\n if mglat.bL is None:\n # Rest lengths of springs == distances between particles\n if notwist:\n # not twisted, no stretch, XY basis\n matrix = calc_matrix_magnetic_kvec(kvec, mglat, eps=eps)\n # Using psi basis for now since it is the only one that works.\n # matrix = calc_kmatrix_psi(kvec, mglat, eps=eps)\n # outname = '/Users/npmitchell/Desktop/test/' + 'kx{0:0.2f}'.format(kvec[0]) +\\\n # 'ky{0:0.2f}'.format(kvec[1])\n # leplt.plot_complex_matrix(matrix, name='dynamical_matrix', outpath=outname)\n else:\n # twisted, no stretch, XY basis\n print 'PV = ', mglat.lattice.PV\n print 'thetatwist = ', thetatwist\n print 'phitwist = ', phitwist\n if mglat.lp['periodic_strip']:\n # All periodic bonds are twisted\n matrix = calc_kmatrix_maggyros_twist(kvec, mglat, eps=eps)\n else:\n # First create thetaKL and phiKL, such that thetaKL[i, nn] is 1 if NL[i, nn] is rotated by theta as\n # viewed by particle i and similar for phiKL[i, nn] rotated by phi.\n if 'annulus' in mglat.lp['LatticeTop'] or mglat.lp['shape'] == 'annulus':\n twistcut = np.array([0., 0., np.max(mglat.lattice.xy[:, 0]), 0.])\n thetaKL = mglatfns.form_twistedKL(kvec, mglat, eps=eps)\n phiKL = np.zeros_like(thetaKL, dtype=int)\n else:\n raise RuntimeError('Currently only have twistedKL set up for annular samples')\n\n # Certain bonds are twisted, while the others are normal.\n matrix = calc_kmatrix_maggyros_twist_bonds(kvec, mglat, thetaKL, phiKL, eps=eps)\n else:\n # Rest lengths of springs != distances between particles\n matrix = calc_kmatrix_maggyros_stretched(kvec, mglat, eps=eps)\n elif basis == 'psi':\n '''Compute the dynamical matrix using the basis of clockwise and counter-clockwise oscillating modes'''\n if notwist:\n matrix = calc_kmatrix_magnetic_psi(kvec, mglat, eps=eps)\n else:\n raise RuntimeError('Have not handled twisted psi-basis case yet')\n\n if 'immobile_boundary' in mglat.lp:\n if mglat.lp['immobile_boundary']:\n boundary = mglat.lattice.get_boundary()\n for ind in boundary:\n matrix[2 * ind, :] = 0\n matrix[2 * ind + 1, :] = 0\n return matrix", "def L3(self,\n emb_vec: tf.Tensor):\n # [N, E], [N, E] Tensors\n vec_real, vec_img = tf.split(emb_vec, 2, axis=1)\n # [N, E] Tensor\n mod_vec = tf.sqrt(tf.pow(vec_real, 2) + tf.pow(vec_img, 2))\n mod_vec_cube = tf.pow(mod_vec, 3)\n # Scalar\n res = tf.reduce_sum(mod_vec_cube, [0, 1])\n return res", "def power_matrix(A, k):\n nrow = np.shape(A)[0]\n A0 = np.identity(nrow) \n for k in range(q):\n A0 = np.dot(A0, A)\n \n return A0", "def create_matrix(array_vec, size_vector=128, size_block=32):\n matrix = []\n for i in array_vec:\n vec = [i[j:j + size_block] for j in range(0, len(i), size_block)]\n\n for j in range(size_block):\n t = []\n for k in vec:\n t += rotate(k, j)\n\n matrix.append(t)\n return matrix", "def _log_vector_matrix(vs, ms):\n return tf.reduce_logsumexp(vs[..., tf.newaxis] + ms, axis=-2)", "def build_embedding_matrix_from_gensim_model(word_index, model, method=\"model\", lower=True, verbose=True):\n embedding_matrix = None\n for word, i in tqdm(word_index.items(), disable=not verbose):\n if lower:\n word = word.lower()\n embedding_vector = get_vect(word, model, method)\n if embedding_matrix is None and embedding_vector is not None:\n embedding_matrix = np.zeros((len(word_index) + 1, 
embedding_vector.shape[0]))\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n return embedding_matrix", "def vbe_step(self):\n\n digam_alpha = digamma(np.sum(self.alpha_k))\n for k in range(self.k):\n\n # compute estimate over ln det(lamb)\n tmp = sum(digamma((self.nu_k[k] + 1 - j) / 2) for j in range(self.dim))\n\n det = np.linalg.det(self.w_k[k])\n self.log_lamb[k] = tmp + self.dim * np.log(2) + np.log(det)\n\n # compute estimate for ln pi\n self.log_pi[k] = digamma(self.alpha_k[k]) - digam_alpha\n\n for n in range(self.n):\n tmp = self.x[n] - self.m_k[k]\n # compute estimate over mu and lambda\n self.estimate[n, k] = self.dim * (1 / self.beta_k[k]) + self.nu_k[k] * (tmp.T @ self.w_k[k] @ tmp)", "def test_assemble_vec_mass(self, backend, mass_vector_ffc, vecmat, coords,\n elements, elem_vnode,\n expected_vector_matrix):\n op2.par_loop(mass_vector_ffc, elements(3,3),\n vecmat((elem_vnode[op2.i[0]], elem_vnode[op2.i[1]]), op2.INC),\n coords(elem_vnode, op2.READ))\n eps=1.e-6\n assert_allclose(vecmat.values, expected_vector_matrix, eps)", "def calculate_posvij_matrices(main_tetrad_ark):\n\n # Import all the possible solutions to the Vij matrices\n vij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n vij_matrices = []\n\n print(\" \")\n print(\" Calculating Vij matrices\")\n print(\" \")\n # for i in range(0, len(main_tetrad_ark)):\n for i in range(0, len(vij_possibilities)):\n tet_i = [x[1] for x in main_tetrad_ark[i]]\n tri_tet = [np.transpose(i) for i in tet_i]\n print(\"# ********************************\")\n # print(\" \")\n print(\"MATRIX i: \", i)\n print(\" \")\n for j in range(0, len(main_tetrad_ark)):\n tet_j = [x[1] for x in main_tetrad_ark[j]]\n trj_tet = [np.transpose(j) for j in tet_j]\n vij_temp = []\n # print(\"# ********************************\")\n print(\" \")\n print(\"MATRIX j: \", j)\n temp_zero = np.zeros((4,4), dtype=int)\n for x in range(0,len(tet_i)):\n test_1half = np.dot(tri_tet[x],tet_j[x])\n test_2half = np.dot(trj_tet[x],tet_i[x])\n test_difs = np.subtract(test_1half, test_2half)\n # print(\" \")\n # print(test_difs)\n temp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n vij_temp.append(temp_mat)\n # print(\"\")\n temp_add1 = np.add(vij_temp[0], vij_temp[1])\n temp_add2 = np.add(temp_add1, vij_temp[2])\n tempf = np.add(temp_add2, vij_temp[3])\n # tempf = np.divide(temp_add3, 2)\n for ijx in vij_possibilities:\n if np.array_equal(temp_addf, ijx[0]):\n print(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n print(\"l-solution found:\", ijx[1])\n print(temp_addf)\n print(\"\")\n print(ijx[0])\n if np.array_equal(temp_addf, temp_zero):\n pass\n else:\n vij_matrices.append(temp_addf)\n # print(\"\")\n print(temp_addf)\n # vij_matrices.append(temp_addf)\n vijmats_size = sys.getsizeof(vij_matrices)\n print(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n print(\"Length of Vij Matrices\")\n print(len(vij_matrices))\n print(vij_matrices)\n pass", "def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n 
beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)", "def val_get_left_gmt_matrix(x, k_list, l_list, m_list, mult_table_vals, ndims):\n intermed = np.zeros((ndims, ndims))\n test_ind = 0\n for k in k_list:\n j = l_list[test_ind]\n i = m_list[test_ind]\n intermed[j, i] += mult_table_vals[test_ind] * x[k]\n test_ind = test_ind + 1\n return intermed", "def ALIGNF(km_list, ky):\n n_feat = len(km_list)\n\n #km_list_copy = []\n # center the kernel first\n #for i in range(n_feat):\n # km_list_copy.append(center(km_list[i].copy()))\n #ky_copy = center(ky.copy())\n\n\n a = np.zeros(n_feat)\n for i in range(n_feat):\n a[i] = f_dot(km_list[i], ky)\n\n M = np.zeros((n_feat, n_feat))\n for i in range(n_feat):\n for j in range(i,n_feat):\n M[i,j] = f_dot(km_list[i],km_list[j])\n M[j,i] = M[i,j]\n\n Q = 2*M\n C = -2*a\n\n Q = Q + np.diag(np.ones(n_feat)*1e-8)\n\n ################################################\n # Using mosek to solve the quadratice programming\n\n # Set upper diagonal element to zeros, mosek only accept lower triangle\n iu = np.triu_indices(n_feat,1)\n Q[iu] = 0\n\n # start solving with mosek\n inf = 0.0\n env = mosek.Env()\n env.set_Stream(mosek.streamtype.log, streamprinter)\n\n # Create a task \n task = env.Task()\n task.set_Stream(mosek.streamtype.log, streamprinter)\n\n # Set up bound for variables \n bkx = [mosek.boundkey.lo]* n_feat\n blx = [0.0] * n_feat\n #bkx = [mosek.boundkey.fr]* n_feat\n #blx = [-inf] * n_feat\n bux = [+inf] * n_feat\n\n numvar = len(bkx)\n\n task.appendvars(numvar)\n\n for j in range(numvar):\n task.putcj(j,C[j])\n task.putvarbound(j,bkx[j],blx[j],bux[j])\n\n # Set up quadratic objective \n inds = np.nonzero(Q)\n qsubi = inds[0].tolist()\n qsubj = inds[1].tolist()\n qval = Q[inds].tolist()\n\n # Input quadratic objective \n task.putqobj(qsubi,qsubj,qval)\n\n # Input objective sense (minimize/mximize) \n task.putobjsense(mosek.objsense.minimize)\n\n task.optimize()\n\n # Print a summary containing information \n # about the solution for debugging purposes \n task.solutionsummary(mosek.streamtype.msg)\n\n solsta = task.getsolsta(mosek.soltype.itr)\n if (solsta == mosek.solsta.optimal or\n solsta == mosek.solsta.near_optimal):\n # Output a solution \n xx = np.zeros(numvar, float)\n task.getxx(mosek.soltype.itr, xx)\n #xx = xx/np.linalg.norm(xx)\n return xx\n else:\n print solsta\n xx = np.zeros(numvar, float)\n task.getxx(mosek.soltype.itr, xx)\n #xx = xx/np.linalg.norm(xx)\n return xx", "def vecTimesMat(x, M):\n return [dot(m, x) for m in transpose(M)]", "def eigenalgo(self, accuracy: float = 
0, cap: int = 50000, version: str = \"Givens\", not_skip: bool = True):\n j, temps, verify_accuracy = 0, 0, np.ones((self.N, self.N), dtype=bool) ^ np.eye(self.N, dtype=bool)\n if version == \"Gram-Schmidt\":\n temps = time()\n while np.any(abs(self.vap[verify_accuracy]) > accuracy) and j < cap:\n j += 1\n q, r = self.gram_schmidt_qr()\n self.vap, self.vep = r @ q, self.vep @ q\n\n elif version == \"Givens\":\n verify_accuracy = np.ones((self.N, self.N), dtype=bool) ^ np.eye(self.N, dtype=bool)\n temps = time()\n while np.any(abs(self.vap[verify_accuracy]) > accuracy) and j < cap:\n j += 1\n q, r = self.givens_qr()\n self.vap, self.vep = r @ q, self.vep @ q\n\n elif version == \"Rayleigh\":\n not_sing, diff, cond, j = True, accuracy + 1, True, 0\n temps = time()\n while cond: # Stop condition, all eigenvalues must be different\n while diff > accuracy and j < cap and not_sing:\n j += 1\n self.rvap, self.vep, diff, not_sing = self.rayleigh_iteration(self.rvap, self.vep)\n\n cond = False\n if j < cap:\n self.calc, first, not_sing = np.zeros(self.N, dtype=bool), True, True\n for i in range(self.N):\n if np.sum(np.less(np.abs(self.rvap - self.rvap[i]), 10 ** -6)) != 1:\n self.rvap[i + 1:] += self.memorize[i]\n if first:\n self.memorize[i] += 0.5\n self.vep[i + 1:, i + 1:] = np.eye(self.N - i - 1)\n first, cond, diff = False, True, accuracy + 1\n self.calc[i + 1:] = 1\n temps = time() - temps\n return self.rvap, self.vep, diff, j, temps\n\n else:\n print(\"Please select an appropriate value for the version parameter\")\n\n temps = time() - temps\n diff = np.max(abs(self.vap[verify_accuracy]))\n return np.diag(self.vap), self.vep, diff, j, temps", "def updateH(self,k_vec,it):\n self.k_vec = k_vec\n self.it = it\n self.H_kc = fl.H_k(k_vec, self.it, self.delta)", "def InverseMatrix(matrix,vector):\r\n # Unveri reversible matrix\r\n if Determinant(matrix, 1) == 0:\r\n print(\"Error,Singular Matrix\\n\")\r\n return\r\n # result matrix initialized as singularity matrix\r\n result = MakeIMatrix(len(matrix), len(matrix))\r\n # loop for each row\r\n for i in range(len(matrix[0])):\r\n # turn the pivot into 1 (make elementary matrix and multiply with the result matrix )\r\n # pivoting process\r\n matrix, vector = RowXchange(matrix, vector)\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[i][i] = 1/matrix[i][i]\r\n result = MultiplyMatrix(elementary, result)\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n # make elementary loop to iterate for each row and subtracrt the number below (specific) pivot to zero (make\r\n # elementary matrix and multiply with the result matrix )\r\n for j in range(i+1, len(matrix)):\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[j][i] = -(matrix[j][i])\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n result = MultiplyMatrix(elementary, result)\r\n\r\n\r\n # after finishing with the lower part of the matrix subtract the numbers above the pivot with elementary for loop\r\n # (make elementary matrix and multiply with the result matrix )\r\n for i in range(len(matrix[0])-1, 0, -1):\r\n for j in range(i-1, -1, -1):\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[j][i] = -(matrix[j][i])\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n result = MultiplyMatrix(elementary, result)\r\n\r\n return result", "def mode_expansion_from_model_forsparse(Psi, omega, M, K, measured):\n\n\n measured = measured.reshape(-1) # retained dofs\n num_measured = len(measured)\n ndof = int(M.shape[0]) # length(M);\n 
unmeasured_dofs = list(set(np.arange(ndof)) - set(measured))\n num_unmeasured = len(unmeasured_dofs)\n\n M= lil_matrix(M)\n\n K= lil_matrix(K)\n\n Muu = slice_forSparse(M, unmeasured_dofs, unmeasured_dofs)\n\n Kuu = slice_forSparse(K, unmeasured_dofs, unmeasured_dofs)\n\n Mum = slice_forSparse(M, unmeasured_dofs, measured)\n\n Kum = slice_forSparse(K, unmeasured_dofs, measured)\n\n if isinstance(omega, float):\n omega = np.array(omega).reshape(1)\n\n Psi_full = np.zeros((num_measured + num_unmeasured, Psi.shape[1]))\n Psi_full[measured] = Psi\n\n for i, omega_n in enumerate(omega):\n Psi_i = Psi[:, i].reshape(-1, 1)\n Psi_unmeasured = la.solve((Kuu - Muu * omega_n**2),\n (Kum - Mum * omega_n**2)@Psi_i)\n Psi_unmeasured = Psi_unmeasured.reshape(-1, )\n Psi_full[unmeasured_dofs, i] = Psi_unmeasured\n # Psi_full = Psi_full.reshape(-1, 1)\n return Psi_full", "def eigen_operator(shape, e, v, **kargs):\n def matvec(x):\n k = [np.dot(x.T, vi) for vi in v]\n return np.sum([ki * ei * vi for ki, ei, vi in zip(k, e, v)], axis=0)\n return LinearOperator(shape, matvec=matvec, rmatvec=matvec, **kargs)", "def svd(matrix, approach):\n\n # Getting the eigenvalues and vectors of transpose(A) * A for V and Sigma\n a = mat_multiply(transpose(matrix), matrix)\n if approach == \"qr\":\n V, sigma, iterations = qr_eig(a)\n else:\n V, sigma, iterations = eig(a)\n\n # Sorting singular values and the colums of V accordingly\n V = transpose(V)\n\n singular_values = list()\n sorted_V = list()\n\n r = 0\n for i in range(rows(sigma)):\n singular_values.append([(sigma[i][i]), i])\n if sigma[i][i] > math.exp(-8):\n r += 1\n\n singular_values.sort(key=first_item, reverse=True)\n\n sigma_r = eye(r)\n sigma_r_inv = eye(r)\n\n # Constructing the sorted U and sigma matrices\n i, j = 0, 0\n for value in singular_values:\n if value[0] > math.exp(-8):\n sorted_V.append(V[value[1]])\n sigma_r[j][j] = value[0] ** (1 / 2)\n sigma_r_inv[j][j] = 1 / (value[0] ** (1 / 2))\n j += 1\n i += 1\n\n # Constructing U by multiplying V and sigma inverse\n sorted_U = mat_multiply(mat_multiply(matrix, transpose(sorted_V)), sigma_r_inv)\n\n return (sorted_U, sigma_r, sorted_V, r, iterations)", "def computesvd(train, K):\n mean = global_mean(train)\n train = standardize(train, mean)\n std = compute_std(train)\n train = div_std(train)\n\n U, s, Vt = sp.linalg.svds(train, k=K)\n\n dim = (len(s), len(s))\n S = np.zeros(dim)\n for i in range(len(s)):\n S[i, i] = mt.sqrt(s[i])\n\n U = sp.lil_matrix(U, dtype=np.float32) # dim(M,k)\n S = sp.lil_matrix(S, dtype=np.float32) # dim(k,k)\n Vt = sp.lil_matrix(Vt, dtype=np.float32) # dim(k,N)\n\n user_features = S.dot(Vt)\n item_features = np.transpose(U.dot(S))\n\n return user_features, item_features, mean, std", "def lanczos_decomp(vector_prod_fn, scalar, n, k):\n Q = tf.zeros([n, 1])\n v = tf.random_uniform([n, 1])\n v = v / tf.norm(v)\n Q = tf.concat([Q, v], axis=1)\n\n # diagonals of the tridiagonal matrix\n beta = tf.constant(0.0, dtype=tf.float32, shape=[1])\n alpha = tf.constant(0.0, dtype=tf.float32, shape=[1])\n\n for i in range(k):\n v = vector_prod_fn(tf.reshape(Q[:, i+1], [n, 1])) - tf.scalar_mul(scalar, tf.reshape(Q[:, i+1], [n, 1]))\n v = tf.reshape(v, [n,])\n curr_alpha = tf.reshape(tf.reduce_sum(v * Q[:, i+1]), [1,])\n alpha = tf.concat([alpha, curr_alpha], axis=0)\n v = v-beta[-1]*Q[:, i]-alpha[-1]*Q[:, i+1]\n curr_beta = tf.reshape(tf.norm(v), [1,])\n beta = tf.concat([beta, curr_beta], axis=0)\n curr_norm = tf.reshape(v/(beta[-1]+1e-8), [n, 1])\n Q = tf.concat([Q, curr_norm], axis=1)\n\n 
alpha = tf.slice(alpha, begin=[1], size=[-1])\n beta = tf.slice(beta, begin=[1], size=[k-1])\n Q = tf.slice(Q, begin=[0, 1], size=[-1, k])\n return alpha, beta, Q", "def k_fold_FVmoVMF(data, wv_model, n_comp=15, k=10, reg=1):\n \n ## Prepare the corpus.\n tokenized_data_text = [data[k][0] for k in range(len(data))] # data\n \n # Initialize a moVMF with K components.\n vmf_neu = VonMisesFisherMixture(n_clusters=n_comp, posterior_type='soft', n_init=4, n_jobs=-2,\n init='k-means++')\n\n # Fit the word embedding data with the GMM model.\n vmf_neu.fit(normalize(wv_model.vectors))\n \n ## Create train/test sets.\n data_tags = [data[k][1] for k in range(len(data))] # tags\n comb_data = list(zip(tokenized_data_text, data_tags))\n random.shuffle(comb_data)\n folds = chunks(comb_data, k)\n \n k_fold_acc = []\n \n for fold in folds:\n # Training data\n X_train = [fold[0][k][0] for k in range(len(fold[0]))] # text \n y_train = [fold[0][k][1] for k in range(len(fold[0]))] # labels\n \n # Test data\n X_test = [fold[1][k][0] for k in range(len(fold[1]))] # text \n y_test = [fold[1][k][1] for k in range(len(fold[1]))] # labels\n \n # Get sentence embedding by using the FVs.\n X_train_FV = [FV_moVMF(BoWE_doc(wv_model, X_train[k]), vmf_neu) for k in range(len(X_train))]\n X_test_FV = [FV_moVMF(BoWE_doc(wv_model, X_test[k]), vmf_neu) for k in range(len(X_test))]\n \n ## Logistic regression classifier.\n\n # Use the elements in train_vecs as feature vectors.\n logreg = linear_model.LogisticRegression(C=reg, n_jobs=1, solver='liblinear', multi_class='ovr')\n logreg = logreg.fit(X_train_FV, y_train)\n\n ## Evaluation.\n acc = evaluate_prediction(logreg, X_test_FV, y_test)\n k_fold_acc.append(acc)\n \n return k_fold_acc", "def compute_kappa_map(lens_vec, size, size_map):\n\n par_file_name = \"kappa_map.par\"\n fit_file_name = \"kappa_map.fits\"\n z_source = 2.0\n size_map = size_map * 1.05\n\n file_map = open(par_file_name, 'w')\n\n conv_lens_vec(lens_vec)\n\n file_map.write(\"runmode\\n\" )\n file_map.write(\" reference 3 0 0\\n\")\n file_map.write(\" verbose 0\\n\" )\n file_map.write(\" mass 3 \" + str(size) + \" \" + \\\n str(lens_vec[0][\"z_lens\"]) + \" \" + fit_file_name + \"\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"source\\n\")\n file_map.write(\" z_source \" + str(z_source) + \"\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"grille\\n\")\n file_map.write(\" nombre 128\\n\")\n file_map.write(\" nlens 4\\n\")\n file_map.write(\" nlens_crit 1\\n\")\n file_map.write(\" nlens_opt 0\\n\")\n file_map.write(\" polaire 1\\n\")\n file_map.write(\" end\\n\")\n\n\n for i in range(len(lens_vec)):\n string_out = 'potential ' + str(i) + '\\n'\n file_map.write(string_out)\n #print string_out,\n for keys in lens_vec[i].keys():\n string_out = ' ' + keys + ' ' + str(lens_vec[i][keys]) + \\\n '\\n'\n #print string_out,\n file_map.write(string_out)\n file_map.write(' end\\n')\n\n file_map.write(\"cosmology\\n\")\n file_map.write(\" H0 70.0\\n\")\n file_map.write(\" omega 0.3\\n\")\n file_map.write(\" lambda 0.7\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"champ\\n\")\n file_map.write(\" xmin -101\\n\")\n file_map.write(\" xmax 100\\n\")\n file_map.write(\" ymin -101\\n\")\n file_map.write(\" ymax 100\\n\")\n file_map.write(\" dmax \" + str(size_map) + \"\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"fini\\n\")\n\n file_map.close()", "def _matvec(self, h: np.ndarray) -> np.ndarray:\n return convolve(self.x, h, mode='valid', method=self.method)", "def kl_normal(qm, qv, pm, pv):\n 
element_wise = 0.5 * (t.log(pv) - t.log(qv) + qv / pv + (qm - pm).pow(2) / pv - 1)\n kl = element_wise.sum(-1)\n return kl", "def test_matvec_adjoint(self):\n signal = tf.constant([1, 2, 4], dtype=tf.float32)\n result = tf.linalg.matvec(self.linop, signal, adjoint_a=True)\n self.assertAllClose(\n result, tf.linalg.matvec(tf.transpose(self.matrix), signal))", "def get_eigen_value(A, v):\n Av = np.dot(A, v)\n print(\"Mag v, should be 1:\", mag(v))\n lmb = mag(Av) / mag(v)\n return lmb", "def eigen(M):\n values, vectors = np.linalg.eig(M)\n return values, vectors", "def fastdiag_solver(KM):\n dim = len(KM)\n n = tuple(K.shape[0] for (K,_) in KM)\n EV = [scipy.linalg.eigh(_asdense(K), _asdense(M)) for (K,M) in KM]\n\n diags = []\n for d in range(dim):\n D = [np.ones(n[j]) for j in range(dim)]\n D[d] = EV[d][0] # eigenvalues\n diags.append(reduce(np.kron, D))\n diag = sum(diags)\n\n l_op = KroneckerOperator(*tuple(U for (_,U) in EV))\n r_op = KroneckerOperator(*tuple(U.T for (_,U) in EV))\n\n return l_op * DiagonalOperator(1.0 / diag) * r_op" ]
[ "0.57746553", "0.57497203", "0.5428247", "0.5418458", "0.5324983", "0.5313086", "0.5289994", "0.52694213", "0.5208599", "0.5204189", "0.5199041", "0.5128154", "0.5054749", "0.5047108", "0.49921957", "0.4964914", "0.49533275", "0.49489313", "0.4900625", "0.48871598", "0.48866433", "0.48843616", "0.48802063", "0.48596802", "0.4856164", "0.48491302", "0.48322314", "0.48235464", "0.48173493", "0.4811363", "0.47989658", "0.47985512", "0.47846204", "0.47839916", "0.47826296", "0.47824982", "0.4777942", "0.47616595", "0.47467318", "0.47434738", "0.473399", "0.47234505", "0.47176987", "0.4701982", "0.46975946", "0.46868584", "0.46803492", "0.4676045", "0.4673196", "0.46688217", "0.46608052", "0.46606284", "0.4647755", "0.46412063", "0.4639123", "0.46247354", "0.46244687", "0.46133488", "0.4610992", "0.4601349", "0.45842463", "0.45774212", "0.45760378", "0.45740756", "0.456111", "0.45594546", "0.45585436", "0.45545253", "0.4552892", "0.4550111", "0.4548125", "0.4546098", "0.45402032", "0.45373905", "0.45371208", "0.45351523", "0.45291284", "0.45251393", "0.4522833", "0.45186892", "0.4518472", "0.45166704", "0.45158952", "0.45155576", "0.45142752", "0.45129347", "0.45125532", "0.45001552", "0.44987547", "0.44959563", "0.44925416", "0.4490946", "0.4486997", "0.4486345", "0.44804037", "0.44773757", "0.44762108", "0.44681227", "0.4464356", "0.4463486" ]
0.6143391
0
Helper function to generate a jitted function to do an implicitly restarted Arnoldi factorization of `matvec`. The returned routine finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec` by alternating between compression and re-expansion of an initial `num_krylov_vecs`-step Arnoldi factorization.
def _implicitly_restarted_arnoldi(jax: types.ModuleType) -> Callable:
  arnoldi_fact = _generate_arnoldi_factorization(jax)

  # ######################################################
  # #######  NEW SORTING FUNCTIONS INSERTED HERE  ########
  # ######################################################
  @functools.partial(jax.jit, static_argnums=(1,))
  def LR_sort(evals, p):
    inds = np.argsort(jax.numpy.real(evals), kind='stable')[::-1]
    shifts = evals[inds][-p:]
    return shifts, inds

  @functools.partial(jax.jit, static_argnums=(1,))
  def LM_sort(evals, p):
    inds = np.argsort(jax.numpy.abs(evals), kind='stable')[::-1]
    shifts = evals[inds][-p:]
    return shifts, inds

  # #######################################################
  # #######################################################
  # #######################################################
  @functools.partial(jax.jit, static_argnums=(4, 5, 6))
  def shifted_QR(Vm, Hm, fm, evals, k, p, which, res_thresh):
    funs = [LR_sort, LM_sort]
    shifts, _ = funs[which](evals, p)
    # compress to k = numeig
    q = jax.numpy.zeros(Hm.shape[0])
    q = jax.ops.index_update(q, jax.ops.index[-1], 1)
    m = Hm.shape[0]

    for shift in shifts:
      Qj, _ = jax.numpy.linalg.qr(Hm - shift * jax.numpy.eye(m))
      Hm = Qj.T.conj() @ Hm @ Qj
      Vm = Qj.T @ Vm
      q = q @ Qj

    fk = Vm[k, :] * Hm[k, k - 1] + fm * q[k - 1]
    Vk = Vm[0:k, :]
    Hk = Hm[0:k, 0:k]
    H = jax.numpy.zeros((k + p + 1, k + p), dtype=fm.dtype)
    H = jax.ops.index_update(H, jax.ops.index[0:k, 0:k], Hk)
    Z = jax.numpy.linalg.norm(fk)
    v = fk / Z
    krylov_vectors = jax.numpy.zeros((k + p + 1, Vm.shape[1]), dtype=fm.dtype)
    krylov_vectors = jax.ops.index_update(krylov_vectors,
                                          jax.ops.index[0:k, :], Vk)
    krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[k:], v)
    Z = jax.numpy.linalg.norm(fk)
    # If fk is a zero vector, then Arnoldi has exactly converged;
    # use a small threshold to check this.
    return krylov_vectors, H, fk, Z < res_thresh

  @functools.partial(jax.jit, static_argnums=(2,))
  def update_data(Vm_tmp, Hm_tmp, numits):
    Vm = Vm_tmp[0:numits, :]
    Hm = Hm_tmp[0:numits, 0:numits]
    fm = Vm_tmp[numits, :] * Hm_tmp[numits, numits - 1]
    return Vm, Hm, fm

  @functools.partial(jax.jit, static_argnums=(3,))
  def get_vectors(Vm, unitary, inds, numeig):

    def body_vector(i, vals):
      krv, unitary, states, inds = vals
      dim = unitary.shape[1]
      n, m = jax.numpy.divmod(i, dim)
      states = jax.ops.index_add(states, jax.ops.index[n, :],
                                 krv[m, :] * unitary[m, inds[n]])
      return [krv, unitary, states, inds]

    state_vectors = jax.numpy.zeros([numeig, Vm.shape[1]], dtype=Vm.dtype)
    _, _, state_vectors, _ = jax.lax.fori_loop(
        0, numeig * Vm.shape[0], body_vector,
        [Vm, unitary, state_vectors, inds])
    state_norms = jax.numpy.linalg.norm(state_vectors, axis=1)
    state_vectors = state_vectors / state_norms[:, None]
    return state_vectors

  def implicitly_restarted_arnoldi_method(
      matvec, args, initial_state, num_krylov_vecs, numeig, which, eps,
      maxiter, res_thresh) -> Tuple[List[Tensor], List[Tensor]]:
    """
    Implicitly restarted arnoldi factorization of `matvec`. The routine
    finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec`
    by alternating between compression and re-expansion of an initial
    `num_krylov_vecs`-step Arnoldi factorization.
    Note: The caller has to ensure that the dtype of the return value
    of `matvec` matches the dtype of the initial state. Otherwise jax
    will raise a TypeError.
    Args:
      matvec: A callable representing the linear operator.
      args: Arguments to `matvec`.  `matvec` is called with
        `matvec(x, *args)` with `x` the input array on which
        `matvec` should act.
      initial_state: A starting vector for the iteration.
      num_krylov_vecs: Number of krylov vectors of the arnoldi factorization.
      numeig: The number of desired eigenvector-eigenvalue pairs.
      which: Which eigenvalues to target. Currently supported: `which = 'LR'`
        or `which = 'LM'`.
      eps: Convergence threshold. If the norm of a krylov vector drops below
        `eps` the iteration is terminated.
      maxiter: Maximum number of (outer) iteration steps.
      res_thresh: Residual threshold; if the norm of the residual vector of
        a compressed factorization drops below it, the restart is considered
        exactly converged.
    Returns:
      eta, U: Two lists containing eigenvalues and eigenvectors.
    """
    N = np.prod(initial_state.shape)
    p = num_krylov_vecs - numeig
    num_krylov_vecs = np.min([num_krylov_vecs, N])
    if (p <= 1) and (num_krylov_vecs < N):
      raise ValueError(f"`num_krylov_vecs` must be between `numeig` + 1 <"
                       f" `num_krylov_vecs` <= N={N},"
                       f" `num_krylov_vecs`={num_krylov_vecs}")

    dtype = initial_state.dtype
    # initialize arrays
    krylov_vectors = jax.numpy.zeros(
        (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),
        dtype=dtype)
    H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)
    # perform initial arnoldi factorization
    Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,
                                                     initial_state,
                                                     krylov_vectors, H, 0,
                                                     num_krylov_vecs, eps)
    # obtain an m-step arnoldi factorization
    Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)

    it = 0
    if which == 'LR':
      _which = 0
    elif which == 'LM':
      _which = 1
    else:
      raise ValueError(f"which = {which} not implemented")
    # make sure the dtypes are matching
    if maxiter > 0:
      if Vm.dtype == np.float64:
        dtype = np.complex128
      elif Vm.dtype == np.float32:
        dtype = np.complex64
      elif Vm.dtype == np.complex128:
        dtype = Vm.dtype
      elif Vm.dtype == np.complex64:
        dtype = Vm.dtype
      else:
        raise TypeError(f'dtype {Vm.dtype} not supported')
      Vm = Vm.astype(dtype)
      Hm = Hm.astype(dtype)
      fm = fm.astype(dtype)

    while (it < maxiter) and (not converged):
      evals, _ = jax.numpy.linalg.eig(Hm)
      krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,
                                                    p, _which, res_thresh)
      if converged:
        break
      v0 = jax.numpy.reshape(fk, initial_state.shape)
      # restart
      Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,
                                                  krylov_vectors, H, numeig,
                                                  num_krylov_vecs, eps)
      Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)
      it += 1

    ev_, U_ = np.linalg.eig(np.array(Hm))
    eigvals = jax.numpy.array(ev_)
    U = jax.numpy.array(U_)
    _, inds = LR_sort(eigvals, _which)
    vectors = get_vectors(Vm, U, inds, numeig)

    return eigvals[inds[0:numeig]], [
        jax.numpy.reshape(vectors[n, :], initial_state.shape)
        for n in range(numeig)
    ]

  return implicitly_restarted_arnoldi_method
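A hedged usage sketch of the factory above (an editorial illustration, not part of this record): it assumes a legacy JAX release that still provides `jax.ops.index_update`/`jax.ops.index_add`, and that `_implicitly_restarted_arnoldi` together with its helper `_generate_arnoldi_factorization` is importable from the same module. The call signature itself is taken verbatim from the code above.

# Hypothetical usage, assuming a legacy JAX with jax.ops.index_update.
import jax
import jax.numpy as jnp
import numpy as np

ira = _implicitly_restarted_arnoldi(jax)  # build the jitted solver once

n = 60
A = jnp.array(np.random.randn(n, n))
v0 = jnp.array(np.random.randn(n))

def matvec(x, mat):  # extra operands arrive positionally via `args`
  return mat @ x

# four largest-magnitude eigenpairs from a 20-dimensional Krylov space
eigvals, eigvecs = ira(matvec, [A], v0, num_krylov_vecs=20, numeig=4,
                       which='LM', eps=1e-8, maxiter=30, res_thresh=1e-8)

Since the shifted-QR compression is the heart of the restart, the following self-contained NumPy sketch (again an illustration, not library code) mirrors `shifted_QR`: it builds an m-step Arnoldi factorization A @ V[:m].T = V[:m].T @ H[:m, :m] + fm e_m^T, applies p = m - k exact-shift QR sweeps, and verifies that the truncated k-step factorization still satisfies the Arnoldi recurrence.

# Minimal NumPy demonstration of the implicit (shifted-QR) restart step.
import numpy as np

rng = np.random.default_rng(0)
n, m, k = 50, 20, 5                    # matrix size, Krylov dim, kept steps
A = rng.standard_normal((n, n))

# m-step Arnoldi factorization via modified Gram-Schmidt
V = np.zeros((m + 1, n))
H = np.zeros((m + 1, m))
b = rng.standard_normal(n)
V[0] = b / np.linalg.norm(b)
for j in range(m):
  w = A @ V[j]
  for i in range(j + 1):               # orthogonalize against earlier vectors
    H[i, j] = V[i] @ w
    w -= H[i, j] * V[i]
  H[j + 1, j] = np.linalg.norm(w)
  V[j + 1] = w / H[j + 1, j]

Hm, Vm = H[:m, :m].astype(complex), V[:m].astype(complex)
fm = V[m] * H[m, m - 1]                # residual vector of the factorization

# exact shifts: the m - k unwanted eigenvalues (smallest magnitude, 'LM')
evals = np.linalg.eigvals(Hm)
shifts = evals[np.argsort(np.abs(evals))][:m - k]

q = np.zeros(m, dtype=complex)
q[-1] = 1.0                            # tracks e_m^T Q1 Q2 ... Qp
for s in shifts:
  Qj, _ = np.linalg.qr(Hm - s * np.eye(m))
  Hm = Qj.conj().T @ Hm @ Qj           # similarity transform, stays Hessenberg
  Vm = Qj.T @ Vm                       # rows of Vm are the Krylov vectors
  q = q @ Qj

fk = Vm[k] * Hm[k, k - 1] + fm * q[k - 1]   # compressed residual, as above

# check the k-step recurrence A Vk^T = Vk^T Hk + fk e_k^T after compression
res = A @ Vm[:k].T - Vm[:k].T @ Hm[:k, :k]
res[:, -1] -= fk
print(np.linalg.norm(res))             # tiny (~1e-12): the recurrence survives

The printed residual being at machine-precision level is exactly why the restart is "implicit": the compressed k-step factorization can be re-expanded to m steps without ever forming A explicitly or restarting the Arnoldi process from scratch.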
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. 
Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact", "def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # 
restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]", "def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable:\n\n @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6))\n def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho):\n \"\"\"\n Jitted lanczos routine.\n Args:\n matvec: A callable implementing the matrix-vector product of a\n linear operator.\n arguments: Arguments to `matvec` additional to an input vector.\n `matvec` will be called as `matvec(init, *args)`.\n init: An initial input state to `matvec`.\n ncv: Number of krylov iterations (i.e. dimension of the Krylov space).\n neig: Number of eigenvalue-eigenvector pairs to be computed.\n landelta: Convergence parameter: if the norm of the current Lanczos vector\n falls below `landelta`, iteration is stopped.\n reortho: If `True`, reorthogonalize all krylov vectors at each step.\n This should be used if `neig>1`.\n Returns:\n jax.numpy.ndarray: Eigenvalues\n list: Eigenvectors\n \"\"\"\n\n def body_modified_gram_schmidt(i, vals):\n vector, krylov_vectors = vals\n v = krylov_vectors[i, :]\n vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors]\n\n def body_lanczos(vals):\n current_vector, krylov_vectors, vector_norms = vals[0:3]\n diagonal_elements, matvec, args, _ = vals[3:7]\n threshold, i, maxiteration = vals[7:]\n norm = jax.numpy.linalg.norm(current_vector)\n normalized_vector = current_vector / norm\n normalized_vector, krylov_vectors = jax.lax.cond(\n reortho, True,\n lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt,\n [normalized_vector, krylov_vectors]),\n False, lambda x: [normalized_vector, krylov_vectors])\n Av = matvec(normalized_vector, *args)\n\n diag_element = jax.numpy.vdot(normalized_vector, Av)\n\n res = jax.numpy.reshape(\n jax.numpy.ravel(Av) -\n jax.numpy.ravel(normalized_vector) * diag_element -\n krylov_vectors[i - 1] * norm, Av.shape)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :],\n jax.numpy.ravel(normalized_vector))\n\n vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1],\n norm)\n diagonal_elements = jax.ops.index_update(diagonal_elements,\n jax.ops.index[i - 1],\n diag_element)\n\n return [\n res, krylov_vectors, vector_norms, diagonal_elements, matvec, args,\n norm, threshold, i + 1, maxiteration\n ]\n\n def cond_fun(vals):\n _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals\n\n def check_thresh(check_vals):\n val, thresh = check_vals\n return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)\n\n return jax.lax.cond(iteration <= maxiteration, [norm, threshold],\n check_thresh, False, lambda x: x)\n\n numel = jax.numpy.prod(init.shape)\n krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype)\n norms = jax.numpy.zeros(ncv, dtype=init.dtype)\n diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype)\n\n norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0)\n\n norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0),\n dtype=init.dtype)).dtype\n initvals = [\n init, krylov_vecs, 
norms, diag_elems, matvec, arguments,\n norms_dtype.type(1.0), landelta, 1, ncv\n ]\n output = jax.lax.while_loop(cond_fun, body_lanczos, initvals)\n final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output\n krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :],\n jax.numpy.ravel(final_state))\n\n A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag(\n norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1)\n eigvals, U = jax.numpy.linalg.eigh(A_tridiag)\n eigvals = eigvals.astype(A_tridiag.dtype)\n\n def body_vector(i, vals):\n krv, unitary, states = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m + 1, :] * unitary[m, n])\n return [krv, unitary, states]\n\n state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype)\n _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1),\n body_vector,\n [krylov_vecs, U, state_vectors])\n\n return jax.numpy.array(eigvals[0:neig]), [\n jax.numpy.reshape(vectors[n, :], init.shape) /\n jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig)\n ]\n\n return jax_lanczos", "def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps", "def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):\n x = tf.nn.l2_normalize(x)\n for _ in range(num_steps):\n x = eig_one_step(x, learning_rate, vector_prod_fn)\n return x", "def poweig(A, x0, maxiter = 100, ztol= 1.0e-5, mode= 0, teststeps=1):\n m = len(A)\n xi = x0[:] \n \n for n in range(maxiter):\n # matrix vector multiplication.\n xim1 = xi[:]\n for i in range(m):\n xi[i] = 0.0\n for j in range(m):\n xi[i] += A[i][j] * xim1[j]\n print n, xi\n if mode == 0:\n vlen = sqrt(sum([xi[k]**2 for k in range(m)]))\n xi = [xi[k] /vlen for k in range(m)]\n elif mode == 1:\n for k in range(m-1, -1, -1):\n c = abs(xi[k])\n if c > 1.0e-5:\n xi = [xi[k] /c for k in range(m)]\n break\n # early termination test.\n if n % teststeps == 0:\n S = sum([xi[k]-xim1[k] for 
k in range(m)])\n if abs(S) < ztol:\n break\n #print n, xi\n # Compute Rayleigh quotient.\n numer = sum([xi[k] * xim1[k] for k in range(m)])\n denom = sum([xim1[k]**2 for k in range(m)])\n xlambda = numer/denom\n return xlambda, xi", "def estimate_ivec(nt, ft, v_matrix, vtv_matrix, eye=None):\n v_dim = v_matrix.shape[1]\n n_gauss = nt.shape[1]\n\n # Construct eye if necessary\n if eye is None:\n eye = Extractor.to_rfpf(np.eye(v_dim, dtype=v_matrix.dtype).T)\n\n it = eye.T.reshape((1, -1))\n vtvt = vtv_matrix.T.reshape((n_gauss, -1))\n\n b = np.dot(ft, v_matrix).T\n lt = np.dot(nt, vtvt) + it\n\n l = lt.reshape((vtv_matrix.shape[1], vtv_matrix.shape[0])).T\n\n out = Extractor.solve(l, b)\n\n return out", "def main():\n print 'Running the power method...'\n dim = input('Give the dimension : ')\n nbit = input('How many iterations ? ')\n j = complex(0, 1)\n rnd = np.random.normal(0, 1, (dim, dim)) \\\n + np.random.normal(0, 1, (dim, dim))*j\n nbs = np.random.normal(0, 1, (dim, 1)) \\\n + np.random.normal(0, 1, (dim, 1))*j\n rndmat = np.matrix(rnd)\n rndvec = np.matrix(nbs)\n eigmax = power_method(rndmat, rndvec, nbit)\n check(rndmat, eigmax)", "def VFI(method) :\n iteration=0 # Iteration Counter\n converged = 0 # Convergence Flag|\n \n#----- Initial Settings \n v_update = zeros(n_grid)\n v_func = empty(n_grid)\n k_next_vec = empty(n_grid)\n run_time = empty(2)\n \n def obj(k_next) :\n \"\"\"\n This function is used in value function iteration.\n It represents the objective function to be maximized for one node (state) of current capitals.\n Resulting value is maximized one corresponding to next period's capital as a maximizer. \n Next period's value is computed by interpolation.\n \n Input : k_next (next period's capital)\n \n Output : value_vec (maximized value resulting from choosing optimal capital in the next period)\n \"\"\" \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec\n\n#----- Value function iteration\n start = time.time() # start time\n while converged==0 :\n index = 0\n for k_current in k_grid :\n k_next = fminbound(obj,k_grid[0],k_grid[-1])\n v_func[index] = (-1) * obj(k_next)\n k_next_vec[index] = k_next\n index = index + 1\n dist = abs(max(v_func - v_update))\n if dist<tol :\n converged = 1\n v_k, g_k = v_func, k_next_vec\n v_update = v_func\n print \"Iteration : \",iteration,\"\",\"Distance : \",dist # convergence process\n iteration = iteration + 1\n v_func = empty(n_grid) \n k_next_vec = empty(n_grid)\n \n end = time.time() # end time\n run_time[0],run_time[1] = runtime_cal(start,end) # total running time\n \n return v_k, g_k, run_time, iteration", "def incremental_svd(A, qr_flg=False):\n\n m = 256\n n = 7291\n\n n0 = 256\n\n if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')\n\n start = time.clock()\n\n A0 = A[:, :n0]\n U, s, V = ln.svd(A0, full_matrices=False)\n\n # NOTE: s is a vector; np.diag(s) will produce a diagonal matrix\n for i in range(n0, n):\n\n # new matrix is just a single vector (i-th column of A)\n A1 = np.matrix(A[:, i]).T\n\n if qr_flg:\n J, K = ln.qr(A1 - np.dot(np.dot(U, U.T), A1))\n U_, s_, V_ = ln.svd(\n np.vstack((\n np.hstack((np.diag(s), np.dot(U.T, A1))),\n 
np.hstack((np.zeros((K.shape[0], s.shape[0])), K))\n )),\n full_matrices=False)\n\n # update the result of SVD\n U = np.dot(np.hstack((U, J)), U_)\n\n else:\n U_, s_, V_ = ln.svd(np.hstack((np.diag(s), np.dot(U.T, A1))), full_matrices=False)\n U = np.dot(U, U_)\n\n s = s_\n\n # NOTE: V from svd on NumPy is already transposed\n V = np.dot(V_,\n np.vstack((\n np.hstack((V, np.zeros((V.shape[0], i+1-V.shape[1])))),\n np.hstack((np.zeros((V_.shape[1]-V.shape[0], V.shape[1])), np.eye(V_.shape[1]-V.shape[0], i+1-V.shape[1])))\n ))\n )\n\n # for next computation, update A0\n A0 = np.hstack((A0, A1))\n\n elapsed_time = time.clock() - start\n print 'time:', elapsed_time\n\n return U, s, V", "def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod", "def eigenalgo(self, accuracy: float = 0, cap: int = 50000, version: str = \"Givens\", not_skip: bool = True):\n j, temps, verify_accuracy = 0, 0, np.ones((self.N, self.N), dtype=bool) ^ np.eye(self.N, dtype=bool)\n if version == \"Gram-Schmidt\":\n temps = time()\n while np.any(abs(self.vap[verify_accuracy]) > accuracy) and j < cap:\n j += 1\n q, r = self.gram_schmidt_qr()\n self.vap, self.vep = r @ q, self.vep @ q\n\n elif version == \"Givens\":\n verify_accuracy = np.ones((self.N, self.N), dtype=bool) ^ np.eye(self.N, dtype=bool)\n temps = time()\n while np.any(abs(self.vap[verify_accuracy]) > accuracy) and j < cap:\n j += 1\n q, r = self.givens_qr()\n self.vap, self.vep = r @ q, self.vep @ q\n\n elif version == \"Rayleigh\":\n not_sing, diff, cond, j = True, accuracy + 1, True, 0\n temps = time()\n while cond: # Stop condition, all eigenvalues must be different\n while diff > accuracy and j < cap and not_sing:\n j += 1\n self.rvap, self.vep, diff, not_sing = self.rayleigh_iteration(self.rvap, self.vep)\n\n cond = False\n if j < cap:\n self.calc, first, not_sing = np.zeros(self.N, dtype=bool), True, True\n for i in range(self.N):\n if np.sum(np.less(np.abs(self.rvap - self.rvap[i]), 10 ** -6)) != 1:\n self.rvap[i + 1:] += self.memorize[i]\n if first:\n self.memorize[i] += 0.5\n self.vep[i + 1:, i + 1:] = np.eye(self.N - i - 1)\n first, cond, diff = False, True, accuracy + 1\n self.calc[i + 1:] = 1\n temps = time() - temps\n return self.rvap, self.vep, diff, j, temps\n\n else:\n print(\"Please select an appropriate value for the version parameter\")\n\n temps = time() - temps\n diff = np.max(abs(self.vap[verify_accuracy]))\n return np.diag(self.vap), self.vep, diff, j, temps", "def projective_factorization(x, max_iterations=1):\n\n n_views = len(x)\n n_points = x[0].shape[1]\n\n iterations = 0\n\n #lambda matrix, approximate depths\n l = np.ones((n_views, n_points))\n\n #normalization matrices\n norm_matrices = []\n\n # normalize coordinates\n xn = np.zeros((3*n_views, n_points))\n for i in range(n_views):\n\n #find normalization matrix for projections i\n x_norm, T = normalize_points(x[i], is_homogeneous=True)\n xn[3*i:3*(i+1), :] = x_norm\n norm_matrices.append(T)\n\n while iterations < max_iterations:\n # normalize the lambda matrix\n lr_norm = norm(l, axis=1)\n ln = l / lr_norm[:, np.newaxis]\n lc_norm = norm(ln, axis=0)\n ln /= lc_norm\n\n # repeat the lambdas\n ln = np.repeat(ln, 3, axis=0)\n\n #build the factorization matrix\n fact_matrix = ln*xn\n\n u, d, vh = svd(fact_matrix)\n\n print(d[3] / d[4])\n d = d[:4]/d[0]\n\n # from the svd decomposition we can find the projections and 3d points\n p_matrices = u[:, :4]\n x_3d = np.dot(np.diag(d), vh[:4, :])\n\n iterations += 1\n if iterations != max_iterations:\n\n 
w_matrix = np.dot(p_matrices, x_3d)\n\n for i in range(n_views):\n l[i, :] = w_matrix[3*i+2, :]\n\n cameras = []\n\n for i in range(n_views):\n # denormalize camera matrices\n c_matrix = np.dot(inv(norm_matrices[i]), p_matrices[3*i:3*(i+1), :])\n\n cameras.append(c_matrix)\n\n return cameras, x_3d", "def autovectorized(f):\r\n def wrapper(input):\r\n if N.isscalar(input)==False:\r\n return N.vectorize(f)(input)\r\n return f(input)\r\n return wrapper", "def autovectorized(f):\r\n def wrapper(input):\r\n if N.isscalar(input)==False:\r\n return N.vectorize(f)(input)\r\n return f(input)\r\n return wrapper", "def k_isometric_monte_carlo(self, v, **kwargs):\r\n v = self.np_array(v)\r\n ensemble_average_fun = np.zeros(v.shape)\r\n for i, v_i in enumerate(v):\r\n self.beta_E = lambda lambda_: self.beta_U_1(lambda_) + \\\r\n self.beta_A_0_abs_isometric(1, lambda_)\r\n\r\n def serial_fun(init_config, **kwargs):\r\n return self.k_isometric_monte_carlo_serial(\r\n v_i, init_config, **kwargs\r\n )\r\n\r\n ensemble_average_fun[i] = self.parallel_calculation(\r\n serial_fun,\r\n self.minimize_beta_U(v_i)[2][-self.M:, 0],\r\n **kwargs\r\n )\r\n ensemble_average_fun_TS = np.zeros(v.shape)\r\n for i, v_i in enumerate(v):\r\n self.beta_E = lambda lambda_: \\\r\n self.beta_U_1(\r\n np.concatenate(([self.lambda_TS], lambda_))\r\n ) + self.beta_A_0_abs_isometric(\r\n 1, np.concatenate(([self.lambda_TS], lambda_))\r\n )\r\n\r\n def serial_fun(init_config, **kwargs):\r\n return self.k_isometric_monte_carlo_serial(\r\n v_i, init_config, **kwargs\r\n )\r\n\r\n ensemble_average_fun_TS[i] = self.parallel_calculation(\r\n serial_fun,\r\n self.minimize_beta_U(\r\n v_i, transition_state=True\r\n )[2][-(self.M - 1):, 0],\r\n **kwargs\r\n )\r\n return ensemble_average_fun_TS/ensemble_average_fun", "def power_iteration(X):\n #X, languages=prepare_data_matrix()\n M=X\n M=M-np.mean(M, axis=0)\n M=np.cov(M, rowvar=False) #the covariance matrix, size 100x100\n x=np.ones(len(M)) #a random starting vector composed of 100 ones, it only cant be of all zeros\n difference=np.ones(len(x))\n\n #print(np.linalg.norm(difference))\n while np.linalg.norm(difference) >= 10**-5: #we iterate until the difference between the previous and the new x is really small, lets say 10^-5\n #print(x.T.shape)\n oldx=x\n z=M.dot((x.T))\n x=z.T\n x=x/np.linalg.norm(x)\n difference=np.linalg.norm(oldx-x)\n #the x that we get at the end of this loop is our eigenvector\n\n #print(x.dot(M).shape)\n #print(x.shape)\n y=(x.dot(M)).dot(x.T) #y is the corresponding eigenvalue to the eigenvector x\n \n return x, y", "def calculate_posvij_matrices(main_tetrad_ark):\n\n # Import all the possible solutions to the Vij matrices\n vij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n vij_matrices = []\n\n print(\" \")\n print(\" Calculating Vij matrices\")\n print(\" \")\n # for i in range(0, len(main_tetrad_ark)):\n for i in range(0, len(vij_possibilities)):\n tet_i = [x[1] for x in main_tetrad_ark[i]]\n tri_tet = [np.transpose(i) for i in tet_i]\n print(\"# ********************************\")\n # print(\" \")\n print(\"MATRIX i: \", i)\n print(\" \")\n for j in range(0, len(main_tetrad_ark)):\n tet_j = [x[1] for x in main_tetrad_ark[j]]\n trj_tet = [np.transpose(j) for j in tet_j]\n vij_temp = []\n # print(\"# ********************************\")\n print(\" \")\n print(\"MATRIX j: \", j)\n temp_zero = np.zeros((4,4), dtype=int)\n for x in range(0,len(tet_i)):\n test_1half = np.dot(tri_tet[x],tet_j[x])\n test_2half = np.dot(trj_tet[x],tet_i[x])\n test_difs 
= np.subtract(test_1half, test_2half)\n # print(\" \")\n # print(test_difs)\n temp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n vij_temp.append(temp_mat)\n # print(\"\")\n temp_add1 = np.add(vij_temp[0], vij_temp[1])\n temp_add2 = np.add(temp_add1, vij_temp[2])\n temp_addf = np.add(temp_add2, vij_temp[3])\n # temp_addf = np.divide(temp_add3, 2)\n for ijx in vij_possibilities:\n if np.array_equal(temp_addf, ijx[0]):\n print(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n print(\"l-solution found:\", ijx[1])\n print(temp_addf)\n print(\"\")\n print(ijx[0])\n if np.array_equal(temp_addf, temp_zero):\n pass\n else:\n vij_matrices.append(temp_addf)\n # print(\"\")\n print(temp_addf)\n # vij_matrices.append(temp_addf)\n vijmats_size = sys.getsizeof(vij_matrices)\n print(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n print(\"Length of Vij Matrices\")\n print(len(vij_matrices))\n print(vij_matrices)\n pass", "def neldermead(func, x0s,\n ftol=1e-2, maxfev=500):\n\n fcalls = 0\n x0s = np.asarray(x0s)\n M, N = x0s.shape\n if M!=N+1:\n raise ValueError(\"x0s must be N+1 points of dimension N\")\n\n rho = 1\n chi = 2\n psi = 0.5\n sigma = 0.5\n one2np1 = list(range(1, N + 1))\n\n sim = np.zeros((N + 1, N), dtype=x0s.dtype)\n fsim = np.zeros((N + 1,), float)\n for i in range(N+1):\n sim[i] = x0s[i]\n fsim[i] = func(sim[i])\n fcalls += 1\n\n # sort so sim[0,:] has the lowest function value\n ind = np.argsort(fsim)\n fsim = np.take(fsim, ind, 0)\n sim = np.take(sim, ind, 0)\n\n while (fcalls < maxfev):\n if np.max(np.abs(fsim[0] - fsim[1:])) <= ftol:\n break\n\n xbar = np.add.reduce(sim[:-1], 0) / N\n xr = (1 + rho)*xbar - rho*sim[-1]\n fxr = func(xr)\n fcalls += 1\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1 + rho*chi)*xbar - rho*chi*sim[-1]\n fxe = func(xe)\n fcalls += 1\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1 + psi*rho)*xbar - psi*rho*sim[-1]\n fxc = func(xc)\n fcalls += 1\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink = 1\n else:\n # Perform an inside contraction\n xcc = (1 - psi)*xbar + psi*sim[-1]\n fxcc = func(xcc)\n fcalls += 1\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma*(sim[j] - sim[0])\n fsim[j] = func(sim[j])\n fcalls += 1\n\n ind = np.argsort(fsim)\n sim = np.take(sim, ind, 0)\n fsim = np.take(fsim, ind, 0)\n\n x = sim[0]\n fval = fsim[0]\n return x, fval", "def deflated_power_iteration(operator,\n num_eigenthings=10,\n power_iter_steps=20,\n power_iter_err_threshold=1e-4,\n momentum=0.0,\n use_gpu=True,\n to_numpy=True):\n eigenvals = []\n eigenvecs = []\n current_op = operator\n prev_vec = None\n\n def _deflate(x, val, vec):\n return val * vec.dot(x) * vec\n\n for _ in range(num_eigenthings):\n eigenval, eigenvec = power_iteration(current_op, power_iter_steps,\n power_iter_err_threshold,\n momentum=momentum,\n use_gpu=use_gpu,\n init_vec=prev_vec)\n\n def _new_op_fn(x, op=current_op, val=eigenval, vec=eigenvec):\n return op.apply(x) - _deflate(x, val, vec)\n current_op = LambdaOperator(_new_op_fn, operator.size)\n prev_vec = eigenvec\n eigenvals.append(eigenval)\n eigenvec = eigenvec.cpu()\n if to_numpy:\n eigenvecs.append(eigenvec.numpy())\n else:\n eigenvecs.append(eigenvec)\n\n eigenvals =
np.array(eigenvals)\n eigenvecs = np.array(eigenvecs)\n\n # sort them in descending order\n sorted_inds = np.argsort(eigenvals)\n eigenvals = eigenvals[sorted_inds][::-1]\n eigenvecs = eigenvecs[sorted_inds][::-1]\n return eigenvals, eigenvecs", "def _compute_R1_from_vector(n, ind_mat, vec):\r\n\r\n R1 = 0\r\n for l in range(n):\r\n R1 += np.sum((2*ind_mat[:, l]+1) * vec**2)\r\n\r\n return R1", "def fn(i, j, mv):\n if not (0 <= i < m and 0 <= j < n): return 1 \n if mv == 0: return 0\n return (fn(i-1, j, mv-1) + fn(i, j-1, mv-1) + fn(i, j+1, mv-1) + fn(i+1, j, mv-1)) % 1_000_000_007", "def eigsolve(self,**kwargs):\n return eigsolve(self,**kwargs)", "def get_leftLaInv(k_list, l_list, m_list, mult_table_vals, n_dims, gradeList):\n\n identity = np.zeros((n_dims,))\n identity[gradeList.index(0)] = 1\n\n @numba.njit\n def leftLaInvJIT(value):\n intermed = np.zeros((n_dims, n_dims))\n for test_ind, i in enumerate(k_list):\n j = l_list[test_ind]\n k = m_list[test_ind]\n intermed[i, j] += mult_table_vals[test_ind] * value[k]\n intermed = np.transpose(intermed)\n if abs(linalg.det(intermed)) < _eps:\n raise ValueError(\"multivector has no left-inverse\")\n sol = linalg.solve(intermed, identity)\n return sol\n\n return leftLaInvJIT", "def minimize_neldermead(func, x0, args=(), callback=None,\n maxiter=None, maxfev=None, disp=False,\n return_all=False, initial_simplex=None,\n xatol=1e-4, fatol=1e-4, **unknown_options):\n maxfun = maxfev\n retall = return_all\n\n rho = 1\n chi = 2\n psi = 0.5\n sigma = 0.5\n nonzdelt = 0.05\n zdelt = 0.00025\n\n if initial_simplex is None:\n N = len(x0)\n\n sim = numpy.zeros((N + 1, N), dtype=x0.dtype)\n sim[0] = x0\n for k in range(N):\n y = numpy.array(x0, copy=True)\n if y[k] != 0:\n y[k] = (1 + nonzdelt) * y[k]\n else:\n y[k] = zdelt\n sim[k + 1] = y\n\n if maxiter is None:\n maxiter = N * 200\n if maxfun is None:\n maxfun = N * 200\n\n one2np1 = list(range(1, N + 1))\n fsim = numpy.zeros((N + 1,), float)\n\n for k in range(N + 1):\n fsim[k] = func(sim[k])\n\n ind = numpy.argsort(fsim)\n fsim = numpy.take(fsim, ind, 0)\n # sort so sim[0,:] has the lowest function value\n sim = numpy.take(sim, ind, 0)\n\n iterations = 1\n\n while iterations < maxiter:\n if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and\n numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):\n break\n logger.debug('itr: %s' % iterations)\n xbar = numpy.add.reduce(sim[:-1], 0) / N\n xr = (1 + rho) * xbar - rho * sim[-1]\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink = 1\n else:\n # Perform an inside contraction\n xcc = (1 - psi) * xbar + psi * sim[-1]\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma * (sim[j] - sim[0])\n fsim[j] = func(sim[j])\n\n ind = numpy.argsort(fsim)\n sim = numpy.take(sim, ind, 0)\n fsim = numpy.take(fsim, ind, 0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n\n x = sim[0]\n fval = numpy.min(fsim)\n warnflag = 0\n\n result = OptimizeResult(fun=fval, nit=iterations,
nfev=0,\n status=warnflag, success=(warnflag == 0),\n message=None, x=x, final_simplex=(sim, fsim))\n return result", "def posdef_eig(mat):\n return posdef_eig_functions[POSDEF_EIG_METHOD](mat)", "def _get_mult_function_runtime_sparse(k_list, l_list, m_list, mult_table_vals, n_dims):\n @numba.njit\n def mv_mult(value, other_value):\n output = np.zeros(n_dims)\n for ind, k in enumerate(k_list):\n v_val = value[k]\n if v_val != 0.0:\n m = m_list[ind]\n ov_val = other_value[m]\n if ov_val != 0.0:\n l = l_list[ind]\n output[l] += v_val * mult_table_vals[ind] * ov_val\n return output\n\n return mv_mult", "def alternative_iterative_method(x0, n, gamma, b):\n # Parameters:\n MAX_ITER = 1000\n n2 = n**2\n\n # Creating NxN versions of vector for easier indexing during iteration\n b = b.copy().reshape(n, n)\n b_transposed = b.copy().T\n x0 = x0.copy().reshape(n, n)\n x0_transposed = x0.copy().T\n x1 = x0.copy()\n x1_transposed = x0_transposed.copy()\n\n # No need for M, N, only a smaller tridiagonal system:\n H = scipy.sparse.diags((-1, 2, -1), (-1, 0, 1), shape=(n, n), format=\"csr\")\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n, n), format=\"csr\")\n M1 = gammaI + H # Corresponds to both (gI + M) & (gI + N) in equations\n M2 = gammaI - H # Corresponds to both (gI - M) & (gI - N) in equations\n\n # Preallocating RHS of equations\n RHS7 = np.zeros((n, n), dtype=np.float64)\n RHS8 = np.zeros((n, n), dtype=np.float64)\n\n k = 0\n while k < MAX_ITER:\n for i in range(n): # Loading RHS values for Equation (7):\n RHS7[:, i] = scipy.sparse.csr_matrix.dot(M2, x0_transposed[i]) + b_transposed[i]\n for i in range(n): # Solving N independent tridig mat systems related to Eq(7):\n x1[i] = scipy.sparse.linalg.spsolve(M1, RHS7[i])\n RHS8[i] = scipy.sparse.csr_matrix.dot(M2, x1[i]) + b[i] # Loading RHS values for Equation (8):\n for i in range(n): # Solving N independent tridig mat systems related to Eq(8):\n x1_transposed[i] = scipy.sparse.linalg.spsolve(M1, RHS8[:, i])\n\n k += 1\n if np.allclose(x1_transposed, x0_transposed, rtol=1e-8):\n break\n x0_transposed = x1_transposed.copy()\n\n res = x1_transposed.T.reshape(n2)\n return res, k", "def get_i_eigen_function(i, X, K, times_bigger_than_machine_epsilon=1.0):\n la, v = linalg.eigh(K) # eigh uses the knowledge that K is symmetric p.s.d\n cutoff = times_bigger_than_machine_epsilon * np.finfo(float).eps\n # Get the index of the first element greater than cutoff\n cutoff_i = np.arange(la.shape[0])[la > cutoff][0]\n i_ = max(cutoff_i, i)\n f = create_f(x_is=X, alpha_is=v[i_, :])\n return f", "def _jvp_isotonic_mag(solution, vector, w, l, eps=1e-4):\n x = solution\n mask = jnp.pad(jnp.absolute(jnp.diff(x)) <= eps, (1, 0))\n ar = jnp.arange(x.size)\n\n inds_start = jnp.where(mask == 0, ar, +jnp.inf).sort()\n u = 1 + l * w\n one_hot_start = jax.nn.one_hot(inds_start, len(vector))\n a = _cumsum_einsum(one_hot_start)\n a = jnp.append(jnp.diff(a[::-1], axis=0)[::-1], a[-1].reshape(1, -1), axis=0)\n return (\n ((a.T * (a @ (vector * u))).T) / ((a * u).sum(1, keepdims=True) + 1e-8)\n ).sum(0)", "def find_min_norm_element(vecs):\n # Solution lying at the combination of two points\n\n\n vecs_clone = []\n for i in range(len(vecs)):\n # assert len(vecs[i]) == 1\n vecs_task = []\n for k in range(len(vecs[i])):\n vecs_task.append(vecs[i][k].view(-1))\n vecs_clone.append(torch.cat(vecs_task).unsqueeze(0))\n vecs_clone = torch.cat(vecs_clone)\n\n grad_mat = torch.matmul(vecs_clone, vecs_clone.t())\n\n # dps = {}\n init_sol = MinNormSolver._min_norm_2d(grad_mat)\n \n n 
= len(vecs)\n sol_vec = torch.zeros([n,]).cuda()\n sol_vec[init_sol[0][0]] = init_sol[1]\n sol_vec[init_sol[0][1]] = 1 - init_sol[1]\n# sol_vec = sol_vec.unsqueeze(0)\n\n if n < 3:\n # This is optimal for n=2, so return the solution\n return sol_vec , init_sol[2]\n \n iter_count = 0\n\n # grad_mat = np.zeros((n,n))\n # for i in range(n):\n # for j in range(n):\n # grad_mat[i,j] = dps[(i, j)]\n \n\n while iter_count < MinNormSolver.MAX_ITER:\n grad_dir = -1.0 * torch.matmul(grad_mat, sol_vec)\n# sol_vec = sol_vec.squeeze()\n new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)\n\n v1v1 = torch.sum(sol_vec.unsqueeze(1).repeat(1, n)*sol_vec.unsqueeze(0).repeat(n, 1)*grad_mat)\n v1v2 = torch.sum(sol_vec.unsqueeze(1).repeat(1, n)*new_point.unsqueeze(0).repeat(n, 1)*grad_mat)\n v2v2 = torch.sum(new_point.unsqueeze(1).repeat(1, n)*new_point.unsqueeze(0).repeat(n, 1)*grad_mat)\n\n nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)\n new_sol_vec = nc*sol_vec + (1-nc)*new_point\n change = new_sol_vec - sol_vec\n if torch.sum(torch.abs(change)) < MinNormSolver.STOP_CRIT:\n return sol_vec, nd\n sol_vec = new_sol_vec\n iter_count += 1\n return sol_vec, nd", "def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1.
/ tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv", "def gauss_seidel_solver(self, mat, rhs):\n x = np.zeros_like(rhs)\n for it_count in range(1, self.iterations_number):\n x_new = np.zeros_like(x)\n if self.verbose > 1:\n print(\"Iteration {0}: {1}\".format(it_count, x))\n for i in range(mat.shape[0]):\n s1 = np.dot(mat[i, :i], x_new[:i])\n s2 = np.dot(mat[i, i + 1:], x[i + 1:])\n x_new[i] = (rhs[i] - s1 - s2) / mat[i, i]\n if np.allclose(x, x_new, rtol=1e-8):\n break\n x = x_new\n return x", "def solve_cg(matvec: Callable,\n b: Any,\n ridge: Optional[float] = None,\n init: Optional[Any] = None,\n **kwargs) -> Any:\n if ridge is not None:\n matvec = _make_ridge_matvec(matvec, ridge=ridge)\n return jax.scipy.sparse.linalg.cg(matvec, b, x0=init, **kwargs)[0]", "def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. 
Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)", "def eig_faces(u_mat, nmode, dim):\n n = int(nmode)\n nparray = np.zeros(np.size(u_mat[:,0]))\n for i in range(n):\n nparray = nparray + u_mat[:,i]\n \n nparray = np.reshape(nparray,dim)\n return(nparray)", "def test_eigsum_non_interacting(self, size):\n t_nn = 1.2\n idx = np.arange(size)\n g0_inv_full = np.zeros((size, size), dtype=complex)\n g0_inv_full[idx[:-1], idx[1:]] = g0_inv_full[idx[1:], idx[:-1]] = t_nn\n for g0 in self.g0_loc_inv:\n g0_inv_full[idx, idx] = g0\n _, h, _ = gt.matrix.decompose_gf(g0_inv_full)\n assert_allclose(np.sum(h), np.trace(g0_inv_full))", "def run_vqe(\n self,\n backend=Aer.get_backend(\"statevector_simulator\"),\n var_form=None,\n optimizer=None,\n reps=None,\n mode=\"min_val\",\n ):\n # N=int(np.ceil(np.log2(len(self.mat))))\n # hk = np.zeros((2**N,2**N),dtype='complex')\n # hk[:self.mat.shape[0], :self.mat.shape[1]] = self.mat\n N = self.n_qubits()\n if mode == \"max_val\":\n Hamil_mat = aqua.operators.MatrixOperator(-1 * self.mat)\n # Hamil_mat = MatrixOperator(-1 * self.mat)\n else:\n Hamil_mat = aqua.operators.MatrixOperator(self.mat)\n # Hamil_mat = MatrixOperator(self.mat)\n Hamil_qop = aqua.operators.op_converter.to_weighted_pauli_operator(\n Hamil_mat\n )\n if var_form is None:\n if reps is None:\n reps = 2\n # reps=5\n from qiskit.circuit.library import EfficientSU2\n\n var_form = EfficientSU2(N, reps=reps)\n if optimizer is None:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form)\n # vqe = VQE(Hamil_qop, var_form)\n else:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form, optimizer)\n # vqe = VQE(Hamil_qop, var_form, optimizer)\n vqe_result = vqe.run(backend)\n en = np.real(vqe_result[\"eigenvalue\"])\n # params=vqe.optimal_params\n # circuit=vqe.construct_circuit(params)\n if mode == \"max_val\":\n en = -1 * en\n # states = np.sort(\n # np.real(\n # vqe.expectation.convert(\n # StateFn(vqe.operator, is_measurement=True)\n # ).to_matrix()\n # )\n # )\n return en, vqe_result, vqe", "def root_finding_newton(fun, J, x, eps, max_iter, args):\n F_value = fun(x, args)\n F_value_ = F_value.reshape((-1, 1))\n F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector\n iteration_counter = 0\n while abs(F_norm) > eps and iteration_counter < max_iter:\n delta = np.linalg.solve(J(x, args), -F_value_)\n\n for i in range(x.size): # wtf numba!?!?!\n x[i] += delta[i, 0]\n\n F_value = fun(x, args)\n F_value_ = F_value.reshape((-1, 1))\n F_norm = np.linalg.norm(F_value, 2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n raise ValueError(\"Maximum iteration reached in newton root finding!\")\n return x, iteration_counter", "def _compute_R2_from_vector(n, ind_mat, vec):\r\n\r\n R2 = 0\r\n for l in range(n):\r\n _idx2keep = np.where(ind_mat[:, l] > 0)[0]\r\n idx2keep_1 = []\r\n idx2keep_2 = []\r\n for idx in _idx2keep:\r\n temp = ind_mat[idx, :].copy()\r\n temp[l] -= 1\r\n if _is_sorted(temp):\r\n res_temp = np.where((ind_mat == temp).all(axis=1))\r\n idx2keep_1.append(idx)\r\n idx2keep_2.append(res_temp[0][0])\r\n temp_vec = vec[idx2keep_1] * vec[idx2keep_2]\r\n R2 += 2 * np.sum(ind_mat[idx2keep_1, l] * temp_vec)\r\n\r\n return
R2", "def solve_normal_cg(matvec: Callable,\n b: Any,\n ridge: Optional[float] = None,\n **kwargs) -> Any:\n def _matvec(x):\n \"\"\"Computes A^T A x.\"\"\"\n return _normal_matvec(matvec, x)\n\n if ridge is not None:\n _matvec = _make_ridge_matvec(_matvec, ridge=ridge)\n\n Ab = _rmatvec(matvec, b)\n\n return jax.scipy.sparse.linalg.cg(_matvec, Ab, **kwargs)[0]", "def evolve(self, k_vec, Nt,**kwargs):\n \n M_eff = np.eye((self.Nd), dtype=complex) # aux matrix\n T = 1.\n for it in range(Nt):\n \n # update the Hamiltonian for time-inteval\n self.updateH(k_vec, it)\n\n # return eigenenergies and vectors\n E_k, U = lg.eig(self.H_kc) \n\n # U^-1 * exp(H_d) U\n U_inv = lg.inv(U)\n\n # construct a digonal matrix out of a vector\n M1 = (np.exp(-1.j*E_k*T) * U_inv.T).T\n\n #MM = np.dot(U_inv,np.dot(H_M, U))\n MM = np.dot(U,M1)\n M_eff = np.dot(M_eff,MM)\n # end of loop\n Ek, Uk = lg.eig( M_eff )\n idx = (np.log(Ek).imag).argsort()\n Efl_k = np.log(Ek).imag[idx]\n Ufl_k = Uk[idx]\n return Efl_k, Ufl_k", "def solve_gevp_gen(a, t_0, algorithm, sort_by_vectors=15, **kwargs):\n B = np.matrix(a[t_0])\n try:\n f = algorithm(B=B, **kwargs)\n except TypeError:\n # If the function doesn't do currying, implement that here\n f = lambda A: algorithm(B=B, A=A)\n except LinAlgError:\n return\n\n eigenvectors = None\n count = 0\n\n for j in range(t_0 + 1, 32):\n try:\n eigenvalues, new_eigenvectors = f(np.matrix(a[j]))\n \n if eigenvectors is None:\n eigenvectors = np.zeros_like(new_eigenvectors)\n\n if j < sort_by_vectors:\n # TODO Sortieren nach Eigenwert\n perm = permutation_indices(eigenvalues)\n else:\n perm = reorder_by_ev(new_eigenvectors, eigenvectors, B)\n\n eigenvectors = new_eigenvectors[:,perm]\n eigenvalues = eigenvalues[:,perm]\n \n count += 1\n\n yield eigenvalues, eigenvectors\n\n except (LinAlgError, TypeError) as e:\n #import traceback\n #traceback.print_exc()\n return", "def solve_eq(xVec):\n \n PSI = xVec[0:vecLen] \n Cxx = xVec[1*vecLen:2*vecLen] \n Cyy = xVec[2*vecLen:3*vecLen] \n Cxy = xVec[3*vecLen:4*vecLen]\n\n\n # Useful Vectors\n Txx = oneOverWi * Cxx \n Txx[N*M] -= oneOverWi\n Tyy = oneOverWi * Cyy \n Tyy[N*M] -= oneOverWi\n Txy = oneOverWi * Cxy\n\n U = + dot(MDY, PSI)\n V = - dot(MDX, PSI)\n LAPLACPSI = dot(LAPLAC, PSI)\n\n # Useful Operators\n MMU = tsm.c_prod_mat(U)\n MMV = tsm.c_prod_mat(V)\n VGRAD = dot(MMU,MDX) + dot(MMV,MDY)\n MMDXU = tsm.c_prod_mat(dot(MDX, U))\n MMDXV = tsm.c_prod_mat(dot(MDX, V))\n MMDYU = tsm.c_prod_mat(dot(MDY, U))\n MMDYV = tsm.c_prod_mat(dot(MDY, V))\n\n MMDXPSI = tsm.c_prod_mat(dot(MDX, LAPLACPSI))\n MMDXCXX = tsm.c_prod_mat(dot(MDX, Cxx))\n MMDXCYY = tsm.c_prod_mat(dot(MDX, Cyy))\n MMDXCXY = tsm.c_prod_mat(dot(MDX, Cxy))\n\n #######calculate the Residuals########\n\n residualsVec = zeros((4*vecLen), dtype='complex')\n\n #####psi\n residualsVec[0:vecLen] = - Re*dot(MMU, dot(MDX, LAPLACPSI)) \\\n - Re*dot(MMV, dot(MDY, LAPLACPSI)) \\\n + beta*dot(BIHARM, PSI) \\\n - (1.-beta)*(dot(MDXX, Txy) + dot(MDXY, (Tyy - Txx)) \\\n - dot(MDYY, Txy))\n\n #####xx\n residualsVec[vecLen:2*vecLen] = - dot(VGRAD, Cxx) \\\n + 2.*dot(MMDXU, Cxx) \\\n + 2.*dot(MMDYU, Cxy) - Txx\n\n #####yy\n residualsVec[2*vecLen:3*vecLen] = - dot(VGRAD, Cyy) \\\n + 2.*dot(MMDXV, Cxy) \\\n + 2.*dot(MMDYV, Cyy) - Tyy\n\n #####xy\n residualsVec[3*vecLen:4*vecLen] = - dot(VGRAD, Cxy) \\\n + dot(MMDXV, Cxx) + dot(MMDYU, Cyy)\\\n - Txy\n\n #####psi0\n residualsVec[N*M:(N+1)*M] = - Re*dot(VGRAD, U)[N*M:(N+1)*M] \\\n + beta*dot(MDYYY, PSI)[N*M:(N+1)*M] \\\n + (1.-beta)*dot(MDY,Txy)[N*M:(N+1)*M]\n # 
set the pressure gradient (pressure driven flow)\n # residualsVec[N*M] += 2.0\n\n # set the forcing on the zeroth mode for non pressure driven flow.\n residualsVec[N*M:(N+1)*M] += forcingVec\n\n\n ##### Apply boundary conditions to residuals vector\n\n # dxPsi = 0 \n for k in range (2*N+1): \n if k == N: continue # skip the 0th component \n residualsVec[k*M + M-2] = dot((k-N)*kx*BTOP, PSI[k*M:(k+1)*M])\n residualsVec[k*M + M-1] = dot((k-N)*kx*BBOT, PSI[k*M:(k+1)*M])\n del k\n\n # dyPsi(+-1) = 0 \n for k in range (2*N+1):\n if k == N: continue # skip the 0th component \n residualsVec[k*M + M-4] = dot(DERIVTOP, PSI[k*M:(k+1)*M])\n residualsVec[k*M + M-3] = dot(DERIVBOT, PSI[k*M:(k+1)*M])\n del k\n\n # dyPsi0(+-1) = +-1\n residualsVec[N*M + M-3] = dot(DERIVTOP, PSI[N*M:(N+1)*M]) - 1.\n residualsVec[N*M + M-2] = dot(DERIVBOT, PSI[N*M:(N+1)*M]) + 1.\n\n # Psi0(-1) = 0\n residualsVec[N*M + M-1] = dot(BBOT, (PSI[N*M:(N+1)*M]))\n\n return (residualsVec)", "def eigen(X):\n\n symmetric = np.alltrue(np.isclose(X - X.T, np.zeros(X.shape)))\n small = max(X.shape) <= 11\n\n if symmetric:\n return jacobi(X)\n elif small:\n maxiter = 10 ** max(*X.shape, 4)\n return qrm3(X, maxiter=maxiter)\n else:\n maxiter = 10 ** max(*X.shape, 4)\n return qrm2(X, maxiter=maxiter)", "def _materialize_array(matvec, shape, dtype=None):\n x = jnp.zeros(shape, dtype)\n return jax.jacfwd(matvec)(x)", "def posdef_eig_svd(mat):\n evals, evecs, _ = tf.svd(mat)\n\n return evals, evecs", "def calculate_posvij_matrices(main_tetrad_ark):\n\n\t# Import all the possible solutions to the Vij matrices\n\tvij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n\tvij_matrices = []\n\n\tprint(\"\t\t\t\t\t\t\t\")\n\tprint(\"\tCalculating Vij matrices\")\n\tprint(\"\t\t\t\t\t\t\t\")\n\t# for i in range(0, len(main_tetrad_ark)):\n\tfor i in range(0, len(vij_possibilities)):\n\t\ttet_i = [x[1] for x in main_tetrad_ark[i]]\n\t\ttri_tet = [np.transpose(i) for i in tet_i]\n\t\tprint(\"# ********************************\")\n\t\t# print(\"\t\t\t\t\t\t\t\t \")\n\t\tprint(\"MATRIX i: \", i)\n\t\tprint(\"\t\t\t\t\t\t\t\t \")\n\t\tfor j in range(0, len(main_tetrad_ark)):\n\t\t\ttet_j = [x[1] for x in main_tetrad_ark[j]]\n\t\t\ttrj_tet = [np.transpose(j) for j in tet_j]\n\t\t\tvij_temp = []\n\t\t\t# print(\"# ********************************\")\n\t\t\tprint(\"\t\t\")\n\t\t\tprint(\"MATRIX j: \", j)\n\t\t\ttemp_zero = np.zeros((4,4), dtype=int)\n\t\t\tfor x in range(0,len(tet_i)):\n\t\t\t\ttest_1half = np.dot(tri_tet[x],tet_j[x])\n\t\t\t\ttest_2half = np.dot(trj_tet[x],tet_i[x])\n\t\t\t\ttest_difs = np.subtract(test_1half, test_2half)\n\t\t\t\t# print(\" \")\n\t\t\t\t# print(test_difs)\n\t\t\t\ttemp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n\t\t\t\tvij_temp.append(temp_mat)\n\t\t\t\t# print(\"\")\n\t\t\ttemp_add1 = np.add(vij_temp[0], vij_temp[1])\n\t\t\ttemp_add2 = np.add(temp_add1, vij_temp[2])\n\t\t\ttemp_addf = np.add(temp_add2, vij_temp[3])\n\t\t\t# temp_addf = np.divide(temp_add3, 2)\n\t\t\tfor ijx in vij_possibilities:\n\t\t\t\tif np.array_equal(temp_addf, ijx[0]):\n\t\t\t\t\tprint(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n\t\t\t\t\tprint(\"l-solution found:\", ijx[1])\n\t\t\t\t\tprint(temp_addf)\n\t\t\t\t\tprint(\"\")\n\t\t\t\t\tprint(ijx[0])\n\t\t\tif np.array_equal(temp_addf, temp_zero):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tvij_matrices.append(temp_addf)\n\t\t\t# print(\"\")\n\t\t\tprint(temp_addf)\n\t\t\t# vij_matrices.append(temp_addf)\n\t\tvijmats_size = sys.getsizeof(vij_matrices)\n\t\tprint(\"Size of Vij
Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n\tprint(\"Length of Vij Matrices\")\n\tprint(len(vij_matrices))\n\tpass", "def root_finding_newton(fun, J, x, eps, max_iter, args):\n F_value = fun(x, *args)\n F_value_ = F_value.reshape((-1,1))\n F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector\n iteration_counter = 0\n while abs(F_norm) > eps and iteration_counter < max_iter:\n delta = np.linalg.solve(J(x, args), -F_value_)\n\n for i in range(x.size): #wtf numba!?!?!\n x[i] += delta[i,0]\n\n F_value = fun(x, *args)\n F_value_ = F_value.reshape((-1,1))\n F_norm = np.linalg.norm(F_value, 2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n raise ValueError('Maximum iteration reached in newton root finding!')\n return x, iteration_counter", "def power_method(mat, start, maxit):\n result = start\n for i in xrange(maxit):\n result = mat*result\n result = result/np.linalg.norm(result)\n return result", "def fn(i, k):\n if i == len(nums): return 0\n if k < 0: return inf \n ans = inf\n rmx = -inf # range max \n rsm = 0 # range sum \n for j in range(i, len(nums)): \n rmx = max(rmx, nums[j])\n rsm += nums[j]\n ans = min(ans, rmx*(j-i+1) - rsm + fn(j+1, k-1))\n return ans", "def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n\n def A_cgs_fun(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n\n def A_cgs_fun_init(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n # initialize z and u\n z = compute_init(ATy, ATy)\n u = np.zeros(x_shape)\n\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n Wzu, wbook = wavelet_transform(net_input)\n q = soft_threshold(Wzu, lambda_l1/alpha)\n x = inverse_wavelet_transform(q, wbook, x_shape)\n x = np.reshape(x, x_shape)\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress == True:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n 
plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm))\n\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)", "def get_eigvals_eigvects(\n num_layers,\n numeric_matrices_eV_over_angsquared,\n layer_mass_amu,\n use_banded_algorithm=False,\n):\n # Based on the units in input, and indicating with:\n # - [hbar omega] the numeric value for the frequency in meV => hbar omega = [hbar omega] * meV\n # - [K] the numeric value of K in eV/ang^2\n # - [m] the layer mass in amu\n # we have (we omit the sign, and for units considerations we 'drop' U):\n # omega^2 = K / m =>\n # (hbar omega)^2 = hbar^2 * K / m =>\n # [hbar omega]^2 * meV^2 = hbar^2 * [K] / [m] * eV/ang^2 / amu = [K] / [m] * hbar^2 * eV/ang^2 / amu =>\n # [hbar omega]^2 = = [K] / [m] * ( hbar^2 * eV/ang^2 / amu / meV^2 )\n # so that the conversion factor is the last bracketed term:\n # conversion_factor = hbar^2 * eV / (angstrom^2 * amu * meV^2)\n conversion_factor = 4180.15925\n # NOTE: for simplicity, the conversion is applied at the very end\n\n if use_banded_algorithm:\n # 3 blocks (below, same layer, and above) of size 3 => total width of 9\n # Since we only store the upper part, we only need a width of 4 (diagonal + 3 superdiagonals)\n K_matrix = np.zeros((4, num_layers * 3))\n else:\n K_matrix = np.zeros((num_layers * 3, num_layers * 3))\n\n # Note: I construct -K, actually\n for block_idx in range(num_layers):\n # Interaction with upper layer\n if block_idx < num_layers - 1: # Not in the last layer\n current_block = np.array(\n numeric_matrices_eV_over_angsquared[\n block_idx % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx + 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n # Interaction with lower layer\n if block_idx > 0: # Not in the first layer\n previous_block = np.array(\n numeric_matrices_eV_over_angsquared[\n (block_idx - 1) % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx - 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n\n # We want to get the eigenvalues of omega^2 U = - 1/M_layer K U\n K_matrix /= layer_mass_amu\n\n # Get frequencies (eigvals) and eigenvectors (for mode analysis)\n if use_banded_algorithm:\n eigvals, eigvects = scipy.linalg.eig_banded(K_matrix, lower=False)\n else:\n eigvals, eigvects = np.linalg.eigh(K_matrix)\n\n eigvals *= conversion_factor\n\n ## The first three should be acoustic i.e. 
almost zero; the rest should be positive\n ## I don't check as depending on the units it's hard to define a correct absolute energy\n # assert np.sum(np.abs(eigvals[:3])) < 1.0e-8\n\n # Remove the first three acoustic modes\n return eigvals[3:], eigvects[:, 3:]", "def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x", "def solve_VFI(self):\r\n dimC = self.dimA ; dimA = self.dimA ; dimW = self.dimW \r\n C = self.c_grid ; A = self.a_grid ; W = self.W_grid\r\n tol = self.tol ; Niter = self.Niter ; R = self.R\r\n beta = self.beta ; Pi = self.Pi\r\n \r\n V0 = np.zeros((dimA,dimC,dimW))\r\n V1 = np.zeros((dimA,dimC,dimW))\r\n Pol = np.zeros((dimA,dimC,dimW))\r\n U = np.zeros((dimA,dimC,dimW))\r\n \r\n t0 = time()\r\n diff = 1 ; niter = 0\r\n \r\n while diff > tol:\r\n niter += 1\r\n # Value update step\r\n for ia in range(dimA):\r\n for ic in range(dimC):\r\n for iw in range(dimW):\r\n c = W[iw] + R*A[ia] - A\r\n x = C[ic]\r\n \r\n c[c < 0] = np.nan \r\n if x < 0:\r\n x = np.nan\r\n \r\n u = self.u(c,x) \r\n U[:,ic,iw] = u \r\n \r\n Objective = U + beta * V0 @ Pi.T\r\n V1[ia,:,:] = np.nanmax(Objective, axis = 0)\r\n Pol[ia,:,:] = np.nanargmax(Objective, axis = 0)\r\n \r\n # Evaluate distance between the value functions\r\n diff = np.max(np.max(np.abs(V1 - V0))) \r\n V0[:] = V1\r\n \r\n # Break the while loop if too many iterations\r\n #print(\"The current error is \"+str(diff))\r\n if niter > Niter:\r\n print('Ops, no convergence')\r\n break\r\n \r\n t1 = time()\r\n #print('VFI algorithm took {0:0d} iterations and {1:.2f} seconds.'.format(niter, t1 - t0))\r\n \r\n self.V1 = V1 ; self.Pol = Pol", "def _safe_inv22_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n\n # We set delta_inv to 0. in case of a rank deficient matrix ; a\n # rank-deficient input matrix *M* will lead to a null matrix in output\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n if np.all(rank2):\n # Normal 'optimized' flow.\n delta_inv = 1./delta\n else:\n # 'Pathologic' flow.\n delta_inv = np.zeros(M.shape[0])\n delta_inv[rank2] = 1./delta[rank2]\n\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n return M_inv", "def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # QRFactorization\n Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')\n\n if np.linalg.norm(R[-1, :], np.inf) < tol:\n warn('Singular Jacobian matrix. 
Using SVD decomposition to ' +\n 'perform the factorizations.')\n return svd_factorization_projections(A, m, n,\n orth_tol,\n max_refin,\n tol)\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v = np.zeros(m)\n v[P] = aux2\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(z)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v[P] = aux2\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n z = np.zeros(m)\n z[P] = aux2\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = Q inv(R.T) P.T x\n aux1 = x[P]\n aux2 = scipy.linalg.solve_triangular(R, aux1,\n lower=False,\n trans='T')\n z = Q.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def _matvec(x):\n return _normal_matvec(matvec, x)", "def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # SVD Factorization\n U, s, Vt = scipy.linalg.svd(A, full_matrices=False)\n\n # Remove dimensions related with very small singular values\n U = U[:, s > tol]\n Vt = Vt[s > tol, :]\n s = s[s > tol]\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(z)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n z = U.dot(aux2)\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = V 1/s U.T x\n aux1 = U.T.dot(x)\n aux2 = 1/s*aux1\n z = Vt.T.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def transform(fn):\n def _(vec, dt):\n return np.einsum(\n 'ji,i,ki,k...->j...',\n evecs, fn(evals, dt), evecs, vec, optimize=True)\n\n return _", "def get_mat_n(T_e, r_0, M, N, E, g_e, R_h, tmpN_t, tmpN_max):\n mat_n = np.zeros((M, len(T_e))) # matrix for the result\n for i in range(len(T_e)):\n R_e = get_R_e(E, M, g_e, T_e[-i-1]) # matrix with transition rates (env)\n #print get_R_e_test(E, M, g_e, T_e, R_e, 10e-15)\n R = get_R(R_e, R_h) # total transition rates\n data = (R, M, N) # arguments for fsolve \n #-----------solve the nonlinear system of equations-------------------- \n solution = fsolve(func, r_0,args=data, full_output=1)\n if solution[2] == 0: # if sol. didnt conv., repeat calcul.\n print i\n else:\n n1 = get_n1(solution[0],N) # occupation number of the ground state\n n = np.zeros(M) # vector of all occupation numbers\n n[0], n[1:] = n1 , solution[0] \n if np.any(n<0.): # if solution is unphysical \n print \"Needed to repeat calculation at Temperature T_e =\", T_e[-i-1] \n n = get_cor_n(i, T_e, r_0, M, N, E, g_e, R_h, tmpN_t, tmpN_max)\n if n == None:\n print \"Calculation failed! 
You may choose a larger tmpN_max.\"\n break\n else:\n r_0 = n[1:]\n else:\n r_0 = solution[0]\n mat_n[:,-i-1] = n\n return mat_n", "def sparse_expectation(mat, vec):\n return np.vdot(vec, mat.dot(vec)).real", "def solve_lu(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape, b.dtype)\n return jax.numpy.linalg.solve(A, b)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape, b.dtype) # 4d array (tensor)\n A = A.reshape(-1, b.shape[0] * b.shape[1]) # 2d array (matrix)\n return jax.numpy.linalg.solve(A, b.ravel()).reshape(*b.shape)\n else:\n raise NotImplementedError", "def get_inverse_hvp_lissa(v, sess, v_placeholder, hessian_vector,\r\n batch_size=None,\r\n scale=10, damping=0.0, num_samples=1, recursion_depth=10000): \r\n inverse_hvp = None\r\n print_iter = recursion_depth / 10\r\n\r\n for i in range(num_samples):\r\n # samples = np.random.choice(self.num_train_examples, size=recursion_depth)\r\n \r\n cur_estimate = v\r\n\r\n for j in range(recursion_depth):\r\n \r\n # feed_dict = fill_feed_dict_with_one_ex(\r\n # data_set, \r\n # images_placeholder, \r\n # labels_placeholder, \r\n # samples[j]) \r\n feed_dict = fill_feed_dict_with_batch(x, y_, Test_input, Test_label, batch_size=batch_size)\r\n\r\n feed_dict = update_feed_dict_with_v_placeholder(v_placeholder, feed_dict, cur_estimate)\r\n hessian_vector_val = sess.run(hessian_vector, feed_dict=feed_dict)\r\n cur_estimate = [a + (1-damping) * b - c/scale for (a,b,c) in zip(v, cur_estimate, hessian_vector_val)] \r\n\r\n # Update: v + (I - Hessian_at_x) * cur_estimate\r\n if (j % print_iter == 0) or (j == recursion_depth - 1):\r\n print(\"Recursion at depth %s: norm is %.8lf\" % (j, np.linalg.norm(cur_estimate[0])))\r\n feed_dict = update_feed_dict_with_v_placeholder(v_placeholder, feed_dict, cur_estimate)\r\n\r\n if inverse_hvp is None:\r\n inverse_hvp = [b/scale for b in cur_estimate]\r\n else:\r\n inverse_hvp = [a + b/scale for (a, b) in zip(inverse_hvp, cur_estimate)] \r\n\r\n inverse_hvp = [a/num_samples for a in inverse_hvp]\r\n return inverse_hvp", "def fiedler(A):\n L = laplacian(A,randomWalk=0)\n eigs = np.linalg.eigvals(L)\n ind = np.argsort(np.abs(eigs))[1]\n fEig = eigs[ind]\n return fEig", "def _func_mat(mat, func):\n d, U = np.linalg.eigh(mat)\n return np.dot(func(np.abs(d)) * U, U.T)", "def als(matrix, n_factors=8,n_iterations=15, lambda_=10):\r\n\tm, n = matrix.shape\r\n\tQ = matrix\r\n\tW = Q > 0.5\r\n\tW = W.astype(int)\r\n\tprint('X and Y randomly initialzied.')\r\n\tX = 5 * np.random.rand(m, n_factors) \r\n\tY = 5 * np.random.rand(n_factors, n)\r\n\tfor ii in range(n_iterations):\r\n\t\tfor u, Wu in enumerate(W):\r\n\t\t\tX[u] = np.linalg.solve(np.dot(Y, np.dot(np.diag(Wu), Y.T)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(Y, np.dot(np.diag(Wu), Q[u].T))).T\r\n\t\tfor i, Wi in enumerate(W.T):\r\n\t\t\tY[:,i] = np.linalg.solve(np.dot(X.T, np.dot(np.diag(Wi), X)) + lambda_ * np.eye(n_factors),\r\n\t np.dot(X.T, np.dot(np.diag(Wi), Q[:, i])))\r\n\t\tprint('{}th iteration is completed of {}'.format(ii + 1,n_iterations))\r\n\tprediction = np.dot(X,Y)\r\n\tprint('Done.')\r\n\treturn prediction, X, Y", "def eigenvects(mat):\n # Check if symbols are present\n if hasSymbols(mat):\n return mat.eigenvects()\n # Purely numeric matrix\n newMat = recursiveEvaluate(mat.as_mutable())\n return newMat.eigenvects()", "def initiateVMatrixes():\n global v, vNew, vExact\n # Initialize the grid 
to 0\n v = np.zeros((n+1, n+1)) # matrix of v, index are i: row, j:column\n # Set the boundary conditions\n for i in range(1,n):\n v[0,i] = 10\n v[n,i] = 10\n v[i,0] = 10\n v[i,n] = 10\n # Exact solution\n vExact = np.copy(v)\n for i in range(1,n):\n for j in range(1,n):\n vExact[i,j] = 10\n # Initial guess\n for i in range(1,n):\n for j in range(1,n):\n v[i,j] = 0.9*vExact[i,j]\n vNew = np.copy(v)", "def eigvals(input):\n\n is_input_dparray = isinstance(input, dparray)\n\n if (not use_origin_backend(input) and is_input_dparray):\n if (input.size > 0):\n return dpnp_eigvals(input)\n\n return call_origin(numpy.linalg.eigvals, input)", "def jit(func):\n return func", "def Integrator_1(t_vec, x0):\n\tx = np.zeros((len(t_vec), x0.shape[0], N_max)) # set up the array of x values\n\t\n\tfor N in range(1, N_max+1):\n\t\tx[0, :, N-1] = x0.reshape(2)\n\t\tfor i in range(1,len(t_vec)):\n\t\t\tx[i,:,N-1] = (f_step(0, t_vec[i], N) @ x0).reshape(2)\n\t\t\tif (i*100) % (len(t_vec)-1) == 0:\n\t\t\t\tprint(\"\\r\" + \"integrated {:.0%}\".format(i/(len(t_vec)-1)), end='')\n\t\t\n\t\tprint(' done order ', N)\n\t\t\n\treturn x", "def eigen_decomposition(self):\n w, V = linalg.eigh(self.K)\n c = w[::-1]\n if isinstance(self.num_xi, float):\n percent_energy = np.cumsum(c) / np.sum(c)\n self.num_xi = np.arange(c.shape[0])[percent_energy < self.num_xi][-1] # num_xi changes\n self.Lambda = w[::-1][:self.num_xi]\n self.V = V[:, ::-1][:, :self.num_xi]", "def auxminrho2(x,m_ind):\n \n f = 0.0\n for k_ind in range(cfg.nomax):\n f -= auxmin_cc_piece(x,k_ind,m_ind) \n\n return f", "def MATSOL(N,A):\r\n\r\n X = np.zeros((N+1),dtype=float) # X.shape = N+1\r\n NROW = np.arange(0,N+1,dtype=int) # NROW.shape = N+1\r\n\r\n for i in np.arange(N): # loop through rows\r\n AMAX = np.max(np.abs(A[NROW[i:],i])) # max value for column, all later rows\r\n ip = np.argmax(np.abs(A[NROW[i:],i]))+i # index of above\r\n \r\n if(abs(AMAX) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n if(NROW[i] != NROW[ip]): # swap rows\r\n NC = NROW[i].copy()\r\n NROW[i] = NROW[ip].copy()\r\n NROW[ip] = NC.copy()\r\n \r\n \r\n COEF = A[NROW[i+1:],i]/A[NROW[i],i] # normalize column values by maximum magnitude value (AMAX > 0)\r\n A[NROW[i+1:],i+1:] = A[NROW[i+1:],i+1:] - np.dot(COEF[:,None],A[NROW[i],i+1:][None,:]) # normalize/reduce matrix\r\n \r\n \r\n if(abs(A[NROW[N],N]) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n X[N] = A[NROW[N],N+1]/A[NROW[N],N] # downstream edge\r\n i = N-1\r\n while (i >= 0):\r\n# SUMM = 0.0\r\n# j = i+1\r\n \r\n SUMM = np.sum(A[NROW[i],i+1:N+1]*X[i+1:N+1]) # do not include final column\r\n \r\n# while (j <= N-1):\r\n# SUMM = A[NROW[i],j]*X[j] + SUMM\r\n# j = j+1\r\n # print(SUMM,SUMM2)\r\n \r\n X[i] = (A[NROW[i],N+1] - SUMM)/A[NROW[i],i]\r\n i = i-1\r\n return X", "def define_ising_helper_functions():\n\n @njit(cache=True)\n def fast_sum(J, s):\n \"\"\"Helper function for calculating energy in calc_e(). 
Iterates couplings J.\"\"\"\n e = np.zeros(s.shape[0])\n for n in range(s.shape[0]):\n k = 0\n for i in range(s.shape[1]-1):\n for j in range(i+1,s.shape[1]):\n e[n] += J[k]*s[n,i]*s[n,j]\n k += 1\n return e\n\n @njit(\"float64[:](int64[:,:],float64[:])\")\n def calc_e(s, params):\n \"\"\"\n Parameters\n ----------\n s : 2D ndarray of ints\n state either {0,1} or {+/-1}\n params : ndarray\n (h, J) vector\n\n Returns\n -------\n E : ndarray\n Energies of all given states.\n \"\"\"\n \n e = -fast_sum(params[s.shape[1]:],s)\n e -= np.sum(s*params[:s.shape[1]],1)\n return e\n \n def mch_approximation(samples, dlamda):\n \"\"\"Function for making MCH approximation step for Ising model.\"\"\"\n dE = calc_e(samples, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = pair_corr(samples, weights=np.exp(-dE)/len(dE), concat=True) * ZFraction \n assert not (np.any(predsisj < -1.00000001) or\n np.any(predsisj>1.000000001)),\"Predicted values are beyond limits, (%1.6f,%1.6f)\"%(predsisj.min(),\n predsisj.max())\n return predsisj\n \n @njit(cache=True)\n def calc_observables(samples):\n \"\"\"Observables for Ising model.\"\"\"\n n = samples.shape[1]\n obs = np.zeros((samples.shape[0], n+n*(n-1)//2))\n \n k = 0\n for i in range(n):\n obs[:,i] = samples[:,i]\n for j in range(i+1,n):\n obs[:,n+k] = samples[:,i] * samples[:,j]\n k += 1\n return obs\n return calc_e, calc_observables, mch_approximation", "def vis_eigen_explore_row(ref_code, eigvect_avg, eigv_avg, G, figdir=\"\", RND=None, namestr=\"\", indivimg=False,\n transpose=True, eiglist=[1,2,4,7,16], maxdist=120, rown=5, sphere=False, save=True): # ImDist=None, distrown=19\n if RND is None: RND = np.random.randint(10000)\n if eiglist is None: eiglist = list(range(len(eigv_avg)))\n t0 = time()\n codes_page = []\n mtg_col = []\n ticks = np.linspace(-maxdist, maxdist, rown)\n for idx, eigi in enumerate(eiglist): # range(eig_rng[0]+1, eig_rng[1]+1):\n if not sphere:\n interp_codes = LExpMap(ref_code, eigvect_avg[:, -eigi-1], rown, (-maxdist, maxdist))\n else:\n interp_codes = SExpMap(ref_code, eigvect_avg[:, -eigi-1], rown, (-maxdist, maxdist))\n codes_page.append(interp_codes)\n img_page = G.render(interp_codes)\n mtg = build_montages(img_page, (256, 256), (rown, 1), transpose=transpose)[0]\n if save:\n imsave(join(figdir, \"%s_eig%d_%04d.jpg\" % (namestr, eigi+1, RND)), np.uint8(mtg * 255.0))\n plt.imsave(join(figdir, \"%s_eig%d_%04d.pdf\" % (namestr, eigi+1, RND)), mtg, )\n mtg_col.append(mtg)\n if indivimg and save:\n for deviation, img in zip(ticks, img_page):\n imsave(join(figdir, \"%s_eig%d_%.1e_%04d.jpg\" % (namestr,eigi+1, deviation, RND)), np.uint8(img * 255.0))\n codes_all = np.concatenate(tuple(codes_page), axis=0)\n print(\"Finish printing page (%.1fs)\" % (time() - t0))\n # if ImDist is not None: # if distance metric available then compute this\n # distmat, ticks, fig = vis_distance_curve(ref_code, eigvect_avg, eigv_avg, G, ImDist, eiglist=eiglist,\n\t# maxdist=maxdist, rown=rown, distrown=distrown, sphere=sphere, figdir=figdir, RND=RND, namestr=namestr, )\n # return mtg, codes_all, distmat, fig\n # else:\n return mtg_col, codes_all", "def mandel_numba(x, y, max_iters):\n i = 0\n c = complex(x, y)\n z = 0.0j\n for i in range(max_iters):\n z = z * z + c\n if (z.real * z.real + z.imag * z.imag) >= 4:\n return i\n\n return 255", "def csr_mulvec_wrap(fn):\n\n @functools.wraps(fn)\n def csr_mul_vector(A, x):\n if A.nnz > 50000 and _NUM_THREAD_WORKERS > 1:\n return par_dot_csr_matvec(A, x)\n else:\n y = fn(A, x)\n if isinstance(x, 
qarray):\n y = qarray(y)\n return y\n\n return csr_mul_vector", "def asm_eigenpro_fn(samples, map_fn, top_q, bs_gpu, alpha, min_q=5, seed=1):\n\n np.random.seed(seed) # set random seed for subsamples\n start = time.time()\n n_sample, _ = samples.shape\n\n if top_q is None:\n svd_q = min(n_sample - 1, 1000)\n else:\n svd_q = top_q\n\n eigvals, eigvecs, beta = svd.nystrom_kernel_svd(samples, map_fn, svd_q)\n\n # Choose k such that the batch size is bounded by\n # the subsample size and the memory size.\n # Keep the original k if it is pre-specified.\n if top_q is None:\n max_bs = min(max(n_sample / 5, bs_gpu), n_sample)\n top_q = np.sum(np.power(1 / eigvals, alpha) < max_bs) - 1\n top_q = max(top_q, min_q)\n\n eigvals, tail_eigval = eigvals[:top_q - 1], eigvals[top_q - 1]\n eigvecs = eigvecs[:, :top_q - 1]\n\n device = samples.device\n eigvals_t = torch.tensor(eigvals.copy()).to(device)\n eigvecs_t = torch.tensor(eigvecs).to(device)\n tail_eigval_t = torch.tensor(tail_eigval, dtype=torch.float).to(device)\n\n scale = utils.float_x(np.power(eigvals[0] / tail_eigval, alpha))\n diag_t = (1 - torch.pow(tail_eigval_t / eigvals_t, alpha)) / eigvals_t\n\n def eigenpro_fn(grad, kmat):\n '''Function to apply EigenPro preconditioner.'''\n return torch.mm(eigvecs_t * diag_t,\n torch.t(torch.mm(torch.mm(torch.t(grad),\n kmat),\n eigvecs_t)))\n\n print(\"SVD time: %.2f, top_q: %d, top_eigval: %.2f, new top_eigval: %.2e\" %\n (time.time() - start, top_q, eigvals[0], eigvals[0] / scale))\n\n\n return eigenpro_fn, scale, eigvals[0], beta", "def davidson_guess(mult_by_A,N,neig,Adiag=None):\n Mmax = min(N,2000)\n tol = 1e-6\n\n #Adiagcheck = np.zeros(N,np.complex128)\n #for i in range(N):\n # test = np.zeros(N,np.complex128)\n # test[i] = 1.0\n # Adiagcheck[i] = mult_by_A(test)[i]\n #print \"Analytical Adiag == numerical Adiag?\", np.allclose(Adiag,Adiagcheck)\n\n if Adiag is None:\n Adiag = np.zeros(N,np.complex128)\n for i in range(N):\n test = np.zeros(N,np.complex128)\n test[i] = 1.0\n Adiag[i] = mult_by_A(test)[i]\n\n xi = np.zeros(N,np.complex128)\n\n evals = np.zeros(neig,np.complex128)\n evecs = np.zeros((N,neig),np.complex128)\n\n Mtot = 0\n for guess in range(neig):\n print(\"Working on guess =\", guess+1, \"/\", neig)\n for M in range(1,Mmax+1):\n if M == 1:\n # Unit vector 'target' as the guess\n b = np.zeros((N,1))\n b[guess,0] = 1.0\n Ab = np.zeros((N,1),np.complex128)\n Ab[:,0] = mult_by_A(b[:,0])\n else:\n Ab = np.column_stack( (Ab,mult_by_A(b[:,M-1])) )\n\n Atilde = np.dot(b.conj().transpose(),Ab)\n lamda, alpha = diagonalize_asymm(Atilde)\n\n overlap_guess_j_max = -99\n target = 0\n for j, overlap_guess_j in enumerate(alpha[0,:]):\n if overlap_guess_j > overlap_guess_j_max:\n overlap_guess_j_max = overlap_guess_j\n target = j\n\n lamda_k = lamda[target]\n alpha_k = alpha[:,target]\n\n if M == Mmax:\n print(\" -- M reached Mmax\")\n break\n\n q = np.dot( Ab-lamda_k*b, alpha_k )\n if np.linalg.norm(q) < tol:\n evals[guess] = lamda_k\n evecs[:,guess] = np.dot(b,alpha_k)\n Mtot += M\n print(\" -- Converged in\", M, \"iterations\")\n break\n\n for i in range(N):\n eps = 0.\n if np.allclose(lamda_k,Adiag[i]):\n eps = 1e-8\n xi[i] = q[i]/(lamda_k-Adiag[i]+eps)\n\n # orthonormalize xi wrt b\n bxi,R = np.linalg.qr(np.column_stack((b,xi)))\n # append orthonormalized xi to b\n b = np.column_stack((b,bxi[:,-1]))\n\n if M > 1 and M == Mmax:\n print(\"WARNING: Davidson algorithm reached max basis size \"\n \"M = %d without converging.\"%(M))\n\n return evals, evecs, Mtot", "def gkm_fv(seq, l=4, k=3, 
rev_comp=False, normalize=False):\n assert k < l\n retval = np.zeros(len(gkm_name(l=l, k=k)))\n if not seq:\n if rev_comp:\n return np.zeros(gkm_rc_indices(l=l, k=k).shape[1])\n return retval # Array of 0's\n\n # Transform base strings to int\n seq_int = [BASE_TO_INT[base] for base in seq]\n seq_int_lmers = [seq_int[i:i+l] for i in range(0, len(seq_int) - l + 1)]\n seq_int_lmers = [s for s in seq_int_lmers if 4 not in s] # Ignore items with N\n if not seq_int_lmers:\n if rev_comp:\n return np.zeros(gkm_rc_indices(l=l, k=k).shape[1])\n return retval # Array of 0's\n seq_int_lmers_stacked = np.vstack(seq_int_lmers)\n\n # Convert lmers to gapped kmers\n quad_base = np.power(4, np.arange(k))[::-1]\n keep_locs = list(itertools.combinations(range(l), k))\n for x, loc in zip(np.arange(len(keep_locs))[::-1], keep_locs):\n kept_cols = seq_int_lmers_stacked[:, loc]\n intified = kept_cols @ quad_base + x * 4**k\n np.add.at(retval, intified, 1)\n if rev_comp: # Consolidate reverse complements\n rc_indices = gkm_rc_indices(l=l, k=k)\n retval = retval[rc_indices[0, :]] + retval[rc_indices[1, :]]\n if normalize:\n retval /= float(len(seq))\n return retval", "def iterative_improvement(opt_matrix, w_matrix, r_vector, c_min, c_max, max_iter=None, verbose=True,\n use_cuda=False):\n\n total_time = 0\n with Timer(\"Calculating initial solution...\", verbose) as t:\n opt_matrix = optimal_result(opt_matrix, w_matrix, r_vector)\n total_time += t.get_time_s()\n\n total_per_col = opt_matrix.sum(axis=0)\n n_rows = opt_matrix.shape[0]\n over_alloc, under_alloc = print_solution_diagnostic(total_per_col, c_min, c_max, verbose)\n\n cuda_interation = CudaIteration(opt_matrix.shape[0], opt_matrix.shape[1])\n\n solution_vals = []\n iteration_n = 0\n while (sum(over_alloc) > 1 or sum(under_alloc) > 1) and iteration_n != max_iter:\n solution_vals.append(np.sum(np.multiply(opt_matrix, w_matrix)))\n iteration_n += 1\n with Timer(f\"Iteration {iteration_n} :\\n\", verbose) as t:\n over_alloc_pct, under_alloc_pct, can_add, can_remove = \\\n get_iteration_parameters(total_per_col, c_min, c_max, n_rows)\n if use_cuda:\n opt_matrix = cuda_interation(opt_matrix, over_alloc_pct, under_alloc_pct, can_add, can_remove)\n else:\n opt_matrix = iteration_improve(opt_matrix, over_alloc_pct, under_alloc_pct, can_add, can_remove)\n total_per_col = opt_matrix.sum(axis=0)\n over_alloc, under_alloc = print_solution_diagnostic(total_per_col, c_min, c_max, verbose)\n total_time += t.get_time_s()\n solution_vals.append(np.sum(np.multiply(opt_matrix, w_matrix)))\n\n cuda_compute_time = cuda_interation.computation_time\n\n return solution_vals, total_time, cuda_compute_time", "def fn(k, i, j):\n if not (0 <= i < N and 0 <= j < N): return 0\n if k == 0: return 1 \n return 1/8*sum(fn(k-1, i+ii, j+jj) for ii, jj in ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)))", "def lanczos_decomp(vector_prod_fn, scalar, n, k):\n Q = tf.zeros([n, 1])\n v = tf.random_uniform([n, 1])\n v = v / tf.norm(v)\n Q = tf.concat([Q, v], axis=1)\n\n # diagonals of the tridiagonal matrix\n beta = tf.constant(0.0, dtype=tf.float32, shape=[1])\n alpha = tf.constant(0.0, dtype=tf.float32, shape=[1])\n\n for i in range(k):\n v = vector_prod_fn(tf.reshape(Q[:, i+1], [n, 1])) - tf.scalar_mul(scalar, tf.reshape(Q[:, i+1], [n, 1]))\n v = tf.reshape(v, [n,])\n curr_alpha = tf.reshape(tf.reduce_sum(v * Q[:, i+1]), [1,])\n alpha = tf.concat([alpha, curr_alpha], axis=0)\n v = v-beta[-1]*Q[:, i]-alpha[-1]*Q[:, i+1]\n curr_beta = tf.reshape(tf.norm(v), [1,])\n beta = 
tf.concat([beta, curr_beta], axis=0)\n curr_norm = tf.reshape(v/(beta[-1]+1e-8), [n, 1])\n Q = tf.concat([Q, curr_norm], axis=1)\n\n alpha = tf.slice(alpha, begin=[1], size=[-1])\n beta = tf.slice(beta, begin=[1], size=[k-1])\n Q = tf.slice(Q, begin=[0, 1], size=[-1, k])\n return alpha, beta, Q", "def _eigen_fns(mat, fns):\n evals, evecs = _eigh(mat)\n\n def transform(fn):\n \"\"\"Generates a transform given a function on the eigenvalues.\"\"\"\n def _(vec, dt):\n return np.einsum(\n 'ji,i,ki,k...->j...',\n evecs, fn(evals, dt), evecs, vec, optimize=True)\n\n return _\n\n return tuple(transform(fn) for fn in fns)", "def limblength(j, mat):\n\n minval = math.inf\n\n for i in range(len(mat)):\n for k in range(len(mat[i])):\n if i != j != k:\n minval = min(minval, (mat[i, j] + mat[j, k] - mat[i, k]) / 2.0)\n\n return int(minval)", "def heavy_fixCM_eigvals(NP, b, c, params):\n l = params['l']\n k = params['k']\n I3 = params['I3']\n # Here, omega_3 is just the MAGNITUDE, not signed\n w3 = np.abs(params['w3'][0])\n gn = params['Mm'] * params['g']\n\n # Check output if small system\n print 'gn = ', gn\n print 'b = ', b\n print 'c = ', c\n\n if NP == 1:\n pass\n elif NP == 2:\n matrix = -np.array([[0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0.],\n [(-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.,\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.],\n [0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3)],\n [(-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.]\n ])\n print 'exact matrix = ', matrix\n eigvals = np.array([\n 1j * l * gn / (I3 * w3),\n -1j * l * gn / (I3 * w3),\n l * np.sqrt(gn) * np.sqrt(0j - 2. * l * k * (-1) ** (b) - gn) / (I3 * w3),\n -l * np.sqrt(gn) * np.sqrt(0j - 2. * l * k * (-1) ** (b) - gn) / (I3 * w3)\n ])\n print 'exact_eigvals are =', eigvals\n return eigvals\n elif NP == 3:\n matrix = -np.array([[0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0., 0., 0.],\n [(-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.,\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0., 0., 0.],\n [0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0.],\n [(-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn - 2. * (-1) ** (b) * l ** 2 * k) / (I3 * w3), 0., \\\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.],\n [0., 0., 0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3)],\n [0., 0., (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.]\n ])\n print 'exact matrix = ', matrix\n\n eigvals = np.array([\n 1j * l * gn / (I3 * w3),\n # -1j*l*gn/(I3*w3),\n l * np.sqrt(gn) * np.sqrt(0j - 3. 
* l * k * (-1) ** (b) - gn) / (I3 * w3),\n # -l*np.sqrt(gn)*np.sqrt(0j-3.*l*k*(-1)**(b) - gn)/(I3*w3),\n l * np.sqrt(gn) * np.sqrt(0j - l * k * (-1) ** (b) - gn) / (I3 * w3),\n # -l*np.sqrt(gn)*np.sqrt(0j - l*k*(-1)**(b) - gn)/(I3*w3)\n ])\n return eigvals\n else:\n return np.array([])", "def solve_gmres(matvec: Callable,\n b: Any,\n ridge: Optional[float] = None,\n tol: float = 1e-5,\n **kwargs) -> Any:\n if ridge is not None:\n matvec = _make_ridge_matvec(matvec, ridge=ridge)\n return jax.scipy.sparse.linalg.gmres(matvec, b, tol=tol, **kwargs)[0]", "def Newton_system(F, J, cst, x, max_iter=100, eps=1e-4):\n F_value = F(cst, x)\n F_norm = np.linalg.norm(F_value, ord=2) # l2 norm of vector\n iteration_counter = 0\n while abs(F_norm) > eps and iteration_counter < max_iter:\n try:\n delta = np.linalg.solve(J(cst, x), -F_value)\n except LinAlgError:\n print(\"Singular matrix in np.linalg.solve, after \", iteration_counter, \" iterations.\")\n return x, -1\n else:\n x = x + delta\n F_value = F(cst, x)\n F_norm = np.linalg.norm(F_value, ord=2)\n iteration_counter += 1\n\n # Here, either a solution is found, or too many iterations\n if abs(F_norm) > eps:\n iteration_counter = -1\n return x, iteration_counter", "def nonnegative_tensor_factorization(X, r, method='anls_bpp',\n tol=1e-4, stop_criterion=1,\n min_iter=20, max_iter=200, max_time=1e6,\n init=None, orderWays=None):\n\n nWay = len(X.shape)\n\n if orderWays is None:\n orderWays = np.arange(nWay)\n\n # set initial values\n if init is not None:\n F_cell = init\n else:\n Finit = [np.random.rand(X.shape[i], r) for i in range(nWay)]\n F_cell = Finit\n\n grad = getGradient(X, F_cell, nWay, r)\n\n nr_X = X.norm()\n nr_grad_all = np.sqrt(np.sum(np.linalg.norm(grad[i], 'fro') ** 2\n for i in range(nWay)))\n\n if method == \"anls_bpp\":\n method = anls_bpp()\n elif method == \"anls_asgroup\":\n method = anls_asgroup()\n else:\n raise Exception(\"Unknown method\")\n\n # Execute initializer\n F_cell, FF_init = method.initializer(X, F_cell, nWay, orderWays)\n\n tStart = time.time()\n\n if stop_criterion == 2:\n F_kten = ktensor(F_cell)\n rel_Error = getRelError(X, ktensor(F_cell), nWay, nr_X)\n\n if stop_criterion == 1:\n pGrad = getProjGradient(X, F_cell, nWay, r)\n SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)\n\n # main iterations\n for iteration in range(max_iter):\n cntu = True\n\n F_cell, FF_init = method.iterSolver(X, F_cell,\n FF_init, nWay, r, orderWays)\n F_kten = ktensor(F_cell)\n\n if iteration >= min_iter:\n\n if time.time() - tStart > max_time:\n cntu = False\n\n else:\n\n if stop_criterion == 1:\n pGrad = getProjGradient(X, F_cell, nWay, r)\n SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)\n if SC_PGRAD < tol:\n cntu = False\n\n elif stop_criterion == 2:\n prev_rel_Error = rel_Error\n rel_Error = getRelError(X, F_kten, nWay, nr_X)\n SC_DIFF = np.abs(prev_rel_Error - rel_Error)\n if SC_DIFF < tol:\n cntu = False\n else:\n rel_Error = getRelError(X, F_kten, nWay, nr_X)\n if rel_Error < 1:\n cntu = False\n\n if not cntu:\n break\n\n return F_kten", "def runpower(matrix, n, tolerance, max_num=None, return_vector=True):\n\tcalculate_next = True\n\teigenvalue_list = []\n\teigenvector_list = []\n\tleading_eigenvalue = np.nan\n\twhile(calculate_next):\t\n\t\tnew_eigenvalue, v = runpower_one(matrix, n)\n\t\tif np.isnan(leading_eigenvalue):\n\t\t\tleading_eigenvalue = new_eigenvalue\n\t\teigenvalue_list.append(new_eigenvalue)\n\t\teigenvector_list.append(v)\n\t\tif max_num is not None and len(eigenvalue_list) == 
max_num:\n\t\t\tbreak\n\t\tif abs(1.0 * new_eigenvalue / leading_eigenvalue) < tolerance:\n\t\t\tcalculate_next = False\n\t\telse:\n\t\t\tmatrix = matrix - new_eigenvalue * np.outer(v,v)\n\tif return_vector:\n\t\treturn eigenvalue_list, np.asarray(eigenvector_list).T\n\telse:\n\t\treturn eigenvalue_list", "def analytical_eig(A):\n n = len(A)\n h = 1/float(n)\n d = 2/float(h)**2\n a = -1/float(h)**2\n eigenval = np.empty(n)\n for j in range(1,n+1):\n eigenval[j-1] = d + 2*a*np.cos((j*np.pi)/(float(n)+1)) # Analytic solution\n \n return eigenval", "def gen_vee_func(self):\n dual_func = self.dual_func\n omt_func = self.omt_func\n @numba.njit\n def vee(aval, bval):\n return dual_func(omt_func(dual_func(aval), dual_func(bval)))\n return vee", "def mylinearsvm(beta, lambd, x, y, step_size_init, eps=0.0000001, max_iter=100):\n theta = beta\n t = step_size_init\n grad_beta = grad(beta, lambd, x, y)\n beta_vals = [beta]\n objs = [obj(beta, lambd, x, y)]\n iter = 0\n while np.linalg.norm(grad_beta) > eps and iter < max_iter: \n # THE CODE BELOW SO IT USES BACKTRACKING LINE SEARCH INSTEAD OF A CONSTANT STEP SIZE\n t = backtracking(beta, lambd=lambd, x=x, y=y, step_size=t)\n # THE CODE BELOW USES UPDATING THETA FOR BETA OPTIMAZATION\n beta = theta - t*grad_beta\n theta = beta + (iter/(iter+3))*(beta - beta_vals[-1])\n obj_val = obj(beta,lambd, x, y)\n beta_vals.append(beta)\n objs.append(obj_val)\n grad_beta = grad(theta, lambd, x, y)\n iter += 1\n \n return np.array(beta_vals), np.array(objs)", "def magma_zgeev_m(jobvl, jobvr, n, a, lda,\n w, vl, ldvl, vr, ldvr, work, lwork, rwork):\n\n jobvl = _vec_conversion[jobvl]\n jobvr = _vec_conversion[jobvr]\n info = c_int_type()\n status = _libmagma.magma_zgeev_m(jobvl, jobvr, n, int(a), lda,\n int(w), int(vl), ldvl, int(vr), ldvr,\n int(work), lwork, int(rwork), ctypes.byref(info))\n magmaCheckStatus(status)", "def _gth_solve_jit(A, out):\n n = A.shape[0]\n\n # === Reduction === #\n for k in range(n-1):\n scale = np.sum(A[k, k+1:n])\n if scale <= 0:\n # There is one (and only one) recurrent class contained in\n # {0, ..., k};\n # compute the solution associated with that recurrent class.\n n = k+1\n break\n for i in range(k+1, n):\n A[i, k] /= scale\n\n for j in range(k+1, n):\n A[i, j] += A[i, k] * A[k, j]\n\n # === Backward substitution === #\n out[n-1] = 1\n for k in range(n-2, -1, -1):\n for i in range(k+1, n):\n out[k] += out[i] * A[i, k]\n\n # === Normalization === #\n norm = np.sum(out)\n for k in range(n):\n out[k] /= norm", "def Avv_func(f):\n\n def Avv(x, v):\n def F(s):\n return f(x + v * s)\n\n return jacfwd(jacfwd(F))(0.0)\n\n return Avv", "def erfcinv(a):", "def fn(i, j, mask):\n if j == n: return 1 \n if i == m: return fn(0, j+1, mask)\n ans = 0 \n for x in 1<<2*i, 1<<2*i+1, 0b11<<2*i: \n mask0 = mask ^ x\n if mask0 & 0b11<<2*i and (i == 0 or (mask0 >> 2*i) & 0b11 != (mask0 >> 2*i-2) & 0b11): \n ans += fn(i+1, j, mask0)\n return ans % 1_000_000_007" ]
[ "0.6499808", "0.6398598", "0.59417737", "0.57333624", "0.5555907", "0.54603535", "0.5243884", "0.5202897", "0.51741433", "0.5137478", "0.5136315", "0.50940466", "0.50810033", "0.50402594", "0.50402594", "0.5007165", "0.49688074", "0.49596807", "0.49502552", "0.49388844", "0.49363267", "0.49228942", "0.49161905", "0.4905708", "0.48975727", "0.48457804", "0.48309895", "0.48132214", "0.47981742", "0.4763864", "0.47601113", "0.47289056", "0.47140205", "0.47028455", "0.46960393", "0.469242", "0.46826664", "0.46770445", "0.46731937", "0.4671276", "0.46705145", "0.466336", "0.46583274", "0.4653387", "0.464277", "0.46309626", "0.46297005", "0.46274114", "0.46234602", "0.46219507", "0.4613786", "0.4606912", "0.46055564", "0.4597329", "0.45952696", "0.4592658", "0.45926353", "0.45925823", "0.45889476", "0.45825365", "0.4581618", "0.45793483", "0.45743743", "0.4567401", "0.4566088", "0.45621455", "0.45598108", "0.45595482", "0.45559", "0.45518637", "0.45448196", "0.4544589", "0.4535739", "0.453289", "0.45321614", "0.45313874", "0.45290157", "0.45266983", "0.4525597", "0.45237076", "0.45204625", "0.45129693", "0.45108834", "0.45081082", "0.4505976", "0.45039138", "0.4502356", "0.45022386", "0.45006043", "0.4496386", "0.44827926", "0.44825134", "0.44797006", "0.4478459", "0.44731754", "0.4471301", "0.44633037", "0.44604424", "0.44576603", "0.4457414" ]
0.5256993
6
Implicitly restarted Arnoldi factorization of `matvec`. The routine finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec` by alternating between compression and re-expansion of an initial `num_krylov_vecs`-step Arnoldi factorization.
def implicitly_restarted_arnoldi_method( matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter, res_thresh) -> Tuple[List[Tensor], List[Tensor]]: N = np.prod(initial_state.shape) p = num_krylov_vecs - numeig num_krylov_vecs = np.min([num_krylov_vecs, N]) if (p <= 1) and (num_krylov_vecs < N): raise ValueError(f"`num_krylov_vecs` must be between `numeig` + 1 <" f" `num_krylov_vecs` <= N={N}," f" `num_krylov_vecs`={num_krylov_vecs}") dtype = initial_state.dtype # initialize arrays krylov_vectors = jax.numpy.zeros( (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]), dtype=dtype) H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype) # perform initial arnoldi factorization Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args, initial_state, krylov_vectors, H, 0, num_krylov_vecs, eps) # obtain an m-step arnoldi factorization Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits) it = 0 if which == 'LR': _which = 0 elif which == 'LM': _which = 1 else: raise ValueError(f"which = {which} not implemented") # make sure the dtypes are matching if maxiter > 0: if Vm.dtype == np.float64: dtype = np.complex128 elif Vm.dtype == np.float32: dtype = np.complex64 elif Vm.dtype == np.complex128: dtype = Vm.dtype elif Vm.dtype == np.complex64: dtype = Vm.dtype else: raise TypeError(f'dtype {Vm.dtype} not supported') Vm = Vm.astype(dtype) Hm = Hm.astype(dtype) fm = fm.astype(dtype) while (it < maxiter) and (not converged): evals, _ = jax.numpy.linalg.eig(Hm) krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig, p, _which, res_thresh) if converged: break v0 = jax.numpy.reshape(fk, initial_state.shape) # restart Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0, krylov_vectors, H, numeig, num_krylov_vecs, eps) Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs) it += 1 ev_, U_ = np.linalg.eig(np.array(Hm)) eigvals = jax.numpy.array(ev_) U = jax.numpy.array(U_) _, inds = LR_sort(eigvals, _which) vectors = get_vectors(Vm, U, inds, numeig) return eigvals[inds[0:numeig]], [ jax.numpy.reshape(vectors[n, :], initial_state.shape) for n in range(numeig) ]
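For context on the record above: the routine in the document field depends on external helpers (`arnoldi_fact`, `shifted_QR`, `update_data`, `LR_sort`, `get_vectors`) and cannot be run in isolation. A minimal, self-contained sketch of the same idea is SciPy's ARPACK wrapper, which implements implicitly restarted Arnoldi; here `k` and `ncv` play the roles of `numeig` and `num_krylov_vecs`, and the operator `A` is hypothetical example data, not part of the dataset record.

import numpy as np
from scipy.sparse.linalg import LinearOperator, eigs

# Hypothetical example operator: a random non-symmetric matrix.
N = 200
rng = np.random.default_rng(0)
A = rng.standard_normal((N, N))

def matvec(x):
    # ARPACK only ever touches the operator through this product.
    return A @ x

op = LinearOperator((N, N), matvec=matvec, dtype=A.dtype)

# Implicitly restarted Arnoldi: k eigenpairs extracted from an
# ncv-dimensional Krylov subspace; which='LM' matches one of the
# `which` options of the routine stored in the record.
vals, vecs = eigs(op, k=4, ncv=20, which='LM', tol=1e-8, maxiter=100)

# Residual check for the first returned pair.
res = np.linalg.norm(A @ vecs[:, 0] - vals[0] * vecs[:, 0])
print(vals, res)

SciPy restarts automatically until `k` Ritz pairs converge, which is exactly the compress/re-expand loop the query above describes.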
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps", "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. 
The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact", "def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):\n x = tf.nn.l2_normalize(x)\n for _ in range(num_steps):\n x = eig_one_step(x, learning_rate, vector_prod_fn)\n return x", "def eigenalgo(self, accuracy: float = 0, cap: int = 50000, version: str = \"Givens\", not_skip: bool = True):\n j, temps, verify_accuracy = 0, 0, np.ones((self.N, self.N), dtype=bool) ^ np.eye(self.N, dtype=bool)\n if version == \"Gram-Schmidt\":\n temps = time()\n while np.any(abs(self.vap[verify_accuracy]) > accuracy) and j < cap:\n j += 1\n q, r = self.gram_schmidt_qr()\n self.vap, self.vep = r @ q, self.vep @ q\n\n elif version == \"Givens\":\n verify_accuracy = np.ones((self.N, self.N), dtype=bool) ^ np.eye(self.N, dtype=bool)\n temps = time()\n while np.any(abs(self.vap[verify_accuracy]) > accuracy) and j < cap:\n j += 1\n q, r = self.givens_qr()\n self.vap, self.vep = r @ q, self.vep @ q\n\n elif version == \"Rayleigh\":\n not_sing, diff, cond, j = True, accuracy + 1, True, 0\n temps = time()\n while cond: # Stop condition, all eigenvalues must be different\n while diff > accuracy and j < cap and not_sing:\n j += 1\n self.rvap, self.vep, diff, not_sing = self.rayleigh_iteration(self.rvap, self.vep)\n\n cond = False\n if j < cap:\n self.calc, first, not_sing = np.zeros(self.N, dtype=bool), True, 
True\n for i in range(self.N):\n if np.sum(np.less(np.abs(self.rvap - self.rvap[i]), 10 ** -6)) != 1:\n self.rvap[i + 1:] += self.memorize[i]\n if first:\n self.memorize[i] += 0.5\n self.vep[i + 1:, i + 1:] = np.eye(self.N - i - 1)\n first, cond, diff = False, True, accuracy + 1\n self.calc[i + 1:] = 1\n temps = time() - temps\n return self.rvap, self.vep, diff, j, temps\n\n else:\n print(\"Please select an appropriate value for the version parameter\")\n\n temps = time() - temps\n diff = np.max(abs(self.vap[verify_accuracy]))\n return np.diag(self.vap), self.vep, diff, j, temps", "def posdef_eig_svd(mat):\n evals, evecs, _ = tf.svd(mat)\n\n return evals, evecs", "def eigsolve(self,**kwargs):\n return eigsolve(self,**kwargs)", "def incremental_svd(A, qr_flg=False):\n\n m = 256\n n = 7291\n\n n0 = 256\n\n if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')\n\n start = time.clock()\n\n A0 = A[:, :n0]\n U, s, V = ln.svd(A0, full_matrices=False)\n\n # NOTE: s is a vector; np.diag(s) will produce a diagonal matrix\n for i in range(n0, n):\n\n # new matrix is just a single vector (i-th column of A)\n A1 = np.matrix(A[:, i]).T\n\n if qr_flg:\n J, K = ln.qr(A1 - np.dot(np.dot(U, U.T), A1))\n U_, s_, V_ = ln.svd(\n np.vstack((\n np.hstack((np.diag(s), np.dot(U.T, A1))),\n np.hstack((np.zeros((K.shape[0], s.shape[0])), K))\n )),\n full_matrices=False)\n\n # update the result of SVD\n U = np.dot(np.hstack((U, J)), U_)\n\n else:\n U_, s_, V_ = ln.svd(np.hstack((np.diag(s), np.dot(U.T, A1))), full_matrices=False)\n U = np.dot(U, U_)\n\n s = s_\n\n # NOTE: V from svd on NumPy is already transposed\n V = np.dot(V_,\n np.vstack((\n np.hstack((V, np.zeros((V.shape[0], i+1-V.shape[1])))),\n np.hstack((np.zeros((V_.shape[1]-V.shape[0], V.shape[1])), np.eye(V_.shape[1]-V.shape[0], i+1-V.shape[1])))\n ))\n )\n\n # for next computation, update A0\n A0 = np.hstack((A0, A1))\n\n elapsed_time = time.clock() - start\n print 'time:', elapsed_time\n\n return U, s, V", "def poweig(A, x0, maxiter = 100, ztol= 1.0e-5, mode= 0, teststeps=1):\n m = len(A)\n xi = x0[:] \n \n for n in range(maxiter):\n # matrix vector multiplication.\n xim1 = xi[:]\n for i in range(m):\n xi[i] = 0.0\n for j in range(m):\n xi[i] += A[i][j] * xim1[j]\n print n, xi\n if mode == 0:\n vlen = sqrt(sum([xi[k]**2 for k in range(m)]))\n xi = [xi[k] /vlen for k in range(m)]\n elif mode == 1:\n for k in range(m-1, -1, -1):\n c = abs(xi[k])\n if c > 1.0e-5:\n xi = [xi[k] /c for k in range(m)]\n break\n # early termination test.\n if n % teststeps == 0:\n S = sum([xi[k]-xim1[k] for k in range(m)])\n if abs(S) < ztol:\n break\n #print n, xi\n # Compute Rayleigh quotient.\n numer = sum([xi[k] * xim1[k] for k in range(m)])\n denom = sum([xim1[k]**2 for k in range(m)])\n xlambda = numer/denom\n return xlambda, xi", "def eigenvects(mat):\n # Check if symbols are present\n if hasSymbols(mat):\n return mat.eigenvects()\n # Purely numeric matrix\n newMat = recursiveEvaluate(mat.as_mutable())\n return newMat.eigenvects()", "def solve_for_eigenvectors(matrix, num, mode=\"general\"):\n\n # Construct a sparse matrix\n if mode == \"general\":\n return linalg.eigs(matrix, num)\n\n if mode == \"symmetric\":\n return linalg.eigsh(matrix, num)", "def eigen_decomposition(self):\n w, V = linalg.eigh(self.K)\n c = w[::-1]\n if isinstance(self.num_xi, float):\n percent_energy = np.cumsum(c) / np.sum(c)\n self.num_xi = np.arange(c.shape[0])[percent_energy < self.num_xi][-1] # num_xi changes\n self.Lambda = w[::-1][:self.num_xi]\n self.V = 
V[:, ::-1][:, :self.num_xi]", "def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,\n random_state=None):\n\n from scipy.sparse import csc_matrix\n from scipy.linalg import LinAlgError\n\n random_state = check_random_state(random_state)\n\n vectors = as_float_array(vectors, copy=copy)\n\n eps = np.finfo(float).eps\n n_samples, n_components = vectors.shape\n\n # Normalize the eigenvectors to an equal length of a vector of ones.\n # Reorient the eigenvectors to point in the negative direction with respect\n # to the first element. This may have to do with constraining the\n # eigenvectors to lie in a specific quadrant to make the discretization\n # search easier.\n norm_ones = np.sqrt(n_samples)\n for i in range(vectors.shape[1]):\n vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \\\n * norm_ones\n if vectors[0, i] != 0:\n vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])\n\n # Normalize the rows of the eigenvectors. Samples should lie on the unit\n # hypersphere centered at the origin. This transforms the samples in the\n # embedding space to the space of partition matrices.\n vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]\n\n svd_restarts = 0\n has_converged = False\n\n # If there is an exception we try to randomize and rerun SVD again\n # do this max_svd_restarts times.\n while (svd_restarts < max_svd_restarts) and not has_converged:\n\n # Initialize first column of rotation matrix with a row of the\n # eigenvectors\n rotation = np.zeros((n_components, n_components))\n rotation[:, 0] = vectors[random_state.randint(n_samples), :].T\n\n # To initialize the rest of the rotation matrix, find the rows\n # of the eigenvectors that are as orthogonal to each other as\n # possible\n c = np.zeros(n_samples)\n for j in range(1, n_components):\n # Accumulate c to ensure row is as orthogonal as possible to\n # previous picks as well as current one\n c += np.abs(np.dot(vectors, rotation[:, j - 1]))\n rotation[:, j] = vectors[c.argmin(), :].T\n\n last_objective_value = 0.0\n n_iter = 0\n\n while not has_converged:\n n_iter += 1\n\n t_discrete = np.dot(vectors, rotation)\n\n labels = t_discrete.argmax(axis=1)\n vectors_discrete = csc_matrix(\n (np.ones(len(labels)), (np.arange(0, n_samples), labels)),\n shape=(n_samples, n_components))\n\n t_svd = vectors_discrete.T * vectors\n\n try:\n U, S, Vh = np.linalg.svd(t_svd)\n svd_restarts += 1\n except LinAlgError:\n print(\"SVD did not converge, randomizing and trying again\")\n break\n\n ncut_value = 2.0 * (n_samples - S.sum())\n if ((abs(ncut_value - last_objective_value) < eps) or\n (n_iter > n_iter_max)):\n has_converged = True\n else:\n # otherwise calculate rotation and continue\n last_objective_value = ncut_value\n rotation = np.dot(Vh.T, U.T)\n\n if not has_converged:\n raise LinAlgError('SVD did not converge')\n return labels", "def test_inverse_eigenvectors_non_interacting(self, size):\n t_nn = 1.2\n idx = np.arange(size)\n g0_inv_full = np.zeros((size, size), dtype=complex)\n g0_inv_full[idx[:-1], idx[1:]] = g0_inv_full[idx[1:], idx[:-1]] = t_nn\n for g0 in self.g0_loc_inv:\n g0_inv_full[idx, idx] = g0\n rv, h, rv_inv = gt.matrix.decompose_gf(g0_inv_full)\n assert_allclose(rv.dot(rv_inv), np.identity(*h.shape), atol=1e-14)", "def test_eigsum_non_interacting(self, size):\n t_nn = 1.2\n idx = np.arange(size)\n g0_inv_full = np.zeros((size, size), dtype=complex)\n g0_inv_full[idx[:-1], idx[1:]] = g0_inv_full[idx[1:], idx[:-1]] = t_nn\n for g0 in self.g0_loc_inv:\n g0_inv_full[idx, 
idx] = g0\n _, h, _ = gt.matrix.decompose_gf(g0_inv_full)\n assert_allclose(np.sum(h), np.trace(g0_inv_full))", "def estimate_ivec(nt, ft, v_matrix, vtv_matrix, eye=None):\n v_dim = v_matrix.shape[1]\n n_gauss = nt.shape[1]\n\n # Construct eye if necessary\n if eye is None:\n eye = Extractor.to_rfpf(np.eye(v_dim, dtype=v_matrix.dtype).T)\n\n it = eye.T.reshape((1, -1))\n vtvt = vtv_matrix.T.reshape((n_gauss, -1))\n\n b = np.dot(ft, v_matrix).T\n lt = np.dot(nt, vtvt) + it\n\n l = lt.reshape((vtv_matrix.shape[1], vtv_matrix.shape[0])).T\n\n out = Extractor.solve(l, b)\n\n return out", "def posdef_eig(mat):\n return posdef_eig_functions[POSDEF_EIG_METHOD](mat)", "def get_eigvals_eigvects(\n num_layers,\n numeric_matrices_eV_over_angsquared,\n layer_mass_amu,\n use_banded_algorithm=False,\n):\n # Based on the units in input, and indicating with:\n # - [hbar omega] the numeric value for the frequency in meV => hbar omega = [hbar omega] * meV\n # - [K] the numeric value of K in eV/ang^2\n # - [m] the layer mass in amu\n # we have (we omit the sign, and for units considerations we 'drop' U):\n # omega^2 = K / m =>\n # (hbar omega)^2 = hbar^2 * K / m =>\n # [hbar omega]^2 * meV^2 = hbar^2 * [K] / [m] * eV/ang^2 / amu = [K] / [m] * hbar^2 * eV/ang^2 / amu =>\n # [hbar omega]^2 = = [K] / [m] * ( hbar^2 * eV/ang^2 / amu / meV^2 )\n # so that the conversion factor is the last bracketed term:\n # conversion_factor = hbar^2 * eV / (angstrom^2 * amu * meV^2)\n conversion_factor = 4180.15925\n # NOTE: for simplicity, the conversion is applied at the very end\n\n if use_banded_algorithm:\n # 3 blocks (below, same layer, and above) of size 3 => total width of 9\n # Since we only store the upper part, we only need a width of 4 (diagonal + 3 superdiagonals)\n K_matrix = np.zeros((4, num_layers * 3))\n else:\n K_matrix = np.zeros((num_layers * 3, num_layers * 3))\n\n # Note: I construct -K, actually\n for block_idx in range(num_layers):\n # Interaction with upper layer\n if block_idx < num_layers - 1: # Not in the last layer\n current_block = np.array(\n numeric_matrices_eV_over_angsquared[\n block_idx % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=current_block,\n block_i=block_idx + 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n # Interaction with lower layer\n if block_idx > 0: # Not in the first layer\n previous_block = np.array(\n numeric_matrices_eV_over_angsquared[\n (block_idx - 1) % len(numeric_matrices_eV_over_angsquared)\n ]\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx,\n block_j=block_idx,\n factor=+1,\n banded=use_banded_algorithm,\n )\n add_block(\n matrix=K_matrix,\n block=previous_block,\n block_i=block_idx - 1,\n block_j=block_idx,\n factor=-1,\n banded=use_banded_algorithm,\n )\n\n # We want to get the eigenvalues of omega^2 U = - 1/M_layer K U\n K_matrix /= layer_mass_amu\n\n # Get frequencies (eigvals) and eigenvectors (for mode analysis)\n if use_banded_algorithm:\n eigvals, eigvects = scipy.linalg.eig_banded(K_matrix, lower=False)\n else:\n eigvals, eigvects = np.linalg.eigh(K_matrix)\n\n eigvals *= conversion_factor\n\n ## The first three should be acoustic i.e. 
almost zero; the rest should be positive\n ## I don't check as depending on the units it's hard to define a correct absolute energy\n # assert np.sum(np.abs(eigvals[:3])) < 1.0e-8\n\n # Remove the first three acoustic modes\n return eigvals[3:], eigvects[:, 3:]", "def eigen_vector_i_all(self):\n return self._eig_vec", "def gauss_seidel_solver(self, mat, rhs):\n x = np.zeros_like(rhs)\n for it_count in range(1, self.iterations_number):\n x_new = np.zeros_like(x)\n if self.verbose > 1:\n print(\"Iteration {0}: {1}\".format(it_count, x))\n for i in range(mat.shape[0]):\n s1 = np.dot(mat[i, :i], x_new[:i])\n s2 = np.dot(mat[i, i + 1:], x[i + 1:])\n x_new[i] = (rhs[i] - s1 - s2) / mat[i, i]\n if np.allclose(x, x_new, rtol=1e-8):\n break\n x = x_new\n return x", "def power_iteration(X):\n #X, languages=prepare_data_matrix()\n M=X\n M=M-np.mean(M, axis=0)\n M=np.cov(M, rowvar=False) #the covariance matrix, size 100x100\n x=np.ones(len(M)) #a random starting vector composed of 100 ones, it only cant be of all zeros\n difference=np.ones(len(x))\n\n #print(np.linalg.norm(difference))\n while np.linalg.norm(difference) >= 10**-5: #we iterate until the difference between the previous and the new x is really small, lets say 10^-5\n #print(x.T.shape)\n oldx=x\n z=M.dot((x.T))\n x=z.T\n x=x/np.linalg.norm(x)\n difference=np.linalg.norm(oldx-x)\n #the x that we get at the end of this loop is our eigenvector\n\n #print(x.dot(M).shape)\n #print(x.shape)\n y=(x.dot(M)).dot(x.T) #y is the corresponding eigenvalue to the eigenvector x\n \n return x, y", "def truncated_svd(A,k=None):", "def eig_faces(u_mat, nmode, dim):\n n = int(nmode)\n nparray = np.zeros(np.size(u_mat[:,0]))\n for i in range(n):\n nparray = nparray + u_mat[:,i]\n \n nparray = np.reshape(nparray,dim)\n return(nparray)", "def initiateVMatrixes():\n global v, vNew, vExact\n # Initialize the grid to 0\n v = np.zeros((n+1, n+1)) # matrix of v, index are i: row, j:column\n # Set the boundary conditions\n for i in range(1,n):\n v[0,i] = 10\n v[n,i] = 10\n v[i,0] = 10\n v[i,n] = 10\n # Exact solution\n vExact = np.copy(v)\n for i in range(1,n):\n for j in range(1,n):\n vExact[i,j] = 10\n # Initial guess\n for i in range(1,n):\n for j in range(1,n):\n v[i,j] = 0.9*vExact[i,j]\n vNew = np.copy(v)", "def calculate_posvij_matrices(main_tetrad_ark):\n\n # Import all the possible solutions to the Vij matrices\n vij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n vij_matrices = []\n\n print(\" \")\n print(\" Calculating Vij matrices\")\n print(\" \")\n # for i in range(0, len(main_tetrad_ark)):\n for i in range(0, len(vij_possibilities)):\n tet_i = [x[1] for x in main_tetrad_ark[i]]\n tri_tet = [np.transpose(i) for i in tet_i]\n print(\"# ********************************\")\n # print(\" \")\n print(\"MATRIX i: \", i)\n print(\" \")\n for j in range(0, len(main_tetrad_ark)):\n tet_j = [x[1] for x in main_tetrad_ark[j]]\n trj_tet = [np.transpose(j) for j in tet_j]\n vij_temp = []\n # print(\"# ********************************\")\n print(\" \")\n print(\"MATRIX j: \", j)\n temp_zero = np.zeros((4,4), dtype=int)\n for x in range(0,len(tet_i)):\n test_1half = np.dot(tri_tet[x],tet_j[x])\n test_2half = np.dot(trj_tet[x],tet_i[x])\n test_difs = np.subtract(test_1half, test_2half)\n # print(\" \")\n # print(test_difs)\n temp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n vij_temp.append(temp_mat)\n # print(\"\")\n temp_add1 = np.add(vij_temp[0], vij_temp[1])\n temp_add2 = np.add(temp_add1, vij_temp[2])\n tempf = np.add(temp_add2, 
vij_temp[3])\n # tempf = np.divide(temp_add3, 2)\n for ijx in vij_possibilities:\n if np.array_equal(temp_addf, ijx[0]):\n print(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n print(\"l-solution found:\", ijx[1])\n print(temp_addf)\n print(\"\")\n print(ijx[0])\n if np.array_equal(temp_addf, temp_zero):\n pass\n else:\n vij_matrices.append(temp_addf)\n # print(\"\")\n print(temp_addf)\n # vij_matrices.append(temp_addf)\n vijmats_size = sys.getsizeof(vij_matrices)\n print(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n print(\"Length of Vij Matrices\")\n print(len(vij_matrices))\n print(vij_matrices)\n pass", "def eigen(X):\n\n symmetric = np.alltrue(np.isclose(X - X.T, np.zeros(n)))\n small = max(X.shape) <= 11\n\n if symmetric:\n return jacobi(X)\n elif small:\n maxiter = 10 ** max(*X.shape, 4)\n return qrm3(X, maxiter=maxiter)\n else:\n maxiter = 10 ** max(*X.shape, 4)\n return qrm2(X, maxiter=maxiter)", "def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # SVD Factorization\n U, s, Vt = scipy.linalg.svd(A, full_matrices=False)\n\n # Remove dimensions related with very small singular values\n U = U[:, s > tol]\n Vt = Vt[s > tol, :]\n s = s[s > tol]\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(z)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n z = U.dot(aux2)\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = V 1/s U.T x\n aux1 = U.T.dot(x)\n aux2 = 1/s*aux1\n z = Vt.T.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def eigs(self,num_eigvals,manifold_num):\n num_sites = len(self.energies[manifold_num])\n ham = self.manifold_hamiltonian(manifold_num)\n eigvals, eigvecs = eigsh(ham,k=num_eigvals*num_sites,which='SM')\n # Force degenerate eigenvectors to be orthogonal\n if self.qr_flag:\n eigvecs, r = np.linalg.qr(eigvecs,mode='reduced')\n if self.check_eigenvectors:\n HV = ham.dot(eigvecs)\n D = eigvecs.T.dot(HV)\n if np.allclose(D,np.diag(eigvals),rtol=1E-11,atol=1E-11):\n pass\n else:\n # warnings.warn('Eigenvalues altered by QR factorization, max absolute change in diagonal matrix of {}'.format(np.max(D-np.diag(eigvals))))\n warnings.warn('Using eigenvectors to diagonalize hamiltonian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))\n \n sort_indices = eigvals.argsort()\n eigvals.sort()\n eigvecs = eigvecs[:,sort_indices]\n if self.qr_flag:\n r = r[:,sort_indices]\n self.r_mats.append(r)\n # I choose to pick the phase of my eigenvectors such that the state which has the\n # largest overlap has a positive overlap. 
For sufficiently small d, and alpha close\n # to 1, this will be the overlap between the same excited and ground states.\n for i in range(num_eigvals):\n max_index = np.argmax(np.abs(eigvecs[:,i]))\n if eigvecs[max_index,i] < 0:\n eigvecs[:,i] *= -1\n\n return eigvals, eigvecs", "def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod", "def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable:\n\n @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6))\n def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho):\n \"\"\"\n Jitted lanczos routine.\n Args:\n matvec: A callable implementing the matrix-vector product of a\n linear operator.\n arguments: Arguments to `matvec` additional to an input vector.\n `matvec` will be called as `matvec(init, *args)`.\n init: An initial input state to `matvec`.\n ncv: Number of krylov iterations (i.e. dimension of the Krylov space).\n neig: Number of eigenvalue-eigenvector pairs to be computed.\n landelta: Convergence parameter: if the norm of the current Lanczos vector\n falls below `landelta`, iteration is stopped.\n reortho: If `True`, reorthogonalize all krylov vectors at each step.\n This should be used if `neig>1`.\n Returns:\n jax.numpy.ndarray: Eigenvalues\n list: Eigenvectors\n \"\"\"\n\n def body_modified_gram_schmidt(i, vals):\n vector, krylov_vectors = vals\n v = krylov_vectors[i, :]\n vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors]\n\n def body_lanczos(vals):\n current_vector, krylov_vectors, vector_norms = vals[0:3]\n diagonal_elements, matvec, args, _ = vals[3:7]\n threshold, i, maxiteration = vals[7:]\n norm = jax.numpy.linalg.norm(current_vector)\n normalized_vector = current_vector / norm\n normalized_vector, krylov_vectors = jax.lax.cond(\n reortho, True,\n lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt,\n [normalized_vector, krylov_vectors]),\n False, lambda x: [normalized_vector, krylov_vectors])\n Av = matvec(normalized_vector, *args)\n\n diag_element = jax.numpy.vdot(normalized_vector, Av)\n\n res = jax.numpy.reshape(\n jax.numpy.ravel(Av) -\n jax.numpy.ravel(normalized_vector) * diag_element -\n krylov_vectors[i - 1] * norm, Av.shape)\n krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :],\n jax.numpy.ravel(normalized_vector))\n\n vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1],\n norm)\n diagonal_elements = jax.ops.index_update(diagonal_elements,\n jax.ops.index[i - 1],\n diag_element)\n\n return [\n res, krylov_vectors, vector_norms, diagonal_elements, matvec, args,\n norm, threshold, i + 1, maxiteration\n ]\n\n def cond_fun(vals):\n _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals\n\n def check_thresh(check_vals):\n val, thresh = check_vals\n return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x)\n\n return jax.lax.cond(iteration <= maxiteration, [norm, threshold],\n check_thresh, False, lambda x: x)\n\n numel = jax.numpy.prod(init.shape)\n krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype)\n norms = jax.numpy.zeros(ncv, dtype=init.dtype)\n diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype)\n\n norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0)\n\n norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0),\n dtype=init.dtype)).dtype\n initvals = [\n init, krylov_vecs, norms, diag_elems, matvec, arguments,\n norms_dtype.type(1.0), landelta, 1, ncv\n ]\n output = jax.lax.while_loop(cond_fun, body_lanczos, 
initvals)\n final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output\n krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :],\n jax.numpy.ravel(final_state))\n\n A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag(\n norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1)\n eigvals, U = jax.numpy.linalg.eigh(A_tridiag)\n eigvals = eigvals.astype(A_tridiag.dtype)\n\n def body_vector(i, vals):\n krv, unitary, states = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m + 1, :] * unitary[m, n])\n return [krv, unitary, states]\n\n state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype)\n _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1),\n body_vector,\n [krylov_vecs, U, state_vectors])\n\n return jax.numpy.array(eigvals[0:neig]), [\n jax.numpy.reshape(vectors[n, :], init.shape) /\n jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig)\n ]\n\n return jax_lanczos", "def main():\n print 'Running the power method...'\n dim = input('Give the dimension : ')\n nbit = input('How many iterations ? ')\n j = complex(0, 1)\n rnd = np.random.normal(0, 1, (dim, dim)) \\\n + np.random.normal(0, 1, (dim, dim))*j\n nbs = np.random.normal(0, 1, (dim, 1)) \\\n + np.random.normal(0, 1, (dim, 1))*j\n rndmat = np.matrix(rnd)\n rndvec = np.matrix(nbs)\n eigmax = power_method(rndmat, rndvec, nbit)\n check(rndmat, eigmax)", "def run_vqe(\n self,\n backend=Aer.get_backend(\"statevector_simulator\"),\n var_form=None,\n optimizer=None,\n reps=None,\n mode=\"min_val\",\n ):\n # N=int(np.ceil(np.log2(len(self.mat))))\n # hk = np.zeros((2**N,2**N),dtype='complex')\n # hk[:self.mat.shape[0], :self.mat.shape[1]] = self.mat\n N = self.n_qubits()\n if mode == \"max_val\":\n Hamil_mat = aqua.operators.MatrixOperator(-1 * self.mat)\n # Hamil_mat = MatrixOperator(-1 * self.mat)\n else:\n Hamil_mat = aqua.operators.MatrixOperator(self.mat)\n # Hamil_mat = MatrixOperator(self.mat)\n Hamil_qop = aqua.operators.op_converter.to_weighted_pauli_operator(\n Hamil_mat\n )\n if var_form is None:\n if reps is None:\n reps = 2\n # reps=5\n from qiskit.circuit.library import EfficientSU2\n\n var_form = EfficientSU2(N, reps=reps)\n if optimizer is None:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form)\n # vqe = VQE(Hamil_qop, var_form)\n else:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form, optimizer)\n # vqe = VQE(Hamil_qop, var_form, optimizer)\n vqe_result = vqe.run(backend)\n en = np.real(vqe_result[\"eigenvalue\"])\n # params=vqe.optimal_params\n # circuit=vqe.construct_circuit(params)\n if mode == \"max_val\":\n en = -1 * en\n # states = np.sort(\n # np.real(\n # vqe.expectation.convert(\n # StateFn(vqe.operator, is_measurement=True)\n # ).to_matrix()\n # )\n # )\n return en, vqe_result, vqe", "def calculate_posvij_matrices(main_tetrad_ark):\n\n\t# Import all the possible solutions to the Vij matrices\n\tvij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n\tvij_matrices = []\n\n\tprint(\"\t\t\t\t\t\t\t\")\n\tprint(\"\tCalculating Vij matrices\")\n\tprint(\"\t\t\t\t\t\t\t\")\n\t# for i in range(0, len(main_tetrad_ark)):\n\tfor i in range(0, len(vij_possibilities)):\n\t\ttet_i = [x[1] for x in main_tetrad_ark[i]]\n\t\ttri_tet = [np.transpose(i) for i in tet_i]\n\t\tprint(\"# ********************************\")\n\t\t# print(\"\t\t\t\t\t\t\t\t \")\n\t\tprint(\"MATRIX i: \", i)\n\t\tprint(\"\t\t\t\t\t\t\t\t \")\n\t\tfor j in range(0, len(main_tetrad_ark)):\n\t\t\ttet_j = [x[1] for x in 
main_tetrad_ark[j]]\n\t\t\ttrj_tet = [np.transpose(j) for j in tet_j]\n\t\t\tvij_temp = []\n\t\t\t# print(\"# ********************************\")\n\t\t\tprint(\"\t\t\")\n\t\t\tprint(\"MATRIX j: \", j)\n\t\t\ttemp_zero = np.zeros((4,4), dtype=int)\n\t\t\tfor x in range(0,len(tet_i)):\n\t\t\t\ttest_1half = np.dot(tri_tet[x],tet_j[x])\n\t\t\t\ttest_2half = np.dot(trj_tet[x],tet_i[x])\n\t\t\t\ttest_difs = np.subtract(test_1half, test_2half)\n\t\t\t\t# print(\" \")\n\t\t\t\t# print(test_difs)\n\t\t\t\ttemp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n\t\t\t\tvij_temp.append(temp_mat)\n\t\t\t\t# print(\"\")\n\t\t\ttemp_add1 = np.add(vij_temp[0], vij_temp[1])\n\t\t\ttemp_add2 = np.add(temp_add1, vij_temp[2])\n\t\t\ttempf = np.add(temp_add2, vij_temp[3])\n\t\t\t# tempf = np.divide(temp_add3, 2)\n\t\t\tfor ijx in vij_possibilities:\n\t\t\t\tif np.array_equal(temp_addf, ijx[0]):\n\t\t\t\t\tprint(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n\t\t\t\t\tprint(\"l-solution found:\", ijx[1])\n\t\t\t\t\tprint(temp_addf)\n\t\t\t\t\tprint(\"\")\n\t\t\t\t\tprint(ijx[0])\n\t\t\tif np.array_equal(temp_addf, temp_zero):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tvij_matrices.append(temp_addf)\n\t\t\t# print(\"\")\n\t\t\tprint(temp_addf)\n\t\t\t# vij_matrices.append(temp_addf)\n\t\tvijmats_size = sys.getsizeof(vij_matrices)\n\t\tprint(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n\tprint(\"Length of Vij Matrices\")\n\tprint(len(vij_matrices))\n\tpass", "def truncate(self, num_kl, tol=1e-1, flag=\"default\"):\n\n # if num_kl > np.size(self.eigen_vals):\n # num_kl = np.size(self.eigen_vals)\n\n if flag == \"partial\":\n previous = 0\n for i in range(0, num_kl):\n current = self.eigen_vals[i] + previous\n indx = i\n if abs(current - previous) <= tol:\n break\n previous = current\n return indx\n\n total = np.sum(self.eigen_vals)\n sum_val = 0.0\n for i in range(0, num_kl):\n sum_val = sum_val + self.eigen_vals[i]\n if sum_val / total >= 0.95:\n break\n return i", "def compute_resonances(elt=None, K0=None, K1=None, K2=None, neigs=0):\n\n if elt is not None:\n (N, nnz) = problem_size(elt)\n issparse = (neigs != 0) and (nnz < 0.2 * N**2) and (N > 100)\n (K0, K1, K2) = form_operators(elt, issparse)\n\n N = len(K0)\n\n #TODO: Adding sparsity\n Z = np.zeros((N,N))\n I = np.eye(N)\n\n A = np.vstack((np.hstack((K0, Z)), np.hstack((Z, I))))\n B = np.vstack((np.hstack((-K1, -K2)), np.hstack((I, Z))))\n\n ll = eig(a=A, b=B, left=False, right=False)\n ll_sorted = (np.unique(ll.round(decimals=4)))[np.argsort(np.abs(np.unique(ll.round(decimals=4))))]\n ll_sorted = ll_sorted[np.abs(ll_sorted) < 1e308]\n return ll_sorted * 1.0j\n\n \n # (ll, V) = eig(a=A, b=B)\n # if neigs is 0:\n # return (ll*1.0j, V)\n # else:\n # #TODO: Figure out LU decomposition and inverse iteration \n # #TODO: When sparse, use scipy.sparse.linalg to use linear operator\n # return (l[:neigs], V[:,:neigs])", "def eig(self,manifold_num):\n num_sites = len(self.energies[manifold_num])\n ham = self.manifold_hamiltonian(manifold_num).toarray()\n eigvals, eigvecs = eigh(ham)\n # Force degenerate eigenvectors to be orthogonal\n if self.qr_flag:\n eigvecs, r = np.linalg.qr(eigvecs,mode='reduced')\n if self.check_eigenvectors:\n HV = ham.dot(eigvecs)\n D = eigvecs.T.dot(HV)\n if np.allclose(D,np.diag(eigvals),rtol=1E-11,atol=1E-11):\n pass\n else:\n # warnings.warn('Eigenvalues altered by QR factorization, max absolute change in diagonal matrix of {}'.format(np.max(D-np.diag(eigvals))))\n 
warnings.warn('Using eigenvectors to diagonalize hamiltonian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))\n \n sort_indices = eigvals.argsort()\n eigvals.sort()\n eigvecs = eigvecs[:,sort_indices]\n if self.qr_flag:\n r = r[:,sort_indices]\n self.r_mats.append(r)\n # I choose to pick the phase of my eigenvectors such that the state which has the\n # largest overlap has a positive overlap. For sufficiently small d, and alpha close\n # to 1, this will be the overlap between the same excited and ground states.\n for i in range(eigvals.size):\n max_index = np.argmax(np.abs(eigvecs[:,i]))\n if eigvecs[max_index,i] < 0:\n eigvecs[:,i] *= -1\n\n return eigvals, eigvecs", "def davidson_solver(ax_function, preconditioner, guess, e_conv=1.0E-8, r_conv=None, no_eigs=1, max_vecs_per_root=10, maxiter=100):\n\n if r_conv == None:\n r_conv = e_conv * 100\n d_tol = 1.0E-8\n\n # using the shape of the guess vectors to set the dimension of the matrix\n N = guess.shape[0]\n\n #sanity check, guess subspace must be at least equal to number of eigenvalues\n nli = guess.shape[1]\n if nli < no_eigs:\n raise ValueError(\"Not enough guess vectors provided!\")\n\n nl = nli\n converged=False\n count = 0\n sub_count = nli\n A_w_old = np.ones(nli)\n max_ss_size = nli * max_vecs_per_root\n B = np.zeros((N,N))\n B[:,:nli] = guess\n\n ### begin loop\n while count < maxiter:\n active_mask = [True for x in range(nl)]\n # Apply QR decomposition on B to orthogonalize the new vectors wrto all other subspace vectors\n ## orthogonalize preconditioned residuals against all other vectors in the search subspace\n B, r = np.linalg.qr(B)\n\n # compute sigma vectors corresponding to the new vectors sigma_i = A B_i\n sigma = np.zeros((N,nl))\n for i in range(nl):\n bvec = B[:,i]\n sigma[:,i] = ax_function(B[:,i])\n\n # compute subspace matrix A_b = Btranspose sigma\n A_b = np.dot(B[:,:nl].T, sigma)\n\n # solve eigenvalue problem for subspace matrix; choose n lowest eigenvalue eigpairs\n A_w, A_v = np.linalg.eig(A_b)\n\n # sorting eigenvalues and corresponding eigenvectors\n A_v = A_v[:, A_w.argsort()]\n A_w = A_w[A_w.argsort()]\n\n # here, check if no residuals > max no residuals, if so, collapse subspace\n sub_count = A_v.shape[0]\n if sub_count >= max_ss_size:\n print(\"Subspace too big. Collapsing.\\n\")\n Bnew = np.zeros((N,N))\n Bnew[:,:nli] = np.dot(B[:,:nl], A_v[:,:nli])\n B = Bnew\n nl = nli\n continue\n # else, build residual matrix\n ## residual_i = sigma * eigvec - eigval * B * eigvec\n norm = np.zeros(nli)\n for i in range(0, nli):\n mat = A - A_w[i] * np.identity(N) \n residual = np.dot(mat, np.dot(B[:,:sub_count], A_v[:,i]))\n\n ## check for convergence by norm of residuals\n norm[i] = np.linalg.norm(residual)\n ##apply the preconditioner (A_ii - A_v_i)^-1\n precon_resid = preconditioner(residual, i, A, A_w)\n\n ## normalize and add to search subspace if they're larger than a threshold\n if np.linalg.norm(precon_resid) > d_tol:\n B[:,nl+1] = precon_resid\n nl += 1\n\n # check for convergence by diff of eigvals and residual norms\n check = norm < r_conv\n eig_norm = np.linalg.norm(A_w[:no_eigs] - A_w_old[:no_eigs])\n A_w_old = A_w\n if(check.all() == True and eig_norm < e_conv):\n converged = True\n break\n count += 1 \n\n if converged:\n print(\"Davidson converged at iteration number {}. \\n Eigenvalues: {} \\n Eigenvectors: {}\".format(count, A_w[:no_eigs], A_v[:,:no_eigs]))\n else:\n print(\"Davidson did not converge. 
Max iterations exceeded.\")", "def gs(self, k=50):\n # a. initialize V1 to Vk as a matrix of zeros\n Vs = np.zeros((k, self.ATA.shape[0]), dtype=float)\n\n # initialize u_n as first eigen vector?\n # un = self.eigen_vectors[0]\n\n # looking for k largest eigenvalues and associated eigenvectors\n # of ATA\n # b. for i = 1 to k\n for i in tqdm(range(len(Vs))):\n print(\"Doing i\")\n\n # i. randomly generated vector of size m\n # (length of latitudes, in this case?)\n # scale entire vector by its magnitude, to make magnitude = 1\n u1 = scale_mag_1(np.random.rand(self.ATA.shape[0]))\n un = u1 # at first, u_n is u_1 and random\n\n diff = 1 # set initial diff too high to trip while loop\n while diff > 1e-3:\n\n print(\"Doing ii\")\n # ii. u_(n+1) = A^T*A*u_n\n u1more = np.dot(self.ATA, un)\n\n print(\"Doing iii\")\n # iii. u_(n+1) = u_(n+1) - Sigma_j^(i-1)(u_(n+1)^T * V_j) * V_j\n u1more = u1more - np.sum([\n np.dot(np.dot(u1more.T, Vs[j]), Vs[j]) for j in range(i)\n ])\n\n print(\"Doing iv\")\n # iv. u_(n+1) = u_(n+1) / || u_(n+1) ||\n # just norm mag\n u1more = scale_mag_1(u1more)\n\n diff = mag(u1more - un)\n print(\"Diff:\", diff)\n\n un = u1more\n\n Vs[i] = un", "def eigsh(A, M = None, k = 6, sigma = None, which = 'LM', v0=None,\n ncv = None, maxiter = None, tol = 0., return_eigenvectors = True,\n Minv = None, OPinv = None, mode = 'normal'):\n if M is not None:\n raise NotImplementedError(\"M is not currently supported!\")\n if v0 is not None:\n raise NotImplementedError(\"v0 is not currently supported!\")\n if ncv is not None:\n raise NotImplementedError(\"ncv is not currently supported!\")\n if Minv is not None:\n raise NotImplementedError(\"Minv is not currently supported!\")\n if OPinv is not None:\n raise NotImplementedError(\"OPinv is not currently supported!\")\n inp_data = FrovedisFeatureData(A, dense_kind='rowmajor')\n X = inp_data.get()\n x_dtype = inp_data.get_dtype()\n x_itype = inp_data.get_itype()\n dense = inp_data.is_dense()\n nrows = inp_data.numRows()\n ncols = inp_data.numCols()\n\n if nrows != ncols:\n raise ValueError('expected squared symmetric matrix (shape=%s)' % (inp_data.shape,))\n if k <= 0:\n raise ValueError('k must be greater than 0.')\n if k >= nrows:\n raise ValueError('k must be less than or equal to N for N * N square matrix.')\n if sigma is not None and not dense:\n raise ValueError('currently sigma is only supported for dense matrices.')\n if sigma is None:\n sigma = np.finfo(np.float32).max\n\n if which not in ['LM', 'SM', 'LA', 'SA', 'BE']:\n raise ValueError('which must be one of LM, SM, LA, SA, or BE')\n if mode in ['buckling', 'cayley']:\n raise ValueError('currenly normal mode is only supported!')\n if maxiter is None:\n maxiter = 10 * nrows\n wantEv = return_eigenvectors\n (host, port) = FrovedisServer.getServerInstance()\n res = rpclib.compute_eigsh(host, port, X.get(),\n k, which.encode('ascii'),\n sigma, maxiter, wantEv,\n tol, x_dtype,\n x_itype, dense)\n excpt = rpclib.check_server_exception()\n if excpt[\"status\"]:\n raise RuntimeError(excpt[\"info\"])\n sptr = res[\"eigenval\"]\n uptr = res[\"eigenvec\"]\n m_m = res['m']\n k_k = res['k']\n eigval = FrovedisVector({'dptr' : sptr, 'size' : k_k},\n dtype = TypeUtil.to_numpy_dtype(x_dtype)).to_numpy_array()\n if wantEv:\n eigvec = FrovedisDenseMatrix('C', {'dptr' : uptr, 'nrow' : m_m, 'ncol' : k_k},\n dtype = TypeUtil.to_numpy_dtype(x_dtype)).to_numpy_array()\n return eigval, eigvec\n else:\n return eigval", "def eigen_vector_i(self, i):\n return self._eig_vec[:,i]", "def 
compute_limit_matrix(gamma, adjacency, n_states):\n num_states = n_states\n identity = np.eye(num_states)\n return np.linalg.inv(identity - gamma * adjacency / 6)", "def update_params(self, mat):\n assert mat.shape == self.shape\n vec = mat.reshape(mat.size)\n self.alpha = np.linalg.solve(self.covs_mat.T.dot(self.covs_mat),\n self.covs_mat.T.dot(vec))", "def heavy_fixCM_eigvals(NP, b, c, params):\n l = params['l']\n k = params['k']\n I3 = params['I3']\n # Here, omega_3 is just the MAGNITUDE, not signed\n w3 = np.abs(params['w3'][0])\n gn = params['Mm'] * params['g']\n\n # Check output if small system\n print 'gn = ', gn\n print 'b = ', b\n print 'c = ', c\n\n if NP == 1:\n pass\n elif NP == 2:\n matrix = -np.array([[0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0.],\n [(-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.,\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.],\n [0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3)],\n [(-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.]\n ])\n print 'exact matrix = ', matrix\n eigvals = np.array([\n 1j * l * gn / (I3 * w3),\n -1j * l * gn / (I3 * w3),\n l * np.sqrt(gn) * np.sqrt(0j - 2. * l * k * (-1) ** (b) - gn) / (I3 * w3),\n -l * np.sqrt(gn) * np.sqrt(0j - 2. * l * k * (-1) ** (b) - gn) / (I3 * w3)\n ])\n print 'exact_eigvals are =', eigvals\n return eigvals\n elif NP == 3:\n matrix = -np.array([[0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0., 0., 0.],\n [(-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.,\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0., 0., 0.],\n [0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0.],\n [(-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn - 2. * (-1) ** (b) * l ** 2 * k) / (I3 * w3), 0., \\\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.],\n [0., 0., 0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3)],\n [0., 0., (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.]\n ])\n print 'exact matrix = ', matrix\n\n eigvals = np.array([\n 1j * l * gn / (I3 * w3),\n # -1j*l*gn/(I3*w3),\n l * np.sqrt(gn) * np.sqrt(0j - 3. 
* l * k * (-1) ** (b) - gn) / (I3 * w3),\n # -l*np.sqrt(gn)*np.sqrt(0j-3.*l*k*(-1)**(b) - gn)/(I3*w3),\n l * np.sqrt(gn) * np.sqrt(0j - l * k * (-1) ** (b) - gn) / (I3 * w3),\n # -l*np.sqrt(gn)*np.sqrt(0j - l*k*(-1)**(b) - gn)/(I3*w3)\n ])\n return eigvals\n else:\n return np.array([])", "def solve_eq(xVec):\n \n PSI = xVec[0:vecLen] \n Cxx = xVec[1*vecLen:2*vecLen] \n Cyy = xVec[2*vecLen:3*vecLen] \n Cxy = xVec[3*vecLen:4*vecLen]\n\n\n # Useful Vectors\n Txx = oneOverWi * Cxx \n Txx[N*M] -= oneOverWi\n Tyy = oneOverWi * Cyy \n Tyy[N*M] -= oneOverWi\n Txy = oneOverWi * Cxy\n\n U = + dot(MDY, PSI)\n V = - dot(MDX, PSI)\n LAPLACPSI = dot(LAPLAC, PSI)\n\n # Useful Operators\n MMU = tsm.c_prod_mat(U)\n MMV = tsm.c_prod_mat(V)\n VGRAD = dot(MMU,MDX) + dot(MMV,MDY)\n MMDXU = tsm.c_prod_mat(dot(MDX, U))\n MMDXV = tsm.c_prod_mat(dot(MDX, V))\n MMDYU = tsm.c_prod_mat(dot(MDY, U))\n MMDYV = tsm.c_prod_mat(dot(MDY, V))\n\n MMDXPSI = tsm.c_prod_mat(dot(MDX, LAPLACPSI))\n MMDXCXX = tsm.c_prod_mat(dot(MDX, Cxx))\n MMDXCYY = tsm.c_prod_mat(dot(MDX, Cyy))\n MMDXCXY = tsm.c_prod_mat(dot(MDX, Cxy))\n\n #######calculate the Residuals########\n\n residualsVec = zeros((4*vecLen), dtype='complex')\n\n #####psi\n residualsVec[0:vecLen] = - Re*dot(MMU, dot(MDX, LAPLACPSI)) \\\n - Re*dot(MMV, dot(MDY, LAPLACPSI)) \\\n + beta*dot(BIHARM, PSI) \\\n - (1.-beta)*(dot(MDXX, Txy) + dot(MDXY, (Tyy - Txx)) \\\n - dot(MDYY, Txy))\n\n #####xx\n residualsVec[vecLen:2*vecLen] = - dot(VGRAD, Cxx) \\\n + 2.*dot(MMDXU, Cxx) \\\n + 2.*dot(MMDYU, Cxy) - Txx\n\n #####yy\n residualsVec[2*vecLen:3*vecLen] = - dot(VGRAD, Cyy) \\\n + 2.*dot(MMDXV, Cxy) \\\n + 2.*dot(MMDYV, Cyy) - Tyy\n\n #####xy\n residualsVec[3*vecLen:4*vecLen] = - dot(VGRAD, Cxy) \\\n + dot(MMDXV, Cxx) + dot(MMDYU, Cyy)\\\n - Txy\n\n #####psi0\n residualsVec[N*M:(N+1)*M] = - Re*dot(VGRAD, U)[N*M:(N+1)*M] \\\n + beta*dot(MDYYY, PSI)[N*M:(N+1)*M] \\\n + (1.-beta)*dot(MDY,Txy)[N*M:(N+1)*M]\n # set the pressure gradient (pressure driven flow)\n # residualsVec[N*M] += 2.0\n\n # set the forcing on the zeroth mode for non pressure driven flow.\n residualsVec[N*M:(N+1)*M] += forcingVec\n\n\n ##### Apply boundary conditions to residuals vector\n\n # dxPsi = 0 \n for k in range (2*N+1): \n if k == N: continue # skip the 0th component \n residualsVec[k*M + M-2] = dot((k-N)*kx*BTOP, PSI[k*M:(k+1)*M])\n residualsVec[k*M + M-1] = dot((k-N)*kx*BBOT, PSI[k*M:(k+1)*M])\n del k\n\n # dyPsi(+-1) = 0 \n for k in range (2*N+1):\n if k == N: continue # skip the 0th component \n residualsVec[k*M + M-4] = dot(DERIVTOP, PSI[k*M:(k+1)*M])\n residualsVec[k*M + M-3] = dot(DERIVBOT, PSI[k*M:(k+1)*M])\n del k\n\n # dyPsi0(+-1) = +-1\n residualsVec[N*M + M-3] = dot(DERIVTOP, PSI[N*M:(N+1)*M]) - 1.\n residualsVec[N*M + M-2] = dot(DERIVBOT, PSI[N*M:(N+1)*M]) + 1.\n\n # Psi0(-1) = 0\n residualsVec[N*M + M-1] = dot(BBOT, (PSI[N*M:(N+1)*M]))\n\n return (residualsVec)", "def find_min_norm_element(vecs):\n # Solution lying at the combination of two points\n\n\n vecs_clone = []\n for i in range(len(vecs)):\n # assert len(vecs[i]) == 1\n vecs_task = []\n for k in range(len(vecs[i])):\n vecs_task.append(vecs[i][k].view(-1))\n vecs_clone.append(torch.cat(vecs_task).unsqueeze(0))\n vecs_clone = torch.cat(vecs_clone)\n\n grad_mat = torch.matmul(vecs_clone, vecs_clone.t())\n\n # dps = {}\n init_sol = MinNormSolver._min_norm_2d(grad_mat)\n \n n = len(vecs)\n sol_vec = torch.zeros([n,]).cuda()\n sol_vec[init_sol[0][0]] = init_sol[1]\n sol_vec[init_sol[0][1]] = 1 - init_sol[1]\n# sol_vec = sol_vec.unsqueeze(0)\n\n 
if n < 3:\n # This is optimal for n=2, so return the solution\n return sol_vec , init_sol[2]\n \n iter_count = 0\n\n # grad_mat = np.zeros((n,n))\n # for i in range(n):\n # for j in range(n):\n # grad_mat[i,j] = dps[(i, j)]\n \n\n while iter_count < MinNormSolver.MAX_ITER:\n grad_dir = -1.0 * torch.matmul(grad_mat, sol_vec)\n# sol_vec = sol_vec.squeeze()\n new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)\n\n v1v1 = torch.sum(sol_vec.unsqueeze(1).repeat(1, n)*sol_vec.unsqueeze(0).repeat(n, 1)*grad_mat)\n v1v2 = torch.sum(sol_vec.unsqueeze(1).repeat(1, n)*new_point.unsqueeze(0).repeat(n, 1)*grad_mat)\n v2v2 = torch.sum(new_point.unsqueeze(1).repeat(1, n)*new_point.unsqueeze(0).repeat(n, 1)*grad_mat)\n\n nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)\n new_sol_vec = nc*sol_vec + (1-nc)*new_point\n change = new_sol_vec - sol_vec\n if torch.sum(torch.abs(change)) < MinNormSolver.STOP_CRIT:\n return sol_vec, nd\n sol_vec = new_sol_vec", "def matrix_svd(\n self,\n chis=None,\n eps=0,\n print_errors=\"deprecated\",\n break_degenerate=False,\n degeneracy_eps=1e-6,\n sparse=False,\n trunc_err_func=None,\n ):\n if print_errors != \"deprecated\":\n msg = (\n \"The `print_errors` keyword argument has been deprecated, \"\n \"and has no effect. Rely instead on getting the error as a \"\n \"return value, and print it yourself.\"\n )\n warnings.warn(msg)\n chis = self._matrix_decomp_format_chis(chis, eps)\n maxchi = max(chis)\n assert self.defval == 0\n assert self.invar\n\n # SVD each sector at a time.\n # While doing so, also keep track of a list of all singular values, as\n # well as a heap that gives the negative of the largest singular value\n # in each sector. These will be needed later when deciding how to\n # truncate the decomposition.\n svds = {}\n dims = {}\n minus_next_sings = []\n all_sings = []\n for k, v in self.sects.items():\n if 0 in v.shape:\n shp = v.shape\n m = min(shp)\n u = np.empty((shp[0], m), dtype=self.dtype)\n s = np.empty((m,), dtype=np.float_)\n v = np.empty((m, shp[1]), dtype=self.dtype)\n else:\n if sparse and maxchi < min(v.shape) - 1:\n u, s, v = spsla.svds(\n v, k=maxchi, return_singular_vectors=True\n )\n order = np.argsort(-s)\n u = u[:, order]\n s = s[order]\n v = v[order, :]\n else:\n u, s, v = np.linalg.svd(v, full_matrices=False)\n svd = (s, u, v)\n svds[k] = svd\n dims[k] = 0\n sings = svd[0]\n all_sings.append(sings)\n if 0 not in sings.shape:\n heapq.heappush(minus_next_sings, (-sings[0], k))\n try:\n all_sings = np.concatenate(all_sings)\n except ValueError:\n all_sings = np.array((0,))\n\n if sparse:\n norm_sq = self.norm_sq()\n else:\n norm_sq = None\n\n # Figure out what bond dimension to truncate to, how this bond\n # dimension is distributed over the different sectors, and what the\n # truncation error is.\n chi, dims, rel_err = type(self)._find_trunc_dim(\n all_sings,\n svds,\n minus_next_sings,\n dims,\n chis=chis,\n eps=eps,\n break_degenerate=break_degenerate,\n degeneracy_eps=degeneracy_eps,\n trunc_err_func=trunc_err_func,\n norm_sq=norm_sq,\n )\n\n # Truncate each block and create the dim for the new index.\n new_dim = []\n new_qim = []\n svds = {k: v for k, v in svds.items() if dims[k] > 0}\n for k, v in svds.items():\n d = dims[k]\n if d > 0:\n new_dim.append(d)\n new_qim.append(k[0])\n svds[k] = (v[0][:d], v[1][:, :d], v[2][:d, :])\n else:\n del svds[k]\n\n # Initialize U, S, V.\n d = self.dirs[0]\n U = type(self)(\n [self.shape[0], new_dim],\n qhape=[self.qhape[0], new_qim],\n dirs=[d, -d],\n qodulus=self.qodulus,\n 
dtype=self.dtype,\n charge=0,\n )\n S = type(self)(\n [new_dim],\n qhape=[new_qim],\n dirs=[d],\n qodulus=self.qodulus,\n dtype=np.float_,\n invar=False,\n charge=0,\n )\n V = type(self)(\n [new_dim, self.shape[1]],\n qhape=[new_qim, self.qhape[1]],\n dirs=[d, self.dirs[1]],\n qodulus=self.qodulus,\n dtype=self.dtype,\n charge=self.charge,\n )\n\n # Set the blocks of U, S and V.\n for k, v in svds.items():\n k_U = (k[0], k[0])\n S[(k[0],)] = v[0]\n U[k_U] = v[1]\n V[k] = v[2]\n\n return U, S, V, rel_err", "def run_vqd(\n self,\n backend=Aer.get_backend(\"statevector_simulator\"),\n var_form=None,\n optimizer=None,\n reps=2,\n # reps=5,\n ):\n tmp = HermitianSolver(self.mat)\n max_eigval, vqe_result, vqe = tmp.run_vqe(\n backend=backend,\n var_form=var_form,\n optimizer=optimizer,\n reps=reps,\n mode=\"max_val\",\n )\n eigvals = [max_eigval]\n eigstates = [vqe_result.eigenstate]\n for r in range(len(tmp.mat) - 1):\n val, vqe_result, vqe = tmp.run_vqe(\n backend=backend,\n var_form=var_form,\n optimizer=optimizer,\n reps=reps,\n )\n outer_prod = np.outer(\n vqe_result.eigenstate, np.conj(vqe_result.eigenstate).T\n )\n tmp.mat = tmp.mat - (val - max_eigval) * outer_prod\n eigvals.append(val)\n eigstates.append(vqe_result.eigenstate)\n tmp = HermitianSolver(tmp.mat)\n\n eigvals = np.array(eigvals)\n eigstates = np.array(eigstates)\n order = np.argsort(eigvals)\n eigvals = eigvals[order]\n eigstates = eigstates[order]\n return eigvals, eigstates", "def evolve(self, k_vec, Nt,**kwargs):\n \n M_eff = np.eye((self.Nd), dtype=complex) # aux matrix\n T = 1.\n for it in range(Nt):\n \n # update the Hamiltonian for time-inteval\n self.updateH(k_vec, it)\n\n # return eigenenergies and vectors\n E_k, U = lg.eig(self.H_kc) \n\n # U^-1 * exp(H_d) U\n U_inv = lg.inv(U)\n\n # construct a digonal matrix out of a vector\n M1 = (np.exp(-1.j*E_k*T) * U_inv.T).T\n\n #MM = np.dot(U_inv,np.dot(H_M, U))\n MM = np.dot(U,M1)\n M_eff = np.dot(M_eff,MM)\n # end of loop\n Ek, Uk = lg.eig( M_eff )\n idx = (np.log(Ek).imag).argsort()\n Efl_k = np.log(Ek).imag[idx]\n Ufl_k = Uk[idx]\n return Efl_k, Ufl_k", "def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x", "def FindEigenstates(**args):\n\tprop = SetupProblem(**args)\n\n\t#use custom initial residual if provided\n\tinitialResidual = args.get(\"initialResidual\")\n\tif initialResidual != None:\n\t\tprop.psi.GetData()[:] = initialResidual.GetData()\n\n\t#find eigenstates\n\t#solver = pyprop.ArpackSolver(prop)\n\tsolver = pyprop.PiramSolver(prop)\n\tsolver.Solve()\n\treturn solver", "def matrix_inv(mat):\n\ta = mat[0,0]\n\tb = mat[0,1]\n\tc = mat[0,2]\n\td = mat[1,0]\n\te = mat[1,1]\n\tf = mat[1,2]\n\tg = mat[2,0]\n\th = mat[2,1]\n\ti = mat[2,2]\n\n\tdet = b*f*g + c*d*h + a*e*i - a*f*h - b*d*i - c*e*g\n\n\tinvmat = np.zeros((3,3))\n\tinvmat[0,0] = (e*i - f*h) / det\n\tinvmat[0,1] = (c*h - b*i) / det\n\tinvmat[0,2] = (b*f - c*e) / det\n\tinvmat[1,0] = (f*g - d*i) / det\n\tinvmat[1,1] = (a*i - c*g) / det\n\tinvmat[1,2] = (c*d - a*f) / det\n\tinvmat[2,0] = (d*h - e*g) / det\n\tinvmat[2,1] = (b*g - a*h) / det\n\tinvmat[2,2] = (a*e - b*d) / det\n\treturn invmat", "def projective_factorization(x, max_iterations=1):\n\n n_views = len(x)\n n_points = x[0].shape[1]\n\n iterations = 0\n\n #lambda matrix, approximate depths\n l = np.ones((n_views, n_points))\n\n #normalization 
matrices\n    norm_matrices = []\n\n    # normalize coordinates\n    xn = np.zeros((3*n_views, n_points))\n    for i in range(n_views):\n\n        #find normalization matrix for projections i\n        x_norm, T = normalize_points(x[i], is_homogeneous=True)\n        xn[3*i:3*(i+1), :] = x_norm\n        norm_matrices.append(T)\n\n    while iterations < max_iterations:\n        # normalize the lambda matrix\n        lr_norm = norm(l, axis=1)\n        ln = l / lr_norm[:, np.newaxis]\n        lc_norm = norm(ln, axis=0)\n        ln /= lc_norm\n\n        # repeat the lambdas\n        ln = np.repeat(ln, 3, axis=0)\n\n        #build the factorization matrix\n        fact_matrix = ln*xn\n\n        u, d, vh = svd(fact_matrix)\n\n        print(d[3] / d[4])\n        d = d[:4]/d[0]\n\n        # from the svd decomposition we can find the projections and 3d points\n        p_matrices = u[:, :4]\n        x_3d = np.dot(np.diag(d), vh[:4, :])\n\n        iterations += 1\n        if iterations != max_iterations:\n\n            w_matrix = np.dot(p_matrices, x_3d)\n\n            for i in range(n_views):\n                l[i, :] = w_matrix[3*i+2, :]\n\n    cameras = []\n\n    for i in range(n_views):\n        # denormalize camera matrices\n        c_matrix = np.dot(inv(norm_matrices[i]), p_matrices[3*i:3*(i+1), :])\n\n        cameras.append(c_matrix)\n\n    return cameras, x_3d", "def eigvals(input):\n\n    is_input_dparray = isinstance(input, dparray)\n\n    if (not use_origin_backend(input) and is_input_dparray):\n        if (input.size > 0):\n            return dpnp_eigvals(input)\n\n    return call_origin(numpy.linalg.eigvals, input)", "def N_max_matriz_covarianza(C):\n    # auxiliary values\n    n_filas_cova = np.shape( C )[0]\n    n_cols_cova = np.shape( C )[1]\n    # eigenvalues and eigenvectors\n    eig_val, eig_vec = la.eig( C )\n    eig_vals = eig_val.real # eigenvalues (lambda_k)\n    eig_vecs = eig_vec.real # eigenvectors (v_k) (columns of eig_vecs)\n    # vector with the indices of the eigenvalues\n    idx_vec = np.arange((np.size(eig_vals)))\n    # transform the eigenvalues and the indices to a log-log space\n    x = idx_vec+1\n    y = np.abs(eig_vals)\n    # Triangular/circumscribed circle simple approximation to curvature \n    # (after Roger Stafford)\n\n    # the series of points used for the triangle/circle\n    x1 = x[:-2]\n    x2 = x[1:-1]\n    x3 = x[2:]\n    y1 = y[:-2]\n    y2 = y[1:-1]\n    y3 = y[2:]\n\n    # the side lengths for each triangle\n    a = np.sqrt(np.square(x3-x2)+np.square(y3-y2))\n    b = np.sqrt(np.square(x1-x3)+np.square(y1-y3))\n    c = np.sqrt(np.square(x2-x1)+np.square(y2-y1))\n    # semi-perimeter\n    s = (a+b+c)/2.\n    # radius of each circle\n    R = (a*b*c)/(4*np.sqrt((s*(s-a)*(s-b)*(s-c))))\n    # The curvature for each estimate for each value which is\n    # the reciprocal of its circumscribed radius. 
Since there aren't circles for \n # the end points they have no curvature\n kappa = np.ones((n_filas_cova))\n kappa[0] = 0.\n kappa[-1] = 0.\n kappa[1:-1] = np.reciprocal(R)\n idx_max = np.where(kappa == np.max(kappa))[0][0] - 1\n return idx_max", "def eig_vals_vects(matrix, sort='imag', not_hermitian=True, verbose=False):\n # if len(matrix) < 10:\n # print '\\nFinding eigvals, matrix = ', matrix\n\n # check if hermitian:\n if not_hermitian:\n eigval, eigvect = np.linalg.eig(matrix)\n else:\n if (matrix == matrix.conj().T).all():\n if verbose:\n print 'Shortcut eigvect/vals since matrix is hermitian...'\n eigval, eigvect = np.linalg.eigh(matrix)\n else:\n if verbose:\n print 'matrix is not hermitian...'\n eigval, eigvect = np.linalg.eig(matrix)\n\n # use imaginary part to get ascending order of eigvals\n if sort == 'imag':\n si = np.argsort(np.imag(eigval))\n elif sort == 'real':\n si = np.argsort(np.real(eigval))\n else:\n si = np.arange(len(eigval))\n\n eigvect = np.array(eigvect)\n eigvect_out = eigvect.T[si]\n eigval_out = eigval[si]\n\n # if len(eigval_out) < 10:\n # print 'eigvals return as =', eigval_out\n\n return eigval_out, eigvect_out", "def eigensolve(self, epsilon=0.85):\n raise NotImplementedError(\"eigensolve Incomplete\")", "def posdef_inv_eig(tensor, identity, damping):\n eigenvalues, eigenvectors = tf.self_adjoint_eig(tensor + damping * identity)\n return tf.matmul(eigenvectors / eigenvalues, eigenvectors, transpose_b=True)", "def sparse_expectation(mat, vec):\n return np.vdot(vec, mat.dot(vec)).real", "def test_inverse_non_interacting(self, size):\n t_nn = 1.2\n idx = np.arange(size)\n g0_inv_full = np.zeros((size, size), dtype=complex)\n g0_inv_full[idx[:-1], idx[1:]] = g0_inv_full[idx[1:], idx[:-1]] = t_nn\n for g0 in self.g0_loc_inv:\n g0_inv_full[idx, idx] = g0\n rv, h, rv_inv = gt.matrix.decompose_gf(g0_inv_full)\n g0 = gt.matrix.construct_gf(rv_inv=rv_inv, diag_inv=h**-1, rv=rv)\n assert_allclose(g0.dot(g0_inv_full), np.identity(size), atol=1e-14)\n assert_allclose(g0, la.inv(g0_inv_full))\n g0_alt = gt.matrix.Decomposition(rv, h**-1, rv_inv).reconstruct(kind='full')\n assert_allclose(g0, g0_alt)", "def _inv22_vectorized(M):\n assert (M.ndim == 3)\n assert (M.shape[-2:] == (2, 2))\n M_inv = np.empty_like(M)\n delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n return M_inv", "def truncated_svd(A,k=None):\n \n \n \n AHA=np.conj(A).T.dot(A)\n evals,evecs=la.eig(AHA)\n order=np.argsort(evals)\n\n evals=evals[order][::-1].copy()\n evecs=evecs.T[order][::-1].copy()\n m,n=AHA.shape\n \n tol=1e-12\n Vh=[]\n for i in xrange(0,m):\n\t\t if np.abs(evals[i])>=tol:\n\t \t\tVh+=[evecs[i]]\n \n Vh=np.array(Vh)\n s=np.sqrt(evals[:Vh.shape[0]])\n U=[]\n for i in xrange(0,len(s)):\n U+=[(1./s[i])*A.dot(Vh[i])]\n U=np.array(U).T\n \n return U,s,Vh", "def decrease_resolution(vec):\n\n lowMvec = zeros((2*NOld+1)*M, dtype='complex')\n for n in range(2*NOld+1):\n lowMvec[n*M:(n+1)*M] = vec[n*MOld:n*MOld + M]\n del n\n\n lowNMvec = zeros((2*N+1)*M, dtype='D')\n lowNMvec = lowMvec[(NOld-N)*M:(NOld-N)*M + (2*N+1)*M]\n\n return lowNMvec", "def _safe_inv22_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n\n # We set delta_inv to 0. 
in case of a rank deficient matrix ; a\n    # rank-deficient input matrix *M* will lead to a null matrix in output\n    rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n    if np.all(rank2):\n        # Normal 'optimized' flow.\n        delta_inv = 1./delta\n    else:\n        # 'Pathologic' flow.\n        delta_inv = np.zeros(M.shape[0])\n        delta_inv[rank2] = 1./delta[rank2]\n\n    M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n    M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n    M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n    M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n    return M_inv", "def get_eigen_values_and_vectors(matrix, num_values):\n    (w, v) = eigen_decomp(matrix)\n    eigen_values = []\n    eigen_vectors = []\n    ### YOUR CODE HERE\n    max_indexs=np.argpartition(w, -num_values)\n    max_indexs=max_indexs[-num_values:]\n    ids=np.argsort(w[max_indexs])\n    sort_index=max_indexs[ids]\n    eigen_values=w[sort_index]\n    eigen_vectors=v[:,sort_index]\n    ### END YOUR CODE\n    return eigen_values, eigen_vectors", "def _compute_primalEigenmatrix(self, expand=False, factor=False,\n                                   simplify=False):\n        if self._has(\"P\"):\n            return\n        params = {\"expand\": expand, \"factor\": factor, \"simplify\": simplify}\n        self._.P = self._compute_eigenmatrix(self.kTable(**params),\n                                             self.PTR, **params)", "def eigensystem(mat):\n    e, v = numpy.linalg.eig(mat)\n\n    # `eig` returns complex results but we know all of the\n    # eigenstates have real energy.\n    e = numpy.real(e)\n\n    items = zip(e, v.T)\n    items = sorted(items, key = operator.itemgetter(0))\n    e, v = zip(*items)\n\n    return (e, v)", "def InverseMatrix(matrix,vector):\r\n    # Verify that the matrix is invertible\r\n    if Determinant(matrix, 1) == 0:\r\n        print(\"Error, Singular Matrix\\n\")\r\n        return\r\n    # result matrix initialized as the identity matrix\r\n    result = MakeIMatrix(len(matrix), len(matrix))\r\n    # loop for each row\r\n    for i in range(len(matrix[0])):\r\n        # turn the pivot into 1 (make elementary matrix and multiply with the result matrix )\r\n        # pivoting process\r\n        matrix, vector = RowXchange(matrix, vector)\r\n        elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n        elementary[i][i] = 1/matrix[i][i]\r\n        result = MultiplyMatrix(elementary, result)\r\n        matrix = MultiplyMatrix(elementary, matrix)\r\n        # make elementary loop to iterate for each row and subtract the number below (specific) pivot to zero (make\r\n        # elementary matrix and multiply with the result matrix )\r\n        for j in range(i+1, len(matrix)):\r\n            elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n            elementary[j][i] = -(matrix[j][i])\r\n            matrix = MultiplyMatrix(elementary, matrix)\r\n            result = MultiplyMatrix(elementary, result)\r\n\r\n\r\n    # after finishing with the lower part of the matrix subtract the numbers above the pivot with elementary for loop\r\n    # (make elementary matrix and multiply with the result matrix )\r\n    for i in range(len(matrix[0])-1, 0, -1):\r\n        for j in range(i-1, -1, -1):\r\n            elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n            elementary[j][i] = -(matrix[j][i])\r\n            matrix = MultiplyMatrix(elementary, matrix)\r\n            result = MultiplyMatrix(elementary, result)\r\n\r\n    return result", "def update_params(self, mat):\n        assert mat.shape == self.shape\n        a, s, b = np.linalg.svd(mat, full_matrices=False)\n        self.u = a[:, :self.rank]*s[:self.rank]\n        self.v = b.T[:, :self.rank]", "def eigengap_method(a_, k=None):\n    n = a_.shape[0]\n\n    eigen_values = np.diagonal(a_)\n    # sorts eigen-values, and keeps the *indices* of the sorted array\n    sorted_indices = np.argsort(eigen_values)\n    if k is None:\n        # calculates the abs difference array for the first half of the eigen-values\n        delta_arr 
= np.diff(eigen_values[sorted_indices][:ceil(n / 2)])\n np.abs(delta_arr, out=delta_arr)\n # gets the first appearance of the maximum difference\n k = np.argmax(delta_arr) + 1\n return sorted_indices[:k]", "def solve(mat, y):\n reduced = gaussian_elim(mat)\n sol = np.zeros(shape=(mat.shape[0]))\n S = 0\n for i in reversed(range(len(sol))):\n sol[i] = (y[i]-S) / reduced[i][i]\n S += y[i] - S\n return sol", "def lb(K, KG, tol=0, sparse_solver=True, silent=False,\n num_eigvalues=25, num_eigvalues_print=5):\n msg('Running linear buckling analysis...', silent=silent)\n\n msg('Eigenvalue solver... ', level=2, silent=silent)\n\n k = min(num_eigvalues, KG.shape[0]-2)\n if sparse_solver:\n mode = 'cayley'\n try:\n msg('eigsh() solver...', level=3, silent=silent)\n eigvals, eigvecs = eigsh(A=KG, k=k,\n which='SM', M=K, tol=tol, sigma=1., mode=mode)\n msg('finished!', level=3, silent=silent)\n except Exception as e:\n warn(str(e), level=4, silent=silent)\n msg('aborted!', level=3, silent=silent)\n sizebkp = KG.shape[0]\n K, KG, used_cols = remove_null_cols(K, KG, silent=silent)\n msg('eigsh() solver...', level=3, silent=silent)\n eigvals, peigvecs = eigsh(A=KG, k=k,\n which='SM', M=K, tol=tol, sigma=1., mode=mode)\n msg('finished!', level=3, silent=silent)\n eigvecs = np.zeros((sizebkp, num_eigvalues),\n dtype=peigvecs.dtype)\n eigvecs[used_cols, :] = peigvecs\n\n else:\n size = KG.shape[0]\n K, KG, used_cols = remove_null_cols(K, KG, silent=silent)\n K = K.toarray()\n KG = KG.toarray()\n msg('eigh() solver...', level=3, silent=silent)\n eigvals, peigvecs = eigh(a=KG, b=K)\n msg('finished!', level=3, silent=silent)\n eigvecs = np.zeros((size, num_eigvalues), dtype=peigvecs.dtype)\n eigvecs[used_cols, :] = peigvecs[:, :num_eigvalues]\n\n eigvals = -1./eigvals\n\n eigvals = eigvals\n eigvecs = eigvecs\n\n msg('finished!', level=2, silent=silent)\n\n msg('first {0} eigenvalues:'.format(num_eigvalues_print), level=1,\n silent=silent)\n\n for eig in eigvals[:num_eigvalues_print]:\n msg('{0}'.format(eig), level=2, silent=silent)\n\n return eigvals, eigvecs", "def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # QRFactorization\n Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')\n\n if np.linalg.norm(R[-1, :], np.inf) < tol:\n warn('Singular Jacobian matrix. 
Using SVD decomposition to ' +\n 'perform the factorizations.')\n return svd_factorization_projections(A, m, n,\n orth_tol,\n max_refin,\n tol)\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v = np.zeros(m)\n v[P] = aux2\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(z)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v[P] = aux2\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n z = np.zeros(m)\n z[P] = aux2\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = Q inv(R.T) P.T x\n aux1 = x[P]\n aux2 = scipy.linalg.solve_triangular(R, aux1,\n lower=False,\n trans='T')\n z = Q.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def _matvec(x):\n return _normal_matvec(matvec, x)", "def matrix_eig(\n self,\n chis=None,\n eps=0,\n print_errors=\"deprecated\",\n hermitian=False,\n break_degenerate=False,\n degeneracy_eps=1e-6,\n sparse=False,\n trunc_err_func=None,\n evenTrunc = False,\n ):\n if print_errors != \"deprecated\":\n msg = (\n \"The `print_errors` keyword argument has been deprecated, \"\n \"and has no effect. Rely instead on getting the error as a \"\n \"return value, and print it yourself.\"\n )\n warnings.warn(msg)\n # If chis is not specfied, there is no even truncation scheme; else, we\n # keep track of the chi we specfied\n if chis is None:\n evenTrunc = False\n else:\n try:\n chis = list(chis)\n except TypeError:\n chis = [chis]\n chiSpec = max(chis)\n chis = self._matrix_decomp_format_chis(chis, eps)\n maxchi = max(chis)\n assert self.defval == 0\n assert self.invar\n assert self.charge == 0\n assert self.dirs[0] + self.dirs[1] == 0\n assert set(zip(self.qhape[0], self.shape[0])) == set(\n zip(self.qhape[1], self.shape[1])\n )\n\n S_dtype = np.float_ if hermitian else np.complex_\n U_dtype = self.dtype if hermitian else np.complex_\n\n # Eigenvalue decompose each sector at a time.\n # While doing so, also keep track of a list of all eigenvalues, as well\n # as a heap that gives the negative of the absolute value of the\n # largest eigenvalue in each sector. 
These will be needed later when\n # deciding how to truncate the eigenvalues.\n eigdecomps = {}\n dims = {}\n minusabs_next_eigs = []\n all_eigs = []\n for k, v in self.sects.items():\n if 0 in v.shape:\n # This matrix is empty and trivial.\n shp = v.shape\n m = min(shp)\n u = np.empty((shp[0], m), dtype=U_dtype)\n s = np.empty((m,), dtype=S_dtype)\n eigdecomp = (s, u)\n else:\n if sparse and maxchi < min(v.shape) - 1:\n if hermitian:\n s, u = spsla.eighs(\n v, k=maxchi, return_eigenvectors=True\n )\n else:\n s, u = spsla.eigs(\n v, k=maxchi, return_eigenvectors=True\n )\n else:\n if hermitian:\n s, u = np.linalg.eigh(v)\n else:\n s, u = np.linalg.eig(v)\n order = np.argsort(-np.abs(s))\n s = s[order]\n u = u[:, order]\n s = s.astype(S_dtype)\n u = u.astype(U_dtype)\n eigdecomp = (s, u)\n eigdecomps[k] = eigdecomp\n dims[k] = 0\n all_eigs.append(s)\n if 0 not in s.shape:\n heapq.heappush(minusabs_next_eigs, (-np.abs(s[0]), k))\n try:\n all_eigs = np.concatenate(all_eigs)\n except ValueError:\n all_eigs = np.array((0,))\n\n if sparse:\n norm_sq = self.norm_sq()\n else:\n norm_sq = None\n\n # Figure out what bond dimension to truncate to, how this bond\n # dimension is distributed over the different sectors, and what the\n # truncation error is.\n chi, dims, rel_err = type(self)._find_trunc_dim(\n all_eigs,\n eigdecomps,\n minusabs_next_eigs,\n dims,\n chis=chis,\n eps=eps,\n break_degenerate=break_degenerate,\n degeneracy_eps=degeneracy_eps,\n trunc_err_func=trunc_err_func,\n norm_sq=norm_sq,\n )\n\n # truncate in both sectors evenly\n if evenTrunc and chiSpec == chi:\n # This piece of codes is only designed\n # with Z2 symmetry tensor in mind\n errmeg = \"The matrix should have two sectors (0,0) and (1,1).\"\n assert len(dims) == 2, errmeg\n if chiSpec % 2 == 0:\n dims[(0, 0)] = int(chiSpec / 2)\n dims[(1, 1)] = int(chiSpec / 2)\n else:\n dims[(0, 0)] = int((chiSpec + 1) / 2)\n dims[(1, 1)] = int((chiSpec - 1) / 2)\n\n # Truncate each block and create the dim for the new index.\n new_dim = []\n new_qim = []\n eigdecomps = {k: v for k, v in eigdecomps.items() if dims[k] > 0}\n for k, v in eigdecomps.items():\n d = dims[k]\n if d > 0:\n new_dim.append(d)\n new_qim.append(k[0])\n eigdecomps[k] = (v[0][:d], v[1][:, :d])\n else:\n del eigdecomps[k]\n\n # Initialize S and U.\n d = self.dirs[0]\n S = type(self)(\n [new_dim],\n qhape=[new_qim],\n dirs=[d],\n qodulus=self.qodulus,\n dtype=S_dtype,\n invar=False,\n charge=0,\n )\n U = type(self)(\n [self.shape[0], new_dim],\n qhape=[self.qhape[0], new_qim],\n dirs=[d, -d],\n qodulus=self.qodulus,\n dtype=U_dtype,\n charge=0,\n )\n\n # Set the blocks of U, S and V.\n for k, v in eigdecomps.items():\n S[(k[0],)] = v[0]\n k_U = (k[0], k[0])\n U[k_U] = v[1]\n\n return S, U, rel_err", "def svd_compress_gs(mat, k):\n U, singular_vals, V = np.linalg.svd(mat)\n rank = len(singular_vals)\n print(\"Image rank %r\" % rank)\n if k > rank:\n print(\"k is larger than rank of image %r\" % rank)\n return mat\n # take columns less than k from U\n U_p = U[:, :k]\n # take rows less than k from V\n V_p = V[:k, :]\n # build the new S matrix with top k diagnal elements\n S_p = np.zeros((k, k), mat.dtype)\n for i in range(k):\n S_p[i][i] = singular_vals[i]\n print(\"U_p shape {0}, S_p shape {1}, V_p shape {2}\".format(\n U_p.shape, S_p.shape, V_p.shape))\n compressed = np.dot(np.dot(U_p, S_p), V_p)\n ss = ssim(mat, compressed,\n dynamic_range=compressed.max() - compressed.min())\n print(\"Strucural similarity: %r\" % ss)\n return U_p, S_p, V_p", "def svd_compress_gs(mat, 
k):\n    U, singular_vals, V = np.linalg.svd(mat)\n    rank = len(singular_vals)\n    print(\"Image rank %r\" % rank)\n    if k > rank:\n        print(\"k is larger than rank of image %r\" % rank)\n        return mat\n    # take columns less than k from U\n    U_p = U[:, :k]\n    # take rows less than k from V\n    V_p = V[:k, :]\n    # build the new S matrix with top k diagonal elements\n    S_p = np.zeros((k, k), mat.dtype)\n    for i in range(k):\n        S_p[i][i] = singular_vals[i]\n    print(\"U_p shape {0}, S_p shape {1}, V_p shape {2}\".format(\n        U_p.shape, S_p.shape, V_p.shape))\n    compressed = np.dot(np.dot(U_p, S_p), V_p)\n    ss = ssim(mat, compressed,\n              dynamic_range=compressed.max() - compressed.min())\n    print(\"Structural similarity: %r\" % ss)\n    return U_p, S_p, V_p, ss", "def invert_L1_svd():", "def eigvals(self):\n        raise NotImplementedError", "def geteigenspinors(spinorsize):\n\n    e = ()\n    myl = 1/2\n    # loop-and-a-half (TM)\n    while True:\n        for m in np.arange(1/2, myl+1, 1):\n            for ms in [1, -1]:\n                for s in [-1, 1]:\n                    e += ((myl, m*ms, s),)\n        myl += 1\n        if len(e) >= spinorsize:\n            break\n\n    e = e[:spinorsize]\n    eigenspinors = sorted(e, key=itemgetter(2, 0, 1))\n    return eigenspinors", "def check(mat, otp):\n    prd = mat*otp\n    eigval = prd[0]/otp[0]\n    print 'computed eigenvalue :' , eigval\n    [eigs, vecs] = np.linalg.eig(mat)\n    abseigs = list(abs(eigs))\n    ind = abseigs.index(max(abseigs))\n    print ' largest eigenvalue :', eigs[ind]", "def _implicitly_restarted_arnoldi(jax: types.ModuleType) -> Callable:\n\n  arnoldi_fact = _generate_arnoldi_factorization(jax)\n\n  # ######################################################\n  # ####### NEW SORTING FUNCTIONS INSERTED HERE #########\n  # ######################################################\n  @functools.partial(jax.jit, static_argnums=(1,))\n  def LR_sort(evals, p):\n    inds = np.argsort(jax.numpy.real(evals), kind='stable')[::-1]\n    shifts = evals[inds][-p:]\n    return shifts, inds\n\n  @functools.partial(jax.jit, static_argnums=(1,))\n  def LM_sort(evals, p):\n    inds = np.argsort(jax.numpy.abs(evals), kind='stable')[::-1]\n    shifts = evals[inds][-p:]\n    return shifts, inds\n\n  # #######################################################\n  # #######################################################\n  # #######################################################\n  @functools.partial(jax.jit, static_argnums=(4, 5, 6))\n  def shifted_QR(Vm, Hm, fm, evals, k, p, which, res_thresh):\n    funs = [LR_sort, LM_sort]\n    shifts, _ = funs[which](evals, p)\n    # compress to k = numeig\n    q = jax.numpy.zeros(Hm.shape[0])\n    q = jax.ops.index_update(q, jax.ops.index[-1], 1)\n    m = Hm.shape[0]\n\n    for shift in shifts:\n      Qj, _ = jax.numpy.linalg.qr(Hm - shift * jax.numpy.eye(m))\n      Hm = Qj.T.conj() @ Hm @ Qj\n      Vm = Qj.T @ Vm\n      q = q @ Qj\n\n    fk = Vm[k, :] * Hm[k, k - 1] + fm * q[k - 1]\n    Vk = Vm[0:k, :]\n    Hk = Hm[0:k, 0:k]\n    H = jax.numpy.zeros((k + p + 1, k + p), dtype=fm.dtype)\n    H = jax.ops.index_update(H, jax.ops.index[0:k, 0:k], Hk)\n    Z = jax.numpy.linalg.norm(fk)\n    v = fk / Z\n    krylov_vectors = jax.numpy.zeros((k + p + 1, Vm.shape[1]), dtype=fm.dtype)\n    krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[0:k, :],\n                                          Vk)\n    krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[k:], v)\n    Z = jax.numpy.linalg.norm(fk)\n    #if fk is a zero-vector then arnoldi has exactly converged.\n    #use small threshold to check this\n    return krylov_vectors, H, fk, Z < res_thresh\n\n  @functools.partial(jax.jit, static_argnums=(2,))\n  def update_data(Vm_tmp, Hm_tmp, numits):\n    Vm = Vm_tmp[0:numits, :]\n    Hm = Hm_tmp[0:numits, 0:numits]\n    
fm = Vm_tmp[numits, :] * Hm_tmp[numits, numits - 1]\n return Vm, Hm, fm\n\n @functools.partial(jax.jit, static_argnums=(3,))\n def get_vectors(Vm, unitary, inds, numeig):\n\n def body_vector(i, vals):\n krv, unitary, states, inds = vals\n dim = unitary.shape[1]\n n, m = jax.numpy.divmod(i, dim)\n states = jax.ops.index_add(states, jax.ops.index[n, :],\n krv[m, :] * unitary[m, inds[n]])\n return [krv, unitary, states, inds]\n\n state_vectors = jax.numpy.zeros([numeig, Vm.shape[1]], dtype=Vm.dtype)\n _, _, state_vectors, _ = jax.lax.fori_loop(\n 0, numeig * Vm.shape[0], body_vector,\n [Vm, unitary, state_vectors, inds])\n state_norms = jax.numpy.linalg.norm(state_vectors, axis=1)\n state_vectors = state_vectors / state_norms[:, None]\n return state_vectors\n\n\n def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n \"\"\"\n Implicitly restarted arnoldi factorization of `matvec`. The routine\n finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec`\n by alternating between compression and re-expansion of an initial\n `num_krylov_vecs`-step Arnoldi factorization.\n\n Note: The caller has to ensure that the dtype of the return value\n of `matvec` matches the dtype of the initial state. Otherwise jax\n will raise a TypeError.\n\n Args:\n matvec: A callable representing the linear operator.\n args: Arguments to `matvec`. `matvec` is called with\n `matvec(x, *args)` with `x` the input array on which\n `matvec` should act.\n initial_state: An starting vector for the iteration.\n num_krylov_vecs: Number of krylov vectors of the arnoldi factorization.\n numeig: The number of desired eigenvector-eigenvalue pairs.\n which: Which eigenvalues to target. Currently supported: `which = 'LR'`\n or `which = 'LM'`.\n eps: Convergence flag. 
If the norm of a krylov vector drops below `eps`\n the iteration is terminated.\n maxiter: Maximum number of (outer) iteration steps.\n Returns:\n eta, U: Two lists containing eigenvalues and eigenvectors.\n \"\"\"\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]\n\n return implicitly_restarted_arnoldi_method", "def compact_svd(A, tol=1e-6):\r\n eigs, vecs = la.eig(A.conj().T@A)\r\n svs = np.sqrt(eigs)\r\n #sort eigenvalues and eigenvectors accordingly\r\n sorter = list(zip(svs,vecs.T))\r\n sorter.sort(reverse=True, key=lambda tup: tup[0])\r\n svs = [x[0] for x in sorter]\r\n vecs = [x[1] for x in sorter]\r\n #find number of nonzero eigenvalues\r\n r_not = svs.count(0)\r\n r = len(svs) - r_not\r\n svs_1 = np.array(svs[:r])\r\n vecs_1 = np.array(vecs[:r])\r\n u_1 = (A@vecs_1)/svs_1\r\n\r\n return u_1, svs_1, vecs_1.conj().T", "def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n 
M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1. / tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv", "def parcellate_PCA(matrix, mat_type, path_pref, rot='quartimax', eigval_thr=1):\n if rot == 'quartimax':\n rotation = 0.0\n elif rot == 'varimax':\n rotation = 1.0\n else:\n raise Exception('This factor rotation type is not handled')\n # To have more than just a reference of matrix in mat\n mat = matrix + 0\n # Get the eigenvalues and eigenvectors of the\n # mat = cov(2D_connectivity_matrix)\n # gamma_eigval, omega_eigvec = np.linalg.eig(mat)\n u, gamma_eigval, omega = np.linalg.svd(mat, full_matrices=True)\n # SVD third output is the transposed of the eigen vectors\n omega_eigvec = omega.T\n if mat_type == \"covariance\":\n comp_thr = eigval_thr * np.mean(gamma_eigval)\n elif mat_type == \"correlation\":\n comp_thr = eigval_thr\n else:\n raise Exception('This factor rotation type is not handled')\n\n # Sort the Gamma_eigval in decreasing order of magnitude, and sort\n # the order of the eigenvectors accordingly\n indsort = np.argsort(gamma_eigval)[::-1]\n\n # The SSQ_loadings is equal to the eigenvalues of the SM (cov(data))\n # They correspond to the values in the 'Extraction Sum of Squared\n # loadings' in SPSS\n gamma_eigval_sort = gamma_eigval[indsort]\n omega_eigvec_sort = omega_eigvec[:,indsort]\n\n # We keep only the components which have an eigenvalue above comp_thr\n keep = np.where(gamma_eigval_sort > comp_thr)\n ind = 0\n while gamma_eigval_sort[ind] > comp_thr:\n ind += 1\n gamma_eigval_sort = gamma_eigval_sort[:ind]\n omega_eigvec_sort = omega_eigvec_sort[:,:ind]\n\n SSQ_loadings = gamma_eigval_sort\n # The matrix of factor laodings (like in SPSS)\n Lambda = omega_eigvec_sort.dot(np.diag(np.sqrt(np.abs(gamma_eigval_sort))))\n print(pd.DataFrame(Lambda))\n # SPSS: The rescaled loadings matrix\n Lambda_rescaled = np.dot(np.sqrt(np.diag(np.diag(cov))), Lambda)\n\n # SPSS: communalities\n h = [np.sum(gamma_eigval*(omega_eigvec[i]**2)) for i in range(len(omega_eigvec))]\n\n lambda_rot = rotate_components(Lambda, q = 1000, gamma=rotation)\n print(pd.DataFrame(lambda_rot))\n # Get sum of squared loadings\n SSQ_loadings_rot = np.sum(lambda_rot**2, axis=0)\n print(pd.DataFrame(SSQ_loadings_rot))\n # Sort the SSQ_loadings_rot in descending order to prepare for the\n # power fitting\n SSQ_loadings_rot_sorted = np.sort(SSQ_loadings_rot)\n SSQ_loadings_rot_sorted_descending = SSQ_loadings_rot_sorted[::-1]\n\n # --------------------------------------------------------------------------\n # (5) Fit a power law to the sorted SSQ_Loadings_rot to Estimate\n # the number of relevant factors Npc using the fitpower function in\n # do_PCA_utilities.py (only the first 50 SSQ_Loadings are considered).\n # Returns the number of components to consider: Npc\n # --------------------------------------------------------------------------\n npc = fit_power(SSQ_loadings_rot_sorted_descending)\n print('\\n Power fitting of the eigenvalues associated with the rotated')\n print('loadings estimated the presence of ' + 
str(npc) + ' clusters \\n')\n\n\n # --------------------------------------------------------------------------\n # (6) Rotate Lambda_Npc = Lambda[:,Npc]\n # Returns the final Factor loadings, defining the clusters\n # --------------------------------------------------------------------------\n lambda_npc = Lambda[:, 0:npc]\n\n return (lambda_rot, npc)\n # return (lambda_npc, npc)", "def get_leftLaInv(k_list, l_list, m_list, mult_table_vals, n_dims, gradeList):\n\n identity = np.zeros((n_dims,))\n identity[gradeList.index(0)] = 1\n\n @numba.njit\n def leftLaInvJIT(value):\n intermed = np.zeros((n_dims, n_dims))\n for test_ind, i in enumerate(k_list):\n j = l_list[test_ind]\n k = m_list[test_ind]\n intermed[i, j] += mult_table_vals[test_ind] * value[k]\n intermed = np.transpose(intermed)\n if abs(linalg.det(intermed)) < _eps:\n raise ValueError(\"multivector has no left-inverse\")\n sol = linalg.solve(intermed, identity)\n return sol\n\n return leftLaInvJIT", "def get_eigen_value(A, v):\n Av = np.dot(A, v)\n print(\"Mag v, should be 1:\", mag(v))\n lmb = mag(Av) / mag(v)\n return lmb", "def put_eigvecs(self, dest):\n if parallel.is_rank_zero():\n self.put_array(self.eigvecs, dest)\n parallel.barrier()", "def eig_vals_vects_hermitian(matrix, sort='imag'):\n # if len(matrix) < 10:\n # print '\\nFinding eigvals, matrix = ', matrix\n eigval, eigvect = np.linalg.eig(matrix)\n # use imaginary part to get ascending order of eigvals\n if sort == 'imag':\n si = np.argsort(np.imag(eigval))\n elif sort == 'real':\n si = np.argsort(np.real(eigval))\n else:\n si = np.arange(len(eigval))\n\n eigvect = np.array(eigvect)\n eigvect_out = eigvect.T[si]\n eigval_out = eigval[si]\n if len(eigval_out) < 10:\n print 'eigvals return as =', eigval_out\n return eigval_out, eigvect_out", "def svd(matrix, approach):\n\n # Getting the eigenvalues and vectors of transpose(A) * A for V and Sigma\n a = mat_multiply(transpose(matrix), matrix)\n if approach == \"qr\":\n V, sigma, iterations = qr_eig(a)\n else:\n V, sigma, iterations = eig(a)\n\n # Sorting singular values and the colums of V accordingly\n V = transpose(V)\n\n singular_values = list()\n sorted_V = list()\n\n r = 0\n for i in range(rows(sigma)):\n singular_values.append([(sigma[i][i]), i])\n if sigma[i][i] > math.exp(-8):\n r += 1\n\n singular_values.sort(key=first_item, reverse=True)\n\n sigma_r = eye(r)\n sigma_r_inv = eye(r)\n\n # Constructing the sorted U and sigma matrices\n i, j = 0, 0\n for value in singular_values:\n if value[0] > math.exp(-8):\n sorted_V.append(V[value[1]])\n sigma_r[j][j] = value[0] ** (1 / 2)\n sigma_r_inv[j][j] = 1 / (value[0] ** (1 / 2))\n j += 1\n i += 1\n\n # Constructing U by multiplying V and sigma inverse\n sorted_U = mat_multiply(mat_multiply(matrix, transpose(sorted_V)), sigma_r_inv)\n\n return (sorted_U, sigma_r, sorted_V, r, iterations)", "def MATSOL(N,A):\r\n\r\n X = np.zeros((N+1),dtype=float) # X.shape = N+1\r\n NROW = np.arange(0,N+1,dtype=int) # NROW.shape = N+1\r\n\r\n for i in np.arange(N): # loop through rows\r\n AMAX = np.max(np.abs(A[NROW[i:],i])) # max value for column, all later rows\r\n ip = np.argmax(np.abs(A[NROW[i:],i]))+i # index of above\r\n \r\n if(abs(AMAX) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n if(NROW[i] != NROW[ip]): # swap rows\r\n NC = NROW[i].copy()\r\n NROW[i] = NROW[ip].copy()\r\n NROW[ip] = NC.copy()\r\n \r\n \r\n COEF = A[NROW[i+1:],i]/A[NROW[i],i] # normalize column values by maximum magnitude value (AMAX > 0)\r\n A[NROW[i+1:],i+1:] = 
A[NROW[i+1:],i+1:] - np.dot(COEF[:,None],A[NROW[i],i+1:][None,:]) # normalize/reduce matrix\r\n \r\n \r\n if(abs(A[NROW[N],N]) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n X[N] = A[NROW[N],N+1]/A[NROW[N],N] # downstream edge\r\n i = N-1\r\n while (i >= 0):\r\n# SUMM = 0.0\r\n# j = i+1\r\n \r\n SUMM = np.sum(A[NROW[i],i+1:N+1]*X[i+1:N+1]) # do not include final column\r\n \r\n# while (j <= N-1):\r\n# SUMM = A[NROW[i],j]*X[j] + SUMM\r\n# j = j+1\r\n # print(SUMM,SUMM2)\r\n \r\n X[i] = (A[NROW[i],N+1] - SUMM)/A[NROW[i],i]\r\n i = i-1\r\n return X", "def find_min_norm_element_FW(vecs):\n # Solution lying at the combination of two points\n dps = {}\n init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)\n\n n=len(vecs)\n sol_vec = np.zeros(n)\n sol_vec[init_sol[0][0]] = init_sol[1]\n sol_vec[init_sol[0][1]] = 1 - init_sol[1]\n\n if n < 3:\n # This is optimal for n=2, so return the solution\n return sol_vec , init_sol[2]\n\n iter_count = 0\n\n grad_mat = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n grad_mat[i,j] = dps[(i, j)]\n\n while iter_count < MinNormSolver.MAX_ITER:\n t_iter = np.argmin(np.dot(grad_mat, sol_vec))\n\n v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))\n v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])\n v2v2 = grad_mat[t_iter, t_iter]\n\n nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)\n new_sol_vec = nc*sol_vec\n new_sol_vec[t_iter] += 1 - nc\n\n change = new_sol_vec - sol_vec\n if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:\n return sol_vec, nd\n sol_vec = new_sol_vec", "def _compute_primalEigenmatrix(self, expand=False, factor=False,\n simplify=False):\n if self._has(\"P\"):\n return\n if self._has(\"p\"):\n self._.P = self._compute_eigenmatrix(self._.p, expand=expand,\n factor=factor,\n simplify=simplify)\n else:\n if not self._has(\"Q\"):\n self.dualEigenmatrix(expand=expand, factor=factor,\n simplify=simplify)\n self._.P = self._.n * self._.Q.inverse()\n self._check_eigenmatrices()", "def compute_kappa_map(lens_vec, size, size_map):\n\n par_file_name = \"kappa_map.par\"\n fit_file_name = \"kappa_map.fits\"\n z_source = 2.0\n size_map = size_map * 1.05\n\n file_map = open(par_file_name, 'w')\n\n conv_lens_vec(lens_vec)\n\n file_map.write(\"runmode\\n\" )\n file_map.write(\" reference 3 0 0\\n\")\n file_map.write(\" verbose 0\\n\" )\n file_map.write(\" mass 3 \" + str(size) + \" \" + \\\n str(lens_vec[0][\"z_lens\"]) + \" \" + fit_file_name + \"\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"source\\n\")\n file_map.write(\" z_source \" + str(z_source) + \"\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"grille\\n\")\n file_map.write(\" nombre 128\\n\")\n file_map.write(\" nlens 4\\n\")\n file_map.write(\" nlens_crit 1\\n\")\n file_map.write(\" nlens_opt 0\\n\")\n file_map.write(\" polaire 1\\n\")\n file_map.write(\" end\\n\")\n\n\n for i in range(len(lens_vec)):\n string_out = 'potential ' + str(i) + '\\n'\n file_map.write(string_out)\n #print string_out,\n for keys in lens_vec[i].keys():\n string_out = ' ' + keys + ' ' + str(lens_vec[i][keys]) + \\\n '\\n'\n #print string_out,\n file_map.write(string_out)\n file_map.write(' end\\n')\n\n file_map.write(\"cosmology\\n\")\n file_map.write(\" H0 70.0\\n\")\n file_map.write(\" omega 0.3\\n\")\n file_map.write(\" lambda 0.7\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"champ\\n\")\n file_map.write(\" xmin -101\\n\")\n file_map.write(\" xmax 100\\n\")\n file_map.write(\" ymin -101\\n\")\n file_map.write(\" ymax 100\\n\")\n 
file_map.write(\" dmax \" + str(size_map) + \"\\n\")\n file_map.write(\" end\\n\")\n file_map.write(\"fini\\n\")\n\n file_map.close()", "def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)", "def KLT(data):\n\n eigs = []\n N = 64 # dimension of each outer product matrix\n for sig in data:\n cur_eigs = []\n cov_matrix = np.zeros((N, N), dtype=np.complex128)\n o = np.empty((N, N), dtype=np.complex128)\n for i in range(len(data[0])- N):\n cov_matrix += np.outer(sig[i:i+N], np.conj(sig[i:i+N]), o)\n cov_matrix /= N\n eigv = scipy.linalg.eigvalsh(cov_matrix, eigvals=(63, 63))\n\n eigs.append(max(eigv) / np.trace(cov_matrix) / N)\n return np.real(np.array(eigs))", "def test_eigenvalues_of_too_few_points_results_in_0():\n a = np.array([5])\n pc = create_point_cloud(a, a, a)\n\n compute_features(pc, [[0]], pc, [\"eigenv_1\", \"eigenv_2\", \"eigenv_3\"], InfiniteCylinder(5))\n\n eigen_val_123 = np.array([pc[keys.point]['eigenv_{}'.format(i)]['data'] for i in [1, 2, 3]])\n assert not np.any(np.isnan(eigen_val_123))\n assert not np.any(np.isinf(eigen_val_123))", "def eigs(m, n, y, cov_fun, lw, loo, k, ylim, figsize, seed):\n # Setup covariance\n np.random.seed(seed)\n T = y * n\n\n names = ['true', 'sample', 'lw_oracle', 'isolw_oracle', 'kfold', 'isokfold',\n 'mv_oracle', 'isonlsq_mv_oracle', 'isonlsq_mv_kfold']\n if lw:\n names += ['lw']\n if loo:\n names += ['loo', 'isoloo']\n dfs = {\n name: pd.DataFrame(np.zeros((m, n)))\n for name in names\n }\n\n pbar = tqdm(total=m)\n for j in range(m):\n # Build Model\n if cov_fun in ['slr', 'factor']:\n fm_seed = np.random.randint(1, 2**32 - 1)\n Sigma, tmp = cov_functions[cov_fun](n, seed=fm_seed)\n else:\n Sigma, tmp = cov_functions[cov_fun](n)\n dfs['true'].iloc[j, :] = tau = annualize_vol(tmp / n)\n\n if ylim is None:\n ylim = (0., 2 * np.max(tau))\n\n # Generate data\n X = sample(Sigma, T)\n S = cov(X)\n lam, U = eig(S)\n\n # Note: eigenvalues need to be scaled by 1 / n to convert to variance\n # Sample covariance\n dfs['sample'].iloc[j, :] = annualize_vol(lam / n)\n\n # Oracle LW NLS shrinkage\n _, tmp = nls_oracle(X, S, U, Sigma)\n 
dfs['lw_oracle'].iloc[j, :] = annualize_vol(tmp / n)\n tmp = isotonic_regression(tmp)\n dfs['isolw_oracle'].iloc[j, :] = annualize_vol(tmp / n)\n\n # LW NLS shrinkage\n if lw:\n S_lw = nlshrink_covariance(X, centered=True)\n tmp = eig(S_lw, return_eigenvectors=False)\n dfs['lw'].loc[j, :] = annualize_vol(tmp / n)\n\n # LOO LW NLS shrinkage\n if loo:\n _, tmp = nls_loo_cv(X, S, U)\n dfs['loo'].iloc[j, :] = annualize_vol(tmp / n)\n tmp = isotonic_regression(tmp)\n dfs['isoloo'].iloc[j, :] = annualize_vol(tmp / n)\n\n # K-fold LW NLS shrinkage\n _, tmp = nls_kfold_cv(X, S, U, k)\n dfs['kfold'].iloc[j, :] = annualize_vol(tmp / n)\n tmp = isotonic_regression(tmp)\n dfs['isokfold'].iloc[j, :] = annualize_vol(tmp / n)\n\n # MinVar NLS shrinkage\n _, tmp = minvar_nls_oracle(X, S, lam, U, Sigma)\n dfs['mv_oracle'].iloc[j, :] = annualize_vol(tmp / n)\n # Note: Applying isotonic regression after solving for the oracle values\n # is consistently way worse than solving the constrained LS problem so\n # it is omitted.\n # lam_1, lam_n = lam[0], lam[-1]\n # tmp = isotonic_regression(tmp, y_min=lam_n, y_max=lam_1)\n # dfs['isomv_oracle'].iloc[j, :] = annualize_vol(tmp / n)\n _, tmp = minvar_nls_oracle(X, S, lam, U, Sigma, isotonic=True)\n dfs['isonlsq_mv_oracle'].iloc[j, :] = annualize_vol(tmp / n)\n\n _, tmp = minvar_nls_kfold(X, S, lam, U, k)\n dfs['isonlsq_mv_kfold'].iloc[j, :] = annualize_vol(tmp / n)\n\n pbar.update()\n\n # Generate band plots for various shrinkage methods\n fig, (ax0, ax1, ax2) = plt.subplots(figsize=figsize, ncols=3)\n band_plot(dfs['true'], ax0, 'true')\n band_plot(dfs['true'], ax1, 'true')\n band_plot(dfs['true'], ax2, 'true')\n\n band_plot(dfs['sample'], ax0, 'sample')\n band_plot(dfs['sample'], ax1, 'sample')\n band_plot(dfs['sample'], ax2, 'sample')\n\n if lw:\n band_plot(dfs['lw'], ax1, 'lw')\n\n if loo:\n band_plot(dfs['loo'], ax0, 'loo')\n band_plot(dfs['isoloo'], ax1, 'isoloo')\n\n band_plot(dfs['kfold'], ax0, 'kfold')\n band_plot(dfs['isokfold'], ax1, 'isokfold')\n\n band_plot(dfs['mv_oracle'], ax0, 'mv_oracle')\n # band_plot(dfs['isomv_oracle'], ax1, 'isomv_oracle')\n band_plot(dfs['isonlsq_mv_oracle'], ax2, 'isonlsq_mv_oracle')\n band_plot(dfs['isonlsq_mv_kfold'], ax2, 'isonlsq_mv_kfold')\n\n ax0.legend()\n ax1.legend()\n ax2.legend()\n ax0.set_ylim(*ylim)\n ax1.set_ylim(*ylim)\n ax2.set_ylim(*ylim)\n\n plt.show()", "def deflated_power_iteration(operator,\n num_eigenthings=10,\n power_iter_steps=20,\n power_iter_err_threshold=1e-4,\n momentum=0.0,\n use_gpu=True,\n to_numpy=True):\n eigenvals = []\n eigenvecs = []\n current_op = operator\n prev_vec = None\n\n def _deflate(x, val, vec):\n return val * vec.dot(x) * vec\n\n for _ in range(num_eigenthings):\n eigenval, eigenvec = power_iteration(current_op, power_iter_steps,\n power_iter_err_threshold,\n momentum=momentum,\n use_gpu=use_gpu,\n init_vec=prev_vec)\n\n def _new_op_fn(x, op=current_op, val=eigenval, vec=eigenvec):\n return op.apply(x) - _deflate(x, val, vec)\n current_op = LambdaOperator(_new_op_fn, operator.size)\n prev_vec = eigenvec\n eigenvals.append(eigenval)\n eigenvec = eigenvec.cpu()\n if to_numpy:\n eigenvecs.append(eigenvec.numpy())\n else:\n eigenvecs.append(eigenvec)\n\n eigenvals = np.array(eigenvals)\n eigenvecs = np.array(eigenvecs)\n\n # sort them in descending order\n sorted_inds = np.argsort(eigenvals)\n eigenvals = eigenvals[sorted_inds][::-1]\n eigenvecs = eigenvecs[sorted_inds][::-1]\n return eigenvals, eigenvecs", "def v(resistances, r_i, applied_voltages, **kwargs):\n if r_i.word_line > 0 
or r_i.bit_line > 0:\n g = fill.g(resistances, r_i)\n i = fill.i(applied_voltages, resistances, r_i)\n\n utils.message('Started solving for v.', **kwargs)\n v_matrix = linalg.spsolve(g.tocsc(), i)\n utils.message('Solved for v.', **kwargs)\n\n # if `num_examples == 1`, it can result in 1D array.\n if v_matrix.ndim == 1:\n v_matrix = v_matrix.reshape(v_matrix.shape[0], 1)\n\n # if one of the interconnect resistances is zero, only half of the\n # matrix_v had to be solved. The other half can be filled without\n # solving because the node voltages are known.\n if r_i.word_line == 0:\n new_v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n new_v_matrix[:resistances.size, ] = np.repeat(\n applied_voltages, resistances.shape[1], axis=0)\n new_v_matrix[resistances.size:, ] = v_matrix\n v_matrix = new_v_matrix\n if r_i.bit_line == 0:\n new_v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n new_v_matrix[:resistances.size, ] = v_matrix\n v_matrix = new_v_matrix\n else:\n # if both interconnect resistances are zero, all node voltages are\n # known.\n v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n v_matrix[:resistances.size, ] = np.repeat(\n applied_voltages, resistances.shape[1], axis=0)\n\n return v_matrix", "def VFI(method) :\n iteration=0 # Iteration Counter\n converged = 0 # Convergence Flag|\n \n#----- Initial Settings \n v_update = zeros(n_grid)\n v_func = empty(n_grid)\n k_next_vec = empty(n_grid)\n run_time = empty(2)\n \n def obj(k_next) :\n \"\"\"\n This function is used in value function iteration.\n It represents the objective function to be maximized for one node (state) of current capitals.\n Resulting value is maximized one corresponding to next period's capital as a maximizer. 
\n        Next period's value is computed by interpolation.\n        \n        Input : k_next (next period's capital)\n        \n        Output : value_vec (maximized value resulting from choosing optimal capital in the next period)\n        \"\"\" \n        \n        if method==1 :\n            value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n        elif method==2 :\n            value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n        elif method==3 :\n            value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n        \n        return value_vec\n\n#----- Value function iteration\n    start = time.time() # start time\n    while converged==0 :\n        index = 0\n        for k_current in k_grid :\n            k_next = fminbound(obj,k_grid[0],k_grid[-1])\n            v_func[index] = (-1) * obj(k_next)\n            k_next_vec[index] = k_next\n            index = index + 1\n        dist = abs(max(v_func - v_update))\n        if dist<tol :\n            converged = 1\n            v_k, g_k = v_func, k_next_vec\n        v_update = v_func\n        print \"Iteration : \",iteration,\"\",\"Distance : \",dist # convergence process\n        iteration = iteration + 1\n        v_func = empty(n_grid) \n        k_next_vec = empty(n_grid)\n        \n    end = time.time() # end time\n    run_time[0],run_time[1] = runtime_cal(start,end) # total running time\n    \n    return v_k, g_k, run_time, iteration", "def SVM_train(Ktrain,y,lbda_vec):\r\n    n = Ktrain.shape[0]\r\n    for idx, lbda in enumerate(lbda_vec): \r\n        C = 1/(2*lbda*n)\r\n        P = matrix(Ktrain, tc=\"d\")\r\n        q = - matrix(y,tc=\"d\")\r\n        G = matrix( np.concatenate( (np.diagflat(y) , -np.diagflat(y) ), axis=0 ),tc=\"d\" )\r\n        h1 = C * np.ones((n,1))\r\n        h2 = np.zeros((n,1)) \r\n        h = matrix(np.concatenate((h1,h2),axis=0))\r\n\r\n        solvers.options['show_progress'] = False\r\n        \r\n        sol = solvers.qp(P,q,G,h) \r\n        a = np.asarray(sol['x'])\r\n\r\n        #alpha is sparse\r\n        a[np.where(np.abs(a) < 1e-4)] = 0\r\n        y_svm = np.dot(Ktrain,a)\r\n\r\n        print(\"Accuracy for lambda = \" + str(lbda) + \" :\", accuracy(y_svm,y))" ]
[ "0.61382663", "0.5593468", "0.55151695", "0.5378377", "0.5306997", "0.53011614", "0.5231085", "0.5190737", "0.5158738", "0.5093091", "0.5071404", "0.50447", "0.50041604", "0.49797606", "0.49743566", "0.49714258", "0.49670354", "0.49553815", "0.4946754", "0.49402496", "0.4939668", "0.49330664", "0.4910911", "0.48993132", "0.48936844", "0.48664683", "0.48374194", "0.4837403", "0.48193133", "0.48023084", "0.47734773", "0.47506717", "0.4749387", "0.47327554", "0.4726291", "0.47206303", "0.47160584", "0.47111475", "0.47080427", "0.46977293", "0.46948674", "0.4690161", "0.46504107", "0.464567", "0.4627226", "0.4616866", "0.46164426", "0.46129793", "0.46061403", "0.4593014", "0.45873624", "0.45838776", "0.45761067", "0.4575948", "0.45644867", "0.45550233", "0.4549464", "0.45489505", "0.45484704", "0.45467982", "0.4541989", "0.4540561", "0.45381764", "0.45288363", "0.45246932", "0.45084968", "0.45030636", "0.45013613", "0.44964477", "0.44955197", "0.44921014", "0.4490926", "0.44861645", "0.4481057", "0.44769377", "0.4476315", "0.44679642", "0.44627097", "0.4460353", "0.4459247", "0.44572392", "0.44546878", "0.44521707", "0.44442293", "0.44432583", "0.4431595", "0.44264367", "0.44205582", "0.44203016", "0.44098395", "0.44035307", "0.44001484", "0.43963146", "0.43949682", "0.4394933", "0.43911093", "0.43860435", "0.43830052", "0.4382624", "0.43761355" ]
0.67910457
0
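Two details of the crossbar-solver snippet above deserve a tiny illustration: scipy's sparse solver returns a 1-D array for a 1-D right-hand side (hence that snippet's reshape guard), and the conductance matrix is converted to CSC first, as the snippet's g.tocsc() does. The 2x2 system below is made up; g and i merely stand in for the outputs of fill.g and fill.i:

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve

g = csc_matrix(np.array([[3.0, -1.0], [-1.0, 2.0]]))
i = np.array([1.0, 0.0])
v = spsolve(g, i)   # 1-D result for a 1-D rhs: array([0.4, 0.2])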
Allows jax (the module) to be passed in as an argument rather than imported, since importing it directly breaks the build. In addition, it instantiates certain of the enclosed functions as concrete objects within a dict, allowing them to be cached. This avoids spurious recompilations that would otherwise be triggered by attempts to pass fresh callables into jitted functions. The important function here is functions["gmres_m"], which implements GMRES. The other functions are exposed only for testing.
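Before the full solver, a minimal sketch of the dependency-injection-plus-caching pattern described above; the names make_wrapper and scale are hypothetical, chosen only for illustration:

import types
from typing import Callable, Dict

def make_wrapper(np_module: types.ModuleType) -> Dict[str, Callable]:
  # The enclosed function closes over the injected module instead of
  # importing it at module scope.
  def scale(x, a):
    return np_module.multiply(a, x)

  # Return concrete function objects so callers can cache them; handing the
  # same callable (rather than a fresh closure) to a jitted function avoids
  # triggering recompilation.
  return {"scale": scale}

import numpy
functions = make_wrapper(numpy)
functions["scale"]([1.0, 2.0], 3.0)   # -> array([3., 6.])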
import functools
import types
from typing import Callable, Sequence, Tuple

def gmres_wrapper(jax: types.ModuleType):
  jnp = jax.numpy

  def gmres_m(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,
              x0: jax.ShapedArray, tol: float, atol: float,
              num_krylov_vectors: int,
              maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:
    """
    Solve A x = b for x using the m-restarted GMRES method. This is
    intended to be called via jax_backend.gmres.

    Given a linear mapping with (n x n) matrix representation
        A = A_mv(*A_args)
    gmres_m solves
        A x = b          (1)
    where x and b are length-n vectors, using the method of
    Generalized Minimum RESiduals with M iterations per restart (GMRES_M).

    Args:
      A_mv: A function v0 = A_mv(v, *A_args) where v0 and v have the same
            shape.
      A_args: A list of positional arguments to A_mv.
      b: The b in A @ x = b.
      x0: Initial guess solution.
      tol, atol: Solution tolerance to achieve,
                 norm(residual) <= max(tol * norm(b), atol).
                 tol is also used to set the threshold at which the Arnoldi
                 factorization terminates.
      num_krylov_vectors: Size of the Krylov space to build at each restart.
      maxiter: The Krylov space will be repeatedly rebuilt up to this many
               times.
    Returns:
      x: The approximate solution.
      beta: Norm of the residual at termination.
      n_iter: Number of restarts performed at termination.
      converged: Whether the desired tolerance was achieved.
    """
    num_krylov_vectors = min(num_krylov_vectors, b.size)
    x = x0
    b_norm = jnp.linalg.norm(b)
    tol = max(tol * b_norm, atol)
    # Defaults returned in the degenerate case maxiter == 0, in which the
    # loop below never runs.
    done, beta, n_iter = False, b_norm, 0
    for n_iter in range(maxiter):
      done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,
                            b_norm)
      if done:
        break
    return x, beta, n_iter, done

  def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,
            x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,
            tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:
    """
    A single restart of GMRES.

    Args:
      A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and
            `v` have the same shape.
      A_args: A list of positional arguments to A_mv.
      b: The `b` in `A @ x = b`.
      x: The current solution estimate, whose residual seeds this restart.
      num_krylov_vectors: Size of the Krylov space to build.
      x0: The initial guess solution; used below only to fix dtypes.
      tol: Solution tolerance to achieve.
      b_norm: Norm of b in A @ x = b.
    Returns:
      done: Whether convergence was achieved.
      beta: Magnitude of residual (i.e. the error estimate).
      x: The approximate solution.
    """
    r, beta = gmres_residual(A_mv, A_args, b, x)
    k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,
                                     x0, r, beta, tol, b_norm)
    # The Krylov correction must be applied to the iterate x whose residual
    # was just expanded; applying it to the original guess x0 would discard
    # the progress made by earlier restarts.
    x = gmres_update(k, V, R, beta_vec, x)
    done = k < num_krylov_vectors - 1
    return done, beta, x

  @jax.jit
  def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,
                     x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]:
    """
    Computes the residual vector r and its norm, beta, which is minimized by
    GMRES.

    Args:
      A_mv: A function v0 = A_mv(v, *A_args) where v0 and v have the same
            shape.
      A_args: A list of positional arguments to A_mv.
      b: The b in A @ x = b.
      x: Initial guess solution.
    Returns:
      r: The residual vector.
      beta: Its magnitude.
    """
    r = b - A_mv(x, *A_args)
    beta = jnp.linalg.norm(r)
    return r, beta

  def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,
                   beta_vec: jax.ShapedArray,
                   x0: jax.ShapedArray) -> jax.ShapedArray:
    """
    Updates the solution in response to the information computed by the
    main GMRES loop.

    Args:
      k: The final iteration which was reached by GMRES before convergence.
      V: The Arnoldi matrix of Krylov vectors.
      R: The R factor in H = QR where H is the Arnoldi overlap matrix.
      beta_vec: Stores the Givens factors used to map H into QR.
      x0: The solution estimate to be updated (the iterate whose residual
          was expanded by the Krylov loop).
    Returns:
      x: The updated solution.
""" q = min(k, R.shape[1]) y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q]) x = x0 + V[:, :q] @ y return x @functools.partial(jax.jit, static_argnums=(2,)) def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int, x0: jax.ShapedArray, r: jax.ShapedArray, beta: float, tol: float, b_norm: float) -> Tuple[int, jax.ShapedArray, jax.ShapedArray, jax.ShapedArray]: """ Builds the Arnoldi decomposition of (A, v), where v is the normalized residual of the current solution estimate. The decomposition is returned as V, R, where V is the usual matrix of Krylov vectors and R is the upper triangular matrix in H = QR, with H the usual matrix of overlaps. Args: A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and `v` have the same shape. A_args: A list of positional arguments to A_mv. n_kry: Size of the Krylov space to build; this is called num_krylov_vectors in higher level code. x0: Guess solution. r: Residual vector. beta: Magnitude of r. tol: Solution tolerance to achieve. b_norm: Magnitude of b in Ax = b. Returns: k: Counts the number of iterations before convergence. V: The Arnoldi matrix of Krylov vectors. R: From H = QR where H is the Arnoldi matrix of overlaps. beta_vec: Stores Q implicitly as Givens factors. """ n = r.size err = beta v = r / beta # These will store the Givens rotations used to update the QR decompositions # of the Arnoldi matrices. # cos : givens[0, :] # sine: givens[1, :] givens = jnp.zeros((2, n_kry), dtype=x0.dtype) beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype) beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta) V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype) V = jax.ops.index_update(V, jax.ops.index[:, 0], v) R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype) # The variable data for the carry call. Each iteration modifies these # values and feeds the results to the next iteration. k = 0 gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need. givens) # < Modified between iterations. gmres_constants = (tol, A_mv, A_args, b_norm, n_kry) gmres_carry = (gmres_variables, gmres_constants) # The 'x' input for the carry call. Each iteration will receive an ascending # loop index (from the jnp.arange) along with the constant data # in gmres_constants. gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition, gmres_krylov_work, gmres_carry) gmres_variables, gmres_constants = gmres_carry k, V, R, beta_vec, err, givens = gmres_variables return (k, V, R, beta_vec) VarType = Tuple[int, jax.ShapedArray, jax.ShapedArray, jax.ShapedArray, float, jax.ShapedArray] ConstType = Tuple[float, Callable, Sequence, jax.ShapedArray, int] GmresCarryType = Tuple[VarType, ConstType] @jax.jit def gmres_krylov_loop_condition(gmres_carry: GmresCarryType) -> bool: """ This function dictates whether the main GMRES while loop will proceed. It is equivalent to: if k < n_kry and err > tol: return True else: return False where k, n_kry, err, and tol are unpacked from gmres_carry. Args: gmres_carry: The gmres_carry from gmres_krylov. Returns: (bool): Whether to continue iterating. """ gmres_constants, gmres_variables = gmres_carry tol = gmres_constants[0] k = gmres_variables[0] err = gmres_variables[4] n_kry = gmres_constants[4] def is_iterating(k, n_kry): return k < n_kry def not_converged(args): err, tol = args return err >= tol return jax.lax.cond(is_iterating(k, n_kry), # Predicate. not_converged, # Called if True. lambda x: False, # Called if False. (err, tol)) # Arguments to calls. 
  @jax.jit
  def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:
    """
    Performs a single iteration of gmres_krylov. See that function for a more
    detailed description.

    Args:
      gmres_carry: The gmres_carry from gmres_krylov.
    Returns:
      gmres_carry: The updated gmres_carry.
    """
    gmres_variables, gmres_constants = gmres_carry
    k, V, R, beta_vec, err, givens = gmres_variables
    tol, A_mv, A_args, b_norm, _ = gmres_constants

    V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)
    R_col, givens = apply_givens_rotation(H[:, k], givens, k)
    R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])

    # Update the residual vector. The k'th Givens rotation maps the residual
    # norm estimate beta_vec[k] into (cs, sn); |sn| is the new error estimate.
    cs, sn = givens[:, k] * beta_vec[k]
    beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)
    beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)
    err = jnp.abs(sn) / b_norm
    gmres_variables = (k + 1, V, R, beta_vec, err, givens)
    return (gmres_variables, gmres_constants)

  @jax.jit
  def _gs_step(r: jax.ShapedArray,
               v_i: jax.ShapedArray) -> Tuple[jax.ShapedArray,
                                              jax.ShapedArray]:
    """
    Performs one iteration of the stabilized Gram-Schmidt procedure, with
    r to be orthogonalized against {v} = {v_0, v_1, ...}.

    Args:
      r: The new vector which is not in the initially orthonormal set.
      v_i: The i'th vector in that set.
    Returns:
      r_i: The updated r which is now orthogonal to v_i.
      h_i: The overlap of r with v_i.
    """
    h_i = jnp.vdot(v_i, r)
    r_i = r - h_i * v_i
    return r_i, h_i

  @jax.jit
  def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence,
                       V: jax.ShapedArray, H: jax.ShapedArray,
                       tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]:
    """
    Performs the kth iteration of the Arnoldi reduction procedure.

    Args:
      k: The current iteration.
      A_mv, A_args: A function A_mv(v, *A_args) performing a linear
                    transformation on v.
      V: A matrix of size (n, K + 1), K > k such that each column in
         V[:, :k+1] stores a Krylov vector and V[:, k+1] is all zeroes.
      H: A matrix of size (K + 1, K), K > k with H[:, k] all zeroes.
    Returns:
      V, H: V with its (k+1)'th column filled in by the new orthonormalized
            Krylov vector, and H with its k'th column filled in by the new
            overlaps.
    """
    v = A_mv(V[:, k], *A_args)
    # Columns of V beyond index k are zero, so the scan's extra steps are
    # harmless no-ops.
    v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T)
    v_norm = jnp.linalg.norm(v_new)
    # Normalize v unless it is (numerically) the zero vector.
    r_new = jax.lax.cond(v_norm > tol,
                         lambda x: x[0] / x[1],
                         lambda x: 0.*x[0],
                         (v_new, v_norm)
                         )
    H = jax.ops.index_update(H, jax.ops.index[:, k], H_k)
    H = jax.ops.index_update(H, jax.ops.index[k+1, k], v_norm)
    V = jax.ops.index_update(V, jax.ops.index[:, k+1], r_new)
    return V, H

  ####################################################################
  # GIVENS ROTATIONS
  ####################################################################
  @jax.jit
  def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,
                      k: int) -> jax.ShapedArray:
    """
    Successively applies each of the rotations stored in givens to H_col.

    Args:
      H_col : The vector to be rotated.
      givens: 2 x K, K > k matrix of rotation factors.
      k     : Iteration number.
    Returns:
      H_col : The rotated vector.
""" rotation_carry = (H_col, 0, k, givens) def loop_condition(carry): i = carry[1] k = carry[2] return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0) def apply_ith_rotation(carry): H_col, i, k, givens = carry cs = givens[0, i] sn = givens[1, i] H_i = cs * H_col[i] - sn * H_col[i + 1] H_ip1 = sn * H_col[i] + cs * H_col[i + 1] H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i) H_col = jax.ops.index_update(H_col, jax.ops.index[i + 1], H_ip1) return (H_col, i + 1, k, givens) rotation_carry = jax.lax.while_loop(loop_condition, apply_ith_rotation, rotation_carry) H_col = rotation_carry[0] return H_col @jax.jit def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray, k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]: """ Applies the Givens rotations stored in the vectors cs and sn to the vector H_col. Then constructs a new Givens rotation that eliminates H_col's k'th element, yielding the corresponding column of the R in H's QR decomposition. Returns the new column of R along with the new Givens factors. Args: H_col : The column of H to be rotated. givens: A matrix representing the cosine and sine factors of the previous GMRES Givens rotations, in that order (i.e. givens[0, :] -> the cos factor). k : Iteration number. Returns: R_col : The column of R obtained by transforming H_col. givens_k: The new elements of givens that zeroed out the k+1'th element of H_col. """ # This call successively applies each of the # Givens rotations stored in givens[:, :k] to H_col. H_col = apply_rotations(H_col, givens, k) cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1]) givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k) givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k) r_k = cs_k * H_col[k] - sn_k * H_col[k + 1] R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k) R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.) return R_col, givens @jax.jit def givens_rotation(v1: float, v2: float) -> Tuple[float, float]: """ Given scalars v1 and v2, computes cs = cos(theta) and sn = sin(theta) so that [cs -sn] @ [v1] = [r] [sn cs] [v2] [0] Args: v1, v2: The scalars. Returns: cs, sn: The rotation factors. """ t = jnp.sqrt(v1**2 + v2**2) cs = v1 / t sn = -v2 / t return cs, sn fnames = [ "gmres_m", "gmres_residual", "gmres_krylov", "gs_step", "kth_arnoldi_step", "givens_rotation" ] functions = [ gmres_m, gmres_residual, gmres_krylov, _gs_step, kth_arnoldi_step, givens_rotation ] class Functions: def __init__(self, fun_dict): self.dict = fun_dict def __getattr__(self, name): return self.dict[name] return Functions(dict(zip(fnames, functions)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_scipy_special__cephes(finder, module):\n module.AddGlobalName(\"gammaln\")", "def _refresh_cache():\n global _num_types, _num_funcs\n\n num_types = interrogate_number_of_global_types()\n num_funcs = interrogate_number_of_functions()\n\n if num_types != _num_types:\n for i in range(num_types):\n itype = interrogate_get_global_type(i)\n if interrogate_type_outer_class(itype):\n continue\n modname = interrogate_type_module_name(itype)\n _modules.add(modname)\n _store_type(modname, itype)\n\n _num_types = num_types\n\n if num_funcs != _num_funcs:\n for i in range(num_funcs):\n ifunc = interrogate_get_function(i)\n parent = interrogate_function_class(ifunc)\n if not parent:\n parent = interrogate_function_module_name(ifunc)\n _modules.add(parent)\n\n # Store it by both the original and mangled name.\n name = interrogate_function_name(ifunc)\n mangled_name1 = _translate_function_name(name, False)\n _func_cache[(parent, mangled_name1)] = ifunc\n if not name.startswith('~'):\n mangled_name2 = _translate_function_name(name, True)\n _func_cache[(parent, mangled_name2)] = ifunc\n\n _num_funcs = num_funcs", "def cache_globals(f):\n def action(co):\n return __cache_globals__(co,f.func_globals)\n f.func_code = __transform_codeobjects__(f.func_code,action)\n return f", "def load_functions(self, module_name, path=None):\n# try:\n if True:\n if not path:\n path = os.getcwd()\n if not isinstance(path,list):\n path = [path]\n file,filename,desc = imp.find_module(module_name,path)\n funcs = imp.load_module(module_name, file, filename, desc)\n if hasattr(funcs,'_init'):\n getattr(funcs,'_init')(self)\n attrs = [attr for attr in funcs.__dict__ \n if not attr.startswith('__')\n and attr is not '_init'\n and not hasattr(getattr(funcs,attr),'__base__')]\n for attr in attrs:\n try:\n print 'Adding', attr, 'to', self._name\n self.add_function(getattr(funcs,attr))\n except:\n print 'Error adding', attr, 'to', self._name", "def decorate_module_function(cls, f: 'Callable', **setting_kwds) -> None:\n # Filter out builtins.\n if not get_file_of_object(f):\n return\n\n namespace = vars(inspect.getmodule(f))\n namespace[f.__name__] = cls(**setting_kwds)(f)", "def __init__(\n self,\n module: Union[module_utils.CompiledModule, None],\n function: Union[Callable[[TracedModule], None], None],\n _load_dict: Optional[Dict[str, Any]] = None,\n ):\n if _load_dict is None:\n # Extract metadata from module and function.\n self.module_name = module.module_name\n self.compiled_paths = module.compiled_paths\n self.backend_name = module.backend_info.backend_name\n self.backend_id = module.backend_info.backend_id\n self.backend_driver = module.backend_info.driver\n self.iree_serializable = module.iree_serializable()\n self.tflite_serializable = module.tflite_serializable()\n self.function_name = function.__name__\n self.function_sourcefile = inspect.getsourcefile(function)\n source, start_line = inspect.getsourcelines(function)\n self.function_line_numbers = (start_line, start_line + len(source))\n self.function_source = \"\".join(source)\n\n self.calls = []\n else:\n self.module_name = _load_dict[\"module_name\"]\n self.compiled_paths = _load_dict[\"compiled_paths\"]\n self.backend_name = _load_dict[\"backend_name\"]\n self.backend_id = _load_dict[\"backend_id\"]\n self.backend_driver = _load_dict[\"backend_driver\"]\n self.iree_serializable = _load_dict[\"iree_serializable\"]\n self.tflite_serializable = _load_dict[\"tflite_serializable\"]\n self.function_name = _load_dict[\"function_name\"]\n self.function_sourcefile = 
_load_dict[\"function_sourcefile\"]\n self.function_line_numbers = _load_dict[\"function_line_numbers\"]\n self.function_source = _load_dict[\"function_source\"]\n self.calls = _load_dict[\"calls\"]", "def __init__(self):\n self.functions = {}", "def _get_method_impl(self, fn, o_self, args, kwargs, mode):\r\n if kwargs:\r\n raise NotImplementedError()\r\n\r\n cache = self.module_method_cache\r\n\r\n args_types = tuple(theano_type(arg) for arg in args)\r\n key = (fn, args_types)\r\n\r\n if key not in cache:\r\n inputs = [a() for a in args_types]\r\n print 'compiling', fn, 'for inputs', inputs\r\n rval = fn(o_self, *inputs)\r\n\r\n print 'compiling to compute outputs', rval.outputs\r\n\r\n if isinstance(rval.outputs, (tuple, list)):\r\n all_required_inputs = theano.gof.graph.inputs(rval.outputs)\r\n else:\r\n all_required_inputs = theano.gof.graph.inputs([rval.outputs])\r\n\r\n # construct In instances for the symbolic_member instances that can automatically be\r\n # included here.\r\n module_inputs = [theano.compile.io.In(\r\n variable=v,\r\n value=v._theanoclass_container,\r\n mutable=(v in rval.updates),\r\n update=rval.updates.get(v, None))\r\n for v in all_required_inputs \\\r\n if hasattr(v, '_theanoclass_container') and not (v in inputs)]\r\n\r\n cache[key] = dict(theano_function=theano.function(inputs+module_inputs, rval.outputs),\r\n updates=rval.updates,\r\n outputs=rval.outputs,\r\n mode=mode)\r\n\r\n return cache[key]", "def __init__(self, function, original_name, cache_name=None, default_value=None, use_default=True,\n key_replace_name=None):\n functools.update_wrapper(self, function)\n self.func = function\n self.name = function.__name__\n self.original_name = original_name\n self.cache_name = cache_name\n self.default_value = default_value\n self.use_default = use_default\n self.key_replace_name = key_replace_name", "def __init__(self, install_defaults=True):\n self.functions = {}\n if install_defaults:\n for function in _default_functions:\n self.register_function(function)", "def __call__(fun_name):", "def _use_methods(cls):\n retdict = JobCalculation._use_methods\n retdict.update({\n \"structure\": {\n 'valid_types': StructureData,\n 'additional_parameter': None,\n 'linkname': 'structure',\n 'docstring': \"Choose the input structure to use\",\n },\n \"settings\": {\n 'valid_types': ParameterData,\n 'additional_parameter': None,\n 'linkname': 'settings',\n 'docstring': \"Use an additional node for special settings\",\n },\n \"parameters\": {\n 'valid_types':\n ParameterData,\n 'additional_parameter':\n None,\n 'linkname':\n 'parameters',\n 'docstring': (\n \"Use a node that specifies the input parameters \"\n \"for the wannier code\"\n ),\n },\n \"projections\": {\n 'valid_types': (OrbitalData, List),\n 'additional_parameter': None,\n 'linkname': 'projections',\n 'docstring': (\"Starting projections of class OrbitalData\"),\n },\n \"local_input_folder\": {\n 'valid_types':\n FolderData,\n 'additional_parameter':\n None,\n 'linkname':\n 'local_input_folder',\n 'docstring': (\n \"Use a local folder as parent folder (for \"\n \"restarts and similar\"\n ),\n },\n \"remote_input_folder\": {\n 'valid_types': RemoteData,\n 'additional_parameter': None,\n 'linkname': 'remote_input_folder',\n 'docstring': (\"Use a remote folder as parent folder\"),\n },\n \"kpoints\": {\n 'valid_types': KpointsData,\n 'additional_parameter': None,\n 'linkname': 'kpoints',\n 'docstring':\n \"Use the node defining the kpoint sampling to use\",\n },\n \"kpoint_path\": {\n 'valid_types':\n 
ParameterData,\n 'additional_parameter':\n None,\n 'linkname':\n 'kpoint_path',\n 'docstring':\n \"Use the node defining the k-points path for bands interpolation (see documentation for the format)\",\n },\n })\n\n return retdict", "def __init__(self, *args):\n _hypre.HypreGMRES_swiginit(self, _hypre.new_HypreGMRES(*args))", "def build_jax_assemble_eval(fenics_templates: FenicsVariable) -> Callable:\n\n def decorator(fenics_function: Callable) -> Callable:\n def jax_assemble_eval(*args):\n return jax_assemble_eval_p.bind(*args)\n\n jax_assemble_eval_p = Primitive(\"jax_assemble_eval\")\n jax_assemble_eval_p.def_impl(\n lambda *args: assemble_eval(fenics_function, fenics_templates, *args)[0]\n )\n\n jax_assemble_eval_p.def_abstract_eval(\n lambda *args: jax.abstract_arrays.make_shaped_array(\n assemble_eval(fenics_function, fenics_templates, *args)[0]\n )\n )\n\n def jax_assemble_eval_batch(vector_arg_values, batch_axes):\n assert len(set(batch_axes)) == 1 # assert that all batch axes are same\n assert (\n batch_axes[0] == 0\n ) # assert that batch axis is zero, need to rewrite for a general case?\n res = list(map(jax_assemble_eval, *vector_arg_values))\n res = np.asarray(res)\n return res, batch_axes[0]\n\n jax.batching.primitive_batchers[jax_assemble_eval_p] = jax_assemble_eval_batch\n\n # @trace(\"djax_assemble_eval\")\n def djax_assemble_eval(*args):\n return djax_assemble_eval_p.bind(*args)\n\n djax_assemble_eval_p = Primitive(\"djax_assemble_eval\")\n # djax_assemble_eval_p.multiple_results = True\n djax_assemble_eval_p.def_impl(\n lambda *args: vjp_assemble_eval(fenics_function, fenics_templates, *args)\n )\n\n defvjp_all(jax_assemble_eval_p, djax_assemble_eval)\n\n return jax_assemble_eval\n\n return decorator", "def __init__(self, *args):\n _hypre.HypreFGMRES_swiginit(self, _hypre.new_HypreFGMRES(*args))", "def __def_function__():\n pass", "def _make_modules(is_train):\n return {\n 'conversion': functools.partial(\n conversion, is_train=is_train, is_extrapolation=False),\n 'time': functools.partial(time, is_train=is_train),\n }", "def Map(context, funcname, *nodesets):\n (prefix, local) = ExpandQName(funcname, namespaces=context.processorNss)\n func = (g_extFunctions.get(expanded) or\n CoreFunctions.CoreFunctions.get(expanded, None))\n if not func:\n raise Exception('Dynamically invoked function %s not found.'%funcname)\n flist = [f]*len(nodesets)\n lf = lambda x, f, *args: apply(f, args)\n retlist = apply(map, (lf, flist) + nodesets)\n\n proc = context.processor\n result_nodeset = []\n for ret in retlist:\n proc.pushResult()\n proc.writers[-1].text(Conversions.StringValue(ret))\n frag = proc.popResult()\n context.rtfs.append(frag)\n result_nodeset.append(frag.childNodes[0])\n return result_nodeset", "def test_func_dict_not_imported_module():\n\n plot_toggles = {\"SMF\": True}\n module_name = \"not_a_module.funcs\"\n function_prefix = \"calc_\"\n\n with pytest.raises(KeyError):\n func_dict = generate_func_dict(plot_toggles, module_name, function_prefix)", "def buildModule(name):\n m = imp.new_module(name)\n # function from another module\n m.foreignFunction = aFunction\n # function of the anonymous module\n exec \"\"\"\ndef isOk():\n return foreignFunction()\n\"\"\" in m.__dict__\n return m", "def register():\n PLUGINS = dict()\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n value = func(*args, **kwargs)\n PLUGINS[func.__name__] = func\n return value\n return wrapper\n return decorator", "def create_ast(client: TypeAny) -> Globals:\n ast = 
Globals(client=client)\n\n modules: TypeList[TypeTuple[str, TypeAny]] = [\n (\"xgboost\", xgb),\n (\"xgboost.core\", xgb.core),\n (\"xgboost.sklearn\", xgb.sklearn),\n ]\n\n classes: TypeList[TypeTuple[str, str, TypeAny]] = [\n (\"xgboost.DMatrix\", \"xgboost.DMatrix\", xgb.core.DMatrix),\n (\"xgboost.core.DMatrix\", \"xgboost.core.DMatrix\", xgb.core.DMatrix),\n (\"xgboost.core.Booster\", \"xgboost.core.Booster\", xgb.core.Booster),\n (\n \"xgboost.core.XGBoostError\",\n \"xgboost.core.XGBoostError\",\n xgb.core.XGBoostError,\n ),\n # classifiers\n (\"xgboost.XGBClassifier\", \"xgboost.XGBClassifier\", xgb.XGBClassifier),\n (\"xgboost.XGBRFClassifier\", \"xgboost.XGBRFClassifier\", xgb.XGBRFClassifier),\n # (\"xgboost.dask.DaskXGBRFClassifier\"), Currently dask is not supported in syft\n # regreessors\n (\"xgboost.XGBRegressor\", \"xgboost.XGBRegressor\", xgb.XGBRegressor),\n (\"xgboost.XGBRFRegressor\", \"xgboost.XGBRFRegressor\", xgb.XGBRFRegressor),\n # (\"xgboost.dask.DaskXGBRFRegressor\"), Currently dask is not supported in syft\n ]\n\n methods = [\n (\"xgboost.train\", \"xgboost.core.Booster\"),\n (\"xgboost.core.Booster.predict\", \"numpy.ndarray\"),\n # classifiers\n (\"xgboost.XGBClassifier.fit\", \"xgboost.XGBClassifier\"),\n (\"xgboost.XGBClassifier.predict\", \"numpy.ndarray\"),\n (\"xgboost.XGBRFClassifier.fit\", \"xgboost.XGBRFClassifier\"),\n (\"xgboost.XGBRFClassifier.predict\", \"numpy.ndarray\"),\n # regressors\n (\"xgboost.XGBRegressor.fit\", \"xgboost.XGBRegressor\"),\n (\"xgboost.XGBRegressor.predict\", \"numpy.ndarray\"),\n (\"xgboost.XGBRFRegressor.fit\", \"xgboost.XGBRFClassifier\"),\n (\"xgboost.XGBRFRegressor.predict\", \"numpy.ndarray\"),\n ]\n\n add_modules(ast, modules)\n add_classes(ast, classes)\n add_methods(ast, methods)\n\n for klass in ast.classes:\n klass.create_pointer_class()\n klass.create_send_method()\n klass.create_storable_object_attr_convenience_methods()\n\n return ast", "def decorate_package_function(cls, f: 'Callable', **setting_kwds) -> None:\n # Filter out builtins.\n if not get_file_of_object(f):\n return\n\n f_deco = cls(**setting_kwds)(f)\n\n namespace = vars(inspect.getmodule(f))\n\n fmodname = inspect.getmodule(f).__name__\n\n # 'sklearn.cluster.k_means_'\n basic_modname = inspect.getmodulename(fmodname.replace('.', '/') + '.py')\n # 'k_means_' or 'some_module', or None\n if basic_modname and '.' 
in fmodname:\n fpackagename = namespace['__package__'] # '.'.join(fmodname.split('.')[:-1])\n exec(\"import \" + fpackagename)\n package_dict = eval(\"vars(%s)\" % fpackagename)\n package_dict[f.__name__] = f_deco\n\n namespace[f.__name__] = f_deco", "def __init__(self, return_kernel_map=False, return_weights=False):\n MinkowskiModuleBase.__init__(self)\n self.return_kernel_map = return_kernel_map\n self.return_weights = return_weights\n self.interp = MinkowskiInterpolationFunction()", "def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._functions: DefaultDict[AnyFunctionDef, int] = defaultdict(int)", "def register_functions(self):\n try:\n self.toolbox.register(\"expr\", gp.genHalfAndHalf, pset=self.pset, min_=1, max_=2)\n self.toolbox.register(\"individual\", tools.initIterate, creator.Individual, self.toolbox.expr)\n self.toolbox.register(\"population\", tools.initRepeat, list, self.toolbox.individual)\n self.toolbox.register(\"compile\", gp.compile, pset=self.pset)\n\n self.toolbox.register(\"evaluate\", self.evalFunction)\n \"\"\"\n tools.selTournament get here 3 individuals are selected randomly from the pop.\n https://github.com/DEAP/deap/issues/214\n \"\"\"\n self.toolbox.register(\"select\", tools.selTournament, tournsize=3)\n self.toolbox.register(\"mate\", gp.cxOnePoint)\n self.toolbox.register(\"expr_mut\", gp.genFull, min_=0, max_=2)\n self.toolbox.register(\"mutate\", gp.mutUniform, expr=self.toolbox.expr_mut, pset=self.pset)\n except Exception as e:\n print('\\nMethod GeneticProgramming.register_functions did not work: ', e.__repr__())", "def __call__(self, key):\n\n def wrapper(func):\n self._registry[key] = func\n\n return wrapper", "def register_optimizer(key, module):\n register(key, module, optimizer_dict)", "def _init_builtins(self):\n for k, rexp in self.expressions.items():\n func = getattr(self, \"%s_processor\"%k)()\n yield (rexp, [func] + self._extra_rules.get(k, []))", "def assign_functions(self):\n # get function declarations from json string\n self.functions = self.definitions.get(\"functions\", [])\n\n # generate function declaration in header file\n header = cls.header_from_function_name_and_args(\n _func[\"name\"], _func[\"args\"]\n )\n\n _functions = OrderedDict()\n for func in self.functions:\n _name = func[\"name\"]\n _type = func[\"type\"]\n _args = func[\"args\"]\n _deriv = self.get_derivatives(func.get(\"deriv\", []))\n _functions[_name] = {\n \"name\": _name,\n \"type\": _type,\n \"args\": _args,\n \"deriv\": _deriv,\n }\n self._functions = _functions", "def make_processing_functions(self):\n return", "def _import(self, module_name):\n # load keywords\n kw = __import__('keywords')\n # set real rpc proxy\n kw.var_cache['proxy'] = device_proxy\n kw.var_cache['reflection'] = reflection_proxy\n kw.var_cache['local'] = local_proxy\n # load script\n __import__(module_name)\n # register all kw func from keywords.kw_func\n self.kw_func.update(kw.kw_func)", "def dispatch(class_u_want):\n return {\n 'graph': lambda: graphdata(),\n 'MRD': lambda: MRD(),\n # 'custom': lambda: # matrix submitted by user\n }.get(class_u_want, lambda: None)()", "def proxyModule(original, **replacements):\n class _ModuleProxy(object):\n def __getattribute__(self, name):\n if name in replacements:\n return replacements[name]\n else:\n return getattr(original, name)\n\n def __repr__(self):\n return \"<Proxy for %r: %s replaced>\" % (\n original, ', '.join(replacements.keys()))\n return _ModuleProxy()", "def pyelemfunctions():\n for elemid in 
unique(top.idpyelem[:top.nppyelem]):\n ip = (top.idpyelem[:top.nppyelem] == elemid)\n x = top.xpyelem[:top.nppyelem][ip]\n y = top.ypyelem[:top.nppyelem][ip]\n z = top.zpyelem[:top.nppyelem][ip]\n # --- The conversion to int is needed since a numpy.int64 is different than an int.\n (ex,ey,ez,bx,by,bz) = pyelemfunctionsdict[int(elemid)](x,y,z)\n top.expyelem[:top.nppyelem][ip] = ex\n top.eypyelem[:top.nppyelem][ip] = ey\n top.ezpyelem[:top.nppyelem][ip] = ez\n top.bxpyelem[:top.nppyelem][ip] = bx\n top.bypyelem[:top.nppyelem][ip] = by\n top.bzpyelem[:top.nppyelem][ip] = bz", "def cacheable(cache_key_template = None):\n class Wrapper(object):\n def __init__(self, fct, cache_key_template):\n self.fct = fct\n self.fct_call = self.uncached_fct_call\n self.key = cache_key_template\n self.cache = {}\n self.__name__ = fct.__name__\n self.__doc__ = fct.__doc__\n def __call__(self, *args, **kwargs):\n #~ print self, self.fct_call\n return self.fct_call(*args, **kwargs)\n def uncached_fct_call(self, *args, **kwargs):\n try:\n del kwargs['cache_key']\n return self.fct(*args, **kwargs)\n except KeyError:\n return self.fct(*args, **kwargs)\n def cached_fct_call(self, *args, **kwargs):\n try:\n #~ print \"1\"\n kwargs_sort = kwargs.keys()\n #~ print \"2\"\n kwargs_sort.sort()\n #~ print \"3\"\n kwargs_values = tuple([kwargs[i] for i in kwargs_sort])\n #~ print \"4\"\n cache_key = self.key % (args + kwargs_values)\n except TypeError:\n # when self.key is None\n try:\n #~ print \"5\"\n cache_key = kwargs.pop('cache_key')\n except KeyError:\n print(\"Caching activated, but no cache_key given! Will not use cache for this call.\")\n return self.fct(*args, **kwargs)\n try:\n #~ print \"6\"\n return self.cache[cache_key]\n except KeyError:\n print(\"Caching result\")\n #~ print \"7\"\n self.cache[cache_key] = self.fct(*args, **kwargs)\n #~ print \"8\"\n return self.cache[cache_key]\n def cache_on(self):\n self.fct_call = self.cached_fct_call\n def cache_off(self):\n self.fct_call = self.uncached_fct_call\n self.cache.clear() # maybe not necessary\n def __repr__(self):\n '''Return the function's docstring.'''\n return self.fct.__repr__()\n #~ def __get__(self, obj, objtype):\n #~ '''Support instance methods.'''\n #~ print 'get krijgt: ', obj, objtype\n #~ print 'get geeft : ', functools.partial(self.__call__, obj)\n #~ return functools.partial(self.__call__, obj)\n def __get__(self, instance, owner):\n '''Support instance methods. 
From:\n http://metapython.blogspot.nl/2010/11/python-instance-methods-how-are-they.html'''\n #~ print 'getting'\n #~ a = time()\n #~ instance.__dict__['__call__'] = MethodType(self, instance, owner)\n #~ print 'getting', self, instance, owner\n #~ try:\n #~ raise AssertionError\n #~ except AssertionError:\n #~ traceback.print_stack()\n #~ self.fct_call = self.fct_call.__get__(instance, owner)\n thing = types.MethodType(self, instance, owner)\n #~ thing = self.__get__(instance, owner) # dit zou equivalent moeten zijn aan MethodType(self, instance, owner)\n #~ thing = self.__class__(self.fct_call.__get__(instance, owner))\n #~ print time()-a\n return thing\n #~ test = self.fct_call.__get__(instance, owner)\n #~ result = self.__class__(self.fct_call.__get__(instance, owner))\n #~ print \"get\", self, instance, owner, MethodType(self, instance, owner)\n #~ print test#, result\n #~ return result\n #~ return self.__get__(instance, owner)\n #raise AssertionError\n #return MethodType(self, instance, owner)\n #instance.__call__ = MethodType(self, instance, owner)\n #return instance.__call__\n #~ try:\n #~ return self.as_method\n #~ except AttributeError:\n #~ print \"AttributeError!\"\n #~ self.as_method = \n #~ return self.as_method\n #~ def __set__(self, value):\n #~ pass\n \n def closer(f):\n # See http://stackoverflow.com/questions/233673/lexical-closures-in-python#235764\n # on closures.\n return Wrapper(f, cache_key_template)\n \n return closer", "def init(fn: Callable[..., Any], module: Module,\n mutable: CollectionFilter = DenyList(\"intermediates\"),\n ) -> Callable[..., FrozenVariableDict]:\n init_fn = init_with_output(fn, module, mutable)\n @functools.wraps(init_fn)\n def init_wrapper(*args, **kwargs):\n return init_fn(*args, **kwargs)[1]\n return init_wrapper", "def jit(func):\n return func", "def cache(func):\n\n def func_wrapper(self, hook=None, result_name=None):\n \"\"\"Wrapper to cache the result of a function.\"\"\"\n if self._cache is not None:\n c = self._cache.copy()\n c['cache'] = True\n return c\n else:\n ret = func(self, hook=hook, result_name=result_name)\n if not isinstance(ret, dict):\n raise TypeError( # pragma: no cover\n \"A dictionary was expected not '{0}'.\\nIssue with class '{1}'\"\n \"\".format(\n type(ret), type(self)))\n self._cache = ret\n ret = ret.copy()\n ret['cache'] = False\n return ret\n return func_wrapper", "def get_functions():\n\n filenames = set()\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'functions')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n functions_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'functions')\n sys.path.append(functions_path)\n for file_ in os.listdir(functions_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n functions = {}\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n if hasattr(module, 'function_name'):\n functions[module.function_name] = module\n except ImportError:\n pass\n\n return functions", "def ioc(globals):\n\tfrom Module.Shapes.ShapeFactory import shape_factory\n\tglobals['shape_factory'] = shape_factory\n\tfrom Module.Lighting.Colors import Colors\n\tglobals['Colors'] = Colors", "def module(self, name=None):\n def wrapper(fn):\n if name is not None:\n _name = name\n else:\n _name = fn.__name__\n\n 
if name in self._modules:\n raise Error(\"Module already defined: {0}\".format(_name))\n\n self._modules[_name] = fn\n return fn\n return wrapper", "def __init__(self, function: ast3.FunctionDef, module: \"Module\") -> None:\n\n # easy data\n self._function = function\n self.name = function.name\n self.line = function.lineno\n self.column = function.col_offset\n self.body = function.body\n self.module = module\n self.decorators = function.decorator_list\n\n # time to parse arguments\n self._args = function.args.args\n self.args = [Arg(x) for x in self._args]\n self.functions = [\n Function(x, self.module)\n for x in self.body\n if isinstance(x, ast3.FunctionDef)\n ]\n self.classes = [\n Class(x, self.module) for x in self.body if isinstance(x, ast3.ClassDef)\n ]\n self.untyped = [\n x for x in self.args if not x.typed and x not in DEFAULT_ARG_IGNORE\n ]\n self.doc = None\n self.returns = None\n self.return_typed = False\n self.missing_args: Set[str] = set()\n self.unexpected_args: Set[str] = set()\n arg_names = set(x.name for x in self.args if x.name not in DEFAULT_ARG_IGNORE)\n self.missing_args = arg_names\n if isinstance(self.body[0], ast3.Expr):\n # this is most likely a doc string\n self.doc = Doc(self.body[0], Doc.Type.FUNCTION)\n doc_arg_names = set(x for x, y in self.doc.args.items())\n self.missing_args = arg_names - doc_arg_names\n self.unexpected_args = doc_arg_names - arg_names\n if function.returns:\n self.return_typed = True\n self.returns = parse_elt(function.returns) # type: ignore\n\n # complexity checks\n self._radon = cc_visit_ast(self._function)[0]\n self.complexity = self._radon.complexity\n self.is_method = self._radon.is_method\n self._halstead = h_visit_ast(self._function)", "def mapInstacefunction(cls, fname):\n newlist = list(clslist)\n return list(map(lambda x: eval(\"x().\" + fname+\"()\"), newlist))", "def register(name):\n def func(cls):\n \"\"\"\n See register\n \"\"\"\n REGISTRY[name] = cls()\n return cls\n return func", "def make_local_functions_constant():\n\n import inspect\n from types import FunctionType\n\n frame = inspect.currentframe(1)\n local_functions = {}\n for sym,value in frame.f_globals.iteritems():\n if isinstance(value,FunctionType) and value.func_globals is frame.f_globals:\n local_functions[sym] = value\n\n __mass_replace__(local_functions.values(),local_functions)\n return", "def get_extension_funcs():\n raise NotImplementedError()", "def run(\n self, func=None, fargs=[], precompil_only=False,\n compil_only=False):\n precompiled = OrderedDict()\n compiled = OrderedDict()\n\n with PyV8.JSLocker():\n with PyV8.JSContext() as js_context:\n self.logger.debug('Set JS global context class attributes')\n for k, v in self.js_global_vars.items():\n self.logger.debug(\n 'Set attribute name=%s, value=%s' % (k, v))\n # Convert to JS objects\n setattr(\n js_context.locals, k,\n self._get_js_obj(js_context, v))\n\n with PyV8.JSEngine() as engine:\n precompil_error = False\n try:\n for js_lib, js_code in self.js_libs_code.items():\n self.logger.debug('Precompile JS lib: %s' % js_lib)\n precompiled[js_lib] = engine.precompile(js_code)\n except SyntaxError:\n precompil_error = True\n\n if not precompil_error and precompil_only:\n return precompiled\n\n for js_lib, js_code in self.js_libs_code.items():\n self.logger.debug('Compile JS lib: %s' % js_lib)\n cparams = dict(\n source=self.js_libs_code[js_lib],\n name=js_lib)\n if js_lib in precompiled:\n cparams['precompiled'] = precompiled[js_lib]\n compiled[js_lib] = engine.compile(**cparams)\n if 
compil_only:\n return True\n\n result = None\n for js_lib, js_script in compiled.items():\n self.logger.debug('Run JS lib: %s' % js_lib)\n result = js_script.run()\n\n if not func or type(func) != str:\n return result\n\n if fargs and not isinstance(fargs, (list, tuple)):\n raise ArgumentError(\n 'The \"fargs\" must be list or tuple')\n\n if func not in js_context.locals:\n raise JSFunctionNotExists(\n 'Function \"%s\" not exists in JS context' % func)\n\n # Convert to JS objects\n for i in range(len(fargs)):\n fargs[i] = self._get_js_obj(js_context, fargs[i])\n\n # Convert to Python objects\n return self._get_py_obj(\n js_context,\n js_context.locals[func](*fargs))", "def make_local_modules_constant():\n import inspect\n from types import FunctionType,ModuleType\n\n frame = inspect.currentframe(1)\n local_functions = []\n local_modules = {}\n for sym,value in frame.f_globals.iteritems():\n if isinstance(value,FunctionType) and value.func_globals is frame.f_globals:\n local_functions.append(value)\n elif isinstance(value,ModuleType):\n local_modules[sym] = value\n\n __mass_replace__(local_functions,local_modules)\n return", "def __init__(self):\n self.function_dict = {\n \"Sphere\": self.draw_sphere,\n \"BSpline\": self.draw_nurbspatch,\n \"Cylinder\": self.draw_cylinder,\n \"Cone\": self.draw_cone,\n \"Torus\": self.draw_torus,\n \"Plane\": self.draw_plane,\n }", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def from_function(cls, py_func, py_file):\n raise NotImplementedError", "def from_function(cls, py_func, py_file):\n raise NotImplementedError", "def create_function_dict(user_functions, internal_functions, params):\n functions = {**internal_functions, **user_functions}\n\n partialed = {\n name: partial(func, params=params)\n if \"params\" in inspect.getfullargspec(func).args\n else func\n for name, func in functions.items()\n }\n\n return partialed", "def instantiate(cls, device, **kwargs):\n\n def keygen(cls, device, **kwargs):\n \"\"\"Generate the cache key from device and attributes.\"\"\"\n key = '%s/%s' % (cls.__name__, device)\n for v in kwargs.values():\n key += '/' + str(v)\n return key\n\n def creator(cls, cache_key, device, **kwargs):\n \"\"\"Create and then cache a function.\"\"\"\n function = cls(cache_key, device, **kwargs)\n _GLOBAL_CACHED_FUNCTIONS[cache_key] = function\n return function\n\n cache_key = keygen(cls, device, **kwargs)\n try:\n return _GLOBAL_CACHED_FUNCTIONS[cache_key]\n except KeyError:\n return creator(cls, cache_key, device, **kwargs)", "def go(*args, **kws): \n class fncWrap(cgmGeneral.cgmFuncCls):\t\t\n def __init__(self,*args, **kws):\n super(fncWrap, self).__init__(*args, **kws)\n self._str_funcName = 'TemplateFactory.go'\t\n self._b_reportTimes = 0 #..we always want this on so we're gonna set it on\n self._cgmClass = 'TemplateFactory.go'\n '''\n\t mModule = None,\n\t forceNew = True,\n\t loadTemplatePose = True,\n\t tryTemplateUpdate = False,\n\t geo = None,\n\t **kws\n\t '''\n self._l_ARGS_KWS_DEFAULTS = [{'kw':'mModule',\"default\":None,\"argType\":'cgmModule','help':\"This must be a cgm module\"},\n {'kw':'forceNew',\"default\":True,\"argType\":'bool','help':\"Whether to force a new one\"},\n {'kw':'loadTemplatePose',\"default\":True,\"argType\":'bool','help':\"Whether to attempt to load a tempate pose or now\"},\n {'kw':'tryTemplateUpdate',\"default\":True,\"argType\":'bool','help':\"Whether to attempt to update the template with saved 
settings after creation\"},\n {'kw':'geo',\"default\":None,\"argType\":'mGeo,str','help':\"Geo to use for processing\"}]\t \n self.__dataBind__(*args, **kws)\n\n self.l_funcSteps = [{'step':'Initial Validation','call':self._step_validate_},\n {'step':'Need Templating?','call':self._step_templateNeed_},\n {'step':'Templating Data Bind','call':self._step_templatingDataBind_},\t \n {'step':'Checking template toggles','call':self._step_verifyModuleTemplateToggles_},\n {'step':'Main process','call':self._step_templateProcess_},\n {'step':'Tag Children','call':self._step_tagChildren_},\t \t \n ]\n\n def _step_validate_(self):\n assert self.d_kws['mModule'].isModule(),\"Not a module\"\n self._mi_module = self.d_kws['mModule']# Link for shortness\n self._str_reportStart = \"{0}('{1}')\".format(self._str_reportStart,self._mi_module.p_nameShort)\n\n if self.d_kws['loadTemplatePose']:#trying this\n self.l_funcSteps.append({'step':'Load Template Pose','call':self._step_poseLoad_})\n\n try:#Geo -------------------------------------------------------------------------------------------\n if self.d_kws['geo'] is None:\n try:\n self.d_kws['geo'] = self._mi_module.modulePuppet.getUnifiedGeo()\n if not self.d_kws['geo']:\n raise ValueError, \"Module puppet missing geo\"\n except StandardError,error:log.warning(\"geo failed to find: %s\"%(error) + \"=\"*75) \n self.str_geo = cgmValid.objString(self.d_kws['geo'],mayaType=['mesh','nurbsSurface'])\n except StandardError,error:\n self.log_error(\" geo failed : {0}\".format(error)) \n\n def _step_templateNeed_(self):\n #Before something can be templated, we need to see if it has a puppet yet\n if not self._mi_module.getMessage('modulePuppet') and not self._mi_module.getMessage('moduleParent'):\n self.log_debug(\"No modulePuppet or moduleParent. Need to create\")\n if self._mi_module.getMessage(\"helper\"):\n self._mi_module.__buildSimplePuppet__()\n else:\n self.log_error(\"No modulePuppet or moduleParent and no helper\")\t\t\n return\n\n if self._mi_module.mClass in ['cgmEyelids','cgmEyeball']:#Some special objects don't need no stinkin templating!\n if self._mi_module.getMessage('helper'):\n log.info(\"Helper object found. 
No templating necessary\")\t \n return \n\n if self.d_kws['tryTemplateUpdate']:\n self.log_info(\"Trying template update...\")\n if self._mi_module.templateSettings_call('update'):\n self.log_info(\"Template update...\")\t\t \n if self.d_kws['loadTemplatePose']:\n self.log_info(\"Trying loadTemplatePose...\") \n try:self._mi_module.templateSettings_call('load')\n except Exception,err:\n self.log_error(\"Load pose fail: {0}\".format(err))\n return False\n return self._SuccessReturn_()\n\n if self._mi_module.isTemplated():\n if self.d_kws['forceNew']:\n self._mi_module.deleteTemplate()\n else:\n log.warning(\"'%s' has already been templated\"%mModule.getShortName())\n return self._SuccessReturn_()\n\n\n def _step_templatingDataBind_(self):\n\n self.mi_modulePuppet = self._mi_module.modulePuppet\n\n self.cls = \"TemplateFactory.go\"\n\n self.moduleNullData = attributes.returnUserAttrsToDict(self._mi_module.mNode)\n self._mi_templateNull = self._mi_module.templateNull#link\n\n self.rigNull = self._mi_module.getMessage('rigNull')[0] or False\n self.moduleParent = self.moduleNullData.get('moduleParent')\n self.moduleColors = self._mi_module.getModuleColors()\n self.l_coreNames = self._mi_module.coreNames.value\n self.d_coreNamesAttrs = self._mi_module.coreNames.d_indexToAttr\n self.corePosList = self._mi_templateNull.templateStarterData\n self.foundDirections = False #Placeholder to see if we have it\n\n assert len(self.l_coreNames) == len(self.corePosList),\"coreNames length and corePosList doesn't match\"\n\n #>>> part name \n self.partName = self._mi_module.getPartNameBase()\n self.partType = self._mi_module.moduleType or False\n self._partName = self._mi_module.getPartNameBase()\n self._strShortName = self._mi_module.getShortName() or False \n\n self.direction = None\n if self._mi_module.hasAttr('cgmDirection'):\n self.direction = self._mi_module.cgmDirection or None\n\n #Verify we have a puppet and that puppet has a masterControl which we need for or master scale plug\n if not self.mi_modulePuppet.getMessage('masterControl'):\n if not self.mi_modulePuppet._verifyMasterControl():\n raise StandardError,\"MasterControl failed to verify\"\n\n self._mi_masterControl = self._mi_module.modulePuppet.masterControl\n self._mi_masterSettings = self._mi_masterControl.controlSettings\n self._mi_deformGroup = self._mi_module.modulePuppet.masterNull.deformGroup \n\n #>>> template null \n self.templateNullData = attributes.returnUserAttrsToDict(self._mi_templateNull.mNode)\n\n #>>>Connect switches\n\n def _step_verifyModuleTemplateToggles_(self):\n verify_moduleTemplateToggles(self)\n def _step_templateProcess_(self):\n try:\n if self._mi_module.mClass == 'cgmLimb':\n log.debug(\"mode: cgmLimb Template\")\n\n build_limbTemplate(self)\t\n\n if 'ball' in self.l_coreNames and 'ankle' in self.l_coreNames:\n try:\n doCastPivots(self._mi_module)\n except Exception,error:raise Exception,\"Cast pivots fail | {0}\".format(error)\n\n elif self._mi_module.mClass == 'cgmEyeball':\n log.info(\"mode: cgmEyeball\")\n try:doMakeEyeballTemplate(self)\n except StandardError,error:log.warning(\">>> %s.go >> build failed: %s\"%(self._mi_module.p_nameShort,error)) \n\n else:\n raise NotImplementedError,\"haven't implemented '{0} templatizing yet\".format(self._mi_module.mClass)\n\n except Exception,error:\n raise Exception,\"build fail! 
|{0}\".format(error)\n\n def _step_tagChildren_(self):\n doTagChildren(self._mi_module)\n\n def _step_poseLoad_(self):\n #>>> store template settings\n self._mi_module.templateSettings_call('load')\n\n\n\n \"\"\"\n self._mi_templateNull.overrideEnabled = 1\t\t\n cgmMeta.cgmAttr(self._mi_masterSettings.mNode,'templateVis',lock=False).doConnectOut(\"%s.%s\"%(self._mi_templateNull.mNode,'overrideVisibility'))\n cgmMeta.cgmAttr(self._mi_masterSettings.mNode,'templateLock',lock=False).doConnectOut(\"%s.%s\"%(self._mi_templateNull.mNode,'overrideDisplayType')) \n \"\"\"\n return fncWrap(*args, **kws).go()", "def cachefor(name):\n def decorator(func):\n assert name not in cachefuncs\n cachefuncs[name] = func\n return func\n return decorator", "def late_import():\n global NumberField_quadratic\n global NumberFieldElement_quadratic\n global AlgebraicNumber_base\n global AlgebraicNumber\n global AlgebraicReal\n global AA, QQbar, SR\n global CLF, RLF, CDF\n if NumberFieldElement_quadratic is None:\n import sage.rings.number_field.number_field\n import sage.rings.number_field.number_field_element_quadratic as nfeq\n NumberField_quadratic = sage.rings.number_field.number_field.NumberField_quadratic\n NumberFieldElement_quadratic = nfeq.NumberFieldElement_quadratic\n import sage.rings.qqbar\n AlgebraicNumber_base = sage.rings.qqbar.AlgebraicNumber_base\n AlgebraicNumber = sage.rings.qqbar.AlgebraicNumber\n AlgebraicReal = sage.rings.qqbar.AlgebraicReal\n AA = sage.rings.qqbar.AA\n QQbar = sage.rings.qqbar.QQbar\n import sage.symbolic.ring\n SR = sage.symbolic.ring.SR\n from .real_lazy import CLF, RLF\n from .complex_double import CDF", "def _make_functions(namespace):\n for fil in registry.filters:\n func_name = camel2enthought(fil.id)\n class_name = fil.id\n if func_name.endswith('_filter'):\n func_name = func_name[:-7]\n class_name = class_name[:-6]\n class_name = class_name + 'Factory'\n\n # Don't create any that are already defined.\n if class_name in namespace:\n continue\n\n # The class to wrap.\n klass = new.classobj(class_name, \n (_AutomaticFilterFactory,),\n {'__doc__': fil.help,}\n )\n klass._metadata = fil\n\n # The mlab helper function.\n func = make_function(klass)\n\n # Inject class/function into the namespace and __all__.\n namespace[class_name] = klass\n namespace[func_name] = func\n __all__.append(func_name)", "def load(module_name):\r\n if module_name.startswith('http://'):\r\n pico_url, module_name = module_name.split('/pico/')\r\n global url\r\n url = pico_url + '/pico/'\r\n module_dict = get(url + module_name)\r\n module = imp.new_module(module_name)\r\n module.__doc__ = module_dict['__doc__']\r\n functions = module_dict['functions']\r\n for function_def in functions:\r\n name = function_def['name']\r\n args = function_def['args']\r\n args_string = ', '.join([\"%s=%s\"%(arg, json.dumps(default).replace(\"null\", \"None\")) for arg, default in args if arg != None])\r\n stream = function_def['stream']\r\n docstring = function_def['doc']\r\n exec(\"\"\"\r\ndef f(%s):\r\n \\\"\\\"\\\" %s \\\"\\\"\\\"\r\n return _call_function('%s', '%s', locals(), %s)\r\n\"\"\"%(args_string, docstring, module_name, name, stream))\r\n setattr(module, name, f)\r\n return module", "def _setup(self):\r\n if not self.is_setup:\r\n\r\n #construct the default name\r\n name_func = getattr(self.parent_class, 'get_element_type', None) or getattr(self.parent_class, 'get_label', None)\r\n default_path = (name_func() if name_func else 'gremlin') + '.groovy'\r\n\r\n self.path = self.path or default_path\r\n if 
self.path.startswith('/'):\r\n path = self.path\r\n else:\r\n path = inspect.getfile(self.parent_class)\r\n path = os.path.split(path)[0]\r\n path += '/' + self.path\r\n\r\n #TODO: make this less naive\r\n gremlin_obj = None\r\n for grem_obj in parse(path):\r\n if grem_obj.name == self.method_name:\r\n gremlin_obj = grem_obj\r\n break\r\n\r\n if gremlin_obj is None:\r\n raise ThunderdomeGremlinException(\"The method '{}' wasnt found in {}\".format(self.method_name, path))\r\n\r\n for arg in gremlin_obj.args:\r\n if arg in self.arg_list:\r\n raise ThunderdomeGremlinException(\"'{}' defined more than once in gremlin method arguments\".format(arg))\r\n self.arg_list.append(arg)\r\n\r\n self.function_body = gremlin_obj.body\r\n self.function_def = gremlin_obj.defn\r\n self.is_setup = True", "def intrinsic(*args, **kwargs):\n # Make inner function for the actual work\n def _intrinsic(func):\n name = getattr(func, '__name__', str(func))\n llc = _Intrinsic(name, func, **kwargs)\n llc._register()\n return llc\n\n if not kwargs:\n # No option is given\n return _intrinsic(*args)\n else:\n # options are given, create a new callable to recv the\n # definition function\n def wrapper(func):\n return _intrinsic(func)\n return wrapper", "def InterpolateFunctions(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def __init__(self, inputs, outputs, mode=None, updates=None, givens=None, no_default_updates=False,\n accept_inplace=False, name=None, rebuild_strict=True, allow_input_downcast=None, profile=None,\n on_unused_input='raise'):\n\n # Meta\n self.inputs = inputs\n self.outputs = outputs\n self.mode = mode\n self.updates = updates\n self.givens = givens\n self.no_default_updates = no_default_updates\n self.accept_inplace = accept_inplace\n self.name = name\n self.rebuild_strict = rebuild_strict\n self.allow_input_downcast = allow_input_downcast\n self.profile = profile\n self.on_unused_input = on_unused_input\n\n # Function containers\n self._thfunction = None\n self._function = self.__call__\n\n # Compile function\n self.compile()", "def _core(fn=None, **flags):\n\n # need set the attr and access on c++\n def deco(fn):\n fn._func_graph_flags = {\n 'core': True,\n **flags,\n }\n return fn\n\n if fn is not None:\n ret = deco(fn)\n else:\n ret = deco\n return ret", "def _isolateImports(mf, f, *a, **kw):\n\n\n oldMetaPath = sys.meta_path\n oldPathHooks = sys.path_hooks\n _PEP302Mapper._oldSysModules = sys.modules.copy()\n oldImport = __builtin__.__import__\n #where is your god now?\n sys.path_hooks = []\n sys.modules.clear()\n sys.meta_path = [mf]\n __builtins__['__import__'] = mf.xocImport\n\n\n\n #stupid special case for the stdlib\n if mf.mapper.contains('warnings'):\n sys.modules['warnings'] = mf.mapper.lookup('warnings')\n\n try:\n return f(*a, **kw)\n finally:\n sys.meta_path = oldMetaPath\n sys.path_hooks = oldPathHooks\n sys.modules.clear()\n sys.modules.update(_PEP302Mapper._oldSysModules)\n __builtins__['__import__'] = oldImport", "def mapping(self, mapped):\n # Allow mappings to be passed as a string if they don't exist yet\n # see http://stackoverflow.com/questions/1095543/get-name-of-calling-functions-module-in-python\n # We store the module it came from here, but we don't actually resolve\n # the path until we need to access the mapping, because it won't be\n # in scope until then.\n if isinstance(mapped, str):\n if '.' 
in mapped:\n # A full python path has been passed\n module = '.'.join(mapped.split('.')[:-1])\n mapped = mapped.split('.')[-1]\n self._mapping_module = importlib.import_module(module)\n else:\n # Only a relative name has been passed, assume it's in\n # the same module who called us\n constructor_called_from = inspect.stack()[2]\n called_from_module = inspect.getmodule(constructor_called_from[0])\n self._mapping_module = called_from_module\n\n self._mapping = mapped", "def _compile_ufuncs(cls):\n cls._ufuncs = {} # Reset the dictionary so each ufunc will get recompiled\n\n if cls.ufunc_mode == \"jit-lookup\":\n cls._build_lookup_tables()", "def reload_definitions():\n package_list = [\n # Reload minimum needs\n 'safe.definitions.minimum_needs',\n # Reload everything that depends on minimum_needs\n 'safe.definitions.fields',\n 'safe.definitions',\n\n # Reload min needs postprocessors\n 'safe.processors.minimum_needs_post_processors',\n # Reload everything that depends on postprocessors\n 'safe.processors',\n 'safe.impact_function.postprocessors',\n 'safe.impact_function',\n\n # Reload everything that depends on reporting\n 'safe.report.extractors.aggregate_postprocessors',\n 'safe.report.extractors.minimum_needs',\n 'safe.report'\n ]\n for p in package_list:\n reload(importlib.import_module(p))\n\n from safe.definitions import minimum_needs\n from safe import processors\n LOGGER.debug('Minimum Needs list:')\n for m in minimum_needs.minimum_needs_fields:\n LOGGER.debug(m)\n\n LOGGER.debug('Minimum Needs Processors list:')\n for m in processors.minimum_needs_post_processors:\n LOGGER.debug(m)", "def cacheHandlers(self):\n\n def collect_handlers(module):\n\n def wanted(member):\n return (isclass(member) and\n issubclass(member, handlers.HandlerBase) and\n member.__name__.endswith('Handler'))\n\n m = {}\n for name, obj in getmembers(module, wanted):\n m[name] = obj(self.skype)\n m[name].init()\n return m\n\n self.handlers = collect_handlers(handlers)\n if custom_handlers:\n self.handlers.update(collect_handlers(custom_handlers))", "def _map_fn(self):\n raise NotImplementedError", "def __init__(self,\n namespace_whitelist=None,\n save_debug_info=False,\n function_aliases=None):\n self.namespace_whitelist = _validate_namespace_whitelist(\n namespace_whitelist)\n self.save_debug_info = save_debug_info\n self.function_aliases = function_aliases if function_aliases else dict()", "def get_func_lookup():\n return {\n \"randomstr\": randomstr,\n \"random\": random,\n \"sha256\": sha256,\n \"ed25519\": ed25519_private_key,\n \"rsa\": rsa_private_key,\n \"rsapublic\": rsa_public_key,\n \"publickey\": public_key,\n \"reveal\": reveal,\n \"loweralphanum\": loweralphanum,\n \"basicauth\": basicauth,\n }", "def from_input(filename):\n funcs = collections.OrderedDict() # order matters.\n with open(filename) as inp:\n userinp = yaml.load(inp)\n for details in userinp['dependents']:\n dependent_name = details['name']\n print('Loading {}'.format(dependent_name))\n cls = getattr(sys.modules[__name__], details['class'])\n if issubclass(cls, SmoothClassFunction):\n bounds = SoftBounds(*details['bounds'])\n funcs[dependent_name] = cls(bounds)\n else:\n bound = HardBounds(details['bound'])\n funcs[dependent_name] = cls(bound)\n build_all_splines(funcs.values())\n return funcs", "def init_py_impls(self):\r\n def compose_impl(r):\r\n # this is not optimal at all eg in add(*1 -> mul(x, y), *1)\r\n # it will calculate *1 twice\r\n # it also doesn't follow fgraph.toposort but that's (presumably)\r\n # still correct since 
we only have scalar ops\r\n if r in self.fgraph.inputs:\r\n idx = self.fgraph.inputs.index(r)\r\n return lambda inputs: inputs[idx]\r\n elif r.owner is None: # in fgraph.orphans:\r\n return lambda inputs: r.data\r\n node = r.owner\r\n producers = [compose_impl(input) for input in node.inputs]\r\n return lambda inputs: node.op.impl(*[p(inputs) for p in producers])\r\n self._impls = [compose_impl(r) for r in self.fgraph.outputs]", "def _instantiate_attributes_before_function(self, context=None):\n self._instantiate_pathway(context=context)\n # super(Process_Base, self)._instantiate_function(context=context)", "def compile_function(self, function, arguments):", "def creator(cls, cache_key, device, **kwargs):\n function = cls(cache_key, device, **kwargs)\n _GLOBAL_CACHED_FUNCTIONS[cache_key] = function\n return function", "def register_plugin_calls(*funcs):\n wrapped_dict = {}\n for func in funcs:\n wrapped_dict[func.__name__] = _handle_serialization(func)\n XenAPIPlugin.dispatch(wrapped_dict)", "def GetScriptableInterface(f):\n\n\tconstants = [] # returned as a sorted list\n\tfunctions = {} # returned as a sorted list of items\n\tproperties = {} # returned as a sorted list of items\n\n\tfor name in f.order:\n\t\tfeatures = f.features[name]\n\t\tif features[\"Category\"] != \"Deprecated\":\n\t\t\tif features[\"FeatureType\"] == \"val\":\n\t\t\t\tconstants.append( (name, features) )\n\t\t\telif features[\"FeatureType\"] in [\"fun\",\"get\",\"set\"]:\n\t\t\t\tif features[\"FeatureType\"] == \"get\":\n\t\t\t\t\tpropname = name.replace(\"Get\", \"\", 1)\n\t\t\t\t\tproperties[propname] = (name, properties.get(propname,(None,None))[1])\n\n\t\t\t\telif features[\"FeatureType\"] == \"set\":\n\t\t\t\t\tpropname = name.replace(\"Set\", \"\", 1)\n\t\t\t\t\tproperties[propname] = (properties.get(propname,(None,None))[0], name)\n\n\t\t\t\telse:\n\t\t\t\t\tfunctions[name] = features\n\n\tpropertiesCopy = properties.copy()\n\tfor propname, (getterName, setterName) in propertiesCopy.items():\n\t\tgetter = getterName and f.features[getterName]\n\t\tsetter = setterName and f.features[setterName]\n\n\t\tgetterValue, getterIndex, getterIndexName, getterType = 0, None, None, None\n\t\tsetterValue, setterIndex, setterIndexName, setterType = 0, None, None, None\n\t\tpropType, propIndex, propIndexName = None, None, None\n\n\t\tisok = (getterName or setterName) and not (getter is setter)\n\n\t\tif isok and getter:\n\t\t\tif getter['Param2Type'] == 'stringresult':\n\t\t\t\tgetterType = getter['Param2Type']\n\t\t\telse:\n\t\t\t\tgetterType = getter['ReturnType']\n\t\t\tgetterType = ConvertEnu(getterType)\n\t\t\tgetterValue = getter['Value']\n\t\t\tgetterIndex = getter['Param1Type'] or 'void'\n\t\t\tgetterIndexName = getter['Param1Name']\n\n\t\t\tisok = ((getter['Param2Type'] or 'void') == 'void') or (getterType == 'stringresult')\n\n\t\tif isok and setter:\n\t\t\tsetterValue = setter['Value']\n\t\t\tsetterType = ConvertEnu(setter['Param1Type']) or 'void'\n\t\t\tsetterIndex = 'void'\n\t\t\tif (setter['Param2Type'] or 'void') != 'void':\n\t\t\t\tsetterIndex = setterType\n\t\t\t\tsetterIndexName = setter['Param1Name']\n\t\t\t\tsetterType = ConvertEnu(setter['Param2Type'])\n\n\t\t\tisok = (setter['ReturnType'] == 'void') or (setter['ReturnType'] == 'int' and setterType=='string')\n\n\t\tif isok and getter and setter:\n\t\t\tisok = ((getterType == setterType) or (getterType == 'stringresult' and setterType == 'string')) and (getterIndex == setterIndex)\n\n\t\tpropType = getterType or setterType\n\t\tpropIndex = getterIndex 
or setterIndex\n\t\tpropIndexName = getterIndexName or setterIndexName\n\n\t\tif isok:\n\t\t\t# do the types appear to be useable? THIS IS OVERRIDDEN BELOW\n\t\t\tisok = (propType in ('int', 'position', 'line', 'pointer', 'colour', 'colouralpha', 'bool', 'string', 'stringresult')\n\t\t\t\tand propIndex in ('void','int','position','line','string','bool'))\n\n\t\t\t# getters on string properties follow a different protocol with this signature\n\t\t\t# for a string getter and setter:\n\t\t\t# get int funcname(void,stringresult)\n\t\t\t# set void funcname(void,string)\n\t\t\t#\n\t\t\t# For an indexed string getter and setter, the indexer goes in\n\t\t\t# wparam and must not be called 'int length', since 'int length'\n\t\t\t# has special meaning.\n\n\t\t\t# A bool indexer has a special meaning. It means \"if the script\n\t\t\t# assigns the language's nil value to the property, call the\n\t\t\t# setter with args (0,0); otherwise call it with (1, value).\"\n\t\t\t#\n\t\t\t# Although there are no getters indexed by bool, I suggest the\n\t\t\t# following protocol: If getter(1,0) returns 0, return nil to\n\t\t\t# the script. Otherwise return getter(0,0).\n\n\n\t\tif isok:\n\t\t\tproperties[propname] = {\n\t\t\t\t\"GetterValue\" : getterValue,\n\t\t\t\t\"SetterValue\" : setterValue,\n\t\t\t\t\"PropertyType\" : propType,\n\t\t\t\t\"IndexParamType\" : propIndex,\n\t\t\t\t\"IndexParamName\" : propIndexName,\n\t\t\t\t# The rest of this metadata is added to help generate documentation\n\t\t\t\t\"Category\" : (getter or setter)[\"Category\"],\n\t\t\t\t\"GetterName\" : getterName,\n\t\t\t\t\"SetterName\" : setterName,\n\t\t\t\t\"GetterComment\" : CommentString(getter),\n\t\t\t\t\"SetterComment\" : CommentString(setter)\n\t\t\t}\n\t\t\t#~ print(properties[propname])\n\n\t\t\t# If it is exposed as a property, the constant name is not picked up implicitly\n\t\t\t# (because the name is different) but its constant should still be exposed.\n\t\t\tif getter:\n\t\t\t\tconstants.append( (\"SCI_\" + getterName.upper(), getter))\n\t\t\tif setter:\n\t\t\t\tconstants.append( (\"SCI_\" + setterName.upper(), setter))\n\t\telse:\n\t\t\t# Cannot parse as scriptable property (e.g. 
not symmetrical), so export as functions\n\t\t\tdel(properties[propname])\n\t\t\tif getter:\n\t\t\t\tfunctions[getterName] = getter\n\t\t\tif setter:\n\t\t\t\tfunctions[setterName] = setter\n\n\tfunclist = list(functions.items())\n\tfunclist.sort()\n\n\tproplist = list(properties.items())\n\tproplist.sort()\n\n\tconstants.sort()\n\n\treturn (constants, funclist, proplist)", "def numpy_extension():\n jsonpickle.ext.numpy.register_handlers()\n yield # control to the test function.\n jsonpickle.ext.numpy.unregister_handlers()", "def create_user_functions():\n\n # user_function.lgi useing PARAM_A and PARAM_B for slope and intercept\n lagrit_input = \"\"\"\ncmo/DELATT/mo_pts/dfield\ncompute / distance_field / mo_pts / mo_line_work / dfield\nmath/multiply/mo_pts/x_four/1,0,0/mo_pts/dfield/PARAM_A/\nmath/add/mo_pts/x_four/1,0,0/mo_pts/x_four/PARAM_B/\ncmo/copyatt/mo_pts/mo_pts/fac_n/x_four\nfinish\n\"\"\"\n f = open('user_function.lgi', 'w')\n f.write(lagrit_input)\n f.close()\n\n # user_function2.lgi uses PARAM_A2 and PARAM_B2 for slope and intercept\n lagrit_input = \"\"\"\ncmo/DELATT/mo_pts/dfield\ncompute / distance_field / mo_pts / mo_line_work / dfield\nmath/multiply/mo_pts/x_four/1,0,0/mo_pts/dfield/PARAM_A2/\nmath/add/mo_pts/x_four/1,0,0/mo_pts/x_four/PARAM_B2/\ncmo/copyatt/mo_pts/mo_pts/fac_n/x_four\nfinish\n\"\"\"\n f = open('user_function2.lgi', 'w')\n f.write(lagrit_input)\n f.close()", "def __init__(self, func):\n self.dictionary = {}\n self.func = func", "def decorate_function(cls, f: 'Callable', **setting_kwds) -> None:\n # Filter out builtins.\n if not get_file_of_object(f):\n return\n\n caller_frame = sys._getframe(1) # caller's frame\n namespace = caller_frame.f_globals\n namespace[f.__name__] = cls(**setting_kwds)(f)", "def _parse_functions(self, locals: dict):\n functions_dict = dict(filter(self._isfunction, locals.items()))\n functions = []\n if not self.args:\n functions.append(next(iter(functions_dict.values())))\n else:\n for i in range(len(self.args)):\n if functions_dict.get(self.args[0]):\n functions.append(functions_dict[self.args.pop(0)])\n else:\n if not functions:\n msg = f'ezmake command args: {self.args} did not ' + \\\n 'match any functions defined in Makefile.py: %s' %\\\n list(functions_dict.keys())\n raise TypeError(msg)\n break\n self.functions = functions", "def install_ast_funcs(self, ast_ctx):\n sym_table = {}\n for name, func in self.ast_functions.items():\n sym_table[name] = func(ast_ctx)\n ast_ctx.set_local_sym_table(sym_table)", "def codegen_reload_data():\n return {\n \"package\": u\"fn_utilities\",\n \"message_destinations\": [u\"fn_utilities\"],\n \"functions\": [u\"utilities_artifact_hash\", u\"utilities_attachment_hash\", u\"utilities_attachment_to_base64\", u\"utilities_attachment_zip_extract\", u\"utilities_attachment_zip_list\", u\"utilities_base64_to_artifact\", u\"utilities_base64_to_attachment\", u\"utilities_call_rest_api\", u\"utilities_domain_distance\", u\"utilities_email_parse\", u\"utilities_excel_query\", u\"utilities_expand_url\", u\"utilities_extract_ssl_cert_from_url\", u\"utilities_get_contact_info\", u\"utilities_json2html\", u\"utilities_parse_ssl_certificate\", u\"utilities_pdfid\", u\"utilities_resilient_search\", u\"utilities_shell_command\", u\"utilities_string_to_attachment\", u\"utilities_timer\", u\"utilities_xml_transformation\"],\n \"workflows\": [u\"example_artifact_attachment_to_base64\", u\"example_artifact_hash\", u\"example_attachment_hash\", u\"example_attachment_to_base64\", u\"example_call_rest_api\", 
u\"example_create_artifacts_from_excel_data\", u\"example_domain_distance\", u\"example_email_parsing_artifact\", u\"example_email_parsing_attachment\", u\"example_extract_ssl_cert_from_url\", u\"example_get_incident_contact_info\", u\"example_get_task_contact_info\", u\"example_json2html\", u\"example_parse_ssl_certificate\", u\"example_pdfid\", u\"example_resilient_search\", u\"example_shell_command\", u\"example_string_to_attachment\", u\"example_timer\", u\"example_timer_parallel\", u\"example_xml_transformation\", u\"example_zip_list\", u\"example_zip_to_artifact\", u\"utilities_expand_url\"],\n \"actions\": [u\"Example: (Artifact) Attachment to Base64\", u\"Example: Artifact Hash\", u\"Example: Attachment Hash\", u\"Example: Attachment to Base64\", u\"Example: Call REST API\", u\"Example: Domain Distance\", u\"Example: Email Parsing (Artifact)\", u\"Example: Email Parsing (Attachment)\", u\"Example: Expand URL\", u\"Example: Extract SSL Certificate\", u\"Example: Get Incident Contact Info\", u\"Example: Get Task Contact Info\", u\"Example: JSON2HTML\", u\"Example: Parse SSL Certificate\", u\"Example: PDFiD\", u\"Example: Resilient Search\", u\"Example: Shell Command\", u\"Example: String to Attachment\", u\"Example: Timer Epoch\", u\"Example: Timers in Parallel\", u\"Example: Use Excel Data\", u\"Example: XML Transformation\", u\"Example: Zip Extract\", u\"Example: Zip List\"],\n \"incident_fields\": [],\n \"incident_artifact_types\": [],\n \"incident_types\": [],\n \"datatables\": [],\n \"automatic_tasks\": [],\n \"scripts\": [u\"Convert JSON to rich text v1.0\"],\n \"playbooks\": []\n }", "def _createModuleObj(self):\n ModuleFaultCohesiveKin.__init__(self)\n return", "def __create_custom_objects():\n # make some preparation to properly load objects from keras_contribute\n instance_holder = {\"instance\": None}\n\n class ClassWrapper(CRF):\n def __init__(self, *args, **kwargs):\n instance_holder[\"instance\"] = self\n super(ClassWrapper, self).__init__(*args, **kwargs)\n\n def loss(*args):\n method = getattr(instance_holder[\"instance\"], \"loss_function\")\n return method(*args)\n\n def accuracy(*args):\n method = getattr(instance_holder[\"instance\"], \"accuracy\")\n return method(*args)\n\n return {\"ClassWrapper\": ClassWrapper, \"CRF\": ClassWrapper, \"crf_loss\": loss,\n \"crf_viterbi_accuracy\": accuracy}", "def _decorate ( name = _name ) :\n import LoKiCore.decorators as _LoKiCore\n _mcp = 'const LHCb::MCParticle*'\n _mcv = 'const LHCb::MCVertex*'\n #_vp = std.vector( _mcp )\n #_vv = std.vector( _mcv )\n #_vd = std.vector( 'double' )\n _vp = 'std::vector<const LHCb::MCParticle*>' ## std.vector( _mcp )\n _vv = 'std::vector<const LHCb::MCVertex*>' ## std.vector( _mcv )\n _vd = 'std::vector<double>' ## std.vector( 'double' )\n #\n \n # MCParticle -> double\n \n _decorated = _LoKiCore.getAndDecorateFunctions ( \n name , ## modulr name \n LoKi.Functor (_mcp,'double') , ## the base\n LoKi.Dicts.FunCalls (LHCb.MCParticle) , ## call-traits\n LoKi.Dicts.FuncOps (_mcp,_mcp) ) ## operators&operations\n \n # MCVertex -> double\n \n _decorated != _LoKiCore.getAndDecorateFunctions ( \n name , ## moduel name \n LoKi.Functor (_mcv,'double') , ## the base\n LoKi.Dicts.FunCalls (LHCb.MCVertex) , ## call-traits\n LoKi.Dicts.FuncOps (_mcv,_mcv) ) ## operators&operations\n \n # MCParticle -> bool\n \n _decorated != _LoKiCore.getAndDecoratePredicates (\n name , ## module name \n LoKi.Functor (_mcp,bool) , ## the base\n LoKi.Dicts.CutCalls (LHCb.MCParticle) , ## call-traits\n LoKi.Dicts.CutsOps 
(_mcp,_mcp) ) ## operators&operations\n \n # MCVertex -> bool\n \n _decorated != _LoKiCore.getAndDecoratePredicates (\n name , ## module name \n LoKi.Functor (_mcv,bool) , ## the base\n LoKi.Dicts.CutCalls (LHCb.MCVertex) , ## call-traits\n LoKi.Dicts.CutsOps (_mcv,_mcv) ) ## operators&operations\n\n ## functional part:\n \n # vector<T> -> vector<double>\n \n _decorated |= _LoKiCore.getAndDecorateMaps (\n name , ## moduel name \n LoKi.Functor (_vp,_vd) , ## the base\n LoKi.Dicts.MapsOps(_mcp) ) ## call-traits\n _decorated |= _LoKiCore.getAndDecorateMaps (\n name , ## moduel name \n LoKi.Functor (_vv,_vd) , ## the base\n LoKi.Dicts.MapsOps(_mcv) ) ## call-traits\n\n # vector<T> -> vector<T>\n\n _decorated |= _LoKiCore.getAndDecoratePipes (\n name , ## module name \n LoKi.Functor (_vp,_vp) , ## the base\n LoKi.Dicts.PipeOps(_mcp,_mcp) ) ## call-traits\n _decorated |= _LoKiCore.getAndDecoratePipes (\n name , ## module name \n LoKi.Functor (_vv,_vv) , ## the base\n LoKi.Dicts.PipeOps(_mcv,_mcv) ) ## call-traits\n\n # vector<T> -> double\n \n _decorated |= _LoKiCore.getAndDecorateFunVals ( \n name , ## module name \n LoKi.Functor (_vp,'double') , ## the base\n LoKi.Dicts.FunValOps(_mcp) ) ## call-traits\n _decorated |= _LoKiCore.getAndDecorateFunVals ( \n name , ## module name \n LoKi.Functor (_vv,'double') , ## the base\n LoKi.Dicts.FunValOps(_mcv) ) ## call-traits\n\n # vector<T> -> bool\n\n _decorated |= _LoKiCore.getAndDecorateCutVals ( \n name , ## module name \n LoKi.Functor (_vp,bool) , ## the base\n LoKi.Dicts.CutValOps(_mcp) ) ## call-traits\n _decorated |= _LoKiCore.getAndDecorateCutVals ( \n name , ## module name \n LoKi.Functor (_vv,bool) , ## the base\n LoKi.Dicts.CutValOps(_mcv) ) ## call-traits\n\n #sources : void -> vector<T>\n\n _decorated |= _LoKiCore.getAndDecorateSources ( \n name , ## module name \n LoKi.Functor ('void',_vp) , ## the base\n LoKi.Dicts.SourceOps(_mcp,_mcp) ) ## call-traits\n _decorated |= _LoKiCore.getAndDecorateSources ( \n name , ## module name \n LoKi.Functor ('void',_vv) , ## the base\n LoKi.Dicts.SourceOps(_mcv,_mcv) ) ## call-traits\n\n\n ## primitive voids:\n\n _decorated |= _LoKiCore.getAndDecoratePrimitiveVoids ( name ) \n \n \n ## decorate pids (Comparison with strings, integers and ParticleID objects:\n for t in ( MCID , MCABSID ) :\n t = type ( t ) \n _LoKiCore.decoratePID ( t , LoKi.Dicts.PIDOps ( t ) )\n _decorated.add ( t )\n\n \n return _decorated", "def test_initialized() -> None:\n MapieRegressor()", "def make_c_init(self, madz_path):\n res = \\\n\"\"\"void ___madz_init(){{\n PyObject *fn, *implib, *importer;\n PyInterpreterState *interp_state;\n PyThreadState *thread_state, *tmp;\n if(!Py_IsInitialized())\n Py_InitializeEx(0);\n\n if(!PyEval_ThreadsInitialized()){{\n printf(\"No Theadz\\\\n\");\n PyEval_InitThreads();\n }}\n\n interp_state = PyInterpreterState_New();\n thread_state = PyThreadState_New(interp_state);\n PyEval_RestoreThread(thread_state);\n\n ___madz_LANG_python_thread_state = Py_NewInterpreter();\n tmp = PyThreadState_Swap(___madz_LANG_python_thread_state);\n implib = PyImport_ImportModule(\"importlib.machinery\");\n\n fn = PyObject_GetAttrString(implib,\"SourceFileLoader\");\n importer = PyObject_CallObject(fn, Py_BuildValue(\"(ss)\", \"_madz\", \"{madzpath}\"));\n Py_XDECREF(fn);\n fn = PyObject_GetAttrString(importer,\"load_module\");\n\n ___madz_LANG_python_wrapper_module = PyObject_CallObject(fn, Py_BuildValue(\"(s)\", \"_madz\"));\n Py_XDECREF(fn);\n Py_XDECREF(implib);\n Py_XDECREF(importer);\n\n if 
(___madz_LANG_python_wrapper_module == NULL){{\n PyErr_Print();\n\n PyThreadState_Swap(tmp);\n //PyEval_SaveThread();\n return;\n }}\n fn = PyObject_GetAttrString(___madz_LANG_python_wrapper_module, \"_madz_init\");\n\n if(fn == NULL){{\n PyErr_Print();\n PyThreadState_Swap(tmp);\n //PyEval_SaveThread();\n\n return;\n }}\n\n{function_hooks}\n\n if(PyObject_CallObject(fn, 0) == NULL){{\n PyErr_Print();\n }}\n\n PyThreadState_Swap(tmp);\n PyEval_SaveThread();\n}}\n\nvoid ___madz_init_imports(){{\n //Asks _madz.py to attach imported plugins to the madz.py autogenerated file\n PyObject *fn;\n PyThreadState *tmp;\n PyGILState_STATE gstate;\n\n //Swap Thread State\n tmp = PyThreadState_Swap(___madz_LANG_python_thread_state);\n //Get the init imports function\n gstate = PyGILState_Ensure();\n fn = PyObject_GetAttrString(___madz_LANG_python_wrapper_module, \"_madz_init_imports\");\n\n if(fn == NULL){{\n PyErr_Print();\n PyThreadState_Swap(tmp);\n return;\n }}\n //Call The init imports function\n\n if(PyObject_CallObject(fn, 0) == NULL){{\n PyErr_Print();\n }}\n PyGILState_Release(gstate);\n //Reinstate Thread State\n PyThreadState_Swap(tmp);\n\n}}\n\"\"\"\n #Path variable cannot be accessed here. Let's forward it.\n fragments = {\"function_hooks\":\"\", \"madzpath\" : repr(madz_path.path)[1 : -1]}\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments[\"function_hooks\"] +=\" ___madz_OUTPUT.\" + node.name + \" = \" + \"___madz_LANG_python_FN_\" + node.name +\";\\n\"\n\n return res.format(**fragments)", "def preprocess_for_clml(mod):\n\n for _var in mod.get_global_vars():\n if _var.name_hint == \"main\":\n continue\n fn = mod[_var.name_hint]\n if \"Compiler\" in fn.attrs.keys() and fn.attrs[\"Compiler\"] == \"clml\":\n new_fn = fn.body\n clml_mod = tvm.IRModule.from_expr(new_fn)\n with tvm.transform.PassContext(opt_level=3):\n clml_mod = preprocess_module(clml_mod)\n new_body = clml_mod[\"main\"].body\n mod[_var.name_hint] = _function.Function(\n fn.params, new_body, fn.ret_type, fn.type_params, fn.attrs\n )\n return mod", "def declare(module_name, *func_name):\n for func in func_name:\n func = SqlFunction.normalize_name(func)\n if func not in SqlFunction._definitions:\n SqlFunction._definitions[func] = module_name", "def init(self):\n\n self.loaded = False\n self.exports = NotImplemented\n self.exception = None\n self.namespace = self.create_namespace()\n self.namespace.__file__ = str(self.filename)\n self.namespace.module = self\n self.namespace.require = self.require", "def function(minargs, maxargs, implicit=False, first=False, convert=None,\n namespaceUri=None):\n\n def decorator(f):\n def new_f(node, pos, size, context, *args):\n if len(args) < new_f.minargs:\n raise xpath.exceptions.XPathTypeError(\n 'too few arguments for \"%s()\"' % new_f.__name__\n )\n if(\n new_f.maxargs is not None and\n len(args) > new_f.maxargs\n ):\n raise xpath.exceptions.XPathTypeError(\n 'too many arguments for \"%s()\"' % new_f.__name__\n )\n\n if implicit and len(args) == 0:\n args = [[node]]\n\n if first:\n args = list(args)\n args[0] = xpath.tools.nodeset(args[0])\n if len(args[0]) > 0:\n args[0] = args[0][0]\n else:\n args[0] = None\n\n if convert is not None:\n if isinstance(convert, str):\n args = [xpath.tools.invoke(convert, node, pos, size,\n context, x) for x in args]\n else:\n args = [convert(x) for x in args]\n\n return f(node, pos, size, context, *args)\n\n new_f.minargs = minargs\n new_f.maxargs = maxargs\n new_f.__name__ = f.__name__\n 
new_f.__doc__ = f.__doc__\n\n xpname = new_f.__name__[2:].replace('_', '-')\n if namespaceUri is not None:\n xpname = (namespaceUri, xpname)\n module = sys.modules[f.__module__]\n if not hasattr(module, 'xpath_functions'):\n module.xpath_functions = {}\n module.xpath_functions[xpname] = new_f\n\n return new_f\n\n return decorator", "def build_and_import_extension(\n modname, functions, *, prologue=\"\", build_dir=None,\n include_dirs=[], more_init=\"\"):\n body = prologue + _make_methods(functions, modname)\n init = \"\"\"PyObject *mod = PyModule_Create(&moduledef);\n \"\"\"\n if not build_dir:\n build_dir = pathlib.Path('.')\n if more_init:\n init += \"\"\"#define INITERROR return NULL\n \"\"\"\n init += more_init\n init += \"\\nreturn mod;\"\n source_string = _make_source(modname, init, body)\n try:\n mod_so = compile_extension_module(\n modname, build_dir, include_dirs, source_string)\n except Exception as e:\n # shorten the exception chain\n raise RuntimeError(f\"could not compile in {build_dir}:\") from e\n import importlib.util\n spec = importlib.util.spec_from_file_location(modname, mod_so)\n foo = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(foo)\n return foo", "def define_functions(self):\n\n self.define_vector_functions()\n self.define_scalar_functions()\n\n return None", "def register_ast(self, funcs):\n for name, func in funcs.items():\n self.ast_functions[name] = func" ]
[ "0.54267865", "0.5393932", "0.53602004", "0.51983255", "0.5163334", "0.51566917", "0.51326257", "0.5130478", "0.5116886", "0.51109326", "0.51055855", "0.509391", "0.50782514", "0.50449574", "0.501213", "0.5006291", "0.49983588", "0.4979815", "0.49753696", "0.4935354", "0.4883586", "0.48537096", "0.48473746", "0.483057", "0.48239824", "0.4812476", "0.4808842", "0.48025787", "0.48015308", "0.47997427", "0.4795335", "0.47901607", "0.47635284", "0.47366974", "0.47297207", "0.4721745", "0.47139707", "0.47088858", "0.47028825", "0.4699775", "0.46969533", "0.4695257", "0.46929005", "0.4692772", "0.46889395", "0.4687798", "0.4687448", "0.46686593", "0.46678814", "0.46635237", "0.46617067", "0.46617067", "0.46559703", "0.46559703", "0.46501562", "0.46491355", "0.46438822", "0.46378702", "0.46254537", "0.46241182", "0.46233904", "0.46216023", "0.46208048", "0.46024314", "0.46022293", "0.45904487", "0.4590163", "0.45896974", "0.45880917", "0.4585154", "0.4581263", "0.45782778", "0.4572868", "0.45680133", "0.45611072", "0.45587897", "0.45575774", "0.45539978", "0.45486134", "0.4546546", "0.45450622", "0.45381504", "0.4536836", "0.45346564", "0.45288268", "0.45154384", "0.45094362", "0.45011094", "0.44891042", "0.4485573", "0.44838598", "0.4482779", "0.4482199", "0.44812453", "0.4476033", "0.44745955", "0.44640234", "0.44638303", "0.4462166", "0.44621307" ]
0.48797792
21
Solve A x = b for x using the m-restarted GMRES method. This is intended to be called via jax_backend.gmres. Given a linear mapping with (n x n) matrix representation A = A_mv(*A_args), gmres_m solves Ax = b (1) where x and b are length-n vectors, using the method of Generalized Minimum RESiduals with M iterations per restart (GMRES_M).
def gmres_m(A_mv: Callable, A_args: Sequence,
            b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,
            atol: float, num_krylov_vectors: int,
            maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:
    # The Krylov space cannot usefully exceed the problem dimension.
    num_krylov_vectors = min(num_krylov_vectors, b.size)
    x = x0
    b_norm = jnp.linalg.norm(b)
    # Convergence threshold: norm(residual) <= max(tol * norm(b), atol).
    tol = max(tol * b_norm, atol)
    for n_iter in range(maxiter):
        # One restart: build a Krylov space of num_krylov_vectors vectors
        # and update the solution estimate x.
        done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,
                              b_norm)
        if done:
            break
    return x, beta, n_iter, done
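For context, a minimal driver sketch showing how gmres_m might be invoked (not part of the original record). It assumes jax.numpy is available as jnp, that the companion single-restart gmres routine shown among the functions below is in scope, and it passes an illustrative 2 x 2 dense matrix through A_args so that A_mv(v, *A_args) computes A @ v:

import jax.numpy as jnp

# Hypothetical linear map: the matrix representation is passed via A_args.
def A_mv(v, A):
    return A @ v  # v0 = A_mv(v, *A_args); v0 and v have the same shape

A = jnp.array([[4.0, 1.0],
               [1.0, 3.0]])
b = jnp.array([1.0, 2.0])
x0 = jnp.zeros(2)

# tol/atol, Krylov-space size, and restart count here are illustrative.
x, beta, n_iter, converged = gmres_m(A_mv, [A], b, x0, tol=1e-8, atol=1e-8,
                                     num_krylov_vectors=2, maxiter=10)

On return, x approximates the solution, beta is the residual norm at termination, n_iter is the number of restarts performed, and converged reports whether the tolerance was reached.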
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gmres_wrapper(jax: types.ModuleType):\n jnp = jax.numpy\n\n def gmres_m(A_mv: Callable, A_args: Sequence,\n b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,\n atol: float, num_krylov_vectors: int,\n maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:\n \"\"\"\n Solve A x = b for x using the m-restarted GMRES method. This is\n intended to be called via jax_backend.gmres.\n\n Given a linear mapping with (n x n) matrix representation\n A = A_mv(*A_args) gmres_m solves\n Ax = b (1)\n where x and b are length-n vectors, using the method of\n Generalized Minimum RESiduals with M iterations per restart (GMRES_M).\n\n Args:\n A_mv: A function v0 = A_mv(v, *A_args) where v0 and v have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The b in A @ x = b.\n x0: Initial guess solution.\n tol, atol: Solution tolerance to achieve,\n norm(residual) <= max(tol * norm(b), atol).\n tol is also used to set the threshold at which the Arnoldi factorization\n terminates.\n num_krylov_vectors: Size of the Krylov space to build at each restart.\n maxiter: The Krylov space will be repeatedly rebuilt up to this many\n times.\n Returns:\n x: The approximate solution.\n beta: Norm of the residual at termination.\n n_iter: Number of iterations at termination.\n converged: Whether the desired tolerance was achieved.\n \"\"\"\n num_krylov_vectors = min(num_krylov_vectors, b.size)\n x = x0\n b_norm = jnp.linalg.norm(b)\n tol = max(tol * b_norm, atol)\n for n_iter in range(maxiter):\n done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,\n b_norm)\n if done:\n break\n return x, beta, n_iter, done\n\n def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,\n tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:\n \"\"\"\n A single restart of GMRES.\n\n Args:\n A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and\n `v` have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The `b` in `A @ x = b`.\n x: Initial guess solution.\n tol: Solution tolerance to achieve,\n num_krylov_vectors : Size of the Krylov space to build.\n Returns:\n done: Whether convergence was achieved.\n beta: Magnitude of residual (i.e. 
the error estimate).\n x: The approximate solution.\n \"\"\"\n r, beta = gmres_residual(A_mv, A_args, b, x)\n k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,\n x0, r, beta, tol, b_norm)\n x = gmres_update(k, V, R, beta_vec, x0)\n done = k < num_krylov_vectors - 1\n return done, beta, x\n\n @jax.jit\n def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]:\n \"\"\"\n Computes the residual vector r and its norm, beta, which is minimized by\n GMRES.\n\n Args:\n A_mv: A function v0 = A_mv(v, *A_args) where v0 and\n v have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The b in A @ x = b.\n x: Initial guess solution.\n Returns:\n r: The residual vector.\n beta: Its magnitude.\n \"\"\"\n r = b - A_mv(x, *A_args)\n beta = jnp.linalg.norm(r)\n return r, beta\n\n def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n \"\"\"\n Updates the solution in response to the information computed by the\n main GMRES loop.\n\n Args:\n k: The final iteration which was reached by GMRES before convergence.\n V: The Arnoldi matrix of Krylov vectors.\n R: The R factor in H = QR where H is the Arnoldi overlap matrix.\n beta_vec: Stores the Givens factors used to map H into QR.\n x0: The initial guess solution.\n Returns:\n x: The updated solution.\n \"\"\"\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x\n\n @functools.partial(jax.jit, static_argnums=(2,))\n def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Builds the Arnoldi decomposition of (A, v), where v is the normalized\n residual of the current solution estimate. The decomposition is\n returned as V, R, where V is the usual matrix of Krylov vectors and\n R is the upper triangular matrix in H = QR, with H the usual matrix\n of overlaps.\n\n Args:\n A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and\n `v` have the same shape.\n A_args: A list of positional arguments to A_mv.\n n_kry: Size of the Krylov space to build; this is called\n num_krylov_vectors in higher level code.\n x0: Guess solution.\n r: Residual vector.\n beta: Magnitude of r.\n tol: Solution tolerance to achieve.\n b_norm: Magnitude of b in Ax = b.\n Returns:\n k: Counts the number of iterations before convergence.\n V: The Arnoldi matrix of Krylov vectors.\n R: From H = QR where H is the Arnoldi matrix of overlaps.\n beta_vec: Stores Q implicitly as Givens factors.\n \"\"\"\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. 
Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)\n\n VarType = Tuple[int, jax.ShapedArray, jax.ShapedArray, jax.ShapedArray,\n float, jax.ShapedArray]\n ConstType = Tuple[float, Callable, Sequence, jax.ShapedArray, int]\n GmresCarryType = Tuple[VarType, ConstType]\n\n @jax.jit\n def gmres_krylov_loop_condition(gmres_carry: GmresCarryType) -> bool:\n \"\"\"\n This function dictates whether the main GMRES while loop will proceed.\n It is equivalent to:\n if k < n_kry and err > tol:\n return True\n else:\n return False\n where k, n_kry, err, and tol are unpacked from gmres_carry.\n\n Args:\n gmres_carry: The gmres_carry from gmres_krylov.\n Returns:\n (bool): Whether to continue iterating.\n \"\"\"\n gmres_constants, gmres_variables = gmres_carry\n tol = gmres_constants[0]\n k = gmres_variables[0]\n err = gmres_variables[4]\n n_kry = gmres_constants[4]\n\n def is_iterating(k, n_kry):\n return k < n_kry\n\n def not_converged(args):\n err, tol = args\n return err >= tol\n return jax.lax.cond(is_iterating(k, n_kry), # Predicate.\n not_converged, # Called if True.\n lambda x: False, # Called if False.\n (err, tol)) # Arguments to calls.\n\n @jax.jit\n def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:\n \"\"\"\n Performs a single iteration of gmres_krylov. 
See that function for a more\n detailed description.\n\n Args:\n gmres_carry: The gmres_carry from gmres_krylov.\n Returns:\n gmres_carry: The updated gmres_carry.\n \"\"\"\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n tol, A_mv, A_args, b_norm, _ = gmres_constants\n\n V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)\n R_col, givens = apply_givens_rotation(H[:, k], givens, k)\n R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])\n\n # Update the residual vector.\n cs, sn = givens[:, k] * beta_vec[k]\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)\n err = jnp.abs(sn) / b_norm\n gmres_variables = (k + 1, V, R, beta_vec, err, givens)\n return (gmres_variables, gmres_constants)\n\n @jax.jit\n def _gs_step(r: jax.ShapedArray,\n v_i: jax.ShapedArray) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Performs one iteration of the stabilized Gram-Schmidt procedure, with\n r to be orthonormalized against {v} = {v_0, v_1, ...}.\n\n Args:\n r: The new vector which is not in the initially orthonormal set.\n v_i: The i'th vector in that set.\n Returns:\n r_i: The updated r which is now orthonormal with v_i.\n h_i: The overlap of r with v_i.\n \"\"\"\n h_i = jnp.vdot(v_i, r)\n r_i = r - h_i * v_i\n return r_i, h_i\n\n @jax.jit\n def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence,\n V: jax.ShapedArray, H: jax.ShapedArray,\n tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Performs the kth iteration of the Arnoldi reduction procedure.\n Args:\n k: The current iteration.\n A_mv, A_args: A function A_mv(v, *A_args) performing a linear\n transformation on v.\n V: A matrix of size (n, K + 1), K > k such that each column in\n V[n, :k+1] stores a Krylov vector and V[:, k+1] is all zeroes.\n H: A matrix of size (K, K), K > k with H[:, k] all zeroes.\n Returns:\n V, H: With their k'th columns respectively filled in by a new\n orthogonalized Krylov vector and new overlaps.\n \"\"\"\n v = A_mv(V[:, k], *A_args)\n v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T)\n v_norm = jnp.linalg.norm(v_new)\n r_new = v_new / v_norm\n # Normalize v unless it is the zero vector.\n r_new = jax.lax.cond(v_norm > tol,\n lambda x: x[0] / x[1],\n lambda x: 0.*x[0],\n (v_new, v_norm)\n )\n H = jax.ops.index_update(H, jax.ops.index[:, k], H_k)\n H = jax.ops.index_update(H, jax.ops.index[k+1, k], v_norm)\n V = jax.ops.index_update(V, jax.ops.index[:, k+1], r_new)\n return V, H\n\n####################################################################\n# GIVENS ROTATIONS\n####################################################################\n @jax.jit\n def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> jax.ShapedArray:\n \"\"\"\n Successively applies each of the rotations stored in givens to H_col.\n\n Args:\n H_col : The vector to be rotated.\n givens: 2 x K, K > k matrix of rotation factors.\n k : Iteration number.\n Returns:\n H_col : The rotated vector.\n \"\"\"\n rotation_carry = (H_col, 0, k, givens)\n\n def loop_condition(carry):\n i = carry[1]\n k = carry[2]\n return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)\n\n def apply_ith_rotation(carry):\n H_col, i, k, givens = carry\n cs = givens[0, i]\n sn = givens[1, i]\n H_i = cs * H_col[i] - sn * H_col[i + 1]\n H_ip1 = sn * H_col[i] + cs * H_col[i + 1]\n H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i)\n H_col = jax.ops.index_update(H_col, 
jax.ops.index[i + 1], H_ip1)\n return (H_col, i + 1, k, givens)\n\n rotation_carry = jax.lax.while_loop(loop_condition,\n apply_ith_rotation,\n rotation_carry)\n H_col = rotation_carry[0]\n return H_col\n\n @jax.jit\n def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Applies the Givens rotations stored in the vectors cs and sn to the vector\n H_col. Then constructs a new Givens rotation that eliminates H_col's\n k'th element, yielding the corresponding column of the R in H's QR\n decomposition. Returns the new column of R along with the new Givens\n factors.\n\n Args:\n H_col : The column of H to be rotated.\n givens: A matrix representing the cosine and sine factors of the\n previous GMRES Givens rotations, in that order\n (i.e. givens[0, :] -> the cos factor).\n k : Iteration number.\n Returns:\n R_col : The column of R obtained by transforming H_col.\n givens_k: The new elements of givens that zeroed out the k+1'th element\n of H_col.\n \"\"\"\n # This call successively applies each of the\n # Givens rotations stored in givens[:, :k] to H_col.\n H_col = apply_rotations(H_col, givens, k)\n\n cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1])\n givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k)\n givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k)\n\n r_k = cs_k * H_col[k] - sn_k * H_col[k + 1]\n R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k)\n R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.)\n return R_col, givens\n\n @jax.jit\n def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n \"\"\"\n Given scalars v1 and v2, computes cs = cos(theta) and sn = sin(theta)\n so that [cs -sn] @ [v1] = [r]\n [sn cs] [v2] [0]\n Args:\n v1, v2: The scalars.\n Returns:\n cs, sn: The rotation factors.\n \"\"\"\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn\n\n fnames = [\n \"gmres_m\", \"gmres_residual\", \"gmres_krylov\", \"gs_step\",\n \"kth_arnoldi_step\", \"givens_rotation\"\n ]\n functions = [\n gmres_m, gmres_residual, gmres_krylov, _gs_step, kth_arnoldi_step,\n givens_rotation\n ]\n\n class Functions:\n\n def __init__(self, fun_dict):\n self.dict = fun_dict\n\n def __getattr__(self, name):\n return self.dict[name]\n\n return Functions(dict(zip(fnames, functions)))", "def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None,\n callback=None, atol=None, callback_type=None):\n A, M, x, b = _make_system(A, M, x0, b)\n matvec = A.matvec\n psolve = M.matvec\n\n n = A.shape[0]\n if n == 0:\n return cupy.empty_like(b), 0\n b_norm = cupy.linalg.norm(b)\n if b_norm == 0:\n return b, 0\n if atol is None:\n atol = tol * float(b_norm)\n else:\n atol = max(float(atol), tol * float(b_norm))\n if maxiter is None:\n maxiter = n * 10\n if restart is None:\n restart = 20\n restart = min(restart, n)\n if callback_type is None:\n callback_type = 'pr_norm'\n if callback_type not in ('x', 'pr_norm'):\n raise ValueError('Unknown callback_type: {}'.format(callback_type))\n if callback is None:\n callback_type = None\n\n V = cupy.empty((n, restart), dtype=A.dtype, order='F')\n H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F')\n e = numpy.zeros((restart+1,), dtype=A.dtype)\n\n compute_hu = _make_compute_hu(V)\n\n iters = 0\n while True:\n mx = psolve(x)\n r = b - matvec(mx)\n r_norm = cublas.nrm2(r)\n if callback_type == 'x':\n callback(mx)\n elif callback_type == 'pr_norm' and iters > 0:\n callback(r_norm / b_norm)\n 
if r_norm <= atol or iters >= maxiter:\n break\n v = r / r_norm\n V[:, 0] = v\n e[0] = r_norm\n\n # Arnoldi iteration\n for j in range(restart):\n z = psolve(v)\n u = matvec(z)\n H[:j+1, j], u = compute_hu(u, j)\n cublas.nrm2(u, out=H[j+1, j])\n if j+1 < restart:\n v = u / H[j+1, j]\n V[:, j+1] = v\n\n # Note: The least-square solution to equation Hy = e is computed on CPU\n # because it is faster if tha matrix size is small.\n ret = numpy.linalg.lstsq(cupy.asnumpy(H), e)\n y = cupy.array(ret[0])\n x += V @ y\n iters += restart\n\n info = 0\n if iters == maxiter and not (r_norm <= atol):\n info = iters\n return mx, info", "def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,\n tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:\n r, beta = gmres_residual(A_mv, A_args, b, x)\n k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,\n x0, r, beta, tol, b_norm)\n x = gmres_update(k, V, R, beta_vec, x0)\n done = k < num_krylov_vectors - 1\n return done, beta, x", "def solve_gmres(matvec: Callable,\n b: Any,\n ridge: Optional[float] = None,\n tol: float = 1e-5,\n **kwargs) -> Any:\n if ridge is not None:\n matvec = _make_ridge_matvec(matvec, ridge=ridge)\n return jax.scipy.sparse.linalg.gmres(matvec, b, tol=tol, **kwargs)[0]", "def GMRES_1(A, b, x0, max_iterations=50):\n\n last_x = x0\n curr_x = last_x\n last_r = b - A @ x0\n curr_iter = 0\n residual_queue = []\n while curr_iter < max_iterations:\n Ar = A @ last_r\n alpha = (last_r.transpose() @ Ar) / (Ar.transpose() @ Ar)\n curr_x = last_x + alpha * last_r\n curr_r = last_r - alpha * Ar\n c = np.linalg.norm(A @ curr_x - b, 2) / np.linalg.norm(b, 2)\n residual_queue.append(np.linalg.norm(A @ curr_x - b, 2))\n if curr_iter == max_iterations - 1:\n print_graph(residual_queue, curr_iter, \"residual\", \"GMRES(1)\")\n last_x = curr_x\n last_r = curr_r\n curr_iter += 1\n print(\"Number of Iterations: \" + str(curr_iter))\n\n return curr_x", "def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]:\n r = b - A_mv(x, *A_args)\n beta = jnp.linalg.norm(r)\n return r, beta", "def convergence_gmres_A():\n global conv_residuals\n def compute_residuals(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. 
Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n n_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_n = np.zeros(n_search.size)\n\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n # To average, we loop over 10 times\n for j in range(10):\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n steps_till_conv_n[i] += len(conv_residuals)\n\n # Divide by 10 to take the average:\n steps_till_conv_n /= 10\n\n fig220 = plt.figure(figsize=(13, 8))\n plt.plot(n_search, steps_till_conv_n)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps Taken to Converge\")\n plt.title(\"Figure 220 - Steps Taken for GMRES to Converge for Varying N\",\n fontsize=13)\n plt.grid()\n plt.savefig(\"figures/figure220.png\")\n plt.show()\n\n n_search = np.array([10, 50, 100, 150])\n\n fig221 = plt.figure(figsize=(13, 8))\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n plt.semilogy(range(len(conv_residuals)), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Step Taken to Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 221 - GMRES Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(\"figures/figure221.png\")\n plt.show()\n return", "def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. 
Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)", "def tt_gmres_leftprecond(AOp, b, nrm_b, eps=1.e-6, maxIter=20, verbose=True, preconOp=None, adaptiveTolerance=True):\n\n def calc_solution():\n x = pitts_py.TensorTrain_double(b.dimensions())\n x.setZero()\n nrm_x = 0\n for i in range(len(y)):\n nrm_x = pitts_py.axpby(y[i], V[i], nrm_x, x, eps)\n return x, nrm_x\n\n def residual_error(x, nrm_x):\n #print(\"TT-GMRES: solution max rank %d\" % np.max(x.getTTranks()))\n # calculate real residual\n r = pitts_py.TensorTrain_double(b.dimensions())\n r_nrm = nrm_x * AOp(x, r, eps/10, maxRank=9999)\n if preconOp is not None:\n r_nrm = pitts_py.axpby(orig_nrm_b, orig_b, -r_nrm, r, eps/10, maxRank=9999)\n #print(\"TT-GMRES: real residual norm %g\" % (r_nrm/orig_nrm_b) )\n else:\n r_nrm = pitts_py.axpby(nrm_b, b, -r_nrm, r, eps/10, maxRank=9999)\n #print(\"TT-GMRES: real residual norm %g\" % (r_nrm/nrm_b) )\n return r_nrm\n\n if verbose:\n if preconOp is None:\n print('# \"iteration\" \"rel LSTQ norm\" \"rel residual norm\" \"new direction rank\" \"new Krylov vector rank\" \"solution rank\"')\n else:\n print('# \"iteration\" \"rel LSTQ norm\" \"rel residual norm\" \"new direction rank\" \"precond direction rank\" \"new Krylov vector rank\" \"solution rank\"')\n\n # assumes b is normalized and nrm_b is the desired rhs norm\n\n # left-preconditioning, transform RHS\n if preconOp is not None:\n orig_b = b\n orig_nrm_b = nrm_b\n b = pitts_py.TensorTrain_double(orig_b.dimensions())\n nrm_b = nrm_b * preconOp.apply(orig_b, b, eps / 10, 9999)\n nrm_b = nrm_b * pitts_py.normalize(b, eps/10, 9999)\n\n # define initial subspace\n beta = nrm_b\n curr_beta = beta\n V = [b]\n m = maxIter\n H = np.zeros((m + 1, m), order='F')\n\n if preconOp is not None:\n z = pitts_py.TensorTrain_double(b.dimensions())\n\n if verbose:\n #print(\"TT-GMRES: initial residual norm: %g, max. rank: %d\" % (beta, np.max(b.getTTranks())))\n if preconOp is None:\n print(0, 1, 1, np.max(b.getTTranks()), np.max(b.getTTranks()), 0)\n #print(\"TT-GMRES: un-preconditioned RHS max. 
rank: %d\" % np.max(orig_b.getTTranks()))\n else:\n print(0, 1, 1, np.max(orig_b.getTTranks()), np.max(b.getTTranks()), np.max(b.getTTranks()), 0)\n\n for j in range(m):\n if adaptiveTolerance:\n delta = eps / (curr_beta / beta) / (1.2 * m)\n else:\n delta = eps\n w = pitts_py.TensorTrain_double(b.dimensions())\n\n if preconOp is not None:\n z_nrm = AOp(V[j], z, delta, 9999)#, (j+1)*rank_b)\n w_nrm = z_nrm * preconOp.apply(z, w, delta, 9999)#, (j+2)*rank_b)\n else:\n w_nrm = AOp(V[j], w, delta, 9999)#, (j+2)*rank_b)\n\n if preconOp is not None:\n rank_z = np.max(z.getTTranks())\n rank_w = np.max(w.getTTranks())\n\n H[:j+2,j] = w_nrm * tt_pivmgs(V, w, delta, maxRank=9999)\n\n rank_vj = np.max(w.getTTranks())\n\n Hj = H[:j+2,:j+1]\n betae = np.zeros(j+2)\n betae[0] = beta\n # solving Hj * y = beta e_1\n y, curr_beta, rank, s = np.linalg.lstsq(Hj, betae, rcond=None)\n curr_beta = np.sqrt(curr_beta[0]) if curr_beta.size > 0 else 0\n if verbose:\n #print(\"TT-GMRES: LSTSQ residual norm: %g \" % (curr_beta / beta) )\n x, nrm_x = calc_solution()\n r_nrm = residual_error(x, nrm_x)\n rank_x = np.max(x.getTTranks())\n if preconOp is None:\n print(j+1, curr_beta/beta, r_nrm / nrm_b, rank_w, rank_vj, rank_x)\n else:\n print(j+1, curr_beta/beta, r_nrm / orig_nrm_b, rank_w, rank_z, rank_vj, rank_x)\n if curr_beta / beta <= eps:\n break\n\n if not verbose:\n x, nrm_x = calc_solution()\n return x, nrm_x", "def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n tol, A_mv, A_args, b_norm, _ = gmres_constants\n\n V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)\n R_col, givens = apply_givens_rotation(H[:, k], givens, k)\n R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])\n\n # Update the residual vector.\n cs, sn = givens[:, k] * beta_vec[k]\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)\n err = jnp.abs(sn) / b_norm\n gmres_variables = (k + 1, V, R, beta_vec, err, givens)\n return (gmres_variables, gmres_constants)", "def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x", "def minmap_newton(A, b, x, max_iter=0, tol_rel=0.00001, tol_abs=np.finfo(np.float64).eps*10, profile=True):\n\n # Human readable dictionary of exit messages\n msg = {1 : 'preprocessing', # flag = 1\n 2 : 'iterating', # flag = 2\n 3 : 'relative', # flag = 3\n 4 : 'absolute', # flag = 4\n 5 : 'stagnation', # flag = 5\n 6 : 'local minima', # flag = 6\n 7 : 'nondescent', # flag = 7\n 8 : 'maxlimit', # flag = 8\n }\n\n # We use N as a reference for the usage of asserts throughout the\n # program. 
We assume that everything is column vector of shape\n # (N,1) if not the asserts will catch them.\n N = np.size(b)\n flag = 1\n\n assert x.shape == (N,1), 'x0 is not a column vector, it has shape: ' + repr(x.shape)\n assert A.shape == (N,N), 'A is not a square matrix, it has shape: ' + repr(A.shape)\n assert b.shape == (N,1), 'b is not a column vector, it has shape: ' + repr(b.shape)\n\n if max_iter == 0:\n max_iter = np.floor(N/2.0)\n\n # Ensure sane values\n max_iter = max(max_iter,1)\n # Rest of the value should be sane\n\n ##### Magic constants #####\n h = 1e-7\n alpha = 0.5\n beta = 0.001\n gamma = 1e-28\n\n eps = np.finfo(np.float64).eps\n rho = np.finfo(np.float64).eps\n gmres_tol = 10*eps\n\n ##### Values needed file iterating #####\n convergence = np.zeros(max_iter+1)\n\n # Should use np.infty.\n err = 1e20\n iterate = 1\n flag = 2\n\n while iterate <= max_iter:\n y = np.dot(A,x) + b\n assert y.shape == (N,1), 'y is not a column vector, it has shape: ' + repr(y.shape)\n assert np.all(np.isreal(y)), 'y is not real'\n # Calculate the minimum map column vector.\n H = minmap(x,y)\n assert H.shape == (N,1), 'H is not a column vector, it has shape: ' + repr(H.shape)\n assert np.all(np.isreal(H)), 'H is not real'\n old_err = err\n # Calculate merit value, error\n err = 0.5*np.dot(H.T,H)\n assert err.shape == (1,1), 'err is not a scalar, it has shape: ' + repr(err.shape)\n assert np.isreal(err), 'err is not real'\n\n if profile:\n convergence[iterate-1] = err\n\n ##### Test the stopping criterias used #####\n rel_err = np.abs(err-old_err)/np.abs(old_err)\n\n if rel_err < tol_rel:\n flag = 3\n break\n\n if err < tol_abs:\n flag = 4\n break\n\n ##### Solving the Newton system\n restart = min(N, 20) # Number of iterates done before Restart\n # for GMRES should restart\n S = np.where(y < x)\n J = np.identity(N)\n J[S,:] = A[S,:]\n dx = np.zeros((N,1))\n dx = gmres(J, (-H), tol=gmres_tol, restart=restart)[0].reshape(N,1)\n\n assert dx.shape == (N,1), 'dx is not a column vector, it has shape: ' + repr(dx.shape)\n assert np.all(np.isreal(dx)), 'dx is not real'\n\n nabla_H = np.dot(H.T, J)\n # Ensure nabla_H is a column vector\n nabla_H = nabla_H.reshape(N,1)\n assert nabla_H.shape == (N,1), 'nabla_H is not a column vector, it has shape: ' + repr(nabla_H.shape)\n assert np.all(np.isreal(nabla_H)), 'nabla_H is not real'\n\n # Tests whether the search direction is below machine\n # precision.\n if np.max(np.abs(dx)) < eps:\n flag = 5\n print \"*** Search direction below machine precision at iterate \" + repr(iterate) + \", choosing gradient as search direction.\"\n dx = -nabla_H\n\n # Test whether we are stuck in a local minima\n if np.linalg.norm(nabla_H) < tol_abs:\n flag = 6\n break\n\n # Test whether our direction is a sufficient descent direction\n if np.dot(nabla_H.T,dx) > -rho*(np.dot(dx.T, dx)):\n # Otherwise we should try gradient direction instead.\n print \"*** Non descend direction at iterate \" + repr(iterate) + \", choosing gradient as search direction.\"\n dx = -nabla_H\n\n ##### Armijo backtracking combined with a projected line-search #####\n tau = 1.0\n f_0 = err\n grad_f = beta*np.dot(nabla_H.T,dx)\n\n x_k = x[:]\n assert x_k.shape == (N,1), 'x_k is not a column vector, it has shape: ' + repr(x_k)\n assert np.all(np.isreal(x_k)), 'x_k is not real'\n \n # Perform backtracking line search\n while True:\n x_k = np.maximum(0, x + dx*tau)\n assert x_k.shape == (N,1), 'x_k is not a column vector, it has shape: ' + repr(x_k.shape)\n assert np.all(np.isreal(x_k)), 'x_k is not real'\n y_k 
= np.dot(A,x_k)+b\n assert y_k.shape == (N,1), 'y_k is not a column vector, it has shape: ' + repr(y_k.shape)\n assert np.all(np.isreal(y_k)), 'y_k is not real'\n H_k = minmap(y_k,x_k)\n assert H_k.shape == (N,1), 'H_k is not a column vector, it has shape: ' + repr(H_k.shape)\n assert np.all(np.isreal(H_k)), 'H_k is not real'\n f_k = 0.5*(np.dot(H_k.T,H_k))\n # Test Armijo condition for sufficient decrease\n if f_k <= f_0 + tau*grad_f:\n break\n # Test whether the stepsize has become too small\n if tau*tau < gamma:\n break\n tau *= alpha\n\n # Update iterate with result from line search.\n x = x_k\n assert x.shape == (N,1), 'x is not a column vector, it has shape: ' + repr(x.shape)\n assert np.all(np.isreal(x)), 'x is not real.'\n\n # Increment iterate\n iterate += 1\n\n if iterate >= max_iter:\n iterate -= 1\n flag = 8\n\n return (x, err, iterate, flag, convergence[:iterate], msg[flag])", "def _newtons_method_gmres_action(f, initial_guess, max_iter=50, tol=1e-12):\n\n output_dim = len(f(initial_guess))\n \n @np.vectorize\n def sum_values(dictionary):\n return sum(dictionary.values())\n \n def create_action(x0):\n \n def L_fun(x):\n \"\"\"\n Action\n Returns J_f(x0)*x by setting the values of 'x' as the initial derivatives for the variables in x0.\n \"\"\"\n \n f_x0 = f(ad.create_vector('x0', x0, seed_vector=x));\n f_x0 = np.array(f_x0) #ensure that f_x0 is np.array\n action = sum_values(ad.get_deriv(f_x0))\n return action\n \n L = LinearOperator(shape=(output_dim, len(x0)), matvec=L_fun)\n \n return L\n \n x0 = initial_guess\n for iter_num in range(max_iter):\n L = create_action(x0)\n b = -1 * np.array(f(x0))\n if len(x0) == 1:\n b = np.array([b])\n step, _ = gmres(L, b, tol = tol, atol = 'legacy')\n xnext = x0 + step \n if np.all(np.abs(xnext - x0) < tol):\n return (xnext, iter_num + 1);\n x0 = xnext\n \n raise RuntimeError(\"Failed to converge after {0} iterations, value is {1}\".format(max_iter, x0) );", "def mr(A, n_iterations, stop=False):\n assert len(A.sizes) == 2\n assert A.sizes[0] == A.sizes[1]\n M = A.same_shape()\n n = A.sizes[0]\n @for_range(n)\n def _(i):\n e = sfix.Array(n)\n e.assign_all(0)\n e[i] = 1\n M[i] = solve_linear(A, e, n_iterations, stop=stop)\n return M.transpose()", "def linear_least_squares(a, b, residuals=False):\n if type(a) != np.ndarray or not a.flags[\"C_CONTIGUOUS\"]:\n main_warning(\n \"Matrix a is not a C-contiguous numpy array. The solver will create a copy, which will result\"\n + \" in increased memory usage.\"\n )\n\n a = np.asarray(a, order=\"c\")\n i = dgemm(alpha=1.0, a=a.T, b=a.T, trans_b=True)\n x = np.linalg.solve(i, dgemm(alpha=1.0, a=a.T, b=b))\n\n if residuals:\n return x, np.linalg.norm(np.dot(a, x) - b)\n else:\n return x", "def question27():\n global conv_residuals\n def catch(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. 
Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n def iterate(rk):\n \"\"\" Preconditioner Function for GMRES.\"\"\"\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk\n\n\n N_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_N = np.zeros(N_search.size)\n\n fig271 = plt.figure(figsize=(13, 8))\n\n for i, n in enumerate(N_search):\n n2 = n**2\n A = construct_matrix_A(n)\n b = np.random.randn(n2)\n M, N = construct_M_N(n)\n mu_max = scipy.sparse.linalg.eigs(M, k=1, which='LM', return_eigenvectors=False)[0].real\n mu_min = scipy.sparse.linalg.eigs(M, k=1, which='SM', return_eigenvectors=False)[0].real\n gamma = np.sqrt(mu_max*mu_min)\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n2, n2), format=\"csr\")\n P1 = gammaI + M\n P2 = gammaI - N\n P3 = gammaI + N\n P4 = gammaI - M\n M = scipy.sparse.linalg.LinearOperator((n2, n2), matvec=iterate)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, M=M, callback=catch)\n steps_till_conv_N[i] += len(conv_residuals)\n n_steps = len(conv_residuals)\n plt.semilogy(range(n_steps), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Steps Required for Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 271 - GMRES + Preconditioner Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(f\"figures/figure271.png\")\n plt.show()\n\n\n fig270 = plt.figure(figsize=(13, 8))\n plt.plot(N_search, steps_till_conv_N)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps until convergence\")\n plt.title(\"Figure 270 - GMRES + Preconditioner Convergence Required for Varying N\", fontsize=13)\n plt.grid()\n plt.savefig(f\"figures/figure270.png\")\n plt.show()\n return", "def solve_matrix(M, b):\n\n try:\n x = np.linalg.solve(M, b)\n except np.LinAlgError:\n print(\"ERR: Matrix is singular\")\n return None\n\n if not np.allclose(np.dot(M, x), b):\n print(\"ERR: Matrix is inconsistent (most likely with the independent sources)\")\n return None\n \n return x", "def cg_solve_jax(A,\n b,\n x_0=None,\n cg_iters=10,\n cg_residual_tol=1e-20,\n damping=1e-4):\n x = jnp.zeros_like(b) if x_0 is None else x_0\n if x_0 is not None:\n hvp_x0 = jnp.dot(A, x)\n\n r = b.copy() if x_0 is None else b-hvp_x0\n p = r.copy()\n rdotr = p.dot(r)\n\n for i in range(cg_iters):\n hvp_p = jnp.dot(A, p)\n z = hvp_p\n\n v = rdotr / p.dot(z)\n x += v * p\n r -= v * z\n\n s = r\n newrdotr = s.dot(r)\n mu = newrdotr / rdotr\n\n p = s + mu * p\n rdotr = newrdotr\n\n if rdotr < cg_residual_tol:\n break\n return x", "def _lin_solve(b, x, x0, a, c, iterations, n):\n c_recip = 1 / c\n for k in range(0, iterations):\n for m in range(1, n - 1):\n for j in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, j, m, n)] = (x0[index_of(i, j, m, n)] + a * (x[index_of(i + 1, j, m, n)]\n + x[index_of(i - 1, j, m, n)]\n + x[index_of(i, j + 1, m, n)]\n + x[index_of(i, j - 1, m, n)]\n + x[index_of(i, j, m + 1, n)]\n + x[index_of(i, j, m - 1, n)]\n )) * c_recip\n _set_bounds(b, x, n)", "def linearRegression(A, b):\n m = Model()\n m.setParam('OutputFlag', False)\n\n n = len(A) # number of rows in A\n d = len(A[0]) # number of columns in A\n assert n == len(b) # make sure the shape of matrix is correct\n\n # x is of size d\n x = m.addVars(d, name='x')\n # ** is not supported by gurobi!\n square = lambda _: _ * _\n # \\sum_i (A[i] * x - b[i])^2\n 
m.setObjective(sum(square(sum(A[i][j] * x[j] for j in xrange(d)) - b[i])\n for i in xrange(n)), GRB.MINIMIZE)\n m.optimize()\n\n return [x[_].X for _ in xrange(d)]", "def linear_least_squares(M, v):\n \n B = copy(M)\n [m,n] = shape(B)\n if rank(B) != min(m,n):\n print('Warning: can not be solved since the rank of the matrix is not its maximum value')\n return nan\n else:\n \n A = copy(M)\n At = transpose(M)\n b = copy(v)\n b = transpose(b)\n \n AtA = dot(At, A)\n Atb = transpose(dot(At, b))\n print(AtA, Atb)\n \n x = gauss_elimination(AtA, Atb)\n print('x*:')\n return x", "def cg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None,\n atol=None):\n A, M, x, b = _make_system(A, M, x0, b)\n matvec = A.matvec\n psolve = M.matvec\n\n n = A.shape[0]\n if maxiter is None:\n maxiter = n * 10\n if n == 0:\n return cupy.empty_like(b), 0\n b_norm = cupy.linalg.norm(b)\n if b_norm == 0:\n return b, 0\n if atol is None:\n atol = tol * float(b_norm)\n else:\n atol = max(float(atol), tol * float(b_norm))\n\n r = b - matvec(x)\n iters = 0\n rho = 0\n while iters < maxiter:\n z = psolve(r)\n rho1 = rho\n rho = cublas.dotc(r, z)\n if iters == 0:\n p = z\n else:\n beta = rho / rho1\n p = z + beta * p\n q = matvec(p)\n alpha = rho / cublas.dotc(p, q)\n x = x + alpha * p\n r = r - alpha * q\n iters += 1\n if callback is not None:\n callback(x)\n resid = cublas.nrm2(r)\n if resid <= atol:\n break\n\n info = 0\n if iters == maxiter and not (resid <= atol):\n info = iters\n\n return x, info", "def _gmres(self, super_operator, super_rhs, tol):\n return login_gmres(\n super_operator, super_rhs, tol,\n return_residuals=True,\n **SOLVER_OPTIONS\n )", "def simpleDemo(verbose=False):\n N = 100\n u_true = np.array([np.sin(x / 10.0) for x in np.linspace(0, 20, N)])\n A = openmg.operators.poisson(N, sparse=True)\n b = openmg.tools.flexibleMmult(A, u_true)\n params = {'problemShape': (N,), 'gridLevels': 3, 'cycles': 10,\n 'iterations': 2, 'verbose': verbose, 'dense': True,\n 'threshold': 1e-2, 'giveInfo': True}\n u_mg, infoDict = openmg.mgSolve(A, b, params)\n if verbose:\n print \"info:\"\n print infoDict\n \n ## if verbose==True, output will look something like this:\n # Generating restriction matrices; dense=True\n # Generating coefficient matrices; dense=True ... 
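# ---- Editor's sketch (illustrative, not one of the original snippets). ----
# The dense, dependency-free version of the conjugate gradient loop that the
# CuPy `cg` above implements: same recurrences, plain NumPy, SPD `A` assumed.
import numpy as np

def cg_dense(A, b, tol=1e-10, maxiter=None):
    x = np.zeros_like(b, dtype=float)
    r = b - A @ x
    p = r.copy()
    rs = r @ r
    for _ in range(maxiter or 10 * b.size):
        Ap = A @ p
        alpha = rs / (p @ Ap)        # step length along the search direction
        x += alpha * p
        r -= alpha * Ap
        rs_new = r @ r
        if np.sqrt(rs_new) < tol:
            break
        p = r + (rs_new / rs) * p    # conjugate update of the direction
        rs = rs_new
    return x

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
assert np.allclose(A @ cg_dense(A, b), b)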
made 3 A matrices\n    # calling amg_cycle at level 0\n    # calling amg_cycle at level 1\n    # direct solving at level 2\n    # Residual norm from cycle 1 is 0.805398.\n    # cycle 1 < cycles 10\n    # calling amg_cycle at level 0\n    # calling amg_cycle at level 1\n    # direct solving at level 2\n    # Residual norm from cycle 2 is 0.107866.\n    # cycle 2 < cycles 10\n    # calling amg_cycle at level 0\n    # calling amg_cycle at level 1\n    # direct solving at level 2\n    # Residual norm from cycle 3 is 0.018650.\n    # cycle 3 < cycles 10\n    # calling amg_cycle at level 0\n    # calling amg_cycle at level 1\n    # direct solving at level 2\n    # Residual norm from cycle 4 is 0.003405.\n    # Returning mgSolve after 4 cycle(s) with norm 0.003405\n    # info:\n    # {'norm': 0.0034051536498270769, 'cycle': 4} \n    return u_mg", "def _gmres(self, super_operator, super_rhs, tol):\n        sol, solve_info, residuals = linalg.gmres(\n            super_operator, super_rhs,\n            tol=tol,\n            use_strong_form=True,\n            return_residuals=True,\n            **SOLVER_OPTIONS\n        )\n        return sol, solve_info, residuals", "def PCG(A, b, x0, M_inv, eps=0.01, imax=50):\n    i = 0\n    x = x0\n    # residue\n    r = b - A @ x\n    # step in the direction of residue\n    d = M_inv @ r\n    # initial delta^2\n    delta_new = np.dot(r,d)\n    delta_0 = delta_new\n    while i < imax and delta_new > eps**2 * delta_0:\n        # A-product of the current search direction, reused when updating r\n        q = A @ d\n        alpha = delta_new / np.dot(d, q)\n        x += alpha * d\n        if i % 50 == 0:\n            r = b - A@x\n        else:\n            r -= alpha*q\n        s = M_inv @ r\n        delta_old = delta_new\n        delta_new = np.dot(r, s)\n        beta = delta_new / delta_old\n        d = s + beta*d\n        i += 1\n    return x", "def solve(self, A, B):\n        d = tf.matrix_diag_part(A)\n        D = tf.reshape(tf.matrix_diag(d), tf.shape(A))\n        R = A - D\n\n        iD = tf.reshape(tf.matrix_diag(1.0 / d), tf.shape(A))\n\n        X = tf.zeros_like(B)\n        for _ in range(self.nb_iterations):\n            T = tf.einsum('bmn,bno->bmo', R, X)\n            S = B - T\n            X = tf.einsum('bmn,bno->bmo', iD, S)\n        return tf.reshape(X, tf.shape(B))", "def SolveAndCorrect(M, b):\n\tXMatrix = Solve(M, b)\n\tXMatrix, Qc = MatrixCorrections(XMatrix)\n\treturn XMatrix, Qc", "def _General_Iterative_Method(A, b, x0, M, N, max_iterations=200, epsilon=1e-2, w=1.0, method="General Iterative"):\n\n    residual_queue = []\n    convergences_queue = []\n    last_x = x0\n    M_inverse = np.linalg.inv(M)\n    curr_iter = 0\n    while curr_iter < max_iterations:\n        curr_x = (1 - w) * last_x + (w * M_inverse) @ (b - N @ last_x)\n        c = np.linalg.norm(A @ curr_x - b, 2) / np.linalg.norm(b, 2)\n        convergences_queue.append(np.linalg.norm(A @ curr_x - b, 2) / np.linalg.norm(A @ last_x - b, 2))\n        residual_queue.append(np.linalg.norm(A @ curr_x - b, 2))\n        if c < epsilon or curr_iter == max_iterations - 1:\n            print_graph(residual_queue, curr_iter, "residual", method, w)\n            print_graph(convergences_queue, curr_iter, "convergence rate", method, w)\n            print("Number of Iterations: " + str(curr_iter))\n            return curr_x\n        last_x = curr_x\n        curr_iter += 1\n    return "failed"", "def solve(self):\n        self.m.optimize()\n        if self.m.status == GRB.OPTIMAL:\n            self.solution = self.sol_as_mat()\n        return self.solution", "def magma_sgemv(trans, m, n, alpha, dA, ldda, dx, incx, beta,\n                dy, incy, queue):\n\n    _libmagma.magma_sgemv(trans, m, n, alpha, int(dA), ldda, dx, incx,\n                          beta, int(dy), incy, queue)", "def beta_iter(b,px,py,pyx_c,pm_size,restarts,iterations):\n    candidates = []\n    for r in range(restarts):\n\t # initialize distribution for bottleneck variable\n\t pm = np.random.rand(pm_size)+1\n\t pm /= pm.sum()\n\t pym_c = np.random.rand(py.size,pm.size)+1 # Starting point for the algorithm\n\t pym_c /= 
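# ---- Editor's sketch (illustrative, not one of the original snippets). ----
# Exercising the same idea as PCG above with SciPy: a diagonal (Jacobi)
# preconditioner M ~= diag(A)^-1 passed to the library CG. The test matrix
# is made SPD by construction; sizes are arbitrary.
import numpy as np
from scipy.sparse.linalg import LinearOperator, cg

rng = np.random.default_rng(1)
Q = rng.standard_normal((30, 30))
A = Q @ Q.T + 30.0 * np.eye(30)                      # SPD, well conditioned
b = rng.standard_normal(30)
M = LinearOperator(A.shape, dtype=float,
                   matvec=lambda r: r / np.diag(A))  # Jacobi preconditioner
x, info = cg(A, b, M=M)
assert info == 0 and np.allclose(A @ x, b, atol=1e-3)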
pym_c.sum(axis=0)\n\t # iterate the BA algorithm\n\t for i in range(iterations):\n\t\t pmx_c, z = p_mx_c(pm,px,py,pyx_c,pym_c,b)\n\t\t pm = p_m(pmx_c,px)\n\t\t pym_c = p_ym_c(pm,px,py,pyx_c,pmx_c)\n\t\t if i>0 and np.allclose(pmx_c,pmx_c_old,rtol=1e-3,atol=1e-3):\n\t\t\t\t# if the x->m mapping is not updating any more, we're at convergence and we can stop\n\t\t\t break\n\t\t pmx_c_old = pmx_c\n\t candidates.append({'past_info' : mi_x1x2_c(pm, px, pmx_c),\n\t\t\t\t\t\t 'future_info' : mi_x1x2_c(py, pm, pym_c),\n\t\t\t\t\t\t 'functional' : -np.log2(np.inner(z,px))})\n\t# among the restarts, select the result that gives the minimum\n\t# value for the functional we're actually minimizing (eq 29 in\n\t# Tishby et al 2000).\n selected_candidate = min(candidates, key=lambda c: c['functional'])\n i_p = selected_candidate['past_info']\n i_f = selected_candidate['future_info']\n return [i_p,i_f,b]", "def cgs(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None,\n atol=None):\n A, M, x, b = _make_system(A, M, x0, b)\n\n matvec = A.matvec\n psolve = M.matvec\n\n n = A.shape[0]\n if n == 0:\n return cupy.empty_like(b), 0\n b_norm = cupy.linalg.norm(b)\n if b_norm == 0:\n return b, 0\n if atol is None:\n atol = tol * float(b_norm)\n else:\n atol = max(float(atol), tol * float(b_norm))\n if maxiter is None:\n maxiter = n * 5\n\n r0 = b - matvec(x)\n\n rho = cupy.dot(r0, r0)\n\n # initialise vectors\n r = r0.copy()\n u = r0\n p = r0.copy()\n\n iters = 0\n while True:\n y = psolve(p)\n v = matvec(y)\n sigma = cupy.dot(r0, v)\n alpha = rho / sigma\n q = u - alpha * v\n\n z = psolve(u + q)\n x += alpha * z\n Az = matvec(z)\n r -= alpha * Az\n\n # Update residual norm and check convergence\n r_norm = cupy.linalg.norm(r)\n\n iters += 1\n if callback is not None:\n callback(x)\n\n if r_norm <= atol or iters >= maxiter:\n break\n\n rho_new = cupy.dot(r0, r)\n beta = rho_new / rho\n rho = rho_new\n u = r + beta * q\n p *= beta\n p += q\n p *= beta\n p += u\n\n info = 0\n if iters == maxiter and not (r_norm < atol):\n info = iters\n\n return x, info", "def linearize_and_solve(g):\n\n # initialize the sparse H and the vector b\n H = np.zeros((len(g.x), len(g.x)), dtype='float')\n b = np.zeros(len(g.x), dtype='float')\n\n # set flag to fix gauge\n needToAddPrior = True\n Fx = 0\n\n # compute the addend term to H and b for each of our constraints\n print('linearize and build system')\n\n for edge in g.edges:\n\n # pose-pose constraint\n if edge.Type == 'P':\n\n # compute idx for nodes using lookup table\n fromIdx = g.lut[edge.fromNode]\n toIdx = g.lut[edge.toNode]\n\n # get node state for the current edge\n x_i = g.x[fromIdx:fromIdx + 3]\n x_j = g.x[toIdx:toIdx + 3]\n\n # (TODO) compute the error and the Jacobians\n e, A, B = linearize_pose_pose_constraint(\n x_i, x_j, edge.measurement)\n\n # # (TODO) compute the terms\n b_i = e.transpose() @ edge.information @ A\n b_j = e.transpose() @ edge.information @ B\n H_ii = A.transpose() @ edge.information @ A\n H_ij = A.transpose() @ edge.information @ B\n H_jj = B.transpose() @ edge.information @ B\n\n # (TODO) add the terms to H matrix and b\n H[fromIdx:fromIdx + 3, fromIdx:fromIdx + 3] += H_ii\n H[toIdx:toIdx + 3, toIdx:toIdx + 3] += H_jj\n H[fromIdx:fromIdx + 3, toIdx:toIdx + 3] += H_ij\n H[toIdx:toIdx + 3, fromIdx:fromIdx + 3, ] += H_ij.transpose()\n b[fromIdx:fromIdx + 3] += b_i[0, :]\n b[toIdx:toIdx + 3] += b_j[0, :]\n\n # Add the prior for one pose of this edge\n # This fixes one node to remain at its current location\n if needToAddPrior:\n H[fromIdx:fromIdx 
+ 3, fromIdx:fromIdx +\n 3] = H[fromIdx:fromIdx + 3,\n fromIdx:fromIdx + 3] + 1000 * np.eye(3)\n needToAddPrior = False\n\n # pose-pose constraint\n elif edge.Type == 'L':\n print(\"you shouldn't be here...\")\n # compute idx for nodes using lookup table\n fromIdx = g.lut[edge.fromNode]\n toIdx = g.lut[edge.toNode]\n\n # get node states for the current edge\n x = g.x[fromIdx:fromIdx + 3]\n l = g.x[toIdx:toIdx + 2]\n\n # (TODO) compute the error and the Jacobians\n e, A, B = linearize_pose_landmark_constraint(\n x, l, edge.measurement)\n\n # (TODO) compute the terms\n b_i = e.transpose() @ edge.information @ A\n b_j = e.transpose() @ edge.information @ B\n H_ii = A.transpose() @ edge.information @ A\n H_ij = A.transpose() @ edge.information @ B\n H_jj = B.transpose() @ edge.information @ B\n\n # (TODO )add the terms to H matrix and b\n H[fromIdx:fromIdx + 3, fromIdx:fromIdx + 3] += H_ii\n H[toIdx:toIdx + 2, toIdx:toIdx + 2] += H_jj\n H[fromIdx:fromIdx + 3, toIdx:toIdx + 2] += H_ij\n H[toIdx:toIdx + 2, fromIdx:fromIdx + 3, ] += H_ij.transpose()\n b[fromIdx:fromIdx + 3] = b_i\n b[toIdx:toIdx + 2] = b_j\n # solve system\n dx = np.linalg.solve(H, b)\n\n return dx", "def MatrixFreeCG(A, b, x, tol=1e-6, maxiter=5000, quiet=True):\n\n if b.dtype != x.dtype:\n raise TaichiTypeError(f\"Dtype mismatch b.dtype({b.dtype}) != x.dtype({x.dtype}).\")\n if str(b.dtype) == \"f32\":\n solver_dtype = ti.f32\n elif str(b.dtype) == \"f64\":\n solver_dtype = ti.f64\n else:\n raise TaichiTypeError(f\"Not supported dtype: {b.dtype}\")\n if b.shape != x.shape:\n raise TaichiRuntimeError(f\"Dimension mismatch b.shape{b.shape} != x.shape{x.shape}.\")\n\n size = b.shape\n vector_fields_builder = ti.FieldsBuilder()\n p = ti.field(dtype=solver_dtype)\n r = ti.field(dtype=solver_dtype)\n Ap = ti.field(dtype=solver_dtype)\n Ax = ti.field(dtype=solver_dtype)\n if len(size) == 1:\n axes = ti.i\n elif len(size) == 2:\n axes = ti.ij\n elif len(size) == 3:\n axes = ti.ijk\n else:\n raise TaichiRuntimeError(f\"MatrixFreeCG only support 1D, 2D, 3D inputs; your inputs is {len(size)}-D.\")\n vector_fields_builder.dense(axes, size).place(p, r, Ap, Ax)\n vector_fields_snode_tree = vector_fields_builder.finalize()\n\n scalar_builder = ti.FieldsBuilder()\n alpha = ti.field(dtype=solver_dtype)\n beta = ti.field(dtype=solver_dtype)\n scalar_builder.place(alpha, beta)\n scalar_snode_tree = scalar_builder.finalize()\n succeeded = True\n\n @ti.kernel\n def init():\n for I in ti.grouped(x):\n r[I] = b[I] - Ax[I]\n p[I] = 0.0\n Ap[I] = 0.0\n\n @ti.kernel\n def reduce(p: ti.template(), q: ti.template()) -> solver_dtype:\n result = solver_dtype(0.0)\n for I in ti.grouped(p):\n result += p[I] * q[I]\n return result\n\n @ti.kernel\n def update_x():\n for I in ti.grouped(x):\n x[I] += alpha[None] * p[I]\n\n @ti.kernel\n def update_r():\n for I in ti.grouped(r):\n r[I] -= alpha[None] * Ap[I]\n\n @ti.kernel\n def update_p():\n for I in ti.grouped(p):\n p[I] = r[I] + beta[None] * p[I]\n\n def solve():\n A._matvec(x, Ax)\n init()\n initial_rTr = reduce(r, r)\n if not quiet:\n print(f\">>> Initial residual = {initial_rTr:e}\")\n old_rTr = initial_rTr\n new_rTr = initial_rTr\n update_p()\n if sqrt(initial_rTr) >= tol: # Do nothing if the initial residual is small enough\n # -- Main loop --\n for i in range(maxiter):\n A._matvec(p, Ap) # compute Ap = A x p\n pAp = reduce(p, Ap)\n alpha[None] = old_rTr / pAp\n update_x()\n update_r()\n new_rTr = reduce(r, r)\n if sqrt(new_rTr) < tol:\n if not quiet:\n print(\">>> Conjugate Gradient method converged.\")\n 
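# ---- Editor's sketch (illustrative, not one of the original snippets). ----
# What "matrix-free" means in MatrixFreeCG above, restated in SciPy terms:
# the operator is only a matvec callback (here a 1-D Poisson stencil with
# Dirichlet boundaries), never an assembled matrix.
import numpy as np
from scipy.sparse.linalg import LinearOperator, cg

n = 64

def laplacian_matvec(u):
    v = 2.0 * u
    v[1:] -= u[:-1]      # subdiagonal contribution
    v[:-1] -= u[1:]      # superdiagonal contribution
    return v

A = LinearOperator((n, n), matvec=laplacian_matvec, dtype=float)
x, info = cg(A, np.ones(n))
assert info == 0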
print(f\">>> #iterations {i}\")\n break\n beta[None] = new_rTr / old_rTr\n update_p()\n old_rTr = new_rTr\n if not quiet:\n print(f\">>> Iter = {i+1:4}, Residual = {sqrt(new_rTr):e}\")\n if new_rTr >= tol:\n if not quiet:\n print(\n f\">>> Conjugate Gradient method failed to converge in {maxiter} iterations: Residual = {sqrt(new_rTr):e}\"\n )\n succeeded = False\n\n solve()\n vector_fields_snode_tree.destroy()\n scalar_snode_tree.destroy()\n return succeeded", "def backsubstitution_numba(b, dofmap, num_dofs_per_element, mpc,\n global_indices):\n (slaves, slave_cells, cell_to_slave, cell_to_slave_offset,\n masters_local, coefficients, offsets) = mpc\n slaves_visited = numpy.empty(0, dtype=numpy.float64)\n\n # Loop through slave cells\n for (index, cell_index) in enumerate(slave_cells):\n cell_slaves = cell_to_slave[cell_to_slave_offset[index]:\n cell_to_slave_offset[index+1]]\n local_dofs = dofmap[num_dofs_per_element * cell_index:\n num_dofs_per_element * cell_index\n + num_dofs_per_element]\n\n # Find the global index of the slaves on the cell in the slaves-array\n global_slaves_index = []\n for gi in range(len(slaves)):\n if in_numpy_array(cell_slaves, slaves[gi]):\n global_slaves_index.append(gi)\n\n for slave_index in global_slaves_index:\n slave = slaves[slave_index]\n k = -1\n # Find local position of slave dof\n for local_dof in local_dofs:\n if global_indices[local_dof] == slave:\n k = local_dof\n assert k != -1\n # Check if we have already inserted for this slave\n if not in_numpy_array(slaves_visited, slave):\n slaves_visited = numpy.append(slaves_visited, slave)\n slaves_masters = masters_local[offsets[slave_index]:\n offsets[slave_index+1]]\n slaves_coeffs = coefficients[offsets[slave_index]:\n offsets[slave_index+1]]\n for (master, coeff) in zip(slaves_masters, slaves_coeffs):\n b[k] += coeff*b[master]", "def cgmat(A,x,b,M=None,max_it=100,tol=1e-8):\n if M is None:\n M= sp.diag(A)\n bnrm2 = sp.linalg.norm(b)\n r=b-A.dot(x)\n rho=sp.zeros(max_it)\n for i in range(max_it):\n z=sp.linalg.solve(M,r)\n rho[i] = sp.dot(r,z)\n if i==0:\n p=z\n else:\n beta=rho/rho[i-1]\n p=z+beta*p\n\n q=A.dot(p)\n alpha=rho/sp.dot(p,q)\n x = x+alpha*p\n r = r-alpha*q\n error = sp.linalg.norm( r ) / bnrm2\n if error <tol:\n return (x,error,i,False)\n\n return (x,error,max_it,True)", "def test_gemm_opt_double_gemm():\r\n X, Y, Z, a, b = T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()\r\n R, S, c = T.matrix(), T.matrix(), T.scalar()\r\n\r\n just_gemm([X, Y, Z, a, b, R, S, c],\r\n [Z * c + a * T.dot(X, Y) + b * T.dot(R, S).T],\r\n ishapes=[(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()],\r\n expected_nb_gemm=2)\r\n\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()]\r\n i = [X, Y, Z, a, b, R, S, c]\r\n o = [(a * T.dot(X, Y)\r\n + gemm_inplace(Z, b, S.T, R.T, T.constant(1.0).astype(config.floatX)))]\r\n try:\r\n f = inplace_func([Param(ii, mutable=True) for ii in i], o,\r\n mode='FAST_RUN', on_unused_input='ignore')\r\n for node in f.maker.fgraph.apply_nodes:\r\n if isinstance(node.op, T.Dot):\r\n raise Failure('dot in graph')\r\n if node.op == _dot22:\r\n raise Failure('_dot22 in graph')\r\n g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),\r\n on_unused_input='ignore')\r\n #for node in g.maker.fgraph.apply_nodes:\r\n # if node.op == gemm_inplace: raise Failure('gemm_inplace in graph')\r\n\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r0 = f(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n rng = 
numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r1 = g(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n max_abs_err = numpy.max(numpy.abs(r0[0] - r1[0]))\r\n eps = 1.0e-8\r\n if config.floatX == 'float32':\r\n eps = 1.0e-6\r\n if max_abs_err > eps:\r\n raise Failure(\r\n 'GEMM is computing the wrong output. max_rel_err =',\r\n max_abs_err)\r\n except Failure:\r\n for node in f.maker.fgraph.toposort():\r\n print 'GRAPH', node\r\n raise", "def Solve(M, b):\n\tm2 = [row[:]+[right] for row,right in zip(M,b) ]\n\treturn [row[-1] for row in m2] if gauss_jordan(m2) else None", "def solve_normal_cg(matvec: Callable,\n b: Any,\n ridge: Optional[float] = None,\n **kwargs) -> Any:\n def _matvec(x):\n \"\"\"Computes A^T A x.\"\"\"\n return _normal_matvec(matvec, x)\n\n if ridge is not None:\n _matvec = _make_ridge_matvec(_matvec, ridge=ridge)\n\n Ab = _rmatvec(matvec, b)\n\n return jax.scipy.sparse.linalg.cg(_matvec, Ab, **kwargs)[0]", "def optGM(objective_function: \"function\",\n set_of_mols_par: SetOfMolecules,\n subset_of_mols: SetOfMolecules,\n min_subset_of_mols: SetOfMolecules,\n chg_method: ChargeMethod,\n num_of_samples: int,\n num_of_candidates: int) -> namedtuple:\n\n print(\" Sampling...\")\n samples = lhs(num_of_samples, chg_method.params_bounds)\n\n print(\" Calculating of objective function for samples...\")\n samples_rmsd = [objective_function(sample, chg_method, min_subset_of_mols) for sample in samples]\n\n print(\"\\x1b[2K Selecting candidates...\")\n best_samples = samples[list(map(samples_rmsd.index, nsmallest(num_of_candidates * 100, samples_rmsd)))]\n best_samples_rmsd = [objective_function(sample, chg_method, set_of_mols_par) for sample in best_samples]\n candidates = best_samples[list(map(best_samples_rmsd.index, nsmallest(num_of_candidates, best_samples_rmsd)))]\n\n print(\"\\x1b[2K Local minimizating...\")\n all_loc_min_course = []\n opt_candidates = []\n for params in candidates:\n opt_params, _, loc_min_course = local_minimization(objective_function, subset_of_mols, chg_method, params)\n all_loc_min_course.append(loc_min_course[0])\n opt_candidates.append(opt_params)\n\n opt_candidates_rmsd = [objective_function(candidate, chg_method, set_of_mols_par) for candidate in opt_candidates]\n final_candidate_obj_val = nsmallest(1, opt_candidates_rmsd)\n final_candidate_index = opt_candidates_rmsd.index(final_candidate_obj_val)\n final_candidate = opt_candidates[final_candidate_index]\n\n print(\"\\x1b[2K Final local minimizating...\")\n final_params, final_obj_val, loc_min_course = local_minimization(objective_function, set_of_mols_par, chg_method, final_candidate)\n all_loc_min_course[final_candidate_index].extend(loc_min_course[0])\n\n return namedtuple(\"chgs\", [\"params\",\n \"obj_val\",\n \"loc_min_courses\"])(final_params,\n final_obj_val,\n all_loc_min_course)", "def tensorflow_optimization(m):\n\n fusing.fuse_Transpose_into_Constant(m.graph)\n fusing.fuse_MatMul_and_Add_into_Gemm(m.graph)\n other.topological_sort(m.graph)\n\n m = other.polish_model(m)\n\n # constant folding\n replacing.replace_shape_with_constant(m.graph)\n\n # constant_folding\n m = other.inference_shapes(m)\n while constant_folding.constant_folding(m.graph):\n logging.debug(\"After constant folding jobs.\")\n other.topological_sort(m.graph)\n while len(m.graph.value_info) != 0:\n m.graph.value_info.pop()\n\n m = other.inference_shapes(m)\n replacing.replace_shape_with_constant(m.graph)\n other.topological_sort(m.graph)\n m = tf_pattern_match(m)\n m = 
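# ---- Editor's sketch (illustrative, not one of the original snippets). ----
# The trick behind solve_normal_cg above, restated with SciPy: run CG on the
# normal equations A^T A x = A^T b, exposing only matvecs. Full column rank
# of A is assumed so that A^T A is SPD.
import numpy as np
from scipy.sparse.linalg import LinearOperator, cg

rng = np.random.default_rng(2)
A = rng.standard_normal((40, 8))
b = rng.standard_normal(40)
normal_op = LinearOperator((8, 8), dtype=float,
                           matvec=lambda v: A.T @ (A @ v))
x, info = cg(normal_op, A.T @ b)
assert info == 0
assert np.allclose(x, np.linalg.lstsq(A, b, rcond=None)[0], atol=1e-4)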
optimizer.optimize(m, [\"eliminate_deadend\"])\n\n eliminating.eliminate_consecutive_reshape(m.graph)\n eliminating.eliminate_Squeeze_before_Reshape(m.graph)\n other.topological_sort(m.graph)\n return m", "def gemv(self,transa_,m_,n_,alpha_,a_,x_,beta_,y_):\n _a_minlength = ((n_) * (m_))\n if ((n_) * (m_)) > 0 and a_ is not None and len(a_) != ((n_) * (m_)):\n raise ValueError(\"Array argument a is not long enough: Is %d, expected %d\" % (len(a_),((n_) * (m_))))\n if a_ is None:\n raise ValueError(\"Argument a cannot be None\")\n if a_ is None:\n raise ValueError(\"Argument a may not be None\")\n if isinstance(a_, numpy.ndarray) and a_.dtype is numpy.dtype(numpy.float64) and a_.flags.contiguous:\n _a_copyarray = False\n _a_tmp = ctypes.cast(a_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif a_ is not None:\n _a_copyarray = True\n _a_np_tmp = numpy.zeros(len(a_),numpy.dtype(numpy.float64))\n _a_np_tmp[:] = a_\n assert _a_np_tmp.flags.contiguous\n _a_tmp = ctypes.cast(_a_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _a_copyarray = False\n _a_tmp = None\n \n if ((transa_) == transpose.no):\n __tmp_var_0 = (n_);\n else:\n __tmp_var_0 = (m_);\n _x_minlength = __tmp_var_0\n if __tmp_var_0 > 0 and x_ is not None and len(x_) != __tmp_var_0:\n raise ValueError(\"Array argument x is not long enough: Is %d, expected %d\" % (len(x_),__tmp_var_0))\n if x_ is None:\n raise ValueError(\"Argument x cannot be None\")\n if x_ is None:\n raise ValueError(\"Argument x may not be None\")\n if isinstance(x_, numpy.ndarray) and x_.dtype is numpy.dtype(numpy.float64) and x_.flags.contiguous:\n _x_copyarray = False\n _x_tmp = ctypes.cast(x_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif x_ is not None:\n _x_copyarray = True\n _x_np_tmp = numpy.zeros(len(x_),numpy.dtype(numpy.float64))\n _x_np_tmp[:] = x_\n assert _x_np_tmp.flags.contiguous\n _x_tmp = ctypes.cast(_x_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _x_copyarray = False\n _x_tmp = None\n \n if ((transa_) == transpose.no):\n __tmp_var_1 = (m_);\n else:\n __tmp_var_1 = (n_);\n _y_minlength = __tmp_var_1\n if __tmp_var_1 > 0 and y_ is not None and len(y_) != __tmp_var_1:\n raise ValueError(\"Array argument y is not long enough: Is %d, expected %d\" % (len(y_),__tmp_var_1))\n if isinstance(y_,numpy.ndarray) and not y_.flags.writeable:\n raise ValueError(\"Argument y must be writable\")\n if y_ is None:\n raise ValueError(\"Argument y may not be None\")\n if isinstance(y_, numpy.ndarray) and y_.dtype is numpy.dtype(numpy.float64) and y_.flags.contiguous:\n _y_copyarray = False\n _y_tmp = ctypes.cast(y_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif y_ is not None:\n _y_copyarray = True\n _y_np_tmp = numpy.zeros(len(y_),numpy.dtype(numpy.float64))\n _y_np_tmp[:] = y_\n assert _y_np_tmp.flags.contiguous\n _y_tmp = ctypes.cast(_y_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _y_copyarray = False\n _y_tmp = None\n \n res = __library__.MSK_XX_gemv(self.__nativep,transa_,m_,n_,alpha_,_a_tmp,_x_tmp,beta_,_y_tmp)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n if _y_copyarray:\n y_[:] = _y_np_tmp", "def mLFG(a,b,m):\n \n require_integers([\"a\",\"b\",\"m\"],[a,b,m])\n \n while True:\n yield a\n a,b = b,(a*b)%m", "def trust_region_solver(M, g, d_max, max_iter=2000, stepsize=1.0e-3):\n x = g / np.linalg.norm(g) * d_max\n for _ in range(max_iter):\n # gradient ascent\n x = x + stepsize * (M @ x + g)\n # projection to 
sphere\n x = x / np.linalg.norm(x) * d_max\n ## debug\n #loss = 0.5 * x.T @ M @ x + g.T @ x\n #print(f'Loss: {loss}')\n return x", "def solve(mm):\n model = mm.model\n model.optimize()\n\n\n mm.optimal = model.status\n mm.take_snapshot()\n print \"\\nSnapshot saved as {}\".format(mm.filename)\n mm.solve_count += 1\n mm.update_filename()\n\n if model.status == gp.GRB.OPTIMAL:\n # Write a csv of the solution data\n write_solution(mm)\n\n\n return True", "def _compute_mmi_loss_exact_optimized(\n dense_fsa_vec: k2.DenseFsaVec,\n texts: List[str],\n graph_compiler: MmiTrainingGraphCompiler,\n den_scale: float = 1.0,\n beam_size: float = 8.0,\n) -> torch.Tensor:\n num_graphs, den_graphs = graph_compiler.compile(texts, replicate_den=False)\n\n device = num_graphs.device\n\n num_fsas = num_graphs.shape[0]\n assert dense_fsa_vec.dim0() == num_fsas\n\n assert den_graphs.shape[0] == 1\n\n # The motivation to concatenate num_graphs and den_graphs\n # is to reduce the number of calls to k2.intersect_dense.\n num_den_graphs = k2.cat([num_graphs, den_graphs])\n\n # NOTE: The a_to_b_map in k2.intersect_dense must be sorted\n # so the following reorders num_den_graphs.\n #\n # The following code computes a_to_b_map\n\n # [0, 1, 2, ... ]\n num_graphs_indexes = torch.arange(num_fsas, dtype=torch.int32)\n\n # [num_fsas, num_fsas, num_fsas, ... ]\n den_graphs_indexes = torch.tensor([num_fsas] * num_fsas, dtype=torch.int32)\n\n # [0, num_fsas, 1, num_fsas, 2, num_fsas, ... ]\n num_den_graphs_indexes = (\n torch.stack([num_graphs_indexes, den_graphs_indexes])\n .t()\n .reshape(-1)\n .to(device)\n )\n\n num_den_reordered_graphs = k2.index(num_den_graphs, num_den_graphs_indexes)\n\n # [[0, 1, 2, ...]]\n a_to_b_map = torch.arange(num_fsas, dtype=torch.int32).reshape(1, -1)\n\n # [[0, 1, 2, ...]] -> [0, 0, 1, 1, 2, 2, ... 
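# ---- Editor's sketch (illustrative, not one of the original snippets). ----
# The projected-gradient scheme trust_region_solver above uses, with the two
# steps labelled: ascend the quadratic model 0.5 x^T M x + g^T x, then
# renormalise back onto the sphere of radius d_max. Inputs are arbitrary.
import numpy as np

rng = np.random.default_rng(3)
B = rng.standard_normal((5, 5))
M = 0.5 * (B + B.T)                       # symmetric model Hessian
g = rng.standard_normal(5)
d_max = 1.0

x = g / np.linalg.norm(g) * d_max         # start on the sphere
for _ in range(500):
    x = x + 1e-3 * (M @ x + g)            # gradient ascent on the model
    x = x / np.linalg.norm(x) * d_max     # project back to ||x|| = d_max
model_value = 0.5 * x @ M @ x + g @ x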
]\n a_to_b_map = a_to_b_map.repeat(2, 1).t().reshape(-1).to(device)\n\n num_den_lats = k2.intersect_dense(\n num_den_reordered_graphs,\n dense_fsa_vec,\n output_beam=beam_size,\n a_to_b_map=a_to_b_map,\n )\n\n num_den_tot_scores = num_den_lats.get_tot_scores(\n log_semiring=True, use_double_scores=True\n )\n\n num_tot_scores = num_den_tot_scores[::2]\n den_tot_scores = num_den_tot_scores[1::2]\n\n tot_scores = num_tot_scores - den_scale * den_tot_scores\n loss = -1 * tot_scores.sum()\n return loss", "def test_remap_coefficients(self, months, remap_months):\n adaptor = IntervalAdaptor(\"test-month-remap\")\n from_spec = Spec(\n name=\"test-var\", dtype=\"float\", dims=[\"months\"], coords={\"months\": months}\n )\n adaptor.add_input(from_spec)\n to_spec = Spec(\n name=\"test-var\",\n dtype=\"float\",\n dims=[\"remap_months\"],\n coords={\"remap_months\": remap_months},\n )\n adaptor.add_output(to_spec)\n\n actual = adaptor.generate_coefficients(from_spec, to_spec)\n expected = np.array(\n [\n [0.333333, 0, 0, 0],\n [0.333333, 0, 0, 0],\n [0, 0.333333, 0, 0],\n [0, 0.333333, 0, 0],\n [0, 0.333333, 0, 0],\n [0, 0, 0.333333, 0],\n [0, 0, 0.333333, 0],\n [0, 0, 0.333333, 0],\n [0, 0, 0, 0.333333],\n [0, 0, 0, 0.333333],\n [0, 0, 0, 0.333333],\n [0.333333, 0, 0, 0],\n ]\n )\n\n np.testing.assert_allclose(actual, expected, rtol=1e-3)", "def augmented_system_projections(A, m, n, orth_tol, max_refin, tol):\n # Form augmented system\n K = csc_matrix(bmat([[eye(n), A.T], [A, None]]))\n # LU factorization\n # TODO: Use a symmetric indefinite factorization\n # to solve the system twice as fast (because\n # of the symmetry).\n try:\n solve = scipy.sparse.linalg.factorized(K)\n except RuntimeError:\n warn(\"Singular Jacobian matrix. Using dense SVD decomposition to \"\n \"perform the factorizations.\")\n return svd_factorization_projections(A.toarray(),\n m, n, orth_tol,\n max_refin, tol)\n\n # z = x - A.T inv(A A.T) A x\n # is computed solving the extended system:\n # [I A.T] * [ z ] = [x]\n # [A O ] [aux] [0]\n def null_space(x):\n # v = [x]\n # [0]\n v = np.hstack([x, np.zeros(m)])\n # lu_sol = [ z ]\n # [aux]\n lu_sol = solve(v)\n z = lu_sol[:n]\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.2.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # new_v = [x] - [I A.T] * [ z ]\n # [0] [A O ] [aux]\n new_v = v - K.dot(lu_sol)\n # [I A.T] * [delta z ] = new_v\n # [A O ] [delta aux]\n lu_update = solve(new_v)\n # [ z ] += [delta z ]\n # [aux] [delta aux]\n lu_sol += lu_update\n z = lu_sol[:n]\n k += 1\n\n # return z = x - A.T inv(A A.T) A x\n return z\n\n # z = inv(A A.T) A x\n # is computed solving the extended system:\n # [I A.T] * [aux] = [x]\n # [A O ] [ z ] [0]\n def least_squares(x):\n # v = [x]\n # [0]\n v = np.hstack([x, np.zeros(m)])\n # lu_sol = [aux]\n # [ z ]\n lu_sol = solve(v)\n # return z = inv(A A.T) A x\n return lu_sol[n:m+n]\n\n # z = A.T inv(A A.T) x\n # is computed solving the extended system:\n # [I A.T] * [ z ] = [0]\n # [A O ] [aux] [x]\n def row_space(x):\n # v = [0]\n # [x]\n v = np.hstack([np.zeros(n), x])\n # lu_sol = [ z ]\n # [aux]\n lu_sol = solve(v)\n # return z = A.T inv(A A.T) x\n return lu_sol[:n]\n\n return null_space, least_squares, row_space", "def JacobiSolve(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = 
x_new.copy() #replace old value\n x_new *= 0 #reset x_new\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x[column]\n x_new[row] /= A[row,row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def solve_system(A, method):\n # find b vector such that Ax = b\n # with x = [0 1 2 ... size(m)]\n size = A.shape\n true_x = list(xrange(0, size[1]))\n b = A.dot(true_x)\n\n # solve Ax = b and check solution error\n # diretti\n if method in [sla.spsolve, direttolu]:\n x = method(A, b)\n print(\"\\t\" + method.func_name + \" solved \" + \n str(size))\n return x, sol_error(x, true_x)\n\n # iterativi\n else: \n # per accellerare la convergenza dei metodi iterativi\n # dobbiamo passare un precondizionatore (una matrice M,\n # che approssima l'inversa di A)\n # http://osdir.com/ml/python-scientific-user/2011-06/msg00249.html\n try:\n P = sla.spilu(A, drop_tol=1e-5) \n except Exception as err:\n print(\"\\t\", err)\n print(\"\\tPorta le tue sporche matrici singolari altrove...\")\n return None, \"nan\"\n\n M = sla.LinearOperator(size, P.solve)\n\n global current_x\n current_x = None\n try: \n x, status = method(A, \n b, \n tol=1e-16, \n M=M,\n maxiter=500,\n callback=callback_func)\n except Exception:\n print(\"\\t\" + method.func_name + \" converged on \" + str(size))\n return current_x, sol_error(current_x, true_x)\n\n if status != 0:\n print(\"\\t\" + method.func_name + \" DIDN'T converge on \" +\n str(size) + \" in less than 500 iterations\")\n return current_x, sol_error(x, true_x)\n else:\n print(\"\\t\" + method.func_name + \" converged on \" +\n str(size))\n return current_x, sol_error(x, true_x)", "def alternative_iterative_method(x0, n, gamma, b):\n # Parameters:\n MAX_ITER = 1000\n n2 = n**2\n\n # Creating NxN versions of vector for easier indexing during iteration\n b = b.copy().reshape(n, n)\n b_transposed = b.copy().T\n x0 = x0.copy().reshape(n, n)\n x0_transposed = x0.copy().T\n x1 = x0.copy()\n x1_transposed = x0_transposed.copy()\n\n # No need for M, N, only a smaller tridiagonal system:\n H = scipy.sparse.diags((-1, 2, -1), (-1, 0, 1), shape=(n, n), format=\"csr\")\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n, n), format=\"csr\")\n M1 = gammaI + H # Corresponds to both (gI + M) & (gI + N) in equations\n M2 = gammaI - H # Corresponds to both (gI - M) & (gI - N) in equations\n\n # Preallocating RHS of equations\n RHS7 = np.zeros((n, n), dtype=np.float64)\n RHS8 = np.zeros((n, n), dtype=np.float64)\n\n k = 0\n while k < MAX_ITER:\n for i in range(n): # Loading RHS values for Equation (7):\n RHS7[:, i] = scipy.sparse.csr_matrix.dot(M2, x0_transposed[i]) + b_transposed[i]\n for i in range(n): # Solving N independent tridig mat systems related to Eq(7):\n x1[i] = scipy.sparse.linalg.spsolve(M1, RHS7[i])\n RHS8[i] = scipy.sparse.csr_matrix.dot(M2, x1[i]) + b[i] # Loading RHS values for Equation (8):\n for i in range(n): # Solving N independent tridig mat systems related to Eq(8):\n x1_transposed[i] = scipy.sparse.linalg.spsolve(M1, RHS8[:, i])\n\n k += 1\n if np.allclose(x1_transposed, x0_transposed, rtol=1e-8):\n break\n x0_transposed = x1_transposed.copy()\n\n res = x1_transposed.T.reshape(n2)\n return res, k", "def fit_m(\n self,\n lamb,\n m_init=None,\n s2_init=None,\n alpha=None,\n factr=1e7,\n 
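# ---- Editor's sketch (illustrative, not one of the original snippets). ----
# The vectorised form of the JacobiSolve sweep above: split A = D + R and
# iterate x <- D^-1 (b - R x). Strict diagonal dominance of the test matrix
# guarantees convergence.
import numpy as np

def jacobi_vectorized(A, b, tol=1e-10, maxiter=500):
    d = np.diag(A)
    R = A - np.diag(d)
    x = np.zeros_like(b, dtype=float)
    for _ in range(maxiter):
        x_new = (b - R @ x) / d
        if np.linalg.norm(x_new - x) <= tol * max(np.linalg.norm(x_new), 1.0):
            return x_new
        x = x_new
    return x

A = np.array([[4.0, 1.0, 0.0], [1.0, 5.0, 2.0], [0.0, 2.0, 6.0]])
b = np.array([1.0, 2.0, 3.0])
assert np.allclose(A @ jacobi_vectorized(A, b), b, atol=1e-8)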
maxls=50,\n m=10,\n lb=-np.Inf,\n ub=np.Inf,\n maxiter=15000,\n verbose=True,\n ):\n # check inputs\n assert lamb >= 0.0, \"lambda must be non-negative\"\n assert type(lamb) == float, \"lambda must be float\"\n assert type(factr) == float, \"factr must be float\"\n assert maxls > 0, \"maxls must be at least 1\"\n assert type(maxls) == int, \"maxls must be int\"\n assert type(m) == int, \"m must be int\"\n assert type(lb) == float, \"lb must be float\"\n assert type(ub) == float, \"ub must be float\"\n assert lb < ub, \"lb must be less than ub\"\n assert type(maxiter) == int, \"maxiter must be int\"\n assert maxiter > 0, \"maxiter be at least 1\"\n\n # init from null model if no init nodes are provided\n if m_init is None and s2_init is None:\n # fit null model to estimate the residual variance and init nodes\n self.fit_null_model_m(verbose=verbose) \n m_init = self.m0\n else:\n # check initial graph nodes\n assert m_init.shape[0] == len(self), (\n \"nodes must have size of graph\"\n )\n assert np.all(m_init > 0.0), \"nodes must be non-negative\"\n self.m0 = m_init\n self.comp_precision(s2=s2_init)\n\n # prefix alpha if not provided\n if alpha is None:\n alpha = 1.0 / self.m0.mean()\n else:\n assert type(alpha) == float, \"alpha must be float\"\n assert alpha >= 0.0, \"alpha must be non-negative\"\n\n # run l-bfgs\n obj = Objective(self)\n obj.lamb = lamb\n obj.alpha = alpha\n x0 = np.log(m_init)\n res = fmin_l_bfgs_b(\n func=loss_wrapper_m,\n x0=x0,\n args=[obj],\n factr=factr,\n m=m,\n maxls=maxls,\n maxiter=maxiter,\n approx_grad=False,\n bounds=[(lb, ub) for _ in range(x0.shape[0])],\n )\n if maxiter >= 100:\n assert res[2][\"warnflag\"] == 0, \"did not converge\"\n self.m = np.exp(res[0])\n\n # print update\n self.train_loss, _ = loss_wrapper_m(res[0], obj)\n if verbose:\n sys.stdout.write(\n (\n \"lambda={:.7f}, \"\n \"alpha={:.7f}, \"\n \"converged in {} iterations, \"\n \"train_loss={:.7f}\\n\"\n ).format(lamb, alpha, res[2][\"nit\"], self.train_loss)\n )", "def solve_cg(matvec: Callable,\n b: Any,\n ridge: Optional[float] = None,\n init: Optional[Any] = None,\n **kwargs) -> Any:\n if ridge is not None:\n matvec = _make_ridge_matvec(matvec, ridge=ridge)\n return jax.scipy.sparse.linalg.cg(matvec, b, x0=init, **kwargs)[0]", "def backsolve(self, b, transp='N'):\n \n if self.use_sub_factor:\n return self.sub_backsolve(b, transp=transp)\n \n elif b.ndim==1:\n \n if len(b) != self.m:\n raise ValueError(\"Length of b does not equal m in backsolve b.ndim==1.\")\n #assert len(b)==self.m\n \n return self.A_factorized(b.astype(numpy.float64), trans=transp)\n #return self.A_factorized.backsolve(b.astype(numpy.float64), trans=transp)\n #\n # trans 'N': solve A * x == b\n # 'T': solve A^T * x == b\n # 'H': solve A^H * x == b\n # (optional, default value 'N')\n #\n \n # Case where b is an m x n matrix\n elif b.ndim==2:\n \n b_m, b_n = b.shape\n \n if b_m != self.m:\n print \"b_m:{}, b_n:{}, m:{}\".format(b_m, b_n, self.m)\n raise ValueError(\"Length of b_m does not equal m in backsolve b.ndim==2.\")\n #assert b_m == self.m\n\n x = numpy.zeros((b_m, b_n))\n\n for k in range(b_n):\n x[:,k] = self.A_factorized(b[:,k].astype(numpy.float64), trans=transp)\n #x[:,k] = self.A_factorized.backsolve(b[:,k].astype(numpy.float64), trans=transp)\n\n return x", "def cg_solve_jax_hvp(hvp_fn,\n b,\n x_0,\n cg_iters=10,\n cg_residual_tol=1e-20,\n damping=1e-4):\n x = jnp.zeros_like(b)\n # x = x_0 #jnp.zeros_like(b) #if x_0 is None else x_0 #, copy=True) if x_0 is None else x_0\n # hvp_x0 = hvp_fn(x) + damping * x 
#jnp.dot(A, x)\n\n r = jnp.array(b, copy=True)\n # r = b - hvp_x0\n p = jnp.array(r, copy=True)\n rdotr = p.dot(r)\n\n for i in range(cg_iters):\n z = hvp_fn(p) + damping * p\n\n v = rdotr / p.dot(z)\n x += v * p\n r -= v * z\n\n s = r\n newrdotr = s.dot(r)\n mu = newrdotr / rdotr\n\n p = s + mu * p\n rdotr = newrdotr\n\n # Note: ignoring residual tol because we don't reach it and it makes this function not jit-able.\n # if rdotr < cg_residual_tol:\n # break\n return x", "def solve(self,b):\n nrows = self.nrows\n ncols = self.ncols\n newmatrix = Matrix(nrows,ncols+b.ncols) #Account for b not being just a column vector\n for i in range(nrows):\n for j in range(ncols):\n newmatrix[i,j]= self[i,j]\n for j in range(b.ncols):\n newmatrix[i,ncols+j] = b[i,j]\n newmatrix.gaussianelimination()\n x = Matrix(nrows,b.ncols)\n for i in range(x.nrows):\n for j in range(b.ncols):\n x[i,j] = newmatrix[i,j+ncols]\n return x", "def gem_solve(A, b):\r\n\tstart = time()\r\n\tn = len(A)\r\n\tU = [[0.0 for k in range(n)] for k in range(n)]\r\n\tfor k in range(n):\r\n\t\tfor i in range(k+1,n):\r\n\t\t\tA[i][k] = A[i][k]/A[k][k]\r\n\t\t\tb[i] = b[i] - A[i][k]*b[k]\r\n\t\tfor j in range(k+1,n):\r\n\t\t\tfor i in range(k+1, n):\r\n\t\t\t\tA[i][j] = A[i][j]-A[i][k]*A[k][j]\r\n\t\t\t\t\r\n\tfor i in range(n):\r\n\t\tfor j in range(n):\r\n\t\t\tif i>j:\r\n\t\t\t\tU[i][j] = 0\r\n\t\t\telse:\r\n\t\t\t\tU[i][j] = A[i][j]\r\n\t\r\n\tx, place = backward(U, b)\r\n\tend = time()\r\n\treturn x, (end-start)", "def optimize(self, num_restarts=1, max_iters=100, max_f_eval=300.0, method='Anneal'):\n dic = DictVectorizer()\n # flatten the parameters\n init_params,bounds=dic.fit_transform(self.params)\n #we minimise minus the marginal likelihood\n def objective(params_flatten):\n self.params=dic.inverse_transform(params_flatten,bounds)\n val = -self.log_marginal_likelihood()\n return val# we want to maximize it\n \n \n #run ptimisation with multiple restarts\n optml=np.inf\n for i in range(num_restarts):\n #minimise function\n if method=='Anneal':\n res=dual_annealing(objective,bounds, maxiter=max_iters, maxfun=max_f_eval, x0=init_params)\n else:\n \n res = minimize(objective, init_params, \n bounds=bounds, method=method,options={'maxiter': max_iters, 'disp': False})\n #print(\"Iteration \"+str(i)+\" \",-res.fun)\n if res.fun<optml:\n params_best=res.x #init_params \n optml=res.fun\n init_params=bounds[:,0]+(bounds[:,1]-bounds[:,0])*np.random.rand(len(bounds[:,0]))\n print(\"Iteration \"+str(i)+\" \",-res.fun)\n #params_best=res.x\n #optml=res.fun\n self.params=dic.inverse_transform(params_best,bounds)\n return -optml", "def gLFG(a,b,m,func):\n \n require_integers([\"a\",\"b\",\"m\"],[a,b,m])\n \n while True:\n yield a\n a,b = b,func(a,b)%m", "def optimize(self, x0):\n (result,f,d) = fmin_l_bfgs_b(lambda x:self.costFun(x), np.ravel(x0),lambda x: self.gradFun(x))\n print(\"optimization completed with cost: \" + str(f))\n return result.reshape(self.inp_shape)", "def cg_least_squares(A_row_indices : numpy.ndarray,\n A_col_indices : numpy.ndarray,\n A_values : numpy.ndarray, A_num_columns : int,\n b : numpy.ndarray, min_r_decrease = 0.01,\n max_iterations = 200, algorithm = 1):\n A_rows = len(A_row_indices) - 1\n A_row_indices_ptr = A_row_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int))\n A_col_indices_ptr = A_col_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int))\n A_values_ptr = A_values.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n b_length = len(b)\n b_ptr = b.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n # 
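# ---- Editor's sketch (illustrative, not one of the original snippets). ----
# Exercising the lagged-Fibonacci pattern of mLFG/gLFG above with the
# additive combining function; the first terms are just the Fibonacci
# sequence reduced mod m.
from itertools import islice

def additive_lfg(a, b, m):
    while True:
        yield a
        a, b = b, (a + b) % m

assert list(islice(additive_lfg(1, 1, 10), 10)) == [1, 1, 2, 3, 5, 8, 3, 1, 4, 5]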
generate solution vector x\n x = numpy.random.uniform(-1, 1, (A_num_columns, 1))\n x_length = A_num_columns\n x_ptr = x.ctypes.data_as(ctypes.POINTER(ctypes.c_double))\n\n final_rr = ctypes.c_double(0)\n iterations = 0\n\n if algorithm == 1:\n iterations = _dll.cg_least_squares_from_python(\n A_rows, A_num_columns, A_row_indices_ptr, A_col_indices_ptr,\n A_values_ptr, b_length, b_ptr, x_length, x_ptr,\n ctypes.c_double(min_r_decrease), max_iterations, ctypes.byref(final_rr))\n\n else:\n iterations = _dll.cg_least_squares_from_python2(\n A_rows, A_num_columns, A_row_indices_ptr, A_col_indices_ptr,\n A_values_ptr, b_length, b_ptr, x_length, x_ptr,\n ctypes.c_double(min_r_decrease), max_iterations, ctypes.byref(final_rr))\n\n return x, iterations, final_rr.value", "def _gth_solve_jit(A, out):\n n = A.shape[0]\n\n # === Reduction === #\n for k in range(n-1):\n scale = np.sum(A[k, k+1:n])\n if scale <= 0:\n # There is one (and only one) recurrent class contained in\n # {0, ..., k};\n # compute the solution associated with that recurrent class.\n n = k+1\n break\n for i in range(k+1, n):\n A[i, k] /= scale\n\n for j in range(k+1, n):\n A[i, j] += A[i, k] * A[k, j]\n\n # === Backward substitution === #\n out[n-1] = 1\n for k in range(n-2, -1, -1):\n for i in range(k+1, n):\n out[k] += out[i] * A[i, k]\n\n # === Normalization === #\n norm = np.sum(out)\n for k in range(n):\n out[k] /= norm", "def test_gemm_with_vector():\r\n X, Y, Z, a, b = XYZab()\r\n v = T.vector()\r\n\r\n def my_just_gemm(o):\r\n i = [X, Y, Z, a, b, v]\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, )]\r\n rval = just_gemm(i, o, ishapes=ishapes)\r\n\r\n my_just_gemm([v + T.dot(X, Y) * a + Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) + b * Z])\r\n my_just_gemm([v + b * Z + a * T.dot(X, Y)])\r\n my_just_gemm([v + T.dot(X, Y) * a - Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) - b * Z])\r\n my_just_gemm([v + b * Z - a * T.dot(X, Y)])\r\n\r\n #with N multiplications instead of just one\r\n my_just_gemm([v + (b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([v + Z + T.dot(X, Y)])\r\n my_just_gemm([v + Z * b + T.dot(X, Y)])\r\n my_just_gemm([v + Z + a * b * a * T.dot(X, Y)])\r\n my_just_gemm([v + (b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([Z - T.dot(X, Y) + v])\r\n my_just_gemm([Z * b - T.dot(X, Y) + v])\r\n my_just_gemm([Z - a * b * a * T.dot(X, Y) + v])", "def solveLinearSystem(aMat, bMat):\n numRow = aMat.rows\n dummyVec = mkVector(\"x\", numRow)\n dummySymbols = [v for v in dummyVec]\n #\n system = aMat, bMat\n result = sympy.linsolve(system, *dummyVec)\n lst = flatten(result)\n # Handle case of multiple solutions\n subs = {s: 1 for s in lst if s in dummySymbols}\n return evaluate(sympy.Matrix(lst), subs=subs)", "def _solver_dirty(X, R, coef_shared_, coef_specific_, Ls, alpha, beta,\n max_iter, tol, positive):\n n_tasks = len(X)\n n_samples, n_features = X[0].shape\n theta = coef_shared_ + coef_specific_\n alpha *= n_samples\n beta *= n_samples\n\n # dg = 1.\n for i in range(max_iter):\n w_max = 0.0\n d_w_max = 0.0\n for j in range(n_features):\n if Ls[j] == 0.:\n continue\n # compute residual\n grad = np.zeros(n_tasks)\n tmp1 = np.zeros(n_tasks)\n tmp2 = np.zeros(n_tasks)\n\n normtmp = 0.\n for t in range(n_tasks):\n for n in range(n_samples):\n grad[t] += X[t, n, j] * R[t, n]\n grad[t] /= Ls[j]\n tmp1[t] = grad[t] + coef_shared_[j, t]\n tmp2[t] = grad[t] + coef_specific_[j, t]\n\n normtmp += tmp1[t] ** 2\n\n normtmp = np.sqrt(normtmp)\n\n # l2 thresholding\n\n thresholdl2 = 0.\n if 
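# ---- Editor's sketch (illustrative, not one of the original snippets). ----
# The quantity _gth_solve_jit above produces is the stationary distribution
# of a Markov chain; for a small row-stochastic P it can be cross-checked by
# brute-force power iteration.
import numpy as np

P = np.array([[0.9, 0.1], [0.4, 0.6]])
pi = np.linalg.matrix_power(P, 200)[0]    # limit of the rows of P^k
assert np.allclose(pi @ P, pi)            # invariance: pi P = pi
assert np.allclose(pi, [0.8, 0.2])        # analytic answer for this chain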
normtmp:\n thresholdl2 = max(1. - alpha / (Ls[j] * normtmp), 0.)\n tmp1 *= thresholdl2\n thresholdl1 = beta / Ls[j]\n tmp2 = np.sign(tmp2) * np.maximum(np.abs(tmp2) - thresholdl1, 0.)\n if positive:\n tmp2 = np.maximum(tmp2, 0.)\n tmp1 = np.maximum(tmp1, 0.)\n new_theta = tmp1 + tmp2\n if theta[j].any():\n for t in range(n_tasks):\n R[t] += X[t, :, j] * theta[j, t]\n\n d_w_j = np.abs(theta[j] - new_theta).max()\n d_w_max = max(d_w_max, d_w_j)\n w_max = max(w_max, np.abs(tmp1 + tmp2).max())\n coef_shared_[j] = tmp1\n coef_specific_[j] = tmp2\n theta[j] = new_theta\n\n if theta[j].any():\n for t in range(n_tasks):\n R[t] -= X[t, :, j] * theta[j, t]\n\n if (w_max == 0.0 or d_w_max / w_max < tol):\n break\n\n return coef_shared_, coef_specific_, R, i", "def owlqn(input_size, eval_fun, regulariser,\n initial_x=0, m=10, beta=0.7, gamma=0.8,\n verbose=1, verbose_output=0):\n ############################### argument check ###############################\n initial_x = _get_initial_x(initial_x, input_size)\n \n _regulariser_fun = lambda x: regulariser * np.sum(np.abs(x))\n _constraint_orthant = lambda var, orthant: var * (np.sign(var) == orthant)\n \n ############################### initialisation ###############################\n S = [] # list to store the displacements\n Y = [] # list to store the differences in gradient\n x_next = initial_x\n f_x_next, grad_x_next = eval_fun(x_next)\n g_x_next = _regulariser_fun(x_next)\n alpha = 1.\n \n def _compute_direction_lbfgs(S, Y, grad):\n # S: displacement history\n # Y: difference in gradient history\n # grad: first order gradient direction\n # compute the gradient for maximisation\n \n N = len(Y)\n if N == 0: return grad\n \n q = grad\n alphas = [None for i in range(len(Y))]\n rhos = [None for i in range(len(Y))]\n for i in range(len(Y)-1,-1,-1):\n # compute the rho, alpha (and save them), and update q\n rhos[i] = 1./np.sum(Y[i] * S[i])\n alphas[i] = rhos[i] * np.sum(S[i] * q)\n q = q - alphas[i] * Y[i]\n \n H_0 = 1. 
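# ---- Editor's sketch (illustrative, not one of the original snippets). ----
# The two shrinkage rules applied blockwise in _solver_dirty above, isolated
# as proximal operators: soft-thresholding for the l1 term and group
# shrinkage for the l2 term.
import numpy as np

def prox_l1(v, t):
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def prox_group_l2(v, t):
    nrm = np.linalg.norm(v)
    return np.zeros_like(v) if nrm == 0 else max(1.0 - t / nrm, 0.0) * v

v = np.array([3.0, -0.5, 1.5])
assert np.allclose(prox_l1(v, 1.0), [2.0, 0.0, 0.5])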
* np.sum(S[-1] * Y[-1]) / np.sum(Y[-1] * Y[-1])\n z = H_0 * q\n \n for i in range(len(Y)):\n beta = rhos[i] * np.sum(Y[i] * z)\n z = z + S[i] * (alphas[i] - beta)\n \n return z\n \n ############################### main iteration ###############################\n n_iter = 1\n while True:\n f_x = f_x_next\n grad_x = grad_x_next\n g_x = g_x_next\n F_x = f_x + g_x\n x = x_next\n \n # print the message\n if verbose == 1:\n if printHeader(n_iter): print(header)\n if printContent(n_iter): print(contentFormat % (n_iter, f_x, g_x, F_x, np.sum(np.abs(grad_x)), np.sum(x > 0)))\n \n # compute the pseudo-gradient of f by computing the directional gradient of f first\n sgnx = np.sign(x)\n sgnx_0 = (sgnx == 0)\n grad_pos_x = grad_x + regulariser * (sgnx + sgnx_0)\n grad_neg_x = grad_x + regulariser * (sgnx - sgnx_0)\n pgrad_x = grad_pos_x * (grad_pos_x < 0) + grad_neg_x * (grad_neg_x > 0)\n \n # choose the orthant\n orthant = np.sign(x) * (x != 0) - np.sign(pgrad_x) * (x == 0)\n \n # compute the inverse hessian multiplied by the pseudo gradient from the S & Y history\n grad_2nd = _compute_direction_lbfgs(S, Y, pgrad_x)\n \n # constraint the gradient direction\n grad_2nd_orthant = _constraint_orthant(grad_2nd, np.sign(pgrad_x))\n \n # do line search\n alpha = 1.\n while True:\n x_next = _constraint_orthant(x - grad_2nd_orthant * alpha, orthant)\n f_x_next, grad_x_next = eval_fun(x_next)\n g_x_next = _regulariser_fun(x_next)\n if f_x_next + g_x_next <= F_x + gamma * np.sum(pgrad_x * (x_next - x)): break\n alpha *= beta\n # n += 1\n # print(n)\n \n # check convergence\n if np.abs(f_x_next - f_x) / (1e-10 + np.abs(f_x)) < 1e-6: break\n n_iter += 1\n \n # update the histories\n S.append(x_next - x)\n Y.append(grad_x_next - grad_x)\n if len(S) > m: S.pop(0)\n if len(Y) > m: Y.pop(0)\n \n ############################### output ###############################\n if verbose_output:\n return {\"x\": x_next, \"n_iter\": n_iter, \"fx\": f_x_next, \"gradx\": grad_x_next, \"gx\": g_x_next}\n else:\n return x_next", "def _stratified_model_admm(self, shape, Lap, loss_proximal_func, regulariser_proximal_func, graph_data=dict(), \\\n relative_tolerance=1e-5, absolute_tolerance=1e-5, num_jobs=4, \\\n max_cg_iters=10, max_iters=1000, rho=1, tau_decrement=2, tau_increment=2, mu=10, \\\n rho_min=0.1, rho_max=1.0):\n import multiprocessing as mp\n import scipy as sc\n optimal_solution = False\n n = np.prod(shape)\n m = Lap.shape[0]\n\n # Retrieve data from ``graph_data``\n # alpha_init\n if 'alpha_init' in graph_data:\n alpha = graph_data['alpha_init'].copy()\n else:\n alpha = np.zeros((m,) + shape)\n\n primal_residual = np.zeros(alpha.shape)\n primal_residual_tilde = np.zeros(alpha.shape)\n dual_residual = np.zeros(alpha.shape)\n dual_residual_tilde = np.zeros(alpha.shape)\n\n # alpha_tilde\n if 'alpha_tilde' in graph_data:\n alpha = graph_data['alpha_tilde'].copy()\n else:\n alpha_tilde = alpha.copy()\n # alpha_hat\n if 'alpha_hat' in graph_data:\n alpha_hat = graph_data['alpha_hat'].copy()\n else:\n alpha_hat = alpha.copy()\n # u\n if 'u' in graph_data:\n u = graph_data['u'].copy()\n else:\n u = np.zeros(alpha.shape)\n # u_tilde\n if 'u_tilde' in graph_data:\n u_tilde = graph_data['u_tilde'].copy()\n else:\n u_tilde = np.zeros(alpha.shape)\n\n # Multiprocessing\n if m <= num_jobs:\n num_jobs = m\n proximal_pool = mp.Pool(num_jobs)\n\n for iter_j in range(1, max_iters):\n\n # Update alpha\n alpha = loss_proximal_func(t=1./rho, nu=alpha_hat-u, warm_start=alpha, pool=proximal_pool)\n\n # Update alpha_tilde\n alpha_tilde = 
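# ---- Editor's sketch (illustrative, not one of the original snippets). ----
# The standard L-BFGS two-loop recursion that owlqn's _compute_direction_lbfgs
# above implements, written standalone: given displacement history S,
# gradient-difference history Y and a gradient g, it returns an
# approximation of H^-1 g.
import numpy as np

def two_loop(S, Y, g):
    q = g.astype(float).copy()
    alphas, rhos = [], []
    for s, y in zip(reversed(S), reversed(Y)):     # newest to oldest
        rho = 1.0 / np.dot(y, s)
        alpha = rho * np.dot(s, q)
        q -= alpha * y
        rhos.append(rho)
        alphas.append(alpha)
    if S:                                          # initial Hessian scaling
        q *= np.dot(S[-1], Y[-1]) / np.dot(Y[-1], Y[-1])
    for (s, y), rho, alpha in zip(zip(S, Y), reversed(rhos), reversed(alphas)):
        beta = rho * np.dot(y, q)                  # oldest to newest
        q += s * (alpha - beta)
    return q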
regulariser_proximal_func(t=1./rho, nu=alpha_hat-u_tilde, warm_start=alpha_tilde, \\\n pool=proximal_pool)\n\n # Update alpha_hat\n\n S = Lap + 2.0 * rho * sc.sparse.eye(m)\n M = sc.sparse.diags(1./S.diagonal() )\n indices = np.ndindex(shape)\n equ_rhs = rho * (alpha.T + alpha_tilde.T + u.T + u_tilde.T)\n\n for j, index in enumerate(indices):\n index_value = index[::-1]\n solution = sc.sparse.linalg.cg(S, equ_rhs[index_value], \\\n M=M, x0=alpha_hat.T[index_value], \\\n maxiter=max_cg_iters)\n solution = solution[0]\n dual_residual.T[index_value] = -rho * (solution - alpha_hat.T[index_value])\n dual_residual_tilde.T[index_value] = dual_residual.T[index_value]\n alpha_hat.T[index_value] = solution\n\n # Updates\n primal_residual = alpha - alpha_hat\n primal_residual_tilde = alpha_tilde - alpha_hat\n u += alpha - alpha_hat\n u_tilde += alpha_tilde - alpha_hat\n\n # Calculation of residual norms and epsilon values\n primal_residual_norm = np.linalg.norm(np.append(primal_residual, primal_residual_tilde), 2)\n dual_residual_norm = np.linalg.norm(np.append(dual_residual, dual_residual_tilde), 2)\n primal_eps = np.sqrt(2. * m * n) * absolute_tolerance + relative_tolerance * \\\n np.max([primal_residual_norm, dual_residual_norm])\n dual_eps = np.sqrt(2. * m * n) * absolute_tolerance + relative_tolerance * \\\n np.linalg.norm(rho * np.append(u, u_tilde))\n\n # Breaking condition!\n if primal_residual_norm <= primal_eps and \\\n dual_residual_norm <= dual_eps:\n optimal_solution = True\n break\n\n rho_update = rho\n if primal_residual_norm > mu * dual_residual_norm:\n rho_update = tau_increment * rho\n elif dual_residual_norm > mu * primal_residual_norm:\n rho_update = rho / tau_decrement\n rho_update = np.clip(rho_update, rho_min, rho_max)\n u *= rho / rho_update\n u_tilde *= rho / rho_update\n rho = rho_update\n\n proximal_pool.close()\n proximal_pool.join()\n output = {'alpha': alpha, \\\n 'alpha_tilde': alpha_tilde, \\\n 'alpha_hat': alpha_hat, \\\n 'u': u, \\\n 'u_tilde': u_tilde}\n\n # Complete later!\n result = {'iterations': iter_j, \\\n 'optimal' :optimal_solution}\n return output, result", "def optimize(self, acqf: MCAcquisitionFunction) -> Tuple[Tensor, Tensor]:\n initial_conditions = self.generate_restart_points(acqf)\n # shape = num_restarts x *acqf.batch_shape x 1 x dim_X\n if self.inequality_constraints is not None:\n org_shape = initial_conditions.shape\n initial_conditions = initial_conditions.reshape(\n self.num_restarts, -1, self.dim_x\n )\n options = {\"maxiter\": int(self.maxiter / 25)}\n with settings.propagate_grads(True):\n solutions, values = gen_candidates_scipy(\n initial_conditions=initial_conditions,\n acquisition_function=acqf,\n lower_bounds=self.bounds[0],\n upper_bounds=self.bounds[1],\n options=options,\n inequality_constraints=self.inequality_constraints,\n )\n self.add_solutions(solutions.view(-1, 1, self.dim_x).detach())\n best_ind = torch.argmax(values, dim=0)\n if self.inequality_constraints is not None:\n solutions = solutions.reshape(org_shape)\n solution = solutions.gather(\n dim=0,\n index=best_ind.view(1, *best_ind.shape, 1, 1).repeat(\n *[1] * (best_ind.dim() + 2), self.dim_x\n ),\n )\n if self.inequality_constraints is not None:\n org_shape = solution.shape\n solution = solution.reshape(1, -1, self.dim_x)\n options = {\"maxiter\": self.maxiter}\n with settings.propagate_grads(True):\n solution, value = gen_candidates_scipy(\n initial_conditions=solution,\n acquisition_function=acqf,\n lower_bounds=self.bounds[0],\n upper_bounds=self.bounds[1],\n 
options=options,\n inequality_constraints=self.inequality_constraints,\n )\n # This is needed due to nested optimization\n value = acqf(solution)\n if self.inequality_constraints is not None:\n solution = solution.reshape(org_shape)\n return solution, value.reshape(*acqf.batch_shape)", "def common_optimization(m):\n logger.info(\"Doing nodes fusion and replacement... \")\n m = other.polish_model(m)\n g = m.graph\n other.transpose_B_in_Gemm(g)\n fusing.fuse_BN_into_Gemm(g)\n fusing.fuse_BN_with_Reshape_into_Gemm(g)\n fusing.fuse_Gemm_into_Gemm(g)\n fusing.fuse_consecutive_reducemean(g)\n fusing.fuse_slice_nodes_into_conv(g)\n fusing.fuse_relu_min_into_clip(g)\n other.duplicate_shared_Flatten(g)\n replacing.replace_average_pool_with_GAP(g)\n\n m = other.polish_model(m)\n g = m.graph\n\n replacing.replace_Squeeze_with_Reshape(g)\n replacing.replace_Unsqueeze_with_Reshape(g)\n replacing.replace_Reshape_with_Flatten(g)\n replacing.replace_ReduceMean_with_GlobalAveragePool(g)\n replacing.replace_Sum_with_Adds(g)\n replacing.replace_constant_input_concat_with_pad(g)\n other.topological_sort(g)\n return m", "def m_step(self, a_in, r, v, eps, b, B, C, psize):\n r = r * a_in\n r = r / (r.sum(dim=2, keepdim=True) + eps)\n r_sum = r.sum(dim=1, keepdim=True)\n coeff = r / (r_sum + eps)\n coeff = coeff.view(b, B, C, 1)\n \n mu = torch.sum(coeff * v, dim=1, keepdim=True)\n sigma_sq = torch.sum(coeff * (v - mu)**2, dim=1, keepdim=True) + eps\n \n r_sum = r_sum.view(b, C, 1)\n sigma_sq = sigma_sq.view(b, C, psize)\n cost_h = (self.beta_u.view(C, 1) + torch.log(sigma_sq.sqrt())) * r_sum\n \n a_out = self.sigmoid(self._lambda*(self.beta_a - cost_h.sum(dim=2)))\n sigma_sq = sigma_sq.view(b, 1, C, psize)\n \n return a_out, mu, sigma_sq", "def preCondConjugateGradientSolver(b, x, linsys_setup, eps, i_max, plotInterval, mapDir):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz=True\n \n \n # Calculate residual r = b - (A^-1) x\n r = b - applyMat(x, linsys_setup)\n d = r\n\n\n delta_new = numpy.inner(r,r)\n \n\n\n\n delta_o = delta_new\n delta_array = numpy.zeros(shape=(i_max))\n \n # Iterate CG solver until converged\n i = 0\n #i_max = 300\n while (i < i_max) and (delta_new > delta_o*eps**2.):\n if i==0: t = time.time()\n \n if i%plotInterval == 0 and i != 0:\n print \"\\tNumber of iterations in the CG:\", i\n x0 = x[:nx*ny] # CMB\n x1 = x[nx*ny:nx*ny+1] # Monopole\n x2 = x[nx*ny+1:nx*ny+1+nCluster] # TSZ\n if ksz: x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n print \"\\tMonopole:\", x1\n print \"\\tTSZ:\", x2\n if ksz: print \"\\tKSZ:\", x3\n \n x0.shape = (ny,nx)\n a_l = numpy.fft.fft2(x0)\n a_l *= precond_2d\n x_test = numpy.real(numpy.fft.ifft2(a_l))\n plot(x_test,mapDir+'/CMB_%d.png'%i,'Reconstructed CMB', range=(-250., 250.))\n print delta_new, delta_o*eps**2.\n\n q = applyMat(d, linsys_setup)\n alpha = delta_new / (numpy.inner(d,q))\n x += alpha * d\n\n # What does this do? It's always false.\n if i/50. 
< numpy.int(i/50):\n r = b - applyMat(x, linsys_setup)\n else:\n r = r - alpha*q\n \n delta_old = delta_new\n delta_new = numpy.inner(r,r)\n beta = delta_new/delta_old\n d = r + beta * d\n #if i==0: print \"\\tEach iteration takes:\", time.time()-t\n i += 1\n\n x0 = x[:nx*ny].reshape((ny, nx))\n x1 = x[nx*ny:nx*ny+1]\n x2 = x[nx*ny+1:nx*ny+1+nCluster]\n if ksz:\n x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n else:\n x3 = None\n \n a_l = numpy.fft.fft2(x0) * precond_2d\n x0 = numpy.real(numpy.fft.ifft2(a_l))\n\n \n # CMB, monopole, TSZ, KSZ\n return x0, x1, x2, x3", "def solve(self, A, B):\n return tf.matrix_solve_ls(matrix=A, rhs=B)", "def solve(a, b):\n #-> getrf + getrs\n a, _, _ = get_computation_matrix(a)\n b, cv2, isM2 = get_computation_matrix(b)\n if a.get_dtype() != b.get_dtype():\n raise TypeError(\"solve: dtype of a and b are not compatible!\")\n if a.numRows() != a.numCols():\n raise ValueError(\"solve: input a is not a square matrix!\")\n t_dtype = TypeUtil.to_numpy_dtype(a.get_dtype())\n (_, _, x, _) = gesv(a, b, overwrite_a=1, overwrite_b=1, dtype=t_dtype)\n\n if cv2:\n if isM2:\n return x.to_numpy_matrix()\n else:\n return x.to_numpy_array()\n else:\n return x", "def solveLinearSingular(aMat, bVec, isParameterized=False, defaultValue=1):\n solution = aMat.gauss_jordan_solve(bVec)\n solutionVec = solution[0]\n if not isParameterized:\n parameterMat = solution[1]\n for parameter in parameterMat:\n solutionVec = solutionVec.subs(parameter, defaultValue)\n solutionVec = solutionVec.evalf()\n return solutionVec", "def local_gemm_to_gemv(node):\r\n if node.op == gemm_no_inplace:\r\n z, a, x, y, b = node.inputs\r\n if z.broadcastable == x.broadcastable == (True, False):\r\n r = gemv_no_inplace(z.dimshuffle(1), a, y.T, x.dimshuffle(1), b)\r\n return [r.dimshuffle('x', 0)]\r\n if z.broadcastable == y.broadcastable == (False, True):\r\n r = gemv_no_inplace(z.dimshuffle(0), a, x, y.dimshuffle(0), b)\r\n return [r.dimshuffle(0, 'x')]", "def magma_sgeqrf_m(ngpu, m, n, A, lda, tau, work, lwork):\n info = c_int_type()\n status = _libmagma.magma_sgeqrf_m(ngpu, m, n, int(A), lda,\n int(tau), int(work), lwork,\n ctypes.byref(info))\n magmaCheckStatus(status)", "def linear(m, b, x, xx):\n y = m*(x - xx) + b\n return y", "def gemm(self,transa_,transb_,m_,n_,k_,alpha_,a_,b_,beta_,c_):\n _a_minlength = ((m_) * (k_))\n if ((m_) * (k_)) > 0 and a_ is not None and len(a_) != ((m_) * (k_)):\n raise ValueError(\"Array argument a is not long enough: Is %d, expected %d\" % (len(a_),((m_) * (k_))))\n if a_ is None:\n raise ValueError(\"Argument a cannot be None\")\n if a_ is None:\n raise ValueError(\"Argument a may not be None\")\n if isinstance(a_, numpy.ndarray) and a_.dtype is numpy.dtype(numpy.float64) and a_.flags.contiguous:\n _a_copyarray = False\n _a_tmp = ctypes.cast(a_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif a_ is not None:\n _a_copyarray = True\n _a_np_tmp = numpy.zeros(len(a_),numpy.dtype(numpy.float64))\n _a_np_tmp[:] = a_\n assert _a_np_tmp.flags.contiguous\n _a_tmp = ctypes.cast(_a_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _a_copyarray = False\n _a_tmp = None\n \n _b_minlength = ((k_) * (n_))\n if ((k_) * (n_)) > 0 and b_ is not None and len(b_) != ((k_) * (n_)):\n raise ValueError(\"Array argument b is not long enough: Is %d, expected %d\" % (len(b_),((k_) * (n_))))\n if b_ is None:\n raise ValueError(\"Argument b cannot be None\")\n if b_ is None:\n raise ValueError(\"Argument b may not be None\")\n if isinstance(b_, numpy.ndarray) and 
b_.dtype is numpy.dtype(numpy.float64) and b_.flags.contiguous:\n _b_copyarray = False\n _b_tmp = ctypes.cast(b_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif b_ is not None:\n _b_copyarray = True\n _b_np_tmp = numpy.zeros(len(b_),numpy.dtype(numpy.float64))\n _b_np_tmp[:] = b_\n assert _b_np_tmp.flags.contiguous\n _b_tmp = ctypes.cast(_b_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _b_copyarray = False\n _b_tmp = None\n \n _c_minlength = ((m_) * (n_))\n if ((m_) * (n_)) > 0 and c_ is not None and len(c_) != ((m_) * (n_)):\n raise ValueError(\"Array argument c is not long enough: Is %d, expected %d\" % (len(c_),((m_) * (n_))))\n if isinstance(c_,numpy.ndarray) and not c_.flags.writeable:\n raise ValueError(\"Argument c must be writable\")\n if c_ is None:\n raise ValueError(\"Argument c may not be None\")\n if isinstance(c_, numpy.ndarray) and c_.dtype is numpy.dtype(numpy.float64) and c_.flags.contiguous:\n _c_copyarray = False\n _c_tmp = ctypes.cast(c_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif c_ is not None:\n _c_copyarray = True\n _c_np_tmp = numpy.zeros(len(c_),numpy.dtype(numpy.float64))\n _c_np_tmp[:] = c_\n assert _c_np_tmp.flags.contiguous\n _c_tmp = ctypes.cast(_c_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _c_copyarray = False\n _c_tmp = None\n \n res = __library__.MSK_XX_gemm(self.__nativep,transa_,transb_,m_,n_,k_,alpha_,_a_tmp,_b_tmp,beta_,_c_tmp)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n if _c_copyarray:\n c_[:] = _c_np_tmp", "def resolves_matrix(self):\n self.P = np.linalg.solve(self.M, self.f)", "def rforwardsolve(A, b, d):\n \n \n\n n = len(b)\n if np.iscomplexobj(A) or np.iscomplexobj(b):\n A = A.astype('complex128')\n b = b.astype('complex128')\n x = b.copy()\n x[0] = x[0] / A[0, 0]\n for k in range(1, n):\n lk = max(0, k-d)\n x[k] = b[k] - np.dot(A[k, lk : k], x[lk : k])\n x[k] = x[k] / A[k, k] \n return x", "def gemv(self,transa_,m_,n_,alpha_,a,x,beta_,y): # 3\n if not isinstance(transa_,transpose): raise TypeError(\"Argument transa has wrong type\")\n if a is None: raise TypeError(\"Invalid type for argument a\")\n if a is None:\n a_ = None\n else:\n try:\n a_ = memoryview(a)\n except TypeError:\n try:\n _tmparr_a = array.array(\"d\",a)\n except TypeError:\n raise TypeError(\"Argument a has wrong type\")\n else:\n a_ = memoryview(_tmparr_a)\n \n else:\n if a_.format != \"d\":\n a_ = memoryview(array.array(\"d\",a))\n \n if a_ is not None and len(a_) != ((n_) * (m_)):\n raise ValueError(\"Array argument a has wrong length\")\n if x is None: raise TypeError(\"Invalid type for argument x\")\n if x is None:\n x_ = None\n else:\n try:\n x_ = memoryview(x)\n except TypeError:\n try:\n _tmparr_x = array.array(\"d\",x)\n except TypeError:\n raise TypeError(\"Argument x has wrong type\")\n else:\n x_ = memoryview(_tmparr_x)\n \n else:\n if x_.format != \"d\":\n x_ = memoryview(array.array(\"d\",x))\n \n if ((transa_) == transpose.no):\n __tmp_var_0 = (n_);\n else:\n __tmp_var_0 = (m_);\n if x_ is not None and len(x_) != __tmp_var_0:\n raise ValueError(\"Array argument x has wrong length\")\n if y is None: raise TypeError(\"Invalid type for argument y\")\n _copyback_y = False\n if y is None:\n y_ = None\n else:\n try:\n y_ = memoryview(y)\n except TypeError:\n try:\n _tmparr_y = array.array(\"d\",y)\n except TypeError:\n raise TypeError(\"Argument y has wrong type\")\n else:\n y_ = memoryview(_tmparr_y)\n _copyback_y = True\n else:\n if y_.format != 
\"d\":\n y_ = memoryview(array.array(\"d\",y))\n _copyback_y = True\n if ((transa_) == transpose.no):\n __tmp_var_1 = (m_);\n else:\n __tmp_var_1 = (n_);\n if y_ is not None and len(y_) != __tmp_var_1:\n raise ValueError(\"Array argument y has wrong length\")\n res = self.__obj.gemv(transa_,m_,n_,alpha_,a_,x_,beta_,y_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_y:\n y[:] = _tmparr_y", "def optim_solve(\n self, x0: devices.PrimaryWeights = None, global_search: bool = False, **kwargs\n ) -> scipy.optimize.OptimizeResult:\n print(f'{\" optim_solve \":~^60s}')\n self._assert_problem_is_valid()\n if self._background is None:\n bounds = self.bounds * 2\n print(\"> No background specified, will optimise background.\")\n else:\n bounds = self.bounds\n\n if np.inf in self._target_contrast:\n print(\"> Aiming to maximise contrast.\")\n\n elif -np.inf in self._target_contrast:\n print(\"> Aiming to minimize contrast.\")\n\n constraints = [\n {\"type\": \"eq\", \"fun\": self.silencing_constraint, \"tol\": 1e-04}\n ]\n\n if x0 is None:\n x0 = self.initial_guess_x0()\n \n if not global_search: # Local minimization\n\n default_options = {\"iprint\": 2, \"disp\": True, \"ftol\": 1e-08}\n options = kwargs.pop(\"options\", default_options)\n\n print(\"> Performing local optimization with SLSQP.\")\n result = scipy.optimize.minimize(\n fun=self.objective_function,\n x0=x0,\n method=\"SLSQP\",\n bounds=bounds,\n constraints=constraints,\n options=options,\n **kwargs,\n )\n\n elif global_search: # Global minimization\n print(\n \"> Performing global optimization with basinhopping and SLSQP\"\n )\n\n # Configure global defaults\n disp = kwargs.pop(\"disp\", True)\n # Configure local defaults\n default_minimizer_kwargs = {\n \"method\": \"SLSQP\",\n \"constraints\": constraints,\n \"bounds\": bounds,\n \"options\": {\"iprint\": 2, \"disp\": False},\n }\n minimizer_kwargs = kwargs.pop(\n \"minimizer_kwargs\", default_minimizer_kwargs\n )\n\n # Do optimization\n result = scipy.optimize.basinhopping(\n func=self.objective_function,\n x0=x0,\n minimizer_kwargs=minimizer_kwargs,\n disp=disp,\n **kwargs,\n )\n\n return result", "def gth_solve(A, overwrite=False, use_jit=True):\n A1 = np.array(A, dtype=float, copy=not overwrite, order='C')\n # `order='C'` is for use with Numba <= 0.18.2\n # See issue github.com/numba/numba/issues/1103\n\n if len(A1.shape) != 2 or A1.shape[0] != A1.shape[1]:\n raise ValueError('matrix must be square')\n\n n = A1.shape[0]\n x = np.zeros(n)\n\n if use_jit:\n _gth_solve_jit(A1, x)\n return x\n\n # if not using jit\n # === Reduction === #\n for k in range(n-1):\n scale = np.sum(A1[k, k+1:n])\n if scale <= 0:\n # There is one (and only one) recurrent class contained in\n # {0, ..., k};\n # compute the solution associated with that recurrent class.\n n = k+1\n break\n A1[k+1:n, k] /= scale\n\n A1[k+1:n, k+1:n] += np.dot(A1[k+1:n, k:k+1], A1[k:k+1, k+1:n])\n\n # === Backward substitution === #\n x[n-1] = 1\n for k in range(n-2, -1, -1):\n x[k] = np.dot(x[k+1:n], A1[k+1:n, k])\n\n # === Normalization === #\n x /= np.sum(x)\n\n return x", "def optimize_restarts(self, num_restarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):\r\n initial_parameters = self._get_params_transformed()\r\n\r\n if parallel:\r\n try:\r\n jobs = []\r\n pool = mp.Pool(processes=num_processes)\r\n for i in range(num_restarts):\r\n self.randomize()\r\n job = pool.apply_async(opt_wrapper, args=(self,), kwds=kwargs)\r\n jobs.append(job)\r\n\r\n pool.close() # signal that no 
more data coming in\r\n pool.join() # wait for all the tasks to complete\r\n except KeyboardInterrupt:\r\n print \"Ctrl+c received, terminating and joining pool.\"\r\n pool.terminate()\r\n pool.join()\r\n\r\n for i in range(num_restarts):\r\n try:\r\n if not parallel:\r\n self.randomize()\r\n self.optimize(**kwargs)\r\n else:\r\n self.optimization_runs.append(jobs[i].get())\r\n\r\n if verbose:\r\n print(\"Optimization restart {0}/{1}, f = {2}\".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt))\r\n except Exception as e:\r\n if robust:\r\n print(\"Warning - optimization restart {0}/{1} failed\".format(i + 1, num_restarts))\r\n else:\r\n raise e\r\n\r\n if len(self.optimization_runs):\r\n i = np.argmin([o.f_opt for o in self.optimization_runs])\r\n self._set_params_transformed(self.optimization_runs[i].x_opt)\r\n else:\r\n self._set_params_transformed(initial_parameters)", "def _optimize(self, objective):\n # Initial value\n initial = self.get_initial()[0]\n\n if self.vector_to_matrix_transform is not None:\n initial = self.vector_to_matrix_transform(initial)\n\n if self.solver_type is 'NelderMead' or self.solver_type is 'ParticleSwarm':\n initial = None\n\n # Create tensorflow variable\n if self.matrix_manifold_dimension is None:\n x_tf = tf.Variable(tf.zeros(self.dimension, dtype=tf.float64))\n else:\n x_tf = tf.Variable(tf.zeros([self.matrix_manifold_dimension, self.matrix_manifold_dimension], dtype=tf.float64))\n\n # Cost function for pymanopt\n def objective_fct(x):\n if self.matrix_to_vector_transform_tf is not None:\n # Reshape x from matrix to vector form to compute the objective function (tensorflow format)\n x = self.matrix_to_vector_transform_tf(x, self.matrix_manifold_dimension)\n return objective(x)[0]\n\n # Transform the cost function to tensorflow function\n cost = tf.py_function(objective_fct, [x_tf], tf.float64)\n\n # Gradient function for pymanopt\n def objective_grad(x):\n if self.matrix_to_vector_transform is not None:\n # Reshape x from matrix to vector form to compute the gradient\n x = self.matrix_to_vector_transform(x)\n\n # Compute the gradient\n grad = np.array(objective(x)[1])[0]\n\n if self.vector_to_matrix_transform is not None:\n # Reshape the gradient in matrix form for the optimization on the manifold\n grad = self.vector_to_matrix_transform(grad)\n return grad\n\n # Define pymanopt problem\n problem = pyman.Problem(manifold=self.manifold, cost=cost, egrad=objective_grad, arg=x_tf, verbosity=2)\n\n # Optimize the parameters of the problem\n opt_x, opt_log = self.solver.solve(problem, x=initial)\n\n if self.matrix_to_vector_transform is not None:\n # Reshape the optimum from matrix to vector form\n opt_x = self.matrix_to_vector_transform(opt_x)\n\n # Format the result to fit with GPflowOpt\n result = sc_opt.OptimizeResult(x=opt_x, fun=opt_log['final_values']['f(x)'], nit=opt_log['final_values']['iterations'], message=opt_log['stoppingreason'], success=True)\n\n return result", "def solve(self):\n # check for jacobian and set it if present and to be used\n if self.use_sparse:\n if self._use_jac and hasattr(self.problem,'sparse_jac'):\n jac = self.problem.sparse_jac\n else:\n jac = None\n else:\n if self._use_jac and hasattr(self.problem,'jac'):\n jac = self.problem.jac\n else:\n jac = None\n \n # Initialize solver and solve \n \n solved = False\n local_min = False\n\n res = N.zeros(self.x0.__len__())\n while (not solved) and self.reg_count < 2:\n try:\n if self._use_fscale:\n 
self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,self.fscale)\n else:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,None)\n start = time.clock()\n res = self.solver.KINSOL_solve(not self._use_ls)\n stop = time.clock()\n self.exec_time += (stop - start)\n solved = True\n except KINError as error:\n if error.value == 42:\n # Try the heuristic\n if hasattr(self.problem, 'get_heuristic_x0'):\n print \"----------------------------------------------------\"\n print \" Solver stuck with zero step-length.\"\n print \"----------------------------------------------------\"\n print \"The following variables have start value zero\"\n print \"and min set to zero causing the zero step-lenght.\"\n print \"These settings are either set by default or by user.\"\n print \"\"\n\n self.x0 = self.problem.get_heuristic_x0()\n self.reg_count += 1\n \n print \"\"\n print \"This setting (start and min to zero) can often\"\n print \"cause problem when initializing the system. \"\n print \"\"\n print \"To avoid this the above variables have\"\n print \"their start attributes reset to one.\"\n print \"\"\n print \"Trying to solve the system again...\"\n else:\n raise KINSOL_Exception(\"Regularization failed due to constraints, tried getting heuristic initial guess but failed.\")\n \n\n elif (error.value == 2):\n print \"---------------------------------------------------------\"\n print \"\"\n print \" !!! WARNING !!!\"\n print \"\"\n print \" KINSOL has returned a result but the algorithm has converged\"\n print \" to a local minima, the initial values are NOT consistant!\"\n print \"\"\n print \"---------------------------------------------------------\"\n solved = True\n local_min = True\n else:\n # Other error, send onward as exception\n self.problem.check_constraints(res)\n raise KINSOL_Exception(error.msg[error.value])\n \n if not solved:\n self.solver.Free_KINSOL()\n raise KINSOL_Exception(\"Algorithm exited solution loop without finding a solution, please contact Assimulo support.\")\n\n if self.check_with_model:\n self.problem.check_constraints(res)\n if not local_min:\n print \"Problem sent to KINSOL solved.\"\n \n return res", "def JacobiSolve_Short(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n x_new *= 0 #reset x_new\n #update is (b - whole row * x + diagonal part * x)/diagonal\n x_new = (b - np.dot(A,x)+ A.diagonal()*x)/A.diagonal()\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def moment0_map(gal_index,quant='m', res=0.5, plane='xy', units='Jy', **kwargs):\n \n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n #print('TEST, fixing R_max = 60')\n p.gal_index = gal_index\n # p.R_max = 60\n \n location = aux.moment0_map_location(res=res,plane=plane,gal_index=p.gal_index)\n \n # Getting matrix with projected emmision values: \n #momentmap = np.load(location, allow_pickle=True)\n #pdb.set_trace()\n if p.ow:\n print('Overwrite is on - creating')\n 
aux.convert_cell_data_to_regular_grid(res=res, plane=plane, gal_index=p.gal_index)\n momentmap = np.load(location, allow_pickle=True)\n #try:\n # momentmap = np.load(location, allow_pickle=True)\n # print('Found stored momentmap data for %i' % p.gal_index)\n # print(location)\n # if p.ow:\n # print('But overwrite is on - creating')\n # aux.convert_cell_data_to_regular_grid(res=res, plane=plane, gal_index=p.gal_index)\n # momentmap = np.load(location, allow_pickle=True)\n #except:\n # print('Did not find stored momentmap data for %i - creating' % p.gal_index)\n # aux.convert_cell_data_to_regular_grid(res=res, plane=plane, gal_index=p.gal_index)\n # momentmap = np.load(location, allow_pickle=True)\n \n n = momentmap[-1]\n momentmap = momentmap[:-1]\n indexes = momentmap[-1]\n index1, index2 = int(indexes[1]), int(indexes[2])\n\n # Getting the desired quantity to create the momentmap:\n dictionary = p.moment0_dict\n\n num = dictionary[quant]\n lumus = np.array(momentmap[:,3])\n lum = []\n mass = []\n metal = []\n for prop in lumus:\n if (quant == 'Z') | (quant == 'G0') | (quant == 'ne_mw') | (quant == 'Te_mw') | (quant == 'Tk_mw'):\n lum.append(prop[num]/prop[0])\n else:\n lum.append(prop[num])\n lum = np.array(lum)\n\n if 'L_' in quant:\n print('Sum over %s image: %.2f Lsun' % (quant,np.sum(lum)*6))\n print('Or: %.2f K km/s pc^2' % (aux.Lsun_to_K_km_s_pc2(np.sum(lum)*6,quant.replace('L_',''))))\n print('Or: %.2f K km/s pc^2' % (aux.Lsun_to_K_km_s_pc2(1.8e8,quant.replace('L_',''))))\n lum = lum / (res**2)\n \n # Converting to Jy*km/s / kpc^2 units:\n if units == 'Jy':\n if 'L_' in quant:\n quant_name = quant.replace('L_','')\n frequencies = p.freq\n \n z = p.zred\n D = 10 # Mpc (Luminosity Distance)\n freq = frequencies[quant_name]\n \n lum = lum*(1+z) / (1.04e-3 * D**2 * freq)\n # Soloman et al. 
1997\n \n # Creating momentmaps:\n ax1,ax2 = momentmap[:, 1], momentmap[:, 2]\n \n nrows, ncols = int(n[1]), int(n[2])\n grid = lum.reshape((nrows, ncols))\n #pdb.set_trace()\n # grid = np.flipud(grid)\n # normal = mpl.colors.Normalize(vmin = min(lum), vmax = max(lum))\n\n # Setting 0 values to something very low\n print(len(lum))\n print(len(lum[lum == 0]))\n grid[grid == 0] = 1e-30\n grid[np.isnan(grid)] = 1e-30\n\n # Default min,max values\n if p.log: grid = np.log10(grid)\n if (not p.vmin) : \n p.vmin = np.max(grid)/1e5\n if p.log: p.vmin = np.max(grid) - 5\n if (not p.vmax) : \n p.vmax = 5*np.max(grid)\n if p.log: p.vmax = np.max(grid)\n \n if quant == 'Z':\n p.vmin = 0.05\n p.vmax = 3\n\n if p.add:\n fig,ax = plt.gcf(),p.ax\n else:\n fig = plt.figure(figsize=(8,6))\n ax = fig.add_axes([0.15, 0.15, 0.8, 0.8]) \n ax.axis('equal')\n\n if not p.R_max:\n gal_ob = gal.galaxy(p.gal_index)\n p.R_max = gal_ob.R_max\n grid = np.flipud(grid) \n if p.rotate:\n grid = np.rot90(grid)\n grid = np.rot90(grid)\n gal_ob = gal.galaxy(p.gal_index)\n #cell_data = gal_ob.cell_data.get_dataframe()\n #extent = np.max(np.abs(cell_data[['x','y','z']].values))\n if p.R_max:\n extent = 1*p.R_max\n else:\n extent = 50\n cs = ax.imshow(grid, extent=(-extent, extent, -extent, extent),\\\n vmin=p.vmin, vmax=p.vmax, interpolation='nearest', cmap=p.cmap)\n print(extent)\n # Add half-light radius\n x_axis = np.linspace(-extent,extent,grid.shape[0])\n y_axis = np.linspace(-extent,extent,grid.shape[1])\n x,y = np.meshgrid(x_axis,y_axis)\n r = np.sqrt(x**2 + y**2)\n r_bins = np.linspace(0,r.max(),200)\n L_bins = np.zeros(len(r_bins)-1)\n l0 = 0\n for i in range(len(r_bins)-1):\n L_bins[i] = np.sum(10.**grid[(r < r_bins[i+1])])\n R_half = r_bins[1::][L_bins >= 0.5*L_bins.max()][0]\n print('R_half: ',R_half)\n circle = plt.Circle((0,0),R_half,ec='green',fc=None,fill=False,lw=3,ls='--')\n ax.add_patch(circle)\n\n #if p.R_max: extent = p.R_max\n print(p.R_max,extent)\n ax.set_xlim([-1.1*extent,1.1*extent])\n ax.set_ylim([-1.1*extent,1.1*extent])\n\n\n if num == 0:\n #plt.title('mass density')\n labels = 'log surface density (M$_{\\odot}$ / kpc$^2$)' \n if 'L_' in quant:\n #plt.title(quant + ' density')\n if units == 'Jy':\n labels = 'Jy${\\cdot}$km/s / kpc$^2$'\n else:\n labels = 'log surface brightness density (L$_{\\odot}$ / kpc$^2$)'\n if quant == 'Z': \n labels = 'log Z (Z$_{\\odot}$)'\n if quant == 'FUV': \n labels = 'log FUV flux (G$_{0}$)'\n\n if not p.add: plt.xlabel(plane[0]+' [kpc]')\n if not p.add: plt.ylabel(plane[1]+' [kpc]')\n\n formatter = mpl.ticker.LogFormatterExponent(10, labelOnlyBase=False, minor_thresholds=(100,20))\n if p.legend: \n if not p.label: labels = ''\n cbar = fig.colorbar(cs, label=labels, pad=0, shrink=0.85)#0.5)#\n \n if p.savefig:\n plt.tight_layout()\n if not os.path.isdir(p.d_plot + 'moment0/'): os.mkdir(p.d_plot + 'moment0/') \n plt.savefig(p.d_plot + 'moment0/moment0_%i_%s%s' % (p.gal_index,p.sim_name,p.sim_run) + '_' + plane + '_res' + str(res) +'_'+ quant.replace('(','').replace(')','') + '.png',dpi=500)", "def minimize_pygmo_np(func, x0, bounds, origin, algo_name, algo_options, gradient=None):\n if origin == \"pygmo\" and algo_name != \"simulated_annealing\":\n assert (\n \"popsize\" in algo_options\n ), f\"For genetic optimizers like {algo_name}, popsize is mandatory.\"\n assert (\n \"gen\" in algo_options\n ), f\"For genetic optimizers like {algo_name}, gen is mandatory.\"\n\n prob = _create_problem(func, bounds, origin, gradient)\n algo = _create_algorithm(algo_name, algo_options, 
origin)\n pop = _create_population(prob, algo_options, x0)\n evolved = algo.evolve(pop)\n result = _process_pygmo_results(evolved)\n\n return result", "def sub_backsolve(self, b, transp='N'):\n \n # Case where b, and xsol are 1-D arrays\n if b.ndim==1:\n \n print \"Running sub_backsolve routine b.ndim=1.\"\n \n # b must have m elements or this doesn't make sense\n if len(b)!=self.m:\n raise ValueError(\"Length of b does not equal m in sub_backsolve b.ndim==1.\")\n #assert len(b)==self.m\n \n # Remove the known part from b\n bpart = b - self.r\n \n # Get the unknown part of b\n bsub = bpart[self.unknown_inds]\n \n # compute the unknown displacements\n xsub = self.Asub_factorized(bsub.astype(numpy.float64), trans=transp)\n #xsub = self.Asub_factorized.backsolve(bsub.astype(numpy.float64), trans=transp)\n \n # reconstruct the full solution vector\n x = numpy.zeros_like(b);\n x[self.unknown_inds] = xsub;\n x[self.xinds] = self.xsol;\n\n # Case where b is an m x p matrix, and xsol is an n x p matrix\n elif b.ndim==2:\n \n print \"Running sub_backsolve routine b.ndim=2.\"\n \n b_m, b_p = b.shape\n \n if b_m != self.m:\n raise ValueError('b_m not equal to self.m')\n if b_p != self.xsol.shape[1]:\n raise ValueError('b_p not equal to self.xsol.shape[1]')\n\n x = numpy.zeros((b_m, b_p))\n \n bpart = b - self.r\n bsub = bpart[self.unknown_inds,:]\n\n for k in range(b_p):\n xsub = self.Asub_factorized(bsub[:,k].astype(numpy.float64), trans=transp)\n #xsub = self.Asub_factorized.backsolve(bsub[:,k].astype(numpy.float64), trans=transp)\n x[self.unknown_inds,k] = xsub;\n x[self.xinds,k] = self.xsol[:,k]\n \n print \"Done with sub_backsolve.\"\n\n return x", "def minimize(fun, \n bounds = None, \n value_limit = math.inf,\n num_retries = 1000,\n logger = None,\n workers = mp.cpu_count(),\n popsize = 31, \n max_evaluations = 50000, \n capacity = 500,\n stop_fittness = None,\n optimizer = None,\n ):\n\n if optimizer is None:\n optimizer = de_cma(max_evaluations, popsize, stop_fittness) \n store = Store(bounds, capacity = capacity, logger = logger)\n return retry(fun, store, optimizer.minimize, num_retries, value_limit, workers)", "def gemm(self,transa_,transb_,m_,n_,k_,alpha_,a,b,beta_,c): # 3\n if not isinstance(transa_,transpose): raise TypeError(\"Argument transa has wrong type\")\n if not isinstance(transb_,transpose): raise TypeError(\"Argument transb has wrong type\")\n if a is None: raise TypeError(\"Invalid type for argument a\")\n if a is None:\n a_ = None\n else:\n try:\n a_ = memoryview(a)\n except TypeError:\n try:\n _tmparr_a = array.array(\"d\",a)\n except TypeError:\n raise TypeError(\"Argument a has wrong type\")\n else:\n a_ = memoryview(_tmparr_a)\n \n else:\n if a_.format != \"d\":\n a_ = memoryview(array.array(\"d\",a))\n \n if a_ is not None and len(a_) != ((m_) * (k_)):\n raise ValueError(\"Array argument a has wrong length\")\n if b is None: raise TypeError(\"Invalid type for argument b\")\n if b is None:\n b_ = None\n else:\n try:\n b_ = memoryview(b)\n except TypeError:\n try:\n _tmparr_b = array.array(\"d\",b)\n except TypeError:\n raise TypeError(\"Argument b has wrong type\")\n else:\n b_ = memoryview(_tmparr_b)\n \n else:\n if b_.format != \"d\":\n b_ = memoryview(array.array(\"d\",b))\n \n if b_ is not None and len(b_) != ((k_) * (n_)):\n raise ValueError(\"Array argument b has wrong length\")\n if c is None: raise TypeError(\"Invalid type for argument c\")\n _copyback_c = False\n if c is None:\n c_ = None\n else:\n try:\n c_ = memoryview(c)\n except TypeError:\n try:\n 
_tmparr_c = array.array(\"d\",c)\n except TypeError:\n raise TypeError(\"Argument c has wrong type\")\n else:\n c_ = memoryview(_tmparr_c)\n _copyback_c = True\n else:\n if c_.format != \"d\":\n c_ = memoryview(array.array(\"d\",c))\n _copyback_c = True\n if c_ is not None and len(c_) != ((m_) * (n_)):\n raise ValueError(\"Array argument c has wrong length\")\n res = self.__obj.gemm(transa_,transb_,m_,n_,k_,alpha_,a_,b_,beta_,c_)\n if res != 0:\n raise Error(rescode(res),\"\")\n if _copyback_c:\n c[:] = _tmparr_c", "def solver_mtw(Xf, mXf, Ls, y, M, alpha, beta1, beta2, epsilon, gamma, coef1,\n coef2, R, b1, b2, sigmas, concomitant=False,\n stable=False, tol=1e-4, max_iter=1000, tol_ot=1e-5,\n max_iter_ot=20, max_iter_cd=2000,\n positive=False, n_jobs=1, tol_cd=1e-5, gpu=False,\n ot_threshold=0):\n log = {'loss': [], 'dloss': [], 'log_sinkhorn1': [], 'log_sinkhorn2': [],\n 'stable': stable, \"objcd\": [], \"fot1\": [0.], \"fot2\": [0.],\n 'reweighting_loss': [], \"n_coords\": [], \"obj\": 0.}\n n_samples, n_features = Xf[0].shape\n n_tasks = len(Xf)\n if n_jobs == -1:\n n_jobs = n_tasks\n marginals1, marginals2 = np.ones((2, n_tasks, n_features)) / n_features\n\n theta1 = coef1.copy()\n theta2 = coef2.copy()\n theta = theta1 - theta2\n\n thetaold = theta.copy()\n\n ot_img = True\n if len(M) == n_features:\n ot_img = False\n\n update_ot_1 = set_ot_func(stable, ot_img)\n update_ot_2 = set_ot_func(stable, ot_img)\n\n xp = utils.set_module(gpu)\n M = xp.asarray(- M / epsilon)\n if b1 is not None:\n b1 = xp.asarray(b1)\n if b2 is not None:\n b2 = xp.asarray(b2)\n\n thetabar1 = np.ones_like(coef1).mean(axis=-1)\n thetabar2 = np.ones_like(coef2).mean(axis=-1)\n\n if positive:\n theta2 *= 0.\n thetabar2 *= 0.\n theta = theta1\n a = n_samples * alpha * gamma\n beta1 = n_samples * beta1\n beta2 = n_samples * beta2\n\n if concomitant:\n sigma0 = 0.01 * np.linalg.norm(y, axis=1).min() / (n_samples ** 0.5)\n else:\n sigma0 = 0.\n with Parallel(n_jobs=n_jobs, backend=\"threading\") as pll:\n if alpha == 0.:\n theta, R, sigmas, mx = update_coefs(pll, Xf, y, theta, R,\n Ls, marginals1,\n sigmas, a, beta1,\n sigma0,\n tol=tol_cd,\n max_iter=max_iter_cd,\n positive=positive)\n obj = 0.5 * (R ** 2).sum(axis=1).dot(1 / sigmas) / n_samples\n obj += beta1 * abs(theta).sum() + 0.5 * sigmas.sum()\n theta1, theta2 = utils.get_unsigned(theta)\n log['loss'].append(obj)\n else:\n for i in range(max_iter):\n obj = 0.\n if not positive:\n Y1 = utils.residual(Xf, - theta2, y)\n else:\n Y1 = y\n theta1, R, sigmas, mxp = update_coefs(pll, Xf, Y1, theta1, R,\n Ls,\n marginals1,\n sigmas,\n a, beta1,\n sigma0,\n tol=tol_cd,\n max_iter=max_iter_cd)\n if not positive:\n Y2 = utils.residual(Xf, theta1, y)\n theta2, R, sigmas, mx = update_coefs(pll, mXf, Y2, theta2,\n R,\n Ls,\n marginals2,\n sigmas,\n a, beta2,\n sigma0,\n tol=tol_cd,\n max_iter=max_iter_cd)\n theta = theta1 - theta2\n else:\n theta = theta1\n\n dx = abs(theta - thetaold) / max(1, thetaold.max(),\n theta.max())\n dx = dx.max()\n thetaold = theta.copy()\n if alpha:\n if (theta1 > ot_threshold).any(0).all():\n fot1, log_ot1, marginals1, b1, q1 = \\\n update_ot_1(theta1, M, epsilon, gamma,\n b=b1, tol=tol_ot,\n max_iter=max_iter_ot,\n threshold=ot_threshold)\n if fot1 is None or not theta1.max(0).all():\n warnings.warn(\"Numerical errors. 
Moving in \"\n \"log-domain.\")\n b1 = xp.log(b1 + 1e-100, out=b1)\n stable = True\n update_ot_1 = set_ot_func(True, ot_img)\n fot1, log_ot1, marginals1, b1, q1 = \\\n update_ot_1(theta1, M, epsilon, gamma, b=b1,\n tol=tol_ot, max_iter=max_iter_ot,\n threshold=ot_threshold)\n\n log[\"log_sinkhorn1\"].append(log_ot1)\n thetabar1 = q1\n log[\"fot1\"].append(fot1)\n obj += alpha * fot1\n if not positive and (theta2 > ot_threshold).any(0).all():\n fot2, log_ot2, marginals2, b2, q2 = \\\n update_ot_2(theta2, M, epsilon, gamma,\n b=b2, tol=tol_ot, max_iter=max_iter_ot)\n\n if fot2 is None or not theta2.max(0).all():\n warnings.warn(\"Numerical errors. Moving in \"\n \"log-domain.\")\n b2 = xp.log(b2 + 1e-100, out=b2)\n stable = True\n update_ot_2 = set_ot_func(True, ot_img)\n fot2, log_ot2, marginals2, b2, q2 = \\\n update_ot_2(theta2, M, epsilon, gamma,\n b=b2, tol=tol_ot,\n max_iter=max_iter_ot)\n\n log[\"log_sinkhorn2\"].append(log_ot2)\n thetabar2 = q2\n log[\"fot2\"].append(fot2)\n obj += alpha * fot2\n\n log['loss'].append(obj)\n log['dloss'].append(dx)\n\n if dx < tol:\n break\n if i == max_iter - 1:\n warnings.warn('Objective did not converge.' +\n ' You might want' +\n ' to increase the number of iterations.' +\n ' Fitting data with very small alpha and' +\n ' beta may cause precision problems.',\n ConvergenceWarning)\n log['stable'] = stable\n\n if positive:\n theta2 *= 0.\n thetabar2 = xp.zeros_like(thetabar1)\n try:\n thetabar2 = thetabar2.get()\n except AttributeError:\n pass\n return (theta1, theta2, thetabar1, thetabar2, log, sigmas, b1, b2, R)", "def test_gemm_opt0():\r\n X, Y, Z, a, b = XYZab()\r\n\r\n just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a + Z * b])\r\n just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) + b * Z])\r\n just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [T.dot(X, Y) * a - Z * b])\r\n just_gemm([X, Y, Z, a, b], [a * T.dot(X, Y) - b * Z])\r\n just_gemm([X, Y, Z, a, b], [b * Z - a * T.dot(X, Y)])\r\n\r\n #with transposes (transposes should be pushed through dot in canonicalize)\r\n just_gemm([X, Y, Z, a, b], [b * Z.T - a * T.dot(Y.T, X.T)])\r\n just_gemm([X, Y, Z, a, b], [b * Z.T + a * b * T.dot(X, Y).T])\r\n just_gemm([X, Y, Z, a, b], [b * Z + a * T.dot(X, Y).T],\r\n ishapes=[(5, 3), (3, 4), (4, 5), (), ()])\r\n\r\n #with N multiplications instead of just one\r\n just_gemm([X, Y, Z, a, b], [(b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n just_gemm([X, Y, Z, a, b], [Z + T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z * b + T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z + a * b * a * T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [(b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n just_gemm([X, Y, Z, a, b], [Z - T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z * b - T.dot(X, Y)])\r\n just_gemm([X, Y, Z, a, b], [Z - a * b * a * T.dot(X, Y)])", "def magma_sgetrf_m(ngpu,m, n, A, lda, ipiv):\n\n info = c_int_type()\n status = _libmagma.magma_sgetrf_m(ngpu,m, n, int(A), lda,\n int(ipiv), ctypes.byref(info))\n magmaCheckStatus(status)", "def solver_options(lgmres_tol=1e-5,\n lgmres_maxiter=1000,\n lgmres_inner_m=39,\n lgmres_outer_k=3,\n least_squares_lsmr_damp=0.0,\n least_squares_lsmr_atol=1e-6,\n least_squares_lsmr_btol=1e-6,\n least_squares_lsmr_conlim=1e8,\n least_squares_lsmr_maxiter=None,\n least_squares_lsmr_show=False,\n least_squares_lsqr_damp=0.0,\n least_squares_lsqr_atol=1e-6,\n least_squares_lsqr_btol=1e-6,\n least_squares_lsqr_conlim=1e8,\n least_squares_lsqr_iter_lim=None,\n least_squares_lsqr_show=False):\n\n return 
{'generic_lgmres': {'type': 'generic_lgmres',\n 'tol': lgmres_tol,\n 'maxiter': lgmres_maxiter,\n 'inner_m': lgmres_inner_m,\n 'outer_k': lgmres_outer_k},\n 'generic_least_squares_lsmr': {'type': 'generic_least_squares_lsmr',\n 'damp': least_squares_lsmr_damp,\n 'atol': least_squares_lsmr_atol,\n 'btol': least_squares_lsmr_btol,\n 'conlim': least_squares_lsmr_conlim,\n 'maxiter': least_squares_lsmr_maxiter,\n 'show': least_squares_lsmr_show},\n 'generic_least_squares_lsqr': {'type': 'generic_least_squares_lsqr',\n 'damp': least_squares_lsqr_damp,\n 'atol': least_squares_lsqr_atol,\n 'btol': least_squares_lsqr_btol,\n 'conlim': least_squares_lsqr_conlim,\n 'iter_lim': least_squares_lsqr_iter_lim,\n 'show': least_squares_lsqr_show}}", "def solve(self, x_0, dual_x_0):\n # Sanitize the inputs\n if type(x_0) is not np.ndarray or type(dual_x_0) is not np.ndarray:\n x_0 = np.array(x_0)\n dual_x_0 = np.array(dual_x_0)\n # Make sure that the arrays are column vectors\n x_0 = x_0.reshape(-1, 1)\n dual_x_0 = dual_x_0.reshape(-1, 1)\n\n print (\"Starting SQP minimization...\")\n [x, dual_x, exit_info] = self.globalized_sqp(x_0, dual_x_0)\n conv_criteria = exit_info['val']\n\n print (exit_info['msg'])\n print (\"Exiting with ||grad[L]|| = {0:e}\".format(conv_criteria))\n print (\"x = {0}\".format(x.reshape(-1)))\n print (\"dual_x = {0}\".format(dual_x.reshape(-1)))\n\n return [x, dual_x]", "def _solve_resolvedtiles(\n resolvedtiles, matches, nvertex, regularization_lambda,\n regularization_translation_factor, regularization_lens_lambda,\n good_solve_dict,\n logger=default_logger, **kwargs):\n\n # FIXME this is done twice -- think through\n tilespecs = resolvedtiles.tilespecs\n example_tspec = tilespecs[0]\n\n mesh = _create_mesh(resolvedtiles, matches, nvertex, **kwargs)\n\n nend = mesh.points.shape[0]\n\n # logger = logging.getLogger(self.__class__.__name__)\n logger.info(\n \"\\n aimed for %d mesh points, got %d\" %\n (nvertex, nend))\n\n if mesh.points.shape[0] < 0.5*nvertex:\n raise MeshLensCorrectionException(\n \"mesh coarser than intended\")\n\n # prepare the linear algebra and solve\n A, weights, b, lens_dof_start = create_A(\n matches, tilespecs, mesh)\n\n x0 = create_x0(\n A.shape[1], tilespecs)\n\n reg = create_regularization(\n A.shape[1],\n len(tilespecs),\n regularization_lambda,\n regularization_translation_factor,\n regularization_lens_lambda)\n\n solution, errx, erry = solve(\n A, weights, reg, x0, b)\n\n transforms = create_transforms(\n len(tilespecs), solution)\n\n tf_trans, jresult, solve_message = report_solution(\n errx, erry, transforms, good_solve_dict)\n\n logger.info(solve_message)\n\n # check quality of solution\n if not all([\n errx.mean() < good_solve_dict['error_mean'],\n erry.mean() < good_solve_dict['error_mean'],\n errx.std() < good_solve_dict['error_std'],\n erry.std() < good_solve_dict['error_std']]):\n raise MeshLensCorrectionException(\n \"Solve not good: %s\" % solve_message)\n\n logger.debug(solve_message)\n\n new_ref_transform = create_thinplatespline_tf(\n mesh, solution, lens_dof_start, logger)\n\n bbox = example_tspec.bbox_transformed(tf_limit=0)\n tbbox = new_ref_transform.tform(bbox)\n bstr = 'new transform corners:\\n'\n for i in range(bbox.shape[0]-1):\n bstr += \" (%0.1f, %0.1f) -> (%0.1f, %0.1f)\\n\" % (\n bbox[i, 0], bbox[i, 1],\n tbbox[i, 0], tbbox[i, 1])\n logger.info(bstr)\n\n new_tilespecs = new_specs_with_tf(\n new_ref_transform, tilespecs, transforms)\n\n stage_affine = estimate_stage_affine(tilespecs, new_tilespecs)\n sastr = (\n \"affine 
estimate of tile translations:\\n\" +\n \" scale: {}\\n\".format(stage_affine.scale) +\n \" translation: {}\\n\".format(stage_affine.translation) +\n \" shear: {}\\n\".format(stage_affine.shear) +\n \" rotation: {}\\n\".format(np.degrees(stage_affine.rotation)))\n logger.info(sastr)\n\n resolved = renderapi.resolvedtiles.ResolvedTiles(\n tilespecs=new_tilespecs,\n transformList=[new_ref_transform])\n return resolved, new_ref_transform, jresult", "def f(m, x, b):\n return m*x + b" ]
[ "0.8164", "0.72392", "0.7206821", "0.68150455", "0.678389", "0.6487322", "0.63835514", "0.624065", "0.598083", "0.5946647", "0.5763055", "0.56070817", "0.55459785", "0.55261856", "0.55072945", "0.54999584", "0.54536366", "0.53874725", "0.5362347", "0.5356209", "0.5320891", "0.52675056", "0.52610177", "0.5245691", "0.5201263", "0.517293", "0.5085363", "0.5069066", "0.50317764", "0.50265664", "0.5020453", "0.501772", "0.5010693", "0.4997196", "0.49958465", "0.49765718", "0.49385136", "0.49324432", "0.4925994", "0.49244612", "0.492137", "0.49190143", "0.49149588", "0.49113306", "0.48888236", "0.48861858", "0.4885006", "0.48845708", "0.48731464", "0.48711744", "0.48632538", "0.48559186", "0.48442358", "0.48417518", "0.4838379", "0.48325", "0.483248", "0.48260242", "0.48241946", "0.48214296", "0.48089638", "0.4807683", "0.47931865", "0.47832236", "0.47731328", "0.4772121", "0.47507986", "0.47387055", "0.47377473", "0.4728568", "0.4718013", "0.47046736", "0.47009453", "0.46955803", "0.4692228", "0.46912768", "0.4689657", "0.46769786", "0.46694785", "0.46636307", "0.46514738", "0.4643577", "0.46428248", "0.464239", "0.4631047", "0.46304384", "0.46273348", "0.46253446", "0.46183756", "0.46141303", "0.46110284", "0.4603338", "0.4580916", "0.45750752", "0.45750087", "0.45648274", "0.45607257", "0.45534092", "0.4551248", "0.4543673" ]
0.7316262
1
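The document fragment that opens this row is the tail of a preconditioned conjugate-gradient loop: the true residual r = b - applyMat(x, linsys_setup) is recomputed on a periodic schedule (the condition is truncated above), otherwise updated cheaply as r = r - alpha*q, followed by the standard delta/beta/d search-direction update. A minimal self-contained sketch of that pattern, assuming a generic apply_A operator and a hypothetical recompute cadence in place of the snippet's applyMat/linsys_setup:

import numpy

def conjugate_gradient(apply_A, b, x, max_iter=500, tol=1e-8, recompute_every=50):
    # Minimal CG sketch mirroring the update pattern in the row above;
    # apply_A, recompute_every, and tol are assumptions, not part of the row.
    r = b - apply_A(x)
    d = r.copy()
    delta_new = numpy.inner(r, r)
    for i in range(max_iter):
        q = apply_A(d)
        alpha = delta_new / numpy.inner(d, q)
        x = x + alpha * d
        if (i + 1) % recompute_every == 0:
            r = b - apply_A(x)      # periodically recompute the true residual
        else:
            r = r - alpha * q       # cheap recurrence, as in the fragment
        delta_old = delta_new
        delta_new = numpy.inner(r, r)
        if numpy.sqrt(delta_new) <= tol:
            break
        beta = delta_new / delta_old
        d = r + beta * d
    return x

With a symmetric positive-definite apply_A this reproduces the delta_old/delta_new/beta/d update sequence visible in the fragment.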
A single restart of GMRES.
def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray, x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray, tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]: r, beta = gmres_residual(A_mv, A_args, b, x) k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors, x0, r, beta, tol, b_norm) x = gmres_update(k, V, R, beta_vec, x0) done = k < num_krylov_vectors - 1 return done, beta, x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restart(self):\n\t\treturn self.reset().start()", "def restart(self) -> None:", "def restart(self):", "def restart():\n stop()\n start()", "def restart(self):\n pass", "def restart(self):\r\n pass", "def _restart(self):\n pass", "def repl_restart(restart: bool = True) -> None:", "def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()", "def restart(*args, **kwargs):\n return restart_type(args, kwargs)", "def restart(self):\n self.stop()\n self.start(init=False)", "def restart(self):\n self.km.restart_kernel(now=True)", "def restart(self):\n\n self.stop()\n self.start()", "def restart(self):\n self.stop()\n self.start()", "def restart(self):\n self.stop()\n self.start()", "def restart(self):\n self._start_time = None\n self.start()", "def restart_salt():\n stop_salt()\n start_salt()", "def node_restart(ctx):\n ctx.obj['node'].attempt_restart()", "def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True", "def restart(self):\n self.done()\n self.counter = 0\n self.start_time = time.time()", "def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")", "def restart(self):\n\t\treturn Job(SDK.PrlVm_Restart(self.handle)[0])", "def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()", "def restart(self, **kwargs):\n return self.client.api.restart(self.id, **kwargs)", "def restart(self):\n self.iic.set_flag(REG.CTRL_REG2.RST)\n time.sleep(0.01)\n self.conf = Configuration()", "def restart(self):\n \n # Set the previous time to be 0, we are starting the simulation\n self.oldtime = 0.0\n\n # Deep Copy of all of the initial conditions\n self.delt = copy.deepcopy(self.ic[0])\n self.odestep = copy.deepcopy(self.ic[1])\n self.species = copy.deepcopy(self.ic[2])\n self.cythonBool = copy.deepcopy(self.ic[3])\n self.resTime = copy.deepcopy(self.ic[4])\n\n # Update need enzyme Counts in the particle map\n self.species.update(self)\n\n print(\"Done with restart\")", "def restartGame(self):\n\t\tself.state = [[0 for x in range(3)] for y in range(3)]\n\t\tself.turn = self.whoGoesFirst()\n\t\tself.win = 0", "def restart_game(self):\n self.play()", "def restart(self):\n self.__init__()\n return", "def restart(self):\n self.stop()\n self.IA.restart()\n self._desintegrator = Desintegrator(background=self.IA.BG_MAP)\n self._desintegrator.increment_maximum_x_in(30)\n self.speed = 3.0\n self._run = True", "def restart(self):\n self.setMap(self.manualBaseMap())", "def restart_game(self):\n self.board = Board(None)\n self.view.reiniciar_jogo(self.board)\n\n self.white_player = None\n self.black_player = None\n self.atual_player = None\n self.finish_game = 0", "def restart(config):\n shutdown(config)\n startup(config)\n return", "def attempt_restart(self):\n self.controller.publish(self, 'restart')", "def cmd_pafastrestart(self, data, client, cmd):\n if self._isranked:\n client.message('You can\\'t restart a map on ranked servers')\n else:\n self.console.say('Fast restarting map in 2 seconds...')\n time.sleep(2)\n self.console.write('fast_restart', maxRetries=5)", "def restart(name):\n ret = \"restart False\"\n if stop(name) and start(name):\n ret = \"restart True\"\n return ret", "def restart(self):\n\n #Kill processes\n print('Restarting scan...... 
\\n')\n self.kill()\n\n #Delete crawler\n del self.crawler\n self.crawler = self.generate_crawler()\n\n #Give ourselves a second\n time.sleep(2)", "def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()", "def rstrtmgr_RmRestart(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwSessionHandle\", \"dwRestartFlags\", \"fnStatus\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def restart(self):\n\t\tself.destroy()\n\t\t\n\t\tself.resetBoard()\n\t\tself.i_play_again = 0\n\t\tself.adversary_play_again = 0\n\t\tself.isMyTurn = False\n\n\t\tself.initGame(self.parent)\n\n\t\t# Update the screen\n\t\tself.pack()", "def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')", "def cmd_pamaprestart(self, data, client, cmd):\n if self._isranked:\n client.message('You can\\'t restart a map on ranked servers')\n else:\n self.console.say('Restarting map in 2 seconds...')\n time.sleep(2)\n self.console.write('map_restart', maxRetries=5)", "def restart(self):\n self.session_id = uuid.uuid4()\n self.turn_count = 0", "def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()", "def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()", "def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()", "def changed(self, *args):\n log.debug(\"Scheduling for immediate restart.\")\n self.schedule('restart', 0)\n return CONTINUE", "def restart_from_helper ( self, ):\r\n self.no_helper_restarts += 1\r\n self.logger.info( \"restart_from_helper\" )\r\n\r\n self.restart()", "def restart(self):\n self.idx = 0", "def restart(self):\n self.logger.info(\"Received graceful restart request\")\n self._restart = True\n self.stop()", "def restart(self):\n print \"Restarting \" + executable + \" \" + str(argv) \n execl(executable, *([executable]+argv))", "def processRestart(name):\n imrclient.update_server_info()\n imrclient.process_restart(name)\n\n # FIXME: this is a workaround until the Bima processes are fixed\n # FIXME: such that their initialized monitor points actually work\n m = re.match(r'^\\s*(th|dm|rm|mh|if|ot|ch)bima([1-9])\\s*$', name)\n if m:\n ant = int(m.group(2)) + 6\n print 'WARNING: You MUST re-initialize C%d by using removeAntenna(%d)' % (ant, ant)\n print 'WARNING: followed by addAntenna(%d) in the correct subarray' % (ant, )", "def restart_game(context: GameContext) -> None:\n left_spaceship, right_spaceship = create_spaceships()\n context.restart(left_spaceship, right_spaceship)", "def net_service_restart(self):\n\t\treturn Job(SDK.PrlSrv_NetServiceRestart(self.handle)[0])", "def restart(self):\n self.set_random_pos('starting')\n self.set_random_pos('finishing')\n self.game_loop()", "def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()", "def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()", "def _restart_workload(self, workload):\n self.log.info('%-20s RESTARTING', workload.name())\n workload.stop()\n workload.post_stop()\n workload.pre_start()\n workload.start()", "def restart_loop(app_func):\n while samuraix.restarting:\n log.info('restart loop')\n samuraix.restarting = False\n run_app(app_func)\n # make sure everything is really gone \n samuraix.app = None\n samuraix.config = None\n gc.collect()", "def restarted(self):\n return self.reset(\n started=self.started + 1,\n 
starting=0,\n )", "def restart_kernel(self, now=False, **kw):", "def restart(self):\n self.grid = np.zeros((3, 3), dtype=int)\n self.state = 0", "def restart():\n \n while True:\n print(\"Spiel wurde beendet: Tippe 'r' zum Neustart oder 'q' zum Beenden\")\n user_input_3 = str(input(\"Erwarte Eingabe: \"))\n if user_input_3 == \"q\":\n quit()\n elif user_input_3 == \"r\":\n print()\n print()\n print(\"SPIEL-WURDE-NEUGESTARTET------------------------------------------------------\")\n print()\n sixteen_is_dead(set_players())\n else:\n print()\n print(\"ERROR: Falsche Eingabe - bitte versuchen Sie es erneut!\")\n print()", "def restartSystem(self):\n # save retry count between reboots\n try:\n self.notifyPut('Restarting System...')\n self.db = shelve.open(os.path.join(self.xlocal, 'Launch Manager Utils\\\\launch.data'))\n self.db['retry_count'] = self.retry_count\n self.db.close()\n except Exception, e:\n self.logQ.put('{0} - Unable to save retry count'.format(e))\n \n try:\n subprocess.call(['SHUTDOWN', '/f', '/r'])\n except Exception, e:\n self.logQ.put('{0} - Unable to restart Windows'.format(e))\n return", "def set_runs_per_restart(self, num):\n raise NotImplementedError()", "def restart_game():\r\n restart = input(\"Would you like to play again?\\n(y/n):\")\r\n if restart.lower() == \"y\" or restart.lower() == \"yes\":\r\n print(\"\\nStarting New Game!\\n\")\r\n self.scores = {\"p1\": 0, \"p2\": 0} # Restarts scores\r\n self.play_game() # Restarts game\r\n elif restart.lower() == \"n\" or restart.lower() == \"no\":\r\n print(\"\\nThank you for playing!\")\r\n else: # Checks for valid input:\r\n print(\"Oops! Seems like you misspelled something.\"\r\n \"\\nMake sure your response is: y, yes, n or no.\")\r\n return restart_game()", "def restart(verbose=False, force=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()", "def restart():\n info = request.get_json() or {}\n delay_secs = int(info.get('delay', 0))\n\n t = threading.Timer(delay_secs, update_trigger_file)\n t.start()\n\n return jsonify('Success')", "def restart():\n logging.warning (\"[FLASKWEB] Shutting down K3 Dispatcher....\")\n shutdown_dispatcher()\n return 'Dispatcher is restarting.....Give me a millisec'", "def restart(verbose=False, force=False):\n\n _load_config_and_logger(verbose)\n _validate_manager_installed('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()", "async def module_command_restart(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n reason = \" \".join(parsed.args[\"msg\"] or []) or \"Restarting\"\n self.quit(reason)\n self._restarting = True", "def restart(self):\r\n self.agent_x = self.start_x\r\n self.agent_y = self.start_y\r\n self.terminated = False", "def run_restart(self, expanded, unexpanded) :\n\t\tif not self.HasRoles(self.__context.Control_Panel, 'Manager') :\n\t\t\treturn -1\n\t\tif expanded :\n\t\t\treturn self.errormessage(\"Doesn't need any argument\")\n\t\tself.__context.Control_Panel.manage_restart(self.__context.REQUEST.URL0)", "async def kill(self, restart: bool = False) -> None:\n pass", "def restart(self, mess, args):\n user = self.get_sender_username(mess)\n\n if user in self.users and args.strip() == self.res:\n self.message_queue.append('_%s restarted me! brb!_'\n %(self.users[user]))\n self.log.info( '%s is restarting me.' 
% user)\n self.shutdown()\n self.idle_proc()\n self.conn.sendPresence(typ='unavailable')\n self.attempt_reconnect()", "def restart(self):\r\n self._session = _Session()\r\n self._first_run = True\r\n self._reader = MatRead(self._temp_dir)\r\n self._writer = MatWrite(self._temp_dir, self._oned_as)", "def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')", "def restart(self):\r\n self._update('restart')\r\n\r\n self.supervisord.options.mood = SupervisorStates.RESTARTING\r\n return True", "def populationReboot(self, experiment_count):\n cons.timer.setTimerRestart(cons.pop_reboot_path) #Rebuild timer objects\n #--------------------------------------------------------------------\n try: #Re-open track learning file for continued tracking of progress.\n self.learn_track = open(cons.out_file+str(experiment_count)+'_LearnTrack.txt','a')\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)\n print('cannot open', cons.out_file+str(experiment_count)+'_LearnTrack.txt')\n raise\n\n #Extract last iteration from file name---------------------------------------------\n temp = cons.pop_reboot_path.split('_')\n iter_ref = len(temp)-1\n completed_iterations = int(temp[iter_ref])\n print(\"Rebooting rule population after \" +str(completed_iterations)+ \" iterations.\")\n self.explore_iter = completed_iterations-1\n for i in range(len(cons.learning_checkpoints)):\n cons.learning_checkpoints[i] += completed_iterations\n cons.max_iterations += completed_iterations\n\n #Rebuild existing population from text file.--------\n self.population = ClassifierSet(cons.pop_reboot_path)\n #---------------------------------------------------\n try: #Obtain correct track\n f = open(cons.pop_reboot_path+\"_PopStats.txt\", 'r')\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)\n print('cannot open', cons.pop_reboot_path+\"_PopStats.txt\")\n raise\n else:\n correct_ref = 26 #File reference position\n temp_line = None\n for i in range(correct_ref):\n temp_line = f.readline()\n temp_list = temp_line.strip().split('\\t')\n self.tracked_results = temp_list\n if cons.env.format_data.discrete_action:\n for i in range( len( self.tracked_results ) ):\n self.tracked_results[i] = int( self.correct[i] )\n else:\n for i in range( len( self.tracked_results ) ):\n self.tracked_results[i] = float( self.tracked_results[i] )\n f.close()", "def restart(self, sync=True):\n self.shutdown(sync=True)\n self.power_on(sync)", "def Restart(self, udp=False):\n self.Stop()\n self.Start(udp)", "def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))", "def schedule_system_restart():\n global _force_system_restart\n _force_system_restart = True", "def restart_all():\n\n restart_nginx()\n restart_supervisor()", "def vm_restart(self, params: dict) -> Tuple[\"Status\", dict]:", "def reset(self):\n self.restart()\n self.cycles = 0", "def restart_kernel(self, kernel_id, now=False):", "def restart():\n log('reiniciando servicos', yellow)\n nginx_stop()\n nginx_start()\n nginx_restart()\n nginx_reload()\n supervisor_stop()\n supervisor_start()", "def restart_with_reloader():\n while True:\n print(f'Restarting with reloader')\n args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv\n new_environ = os.environ.copy()\n new_environ[\"RUN_MAIN\"] = 'true'\n exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)\n if exit_code != 3:\n return exit_code", "def 
coldRestart(self):\n assert False, \"Deriving class must implement\"", "async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")", "def restart(self, timestamp=0.0, **keywords):\n self.services.debug('restart() method called')\n pass", "def restart(self):\n self.main_grid_values = [\n [0] * self.TILES_PER_ROW for _ in range(self.TILES_PER_ROW)\n ]\n\n self.score_value.set('0')\n self.add_two()\n self.add_two()\n self.update_grid()\n\n self.bind('<{}>'.format(self.controller.slide_left_control), self.push_left)\n self.bind('<{}>'.format(self.controller.slide_right_control), self.push_right)\n self.bind('<{}>'.format(self.controller.slide_up_control), self.push_up)\n self.bind('<{}>'.format(self.controller.slide_down_control), self.push_down)\n\n self.game_over_button.destroy()", "def power_off(fast: bool = True, restart: bool = False) -> None:", "def __mode_reset(self):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tval.reset_restart()", "def _cb_cmd_restart(self,msg):\r\n print \"Walker restart command received\"\r\n \r\n #Stop the running thread\r\n while self.displacing or self.walking or self._th_walk:\r\n rospy.loginfo('Stopping walking thread')\r\n self.stop()\r\n \r\n #If the robot is simuated -> send to initial configuration\r\n if not self.real_robot:\r\n rospy.loginfo(\"Sending robot to zero configuration\")\r\n for jn in self.robotis_mini_ci.q_names:\r\n self.pubs[jn].publish(0.0)\r\n \r\n time.sleep(1)\r\n \r\n #If the robot is simulated -> reset simulation\r\n try:\r\n self.pause_simulation_srv()\r\n rospy.loginfo( \"Paused gazebo\")\r\n time.sleep(1)\r\n self.reset_world_srv()\r\n rospy.loginfo( \"Reseting gazebo\")\r\n time.sleep(1)\r\n self.unpause_simulation_srv()\r\n rospy.loginfo( \"Unpaused gazebo\")\r\n time.sleep(1)\r\n except rospy.ServiceException, e:\r\n print \"Service call failed: %s\"%e", "def restart(self, relay):\n if self.stop():\n return self.start(relay)\n return False", "def webserver_restart():\n try:\n run(\"kill -HUP $(cat %s)\" % GUNICORN_PIDFILE)\n except:\n webserver_start()", "def replay():\n roku_master.replay()", "def Restart(req, cmd=None):\n\tif req == 'POST':\n\t\treturn putFunc(\"Restart\", cmd)", "def test_for_restart(self):\n #print(\"entering test_for_restart()\")\n self._compile_result(\"UserName: \" + self.user_name)\n #if self.first_song == True:\n # msg = \"Welcome_str\"\n # self.first_song = False\n if self._feedback_plat == \"RIVA\":\n self._RIVA_message_num += 1\n text_to_RIVA(\"Welcome_str\")\n else:# self._feedback_plat == \"Text\":\n self._RIVA_message_num += 1\n #to_no_voice_log(\"NewData:{};TTS:{}\".format(self._RIVA_message_num, msg))\n ###else:\n ### text_to_ispeech(ispeech_formatter(msg))\n while True:\n self._check_completion()\n if self._song_over is False:\n print(\"Song Started\")\n if self._feedback_plat == \"RIVA\":\n reset_RIVA_log()\n self._RIVA_message_num += 1\n #text_to_RIVA(self.response_welcome())\n elif self._feedback_plat == \"Text\":\n to_no_voice_log((emo_less_feedback(0, 0, 0)))\n self.execute_song()\n print(\"Re-entered Test_for_restart()\")\n if self._song_over is True:\n interface_info = gather_info(\n parse_csv(read_csv(CSV_functions.MUSICGLOVE)))\n #print(interface_info)\n #print(\"Song_over min/max = \", min_max)\n self.user_stats.set_grips(interface_info)\n self._compile_result(grip_avg_summary_str(interface_info))\n evaluated_info = [evaluate_worst_grip(interface_info, 
self._last_worst_grip),\n evaluate_best_grip(interface_info)]\n summary = summary_generator(evaluated_info[0], evaluated_info[1])\n if self._feedback_plat == \"RIVA\":\n self._RIVA_message_num += 1\n #print(\"message_num={} summary={}\".format(self._message_num, summary))\n text_to_RIVA(summary)\n else:# self._feedback_plat == \"Text\":\n to_no_voice_log(emo_less_feedback(self._RIVA_message_num, evaluated_info[0], evaluated_info[1]))\n ###else:\n ### text_to_ispeech(ispeech_formatter(summary))\n self._last_30_sec = []\n self._compile_result(summary)\n self._csv_result.extend(read_csv(CSV_functions.MUSICGLOVE))\n make_csv(self._csv_result, CSV_functions.M_GLOVE_SUMMARIES, what_song(self._grip_count))\n self.__init__(self.user_name, restart=True)\n self._RIVA_message_num = 1\n else:\n pass\n return" ]
[ "0.74949425", "0.74385256", "0.73926336", "0.73311555", "0.7275", "0.71946764", "0.7092151", "0.69505244", "0.6848349", "0.6821594", "0.6792471", "0.6755524", "0.67521834", "0.6737451", "0.6737451", "0.6691218", "0.66691226", "0.6651869", "0.66393805", "0.66291237", "0.66158015", "0.6586505", "0.6567151", "0.65501153", "0.65496075", "0.65471345", "0.6522514", "0.64622307", "0.64505535", "0.64447993", "0.64241457", "0.6421358", "0.6378062", "0.636839", "0.6343868", "0.6340574", "0.63120764", "0.629451", "0.62823576", "0.62598467", "0.6247708", "0.6205403", "0.61794204", "0.617291", "0.617291", "0.617291", "0.6172522", "0.6166542", "0.615141", "0.61448675", "0.61261797", "0.61230725", "0.61056376", "0.6105166", "0.6104409", "0.61023474", "0.60969406", "0.60841286", "0.60818464", "0.60803735", "0.6060984", "0.60502934", "0.6036295", "0.60327274", "0.60209167", "0.6005386", "0.59546155", "0.59414405", "0.59356374", "0.5934727", "0.5930012", "0.59123814", "0.5910168", "0.5905774", "0.590118", "0.58888143", "0.5887323", "0.5883987", "0.58792543", "0.5849733", "0.58300835", "0.58227754", "0.5822545", "0.5814035", "0.5809299", "0.58020496", "0.579302", "0.5785716", "0.57850266", "0.57811755", "0.5773091", "0.57705635", "0.57628894", "0.57520396", "0.5750233", "0.5737525", "0.5736826", "0.5725743", "0.57211167", "0.57185984", "0.56999034" ]
0.0
-1
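Per its docstring, the gmres routine in this row performs a single restart: it rebuilds the residual, runs one Krylov sweep of at most num_krylov_vectors steps, and reports done when that sweep converged before exhausting its vectors. A hedged sketch of the outer restart loop such a routine implies (the operator A_mv, its A_args, and max_restarts are assumptions, not part of the row):

import jax.numpy as jnp

def gmres_restarted(A_mv, A_args, b, x0, num_krylov_vectors, tol, max_restarts=20):
    # Hypothetical outer loop around the single-restart gmres(...) above.
    # Passing the current iterate as both `x` and `x0` matches how that
    # routine computes the residual at x and adds the Krylov correction to x0.
    b_norm = jnp.linalg.norm(b)
    x = x0
    for _ in range(max_restarts):
        done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors,
                              x, tol, b_norm)
        if done:  # the sweep stopped early, i.e. the residual test was met
            break
    return x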
Computes the residual vector r and its norm, beta, which is minimized by GMRES.
def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray, x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]: r = b - A_mv(x, *A_args) beta = jnp.linalg.norm(r) return r, beta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def residual(self, y,r):\n u,v,tt = self.split(y)\n fiu,fiv,fitt = self.problem.internal_forces(u,v,tt)\n R = np.concatenate((fiu,fiv,fitt))\n R = self.residualApplyBCs(R,y,r)\n return R", "def get_residual(self, beta: ndarray) -> ndarray:\n return self.data.weight*(self.data.obs -\n self.fevar.mapping(beta))", "def beta_r(r):\n return 0.", "def residualNorm2(self):\n r2 = (np.dot(self.x,np.dot(self.AtA,self.x)-2.0*self.Atb) + self.btb)*self.scale\n if self.regularizationLambda > 0:\n r2 -= self.regularizationLambda*np.dot(self.x,self.x)\n return r2", "def beta_r(r):\n return 1.", "def beta_r(r, beta):\n return beta", "def beta_r(r, r_ani):\n return 1./2 * r / (r + r_ani)", "def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x", "def beta_r(r, r_ani):\n return r**2/(r_ani**2 + r**2)", "def residuals(p, r, theta):\n return r - f(theta, p)", "def residual(var, matrix, RHSvector):\n from fipy.tools.numerix import array, LINFnorm\n \n Lx = matrix * array(var)\n return LINFnorm(Lx - RHSvector)", "def residuals(self, b):\n x, y = self.xvals, self.yvals\n return self._numexpr(x, *b) - y", "def wR(r, rc):\n nr = norm_numba(r)\n return (1 - nr / rc) if nr / rc < 1.0 else 0.0", "def compute_residuals(r):\n global conv_residuals\n conv_residuals.append(r)\n return", "def probaR(self, r):\n\n if r == 0.:\n return self.__alpha0 + self.__beta + self.__eta / 2.\n\n if r == 1.:\n return self.__alpha1 + self.__beta + self.__eta / 2.\n\n return self.__eta * (3./2. + r - r*r)", "def beta_r(r, r_ani, beta_inf):\n return beta_inf * r**2/(r_ani**2 + r**2)", "def m_beta(r, m_x, r_x, r_c, beta, **kwargs):\n # analytic enclosed mass inside r_x gives normalization rho_0\n rho_0 = m_x / (4./3 * np.pi * r_x**3 * spec.hyp2f1(\n 3./2, 3 * beta / 2, 5./2, -(r_x / r_c)**2))\n\n m = 4./3 * np.pi * rho_0 * r**3 * spec.hyp2f1(\n 3./2, 3 * beta / 2, 5./2, -(r/r_c)**2)\n\n return m", "def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,\n tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:\n r, beta = gmres_residual(A_mv, A_args, b, x)\n k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,\n x0, r, beta, tol, b_norm)\n x = gmres_update(k, V, R, beta_vec, x0)\n done = k < num_krylov_vectors - 1\n return done, beta, x", "def _solver_dirty(X, R, coef_shared_, coef_specific_, Ls, alpha, beta,\n max_iter, tol, positive):\n n_tasks = len(X)\n n_samples, n_features = X[0].shape\n theta = coef_shared_ + coef_specific_\n alpha *= n_samples\n beta *= n_samples\n\n # dg = 1.\n for i in range(max_iter):\n w_max = 0.0\n d_w_max = 0.0\n for j in range(n_features):\n if Ls[j] == 0.:\n continue\n # compute residual\n grad = np.zeros(n_tasks)\n tmp1 = np.zeros(n_tasks)\n tmp2 = np.zeros(n_tasks)\n\n normtmp = 0.\n for t in range(n_tasks):\n for n in range(n_samples):\n grad[t] += X[t, n, j] * R[t, n]\n grad[t] /= Ls[j]\n tmp1[t] = grad[t] + coef_shared_[j, t]\n tmp2[t] = grad[t] + coef_specific_[j, t]\n\n normtmp += tmp1[t] ** 2\n\n normtmp = np.sqrt(normtmp)\n\n # l2 thresholding\n\n thresholdl2 = 0.\n if normtmp:\n thresholdl2 = max(1. 
- alpha / (Ls[j] * normtmp), 0.)\n tmp1 *= thresholdl2\n thresholdl1 = beta / Ls[j]\n tmp2 = np.sign(tmp2) * np.maximum(np.abs(tmp2) - thresholdl1, 0.)\n if positive:\n tmp2 = np.maximum(tmp2, 0.)\n tmp1 = np.maximum(tmp1, 0.)\n new_theta = tmp1 + tmp2\n if theta[j].any():\n for t in range(n_tasks):\n R[t] += X[t, :, j] * theta[j, t]\n\n d_w_j = np.abs(theta[j] - new_theta).max()\n d_w_max = max(d_w_max, d_w_j)\n w_max = max(w_max, np.abs(tmp1 + tmp2).max())\n coef_shared_[j] = tmp1\n coef_specific_[j] = tmp2\n theta[j] = new_theta\n\n if theta[j].any():\n for t in range(n_tasks):\n R[t] -= X[t, :, j] * theta[j, t]\n\n if (w_max == 0.0 or d_w_max / w_max < tol):\n break\n\n return coef_shared_, coef_specific_, R, i", "def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n tol, A_mv, A_args, b_norm, _ = gmres_constants\n\n V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)\n R_col, givens = apply_givens_rotation(H[:, k], givens, k)\n R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])\n\n # Update the residual vector.\n cs, sn = givens[:, k] * beta_vec[k]\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)\n err = jnp.abs(sn) / b_norm\n gmres_variables = (k + 1, V, R, beta_vec, err, givens)\n return (gmres_variables, gmres_constants)", "def beta_model(r, s0, rc, beta, c):\n return s0 * np.power((1.0+(r/rc)**2), 0.5-3*beta) + c", "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ", "def sigma_beta_rmax(R, r_max, m_x, r_x, r_c, beta, **kwargs):\n # analytic enclosed mass inside r_x gives normalization rho_0\n rho_0 = m_x / (\n 4./3 * np.pi * r_x**3 * spec.hyp2f1(\n 1.5, 1.5 * beta, 2.5, -(r_x / r_c)**2\n )\n )\n\n prefactor = 2 * r_c * rho_0\n sigma = prefactor * (\n ((r_max / r_c)**2 - (R / r_c)**2)**0.5\n / (1 + (R / r_c)**2)**(1.5 * beta)\n * spec.hyp2f1(\n 0.5, 1.5 * beta, 1.5,\n -(((r_max / r_c)**2 - (R/r_c)**2) / (1 + (R / r_c)**2))\n )\n ).real\n\n return sigma.astype(float)", "def sigma_beta(R, m_x, r_x, r_c, beta, **kwargs):\n # analytic enclosed mass inside r_x gives normalization rho_0\n rho_0 = m_x / (4./3 * np.pi * r_x**3 * spec.hyp2f1(\n 3./2, 3. 
* beta / 2, 5./2, -(r_x / r_c)**2))\n\n prefactor = np.pi**0.5 * r_c * rho_0\n sigma = prefactor * (\n (((R/r_c)**2 + 1)**(0.5 - 3 * beta / 2) *\n spec.gamma(3 * beta / 2 - 0.5)) / spec.gamma(3 * beta / 2))\n\n return sigma", "def beta_r(self, r, **kwargs):\n return self._model.beta_r(r, **kwargs)", "def __call__(self,r):\n return self._n0 * np.power(r / self._r0, self._beta)", "def residuals(self, ts, rvs, p):\n\n if p.npl == 0:\n return rvs\n else:\n rvmodel = np.sum(rv.rv_model(ts,p), axis=0)\n return rvs - rvmodel", "def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)", "def residual(self, x, y, num_targets):\n \n x = x/sum(x) # normalize weights\n\n # RUN IM-SRG(2)\n ref = self._refs.T.dot(x)\n main(self._n_holes,self._n_particles, \n g=self._g_val, \n pb=self._pb_val, \n ref=ref, \n verbose=0, \n generator=self._generator,\n output_root = self._coeffs_root)\n\n # LOAD EVOLVED COEFFICIENTS\n H0B, H1B, H2B, eta1B_vac, eta2B_vac = pickle.load(open(self._coeffs_root+'/vac_coeffs_evolved.p', 'rb'))\n\n # PERFORM FULL CI AND GET EIGENVALUES\n hme = pyci.matrix(self._n_holes,self._n_particles, H0B, H1B, H2B, H2B, imsrg=True)\n ev_eigs = np.linalg.eigvalsh(hme)\n\n #return np.sqrt(np.mean((ev_eigs-y)**2))\n #return abs(ev_eigs[0:num_targets] - y[0:num_targets])\n #return abs(ev_eigs[1] - y[1])\n #return abs(ev_eigs[0] - y[0])\n return np.sqrt(0.80*(ev_eigs[0]-y[0])**2 + 0.20/35*((ev_eigs[1::]-y[1::]).T.dot(ev_eigs[1::]-y[1::])))", "def residual(t, x, xdot, result):\n result[0] = x[2]-xdot[0]\n result[1] = x[3]-xdot[1]\n result[2] = -xdot[2]+x[4]*x[0]/m\n result[3] = -xdot[3]+x[4]*x[1]/m-g\n result[4] = x[2]**2 + x[3]**2 \\\n + (x[0]**2 + x[1]**2)/m*x[4] - x[1] * g\n print(result)", "def getVs(self, Vp, residual, beta):\n return Vp + beta*residual", "def R(alpha, beta, gamma, tol = 1e-16):\n \n ca, cb, cg = np.cos(alpha), np.cos(beta), np.cos(gamma)\n sa, sb, sg = np.sin(alpha), np.sin(beta), np.sin(gamma)\n\n m = np.array([[ca*cb*cg - sa*sg, -sa*cg - ca*cb*sg, ca*sb],\n [sa*cb*cg + ca*sg, ca*cg - sa*cb*sg, sa*sb],\n [-sb*cg, sb*sg, cb]])\n\n #m[np.abs(m) < tol] = 0\n 
return m", "def solve_R(R, b):\n n = b.size\n assert R.shape == (n,n)\n x = zeros(n, dtype=R.dtype)\n for i in range(n-1,-1,-1):\n x[i] = (b[i] - dot(x[i+1:], R[i,i+1:])) / R[i,i]\n if not numpy.isfinite(x[i]):\n x[i] = 0.0\n return x", "def GMRES_1(A, b, x0, max_iterations=50):\n\n last_x = x0\n curr_x = last_x\n last_r = b - A @ x0\n curr_iter = 0\n residual_queue = []\n while curr_iter < max_iterations:\n Ar = A @ last_r\n alpha = (last_r.transpose() @ Ar) / (Ar.transpose() @ Ar)\n curr_x = last_x + alpha * last_r\n curr_r = last_r - alpha * Ar\n c = np.linalg.norm(A @ curr_x - b, 2) / np.linalg.norm(b, 2)\n residual_queue.append(np.linalg.norm(A @ curr_x - b, 2))\n if curr_iter == max_iterations - 1:\n print_graph(residual_queue, curr_iter, \"residual\", \"GMRES(1)\")\n last_x = curr_x\n last_r = curr_r\n curr_iter += 1\n print(\"Number of Iterations: \" + str(curr_iter))\n\n return curr_x", "def sigma_beta_plaw_rmax(R, r_max, m_x, r_x, r_c, beta, gamma, **kwargs):\n # analytic enclosed mass inside r_x gives normalization rho_0\n rho_0 = m_x / (4./3 * np.pi * r_x**3 * spec.hyp2f1(\n 1.5, 1.5 * beta, 2.5, -(r_x / r_c)**2))\n rho_x = rho_0 / (1 + (r_x / r_c)**2)**(1.5 * beta)\n\n if (gamma <= 1):\n raise ValueError(\"for gamma <= 1 the profile diverges.\")\n\n if R <= r_x:\n a = R / r_x\n b = r_max / r_x\n sigma_beta_rx = sigma_beta_rmax(\n R=R, r_max=r_x, m_x=m_x, r_x=r_x, r_c=r_c, beta=beta\n )\n # needs minus sign\n sigma_gamma_rx = 2 * rho_x * r_x * (\n 1. / (a * (2 - gamma)) * (\n 1j * b**(2 - gamma) * mp.hyp2f1(0.5, 1 - 0.5 * gamma, 2 - 0.5 * gamma, (b / a)**2 + 0j)\n - 1j * mp.hyp2f1(0.5, 1 - 0.5 * gamma, 2 - 0.5 * gamma, a**(-2) + 0j)\n ).real\n )\n sigma = sigma_beta_rx + sigma_gamma_rx\n\n elif R > r_x and R < r_max:\n a = R / r_x\n b = r_max / r_x\n if gamma == 3:\n sigma = 2 * rho_x * r_x * (b**2 - a**2)**0.5 / (b * a**2)\n else:\n sigma = rho_x * r_x * (\n np.pi**0.5 * a**(1 - gamma) * spec.gamma(0.5 * (gamma - 1)) / spec.gamma(0.5 * gamma)\n + b**(1 - gamma) * spec.gamma(0.5 * (1 - gamma)) / spec.gamma(1.5 - 0.5 * gamma)\n * spec.hyp2f1(0.5, 0.5 * (gamma - 1), 0.5 * (gamma + 1), (a / b)**2)\n )\n else:\n sigma = 0\n\n\n return float(sigma)", "def m_beta_plaw_rmax(r, r_max, m_x, r_x, r_c, beta, gamma, rho_x=None, **kwargs):\n if r <= r_x:\n m = m_beta(r=r, m_x=m_x, r_x=r_x, r_c=r_c, beta=beta)\n else:\n if r > r_max:\n return 0.\n if rho_x is None:\n rho_x = profile_beta(np.array([r_x]).reshape(-1, 1),\n m_x=np.array([m_x]).reshape(-1, 1),\n r_x=np.array([r_x]).reshape(-1, 1),\n r_c=np.array([r_c]).reshape(-1, 1),\n beta=np.array([beta]).reshape(-1, 1))\n rho_x = rho_x.reshape(-1)\n m = (m_x + m_plaw(r=r, rho_x=rho_x, r_x=r_x, gamma=gamma))\n\n return m", "def residual_G2D_norotation(pars,x,y,data=None, eps=None):\n\tparvals = pars.valuesdict() # a Parameters() object is passed as \"pars\"\n\tintensity_max = parvals[\"I_zero\"]\n\tcenterposition_x = parvals[\"x_zero\"]\n\tcenterposition_y = parvals[\"y_zero\"]\n\tbeamwidth_x = parvals[\"omegaX_zero\"]\n\tbeamwidth_y = parvals[\"omegaY_zero\"]\n\tbgr = parvals[\"backgr\"]\n\t\n\n\tmodel = intensity_max*np.exp(-2*np.power(x-centerposition_x,2)/beamwidth_x**2 - \\\n\t\t2*np.power(y-centerposition_y,2)/beamwidth_y**2) + bgr\n\tif data is None:\n\t\treturn np.array(model) # we don't flatten here because this is for plotting\n\tif eps is None:\n\t\tresid = np.array(model - data)\n\t\treturn resid.flatten() # minimization array must be flattened (LMFIT FAQ)\n\telse:\n\t\tresid = np.array((model - data)/eps)\n\t\treturn 
resid.flatten()", "def residual(self,name):\n state = self.getstate(name)\n m = self.hit.vec \n x = state.vec\n res = m - self.hmatrix*x\n debug('kfnode.residual',(name,res))\n return res", "def sigma_beta_plaw(R, m_x, r_x, r_c, beta, gamma, **kwargs):\n # analytic enclosed mass inside r_x gives normalization rho_0\n rho_0 = m_x / (4./3 * np.pi * r_x**3 * spec.hyp2f1(\n 1.5, 1.5 * beta, 2.5, -(r_x / r_c)**2))\n rho_x = rho_0 / (1 + (r_x / r_c)**2)**(1.5 * beta)\n\n if (gamma <= 1):\n raise ValueError(\"for gamma <= 1 the profile diverges.\")\n\n elif gamma == 2:\n if R <= r_x:\n a = R / r_x\n sigma_beta_rx = sigma_beta_rmax(\n R=R, r_max=r_x, m_x=m_x, r_x=r_x, r_c=r_c, beta=beta\n )\n sigma_gamma_rx = np.arcsin(a) / a\n sigma = sigma_beta_rx + sigma_gamma_rx\n else:\n a = R / r_x\n sigma = 0.5 * np.pi / a\n\n else:\n if R < r_x:\n a = R / r_x\n sigma_beta_rx = sigma_beta_rmax(\n R=R, r_max=r_x, m_x=m_x, r_x=r_x, r_c=r_c, beta=beta\n )\n sigma_gamma_rx = 2 * rho_x * r_x * (\n 0.5 / np.pi**0.5 * (-1j * a)**(1 - gamma)\n * mp.gamma(1 - 0.5 * gamma) * mp.gamma(0.5 * (gamma - 1))\n + 1j / (a * (gamma - 2)) * mp.hyp2f1(0.5, 1 - 0.5 * gamma, 2 - 0.5 * gamma, a**(-2))\n ).real\n sigma = sigma_beta_rx + sigma_gamma_rx\n\n elif R == r_x:\n sigma = 2 * rho_x * r_x * (\n 0.5 * np.pi**0.5 * mp.gamma(0.5 * (gamma - 1)) / mp.gamma(0.5 * gamma)\n ).real\n else:\n a = R / r_x\n sigma = np.pi**0.5 * rho_x * r_x * a**(1 - gamma) * (\n mp.gamma(0.5 * (gamma - 1)) / mp.gamma(0.5 * gamma)\n ).real\n\n return float(sigma)", "def _residuals(params: List[float], xs: np.ndarray, ys: np.ndarray) -> float:\n return _model(params=params, xs=xs) - ys", "def test_regress_residuals(self):\r\n x = [1.0, 2.0, 3.0, 4.0, 5.0]\r\n y = [2.1, 4.2, 5.9, 8.4, 9.6]\r\n result = regress_residuals(x, y)\r\n self.assertFloatEqual(result, [-0.1, 0.08, -0.14, 0.44, -0.28])", "def residualNorm(self):\n return math.sqrt(self.residualNorm2())", "def residual(params, model_func, x, data, min_x_param=None, max_x_param=None,\n eps=None):\n # Crop the X data according to a fit parameter\n if min_x_param is not None or max_x_param is not None:\n min_x = params.get(min_x_param, None)\n max_x = params.get(max_x_param, None)\n x, data = crop_x_y(x, data, min_x=min_x, max_x=max_x,\n include_bounds=False)\n\n # Calculate data according to the model function\n model = model_func(x, **params)\n\n # Calculate the residuals of the model and the given data\n if eps is None:\n return model - data\n return (model - data) / eps", "def compute_residuals(self):\n\n r = self.rsdl()\n adapt_tol = self.opt['RelStopTol']\n\n if self.opt['AutoStop', 'Enabled']:\n adapt_tol = self.tau0 / (1. 
+ self.k)\n\n return r, adapt_tol", "def residuals(self) -> npt.NDArray[np.float64]:\n return self.data - self.theory", "def residual_G2D(pars,x,y,data=None, eps=None):\n\tparvals = pars.valuesdict() # a Parameters() object is passed as \"pars\"\n\tintensity_max = parvals[\"I_zero\"]\n\tcenterposition_x = parvals[\"x_zero\"]\n\tcenterposition_y = parvals[\"y_zero\"]\n\tbeamwidth_x = parvals[\"omegaX_zero\"]\n\tbeamwidth_y = parvals[\"omegaY_zero\"]\n\ttheta = parvals[\"theta_rot\"]\n\tbgr = parvals[\"backgr\"]\n\t\n\t# the model function is based on this http://www.cs.brandeis.edu/~cs155/Lecture_06.pdf\n\t# slide 23; it should describe rotation by angle theta around an arbitrary point \n\t# if I understood the notes correctly, then this transformation should be correct \n\t# but I didn't check the math myself\n\n\t# the rotation is clockwise\n\n\tmodel = intensity_max*np.exp(-2*np.power(x*np.cos(theta)-y*np.sin(theta)+centerposition_x*(1-np.cos(theta))+centerposition_y*np.sin(theta)-centerposition_x,2)/beamwidth_x**2 - \\\n\t\t2*np.power(x*np.sin(theta)+y*np.cos(theta)+centerposition_y*(1-np.cos(theta))-centerposition_x*np.sin(theta)-centerposition_y,2)/beamwidth_y**2) + bgr\n\tif data is None:\n\t\treturn np.array(model) # we don't flatten here because this is for plotting\n\tif eps is None:\n\t\tresid = np.array(model - data)\n\t\treturn resid.flatten() # minimization array must be flattened (LMFIT FAQ)\n\telse:\n\t\tresid = np.array((model - data)/eps)\n\t\treturn resid.flatten()", "def r_squared(beta_0: float, beta_1: float, x: np.ndarray, y: np.ndarray) -> float:\n return 1.0 - (sum_of_sq_errors(beta_0, beta_1, x, y) / total_sum_of_squares(y))", "def update_r(self):\n self.gamma_r = self.gamma_s - self.gamma_q\n self.Sigma_r = self.Sigma_s - self.Sigma_q", "def compute_residual(self, stage=''):\n\n # get current level and problem description\n L = self.level\n\n # Check if we want to skip the residual computation to gain performance\n # Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual!\n if stage in self.params.skip_residual_computation:\n L.status.residual = 0.0 if L.status.residual is None else L.status.residual\n return None\n\n # check if there are new values (e.g. 
from a sweep)\n # assert L.status.updated\n\n # compute the residual for each node\n\n # build QF(u)\n res_norm = []\n res = self.integrate()\n for m in range(self.coll.num_nodes):\n res[m] += L.u[0] - L.u[m + 1]\n # add tau if associated\n if L.tau[m] is not None:\n res[m] += L.tau[m]\n # use abs function from data type here\n res_norm.append(abs(res[m]))\n\n # find maximal residual over the nodes\n if L.params.residual_type == 'full_abs':\n L.status.residual = max(res_norm)\n elif L.params.residual_type == 'last_abs':\n L.status.residual = res_norm[-1]\n elif L.params.residual_type == 'full_rel':\n L.status.residual = max(res_norm) / abs(L.u[0])\n elif L.params.residual_type == 'last_rel':\n L.status.residual = res_norm[-1] / abs(L.u[0])\n else:\n raise ParameterError(\n f'residual_type = {L.params.residual_type} not implemented, choose '\n f'full_abs, last_abs, full_rel or last_rel instead'\n )\n\n # indicate that the residual has seen the new values\n L.status.updated = False\n\n return None", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\t\t\treturn Resid.flatten()", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( 
(r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k)\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model[interp_model == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\n\t\t\treturn Resid.flatten()", "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = 
XY_mesh[0][mask], XY_mesh[1][mask] \n\t\t\t\tXY = (x,y)\n\n\n\n\n\t\t\t\tfor kk in range(2):\n\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, N+kk, XY, r_0 = rings_pos[N], r_space = r_space_k )\n\n\t\t\t\t\tmdl_ev = mdl_ev + Vxy[kk]\n\n\n\t\t\t\t\tif N == 0 and kk == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\tmask1 = np.where( (r_n < rings_pos[0] ) )\n\t\t\t\t\t\tx1,y1 = XY_mesh[0][mask1], XY_mesh[1][mask1] \n\t\t\t\t\t\tXY1 = (x1,y1)\n\n\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\t# inner interpolation\n\t\t\t\t\t\t#\n\t\t\t\t\t\t#\n\t\t\t\t\t\n\t\t\t\t\t\t#(a) velocity rise linearly from zero\n\n\t\t\t\t\t\tr_space_0 = rings_pos[0]\n\t\t\t\t\t\tVxy,Vsys = vmodel_dataset(pars, 0, XY1, r_0 = 0, r_space = r_space_0)\n\t\t\t\t\t\n\t\t\t\t\t\tinterp_model[mask1] = Vxy[1] + Vsys\n\n\t\t\t\tinterp_model[mask] = mdl_ev + Vsys\n\n\n\n\n\t\t\t\n\t\t\tsigma = np.sqrt(e_vel_map**2 + e_ISM**2)\n\t\t\t#interp_model[interp_model == 0] = np.nan\n\n\n\t\t\tconvolved = 0\n\n\t\t\tif convolved == True:\n\n\t\t\t\tmy_beam = Beam(2.5*u.arcsec, 2.5*u.arcsec, 0*u.deg)\n\t\t\t\tpix_scale = pixel_scale * u.arcsec\n\t\t\t\tgauss_kern = my_beam.as_kernel(pix_scale, x_size = nx, y_size = ny)\n\n\n\t\t\t\textend = np.zeros((3*ny,3*nx))\n\t\t\t\textend[ny:2*ny,nx:2*nx] = interp_model\n\t\t\t\tconvolve_extend = convolve_fft(extend, gauss_kern, mask = extend == 0 )\n\t\t\t\tinterp_model_conv = convolve_extend[ny:2*ny,nx:2*nx]\n\t\t\t\tinterp_model_conv[interp_model == 0] = 0\n\n\n\n\n\t\t\telse:\n\t\t\t\tinterp_model_conv = interp_model\n\n\t\t\tinterp_model_conv[interp_model_conv == 0] = np.nan\n\t\t\tres = vel_map - interp_model_conv\n\t\t\tResid = res/sigma\n\n\n\t\t\treturn Resid.flatten()", "def get_S_r(self):\n\n S_r = np.sum((self.eta_model - self.eta_exp) ** 2.)\n\n return S_r", "def m_beta_plaw(r, m_x, r_x, r_c, beta, gamma, rho_x=None, **kwargs):\n if r <= r_x:\n m = m_beta(r=r, m_x=m_x, r_x=r_x, r_c=r_c, beta=beta)\n else:\n if rho_x is None:\n rho_x = profile_beta(np.array([r_x]).reshape(-1, 1),\n m_x=np.array([m_x]).reshape(-1, 1),\n r_x=np.array([r_x]).reshape(-1, 1),\n r_c=np.array([r_c]).reshape(-1, 1),\n beta=np.array([beta]).reshape(-1, 1))\n rho_x = rho_x.reshape(-1)\n m = (m_x + m_plaw(r=r, rho_x=rho_x, r_x=r_x, gamma=gamma))\n\n return m", "def tt_gmres_leftprecond(AOp, b, nrm_b, eps=1.e-6, maxIter=20, verbose=True, preconOp=None, adaptiveTolerance=True):\n\n def calc_solution():\n x = pitts_py.TensorTrain_double(b.dimensions())\n x.setZero()\n nrm_x = 0\n for i in range(len(y)):\n nrm_x = pitts_py.axpby(y[i], V[i], nrm_x, x, eps)\n return x, nrm_x\n\n def residual_error(x, nrm_x):\n #print(\"TT-GMRES: solution max rank %d\" % np.max(x.getTTranks()))\n # calculate real residual\n r = pitts_py.TensorTrain_double(b.dimensions())\n r_nrm = nrm_x * AOp(x, r, eps/10, maxRank=9999)\n if preconOp is not None:\n r_nrm = pitts_py.axpby(orig_nrm_b, orig_b, -r_nrm, r, eps/10, maxRank=9999)\n #print(\"TT-GMRES: real residual norm %g\" % (r_nrm/orig_nrm_b) )\n else:\n r_nrm = pitts_py.axpby(nrm_b, b, -r_nrm, r, eps/10, maxRank=9999)\n #print(\"TT-GMRES: real residual norm %g\" % (r_nrm/nrm_b) )\n return r_nrm\n\n if verbose:\n if preconOp is None:\n print('# \"iteration\" \"rel LSTQ norm\" \"rel residual norm\" \"new direction rank\" \"new Krylov vector rank\" \"solution rank\"')\n else:\n print('# \"iteration\" \"rel LSTQ norm\" \"rel residual norm\" \"new direction rank\" \"precond direction rank\" \"new Krylov vector rank\" \"solution rank\"')\n\n # assumes b is normalized and nrm_b is the desired rhs norm\n\n # 
left-preconditioning, transform RHS\n if preconOp is not None:\n orig_b = b\n orig_nrm_b = nrm_b\n b = pitts_py.TensorTrain_double(orig_b.dimensions())\n nrm_b = nrm_b * preconOp.apply(orig_b, b, eps / 10, 9999)\n nrm_b = nrm_b * pitts_py.normalize(b, eps/10, 9999)\n\n # define initial subspace\n beta = nrm_b\n curr_beta = beta\n V = [b]\n m = maxIter\n H = np.zeros((m + 1, m), order='F')\n\n if preconOp is not None:\n z = pitts_py.TensorTrain_double(b.dimensions())\n\n if verbose:\n #print(\"TT-GMRES: initial residual norm: %g, max. rank: %d\" % (beta, np.max(b.getTTranks())))\n if preconOp is None:\n print(0, 1, 1, np.max(b.getTTranks()), np.max(b.getTTranks()), 0)\n #print(\"TT-GMRES: un-preconditioned RHS max. rank: %d\" % np.max(orig_b.getTTranks()))\n else:\n print(0, 1, 1, np.max(orig_b.getTTranks()), np.max(b.getTTranks()), np.max(b.getTTranks()), 0)\n\n for j in range(m):\n if adaptiveTolerance:\n delta = eps / (curr_beta / beta) / (1.2 * m)\n else:\n delta = eps\n w = pitts_py.TensorTrain_double(b.dimensions())\n\n if preconOp is not None:\n z_nrm = AOp(V[j], z, delta, 9999)#, (j+1)*rank_b)\n w_nrm = z_nrm * preconOp.apply(z, w, delta, 9999)#, (j+2)*rank_b)\n else:\n w_nrm = AOp(V[j], w, delta, 9999)#, (j+2)*rank_b)\n\n if preconOp is not None:\n rank_z = np.max(z.getTTranks())\n rank_w = np.max(w.getTTranks())\n\n H[:j+2,j] = w_nrm * tt_pivmgs(V, w, delta, maxRank=9999)\n\n rank_vj = np.max(w.getTTranks())\n\n Hj = H[:j+2,:j+1]\n betae = np.zeros(j+2)\n betae[0] = beta\n # solving Hj * y = beta e_1\n y, curr_beta, rank, s = np.linalg.lstsq(Hj, betae, rcond=None)\n curr_beta = np.sqrt(curr_beta[0]) if curr_beta.size > 0 else 0\n if verbose:\n #print(\"TT-GMRES: LSTSQ residual norm: %g \" % (curr_beta / beta) )\n x, nrm_x = calc_solution()\n r_nrm = residual_error(x, nrm_x)\n rank_x = np.max(x.getTTranks())\n if preconOp is None:\n print(j+1, curr_beta/beta, r_nrm / nrm_b, rank_w, rank_vj, rank_x)\n else:\n print(j+1, curr_beta/beta, r_nrm / orig_nrm_b, rank_w, rank_z, rank_vj, rank_x)\n if curr_beta / beta <= eps:\n break\n\n if not verbose:\n x, nrm_x = calc_solution()\n return x, nrm_x", "def gmres_wrapper(jax: types.ModuleType):\n jnp = jax.numpy\n\n def gmres_m(A_mv: Callable, A_args: Sequence,\n b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,\n atol: float, num_krylov_vectors: int,\n maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:\n \"\"\"\n Solve A x = b for x using the m-restarted GMRES method. 
This is\n intended to be called via jax_backend.gmres.\n\n Given a linear mapping with (n x n) matrix representation\n A = A_mv(*A_args) gmres_m solves\n Ax = b (1)\n where x and b are length-n vectors, using the method of\n Generalized Minimum RESiduals with M iterations per restart (GMRES_M).\n\n Args:\n A_mv: A function v0 = A_mv(v, *A_args) where v0 and v have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The b in A @ x = b.\n x0: Initial guess solution.\n tol, atol: Solution tolerance to achieve,\n norm(residual) <= max(tol * norm(b), atol).\n tol is also used to set the threshold at which the Arnoldi factorization\n terminates.\n num_krylov_vectors: Size of the Krylov space to build at each restart.\n maxiter: The Krylov space will be repeatedly rebuilt up to this many\n times.\n Returns:\n x: The approximate solution.\n beta: Norm of the residual at termination.\n n_iter: Number of iterations at termination.\n converged: Whether the desired tolerance was achieved.\n \"\"\"\n num_krylov_vectors = min(num_krylov_vectors, b.size)\n x = x0\n b_norm = jnp.linalg.norm(b)\n tol = max(tol * b_norm, atol)\n for n_iter in range(maxiter):\n done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,\n b_norm)\n if done:\n break\n return x, beta, n_iter, done\n\n def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,\n tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:\n \"\"\"\n A single restart of GMRES.\n\n Args:\n A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and\n `v` have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The `b` in `A @ x = b`.\n x: Initial guess solution.\n tol: Solution tolerance to achieve,\n num_krylov_vectors : Size of the Krylov space to build.\n Returns:\n done: Whether convergence was achieved.\n beta: Magnitude of residual (i.e. 
the error estimate).\n x: The approximate solution.\n \"\"\"\n r, beta = gmres_residual(A_mv, A_args, b, x)\n k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,\n x0, r, beta, tol, b_norm)\n x = gmres_update(k, V, R, beta_vec, x0)\n done = k < num_krylov_vectors - 1\n return done, beta, x\n\n @jax.jit\n def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]:\n \"\"\"\n Computes the residual vector r and its norm, beta, which is minimized by\n GMRES.\n\n Args:\n A_mv: A function v0 = A_mv(v, *A_args) where v0 and\n v have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The b in A @ x = b.\n x: Initial guess solution.\n Returns:\n r: The residual vector.\n beta: Its magnitude.\n \"\"\"\n r = b - A_mv(x, *A_args)\n beta = jnp.linalg.norm(r)\n return r, beta\n\n def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n \"\"\"\n Updates the solution in response to the information computed by the\n main GMRES loop.\n\n Args:\n k: The final iteration which was reached by GMRES before convergence.\n V: The Arnoldi matrix of Krylov vectors.\n R: The R factor in H = QR where H is the Arnoldi overlap matrix.\n beta_vec: Stores the Givens factors used to map H into QR.\n x0: The initial guess solution.\n Returns:\n x: The updated solution.\n \"\"\"\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x\n\n @functools.partial(jax.jit, static_argnums=(2,))\n def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Builds the Arnoldi decomposition of (A, v), where v is the normalized\n residual of the current solution estimate. The decomposition is\n returned as V, R, where V is the usual matrix of Krylov vectors and\n R is the upper triangular matrix in H = QR, with H the usual matrix\n of overlaps.\n\n Args:\n A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and\n `v` have the same shape.\n A_args: A list of positional arguments to A_mv.\n n_kry: Size of the Krylov space to build; this is called\n num_krylov_vectors in higher level code.\n x0: Guess solution.\n r: Residual vector.\n beta: Magnitude of r.\n tol: Solution tolerance to achieve.\n b_norm: Magnitude of b in Ax = b.\n Returns:\n k: Counts the number of iterations before convergence.\n V: The Arnoldi matrix of Krylov vectors.\n R: From H = QR where H is the Arnoldi matrix of overlaps.\n beta_vec: Stores Q implicitly as Givens factors.\n \"\"\"\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. 
Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)\n\n VarType = Tuple[int, jax.ShapedArray, jax.ShapedArray, jax.ShapedArray,\n float, jax.ShapedArray]\n ConstType = Tuple[float, Callable, Sequence, jax.ShapedArray, int]\n GmresCarryType = Tuple[VarType, ConstType]\n\n @jax.jit\n def gmres_krylov_loop_condition(gmres_carry: GmresCarryType) -> bool:\n \"\"\"\n This function dictates whether the main GMRES while loop will proceed.\n It is equivalent to:\n if k < n_kry and err > tol:\n return True\n else:\n return False\n where k, n_kry, err, and tol are unpacked from gmres_carry.\n\n Args:\n gmres_carry: The gmres_carry from gmres_krylov.\n Returns:\n (bool): Whether to continue iterating.\n \"\"\"\n gmres_constants, gmres_variables = gmres_carry\n tol = gmres_constants[0]\n k = gmres_variables[0]\n err = gmres_variables[4]\n n_kry = gmres_constants[4]\n\n def is_iterating(k, n_kry):\n return k < n_kry\n\n def not_converged(args):\n err, tol = args\n return err >= tol\n return jax.lax.cond(is_iterating(k, n_kry), # Predicate.\n not_converged, # Called if True.\n lambda x: False, # Called if False.\n (err, tol)) # Arguments to calls.\n\n @jax.jit\n def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:\n \"\"\"\n Performs a single iteration of gmres_krylov. 
See that function for a more\n detailed description.\n\n Args:\n gmres_carry: The gmres_carry from gmres_krylov.\n Returns:\n gmres_carry: The updated gmres_carry.\n \"\"\"\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n tol, A_mv, A_args, b_norm, _ = gmres_constants\n\n V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)\n R_col, givens = apply_givens_rotation(H[:, k], givens, k)\n R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])\n\n # Update the residual vector.\n cs, sn = givens[:, k] * beta_vec[k]\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)\n err = jnp.abs(sn) / b_norm\n gmres_variables = (k + 1, V, R, beta_vec, err, givens)\n return (gmres_variables, gmres_constants)\n\n @jax.jit\n def _gs_step(r: jax.ShapedArray,\n v_i: jax.ShapedArray) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Performs one iteration of the stabilized Gram-Schmidt procedure, with\n r to be orthonormalized against {v} = {v_0, v_1, ...}.\n\n Args:\n r: The new vector which is not in the initially orthonormal set.\n v_i: The i'th vector in that set.\n Returns:\n r_i: The updated r which is now orthonormal with v_i.\n h_i: The overlap of r with v_i.\n \"\"\"\n h_i = jnp.vdot(v_i, r)\n r_i = r - h_i * v_i\n return r_i, h_i\n\n @jax.jit\n def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence,\n V: jax.ShapedArray, H: jax.ShapedArray,\n tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Performs the kth iteration of the Arnoldi reduction procedure.\n Args:\n k: The current iteration.\n A_mv, A_args: A function A_mv(v, *A_args) performing a linear\n transformation on v.\n V: A matrix of size (n, K + 1), K > k such that each column in\n V[n, :k+1] stores a Krylov vector and V[:, k+1] is all zeroes.\n H: A matrix of size (K, K), K > k with H[:, k] all zeroes.\n Returns:\n V, H: With their k'th columns respectively filled in by a new\n orthogonalized Krylov vector and new overlaps.\n \"\"\"\n v = A_mv(V[:, k], *A_args)\n v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T)\n v_norm = jnp.linalg.norm(v_new)\n r_new = v_new / v_norm\n # Normalize v unless it is the zero vector.\n r_new = jax.lax.cond(v_norm > tol,\n lambda x: x[0] / x[1],\n lambda x: 0.*x[0],\n (v_new, v_norm)\n )\n H = jax.ops.index_update(H, jax.ops.index[:, k], H_k)\n H = jax.ops.index_update(H, jax.ops.index[k+1, k], v_norm)\n V = jax.ops.index_update(V, jax.ops.index[:, k+1], r_new)\n return V, H\n\n####################################################################\n# GIVENS ROTATIONS\n####################################################################\n @jax.jit\n def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> jax.ShapedArray:\n \"\"\"\n Successively applies each of the rotations stored in givens to H_col.\n\n Args:\n H_col : The vector to be rotated.\n givens: 2 x K, K > k matrix of rotation factors.\n k : Iteration number.\n Returns:\n H_col : The rotated vector.\n \"\"\"\n rotation_carry = (H_col, 0, k, givens)\n\n def loop_condition(carry):\n i = carry[1]\n k = carry[2]\n return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)\n\n def apply_ith_rotation(carry):\n H_col, i, k, givens = carry\n cs = givens[0, i]\n sn = givens[1, i]\n H_i = cs * H_col[i] - sn * H_col[i + 1]\n H_ip1 = sn * H_col[i] + cs * H_col[i + 1]\n H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i)\n H_col = jax.ops.index_update(H_col, 
jax.ops.index[i + 1], H_ip1)\n return (H_col, i + 1, k, givens)\n\n rotation_carry = jax.lax.while_loop(loop_condition,\n apply_ith_rotation,\n rotation_carry)\n H_col = rotation_carry[0]\n return H_col\n\n @jax.jit\n def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Applies the Givens rotations stored in the vectors cs and sn to the vector\n H_col. Then constructs a new Givens rotation that eliminates H_col's\n k'th element, yielding the corresponding column of the R in H's QR\n decomposition. Returns the new column of R along with the new Givens\n factors.\n\n Args:\n H_col : The column of H to be rotated.\n givens: A matrix representing the cosine and sine factors of the\n previous GMRES Givens rotations, in that order\n (i.e. givens[0, :] -> the cos factor).\n k : Iteration number.\n Returns:\n R_col : The column of R obtained by transforming H_col.\n givens_k: The new elements of givens that zeroed out the k+1'th element\n of H_col.\n \"\"\"\n # This call successively applies each of the\n # Givens rotations stored in givens[:, :k] to H_col.\n H_col = apply_rotations(H_col, givens, k)\n\n cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1])\n givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k)\n givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k)\n\n r_k = cs_k * H_col[k] - sn_k * H_col[k + 1]\n R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k)\n R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.)\n return R_col, givens\n\n @jax.jit\n def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n \"\"\"\n Given scalars v1 and v2, computes cs = cos(theta) and sn = sin(theta)\n so that [cs -sn] @ [v1] = [r]\n [sn cs] [v2] [0]\n Args:\n v1, v2: The scalars.\n Returns:\n cs, sn: The rotation factors.\n \"\"\"\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn\n\n fnames = [\n \"gmres_m\", \"gmres_residual\", \"gmres_krylov\", \"gs_step\",\n \"kth_arnoldi_step\", \"givens_rotation\"\n ]\n functions = [\n gmres_m, gmres_residual, gmres_krylov, _gs_step, kth_arnoldi_step,\n givens_rotation\n ]\n\n class Functions:\n\n def __init__(self, fun_dict):\n self.dict = fun_dict\n\n def __getattr__(self, name):\n return self.dict[name]\n\n return Functions(dict(zip(fnames, functions)))", "def _compute_residuals(self):\n residuls = self.I - self.E\n return residuls", "def get_residual(self) -> np.ndarray:\n return self._calculate_residual(self.coefficients)", "def _j_beta(r, s, r_ani, beta_inf):\n return ((s**2 + r_ani**2) / (r**2 + r_ani**2)) ** beta_inf", "def residual2(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n B0 = parvals['B0']\n Tm = parvals['Tm']\n T0 = parvals['T0']\n model = B0*x*(x-T0)*((Tm-x)**0.5)\n return data - model", "def anisotropy_solution(r, r_ani, beta_inf):\n return (r**2 + r_ani**2) ** beta_inf", "def sigma_mean_beta(R, m_x, r_x, r_c, beta, **kwargs):\n # analytic enclosed mass inside r_x gives normalization rho_0\n rho_0 = m_x / (4./3 * np.pi * r_x**3 * spec.hyp2f1(\n 1.5, 1.5 * beta, 2.5, -(r_x / r_c)**2))\n\n x2 = (R / r_c)**2\n\n if beta != 1:\n prefactor = (\n np.pi**0.5 * r_c * rho_0 * 1. / x2 *\n spec.gamma(1.5 * beta - 0.5) /\n spec.gamma(1.5 * beta))\n f = 1. / (1.5 - 1.5 * beta) * ((1 + x2)**(1.5 - 1.5 * beta) - 1)\n\n sigma_mean = prefactor * f\n\n else:\n prefactor = (2 * r_c * rho_0 * 1. 
/ x2)\n f = np.log(1 + x2)\n\n sigma_mean = prefactor * f\n\n return sigma_mean", "def Dres(var):\r\n zeropred = residuals(var)\r\n derivparams = []\r\n results=[]\r\n delta = m.sqrt(np.finfo(float).eps) #Stolen from the leastsq code\r\n for i in range(len(var)): #fixme: This loop is probably sub-optimal. Have a look at what does leastsq to improve this.\r\n copy = np.array(var)\r\n copy[i] += delta\r\n derivparams.append(copy)\r\n# results.append(residuals(derivparams))\r\n if __name__ == \"__main__\":\r\n pool = multiprocessing.Pool(nb_nodes)\r\n results = pool.map(residuals, derivparams)\r\n derivs = [ (r - zeropred)/delta for r in results ]\r\n return derivs", "def _residual(self, x):\n h = x\n h = self.c1(h)\n h = self.activation(h)\n h = self.c2(h)\n h = F.avg_pool2d(h, 2)\n\n return h", "def gmres_m(A_mv: Callable, A_args: Sequence,\n b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,\n atol: float, num_krylov_vectors: int,\n maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:\n num_krylov_vectors = min(num_krylov_vectors, b.size)\n x = x0\n b_norm = jnp.linalg.norm(b)\n tol = max(tol * b_norm, atol)\n for n_iter in range(maxiter):\n done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,\n b_norm)\n if done:\n break\n return x, beta, n_iter, done", "def residuals(self):\r\n return self.__residuals", "def validating_step(self, x, r_loss, beta):\n reconstructed = self(x) # Compute input reconstruction.\n # Compute loss.\n loss = trace_loss(x, reconstructed)\n kl = sum(self.losses)\n loss = r_loss * loss + beta*kl \n \n fid = fidelity_rho(x, reconstructed)\n\n return loss, np.mean(fid)", "def residuals_(self):\n return self._residuals", "def _residual_lattice(self, params):\n model = np.sqrt(self.calc_q_square())\n data = np.absolute(self.q)\n return (model[self.mask] -data[self.mask])", "def update_variables_RMSProp(alpha, beta2, epsilon, var, grad, s):\n s = (s * beta2) + ((1 - beta2) * (grad ** 2))\n var = var - ((alpha * grad) / (s ** (1/2) + epsilon))\n return var, s", "def calc_sc_beta(symm_elems):\n r_11, r_12, r_13 = symm_elems[4], symm_elems[5], symm_elems[6]\n r_21, r_22, r_23 = symm_elems[7], symm_elems[8], symm_elems[9]\n r_31, r_32, r_33 = symm_elems[10], symm_elems[11], symm_elems[12]\n\n r_r_t = numpy.stack([\n numpy.stack([r_11**2, r_12**2, r_13**2, 2*r_11*r_12, 2*r_11*r_13, 2*r_12*r_13], axis=0), # 11\n numpy.stack([r_21**2, r_22**2, r_23**2, 2*r_21*r_22, 2*r_21*r_23, 2*r_22*r_23], axis=0), # 22\n numpy.stack([r_31**2, r_32**2, r_33**2, 2*r_31*r_32, 2*r_31*r_33, 2*r_32*r_33], axis=0), # 33\n numpy.stack([r_11*r_21, r_12*r_22, r_13*r_23, r_11*r_22 + r_12*r_21, r_11*r_23 + r_13*r_21, r_12*r_23 + r_13*r_22], axis=0), # 12\n numpy.stack([r_11*r_31, r_12*r_32, r_13*r_33, r_11*r_32 + r_12*r_31, r_11*r_33 + r_13*r_31, r_12*r_33 + r_13*r_32], axis=0), # 13\n numpy.stack([r_21*r_31, r_22*r_32, r_23*r_33, r_21*r_32 + r_22*r_31, r_21*r_33 + r_23*r_31, r_22*r_33 + r_23*r_32], axis=0)], axis=0 # 23\n )\n sc_beta = r_r_t.sum(axis=2)/r_11.shape[0]\n return sc_beta", "def get_R(self):\n return self.R_min * tf.exp(self.R_ * self.log_R_range)", "def r2_GWR(GWRMod): \r\n tss = np.sum((GWRMod.y - GWRMod.y_mean)**2)\r\n r2 = 1.0 - GWRMod.res2/tss\r\n \r\n return r2", "def solve_gmres(matvec: Callable,\n b: Any,\n ridge: Optional[float] = None,\n tol: float = 1e-5,\n **kwargs) -> Any:\n if ridge is not None:\n matvec = _make_ridge_matvec(matvec, ridge=ridge)\n return jax.scipy.sparse.linalg.gmres(matvec, b, tol=tol, **kwargs)[0]", "def training_step(self, x, r_loss, 
beta):\n with tf.GradientTape() as tape:\n reconstructed = self(x) # Compute input reconstruction.\n # Compute loss.\n loss = trace_loss(x, reconstructed)\n kl = sum(self.losses)\n loss = r_loss * loss + beta*kl \n \n # Update the weights of the VAE.\n grads = tape.gradient(loss, self.trainable_weights)\n self.optimizer.apply_gradients(zip(grads, self.trainable_weights)) \n \n fid = fidelity_rho(x, reconstructed)\n\n return loss, np.mean(fid)", "def update(self, x, A, y_vec, r):\n if not self.random:\n self.base_learner.update(truncate_context(x,self.d), A, y_vec, r)\n\n features = np.matrix(x.get_ld_features())\n for i in range(features.shape[0]):\n self.global_cov += features[i,:].T*features[i,:]\n if self.random or (self.learner_type == 'minimonster' and self.base_learner.num_unif > self.base_num_unif):\n for i in range(len(A)):\n self.global_b += y_vec[i]*features[A[i],:].T\n self.random_samples += 1\n\n self.t += 1\n if self.t % 10 == 0:\n self.estimate_residual()", "def gradient(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n grad = 4.0 * epsilon * ((-12.0/r) * s12 - (-6/r) * s6)\n grad = 0.5 * (r - 5.0)\n return grad", "def residual(S):\n rho = seawater.density(T, S, Pa)\n return (rho_1 - rho)", "def calc_rV(A):\n return np.sqrt(calc_rVsq(A))", "def shear_beta(R, m_x, r_x, r_c, beta, sigma_crit=1, **kwargs):\n # analytic enclosed mass inside r_x gives normalization rho_0\n rho_0 = m_x / (4./3 * np.pi * r_x**3 * spec.hyp2f1(\n 1.5, 1.5 * beta, 2.5, -(r_x / r_c)**2))\n\n prefactor = (np.pi**0.5 * r_c * rho_0 / sigma_crit)\n x2 = (R / r_c)**2\n\n if beta != 1:\n prefactor *= spec.gamma(1.5 * beta - 0.5) / spec.gamma(1.5 * beta)\n f = (\n 1. / (1.5 - 1.5 * beta) * 1. / x2 *\n ((1 + x2)**(1.5 - 1.5 * beta) - 1) -\n (1 + x2)**(0.5 - 1.5 * beta)\n )\n\n shear = prefactor * f\n\n else:\n prefactor *= 2. / np.pi**0.5\n f = 1. / x2 * np.log(1 + x2) - 1. 
/ (1 + x2)\n\n shear = prefactor * f\n\n return shear", "def rmse(self, R, *args, **kwargs):\n rmse = 0\n for (u, i, r) in zip(R.row, R.col, R.data):\n r_hat = self.predict(u, i, *args, **kwargs)\n rmse += (r - r_hat) ** 2\n rmse = np.sqrt(rmse / R.getnnz())\n return rmse", "def _second_moment(R, sig_l, sig_m, lum, mass, Mbh, beta, tensor,\n sigmaPsf, normPsf, step, nrad, surf_l, pixSize):\n if (max(sigmaPsf) > 0) and (pixSize > 0): # PSF convolution\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n\n # Make grid linear in log of radius RR\n #\n rmax = np.max(R) + mx # Radius of circle containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in log(RR)\n rr = np.exp(logRad)\n\n # The model Vrms computation is only performed on the radial grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(rr)\n mgePol = np.empty_like(rr)\n rup = 3*np.max(sig_l)\n for j in range(rr.size): # Integration of equation (50)\n wm2Pol[j] = quadva(_integrand, [rr[j], rup],\n args=(sig_l, sig_m, lum, mass, Mbh, rr[j], beta, tensor))[0]\n mgePol[j] = np.sum(surf_l * np.exp(-0.5*(rr[j]/sig_l)**2))\n\n nx = np.ceil(rmax/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n xCar, yCar = np.meshgrid(x1, x1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + yCar**2) # Log radius of cartesian grid\n wm2Car = np.interp(r1, logRad, wm2Pol)\n mgeCar = np.interp(r1, logRad, mgePol)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normalization is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = np.sqrt(signal.fftconvolve(wm2Car, kernel, mode='same')\n / signal.fftconvolve(mgeCar, kernel, mode='same'))\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, x1, muCar, R/np.sqrt(2), R/np.sqrt(2))\n\n else: # No PSF convolution: just compute values\n\n mu = np.empty_like(R)\n rmax = 3*np.max(sig_l)\n for j in range(R.size):\n wm2Pol = quadva(_integrand, [R[j], rmax],\n args=(sig_l, sig_m, lum, mass, Mbh, R[j], beta, tensor))[0]\n mgePol = np.sum( surf_l * np.exp(-0.5*(R[j]/sig_l)**2) )\n mu[j] = np.sqrt(wm2Pol/mgePol)\n\n return mu", "def _residual(function, p, x, y, y_err):\n return (y - function(p, x)) / y_err", "def _gmres(self, super_operator, super_rhs, tol):\n sol, solve_info, residuals = linalg.gmres(\n super_operator, super_rhs,\n tol=tol,\n use_strong_form=True,\n return_residuals=True,\n **SOLVER_OPTIONS\n )\n return sol, solve_info, residuals", "def compute_residual(self, augmented_data, bias=None, synapse=None, bkgd=None):\n N = self.N\n T = augmented_data[\"T\"]\n F = augmented_data[\"F\"]\n W = self.weight_model.W\n\n assert bias is not None or 
synapse is not None\n n_pre, n_post = self._get_n(bias, synapse)\n\n # compute psi, excluding the bias or synapse, whichever is specified\n psi = np.zeros(T)\n\n if bias is None:\n psi += self.bias_model.b[None, n_post]\n\n # Only compute residual if W is nonzero\n if not np.allclose(W[:,n_post], 0):\n for nn in xrange(N):\n if nn == n_pre:\n continue\n psi += np.dot(F[:,nn,:], W[nn, n_post, :])\n\n if bkgd is None:\n psi += self.background_model.mean_background_activation(augmented_data)[:,n_post]\n\n return psi", "def zernikeResidualSurfaceError(x, y, z_coef, r=50, eps=230e-6, lmbd=2.6e-3, verbose=False):\n\n # Simulate thermal deformation.\n # The Zernike polynomials is at the center of the map.\n x_mid = midPoint(x)\n y_mid = midPoint(y)\n z_sim = zernikePoly(x, y, x_mid, y_mid, coefficients=z_coef)\n z_sim[~radialMask(x,y,r)] = np.nan\n eps_z = np.nanstd(z_sim)\n if verbose:\n print(\"Residual surface error: {} microns\".format(eps_z*1e6))\n\n # Compute surface error.\n eps_tot = np.sqrt(eps_z**2. + eps**2.)\n if verbose:\n print(\"Total surface error: {} microns\".format(eps_tot*1e6))\n\n # Compute aperture efficiency.\n eta = lambda lmbd, eps: 0.71*np.exp(-np.power(4.*np.pi*eps/lmbd, 2.))\n eta_tot = eta(lmbd, eps_tot)\n if verbose:\n print(\"Aperture efficiency: {}\".format(eta_tot))\n print(\"Change in aperture efficiency: {} %\".format((eta(lmbd, eps) - eta_tot)/eta(lmbd, eps)*100.))\n\n return eps_tot, eta_tot, eta", "def K(r, R, beta):\n u = r / R\n k = np.sqrt(1 - 1. / u ** 2) / (1. - 2 * beta) + np.sqrt(np.pi) / 2 * special.gamma(\n beta - 1. / 2) / special.gamma(beta) \\\n * (3. / 2 - beta) * u ** (2 * beta - 1.) * (1 - special.betainc(beta + 1. / 2, 1. / 2, 1. / u ** 2))\n return k", "def R2(orig,residSum):\n\n\n # FIX: this just is not right \n #\n # A correct formulation of this (from Excel) for 2 variables is:\n # r2 = [n*(Sxy) - (Sx)(Sy)]^2 / ([n*(Sx2) - (Sx)^2]*[n*(Sy2) - (Sy)^2])\n #\n #\n\n vect = numpy.array(orig)\n n = vect.shape[0]\n if n <= 0:\n return 0.,0.\n oMean = sum(vect)/n\n v = vect-oMean\n oVar = sum(v*v)\n return 1. - residSum/oVar", "def residual_of(self, z):\n return np.subtract(z, self.HJacob(self.x)@self.x_prior)", "def _gmres(self, super_operator, super_rhs, tol):\n return login_gmres(\n super_operator, super_rhs, tol,\n return_residuals=True,\n **SOLVER_OPTIONS\n )", "def getR(self):\n # Reynolds number uses the absolute value of the velocity\n V = abs(self.V)\n return (V * self.D) / self.v # formula for Reynolds number", "def solid_mass_integrated_r0_to_r_given_power_law_profile(r, r0, sigma0, beta, a0=1.):\n assert 0 <= r0 < r # r0 must be nonzero to avoid infinite mass for beta <= -2\n assert 0 < sigma0\n assert 0 < a0\n if beta == -2:\n M_r = 2.*np.pi*sigma0*((a0*gen.AU)**2.)*np.log(r/r0) # total mass in grams\n else: # if beta != -2\n M_r = (2.*np.pi*sigma0/(2.+beta))*((r/a0)**(2.+beta) - (r0/a0)**(2.+beta)) *((a0*gen.AU)**2.) # total mass in grams\n M_r = M_r/(1e3*gen.Mearth) # convert total mass to Earth masses\n return M_r", "def residual1(params, x, data):\n #get the value of the params from a dict\n parvals = params.valuesdict()\n a = parvals['a']\n b = parvals['b']\n c = parvals['c']\n d = parvals['d']\n model = a + b*x + c*x**2 + d*x**3\n return data - model", "def ssr(self):\n return (self.resid * self.resid).sum(0)", "def probaR1R2(self, r1, r2):\n if r1 == 0.: \n if r2 == 0.:\n return self.__alpha0\n elif r2 == 1.:\n return self.__beta\n else:\n return self.__eta * (1. 
- r2)\n\n if r1 == 1.: \n if r2 == 0.:\n return self.__beta\n elif r2 == 1.:\n return self.__alpha1\n else:\n return self.__eta * (1. - np.abs(1. - r2))\n\n return self.__eta * (1. - np.abs(r1 - r2))", "def reduce_residual(operator, rhs=None, RB=None, rhs_is_functional=True, product=None, extends=None):\n assert rhs is None \\\n or rhs_is_functional and (rhs.range == NumpyVectorSpace(1) and rhs.source == operator.range and rhs.linear) \\\n or not rhs_is_functional and (rhs.source == NumpyVectorSpace(1) and rhs.range == operator.range and rhs.linear)\n assert RB is None or RB in operator.source\n assert product is None or product.source == product.range == operator.range\n assert extends is None or len(extends) == 3\n\n logger = getLogger('pymor.reductors.residual.reduce_residual')\n\n if RB is None:\n RB = operator.source.empty()\n\n if extends and isinstance(extends[0], NonProjectedResidualOperator):\n extends = None\n if extends:\n residual_range = extends[1].RB\n residual_range_dims = list(extends[2]['residual_range_dims'])\n else:\n residual_range = operator.range.empty()\n residual_range_dims = []\n\n with logger.block('Estimating residual range ...'):\n try:\n residual_range, residual_range_dims = \\\n estimate_image_hierarchical([operator], [rhs], RB, (residual_range, residual_range_dims),\n orthonormalize=True, product=product,\n riesz_representatives=rhs_is_functional)\n except ImageCollectionError as e:\n logger.warn('Cannot compute range of {}. Evaluation will be slow.'.format(e.op))\n operator = operator.projected(None, RB)\n return (NonProjectedResidualOperator(operator, rhs, rhs_is_functional, product),\n NonProjectedReconstructor(product),\n {})\n\n with logger.block('Projecting residual operator ...'):\n if rhs_is_functional:\n operator = operator.projected(residual_range, RB, product=None) # the product cancels out.\n rhs = rhs.projected(None, residual_range, product=None)\n else:\n operator = operator.projected(residual_range, RB, product=product)\n rhs = rhs.projected(residual_range, None, product=product)\n\n return (ResidualOperator(operator, rhs, rhs_is_functional),\n GenericRBReconstructor(residual_range),\n {'residual_range_dims': residual_range_dims})", "def mprofile(r, alpha, beta,A,B):\n res = A*(1+(r/alpha)**2)**(-beta)+B\n return res", "def update_r_tensor():\n A_op = linear_operator_r(num_w*num_o, D*num_rep, tensors_A, model.x_r, model.xs_l, model.x_orb)\n model.x_r[:,:] = __ridge_complex_lsqr(num_w*num_o, D * num_rep, A_op, y, model.alpha, atol=atol_lsqr, comm=comm).reshape((D, num_rep))", "def residualsG(p, data):\n \n x, y, err = data\n return (y-funcG(p,x)) / err" ]
[ "0.69030577", "0.68404454", "0.6596661", "0.6463848", "0.6344658", "0.63295376", "0.6283406", "0.6267384", "0.6263794", "0.62514615", "0.6221709", "0.6146919", "0.61445266", "0.6131107", "0.6113608", "0.61097836", "0.6103144", "0.6083334", "0.6065997", "0.6033683", "0.60207003", "0.60178405", "0.5999418", "0.5984028", "0.5963782", "0.5936815", "0.5921717", "0.59005046", "0.58798075", "0.5872653", "0.5855065", "0.5844075", "0.58348435", "0.5801279", "0.5779909", "0.5761719", "0.57474786", "0.57177657", "0.5711331", "0.57065964", "0.5685344", "0.5683684", "0.56802696", "0.5652606", "0.56434447", "0.5642088", "0.5642073", "0.562961", "0.562254", "0.5611156", "0.5611156", "0.5611156", "0.5603855", "0.55662566", "0.5561933", "0.55310386", "0.55103517", "0.54871744", "0.547199", "0.54669327", "0.54571503", "0.5429119", "0.5424387", "0.54223144", "0.5422076", "0.5416217", "0.54064363", "0.5405554", "0.5396433", "0.5390074", "0.53871775", "0.5386607", "0.5384644", "0.5382373", "0.53793097", "0.5368452", "0.5360122", "0.5338338", "0.53354555", "0.5335038", "0.5322361", "0.53159696", "0.5307078", "0.5296669", "0.5291029", "0.5289807", "0.5289237", "0.52828616", "0.52813905", "0.5281249", "0.52775294", "0.5272295", "0.52547437", "0.52492994", "0.52328575", "0.5231996", "0.5227307", "0.52267337", "0.52144593", "0.5212734" ]
0.68639606
1
Updates the solution in response to the information computed by the main GMRES loop.
def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray, beta_vec: jax.ShapedArray, x0: jax.ShapedArray) -> jax.ShapedArray:\n    q = min(k, R.shape[1])\n    y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n    x = x0 + V[:, :q] @ y\n    return x
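The document string above is the final solution-update step of GMRES: it solves the small q x q upper-triangular least-squares system R y = beta arising from the Arnoldi QR factorization, then maps y back to the full space through the Krylov basis V. A minimal runnable sketch of that step follows; the toy inputs V, R, beta_vec, and x0 are illustrative assumptions for checking the arithmetic, not values taken from this record.

import jax.numpy as jnp
import jax.scipy.linalg

def gmres_update(k, V, R, beta_vec, x0):
    # Solve the q x q upper-triangular system R[:q, :q] y = beta_vec[:q]
    # (solve_triangular defaults to lower=False, i.e. upper triangular),
    # then lift y back to the full space through the Krylov basis V.
    q = min(k, R.shape[1])
    y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])
    return x0 + V[:, :q] @ y

# Illustrative toy inputs (assumed, not from the dataset record):
V = jnp.eye(3)                    # orthonormal Krylov basis
R = jnp.triu(jnp.ones((3, 3)))    # upper-triangular factor from the Arnoldi QR
beta_vec = jnp.array([1.0, 0.0, 0.0])
x0 = jnp.zeros(3)
print(gmres_update(2, V, R, beta_vec, x0))  # -> [1. 0. 0.]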
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n\n SolidSolver.update(self)\n\n self.__nextStep()", "def exchange_solution(self):\n for ss in self.solvers:\n ss.register_solution()\n\n if self.has_amr:\n self.tioga.data_update_amr()\n else:\n raise NotImplementedError(\"Invalid overset exchange\")\n\n for ss in self.solvers:\n ss.update_solution()", "def update(self, solution):\n self.heuristic_path = [i for i in self.initial_path if i in solution]\n self.heuristic_cost = self.pathCost(self.heuristic_path)", "def notify_solution(self, sol):\n pass # pragma: no cover", "def _update_solution(self, step_size):\n if self.model.assortativity == 'positive':\n self.ivp.integrate(self.ivp.t - step_size)\n x, V = self.ivp.t, self.ivp.y\n else:\n self.ivp.integrate(self.ivp.t + step_size)\n x, V = self.ivp.t, self.ivp.y\n\n assert V[1] > 0.0, \"Firm size should be non-negative!\"\n\n # update the putative equilibrium solution\n wage = self.evaluate_wage(x, V)\n profit = self.evaluate_profit(x, V)\n step = np.hstack((x, V, wage, profit))\n self._solution = np.vstack((self._solution, step))", "def notify_solution(self, sol):\n self._solutions.append(sol)", "def _update_objective(self):\n # rewrap the cost if the solver has been run\n self.Finalize()\n return", "def notify_solution(self, s):\n pass # pragma: no cover", "def update_model(self, verbose):\n if self.comm.project.meshes == \"multi-mesh\":\n self.comm.lasif.move_gradient_to_cluster()\n\n if not self.task_dict[\"summing_completed\"]:\n grad_summer = GradientSummer(comm=self.comm)\n grad_summer.sum_gradients(\n events=self.comm.project.non_val_events_in_iteration,\n output_location=self.raw_gradient_path,\n batch_average=True,\n sum_vpv_vph=True,\n store_norms=True,\n )\n write_xdmf(self.raw_gradient_path)\n self.task_dict[\"summing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Summing already done\")\n\n if not self.task_dict[\"raw_update_completed\"]:\n self._update_model(raw=True, smooth=False, verbose=verbose)\n self.task_dict[\"raw_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Raw updating already completed\")\n\n if not self.task_dict[\"smoothing_completed\"]:\n self.perform_smoothing()\n self.task_dict[\"smoothing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smoothing already done\")\n\n if not self.task_dict[\"smooth_update_completed\"]:\n self._update_model(raw=False, smooth=True, verbose=verbose)\n self.task_dict[\"smooth_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smooth updating already completed\")\n\n if not self.task_dict[\"iteration_finalized\"]:\n self._finalize_iteration(verbose=verbose)\n self.task_dict[\"iteration_finalized\"] = True\n self._update_task_file()\n else:\n self.print(\"Iteration already finalized\")\n\n self.finish_task()", "def start_solving(self):\n self.mesh.output_vtk_mesh(self.model_name + \"0\", \n [self.current_pressure, \n self.mesh.get_cell_domain_all()], \n [\"pressure\", \"domain\"])\n\n self.time_step_output(0., 0)\n\n for time_step in range(1,self.number_of_time_steps+1):\n current_time = time_step*self.delta_t\n print(time_step)\n\n self.update_pressure()\n self.find_upwinding_direction()\n self.update_concentration()\n \n if time_step%self.output_frequency == 0:\n self.mesh.output_vtk_mesh(self.model_name+str(time_step), \n [self.current_pressure,\n self.current_concentration, \n self.mesh.get_cell_domain_all()],\n [\"pressure\", \"concentration\" , \"domain\"])\n\n self.time_step_output(current_time, 
time_step)", "def _do_updates(self):\n is_right = self._puzzle.is_guess_right()\n if is_right:\n self._puzzle.reveal_puzzle()\n else:\n self._jumper.cut_line()", "def updatesolutioninfo(self,whichsol_):\n res = __library__.MSK_XX_updatesolutioninfo(self.__nativep,whichsol_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def solve(self):\n new_puzzle = self._puzzle.clone()\n self._solution = new_puzzle.solve_puzzle()\n del new_puzzle\n pass", "def solve(self):\n for step in self.run.values():\n step.solve()", "def update(self):\n\n\t\tif not self.complete:\n\t\t\tfor vasp_run in self.vasp_run_list:\n\t\t\t\tvasp_run.update()", "def update_current_sol_and_cost(self,sol=None):\n\n # Update current sol if argument given\n if sol is not None:\n self.current_sol = sol\n \n # Update residual and cost\n try:\n self.residual = self.sketch_reweighted - self.sketch_of_solution(self.current_sol)\n self.current_sol_cost = np.linalg.norm(self.residual)\n except AttributeError: # We are here if self.current_sol does not exist yet\n self.current_sol, self.residual = None, self.sketch_reweighted\n self.current_sol_cost = np.inf", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solution_update(self, theta, force=False):\n \n self.x = self.eval(theta, force)\n \n return", "def solve(mm):\n model = mm.model\n model.optimize()\n\n\n mm.optimal = model.status\n mm.take_snapshot()\n print \"\\nSnapshot saved as {}\".format(mm.filename)\n mm.solve_count += 1\n mm.update_filename()\n\n if model.status == gp.GRB.OPTIMAL:\n # Write a csv of the solution data\n write_solution(mm)\n\n\n return True", "def solve(self):\n\n # Open status display\n fmtstr, nsep = self.display_start()\n\n # Start solve timer\n self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Main optimisation iterations\n for self.k in range(self.k, self.k + self.opt['MaxMainIter']):\n\n # Update record of X and Y from previous iteration\n self.on_iteration_start()\n\n # Compute backtracking\n if self.opt['Backtrack'] is not None and self.k >= 0:\n self.timer.stop('solve_wo_btrack')\n # Compute backtracking\n self.backtrack.update(self)\n self.timer.start('solve_wo_btrack')\n else:\n # Compute just proximal step\n self.xstep()\n # Update by combining previous iterates\n self.ystep()\n\n # Compute residuals and stopping thresholds\n self.timer.stop(['solve_wo_rsdl', 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n frcxd, adapt_tol = self.compute_residuals()\n self.timer.start('solve_wo_rsdl')\n\n # Compute and record other iteration statistics and\n # display iteration stats if Verbose option enabled\n self.timer.stop(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n itst = self.iteration_stats(self.k, frcxd)\n self.itstat.append(itst)\n self.display_status(fmtstr, itst)\n self.timer.start(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Call callback function if defined\n if self.opt['Callback'] 
is not None:\n if self.opt['Callback'](self):\n break\n\n # Stop if residual-based stopping tolerances reached\n if not self.opt['FastSolve']:\n if frcxd < adapt_tol:\n break\n\n # Increment iteration count\n self.k += 1\n\n # Record solve time\n self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Print final separator string if Verbose option enabled\n self.display_end(nsep)\n\n return self.getmin()", "def __solve(self) -> None:\n pyo.TransformationFactory(\"contrib.detect_fixed_vars\").apply_to(self.model) # type: ignore\n pyo.TransformationFactory(\"contrib.deactivate_trivial_constraints\").apply_to(self.model) # type: ignore\n\n # initialise the solver object\n self._logger.debug(\"[ModelSolver] Solver object initiated...\")\n solver = Config.OPTIMISATION_MODEL_CONFIG['SOLVER_TYPE']\n opt = pyo.SolverFactory(solver)\n if Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver) is not None:\n for k, v in Config.OPTIMISATION_MODEL_CONFIG['SOLVER_OPTION'].get(solver).items():\n opt.options[k] = v\n\n try:\n start_time = datetime.now()\n self._logger.debug(\"[ModelSolver] Solver starting...\")\n results = opt.solve(self.model, tee=True)\n self.results = results\n end_time = datetime.now()\n self._logger.info(f\"[ModelSolver] Solver completed in {end_time - start_time}.\")\n except Exception as e:\n raise Exception(f\"Model optimisation failed with {solver} with error message {e}.\")\n\n if (results.solver.status == SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal):\n self._logger.info(\"Solution is feasible and optimal\")\n results.write()\n elif results.solver.termination_condition == TerminationCondition.infeasible:\n raise ValueError(\"Model optimisation resulted into an infeasible solution\")\n\n self.model.optimised = True", "def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.update\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.update\"] = False\n\n EKFSLAM.EKFSLAM.update(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.update\"], \"The function uses the solution\"", "def advance(self, sol):\r\n self.data_saving(sol)\r\n simulation_continues = self._advance_settings(sol)\r\n self.redef_vars()\r\n self.reporting(sol)\r\n self.norm_reporting()\r\n return simulation_continues", "def update():", "def update():", "def update(self, iteration):\n pass", "def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)", "def solve(self):\n\n self.queue.add(*self.moved.items)\n self.solving = True\n self.moved.items = []", "def update(self):\n\n # Update W (gradient should be up-to-date)\n _projected_step(self.W, self.gW, 1.0 / self.lipschitz_W())\n\n # Update H (need to recompute residuals since W was 
updated).\n self.cache_resids()\n self.cache_gH()\n _projected_step(self.H, self.gH, self.step_size)\n\n # Update residuals and gradient computation for W (for next iteration).\n self.cache_resids()\n self.cache_gW()\n\n # Return loss\n return self.loss", "def updateGraph(self):\n self.initUnits()\n v = self.units.copy()\n v_old = v.copy() * 100 # initial value so it will skip the first break\n for step in range(self.numCycles): # for total number of cycles\n # keep the old version of v for paralel updating\n # if v_old and v every element differnce < 0.001, then stop\n if np.all(np.abs(v_old - v) < 0.001):\n break\n # assign to v_old v from the previous step\n v_old = v.copy()\n for i in range(self.graph.n): # for every unit in the graph\n if i not in self.graph.observed: # if the unit is not a special fixed value s\n net = np.dot(v_old, self.graph.c[i]) # compute total flow to the unit\n if net > 0:\n gradient = net*(self.min_max[1]-v_old[i])\n else:\n gradient = net*(v_old[i]-self.min_max[0])\n v[i] = v_old[i]*(1-self.decay) + gradient\n # should this be after every unit update, or after the whole graph updates ??\n v = np.where(v>1, self.min_max[1], v)\n v = np.where(v<-1,self.min_max[0],v)\n self.units = v", "def solve(self,**kwargs):\n if kwargs.pop('restart',False):\n self.nopt = 0\n savefile = kwargs.pop('savebase',os.path.abspath(self.filename)+('_%02d.cysolve.pkl' % self.nloop))\n\n if kwargs.has_key('savedir'):\n savedir = kwargs['savedir']\n for isub in range(self.nspec):\n kwargs['isub'] = isub\n self.loop(**kwargs)\n print \"Saving after nopt:\", self.nopt\n self.saveState(savefile)\n \n self.pp_ref = self.pp_int\n self.nloop += 1", "def update(self):\r\n if self.games and all(game.result for game in self.games):\r\n self.rankings = self.compute_ranking()\r\n self.update_observers()\r\n\r\n if self.finals:\r\n for final in self.finals:\r\n final.update()", "def optimize(self):\n self.check_is_ready()\n self.check_infeasibility()\n solution_graph, obj_val = self.find_shortest_network_with_ADH((self.old_network_graph is not None))\n self.solution_graph = gnx.GeoMultiGraph(solution_graph, crs=self.optimization_graph.crs)", "def updateVariables(self) -> None:\n ...", "def solve(self, solver):\n solver.solve()", "def solve(self):\n\n # Set up display header if verbose operation enabled\n if self.opt['Verbose']:\n hdr = 'Itn DFidX PriResX DuaResX DFidG' + \\\n ' ResG '\n print(hdr)\n print('-' * len(hdr))\n\n # Main iteration loop\n for n in range(self.opt['MaxMainIter']):\n\n # At start of 2nd iteration, set the numbers of inner\n # iterations for the X and G solvers from the options\n # object for the outer solver\n if n == 1:\n self.slvX.opt['MaxMainIter'] = self.opt['XslvIter']\n self.slvG.opt['MaxMainIter'] = self.opt['GslvIter']\n\n # Run the configured number of iterations of the X (CSC)\n # solver and assign the result to X\n self.X = self.slvX.solve()\n\n # Compute the sum of the subpixel shifts of X\n Xhs = np.sum(fftconv(self.H, self.X.squeeze(), axes=(0, 1)),\n axis=-1)\n\n # Set the convolution kernel in the deconvolution solver\n # to the sum of the subpixel shifts of X\n self.slvG.setG(Xhs)\n # Run the configured number of iterations of the G\n # (deconvolution) solver and crop the result to obtain the\n # updated g\n self.g = self.slvG.solve()[0:self.gshp[0], 0:self.gshp[1]]\n\n # Construct a new dictionary for the X (CSC) solver from\n # the updated psf g\n self.D, self.dn = self.getD(self.g)\n self.slvX.setdict(self.D[..., np.newaxis, np.newaxis, :])\n\n # Display 
iteration statistics if verbose operation enabled\n if self.opt['Verbose']:\n itsX = self.slvX.getitstat()\n itsG = self.slvG.getitstat()\n fmt = '%3d %.3e %.3e %.3e %.3e %.3e'\n tpl = (n, itsX.DFid[-1], itsX.PrimalRsdl[-1],\n itsX.DualRsdl[-1], itsG.DFid[-1], itsG.Rsdl[-1])\n print(fmt % tpl)\n\n # Return the (normalised) psf estimate g\n return self.g / np.linalg.norm(self.g)", "def update(world):\r\n infect = infection(world['SIR'], infection_rate, incubation_rate)\r\n disperse = dispersion(world['SIR'], dispersion_kernel, dispersion_rates)\r\n world['SIR'] += dt*( infect + disperse)\r\n world['t'] += dt", "def done(self):\n while True:\n self.update_graph()", "def _update(self):\n num_new_evals = (self.metamodel.model_evaluations - self._last_rebuild)\n if num_new_evals >= self.rebuild_interval:\n self._built = True\n self._last_rebuild = self.metamodel.model_evaluations\n\n # Rebuild relevance function and make it usable on arrays.\n self._relevance_function = self._construct_relevance_function()\n rel_fun = np.vectorize(self._relevance_function)\n\n # Learn relevance prediction model\n data = self.metamodel.history.get_model_evaluations()\n relevance_values = rel_fun(data[:, -1])\n self._predictor.fit(data[:, :-1], relevance_values)\n return", "def _solution_on_update(solution, field_name, old_value, new_value):\n if solution.status != SolutionStatus.SUBMITTED:\n return # not interesting\n\n old = old_value >= SOLUTION_CORRECT_SCORE\n new = new_value >= SOLUTION_CORRECT_SCORE\n\n if old != new:\n _update_solved_count(new - old, solution.task, solution.author.get_profile())", "def solve(self):\n ...", "def add_solution(self, solution):\n if self.check_solution(solution):\n self._solution = solution\n self.solution_status = 'OK'\n else:\n self._solution = None\n self.solution_status = 'X'", "def update(self):\n for pl, result in zip(self._players, self.golf_round.doc.results):\n for score in result.scores:\n n = score.num-1\n # update net \n pl.dct_net['holes'][n] = score.gross - pl._bumps[n]\n pl.update_totals(pl.dct_net)", "def solve(self):\n initial_fes = eades(self.graph, self.force_forward_edges)\n initial_fes_vec = self.edge_vector(initial_fes)\n\n # bounds for the objective\n lower_bound = 0\n upper_bound = np.sum(initial_fes_vec @ self.weights)\n\n self.logger.info('Calculating FES for graph with %d edges, max %d feedback edges', self.m, len(initial_fes))\n\n simple_cycles = set(induced_cycles(self.graph, initial_fes))\n\n for iteration in itertools.count(1):\n self.logger.info('Baharev iteration %d, %g <= objective <= %g, %d simple cycles', iteration, lower_bound,\n upper_bound, len(simple_cycles))\n\n # Formulate and solve the problem for this iteration:\n y = cp.Variable(self.m, boolean=True, name=\"y\")\n objective = cp.Minimize(cp.sum(y @ self.weights))\n\n cycle_vectors = [self.edge_vector(nx.utils.pairwise(cycle)) for cycle in simple_cycles]\n constraints = [cp.sum(a @ y) >= 1 for a in cycle_vectors]\n constraints.append(cp.sum(y @ self.force_forward_vec) == 0) # no force forward vec may be in the result set\n problem = cp.Problem(objective, constraints)\n resolution = problem.solve(**self.solver_args)\n if problem.status != 'optimal':\n self.logger.warning('Optimization solution is %s. 
Try solver != %s?', problem.status,\n problem.solver_stats.solver_name)\n self.logger.debug(\n \"Solved optimization problem with %d constraints: %s -> %s (%g + %g seconds, %d iterations, solver %s)\",\n len(constraints), resolution, problem.solution.status,\n problem.solver_stats.solve_time or 0, problem.solver_stats.setup_time or 0,\n problem.solver_stats.num_iters or 0, problem.solver_stats.solver_name)\n current_solution = np.abs(y.value) >= 0.5 # y.value = vector of floats each ≈ 0 or 1\n current_fes = self.edges_for_vector(current_solution)\n self.logger.debug('Iteration %d, resolution: %s, %d feedback edges', iteration, resolution,\n len(current_fes))\n # S, the feedback edge set calculated using the constraint subset, can be an incomplete solution\n # (i.e. cycles remain after removing S from the graph). So lets compare this with the upper bound\n # from the heuristic\n lower_bound = max(lower_bound, objective.value)\n if lower_bound == upper_bound:\n self.logger.info('upper == lower bound == %g, optimal solution found', lower_bound)\n break # y.value is the optimal solution\n\n if resolution > upper_bound:\n self.logger.error('Solution %g > upper bound %g!', resolution, upper_bound)\n break\n\n Gi = self.graph.copy()\n Gi.remove_edges_from(current_fes)\n if nx.is_directed_acyclic_graph(Gi):\n self.logger.info('Graph is acyclic, optimal solution found')\n break # y.value is the optimal solution\n\n # The solution is not yet ideal. So we take G^(i), the graph still containing some feedback edges,\n # calculate a heuristic on it and use the heuristic (= over-estimation) to adjust upper bound and\n # determine additional simple cycles (= constraints)\n Fi = eades(Gi, self.force_forward_edges)\n yi = self.edge_vector(Fi) | current_solution\n zi = np.sum(yi @ self.weights)\n if zi < upper_bound:\n upper_bound = zi\n current_solution = yi\n simple_cycles |= set(induced_cycles(Gi, Fi))\n\n self.solution_vector = current_solution\n self.solution = self.edges_for_vector(current_solution)\n self.objective = objective.value\n self.iterations = iteration\n self.simple_cycles = simple_cycles\n return self.solution", "def update(self):\n self.brain.update()", "def optimize(self):\n\n self.logger.info(\"Solving with Dynamic Slope Scaling Procedure in Julia :\")\n optimization_start = time.time()\n\n # 1. 
Preprocess for old network graph\n if self.old_network_graph is not None:\n\n # DSSP on old network\n old_network_obj = sum(list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values()))-1e-5\n try:\n self.check_infeasibility(self.old_network_graph, old_network_obj)\n except DHCOptimizerException as e:\n e.data = \"Invalid existing network: \" + e.data\n raise e\n\n flows, obj_val = self.optimize_with_dssp_julia(self.old_network_graph, old_network_obj, set())\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n solution_old_graph = self.build_solution_graph(self.old_network_graph, flows)\n\n if self.modify_old_network:\n\n # Add max capacity on old edges\n self.old_capacity = deepcopy(flows)\n old_buildings = list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values())\n for key in flows:\n if (key[1],key[0],0) not in self.old_capacity and key[1] not in old_buildings:\n self.old_capacity[(key[1],key[0],0)] = self.old_capacity[key]\n\n # Add Imaginary edges\n for edge in self.old_capacity:\n if self.optimization_graph.has_edge(*edge):\n\n # add nodes\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[0])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[0]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[0]][config.GPD_GEO_KEY]\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[1])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[1]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[1]][config.GPD_GEO_KEY]\n # add edges\n if not self.optimization_graph.has_edge(edge[0],config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_edge(edge[0],config.IM_PREFIX+edge[0])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[1],edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[1],edge[1])\n\n # put cost\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY]\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(edge[0],config.IM_PREFIX+edge[0],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(config.IM_PREFIX+edge[1],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n\n else:\n # if we don't modify the old network, we have to change the capacity of the supplies\n already_consummed = {}\n for edge in solution_old_graph.edges():\n if solution_old_graph.nodes[edge[0]].get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n already_consummed[edge[0]] = already_consummed.get(edge[0], 0) + \\\n solution_old_graph.edges[edge][config.SOLUTION_POWER_FLOW_KEY]\n for source in already_consummed:\n if already_consummed[source] <= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]:\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] -= already_consummed[source]\n self.network_objective -= already_consummed[source]\n else:\n self.network_objective -= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]\n 
self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] = 0\n\n # Remove edges from old network\n edges_to_remove = set()\n for e in self.optimization_graph.edges():\n if self.old_network_graph.has_edge(*e) or self.old_network_graph.has_edge(e[1],e[0]):\n edges_to_remove.add(e)\n self.optimization_graph.remove_edges_from(edges_to_remove)\n\n # Remove isolated buildings of optimization graph\n isolated_to_remove = set()\n for e in self.old_network_graph.edges():\n if e[0] in self.old_network_graph.nodes() and \\\n self.optimization_graph.nodes[e[1]].get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n isolated_to_remove.add(e)\n self.optimization_graph.remove_edges_from(isolated_to_remove)\n\n # Remove buildings from old network\n for n, data in self.old_network_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n self.optimization_graph.remove_node(n)\n\n # Re-link sources\n sources = set()\n for n, data in self.optimization_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n sources.add(n)\n source_graph = self.optimization_graph.subgraph(sources).copy()\n self.optimization_graph.remove_nodes_from(sources)\n gnx.remove_isolates(self.optimization_graph)\n node_filter = lambda n: self.optimization_graph.nodes.get(n,{}).get(config.NODE_TYPE_KEY) != config.BUILDING_NODE_TYPE\n gnx.spatial_points_merge(self.optimization_graph, source_graph.nodes_to_gdf(), node_filter=node_filter, inplace=True)\n\n # fill missing information\n gnx.fill_edges_missing_geometry_attributes(self.optimization_graph)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_LENGTH_KEY, only_missing=True)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_COST_KEY, only_missing=True)\n for e in self.optimization_graph.edges(keys=True):\n self.optimization_graph.edges[e][config.LEASTCOST_COEF_KEY] = \\\n self.optimization_graph.edges[e].get(config.LEASTCOST_COEF_KEY,0)\n\n\n\n # 2. Process the DSSP on optimization graph\n self.check_is_ready()\n self.check_infeasibility(self.optimization_graph, self.network_objective)\n\n if self.old_network_graph is not None and self.modify_old_network:\n old_buildings = set(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())\n else:\n old_buildings = set()\n flows, obj_val = self.optimize_with_dssp_julia(self.optimization_graph, self.network_objective, old_buildings,postprocess= (not self.modify_old_network))\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n self.solution_graph = self.build_solution_graph(self.optimization_graph, flows, self.connected)\n\n # 3. 
Postprocess for old network graph\n if self.old_network_graph is not None:\n \n if self.modify_old_network:\n # Put the right supply capacity and cost\n for edge in self.old_capacity:\n if self.solution_graph.has_edge(edge[0],edge[1]):\n self.solution_graph.edges[(edge[0],edge[1])][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY]\n \n # Remove imaginary edges\n imaginary_nodes_to_remove = set()\n nodes_to_relabel = {}\n for edge in self.solution_graph.edges():\n if str(edge[0]).startswith(config.IM_PREFIX) and str(edge[1]).startswith(config.IM_PREFIX):\n real_edge = edge[0][len(config.IM_PREFIX):],edge[1][len(config.IM_PREFIX):]\n self.old_capacity[(real_edge[0], real_edge[1], 0)] = pd.np.inf\n self.old_capacity[(real_edge[1], real_edge[0], 0)] = pd.np.inf\n if not self.solution_graph.has_edge(*real_edge):\n for i in range(2):\n nodes_to_relabel[edge[i]] = real_edge[i]\n else:\n self.solution_graph.edges[real_edge[0],real_edge[1]][config.SOLUTION_POWER_FLOW_KEY] += \\\n self.solution_graph.edges[edge].get(config.SOLUTION_POWER_FLOW_KEY,0)\n imaginary_nodes_to_remove.add(edge[0])\n imaginary_nodes_to_remove.add(edge[1])\n elif str(edge[0]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[0])\n elif str(edge[1]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[1])\n\n nx.relabel_nodes(self.solution_graph, nodes_to_relabel, copy=False)\n self.solution_graph.remove_nodes_from(list(imaginary_nodes_to_remove))\n for node in nodes_to_relabel.values():\n if self.solution_graph.has_edge(node, node):\n self.solution_graph.remove_edge(node, node)\n\n else:\n for source in nx.get_node_attributes(self.solution_graph, config.SUPPLY_POWER_CAPACITY_KEY):\n self.solution_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n\n return flows, obj_val", "def run(self): # pragma: no cover\n while True:\n self.update()", "def update( ):\r\n pass", "def update(self):\n self.chromosome_list = self.next_population\n self.reset_mating_pool()\n self.reset_next_population()", "def update(self):\n \n dbpath, config = self._start()\n \n self.config.obo = check_file(config.obo, dbpath, \"obo\") \n desc_file = check_file(config.model_descriptions, dbpath,\n \"model_descriptions\", allow_none=True) \n phen_file = check_file(config.model_phenotypes, dbpath,\n \"model_phenotypes\", allow_none=True)\n \n summary = self._update(desc_file, phen_file) \n if len(summary[\"incorrect_ids\"]) == 0 and not config.skip_compute:\n self._compute(models=summary[\"new_phenotypes\"])\n \n self._end()", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)", "def notify_start(self):\n super(SolutionListener, self).notify_start()\n self._solutions = []", "def solve(self):\n pass", "def solve(self):\n pass", "def update(self):\n startstate = self.state\n goalstates =self.env.getGoalStates()\n inputs = self.env.sense(self)\n self.action_sequence = self.drive(goalstates,inputs)\n action = self.choose_action() # Choose an action\n self.state = self.env.act(self,action) \n return", "def tick(self):\n if self._solution == \"\":\n self._root.after(300, self.tick)\n return\n direction = self._solution[0]\n self._solution = self._solution[1:]\n try:\n self._puzzle.update_puzzle(direction)\n except:\n print \"invalid move:\", direction\n self.draw()\n 
self._root.after(300, self.tick)", "def dualSolve(data, problem=None, purpose=None):\n maxIter = 20\n\n numFeatures = None # get number of features from data['updatedCountData']\n\n if problem == \"Poisson\":\n optProb = poissonProblemVector(numFeatures)\n elif problem == \"Logistic\":\n optProb = logisticProblemVector(numFeatures)\n elif problem == \"Survival\":\n optProb = problem(numFeatures)\n\n # add constraint and redefine problem\n optProb.addConstraint(data['updatedCountData'])\n optProb.redefineProblem()\n optProb.solve()\n oldCoef = list(optProb.theta.value)\n # print(oldCoef)\n originalCoef = deepcopy(oldCoef)\n if purpose == \"init\":\n return oldCoef\n currCoef = oldCoef # to enable getting shifts inside the loop, which uses currCoef\n\n updatedData = deepcopy(data['incidentData'])\n resultName = '' # TODO: declare the file name for storing results\n\n # iteratively solve model\n with open(resultName, 'w+') as f:\n tempIter = 0\n while tempIter < int(maxIter):\n '''Create a set of inputs and pass to multiproc to get shifts in parallel'''\n inputs = []\n for tempData in updatedData:\n # set scale to true\n inputs.append([tempData, currCoef, data['df'], data['neighborGraph']])\n\n coreCount = multiprocessing.cpu_count()\n pool = Pool(coreCount - 2) # leave two cores\n results = pool.map(getShiftsSplit, inputs)\n pool.close()\n pool.join()\n\n # aggregate results\n updatedDF = deepcopy(data['df'])\n # TODO: mark each shift in the updated df set using 'results'\n # TODO: create training data based on shifts\n updatedCountData = None\n\n # update the optimization problem using the attacker's best response\n optProb.addConstraint(updatedCountData)\n optProb.redefineProblem()\n optProb.solve()\n currCoef = list(optProb.theta.value)\n\n oldLikelihood = getTotalLikelihoodLogistic(updatedCountData, oldCoef)\n newLikelihood = getTotalLikelihoodLogistic(updatedCountData, currCoef)\n gap = oldLikelihood - newLikelihood\n f.write(\"Likelihood at iteration {} is {}\\n\".format(tempIter, oldLikelihood))\n f.write(\"Likelihood at iteration {} is {}\\n\".format(tempIter, newLikelihood))\n f.write(\"status:{}\\n\".format(optProb.formulation.status))\n f.flush()\n\n tempIter += 1\n\n return currCoef, originalCoef", "def run(self):\n\t\t\n\t\twhile self.update():\n\t\t\tpass", "def variational_update(self):\n with self.elbo_check('update_p_allele_swap'):\n self.model.update_p_allele_swap()\n\n with self.elbo_check('p_cn'):\n self.model.update_p_cn()\n\n with self.elbo_check('p_breakpoint'):\n self.model.update_p_breakpoint()\n\n with self.elbo_check('p_outlier_total'):\n self.model.update_p_outlier_total()\n\n with self.elbo_check('p_outlier_allele'):\n self.model.update_p_outlier_allele()", "def updatesolutioninfo(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res = self.__obj.updatesolutioninfo(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def __update(self):\n\n # Make sure loads have been assigned to group\n if type(self.appliedLoad) == Load:\n self.appliedLoad = LoadSet(self.appliedLoad)\n elif type(self.appliedLoad) != LoadSet:\n raise TypeError(\"Applied load must be a Load or LoadSet\")\n\n # Begin Calculations\n _cg = self.cg # calculate the cg once to save computation time\n _appLoad = self.appliedLoad.totalForce\n _appMoment = self.appliedLoad.totalMoment\n\n coef_mat = np.zeros((len(self) * 3, len(self) * 3)) # coeff matrix\n soln_mat = np.zeros(len(self) * 3) # 
solution matrix\n\n cSet = [[i, i+1, i+2] for i in range(0, 3 * len(self), 3)]\n rSet = [[i+6, i+7, i+8] for i in range(0, 3 * (len(self) - 2), 3)]\n\n for i, j in enumerate(cSet):\n # i = column fastener ID\n # j = column fastener set\n # Mx = yFz - zFy\n # My = zFx - xFz\n # Mz = xFy - yFx\n\n Fx = j[0]\n Fy = j[1]\n Fz = j[2]\n\n # fill in first three rows\n coef_mat[0][Fx] = 1 # sum of Fx\n coef_mat[1][Fy] = 1 # sum of Fy\n coef_mat[2][Fz] = 1 # sum of Fz\n\n # fill in fourth row (sum of Mx at CG)\n coef_mat[3][Fy] = -(F[i].xyz[2] - _cg[2]) # -zFy\n coef_mat[3][Fz] = +(F[i].xyz[1] - _cg[1]) # +yFz\n\n # fill in fifth row (sum of My at CG)\n coef_mat[4][Fx] = +(F[i].xyz[2] - _cg[2]) # +zFx\n coef_mat[4][Fz] = -(F[i].xyz[0] - _cg[0]) # -xFz\n\n # fill in sixth row (sum of Mz at CG)\n coef_mat[5][Fx] = -(F[i].xyz[1] - _cg[1]) # -yFx\n coef_mat[5][Fy] = +(F[i].xyz[0] - _cg[0]) # +xFy\n\n for u, w in enumerate(rSet):\n # u = row fastener ID\n # w = row fastener set\n\n rX = w[0]\n rY = w[1]\n rZ = w[2]\n\n coef_mat[rX][Fy] = -(F[i].xyz[2] - F[u].xyz[2]) # -zFy\n coef_mat[rX][Fz] = +(F[i].xyz[1] - F[u].xyz[1]) # +yFz\n\n coef_mat[rY][Fx] = +(F[i].xyz[2] - F[u].xyz[2]) # +zFx\n coef_mat[rY][Fz] = -(F[i].xyz[0] - F[u].xyz[0]) # -xFz\n\n coef_mat[rZ][Fx] = -(F[i].xyz[1] - F[u].xyz[1]) # -yFx\n coef_mat[rZ][Fy] = +(F[i].xyz[0] - F[u].xyz[0]) # +xFy\n\n # fill in the solution matrix (soln_mat)\n for i in range(3):\n soln_mat[i] = -_netLoad.force[i]\n soln_mat[i+3] = -_netLoad.moment[i]\n\n # fill in the remaining rows\n for i, j in enumerate(rSet):\n # i = fastener\n # j = row\n\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n # Mx = (y_cg - y_i)F_znet - (z_cg - z_i)F_ynet + M_xnet\n soln_mat[rX] = - ((_cg[1] - F[i].xyz[1]) * _netLoad.force[2]\n - (_cg[2] - F[i].xyz[2]) * _netLoad.force[1]\n + _netLoad.moment[0])\n\n # My = (z_cg - z_i)F_xnet - (x_cg - x_i)F_znet + M_ynet\n soln_mat[rY] = -((_cg[2] - F[i].xyz[2]) * _netLoad.force[0]\n - (_cg[0] - F[i].xyz[0]) * _netLoad.force[2]\n + _netLoad.moment[1])\n\n # Mz = (x_cg - x_i)F_ynet - (y_cg - y_i)F_xnet + M_znet\n soln_mat[rZ] = -((_cg[0] - F[i].xyz[0]) * _netLoad.force[1]\n - (_cg[1] - F[i].xyz[1]) * _netLoad.force[0]\n + _netLoad.moment[2])\n\n # Solve system of equations\n matSol = np.linalg.lstsq(coef_mat, soln_mat)[0]\n\n # Add resulting fastener loads to fastener objects\n for i, j in enumerate(cSet):\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n F[i].force[0] = matSol[rX]\n F[i].force[1] = matSol[rY]\n F[i].force[2] = matSol[rZ]", "def fit(self):\n self._minuit_problem.migrad() # run optimizer\n self._status = 0 if self._minuit_problem.migrad_ok() else 1", "def add_solution(self, paths):\n assert self.benchmark.status[\"state\"] == \"RUNNING\", print(\n f\"Benchmark seems to be inactive. 
state: {self.benchmark.status(['state'])}\")\n assert self.benchmark.status[\"data\"][\"problem_states\"][self.batch_pos] == 0, print(\n \"Problem seems to be already solved\")\n\n self.paths = paths\n self.time = time()-self.start_time\n self.benchmark.status[\"data\"][\"problem_states\"][self.batch_pos] = 1\n\n if all(self.benchmark.status[\"data\"][\"problem_states\"]):\n self.status = {\"state\": \"SUBMITTING\", \"data\": None}\n self.benchmark.submit()", "def solve(self):\n self.last_result = None\n\n # Check solve with start/next\n if self.context.solver.solve_with_search_next:\n return self.solve_with_search_next()\n\n # Notify listeners\n self._notify_listeners_start_operation(listener.OPERATION_SOLVE)\n\n # Solve model\n self._check_status(STATUS_IDLE)\n self._set_status(STATUS_SOLVING)\n stime = time.time()\n try:\n msol = self.agent.solve()\n except Exception as e:\n # Check if aborted in the mean time\n if self._check_status_aborted():\n return self.last_result\n if self.context.log_exceptions:\n traceback.print_exc()\n raise e\n self._set_status(STATUS_IDLE)\n stime = time.time() - stime\n self.context.solver.log(1, \"Model '\", self.model.get_name(), \"' solved in \", round(stime, 2), \" sec.\")\n msol.process_infos[CpoProcessInfos.SOLVE_TOTAL_TIME] = stime\n\n # Store last solution\n self.last_result = msol\n\n # Notify listeners\n for lstnr in self.listeners:\n lstnr.new_result(self, msol)\n self._notify_listeners_end_operation()\n\n # Return solution\n return msol", "def update(self, control=None):\n if self.end:\n print('process has terminated')\n\n if control is None:\n control = defaultdict(lambda: (0, 0))\n\n # determine next state for each Region\n for name in self.group.keys():\n self.group[name].next(self.group, control[name])\n\n # assume simulation will end this time step\n self.end = True\n for name in self.group.keys():\n # check if there are any infected Regions\n if self.group[name].is_infected(self.group[name].next_state):\n self.end = False\n self.counter[name] += 1\n\n # apply next state to all elements\n self.group[name].update()\n\n self.iter += 1\n return", "def processWork(self):\n while self.running == True:\n if len(self.work_queue) == 0:\n self.work_queue = [Instruction('Do Math'), Instruction('Send HUPD'), Instruction('Receive All HUPDs')]\n else:\n instruction = self.work_queue.pop(0)\n if instruction.type == 'Do Math':\n #start calculations\n self.updated = False\n #print('Doing Math')\n # run calculations\n elif instruction.type == 'Send HUPD':\n #echo host update to all other hosts on the network\n min_max = str(self.x_min) + ':' + str(self.x_max)\n payload = 'a' + '\\0' + 'b' + '\\0' + 'c' + '\\0' + 'd' + '\\0' + 'e' + '\\0' + 'f' + '\\0' + 'g' + '\\0' + min_max + '\\0'\n our_update = Message(\"HUPD\", self.ip, payload)\n #if there are no connections, send to myself\n for connection in self.connections:\n connection.host_sock.sendall(our_update.generateByteMessage())\n elif instruction.type == 'Receive All HUPDs':\n # make sure to receive all HUPDs from listening threads\n if len(self.connections) > 0:\n while len(self.updates_received) != len(self.connections):\n msg = 'wait'\n # only set to true once all updates have been received\n self.updated = True\n self.updates_received = []\n # Once all updates are recieved update ABoid locations\n self.all_alphas = []\n elif instruction.type == 'NHST':\n #New host tring to connect to network\n new_host_ip = instruction.message.origin\n payload_array = instruction.message.payload.split(':')\n\n #check 
if the new host is a neighbor\n if self.x_max == self.curr_x_max:\n self.r_neighbor = new_host_ip\n if self.x_min == self.curr_x_min:\n self.l_neighbor = new_host_ip\n self.host_ips.append(new_host_ip)\n #Start the thread that is listening to the socket connected to the new host\n new_thread = Thread(target=lambda: self.listenToHost(instruction.sock))\n new_thread.daemon = True\n new_thread.start()\n new_connection = Connection(new_host_ip, instruction.sock, new_thread)\n self.connections.append(new_connection)\n host_area = str(self.x_min) + ':' + str(self.x_max)\n #send current host area to the newly connected host\n area_message = Message('AREA', self.ip, host_area)\n instruction.sock.sendall(area_message.generateByteMessage())\n print('Sent AREA message to ' + new_host_ip)\n elif instruction.type == 'LHST':\n #Host has disconnected to the network\n for host_ip in self.host_ips:\n if host_ip == instruction.message.origin:\n #remove host from list of connected ips\n self.host_ips.remove(host_ip)\n for connection in self.connections:\n #remove the connection object from list of known connections\n if connection.ip == instruction.message.origin:\n #close the hosts socket and thread\n connection.close()\n self.connections.remove(connection)\n else:\n print('Invalid Instruction - skipping...')\n return", "def update_fits( self ):\n\n\t\tself._submit_to_queue( None )\n\t\tqueue_contents = self._retrieve_from_queue()\n\n\t\tfor (title,dQ) in queue_contents:\n\t\t\tE = self.get_experiment_by_title(title)\n\t\t\tE.dQ_fit = dQ\n\n\t\treturn", "def on_update(self):\n \n # update physics engine\n \n \n # use code from pick up coins lab to pick up coins\n # you don't need all of the code from that lab(no gameover or reset)", "def solve(self):\n self.m.optimize()\n if self.m.status == GRB.OPTIMAL:\n self.solution = self.sol_as_mat()\n return self.solution", "def _update_local_solution(self, x: np.ndarray, x_neigh: dict, stepsize: float, **kwargs):\r\n for j, x_j in x_neigh.items():\r\n self.lambd[j] += stepsize * (x - x_j)\r\n \r\n self.x = x", "def update_goal(self):\n pass", "def solve(self):\n raise NotImplementedError(\"This method needs to be implemented.\")", "def _optimise(self):\n better = True\n self.solutions = set()\n\n # Rebuild the neighbours\n self.neighbours = {}\n\n for i in self.heuristic_path:\n self.neighbours[i] = []\n\n for j, dist in enumerate(TSP.edges[i]):\n if dist > 0 and j in self.heuristic_path:\n self.neighbours[i].append(j)\n\n # Restart the loop each time we find an improving candidate\n while better:\n better = self.improve()\n # Paths always begin at 0 so this should manage to find duplicate\n # solutions\n self.solutions.add(str(self.heuristic_path))\n\n self.save(self.heuristic_path, self.heuristic_cost)", "def updateFitness(self):\r\n for candidate in self.candidates:\r\n candidate.updateFitness()\r\n return", "def update_component():\n # written by Anders Deleuran, andersholdendeleuran.com\n import Grasshopper as gh\n\n def call_back(e):\n \"\"\"Defines a callback action\"\"\"\n gh_env.Component.ExpireSolution(False)\n # Get the Grasshopper document\n ghDoc = gh_env.Component.OnPingDocument()\n # Schedule this component to expire\n ghDoc.ScheduleSolution(loop_interval, gh.Kernel.GH_Document.GH_ScheduleDelegate(call_back))", "def solve(self) -> Dict:\n solution = self.opt.decision_variables.vec2dict(self._solve())\n\n if self._error_on_fail and (not self.did_solve()):\n raise RuntimeError(\"Solver failed!\")\n\n # Add full model state to the solution dictionary\n 
for model in self.opt.models:\n for d in model.time_derivs:\n n_s = model.state_name(d)\n n_s_x = model.state_optimized_name(d)\n if isinstance(model, RobotModel):\n if model.num_param_joints > 0:\n n_s_p = model.state_parameter_name(d)\n t = solution[n_s_x].shape[1]\n solution[n_s] = cs.DM.zeros(model.dim, t)\n solution[n_s][model.optimized_joint_indexes, :] = solution[\n n_s_x\n ]\n solution[n_s][model.parameter_joint_indexes, :] = self._p_dict[\n n_s_p\n ]\n else:\n solution[n_s] = solution[n_s_x]\n else:\n solution[n_s] = solution[n_s_x]\n\n return solution", "def _update(self, results):\n logger = getLogger(\"problog_lfi\")\n fact_body = defaultdict(int)\n fact_par = defaultdict(int)\n\n score = 0.0\n for m, pEvidence, result in results:\n par_marg = dict()\n for fact, value in result.items():\n # use the id and the t variables as index\n index = (fact.args[0], fact.args[1])\n if fact.functor == \"lfi_body\":\n fact_body[index] += value * m\n elif fact.functor == \"lfi_par\":\n if index in par_marg:\n par_marg[index] += value\n for o_index in self._adatomc[index[0]]:\n par_marg[(o_index, *index[1:])] += value\n else:\n par_marg[index] = value\n for o_index in self._adatomc[index[0]]:\n par_marg[(o_index, *index[1:])] = value\n\n for index, value in par_marg.items():\n fact_par[index] += value * m\n try:\n score += math.log(pEvidence)\n except ValueError:\n logger.debug(\"Pr(evidence) == 0.0\")\n\n update_list = fact_body\n\n weight_changed = []\n for weight in self._weights:\n if isinstance(weight, float):\n weight_changed.append(False)\n elif isinstance(weight, dict):\n d = dict()\n for w in weight:\n d[w] = False\n weight_changed.append(d)\n\n score = 0.0\n for index in update_list:\n if float(fact_body[index]) <= 10**-15:\n # if close to zero\n prob = 0.0\n else:\n prob = float(fact_body[index]) / float(fact_par[index])\n try:\n score += math.log(prob)\n except ValueError as ex:\n # prob too close to zero\n pass\n\n logger.debug(\n \"Update probabilistic fact {}: {} / {} = {}\".format(\n index, fact_body[index], fact_par[index], prob\n )\n )\n self._set_weight(index[0], index[1], prob, weight_changed=weight_changed)\n if not index[1]:\n weight_changed[int(index[0])] = True\n elif (\n isinstance(weight_changed[int(index[0])], dict)\n and index[1] in weight_changed[int(index[0])]\n ):\n weight_changed[int(index[0])][index[1]] = True\n else:\n weight_changed[int(index[0])] = {index[1]: True}\n\n if self._enable_normalize:\n self._normalize_weights()\n\n return score", "def update_weights(self,tol = 1e-6,maxIter = 5e2, verbose = False):\n # Armijo parameter\n alphaA = 0.01\n\n self.update_boundaries()\n \n i = 0\n tau = .5\n F = -self.masses.copy()\n if self.intp_rho is None:\n F[self.indices] += self.compute_integrals(self.rho)\n else: \n F[self.indices] += self.compute_integrals_ipp(self.intp_rho,p=0) \n #F[self.indices] += self.compute_integrals(self.rho)\n\n error = np.linalg.norm(F) \n #cost_old = self.compute_ot_cost() \n if self.intp_rho is None :\n cost_old = self.compute_ot_cost()\n else: \n cost_old = self.compute_ot_cost_ipp()\n \n while error>tol and i<maxIter:\n \n Hess = self.compute_integrals_gradient(self.rho) \n #print(self.indices)\n #if tau<1e-9: theta=1. 
\n \n theta = 0.\n deltaw = -theta*F\n deltaw[self.indices] -= (1-theta)*spsolve(Hess,F[self.indices])\n \n weights_old = self.weights.copy()\n k=0\n \n # Linesearch\n while True:\n self.weights = weights_old +tau*deltaw\n self.update_boundaries()\n #cost = self.compute_ot_cost()\n\n if self.intp_rho is None :\n cost = self.compute_ot_cost()\n else: \n cost = (np.sum(self.masses*self.weights)\n +np.sum(self.compute_integrals_ipp(self.intp_rho,p=2)\n -self.compute_integrals_ipp(self.intp_rho,p=0)*self.weights[self.indices]))\n \n if (cost >= cost_old + tau*alphaA*np.dot(F,deltaw)\n and len(self.indices)==len(self.X)) or tau<1e-10: break\n \n else: \n k += 1\n tau = tau*.8 \n \n #print(deltaw)\n #if i>200: tau = np.min((1., tau*1.01))\n \n cost_old = cost\n \n #self.weights = weights_new.copy() \n #self.update_boundaries()\n #print(cost,tau)\n i+=1\n F = -self.masses.copy()\n if self.intp_rho is None:\n F[self.indices] += self.compute_integrals(self.rho)\n else:\n F[self.indices] += self.compute_integrals_ipp(self.intp_rho,p=0)\n #F[self.indices] += self.compute_integrals(self.rho) \n error = np.linalg.norm(F) \n \n if verbose: print(\"Newton step: {}, cost: {}, tau: {}, error: {}, active particles: {}\".format(i,cost,tau,error,len(self.indices))) \n tau = np.min((tau*1.1,1.))\n\n if i< maxIter and verbose: print(\"Optimization success!\")", "def relax(self):\n # print(\"putin\", self.level.rhs.reshape(-1)[:])\n # print(\"getout\", self.solver(self.level.rhs.reshape(-1)))\n\n self.level.mid[:] = self.solver(self.level.rhs.reshape(-1)).reshape(self.level.mid.shape)", "def solve(self):\n start = timer()\n # encode into milp\n me = MILPEncoder(MILPSolver.prob,\n MILPSolver.params.logger.LOGFILE, \n MILPSolver.params.INTRA_DEP_CONSTRS,\n MILPSolver.params.INTER_DEP_CONSTRS)\n if MILPSolver.lp == True:\n gmodel = me.lp_encode()\n else:\n gmodel = me.encode()\n # Set gurobi parameters\n pgo = 1 if MILPSolver.params.PRINT_GUROBI_OUTPUT == True else 0\n gmodel.setParam('OUTPUT_FLAG', pgo)\n tl = MILPSolver.params.TIMEOUT\n if tl != -1 : gmodel.setParam('TIME_LIMIT', tl)\n if not MILPSolver.params.DEFAULT_CUTS: \n MILPSolver.disable_default_cuts(gmodel)\n gmodel._vars = gmodel.getVars()\n # set callback cuts \n MILPSolver.id_form = IdealFormulation(MILPSolver.prob,\n gmodel, \n MILPSolver.params.IDEAL_FREQ,\n MILPSolver.params.logger.LOGFILE)\n MILPSolver.dep_cuts = DepCuts(MILPSolver.prob,\n gmodel,\n MILPSolver.params.DEP_FREQ,\n MILPSolver.params.INTRA_DEP_CUTS,\n MILPSolver.params.INTER_DEP_CUTS,\n MILPSolver.sip_params,\n MILPSolver.params.logger.LOGFILE)\n # Optimise\n if MILPSolver.params.callback_enabled() and MILPSolver.lp == False:\n gmodel.optimize(MILPSolver._callback)\n else:\n gmodel.optimize()\n\n runtime = timer() - start\n cex = None \n if MILPSolver.status == SolveResult.BRANCH_THRESHOLD:\n result = SolveResult.BRANCH_THRESHOLD\n elif gmodel.status == GRB.OPTIMAL:\n cex_shape = MILPSolver.prob.spec.input_layer.input_shape\n cex = np.zeros(cex_shape)\n for i in itertools.product(*[range(j) for j in cex_shape]):\n cex[i] = MILPSolver.prob.spec.input_layer.out_vars[i].x\n result = SolveResult.UNSATISFIED\n elif gmodel.status == GRB.TIME_LIMIT:\n result = SolveResult.TIMEOUT\n elif gmodel.status == GRB.INTERRUPTED:\n result = SolveResult.INTERRUPTED\n elif gmodel.status == GRB.INFEASIBLE or gmodel.status == GRB.INF_OR_UNBD:\n result = SolveResult.SATISFIED\n else:\n result = SolveResult.UNKNOWN\n \n # MILPSolver.logger.info('Verification problem {} solved, '\n # 'LP: {}, '\n # 'time: 
{:.2f}, '\n # 'result: {}.'\n # .format(MILPSolver.prob.id,\n # MILPSolver.lp,\n # runtime,\n # result.value))\n \n return SolveReport(result, runtime, cex)", "def solve(self):\n # check for jacobian and set it if present and to be used\n if self.use_sparse:\n if self._use_jac and hasattr(self.problem,'sparse_jac'):\n jac = self.problem.sparse_jac\n else:\n jac = None\n else:\n if self._use_jac and hasattr(self.problem,'jac'):\n jac = self.problem.jac\n else:\n jac = None\n \n # Initialize solver and solve \n \n solved = False\n local_min = False\n\n res = N.zeros(self.x0.__len__())\n while (not solved) and self.reg_count < 2:\n try:\n if self._use_fscale:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,self.fscale)\n else:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,None)\n start = time.clock()\n res = self.solver.KINSOL_solve(not self._use_ls)\n stop = time.clock()\n self.exec_time += (stop - start)\n solved = True\n except KINError as error:\n if error.value == 42:\n # Try the heuristic\n if hasattr(self.problem, 'get_heuristic_x0'):\n print \"----------------------------------------------------\"\n print \" Solver stuck with zero step-length.\"\n print \"----------------------------------------------------\"\n print \"The following variables have start value zero\"\n print \"and min set to zero causing the zero step-lenght.\"\n print \"These settings are either set by default or by user.\"\n print \"\"\n\n self.x0 = self.problem.get_heuristic_x0()\n self.reg_count += 1\n \n print \"\"\n print \"This setting (start and min to zero) can often\"\n print \"cause problem when initializing the system. \"\n print \"\"\n print \"To avoid this the above variables have\"\n print \"their start attributes reset to one.\"\n print \"\"\n print \"Trying to solve the system again...\"\n else:\n raise KINSOL_Exception(\"Regularization failed due to constraints, tried getting heuristic initial guess but failed.\")\n \n\n elif (error.value == 2):\n print \"---------------------------------------------------------\"\n print \"\"\n print \" !!! 
WARNING !!!\"\n print \"\"\n print \" KINSOL has returned a result but the algorithm has converged\"\n print \" to a local minima, the initial values are NOT consistant!\"\n print \"\"\n print \"---------------------------------------------------------\"\n solved = True\n local_min = True\n else:\n # Other error, send onward as exception\n self.problem.check_constraints(res)\n raise KINSOL_Exception(error.msg[error.value])\n \n if not solved:\n self.solver.Free_KINSOL()\n raise KINSOL_Exception(\"Algorithm exited solution loop without finding a solution, please contact Assimulo support.\")\n\n if self.check_with_model:\n self.problem.check_constraints(res)\n if not local_min:\n print \"Problem sent to KINSOL solved.\"\n \n return res", "def update_pressure(self):\n m_multipliers = np.ones(self.mesh.get_number_of_cells())\n\n\n rhs_current = np.zeros(self.mfd.get_number_of_dof()) \n rhs_current += self.rhs_mfd\n\n\n for cell_index in range(self.mesh.get_number_of_cells()):\n density = -self.ref_pressure\n density += self.current_pressure[cell_index]\n density *= self.compressibility\n density += 1.\n density *= self.ref_density\n\n # We multiply by the inverse of \\frac{\\rho}{\\mu}\n m_multipliers[cell_index] = self.viscosity/density\n\n c_entry = self.compressibility\n c_entry *= self.porosities[cell_index]\n c_entry /= self.delta_t\n c_entry *= self.mesh.get_cell_volume(cell_index)\n\n rhs_current[self.mesh.get_number_of_faces()+\n cell_index] += c_entry*self.current_pressure[cell_index]\n\n self.lhs_coo.data[self.c_start+cell_index] = c_entry\n\n for [index, cell_index] in enumerate(self.rate_wells):\n rhs_current[self.mesh.get_number_of_faces()+cell_index] += \\\n self.rate_wells_rate[index]\n\n self.mfd.update_m(self.lhs_coo.data[:self.m_x_coo_length], m_multipliers)\n\n solution = dsolve.spsolve(self.lhs_coo.tocsr(), rhs_current)\n self.prev_pressure = self.current_pressure\n self.current_pressure = solution[self.mesh.get_number_of_faces():]\n self.current_velocity = solution[:self.mesh.get_number_of_faces()]", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. 
working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def restart_solutions(solutions):\n for solution in solutions:\n solution.restart()", "def solve(self):", "def _update_local_solution(self, x: np.ndarray, z: np.ndarray, z_neigh: dict, rho: float, **kwargs):\r\n self.z_neigh = z_neigh\r\n\r\n # update dual variables\r\n self.lambd[self.agent.id] += rho * (x - z)\r\n for j, z_j in z_neigh.items():\r\n self.lambd[j] += rho * (x - z_j)\r\n\r\n # update primal variables\r\n self.x = x\r\n self.z = z", "def flush(self):\n super().flush()\n self._targetEvaluation = None\n self._solutionExport = None", "def update(self):\n ## Initialize\n self.domain.update()\n self.var = self.domain.var.copy()\n self.out = []\n\n ## Construct var and out, respecting DAG properties\n for fun in self.functions:\n self.var = list(set(self.var).union(set(fun.var).difference(set(self.out))))\n\n self.out = list(set(self.out).union(set(fun.out)))\n\n try:\n self.var_rand = list(self.density.marginals.keys())\n except AttributeError:\n self.var_rand = []\n self.var_det = list(set(self.var).difference(self.var_rand))\n\n ## TODO parameters\n\n ## Convenience constants\n self.n_var = len(self.var)\n self.n_var_rand = len(self.var_rand)\n self.n_var_det = len(self.var_det)\n self.n_out = len(self.out)", "def run(self, iterations):\n\n self.iterations = iterations \n \n for iteration in range(self.iterations):\n \n # create a copy of the solution to simulate the change\n potential_solution = copy.deepcopy(self.state)\n\n random_route = self.pick_random_route(potential_solution) \n random_action = self.pick_random_action()\n\n # perform action\n random_action(random_route, potential_solution)\n self.check_solution(potential_solution)\n\n improved_solution = self.state\n return improved_solution", "def _updateModel(self):\n # for each design variable in the dictionary:\n # loop through rows and cols setting design paramter values\n for dvName in self.DVs:\n dv = self.DVs[dvName]\n espParamIdx = dv.csmDesPmtr.pmtrIndex\n for localIdx in range(dv.nVal):\n rowIdx = localIdx // 
len(dv.cols)\n colIdx = localIdx % len(dv.cols)\n espRowIdx = dv.rows[rowIdx]\n espColIdx = dv.cols[colIdx]\n self.espModel.SetValuD(espParamIdx, irow=espRowIdx, icol=espColIdx, value=dv.value[localIdx])\n\n # finally, rebuild\n outtuple = self.espModel.Build(0, 0)\n # check that the number of branches built successfully matches the number when the model was first built on __init__\n # otherwise, there was an EGADS/CSM build failure at this design point\n if outtuple[0] != self.num_branches_baseline:\n return False\n else:\n # built correctly\n return True", "def update(self):\r\n self.g = self.create_graph()", "def run(self):\n\n # init\n base_value = self._problem.evaluate()\n self._problem.set_as_best(base_value)\n\n # init iteration (used to count the amount of iterations)\n iteration = 0\n\n # add to data\n self._data_append(self.data, iteration, base_value, base_value)\n\n # init termination criterion\n self._termination_criterion.check_first_value(base_value)\n self._termination_criterion.start_timing()\n\n # main loop\n while self._termination_criterion.keep_running():\n\n # search the neighbourhood for the best move\n best_found_delta = self._best_found_delta_base_value\n best_found_move = None\n\n for move in self._problem.get_moves():\n\n # check quality move\n delta = self._problem.evaluate_move(move)\n\n # checks how the move alters the current state\n diff = self._diff(move)\n\n # if not in tabu list --> not similar to earlier performed\n # moves --> if delta better than old best move\n # --> becomes the best move\n\n if not self._tabu_list.contains(diff) and \\\n self._is_better(best_found_delta, delta):\n best_found_delta = delta\n best_found_move = move\n best_found_diff = diff\n\n # the best found move will be used as the next move\n # alter state problem\n base_value = base_value + best_found_delta\n\n # check if a move was found\n if best_found_move is not None:\n\n self._problem.move(best_found_move)\n\n # if better than best found --> new best_found\n if self._is_better(self._problem.best_order_value,\n base_value):\n self._problem.set_as_best(base_value)\n # log the better solution\n self._log_improvement(base_value)\n\n # add diff to tabu list\n self._tabu_list.add(best_found_diff)\n\n # add to data\n self._data_append(self.data, iteration,\n base_value, self._problem.best_order_value)\n\n self._termination_criterion.check_new_value(base_value)\n\n # functions _termination_criterion called\n self._termination_criterion.check_new_value(base_value)\n\n else:\n # no move found --> we're stuck --> break loop\n break\n\n iteration += 1\n self._termination_criterion.iteration_done()\n\n # last data point\n self._data_append(self.data, iteration, base_value,\n self._problem.best_order_value)\n\n # if we have data:\n # convert data to something easier to plot\n if self.data is not None:\n\n # convert to tuple of list\n data = convert_data(self.data)\n\n # make namedtuple\n DataAsLists = namedtuple(\n 'Data', ['time', 'iteration', 'value', 'best_value'])\n\n data = DataAsLists(data[0], data[1], data[2], data[3])\n\n else:\n data = None\n\n # return results\n\n Results = namedtuple('Results', ['best_order', 'best_value', 'data'])\n\n return Results(self._problem.best_order,\n self._problem.best_order_value,\n data)", "def solve(self):\n measure = None\n while not self.step():\n self._nbSteps += 1\n measure = self.measure(lastMeasure=measure)", "def compute(self):\n\n self.setd = []\n self.satc = [False for cl in self.soft] # satisfied clauses\n self.solution = None\n 
self.bb_assumps = [] # backbone assumptions\n self.ss_assumps = [] # satisfied soft clause assumptions\n\n if self.oracle.solve():\n # hard part is satisfiable => there is a solution\n self._filter_satisfied(update_setd=True)\n self._compute()\n\n self.solution = list(map(lambda i: i + 1, filter(lambda i: not self.satc[i], range(len(self.soft)))))\n\n return self.solution", "def update(self):\n\n # First we need to scale the sufficient statistics by batch size\n self._counts /= self._corpus_size\n\n # We'll only train the network with 20 iterations.\n # A more common technique is to use a hold-out validation set.\n # When the validation error starts to increase, the network is overfitting,\n # so we stop training the net. This is called \"early stopping\", which we won't do here.\n done_looping = False\n best_cost = np.inf\n best_iter = 0\n learning_rate = self._learning_rate\n patience = self._patience\n\n # TODO: implement adagrad\n for iteration in range(self._max_iterations):\n\n # Train the network using the entire training set.\n current_cost = self._train(self._X, self._counts, learning_rate)\n logging.debug('[%d] MLP cost=%s', iteration, current_cost)\n\n # Give it a chance to update cost and patience\n if current_cost < best_cost:\n if current_cost < best_cost * self._improvement_threshold:\n if iteration >= patience - self._patience_increase:\n patience += self._patience_increase\n best_cost = current_cost\n best_iter = iteration\n\n # Check patience\n if iteration > self._patience:\n logging.debug('Ran out of patience in iteration %d', iteration)\n break\n\n # Finally, we update the CPDs and reset the sufficient statistics to zero\n self._cpds = self._mlp_output(self._X)\n self._counts = np.zeros((self.n_input, self.n_output), dtype=theano.config.floatX)", "def update(self):\r\n\r\n self.target.load_state_dict(self.model.state_dict())\r\n self.target.eval()", "def play_one_round(self):\r\n new_solutions = self.breeder.breed(self.solutions)\r\n self.solutions.clear()\r\n self.solutions.extend(new_solutions)\r\n self.mutation_maker.mutate(self.solutions)\r\n self.round += 1\r\n self.simulation_stats.add_stats(self.round, self.solutions)\r\n if self.simulation_viz is SimulationViz.FRONT:\r\n self.report_progress()", "def update(self):\n self.m.update()", "def _update_model(self, verbose: bool, raw=True, smooth=False):\n if (raw and smooth) or (not raw and not smooth):\n raise InversionsonError(\"SGDM updates can be raw or smooth, not both\")\n if raw:\n gradient = (\n self.comm.lasif.lasif_comm.project.paths[\"gradients\"]\n / f\"ITERATION_{self.iteration_name}\"\n / \"summed_gradient.h5\"\n )\n if not os.path.exists(self.raw_gradient_path):\n shutil.copy(gradient, self.raw_gradient_path)\n if not os.path.exists(self.raw_update_path):\n self._compute_raw_update()\n if smooth:\n self._apply_smooth_update()" ]
[ "0.71704924", "0.6632163", "0.66200024", "0.6557541", "0.6423378", "0.63818824", "0.6320821", "0.62160337", "0.6194195", "0.61916894", "0.6184975", "0.6170712", "0.61512583", "0.6085049", "0.6084356", "0.60457534", "0.6041477", "0.6041477", "0.6041477", "0.60402924", "0.6000796", "0.599788", "0.59931827", "0.59863055", "0.5984065", "0.5956404", "0.5956404", "0.58969945", "0.58753616", "0.58733445", "0.5873132", "0.5836395", "0.5816052", "0.5801441", "0.57980293", "0.57521003", "0.5737126", "0.57307655", "0.57249314", "0.5721511", "0.57187927", "0.57166266", "0.5707214", "0.57061535", "0.57011837", "0.56947255", "0.56938297", "0.569149", "0.56784034", "0.567668", "0.5670561", "0.5666522", "0.56569433", "0.56561226", "0.5638763", "0.5638763", "0.56337637", "0.56330395", "0.56212795", "0.56165683", "0.56117564", "0.56091183", "0.5608763", "0.55982274", "0.5594974", "0.55815196", "0.55736184", "0.55637956", "0.5554532", "0.5553781", "0.55516666", "0.5551402", "0.553799", "0.5528178", "0.5527178", "0.55241996", "0.5518696", "0.551865", "0.5517626", "0.5508535", "0.5505557", "0.55048233", "0.5504248", "0.55019766", "0.5499769", "0.54995465", "0.5495017", "0.5492786", "0.54880494", "0.5475459", "0.5470506", "0.54702663", "0.5464136", "0.54614544", "0.54508424", "0.5442604", "0.54415774", "0.5429095", "0.5427622", "0.54254764", "0.5419966" ]
0.0
-1
Builds the Arnoldi decomposition of (A, v), where v is the normalized residual of the current solution estimate. The decomposition is returned as V, R, where V is the usual matrix of Krylov vectors and R is the upper triangular matrix in H = QR, with H the usual matrix of overlaps.
def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int, x0: jax.ShapedArray, r: jax.ShapedArray, beta: float, tol: float, b_norm: float) -> Tuple[int, jax.ShapedArray, jax.ShapedArray, jax.ShapedArray]: n = r.size err = beta v = r / beta # These will store the Givens rotations used to update the QR decompositions # of the Arnoldi matrices. # cos : givens[0, :] # sine: givens[1, :] givens = jnp.zeros((2, n_kry), dtype=x0.dtype) beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype) beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta) V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype) V = jax.ops.index_update(V, jax.ops.index[:, 0], v) R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype) # The variable data for the carry call. Each iteration modifies these # values and feeds the results to the next iteration. k = 0 gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need. givens) # < Modified between iterations. gmres_constants = (tol, A_mv, A_args, b_norm, n_kry) gmres_carry = (gmres_variables, gmres_constants) # The 'x' input for the carry call. Each iteration will receive an ascending # loop index (from the jnp.arange) along with the constant data # in gmres_constants. gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition, gmres_krylov_work, gmres_carry) gmres_variables, gmres_constants = gmres_carry k, V, R, beta_vec, err, givens = gmres_variables return (k, V, R, beta_vec)
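
Editor's note: the retrieved document above assembles exactly the decomposition the query describes, but incrementally inside a jax.lax.while_loop, with the QR factorization of H maintained by Givens rotations rather than formed at the end. As a plain NumPy sketch of the same decomposition (an editorial illustration, not a row of this dataset; the function name arnoldi_decomposition and the 1e-12 breakdown tolerance are my own choices), k steps of Arnoldi on (A, v) followed by a QR of the Hessenberg matrix of overlaps give the V and R that the query refers to:

import numpy as np

def arnoldi_decomposition(A, v, k):
    """k steps of Arnoldi on (A, v): returns Krylov vectors V and the R in H = QR."""
    n = v.size
    V = np.zeros((n, k + 1))
    H = np.zeros((k + 1, k))              # Hessenberg matrix of overlaps
    V[:, 0] = v / np.linalg.norm(v)       # normalized residual seeds the basis
    for j in range(k):
        w = A @ V[:, j]
        for i in range(j + 1):            # modified Gram-Schmidt against earlier vectors
            H[i, j] = V[:, i] @ w
            w = w - H[i, j] * V[:, i]
        H[j + 1, j] = np.linalg.norm(w)
        if H[j + 1, j] < 1e-12:           # happy breakdown: Krylov subspace is invariant
            R = np.linalg.qr(H[:j + 2, :j + 1], mode="r")
            return V[:, :j + 2], R
        V[:, j + 1] = w / H[j + 1, j]
    R = np.linalg.qr(H, mode="r")         # upper triangular factor in H = QR
    return V, R

The GMRES update then comes from back-substituting R against the rotated residual, which is what the document's beta_vec tracks step by step.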
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
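
Editor's note: the givens array in the document above stores rotation cosines and sines so the triangular factor R can be updated one Hessenberg column at a time instead of refactoring H each iteration. A hedged companion sketch for real-valued data (again my own illustration; the helper name apply_givens_update and its argument layout are assumptions, not the dataset's API) of a single update step:

import numpy as np

def apply_givens_update(h, cs, sn, beta_vec, k):
    """Rotate new Hessenberg column h (length >= k+2) into upper triangular form."""
    for i in range(k):                        # replay the rotations from earlier columns
        temp = cs[i] * h[i] + sn[i] * h[i + 1]
        h[i + 1] = -sn[i] * h[i] + cs[i] * h[i + 1]
        h[i] = temp
    r = np.hypot(h[k], h[k + 1])              # build one new rotation zeroing h[k+1]
    cs[k], sn[k] = h[k] / r, h[k + 1] / r
    h[k], h[k + 1] = r, 0.0
    beta_vec[k + 1] = -sn[k] * beta_vec[k]    # |beta_vec[k+1]| is the updated residual norm
    beta_vec[k] = cs[k] * beta_vec[k]
    return h, cs, sn, beta_vec

After the final step, the caller can back-substitute the accumulated R against the leading entries of beta_vec to obtain the coefficients of the solution update in the Krylov basis V.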
[ "def obtain_Q(self):\n \n #create the initial triangular matrix as a copy of the m x n - matrix A\n \n v_list = Householder.vector(self)\n n_v = len(v_list) # number of vectors, not equal to number of columns in R\n q_m = len(v_list[0]) # longest vector, should determine the shape of Q\n \n H_list = []\n for i in list(range(n_v)):\n \n gamma = ((np.linalg.norm(v_list[i]))**2)/2\n vvtrans = v_list[i] * np.transpose(v_list[i])\n H = np.identity((q_m-i)) - (vvtrans/gamma)\n \n print(H.shape)\n\n m_H, n_H = H.shape\n if m_H < q_m:\n I = np.identity(q_m)\n x = y = i\n I [ x:x+H.shape[0], y:y+H.shape[1]] = H\n H = I\n H_list.append(H)\n \n # The transpose of Q is the result of the dot product H(n-1)...H1 \n \n len_H = len(H_list)\n\n H_temp = H_list[-1]\n \n for i in np.arange(len_H-1,0,-1):\n \n H_temp = np.matmul(H_temp, H_list[i-1])\n \n Q = np.transpose(H_temp)\n \n return(Q)", "def project (u, v):\r\n\r\n # Construct linear system Ap = d\r\n A = sps.lil_matrix ((width*height, width*height))\r\n d = np.zeros ((width*height))\r\n\r\n for i in range (1, height-1):\r\n for j in range (1, width-1):\r\n A[index(i,j), index(i,j)] = 4\r\n A[index(i,j), index(i-1,j)] = -1\r\n A[index(i,j), index(i+1,j)] = -1\r\n A[index(i,j), index(i,j-1)] = -1\r\n A[index(i,j), index(i,j+1)] = -1\r\n \r\n d[index(i,j)] = -1/h * (u[i,j] - u[i,j-1] + v[i,j] - v[i-1,j])\r\n\r\n # Unhandled boundary cases, we assume solid walls that don't move\r\n A[index(0,0), index(0,0)] = 2\r\n A[index(0,0), index(1,0)] = -1\r\n A[index(0,0), index(0,1)] = -1\r\n d[index(0,0)] = -1/h * (u[0,0] + v[0,0])\r\n\r\n A[index(height-1,0), index(0,0)] = 2\r\n A[index(height-1,0), index(height-1,1)] = -1\r\n A[index(height-1,0), index(height-2,0)] = -1\r\n d[index(height-1,0)] = -1/h * (u[height-1,0] - v[height-2,0])\r\n\r\n A[index(0,width-1), index(0,width-1)] = 2\r\n A[index(0,width-1), index(1,width-1)] = -1\r\n A[index(0,width-1), index(0,width-2)] = -1\r\n d[index(0,width-1)] = -1/h * (-u[0,width-2] + v[0,width-1])\r\n\r\n A[index(height-1,width-1), index(height-1,width-1)] = 2\r\n A[index(height-1,width-1), index(height-2,width-1)] = -1\r\n A[index(height-1,width-1), index(height-1,width-2)] = -1\r\n d[index(height-1,width-1)] = -1/h * (-u[height-1,width-2] - v[height-2,width-1])\r\n\r\n\r\n for i in range (1, height-1):\r\n A[index(i,0), index(i,0)] = 3\r\n A[index(i,0), index(i-1,0)] = -1\r\n A[index(i,0), index(i+1,0)] = -1\r\n A[index(i,0), index(i,1)] = -1\r\n d[index(i,0)] = -1/h * (u[i,0] + v[i,0] - v[i-1,0])\r\n\r\n for i in range (1, height-1):\r\n A[index(i,width-1), index(i,width-1)] = 3\r\n A[index(i,width-1), index(i-1,width-1)] = -1\r\n A[index(i,width-1), index(i+1,width-1)] = -1\r\n A[index(i,width-1), index(i,width-2)] = -1\r\n d[index(i,width-1)] = -1/h * (- u[i,width-2] + v[i, width-1] - v[i-1,width-1])\r\n\r\n for j in range (1, width-1):\r\n A[index(0,j), index(0,j)] = 3\r\n A[index(0,j), index(1,j)] = -1\r\n A[index(0,j), index(0,j-1)] = -1\r\n A[index(0,j), index(0,j+1)] = -1\r\n d[index(0,j)] = -1/h * (u[0,j] - u[0,j-1] + v[0,j])\r\n \r\n for j in range (1, width-1):\r\n A[index(height-1,j), index(height-1,j)] = 3\r\n A[index(height-1,j), index(height-2,j)] = -1\r\n A[index(height-1,j), index(height-1,j-1)] = -1\r\n A[index(height-1,j), index(height-1,j+1)] = -1\r\n d[index(height-1,j)] = -1/h * (u[height-1,j] - u[height-1,j-1] - v[height-2,j])\r\n\r\n\r\n A = A * dt / (density * h**2)\r\n\r\n A = sps.csr_matrix (A)\r\n p = np.reshape(spsolve (A, d), (height, width))\r\n\r\n # Calculate new velocity field 
based on this pressure field\r\n for i in range (height):\r\n for j in range (width):\r\n if (i == height-1 and j == width-1) or (i == height-1 and j == 0) or (i == 0 and j == width-1) or (i == 0 and j == 0):\r\n # Set vertical velocity to movement of solid wall 0\r\n u[i,j] = 0\r\n v[i,j] = 0\r\n elif i == height-1 or i == 0:\r\n u[i,j] = u[i,j] - dt / (density * h) * (p[i,j+1] - p[i,j])\r\n v[i,j] = 0\r\n elif j == width-1 or j == 0:\r\n u[i,j] = 0\r\n v[i,j] = v[i,j] - dt / (density * h) * (p[i+1,j] - p[i,j])\r\n else:\r\n u[i,j] = u[i,j] - dt / (density * h) * (p[i,j+1] - p[i,j])\r\n v[i,j] = v[i,j] - dt / (density * h) * (p[i+1,j] - p[i,j])\r\n\r\n # let's get some inflow\r\n u[4:12, 0] = 1\r\n\r\n return u, v, p", "def get_adp_from_calc(vx, vy, vz):\n ## lx=np.linalg.norm(vx)\n ## ly=np.linalg.norm(vy)\n ## lz=np.linalg.norm(vz)\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0],\n [0, ly, 0],\n [0, 0, lz]])\n\n\n ## Vx=vx/lx\n ## Vy=vy/ly\n ## Vz=vz/lz\n Vx = np.array([1, 0, 0])\n Vy = np.array([0, 1, 0])\n Vz = np.array([0, 0, 1])\n V = np.matrix([[Vx[0], Vy[0], Vz[0]],\n [Vx[1], Vy[1], Vz[1]],\n [Vx[2], Vy[2], Vz[2]]])\n Vinv = np.linalg.inv(V)\n #print V,Vinv\n M = np.dot(np.dot(Vinv, L), V)\n #print M\n return M", "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of variables:\n `vector`: The current vector to be orthogonalized\n to all previous ones\n `krylov_vectors`: jax.array of collected krylov vectors\n `n`: integer denoting the column-position of the overlap\n <`krylov_vector`|`vector`> within `H`.\n Returns:\n updated vals.\n\n \"\"\"\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]\n\n @functools.partial(jax.jit, static_argnums=(5, 6, 7))\n def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n \"\"\"\n Compute an m-step arnoldi factorization of `matvec`, with\n m = min(`it`,`num_krylov_vecs`). The factorization will\n do at most `num_krylov_vecs` steps. The returned arrays\n `kv` and `H` will satisfy the Arnoldi recurrence relation\n ```\n matrix @ Vm - Vm @ Hm - fm * em = 0\n ```\n with `matrix` the matrix representation of `matvec` and\n `Vm = jax.numpy.transpose(kv[:it, :])`,\n `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1)\n and `em` a cartesian basis vector of shape `(1, kv.shape[1])`\n with `em[0, -1] == 1` and 0 elsewhere.\n\n Note that the caller is responsible for dtype consistency between\n the inputs, i.e. dtypes between all input arrays have to match.\n\n Args:\n matvec: The matrix vector product.\n args: List of arguments to `matvec`.\n v0: Initial state to `matvec`.\n krylov_vectors: An array for storing the krylov vectors. The individual\n vectors are stored as columns.\n The shape of `krylov_vecs` has to be\n (num_krylov_vecs + 1, np.ravel(v0).shape[0]).\n H: Matrix of overlaps. 
The shape has to be\n (num_krylov_vecs + 1,num_krylov_vecs + 1).\n start: Integer denoting the start position where the first\n produced krylov_vector should be inserted into `krylov_vectors`\n num_krylov_vecs: Number of krylov iterations, should be identical to\n `krylov_vectors.shape[0] + 1`\n eps: Convergence parameter. Iteration is terminated if the norm of a\n krylov-vector falls below `eps`.\n Returns:\n kv: An array of krylov vectors\n H: A matrix of overlaps\n it: The number of performed iterations.\n \"\"\"\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps\n\n return _arnoldi_fact", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. 
working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def solve_driv(v, ene, s, n, h):\n\n xs = np.array([(k+1)*h for k in range(n)])\n h2 = h*h\n k = np.sqrt(2.0*ene)\n \n vs = [v(x)-ene for x in xs]\n\n mat = laplacian_mat(n) -2.0 * h2 * scipy.sparse.diags(vs, 0) + bc_outgoing_mat(n, h, k)\n vec = np.array([-2.0*h*h*s(x) for x in xs])\n\n ys = scipy.sparse.linalg.spsolve(mat, vec)\n return (xs, ys)", "def prepare(self):\n ls=len(self.v)\n self.S=numpy.zeros(ls)\n self.A=numpy.zeros((ls,ls))\n\n for k,v in self.e.items():\n b,e=k\n bi,ei=self.rv[b],self.rv[e]\n self.A[bi,bi]-=v\n self.A[bi,ei]+=v", "def initiateVMatrixes():\n global v, vNew, vExact\n # Initialize the grid to 0\n v = np.zeros((n+1, n+1)) # matrix of v, index are i: row, j:column\n # Set the boundary conditions\n for i in range(1,n):\n v[0,i] = 10\n v[n,i] = 10\n v[i,0] = 10\n v[i,n] = 10\n # Exact solution\n vExact = np.copy(v)\n for i in range(1,n):\n for j in range(1,n):\n vExact[i,j] = 10\n # Initial guess\n for i in range(1,n):\n for j in range(1,n):\n v[i,j] = 0.9*vExact[i,j]\n vNew = np.copy(v)", "def solve_VFI(self):\r\n dimC = self.dimA ; dimA = self.dimA ; dimW = self.dimW \r\n C = self.c_grid ; A = self.a_grid ; W = self.W_grid\r\n tol = self.tol ; Niter = self.Niter ; R = self.R\r\n beta = self.beta ; Pi = self.Pi\r\n \r\n V0 = np.zeros((dimA,dimC,dimW))\r\n V1 = np.zeros((dimA,dimC,dimW))\r\n Pol = np.zeros((dimA,dimC,dimW))\r\n U = np.zeros((dimA,dimC,dimW))\r\n \r\n t0 = time()\r\n diff = 1 ; niter = 0\r\n \r\n while diff > tol:\r\n niter += 1\r\n # Value update step\r\n for ia in range(dimA):\r\n for ic in range(dimC):\r\n for iw in range(dimW):\r\n c = W[iw] + R*A[ia] - A\r\n x = C[ic]\r\n \r\n c[c < 0] = np.nan \r\n if x < 0:\r\n x = np.nan\r\n \r\n u = self.u(c,x) \r\n U[:,ic,iw] = u \r\n \r\n Objective = U + beta * V0 @ Pi.T\r\n V1[ia,:,:] = np.nanmax(Objective, axis = 0)\r\n Pol[ia,:,:] = np.nanargmax(Objective, axis = 0)\r\n \r\n # Evaluate distance between the value functions\r\n diff = np.max(np.max(np.abs(V1 - V0))) \r\n V0[:] = V1\r\n \r\n # Break the while 
loop if too many iterations\r\n #print(\"The current error is \"+str(diff))\r\n if niter > Niter:\r\n print('Ops, no convergence')\r\n break\r\n \r\n t1 = time()\r\n #print('VFI algorithm took {0:0d} iterations and {1:.2f} seconds.'.format(niter, t1 - t0))\r\n \r\n self.V1 = V1 ; self.Pol = Pol", "def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n jax.numpy.ravel(v))\n H = jax.lax.cond(\n start > 0, start,\n lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None,\n lambda x: H)\n\n # body of the arnoldi iteration\n def body(vals):\n krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals\n Av = matvec(vector, *args)\n initial_vals = [Av, krylov_vectors, i, H]\n Av, krylov_vectors, _, H = jax.lax.fori_loop(\n 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals)\n norm = jax.numpy.linalg.norm(Av)\n Av /= norm\n H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm)\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[i + 1, :],\n jax.numpy.ravel(Av))\n return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter]\n\n def cond_fun(vals):\n # Continue loop while iteration < num_krylov_vecs and norm > eps\n _, _, _, _, norm, _, iteration, _ = vals\n counter_done = (iteration >= num_krylov_vecs)\n norm_not_too_small = norm > eps\n continue_iteration = jax.lax.cond(counter_done,\n _, lambda x: False,\n _, lambda x: norm_not_too_small)\n\n return continue_iteration\n initial_norm = v.real.dtype.type(1.0+eps)\n initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start,\n num_krylov_vecs]\n final_values = jax.lax.while_loop(cond_fun, body, initial_values)\n kvfinal, Hfinal, _, _, norm, _, it, _ = final_values\n return kvfinal, Hfinal, it, norm < eps", "def v(resistances, r_i, applied_voltages, **kwargs):\n if r_i.word_line > 0 or r_i.bit_line > 0:\n g = fill.g(resistances, r_i)\n i = fill.i(applied_voltages, resistances, r_i)\n\n utils.message('Started solving for v.', **kwargs)\n v_matrix = linalg.spsolve(g.tocsc(), i)\n utils.message('Solved for v.', **kwargs)\n\n # if `num_examples == 1`, it can result in 1D array.\n if v_matrix.ndim == 1:\n v_matrix = v_matrix.reshape(v_matrix.shape[0], 1)\n\n # if one of the interconnect resistances is zero, only half of the\n # matrix_v had to be solved. 
The other half can be filled without\n # solving because the node voltages are known.\n if r_i.word_line == 0:\n new_v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n new_v_matrix[:resistances.size, ] = np.repeat(\n applied_voltages, resistances.shape[1], axis=0)\n new_v_matrix[resistances.size:, ] = v_matrix\n v_matrix = new_v_matrix\n if r_i.bit_line == 0:\n new_v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n new_v_matrix[:resistances.size, ] = v_matrix\n v_matrix = new_v_matrix\n else:\n # if both interconnect resistances are zero, all node voltages are\n # known.\n v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n v_matrix[:resistances.size, ] = np.repeat(\n applied_voltages, resistances.shape[1], axis=0)\n\n return v_matrix", "def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True", "def svd_approx(A, k):\n U,s,Vh=la.svd(A,full_matrices=False)\n return U[:,:k].dot(np.diag(s[:k])).dot(Vh[:k,:])", "def dlqr(A,B,Q,R):\n #ref Bertsekas, p.151\n \n #first, try to solve the ricatti equation\n X = np.matrix(scipy.linalg.solve_discrete_are(A, B, Q, R))\n \n #compute the LQR gain\n K = np.matrix(scipy.linalg.inv(B.T*X*B+R)*(B.T*X*A))\n \n eigVals, eigVecs = scipy.linalg.eig(A-B*K)\n \n return K, X, eigVals", "def svd(self):\n U, s, Vh = la.svd(self)\n S = np.zeros(self.shape)\n np.fill_diagonal(S, s)\n return (Matrix(U), Matrix(S), Matrix(Vh))", "def generate_V(self):\n\n n_samples, n_dimensions, L = self.n_samples, self.n_dimensions, self.L\n\n V = np.zeros([L + 1, n_dimensions], dtype=int)\n V[1:, 0] = [1 << (self.scale - j) for j in range(1, L + 1)]\n\n for i in range(n_dimensions - 1):\n\n m = np.array(directions[i], dtype=int)\n s = len(m) - 1\n\n # The following code discards the first row of the ``m`` array\n # Because it has floating point errors, e.g. 
values of 2.24e-314\n if L <= s:\n V[1:, i + 1] = [m[j] << (self.scale - j) for j in range(1, L + 1)]\n else:\n V[1 : s + 1, i + 1] = [\n m[j] << (self.scale - j) for j in range(1, s + 1)\n ]\n for j in range(s + 1, L + 1):\n V[j, i + 1] = V[j - s, i + 1] ^ (V[j - s, i + 1] >> s)\n for k in range(1, s):\n V[j, i + 1] ^= ((m[0] >> (s - 1 - k)) & 1) * V[j - k][i + 1]\n\n return V", "def calc_V(A):\n return 1. / calc_rV(A)", "def solve(self):\n is_valid = self.verify_sub_matrixes()\n \n if not is_valid:\n raise ValueError((\n \"El determinante es igual a cero \"\n \"el método no puede continuar\"\n ))\n \n (lower, upper) = self.doolittle_factorization()\n\n lower_solution_vector = lower.solve_matrix(matrix=None, vector=self.vector.vector)\n lower_solution_vector.print_vector()\n upper_solution_vector = upper.solve_matrix(\n matrix=None, vector=lower_solution_vector.vector)\n upper_solution_vector.print_vector()\n\n comprobation = self.matrix.comprobation(upper_solution_vector.vector)\n return comprobation", "def calc_vad_3d(az, elev, vel):\n elev = np.deg2rad(elev)\n az = np.deg2rad(az)\n\n if vel.size > 1: # If there could be sufficient data points...\n A = sum(vel * np.sin(az))\n B = sum(np.sin(az) ** 2 * np.cos(elev))\n C = sum(np.cos(az) * np.sin(az) * np.cos(elev))\n G = sum(np.sin(az) * np.sin(elev))\n\n D = sum(vel * np.cos(az))\n E = sum(np.sin(az) * np.cos(az) * np.cos(elev))\n F = sum(np.cos(az) ** 2 * np.cos(elev))\n H = sum(np.cos(az) * np.sin(elev))\n\n W = sum(vel)\n X = sum(np.sin(az) * np.cos(elev))\n Y = sum(np.cos(az) * np.cos(elev))\n Z = sum(az * np.sin(elev))\n\n # solve A = uB + vC + wG , D = uE + vF + wH and W = uX + vY+ wZ\n y = np.array([[B, E, X], [C, F, Y], [G, H, Z]])\n z = np.array([A, D, W])\n # print y\n # print z\n try:\n sol = np.linalg.solve(y, z)\n # print sol\n u = sol[0]\n v = sol[1]\n w = sol[2]\n return u, v, w\n except np.linalg.linalg.LinAlgError:\n return FILL_VALUE, FILL_VALUE, FILL_VALUE\n else:\n return FILL_VALUE, FILL_VALUE, FILL_VALUE", "def QR(self):\n m, n = self.shape\n assert m >= n, \"Requires m>=n\"\n R = self.copy()\n Q = eye(m)\n\n for j in range(n):\n reflect_me = R[j:, j].copy()\n v, beta = reflect_me._house()\n H = eye(m)\n # A[j:, j:] = (I - beta*v*v.T)*A[j:, j:]\n H[j:, j:] -= (v @ v.T()) * beta\n # Not producing correct triangular matrix.\n # Q looks good though.\n R = H @ R\n Q = H @ Q\n return Q[:n].T(), R[:n]", "def dexpinv(self, u, v, _=None):\n A, a = np.split(u, 2)\n B, b = np.split(v, 2)\n alpha = np.linalg.norm(A)\n rho = np.inner(A, a)\n if np.isclose(alpha, 0):\n return v\n c1 = (\n B\n - 0.5 * np.cross(A, B)\n + self._dexpinv_helper_1(alpha) * np.cross(A, np.cross(A, B))\n )\n c2 = (\n b\n - 0.5 * (np.cross(a, B) + np.cross(A, b))\n + self._dexpinv_helper_2(alpha, rho) * np.cross(A, np.cross(A, B))\n + self._dexpinv_helper_1(alpha)\n * (\n np.cross(a, np.cross(A, B))\n + np.cross(A, np.cross(a, B))\n + np.cross(A, np.cross(A, b))\n )\n )\n return np.hstack((c1, c2))", "def A_coefficients_ellipsoid(v, DD, bDDisDelta=False):\n #v can be given as an array with X/Y/Z cartesian dimensions being the last.\n #\"\"\"\n if bDDisDelta:\n delta=DD\n else:\n delta=Ddelta_ellipsoid(dd)\n #v=_sanitise_v(v)\n #v2=np.square(v)\n #v4=np.square(v2)\n #fact2=np.multiply(0.75,np.sum(v4))-0.25\n v2 = [ v[i]*v[i] for i in range(3) ]\n v4 = [ v2[i]*v2[i] for i in range(3) ]\n fact2 = 0.25*( 3.0*(v4[0]+v4[1]+v4[2])-1.0)\n fact3 = 1.0/12.0*(delta[0]*(3*v4[0]+6*v2[1]*v2[2]-1) + delta[1]*(3*v4[1]+6*v2[0]*v2[2]-1) + 
delta[2]*(3*v4[2]+6*v2[0]*v2[1]-1))\n A=np.zeros(5)\n A[0]= 3*v2[1]*v2[2]\n A[1]= 3*v2[0]*v2[2]\n A[2]= 3*v2[0]*v2[1]\n A[3]= fact2-fact3\n A[4]= fact2+fact3\n return A", "def approximate_svd(A, U, S, V, k=10, params=None):\n\n \n A = lib.adapt(A)\n U = lib.adapt(U)\n S = lib.adapt(S)\n V = lib.adapt(V)\n\n Aobj = A.ptr()\n Uobj = U.ptr()\n Sobj = S.ptr()\n Vobj = V.ptr()\n\n if (Aobj == -1 or Uobj == -1 or Sobj == -1 or Vobj == -1):\n raise errors.InvalidObjectError(\"Invalid/unsupported object passed as A, U, S or V \")\n\n # use default params in case none are provided\n if params == None:\n params = SVDParams()\n params_json = params.str() + '\\0'\n\n lib.callsl(\"sl_approximate_svd\", \\\n A.ctype(), Aobj, \\\n U.ctype(), Uobj, \\\n S.ctype(), Sobj, \\\n V.ctype(), Vobj, \\\n k, params_json, lib.ctxt_obj)\n\n A.ptrcleaner()\n U.ptrcleaner()\n S.ptrcleaner()\n V.ptrcleaner()\n\n return (U.getobj(), S.getobj(), V.getobj())", "def inv(in_A):\n Q,R = qr(in_A)\n QT = Q.T\n N = shape(in_A)[0]\n \n for n in range(N-1,-1,-1):\n Rnn = R[n,n]\n R[n,:] /= Rnn\n QT[n,:] /= Rnn\n for m in range(n+1,N):\n Rnm = R[n,m]\n R[n,m] = 0\n QT[n,:] -= QT[m,:]*Rnm\n\n return QT", "def svd_approx(A, s):\n \n U, S, V = la.svd(A)\n V = V.conj().T\n if s > len(S):\n raise ValueError( str(len(S)) + \" = Rank(A) > s\" )\n \n U2 = U[:,:s]\n S2 = S[:s]\n V2 = V[:,:s]\n V2 = V2.conj().T\n \n S2 = np.diag(S2)\n \n Ag = U2@S2@V2\n ent = U2.size + len(S2) + V2.size\n return Ag, ent\n \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def project(Ad, Bd, Cd, Dd, q, r, solver=cvx.SCS):\n \n Z = np.zeros((3,3))\n I = np.eye(3)\n A = np.block([[Ad, Z], [I, I]])\n B1 = np.vstack((I, Z))\n B2 = np.vstack((Bd, Z))\n C1 = np.block([[I, Z], [Z, Z]])\n D11 = np.block([[Z], [Z]])\n D12 = np.block([[Z], [I]])\n C2 = np.block([I, Z])\n D21 = Z\n D22 = Z\n\n return hinf_project_pole_alloc(A, B1, B2, C1, C2, D11, D12, D21, D22, q, r, solver)", "def eigsolve(self,**kwargs):\n return eigsolve(self,**kwargs)", "def qri_mgs_piv( A, alpha=0.5 ):\n \n Q = numpy.array(A, dtype=float)\n m,n = Q.shape\n R = numpy.zeros( (n,n) )\n Qnorms = numpy.zeros( n )\n piv = numpy.zeros( n )\n P = numpy.eye( n )\n\n for k in range( 0, n ) :\n # step 0\n for j in range ( k, n ) :\n Qnorms[j] = numpy.linalg.norm( Q[:,j] )\n #print Qnorms\n j = numpy.where(Qnorms == max(Qnorms[k:n]))[0][0]\n Qnorms[k] = 0\n #print Q\n #print R\n #piv[k] = j\n if (j != k) :\n #print \"switching columns\", k, \"and\", j\n P[:, [j, k]] = P[:, [k, j]]\n Q[:, [j, k]] = Q[:, [k, j]]\n #if (k > 0) :\n # R[0:k, [j, k]] = R[0:k, [k, j]]\n R[:, [j, k]] = R[:, [k, j]]\n #print Q\n #print R\n\n # step 1\n vl2norm = numpy.linalg.norm( Q[:,k] )\n ii = 0\n while True : # iterate\n for i in range( 0, k ) :\n s = numpy.dot( Q[:,i], Q[:,k] )\n Q[:,k] = Q[:,k] - s * Q[:,i]\n R[i,k] = R[i,k] + s\n\n ii = ii + 1\n vlnorm = vl2norm\n vl2norm = numpy.linalg.norm( Q[:,k] )\n if (vl2norm > alpha * vlnorm) :\n #print \"on column\", k, \"used\", ii, \"orthogonalizations\"\n break\n \n # step 2\n R[k,k] = numpy.linalg.norm( Q[:,k] )\n Q[:,k] = Q[:,k] / R[k,k]\n\n # step 3\n if (k == n) :\n break\n else :\n for j in range( k+1, n ) :\n R[k,j] = numpy.dot( Q[:,k], Q[:,j] )\n Q[:,j] = Q[:,j] - R[k,j] * Q[:,k]\n\n # step 4\n #Qhat = Q[:,k]\n #Qhat2 = Qhat\n for j in range( k+1, n ) :\n ii = 0\n vl2norm = numpy.linalg.norm( Q[:,j] )\n while True : # iterate\n s = numpy.dot( Q[:,j], Q[:,k] )\n R[k,j] = R[k,j] + s\n Q[:,j] = Q[:,j] - s * Q[:,k]\n \n ii = ii + 1\n vlnorm = vl2norm\n vl2norm = 
numpy.linalg.norm( Q[:,j] )\n if (vl2norm > alpha * vlnorm) :\n #print \"on column\", j, \"used\", ii, \"orthogonalizations\"\n break\n \n return Q,R,P", "def solve_homography(u, v):\r\n N = u.shape[0]\r\n H = None\r\n\r\n if v.shape[0] is not N:\r\n print('u and v should have the same size')\r\n return None\r\n if N < 4:\r\n print('At least 4 points should be given')\r\n\r\n # TODO: 1.forming A\r\n A = np.zeros((2*N, 8))\r\n for i in range(N):\r\n A[2*i, :] = np.array([u[i, 0], u[i, 1], 1, 0, 0, 0, -u[i, 0]*v[i,0], -u[i, 1]*v[i, 0]])\r\n A[2*i+1, :] = np.array([0, 0, 0, u[i, 0], u[i, 1], 1, -u[i, 0]*v[i, 1], -u[i, 1]*v[i, 1]])\r\n\r\n # TODO: 2.solve H with A\r\n b = v.reshape(-1)\r\n H, res, _, _ = np.linalg.lstsq(A, b, rcond=None)\r\n H = np.concatenate((H, np.array([1])))\r\n H = H.reshape(3,3)\r\n\r\n return H", "def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # QRFactorization\n Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')\n\n if np.linalg.norm(R[-1, :], np.inf) < tol:\n warn('Singular Jacobian matrix. Using SVD decomposition to ' +\n 'perform the factorizations.')\n return svd_factorization_projections(A, m, n,\n orth_tol,\n max_refin,\n tol)\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v = np.zeros(m)\n v[P] = aux2\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = P inv(R) Q.T x\n aux1 = Q.T.dot(z)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n v[P] = aux2\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = P inv(R) Q.T x\n aux1 = Q.T.dot(x)\n aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)\n z = np.zeros(m)\n z[P] = aux2\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = Q inv(R.T) P.T x\n aux1 = x[P]\n aux2 = scipy.linalg.solve_triangular(R, aux1,\n lower=False,\n trans='T')\n z = Q.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def visualize_svd():\n A=np.array([[3,1],[1,3]])\n U,s,Vh=truncated_svd(A)\n \n twopi=np.linspace(0,2.*np.pi,360)\n one=np.reshape(np.linspace(0,1,100),(1,100))\n zeros=np.zeros((1,100))\n S=np.vstack((np.reshape(np.cos(twopi),(1,360)),np.reshape(np.sin(twopi),(1,360))))\n e1=np.vstack((zeros,one))\n e2=e1[::-1] \t\n \n s1S=Vh.dot(S)\n s1e1=Vh.dot(e1)\n s1e2=Vh.dot(e2)\n\n s2S=np.diag(s).dot(s1S)\n s2e1=np.diag(s).dot(s1e1)\n s2e2=np.diag(s).dot(s1e2)\n \n s3S=U.dot(s2S)\n s3e1=U.dot(s2e1)\n s3e2=U.dot(s2e2)\n \n \n \n \n\n \n \n plt.subplot(221)\n plt.plot(S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(e2[0],s3e2[1],\"r-.\",lw=2)\n \n \n \n plt.subplot(222)\n plt.plot(s1S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s1e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s1e2[0],s3e2[1],\"r-.\",lw=2)\n \n \n plt.subplot(223)\n plt.plot(s2S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s2e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s2e2[0],s3e2[1],\"r-.\",lw=2)\n \n plt.subplot(224) \n \n plt.plot(s3S[0],s3S[1],\"b-.\",lw=2)\n plt.plot(s3e1[0],s3e1[1],\"g-.\",lw=2)\n plt.plot(s3e2[0],s3e2[1],\"r-.\",lw=2)\n \n plt.show()", "def singular_decomp(A):\n # Initialization\n n, m = A.shape\n U = np.zeros((n, m), dtype='float64')\n\n # Diagonalization of A^T * A\n rot, e, V = eigen.diag(np.dot(np.transpose(A), A))\n\n # Calculate U\n U = 
np.dot(A, V)\n for i in range(m):\n e[i] = np.sqrt(e[i])\n U[:, i] /= e[i]\n\n return U, e, V", "def calc_rV(A):\n return np.sqrt(calc_rVsq(A))", "def visualize_svd(A):\r\n theta = np.linspace(0,2*np.pi,200)\r\n #Set S as unit circle\r\n S = np.array([np.cos(theta), np.sin(theta)])\r\n #Set E as orthogonal basis\r\n E = np.array([[1,0,0],[0,0,1]])\r\n U,Si,Vh = la.svd(A)\r\n Si = np.diag(Si)\r\n\r\n #plot original S and E\r\n first = plt.subplot(221)\r\n first.plot(S[0], S[1])\r\n first.plot(E[0], E[1])\r\n first.axis(\"equal\")\r\n\r\n #rotate S,E and plot S,E\r\n second = plt.subplot(222)\r\n vhs = Vh@S\r\n vhe = Vh@E\r\n second.plot(vhs[0], vhs[1])\r\n second.plot(vhe[0], vhe[1])\r\n second.axis(\"equal\")\r\n\r\n #scale S,E and plot S,E\r\n third = plt.subplot(223)\r\n sivhs = Si@vhs\r\n sivhe = Si@vhe\r\n third.plot(sivhs[0],sivhs[1])\r\n third.plot(sivhe[0],sivhe[1])\r\n third.axis([-4,4,-4,4])\r\n\r\n #rotate S,E and plot S,E\r\n fourth = plt.subplot(224)\r\n usivhs = U@sivhs\r\n usivhe = U@sivhe\r\n fourth.plot(usivhs[0],usivhs[1])\r\n fourth.plot(usivhe[0],usivhe[1])\r\n fourth.axis([-4,4,-4,4])\r\n\r\n plt.show()", "def mbed_solve (A, budgets, S, verbose=True):\n # print(S)\n start_time = time.time()\n x_v, C = initialize(A, S)\n if (verbose):\n print(\"Initialized\")\n print(\"V1: \", np.sum(x_v == 1), \" ,V2: \", np.sum(x_v == -1))\n results_info, S_new, Ad, edges_removed = random_choose_candidate_solve (x_v, C, A, S, budgets, start_time, verbose=verbose)\n return results_info, S_new, Ad, edges_removed", "def v_o(A,vd):\n return A*vd", "def analytical_eig(A):\n n = len(A)\n h = 1/float(n)\n d = 2/float(h)**2\n a = -1/float(h)**2\n eigenval = np.empty(n)\n for j in range(1,n+1):\n eigenval[j-1] = d + 2*a*np.cos((j*np.pi)/(float(n)+1)) # Analytic solution\n \n return eigenval", "def LUdecomp(Ainput):\n\n n, m = np.shape(Ainput)\n \n if n != m:\n return 'Error: Please enter an invertible matrix.'\n \n U = Ainput.copy() # make copies so as not to write over originals\n L = np.zeros((np.shape(Ainput)))\n \n for i in range(0,n):\n L[i,i] = 1\n for i in range(0,n-1): # loop over pivot rows from row 1 to row n-1 (i to n-2)\n for j in range(i+1,n): # loop over row to be zero'ed from row j+1 to n (j+1 to n-1)\n c = U[j,i]/U[i,i] # multiplicative factor to zero point\n L[j,i] = c\n U[j,i] = 0.0 # we know this element goes to zero\n U[j,i+1:n]=U[j,i+1:n]-c*U[i,i+1:n] # do subtraction of two rows\n\n return (L,U) # return lower and upper decompositions", "def get_v(r_div_R, z_div_L, Pi_div_DLP, k, alpha_ast, Bp, Bm, gp, gm, membrane_geometry):\n return CT.get_v(r_div_R, z_div_L, Pi_div_DLP, k, alpha_ast, Bp, Bm, gp, gm, membrane_geometry)", "def CreateAtrium(L,v,d,seed): \n Neighbours = np.ndarray(L*L, dtype = list)\n rnd.seed(seed)\n Phases = np.ndarray(L*L,dtype = float)\n Phases.fill(4)\n Functionality = np.ndarray([L*L], dtype = bool)\n index = np.indices((1, L*L))[1][0]\n Atrium = index.reshape(L,L) # The index for that site within the long arrays\n for j in index:\n z = rnd.uniform(0,1)\n if d > z: # dysfunctional\n Functionality[j] = False\n if d <= z: # functional\n Functionality[j] = True\n if j in np.arange(0,L*L,L): # first column\n Neighbours[j] = list()\n Neighbours[j].extend([j+1])\n elif j in (np.arange(0,L*L,L)+L-1): # last column\n Neighbours[j] = list()\n Neighbours[j].extend([j-1])\n else: # body columns\n Neighbours[j] = list()\n Neighbours[j].extend([j-1,j+1])\n w = rnd.uniform(0,1)\n for j in np.arange(L*L):\n if w <= v: # transverse connections\n if j in 
np.arange(L*L-L,L*L):\n Neighbours[j].extend([j-(L*L-L)])\n Neighbours[j-(L*L-L)].extend([j])\n else:\n Neighbours[j].extend([j+L])\n Neighbours[(j+L)].extend([j])\n return Neighbours, Phases, Functionality, Atrium, index", "def householder(a, b):\n rows, = np.shape(a)\n q = np.identity(rows) # Orthogonal matrix\n r = np.copy(a) # Upper triangular matrix\n\n for i in range(rows - 1):\n b = r[i:, i] # Column vector\n\n \"\"\"The first element of the vector e\n is norm(x) signed -A[i,i](on the main diagonal).\n Now we apply formula b-ac \n \"\"\"\n c = np.zeros_like(b)\n c[0] = m.copysign(np.linalg.norm(b), -a[i, i])\n u = b + c\n v = u / np.linalg.norm(u)\n\n \"\"\"Now we apply E-2*Omega\"\"\"\n Q_om = np.identity(rows) # Identity matrix\n Q_om[i:, i:] -= 2.0 * np.outer(v, v)\n\n r = np.dot(Q_om, r)\n q = np.dot(q, Q_om.T)\n\n \"\"\"If we know the decomposition A = QR, \n then the solution reduces to\n solving the system Rx = Q.T * b\n \"\"\"\n p = np.dot(q.T, b)\n x = np.dot(np.linalg.inv(r), p)\n\n i = 0\n while i < len(x):\n x[i] = int((x[i] * 10000) + 0.5) / 10000\n i += 1\n\n \"\"\"\n Vector of discrepancy (Ax - B)\n \"\"\"\n discrep = np.dot(a, x)\n discrep = discrep - b\n\n print(\"Method of Householder:\\n\")\n print(\"Vector discrep: \", discrep)\n print(\"Vector x: \", x, \"\\n\")\n\n return x", "def lup_decomposition(self):\n p = [i for i in range(self.rows_count())]\n for i in range(self.rows_count() - 1):\n pivot = i\n for j in range(i + 1, self.rows_count()):\n if abs(self[p[j], i]) > abs(self[p[pivot], i]):\n pivot = j\n p[pivot], p[i] = p[i], p[pivot]\n for j in range(i + 1, self.rows_count()):\n if abs(self[p[i], i]) < math.pow(10, -6):\n raise ValueError(\"Can't divide by 0\")\n self[p[j], i] /= self[p[i], i]\n for k in range(i + 1, self.rows_count()):\n self[p[j], k] -= self[p[j], i] * self[p[i], k]\n lst = []\n for i in p:\n lst.append(self.value[i])\n return p, Matrix(lst)", "def MakeEigenVectors( self ): \n sqrt2 = np.sqrt(2)\n Isqrt2 = 1.0 / sqrt2\n EVectors = np.asarray( [ [ Isqrt2 , Isqrt2 , 0 ] ,\n [ Isqrt2 ,-Isqrt2 , 0 ] , \n [ 0 , 0 , 1 ] ] )\n for i in range( self.NQ ):\n for j in range( self.Nbranches ):\n self.EigenVectors[ i , j , 0 , : ] = EVectors[ j , : ]", "def svd(matrix, approach):\n\n # Getting the eigenvalues and vectors of transpose(A) * A for V and Sigma\n a = mat_multiply(transpose(matrix), matrix)\n if approach == \"qr\":\n V, sigma, iterations = qr_eig(a)\n else:\n V, sigma, iterations = eig(a)\n\n # Sorting singular values and the colums of V accordingly\n V = transpose(V)\n\n singular_values = list()\n sorted_V = list()\n\n r = 0\n for i in range(rows(sigma)):\n singular_values.append([(sigma[i][i]), i])\n if sigma[i][i] > math.exp(-8):\n r += 1\n\n singular_values.sort(key=first_item, reverse=True)\n\n sigma_r = eye(r)\n sigma_r_inv = eye(r)\n\n # Constructing the sorted U and sigma matrices\n i, j = 0, 0\n for value in singular_values:\n if value[0] > math.exp(-8):\n sorted_V.append(V[value[1]])\n sigma_r[j][j] = value[0] ** (1 / 2)\n sigma_r_inv[j][j] = 1 / (value[0] ** (1 / 2))\n j += 1\n i += 1\n\n # Constructing U by multiplying V and sigma inverse\n sorted_U = mat_multiply(mat_multiply(matrix, transpose(sorted_V)), sigma_r_inv)\n\n return (sorted_U, sigma_r, sorted_V, r, iterations)", "def rawsolve(self,):\n m = self.m\n n = self.n\n z = self.z\n mark = self.mark\n kAAt = self.kAAt\n iAAt = self.iAAt\n AAt = self.AAt\n diag = self.diag\n consistent = True\n eps = 0.0\n m2 = m+n\n\n if self.ndep:\n eps = self.epssol * np.abs(z).max()\n\n 
#/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- L z |\n #| */\n\n for i in range(m2):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n row = iAAt[k]\n z[row] -= AAt[k]*beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- D z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n z[i] = z[i]/diag[i]\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| t -1 |\n #| z <- (L ) z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n beta -= AAt[k]*z[iAAt[k]]\n z[i] = beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n return consistent", "def computeV(H):\n # Pseudo-inverse of H\n #V = np.linalg.inv(H) # Inverse\n V = np.linalg.pinv(H) # Pseudo-inverse\n \n # Normalise columns\n [m,n] = V.shape\n for i in range(n):\n V[:,i] = V[:,i]/np.linalg.norm(V[:,i])\n \n return V", "def pde_eigv(self, u):\n u0, u1, u2 = u.T\n c = np.sqrt(9.81*u0)\n vel = np.sqrt((u1/u0)**2 + (u2/u0)**2)\n return np.array([vel-c, vel, vel+c])", "def invert_L1_svd():", "def decompose_to_LU(a):\n # create emtpy LU-matrix\n lu_matrix = np.matrix(np.zeros([a.shape[0], a.shape[1]]))\n n = a.shape[0]\n\n for k in range(n):\n # calculate all residual k-row elements\n for j in range(k, n):\n lu_matrix[k, j] = a[k, j] - lu_matrix[k, :k] * lu_matrix[:k, j]\n # calculate all residual k-column elemetns\n for i in range(k + 1, n):\n lu_matrix[i, k] = (a[i, k] - lu_matrix[i, : k] * lu_matrix[: k, k]) / lu_matrix[k, k]\n\n return lu_matrix", "def laplace2d(get_A, get_rho, N=Mynum, Te=2):\n # Reduce the row and column of Laplacian matrix by 2 \n # Reduced row and column will be replace with embed in future\n # n = N - 2 for embed\n n = N\n # Solving for the PDE(1)\n h = 1.0/(n-1)\n A = get_A(n) * (1/(h**2))\n b = get_rho(n, Te)\n U = sp.linalg.solve(A, b)\n\n # Reshape the u vector into nxn matrix for heat map plotting\n T = U.reshape((n, n))\n print T\n \n # Embed the surrounding of U matrix into zeros\n Tfull = embed(T, Te)\n\n # Verify that dot function of A matrix and U vector\n # return the same rho value at midpoint\n CheckU = np.dot(A,U)\n\n # Filter very small value into zeros\n for i in range(0,len(CheckU)):\n if (abs(CheckU[i]) < 1e-12):\n CheckU[i] = 0\n\n # Validate that product of A and U matrix is the same as rho vector\n # Will give warning if it is not the same\n # assert np.all(CheckU == b) # working only mynum = 7 and 9 \n\n # Print value of the products at midpoint.\n mid = (n**2-1)/2\n print \"Q1: Value of the dot product A.u1 is %5.3f at (0.5,0.5).\" % (CheckU[mid])\n return Tfull", "def __init__( self ):\n self.NQ = 16\n self.Nbranches = 3\n self.NatomsUC = 1\n self.dim = 3\n self.QVectors = np.zeros( ( self.NQ , 3 ) )\n self.MakeQVectors()\n self.EigenVectors = np.zeros( [ self.NQ , \n self.Nbranches ,\n self.NatomsUC , \n self.dim ] )\n self.MakeEigenVectors()", "def _mps_decompose_AC(self, A):\n Dl, d, Dr = A.shape\n Q, C = qr(np.reshape(A, [Dl * d, Dr]))\n nC = nfactor(C)\n # nC = max(abs(C.min()), abs(C.max()))\n if C.shape == (1, 1): # if number then makes C = 1\n Q *= np.sign(C.flat[0])\n C = np.ones((1, 1))\n else:\n C = C / nC\n Dr = C.shape[0]\n Q = np.reshape(Q, [Dl, d, Dr])\n return Q, C, nC, Dr", "def householder_qr(A):\n m, n = A.shape\n I = np.eye(m, dtype = complex)\n 
Ahat = np.zeros((m, n+m), dtype = complex)\n Ahat[:, :n] = A\n Ahat[:, n:] = I\n\n Rhat = householder(Ahat)\n R = Rhat[:,:n]\n Q = Rhat[:,n:].transpose().conj()\n\n return Q, R", "def project(self, new_expn):\n \"\"\"\n data = numpy.array(self.parent.serialisedArrayDataList)\n import sklearn\n skpca = sklearn.decomposition.PCA()\n X_r = skpca.fit(data).transform(data)\n \n self.__v = X_r\n \"\"\"\n # old martrisx\n matrix = numpy.array(self.parent.serialisedArrayDataList)\n U, S, V = numpy.linalg.svd(matrix.T, full_matrices=False)\n \n print(\"matrix\", matrix.shape)\n \n # set-ups\n self.parent = new_expn\n if self.rowwise:\n self.labels = new_expn[self.label_key]\n else:\n self.labels = new_expn.getConditionNames()\n \n matrix = numpy.array(self.parent.serialisedArrayDataList)\n S = numpy.diag(S)\n print(\"U\", U.shape)\n print(\"V\", V.shape)\n print(\"S\", S.shape)\n print(\"matrix\", matrix.shape)\n \n #data = np.dot(U, np.dot(S, V))\n #X_transformed = np.dot(X_transformed, self.V.T)\n print(numpy.dot(S, V).shape)\n\n pr = numpy.dot(matrix, S)\n print(\"pr\", pr.shape)\n #y = x*W;\n #y0 = Y(1,:);\n #sum(abs(y0 - y)) %\n \n # I want a new v. U and D are the same.\n \n self.__v = pr\n \n print(U)\n print()\n print(pr)\n \n print(numpy.allclose(U, pr)) \n print(numpy.allclose(matrix.T, numpy.dot(U, numpy.dot(S, V))))\n return(True)", "def hinf_project_pole_alloc(A, B1, B2, C1, C2, D11, D12, D21, D22, q, r, solver=cvx.SCS):\n \n assert r > 0, 'r must be positive.'\n assert np.abs(q) + r < 1, 'the region must be inside the unit circle.'\n \n tol = 1e-20\n n = A.shape[0]\n \n L = cvx.Variable((B2.shape[1], n))\n P = cvx.Variable((n, n))\n gamma2 = cvx.Variable()\n \n LMI1 = cvx.bmat([\n [P, A*P + B2*L, B1, np.zeros((B1.shape[0], D11.shape[0]))],\n [P*A.T + L.T * B2.T, P, np.zeros((P.shape[0], B1.shape[1])), P*C1.T + L.T*D12.T],\n [B1.T, np.zeros((B1.shape[1], P.shape[1])), np.eye(B1.shape[1]), D11.T],\n [np.zeros((C1.shape[0], B1.shape[0])), C1*P + D12*L, D11, gamma2*np.eye(D11.shape[0])]\n ])\n \n cons1 = LMI1 >> tol\n \n LMI2 = cvx.bmat([\n [-r*P, -q*P + A*P + B2*L],\n [-q*P + P*A.T + L.T*B2.T, -r*P]\n ])\n \n cons2 = LMI2 << -tol\n \n cons3 = gamma2 >= tol\n \n cons4 = P == P.T\n \n cons5 = P >> tol\n \n prob = cvx.Problem(cvx.Minimize(gamma2), constraints=[cons1, cons2, cons3, cons4, cons5])\n prob.solve(solver=solver)\n \n status = prob.status\n if not status in [cvx.OPTIMAL_INACCURATE, cvx.OPTIMAL]:\n #variable.value will be None, better trow an exception\n raise OptException(f'Problem is {status}')\n \n Hinf_norm = np.sqrt(gamma2.value)\n Pop = P.value\n Lop = L.value\n \n K = Lop.dot(np.linalg.inv(Pop))\n \n return K, Hinf_norm, Pop, status", "def _solve_implicit(self, initial_conditions):\n coeff = self.a ** 2 * self.tau / self.h ** 2\n l_and_u = (1, 1)\n ab = np.empty((3, self.n_x))\n # main diagonal\n ab[1] = 1 + 2.0 * coeff\n # upper and lower diagonals\n ab[0] = ab[2] = -coeff\n\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n ab[0][1] = 0 # upper diagonal\n ab[1][0] = 1 # main diagonal\n elif self.left_bc_type == \"NEUMANN\":\n ab[0][1] = 1 # upper diagonal\n ab[1][0] = -1 # main diagonal\n\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n ab[1][-1] = 1 # main diagonal\n ab[2][-2] = 0 # lower diagonal\n elif self.right_bc_type == \"NEUMANN\":\n ab[1][-1] = 1 # main diagonal\n ab[2][-2] = -1 # lower diagonal\n\n current_solution = initial_conditions\n solutions = []\n\n for t in self.t_grid:\n b = current_solution + self.tau * self.rhs(self.x_grid, t)\n # left bc\n 
if self.left_bc_type == \"DIRICHLET\":\n b[0] = self.left_bc(t)\n elif self.left_bc_type == \"NEUMANN\":\n b[0] = self.h * self.left_bc(t)\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n b[-1] = self.right_bc(t)\n elif self.right_bc_type == \"NEUMANN\":\n b[-1] = self.h * self.right_bc(t)\n\n next_solution = solve_banded(l_and_u, ab, b)\n if self.mode == \"VISUALIZATION\":\n solutions.append((t, next_solution.copy()))\n current_solution = next_solution\n if self.mode == \"TEST\":\n # print(\"Result: \", current_solution.tolist())\n # print(\"Right answer: \", self.anl_solution.tolist())\n self._norma(current_solution)\n elif self.mode == \"VISUALIZATION\":\n return solutions", "def ras2ijk(self,A):\n #productive #math #coordinate-space-conversion\n profprint()\n m=vtk.vtkMatrix4x4()\n volumeNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\n volumeNode.GetIJKToRASMatrix(m)\n m.Invert()\n imageData = volumeNode.GetImageData()\n ijk=[0,0,0]\n k = vtk.vtkMatrix4x4()\n o = vtk.vtkMatrix4x4()\n k.SetElement(0,3,A[0])\n k.SetElement(1,3,A[1])\n k.SetElement(2,3,A[2])\n k.Multiply4x4(m,k,o)\n ijk[0] = o.GetElement(0,3)\n ijk[1] = o.GetElement(1,3)\n ijk[2] = o.GetElement(2,3)\n return ijk", "def dV(X):\n return -4 * a * np.power(X, 3) + 2 * b * X", "def extract_solution(result, exact=False):\n if exact:\n i= np.where(result['eigvecs'][0])[0][0]\n else:\n p = result['eigvecs'][0] \n i = max(range(len(p)), key=lambda i: abs(p[i]))\n\n bitsolution = bin(i)[2:]\n pad = int(np.log2(result['eigvecs'].shape[1]))-len(bitsolution)\n bitsolution = pad*'0' + bitsolution\n return bitsolution[::-1]", "def _build_dist(self):\n lamb = self.params['lamb']\n p = self.params['p']\n\n jac = self.jacobian\n # build D on grids\n xg, yg, mask = self._mask_grid()\n r_max = self._r_max(xg, yg, mask)\n d_mat = self._psf_grid(xg, yg, r_max=r_max)\n # E[yy^T]\n j_j_w = np.dot(jac, jac.transpose())\n r_mat = np.diag(np.diag(j_j_w) ** p)\n jac_inv = la.inv(j_j_w + lamb*r_mat)\n # RM = E[xx^T] / E[yy^T]\n h_mat = np.dot(np.dot(d_mat, jac.transpose()), jac_inv)\n return h_mat", "def __init__(self, A, rank=0):\r\n _u, _s, _v = np.linalg.svd(A, full_matrices=0)\r\n \r\n self.rank = rank\r\n\r\n self.U = _u[:,:self.rank].copy()\r\n self.S = _s[:self.rank].copy()\r\n self.SI = np.matrix(np.diag(self.S)).getI()\r\n self.VT = _v[:self.rank,:].copy()\r\n \r\n self._var = [ e/(_s**2).sum() for e in (_s**2).cumsum() ][self.rank-1]", "def make_householder(a):\n\n v = a / (a[0] + np.copysign(np.linalg.norm(a), a[0]))\n v[0] = 1\n H = np.eye(a.shape[0])\n H -= (2 / np.dot(v, v)) * np.dot(v[:, None], v[None, :])\n return H", "def acc_visc(j,rA,vA,mA,rhoA,PA,hA,dW=kernel.dW_M4):\n assert rA.shape[0] == vA.shape[0] == mA.shape[0] == rhoA.shape[0] == hA.shape[0], \"arrays are not matched\"\n N = len(mA)\n c_j = c_gas(j,rhoA,PA)\n\n tot = 0\n for i in range(N):\n if i != j:\n\n r_ij = rA[j,:] - rA[i,:]\n r_ij1 = np.linalg.norm(r_ij)\n v_ij = vA[j,:] - vA[i,:]\n m_i = mA[i]\n c_i = c_gas(i,rhoA,PA)\n c_ij = 0.5 * (c_i + c_j)\n h_ij = 0.5 * (hA[i] + hA[j])\n rho_ij = 0.5 * (rhoA[i] + rhoA[j])\n\n c = np.dot(v_ij,r_ij)\n mu_ij = ( c * h_ij ) / ( r_ij1**2 + 0.01*h_ij**2 )\n\n a = ( -alpha * mu_ij * c_ij + beta * mu_ij**2 ) / rho_ij\n b = 0\n Pi_ij = a*dm.heavi(-c) + b*dm.heavi(c)\n\n # if Pi_ij == 0:\n # print(\"i,j:\",i,j)\n # print(\"c:\",c)\n # print(\"c_ij\",c_ij)\n # print(\"\")\n # assert Pi_ij != 0\n\n tot += m_i * h_ij**(-4) * Pi_ij * dW(r_ij1,h_ij) * (r_ij/r_ij1)\n\n return - 
tot", "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return w, v", "def L1U(A, d):\n \n\n n, _ = A.shape\n L = np.eye(n, n, dtype=A.dtype)\n U = np.zeros((n, n), dtype=A.dtype)\n\n U[0, 0] = A[0, 0]\n for k in range(1, n):\n km = max(0, k-d)\n L[k, km : k] = np.transpose(rforwardsolve(np.transpose(U[km:k, km:k]),\\\n np.transpose(A[k, km:k]), d))\n U[km:k+1, k] = rforwardsolve(L[km:k+1, km:k+1], A[km:k+1, k], d)\n return L, U", "def visualize_svd():", "def qr_decomposition(self):\n if self.m != self.n:\n raise NotImplementedError('QR decomposition not yet available ' +\n 'for non-square matrices')\n orig_basis = [vec.Vector.fromMatrixColumn(self, j)\n for j in range(self.m)]\n orthog_basis, norm_basis = [], []\n for j in range(self.m):\n u = orig_basis[j]\n for k in range(j):\n u -= orig_basis[j].project_onto(orthog_basis[k])\n orthog_basis.append(u)\n norm_basis.append(u.normalise())\n Q = Matrix.fromVectors(norm_basis)\n R = Q.transpose() * self\n return Q, R", "def incremental_svd(A, qr_flg=False):\n\n m = 256\n n = 7291\n\n n0 = 256\n\n if A.shape[0] != m or A.shape[1] != n: raise ValueError('Error: incorrect matrix size')\n\n start = time.clock()\n\n A0 = A[:, :n0]\n U, s, V = ln.svd(A0, full_matrices=False)\n\n # NOTE: s is a vector; np.diag(s) will produce a diagonal matrix\n for i in range(n0, n):\n\n # new matrix is just a single vector (i-th column of A)\n A1 = np.matrix(A[:, i]).T\n\n if qr_flg:\n J, K = ln.qr(A1 - np.dot(np.dot(U, U.T), A1))\n U_, s_, V_ = ln.svd(\n np.vstack((\n np.hstack((np.diag(s), np.dot(U.T, A1))),\n np.hstack((np.zeros((K.shape[0], s.shape[0])), K))\n )),\n full_matrices=False)\n\n # update the result of SVD\n U = np.dot(np.hstack((U, J)), U_)\n\n else:\n U_, s_, V_ = ln.svd(np.hstack((np.diag(s), np.dot(U.T, A1))), full_matrices=False)\n U = np.dot(U, U_)\n\n s = s_\n\n # NOTE: V from svd on NumPy is already transposed\n V = np.dot(V_,\n np.vstack((\n np.hstack((V, np.zeros((V.shape[0], i+1-V.shape[1])))),\n np.hstack((np.zeros((V_.shape[1]-V.shape[0], V.shape[1])), np.eye(V_.shape[1]-V.shape[0], i+1-V.shape[1])))\n ))\n )\n\n # for next computation, update A0\n A0 = np.hstack((A0, A1))\n\n elapsed_time = time.clock() - start\n print 'time:', elapsed_time\n\n return U, s, V", "def solve_lsqr(self, b, rho=None, v=None, x_init=None, options=None):\n\n # Add additional linear terms for the rho terms\n sizev = 0\n if rho is not None:\n vf = v.flatten() * np.sqrt(rho / 2.0)\n sizeb = self.K.input_size\n sizev = np.prod(v.shape)\n b = np.hstack((b, vf))\n\n input_data = np.zeros(self.K.input_size)\n output_data = np.zeros(self.K.output_size + sizev)\n\n def matvec(x, output_data):\n if rho is None:\n # Traverse compgraph\n self.K.forward(x, output_data)\n else:\n # Compgraph and additional terms\n self.K.forward(x, output_data[0:0 + sizeb])\n np.copyto(output_data[sizeb:sizeb + sizev],\n x * np.sqrt(rho / 2.0))\n\n return output_data\n\n def rmatvec(y, input_data):\n if rho is None:\n self.K.adjoint(y, input_data)\n else:\n self.K.adjoint(y[0:0 + sizeb], input_data)\n input_data += y[sizeb:sizeb + sizev] * np.sqrt(rho / 2.0)\n\n return input_data\n\n # Define linear operator\n def matvecComp(x):\n return matvec(x, output_data)\n\n def rmatvecComp(y):\n return rmatvec(y, input_data)\n\n K = LinearOperator((self.K.output_size + sizev, self.K.input_size),\n matvecComp, rmatvecComp)\n\n # Options\n if options is None:\n # Default options\n return lsqr(K, b)[0]\n else:\n if not isinstance(options, 
lsqr_options):\n raise Exception(\"Invalid LSQR options.\")\n return lsqr(K,\n b,\n atol=options.atol,\n btol=options.btol,\n show=options.show,\n iter_lim=options.iter_lim)[0]", "def HOSVD(A, k=None, tol=None):\n\n d=len(A.shape)\n\n if d==2:\n u, s, vt=svd(A, full_matrices=False)\n U=[u, vt.T]\n S=np.diag(s)\n else:\n U=[None]*d\n for j in range(0, d):\n U[j], s, vt=svd(unfold(A, j), full_matrices=False)\n\n S=A.copy()\n for i in range(0, d):\n S=nModeProduct(S, U[i].T, i)\n\n if k is not None:\n if isinstance(k, int): # if only one integer is assigned to k\n k=k*np.ones((len(A.shape),), dtype=int)\n\n S=subTensor(S, k=k)\n for j in range(0, d):\n U[j]=U[j][:, :k[j]]\n\n return S, U", "def run_exact(self):\n self.operator, var_form, opt = self.generate_VQE_args()\n\n exact_eigensolver = ExactEigensolver(self.operator, k=1)\n self.result = exact_eigensolver.run()\n\n solution = self.extract_solution(self.result, True)\n return solution", "def constructAEMV(Sn):\n\n # sols is a dict with elements as keys and counts the number of 3-tuples\n # that predicted 1 or 0.\n sols = defaultdict(lambda: defaultdict(int))\n\n for a, b, c in tripletGenerator(Sn):\n if isSolvableVect(a, b, c):\n d = solveVect(a, b, c)\n dtuple = tuple(d[:-1])\n dclass = d[-1]\n sols[dtuple][dclass] += 1\n\n # Majority vote procedure\n AEMV = [x for x in Sn]\n for x, vals in sols.items():\n xlist = list(x)\n if xlist in (x[:-1] for x in AEMV): continue\n maj_class = max(vals.keys(), key=lambda k: vals[k])\n AEMV.append(xlist + [maj_class])\n\n return AEMV", "def decompress_svd(size:tuple, svd_u, svd_s, svd_vh):\r\n m, n = size[0:2]\r\n u = np.zeros((3, m, m), dtype=np.float64)\r\n s = np.zeros((3, min(m, n)), dtype=np.float64)\r\n vh = np.zeros((3, n, n), dtype=np.float64)\r\n\r\n _,p = svd_s.shape\r\n u[:, 0:m, 0:p] = svd_u[:, :, :]\r\n s[:, 0:p] = svd_s[:, :]\r\n vh[:, 0:p, 0:n] = svd_vh[:, :, :]\r\n\r\n # SVD equation: A = U * D * VH\r\n img_svd = np.zeros(size, dtype=np.uint8)\r\n for k in range(3):\r\n d = np.zeros((m, n), dtype=np.float64)\r\n d[:min(m, n), :min(m, n)] = np.diag(s[k, :])[:, :]\r\n img_svd[:,:,k] = np.dot(np.dot(u[k,:,:], d), vh[k,:,:])\r\n return img_svd", "def leastsquares(A,b,qr=qrfact.qri_mgs_piv,alpha=0.5):\n \n\n A = numpy.array(A, dtype=float)\n m,n = A.shape\n z = numpy.zeros( n )\n a = numpy.zeros( n )\n x = numpy.zeros( n )\n b = numpy.transpose(b)[0]\n\n # do the QR factorization\n try:\n Q,R = qr(A)[:2] # Some QR routines return a third permutation P solving AP=QR.\n PA = A\n except TypeError:\n Q,R,P = qr(A,alpha)[:3] # Some QR routines return a third permutation P solving AP=QR.\n AP = numpy.dot( A, P )\n\n # Step 1'': orthogonalization of b against Q\n u = b\n for j in range( 0, n ) :\n # print \"Qj = \", Q[:,j]\n # print \"u = \", u\n # print \"dot = \", numpy.dot( Q[:,j], u )\n z[j] = numpy.dot( Q[:,j], u )\n u = u - z[j] * Q[:,j]\n\n # Step 2'': iterative orthogonalization of u\n ul2norm = numpy.linalg.norm( u )\n ii = 0\n while True : # iterate\n for j in range( 0, n ) :\n a[j] = numpy.dot( Q[:,j], u )\n z[j] = z[j] + a[j]\n u = u - a[j] * Q[:,j]\n\n ii = ii + 1\n ulnorm = ul2norm\n ul2norm = numpy.linalg.norm( u )\n\n #print ul2norm, ulnorm\n \n if (ul2norm > alpha * ulnorm) or ul2norm == 0 :\n # print \"used\", ii, \"orthogonalizations\"\n break\n\n #print z\n #print R\n\n # Step 3'': use back substitution to solve Rx = z\n for i in range( n-1, -1, -1 ) :\n x[i] = z[i]\n for j in range( i+1, n ) :\n x[i] = x[i] - R[i,j] * x[j]\n x[i] = x[i] / R[i,i]\n #print x\n\n #need to 
permute x according to permutation matrix P\n \n return numpy.dot( P, x )", "def spd_pinv(a, rcond=1e-10, square_root=False, check_stability=True):\n N, _N = a.shape\n assert N == _N, \"Matrix is not square!\"\n # get the eigen-decomposition\n # w, v = np.linalg.eigh(a)\n v, w, u = np.linalg.svd(a)\n sort_index = np.argsort(w)\n w = w[sort_index]\n v = v[:,sort_index]\n # check positive-definiteness\n ev_min = w.min()\n if ev_min <= 0:\n msg = \"Matrix is not positive-definite: min ev = {0}\"\n raise IndefiniteError(msg.format(ev_min))\n # check stability of eigen-decomposition\n if check_stability:\n # XXX use a preconditioner?\n if not np.allclose(a, np.dot(v, w[:, np.newaxis] * v.T)):\n raise NumericalError(\n \"Instability in eigh (condition number={:g})\".format(\n (w.max() / w.min())))\n\n # invert the \"large enough\" part of s\n cutoff = rcond * w.max()\n for i in range(N):\n if w[i] > cutoff:\n if square_root:\n # square root of the pseudo-inverse\n w[i] = np.sqrt(1. / w[i])\n else:\n w[i] = 1. / w[i]\n else:\n w[i] = 0.\n # compute the pseudo-inverse (using broadcasting)\n res = np.real(np.dot(v, w[:, np.newaxis] * v.T))\n # check stability of pseudo-inverse\n if check_stability:\n if square_root:\n pa = np.dot(res, res)\n approx_a = np.dot(a, np.dot(pa, a))\n msg = \"Instability in square-root of pseudo-inverse\"\n else:\n approx_a = np.dot(a, np.dot(res, a))\n msg = \"Instability in pseudo-inverse\"\n if not np.allclose(a, approx_a):\n # be a bit laxist by looking at the Mean Squared Error\n mse = np.mean((a - approx_a) ** 2)\n if mse > 1e-16:\n raise NumericalError(\"{} (MSE={:g})\".format(msg, mse))\n return res", "def bernstein_surface(i, j, nU, nV, u, v):\n return np.outer(comb(nU, i) * (u ** (nU - i)) * ((1 - u) ** i),\n comb(nV, j) * (v ** (nV - j)) * ((1 - v) ** j))", "def _solve_complex_unc(self, d, v, a, force):\n nt = force.shape[1]\n pc = self.pc\n if self.rbsize:\n # solve:\n # for i in range(nt-1):\n # drb[:, i+1] = drb[:, i] + G*vrb[:, i] +\n # A*(rbforce[:, i] + rbforce[:, i+1]/2)\n # vrb[:, i+1] = vrb[:, i] + Ap*(rbforce[:, i] +\n # rbforce[:, i+1])\n rb = self.rb\n if self.m is not None:\n if self.unc:\n rbforce = self.imrb * force[rb]\n else:\n rbforce = la.lu_solve(self.imrb, force[rb], check_finite=False)\n else:\n rbforce = force[rb]\n if nt > 1:\n G = pc.G\n A = pc.A\n Ap = pc.Ap\n if self.order == 1:\n AF = A * (rbforce[:, :-1] + rbforce[:, 1:] / 2)\n AFp = Ap * (rbforce[:, :-1] + rbforce[:, 1:])\n else:\n AF = (1.5 * A) * rbforce[:, :-1]\n AFp = (2 * Ap) * rbforce[:, :-1]\n drb = d[rb]\n vrb = v[rb]\n di = drb[:, 0]\n vi = vrb[:, 0]\n for i in range(nt - 1):\n di = drb[:, i + 1] = di + G * vi + AF[:, i]\n vi = vrb[:, i + 1] = vi + AFp[:, i]\n if not self.slices:\n d[rb] = drb\n v[rb] = vrb\n a[rb] = rbforce\n\n if self.ksize and nt > 1:\n self._delconj()\n # solve:\n # for i in range(nt-1):\n # u[:, i+1] = Fe*u[:, i] + Ae*w[:, i] + Be*w[:, i+1]\n Fe = pc.Fe\n Ae = pc.Ae\n Be = pc.Be\n ur_d = pc.ur_d\n ur_v = pc.ur_v\n rur_d = pc.rur_d\n iur_d = pc.iur_d\n rur_v = pc.rur_v\n iur_v = pc.iur_v\n ur_inv_d = pc.ur_inv_d\n ur_inv_v = pc.ur_inv_v\n\n kdof = self.kdof\n if self.m is not None:\n if self.unc:\n imf = self.invm * force[kdof]\n else:\n imf = la.lu_solve(self.invm, force[kdof], check_finite=False)\n else:\n imf = force[kdof]\n w = ur_inv_v @ imf\n if self.order == 1:\n ABF = Ae[:, None] * w[:, :-1] + Be[:, None] * w[:, 1:]\n else:\n ABF = (Ae + Be)[:, None] * w[:, :-1]\n\n y = np.empty((ur_inv_v.shape[0], nt), complex, order=\"F\")\n di = y[:, 0] = 
ur_inv_v @ v[kdof, 0] + ur_inv_d @ d[kdof, 0]\n for i in range(nt - 1):\n di = y[:, i + 1] = Fe * di + ABF[:, i]\n if self.systype is float:\n # Can do real math for recovery. Note that the\n # imaginary part of 'd' and 'v' would be zero if no\n # modes were deleted of the complex conjugate pairs.\n # The real part is correct however, and that's all we\n # need.\n ry = y[:, 1:].real.copy()\n iy = y[:, 1:].imag.copy()\n d[kdof, 1:] = rur_d @ ry - iur_d @ iy\n v[kdof, 1:] = rur_v @ ry - iur_v @ iy\n else:\n d[kdof, 1:] = ur_d @ y[:, 1:]\n v[kdof, 1:] = ur_v @ y[:, 1:]", "def truncated_svd(A,k=None):\n \n \n \n AHA=np.conj(A).T.dot(A)\n evals,evecs=la.eig(AHA)\n order=np.argsort(evals)\n\n evals=evals[order][::-1].copy()\n evecs=evecs.T[order][::-1].copy()\n m,n=AHA.shape\n \n tol=1e-12\n Vh=[]\n for i in xrange(0,m):\n\t\t if np.abs(evals[i])>=tol:\n\t \t\tVh+=[evecs[i]]\n \n Vh=np.array(Vh)\n s=np.sqrt(evals[:Vh.shape[0]])\n U=[]\n for i in xrange(0,len(s)):\n U+=[(1./s[i])*A.dot(Vh[i])]\n U=np.array(U).T\n \n return U,s,Vh", "def _dmatrix(kn_u, kn_d):\n d = np.zeros((kn_u.size, 4, 4), np.complex128)\n d_inv = np.zeros_like(d)\n\n d[:, 0, 0] = 1\n d[:, 0, 1] = 1\n d[:, 1, 0] = kn_u\n d[:, 1, 1] = -kn_u\n\n d[:, 2, 2] = 1\n d[:, 2, 3] = 1\n d[:, 3, 2] = kn_d\n d[:, 3, 3] = -kn_d\n\n # an analytic matrix inverse saves time\n inv_kn_u = 0.5 / kn_u\n inv_kn_d = 0.5 / kn_d\n\n d_inv[:, 0, 0] = 0.5\n d_inv[:, 0, 1] = inv_kn_u\n d_inv[:, 1, 0] = 0.5\n d_inv[:, 1, 1] = -inv_kn_u\n\n d_inv[:, 2, 2] = 0.5\n d_inv[:, 2, 3] = inv_kn_d\n d_inv[:, 3, 2] = 0.5\n d_inv[:, 3, 3] = -inv_kn_d\n\n return d, d_inv", "def rotate(p,q,A,V): \n n = A.shape[0]\n App, Aqq, Apq = A[p,p], A[q,q], A[p,q] #Initial values\n phi = 0.5*math.atan2(2*Apq, Aqq-App) #Find the rotation value\n c, s = math.cos(phi), math.sin(phi) #Calculate sin and cos\n\n #Update the matrix diagonal elements\n A[p,p] = c*c*App + s*s*Aqq - 2*s*c*Apq \n A[q,q] = s*s*App + c*c*Aqq + 2*s*c*Apq\n A[p,q] = 0 #This is zero by construction\n \n \n #Iterate over and update remaining off-diagonal elements\n for i in range(p):\n Aip, Aiq = A[i,p], A[i,q]\n A[i,p] = c*Aip - s*Aiq\n A[i,q] = c*Aiq + s*Aip\n \n for i in range(p+1,q):\n Api, Aiq = A[p,i], A[i,q]\n A[p,i] = c*Api - s*Aiq\n A[i,q] = c*Aiq + s*Api\n \n for i in range(q+1,n):\n Api, Aqi = A[p,i], A[q,i]\n A[p,i] = c*Api - s*Aqi\n A[q,i] = c*Aqi + s*Api\n \n #Update eigenvectors in matrix V\n for i in range(n):\n Vip, Viq = V[i,p], V[i,q]\n V[i,p] = c*Vip - s*Viq\n V[i,q] = s*Vip + c*Viq\n \n return A, V", "def _solve_explicit(self, initial_conditions):\n coeff = self.a ** 2 * self.tau / self.h ** 2\n current_solution = initial_conditions\n next_solution = np.empty_like(current_solution)\n solutions = []\n\n for t in self.t_grid:\n next_solution[1:-1] = (\n current_solution[1:-1]\n + (current_solution[:-2] - 2 * current_solution[1:-1] + current_solution[2:]) * coeff\n ) + self.rhs(self.x_grid[1:-1], t) * self.tau\n\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n next_solution[0] = self.left_bc(t)\n elif self.left_bc_type == \"NEUMANN\":\n next_solution[0] = (\n 4 * next_solution[1]\n - next_solution[2]\n - 2 * self.h * self.left_bc(t)\n ) / 3.0\n\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n next_solution[-1] = self.right_bc(t)\n elif self.right_bc_type == \"NEUMANN\":\n next_solution[-1] = (\n 4 * next_solution[-2]\n - next_solution[-3]\n + 2 * self.h * self.right_bc(t)\n ) / 3.0\n if self.mode == \"VISUALIZATION\":\n solutions.append((t, next_solution.copy()))\n current_solution 
= next_solution\n if self.mode == \"TEST\":\n # print(\"Result: \", current_solution.tolist())\n # print(\"Right answer: \", self.anl_solution.tolist())\n self._norma(current_solution)\n elif self.mode == \"VISUALIZATION\":\n return solutions", "def lu_decom(A,b):\n # init\n n = len(b)\n L = np.eye(n)\n U = np.zeros((n,n))\n x = np.zeros(n)\n y = np.zeros(n)\n\n # decomposition A = LU\n\n U[0,:] = A[0,:]\n L[1:,0] = A[1:,0] / U[0,0]\n\n for i in range(1,n):\n for j in range(i,n):\n\n U[i,j] = A[i,j] - np.dot(L[i,:i],U[:i,j])\n\n if j != n-1:\n L[j+1,i] = (A[j+1,i] - np.dot(L[j+1,:i],U[:i,i])) / U[i,i]\n\n # solve Ly=b\n y[0] = b[0]\n\n for k in range(1,n):\n y[k] = b[k] - np.dot(L[k,:k],y[:k])\n\n # solve Ux=y\n x[-1] = y[-1] / U[-1,-1]\n\n for k in range(n-2,-1,-1):\n x[k] = (y[k] - np.dot(U[k,k+1:],x[k+1:])) / U[k,k]\n\n return x,L,U", "def davidson_solver(ax_function, preconditioner, guess, e_conv=1.0E-8, r_conv=None, no_eigs=1, max_vecs_per_root=10, maxiter=100):\n\n if r_conv == None:\n r_conv = e_conv * 100\n d_tol = 1.0E-8\n\n # using the shape of the guess vectors to set the dimension of the matrix\n N = guess.shape[0]\n\n #sanity check, guess subspace must be at least equal to number of eigenvalues\n nli = guess.shape[1]\n if nli < no_eigs:\n raise ValueError(\"Not enough guess vectors provided!\")\n\n nl = nli\n converged=False\n count = 0\n sub_count = nli\n A_w_old = np.ones(nli)\n max_ss_size = nli * max_vecs_per_root\n B = np.zeros((N,N))\n B[:,:nli] = guess\n\n ### begin loop\n while count < maxiter:\n active_mask = [True for x in range(nl)]\n # Apply QR decomposition on B to orthogonalize the new vectors wrto all other subspace vectors\n ## orthogonalize preconditioned residuals against all other vectors in the search subspace\n B, r = np.linalg.qr(B)\n\n # compute sigma vectors corresponding to the new vectors sigma_i = A B_i\n sigma = np.zeros((N,nl))\n for i in range(nl):\n bvec = B[:,i]\n sigma[:,i] = ax_function(B[:,i])\n\n # compute subspace matrix A_b = Btranspose sigma\n A_b = np.dot(B[:,:nl].T, sigma)\n\n # solve eigenvalue problem for subspace matrix; choose n lowest eigenvalue eigpairs\n A_w, A_v = np.linalg.eig(A_b)\n\n # sorting eigenvalues and corresponding eigenvectors\n A_v = A_v[:, A_w.argsort()]\n A_w = A_w[A_w.argsort()]\n\n # here, check if no residuals > max no residuals, if so, collapse subspace\n sub_count = A_v.shape[0]\n if sub_count >= max_ss_size:\n print(\"Subspace too big. Collapsing.\\n\")\n Bnew = np.zeros((N,N))\n Bnew[:,:nli] = np.dot(B[:,:nl], A_v[:,:nli])\n B = Bnew\n nl = nli\n continue\n # else, build residual matrix\n ## residual_i = sigma * eigvec - eigval * B * eigvec\n norm = np.zeros(nli)\n for i in range(0, nli):\n mat = A - A_w[i] * np.identity(N) \n residual = np.dot(mat, np.dot(B[:,:sub_count], A_v[:,i]))\n\n ## check for convergence by norm of residuals\n norm[i] = np.linalg.norm(residual)\n ##apply the preconditioner (A_ii - A_v_i)^-1\n precon_resid = preconditioner(residual, i, A, A_w)\n\n ## normalize and add to search subspace if they're larger than a threshold\n if np.linalg.norm(precon_resid) > d_tol:\n B[:,nl+1] = precon_resid\n nl += 1\n\n # check for convergence by diff of eigvals and residual norms\n check = norm < r_conv\n eig_norm = np.linalg.norm(A_w[:no_eigs] - A_w_old[:no_eigs])\n A_w_old = A_w\n if(check.all() == True and eig_norm < e_conv):\n converged = True\n break\n count += 1 \n\n if converged:\n print(\"Davidson converged at iteration number {}. 
\\n Eigenvalues: {} \\n Eigenvectors: {}\".format(count, A_w[:no_eigs], A_v[:,:no_eigs]))\n else:\n print(\"Davidson did not converge. Max iterations exceeded.\")", "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n w,v=np.linalg.eig(matrix)\n ### END YOUR CODE\n return w, v", "def p2d(V,x,y):\n def s(a,N):\n \"\"\"Shortcut function to convert array x into a coluumn vector.\"\"\"\n a=np.reshape(a,(1,N**2),order='F').T\n return a\n N=V.shape[1]\n con=np.ones((x.shape[0],x.shape[1])) # constant terms\n xx,yy,xy=x*x,y*y,x*y\n xxx,yyy,xxy,xyy=xx*x,yy*y,xx*y,x*yy\n xxxx,yyyy,xxxy,xxyy,xyyy=xx*xx,yy*yy,xxx*y,xx*yy,x*yyy\n V2=s(V,N) \n lst=[yyyy,xxxy,xxyy,xyyy,xxx,yyy,xxy,xyy,xx,yy,xy,x,y,con]\n Q=s(xxxx,N)\n count = 0\n for elem in lst:\n elem=s(elem,N)\n count+=1\n Q=np.hstack((Q,elem))\n c=np.linalg.lstsq(Q,V2) \n c=c[0]\n theta=-0.5*np.arctan(c[11]/(c[10]-c[9]))\n Af=0.5*(c[9]*(1+1./np.cos(2*theta))+c[10]*(1-1./np.cos(2*theta)))\n Bf=0.5*(c[9]*(1-1./np.cos(2*theta))+c[10]*(1+1./np.cos(2*theta)))\n theta=180.*theta/np.pi\n return (Af, Bf, theta)", "def project(v, a):\n a_sqr = a[0] ** 2 + a[1] ** 2\n return a[0] * sc_mul(v, a) / a_sqr, a[1] * sc_mul(v, a) / a_sqr", "def build(self):\n # Generate a 4x4 identity matrix, which will be the basis for the view matrix.\n vtm = np.identity( 4, float )\n # Generate a translation matrix to move the VRP to the origin and then premultiply the vtm by the translation matrix.\n t1 = np.matrix( [[1, 0, 0, -self.vrp[0, 0]],\n [0, 1, 0, -self.vrp[0, 1]],\n [0, 0, 1, -self.vrp[0, 2]],\n [0, 0, 0, 1] ] )\n\n vtm = t1 * vtm\n\n # Calculate the view reference axes tu, tvup, tvpn.\n tu = np.cross(self.vup, self.vpn)\n tvup = np.cross(self.vpn, tu)\n tvpn = self.vpn.copy()\n\n # Normalize the view axes tu, tvup, and tvpn to unit length.\n\n # if this doesn't work, create my own normalize function\n tu = self.normalize(tu)\n tvup = self.normalize(tvup)\n tvpn = self.normalize(tvpn)\n\n # Copy the orthonormal axes tu, tvup, and tvpn back to self.u, self.vup and self.vpn.\n self.u = tu.copy()\n self.vup = tvup.copy()\n self.vpn = tvpn.copy()\n\n # Use the normalized view reference axes to generate the rotation matrix \n # to align the view reference axes and then premultiply M by the rotation.\n r1 = np.matrix( [[ tu[0, 0], tu[0, 1], tu[0, 2], 0.0 ],\n [ tvup[0, 0], tvup[0, 1], tvup[0, 2], 0.0 ],\n [ tvpn[0, 0], tvpn[0, 1], tvpn[0, 2], 0.0 ],\n [ 0.0, 0.0, 0.0, 1.0 ] ] )\n\n vtm = r1 * vtm\n\n # Translate the lower left corner of the view space to the origin.\n # extent of the view volume in the X and Y view axes.\n vtm = self.T( 0.5*self.extent[0], 0.5*self.extent[1], 0 ) * vtm\n\n vtm = self.S( -self.screen[0] / self.extent[0], -self.screen[1] / self.extent[1], 1.0 / self.extent[2] ) * vtm\n\n vtm = self.T( self.screen[0] + self.offset[0], self.screen[1] + self.offset[1], 0 ) * vtm\n\n return vtm", "def ras2ijk(self, A):\r\n # productive #math #coordinate-space-conversion #frequent\r\n if frequent: profprint()\r\n m = vtk.vtkMatrix4x4()\r\n volumeNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\r\n volumeNode.GetIJKToRASMatrix(m)\r\n m.Invert()\r\n imageData = volumeNode.GetImageData()\r\n ijk = [0, 0, 0]\r\n k = vtk.vtkMatrix4x4()\r\n o = vtk.vtkMatrix4x4()\r\n k.SetElement(0, 3, A[0])\r\n k.SetElement(1, 3, A[1])\r\n k.SetElement(2, 3, A[2])\r\n k.Multiply4x4(m, k, o)\r\n ijk[0] = o.GetElement(0, 3)\r\n ijk[1] = o.GetElement(1, 3)\r\n ijk[2] = o.GetElement(2, 3)\r\n return ijk", "def 
calculate_posvij_matrices(main_tetrad_ark):\n\n # Import all the possible solutions to the Vij matrices\n vij_possibilities = matrix_outerprod_calc.illuminator_of_elfes()\n vij_matrices = []\n\n print(\" \")\n print(\" Calculating Vij matrices\")\n print(\" \")\n # for i in range(0, len(main_tetrad_ark)):\n for i in range(0, len(vij_possibilities)):\n tet_i = [x[1] for x in main_tetrad_ark[i]]\n tri_tet = [np.transpose(i) for i in tet_i]\n print(\"# ********************************\")\n # print(\" \")\n print(\"MATRIX i: \", i)\n print(\" \")\n for j in range(0, len(main_tetrad_ark)):\n tet_j = [x[1] for x in main_tetrad_ark[j]]\n trj_tet = [np.transpose(j) for j in tet_j]\n vij_temp = []\n # print(\"# ********************************\")\n print(\" \")\n print(\"MATRIX j: \", j)\n temp_zero = np.zeros((4,4), dtype=int)\n for x in range(0,len(tet_i)):\n test_1half = np.dot(tri_tet[x],tet_j[x])\n test_2half = np.dot(trj_tet[x],tet_i[x])\n test_difs = np.subtract(test_1half, test_2half)\n # print(\" \")\n # print(test_difs)\n temp_mat = np.dot(tri_tet[x],tet_j[x]) - np.dot(trj_tet[x],tet_i[x])\n vij_temp.append(temp_mat)\n # print(\"\")\n temp_add1 = np.add(vij_temp[0], vij_temp[1])\n temp_add2 = np.add(temp_add1, vij_temp[2])\n tempf = np.add(temp_add2, vij_temp[3])\n # tempf = np.divide(temp_add3, 2)\n for ijx in vij_possibilities:\n if np.array_equal(temp_addf, ijx[0]):\n print(\"*************$$$$$$$$$$$$$$$$$$***************** \")\n print(\"l-solution found:\", ijx[1])\n print(temp_addf)\n print(\"\")\n print(ijx[0])\n if np.array_equal(temp_addf, temp_zero):\n pass\n else:\n vij_matrices.append(temp_addf)\n # print(\"\")\n print(temp_addf)\n # vij_matrices.append(temp_addf)\n vijmats_size = sys.getsizeof(vij_matrices)\n print(\"Size of Vij Matrices list: bytes / kilobytes:\", vijmats_size, vijmats_size/1024)\n print(\"Length of Vij Matrices\")\n print(len(vij_matrices))\n print(vij_matrices)\n pass", "def equation(self):\n mat = np.zeros((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs[0:self.nlayers - 1] = 0.0\n rhs[self.nlayers - 1] = self.Qc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n head = e.potinflayers(self.xc, self.yc, self.layers) / self.aq.Tcol[self.layers, :]\n mat[0:self.nlayers - 1, ieq:ieq + e.nunknowns] = head[:-1] - head[1:]\n if e == self:\n for i in range(self.nlayers - 1):\n mat[i, ieq + i] -= self.resfac[i]\n mat[i, ieq + i + 1] += self.resfac[i + 1]\n mat[self.nlayers - 1, ieq:ieq + self.nlayers] = 1.0\n ieq += e.nunknowns\n else:\n head = e.potentiallayers(self.xc, self.yc, self.layers) / self.aq.T[self.layers]\n rhs[0:self.nlayers - 1] -= head[:-1] - head[1:]\n return mat, rhs", "def compact_svd(A, tol=1e-6):\r\n eigs, vecs = la.eig(A.conj().T@A)\r\n svs = np.sqrt(eigs)\r\n #sort eigenvalues and eigenvectors accordingly\r\n sorter = list(zip(svs,vecs.T))\r\n sorter.sort(reverse=True, key=lambda tup: tup[0])\r\n svs = [x[0] for x in sorter]\r\n vecs = [x[1] for x in sorter]\r\n #find number of nonzero eigenvalues\r\n r_not = svs.count(0)\r\n r = len(svs) - r_not\r\n svs_1 = np.array(svs[:r])\r\n vecs_1 = np.array(vecs[:r])\r\n u_1 = (A@vecs_1)/svs_1\r\n\r\n return u_1, svs_1, vecs_1.conj().T", "def eigen_decomposition(self):\n w, V = linalg.eigh(self.K)\n c = w[::-1]\n if isinstance(self.num_xi, float):\n percent_energy = np.cumsum(c) / np.sum(c)\n self.num_xi = np.arange(c.shape[0])[percent_energy < self.num_xi][-1] # num_xi changes\n self.Lambda = w[::-1][:self.num_xi]\n self.V = 
V[:, ::-1][:, :self.num_xi]", "def decomposition_method(matrix):\n x, y, z = 0, 1, 2 # indices\n K = np.array([\n [R[x, x]-R[y, y]-R[z, z], R[y, x]+R[x, y], R[z, x]+R[x, z], R[y, z]-R[z, y]],\n [R[y, x]+R[x, y], R[y, y]-R[x, x]-R[z, z], R[z, y]+R[y, z], R[z, x]-R[x, z]],\n [R[z, x]+R[x, z], R[z, y]+R[y, z], R[z, z]-R[x, x]-R[y, y], R[x, y]-R[y, x]],\n [R[y, z]-R[z, y], R[z, x]-R[x, z], R[x, y]-R[y, x], R[x, x]+R[y, y]+R[z, z]]\n ])\n K = K / 3.0\n\n e_vals, e_vecs = np.linalg.eig(K)\n print('Eigenvalues:', e_vals)\n print('Eigenvectors:', e_vecs)\n max_index = np.argmax(e_vals)\n principal_component = e_vecs[max_index]\n return principal_component", "def qp(self, eta_u, eta_v):\n z = 1\n v = np.array(eta_u)\n n_features = v.shape[0]\n u = np.sort(v)[::-1]\n cssv = np.cumsum(u) - z\n ind = np.arange(n_features) + 1\n cond = u - cssv / ind > 0\n rho = ind[cond][-1]\n theta = cssv[cond][-1] / float(rho)\n uu = np.maximum(v - theta, 0)\n vv = np.array(())\n return uu, vv", "def eigenvalue_decomposition (a_t_a_matrix ):\r\n # get eigenvalues and -vectors from ATA matrix\r\n eigenvalues = np.zeros (a_t_a_matrix.shape[0] )\r\n eigenvectors = np.zeros ((a_t_a_matrix.shape[0], a_t_a_matrix.shape[0] ))\r\n evals, evecs = np.linalg.eig (a_t_a_matrix )\r\n\r\n # sort them\r\n indices = np.argsort (-evals ) # reverse sort: greatest numbers first\r\n for loop_count, index in enumerate(indices ):\r\n eigenvalues[loop_count] = evals[index]\r\n eigenvectors[:, loop_count] = evecs[:, index]\r\n\r\n # get the normal vector, normalize it and if it's turned to the ground, turn it around\r\n normal_vector = normalize_vector (eigenvectors[:, -1] ) # the last (smallest) vector is the normal vector\r\n if (normal_vector[2] < 0):\r\n normal_vector = normal_vector * -1\r\n\r\n return normal_vector, eigenvalues[-1]", "def eigenv2tensor(axis):\n vec = np.ones((3, 3))\n vecval = np.ones((3, 3))\n for i in xrange(len(axis)):\n vmag = np.linalg.norm(axis[i])\n v = axis[i] / vmag\n #print v\n vec[:, i] = v\n vecval[:, i] = axis[i]\n adp = np.linalg.solve(vec, vecval)\n return adp", "def el2rv(mu,a,e,i,capom,om,f):\n\n prec = 1.0e-13 #user can change this if more precision needed (just runs slower)\n\n #compute the unit vector\n u = om + f\n xhat = np.cos(u)*np.cos(capom) - np.cos(i)*np.sin(capom)*np.sin(u)\n yhat = np.cos(u)*np.sin(capom) + np.cos(i)*np.cos(capom)*np.sin(u)\n zhat = np.sin(i)*np.sin(u)\n\n #compute the angular momentum vector (unit vector)\n hx = np.sin(capom)*np.sin(i)\n hy = -np.cos(capom)*np.sin(i)\n hz = np.cos(i)\n\n #assuming not parabolic, here the magnitudes of the vectors\n r = a * (1.0 - e*e) / (1.0 + e*np.cos(f))\n h = ( mu*a*(1.0 - e*e) )**0.5\n\n #position vectors\n x = r * xhat\n y = r * yhat\n z = r * zhat\n\n #compute components of vector theta hat\n thx = hy * zhat - hz * yhat\n thy = hz * xhat - hx * zhat\n thz = hx * yhat - hy * xhat\n\n #obtain the velocity vector's components and calculate v\n thdot = h/(r*r)\n rdot = e*mu*np.sin(f)/h\n\n vx = r * thdot * thx + rdot * xhat\n vy = r * thdot * thy + rdot * yhat\n vz = r * thdot * thz + rdot * zhat\n\n return x,y,z", "def householder_transformation(v):\n size_of_v = v.shape[1]\n e1 = np.zeros_like(v)\n e1[0, 0] = 1\n vector = get_norm(v) * e1\n if v[0, 0] < 0:\n vector = - vector\n u = (v + vector).astype(np.float32)\n norm2 = get_norm(u)\n u = u / norm2\n H = np.identity(size_of_v) - ((2 * np.matmul(np.transpose(u), u)) / np.matmul(u, np.transpose(u)))\n return H, u", "def _solve_principal_eig(a):\n w, v = np.linalg.eig(a)\n idx = 
np.argmax(w)\n eig_val = w[idx]\n eig_vec = v[:, idx]\n\n # Let eig_vec non-negative\n sign = 0\n i = 0\n while sign == 0 and i < len(eig_vec):\n sign = np.sign(eig_vec[i])\n i += 1\n if sign < 0:\n eig_vec *= -1\n\n return eig_val, eig_vec", "def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):\n # SVD Factorization\n U, s, Vt = scipy.linalg.svd(A, full_matrices=False)\n\n # Remove dimensions related with very small singular values\n U = U[:, s > tol]\n Vt = Vt[s > tol, :]\n s = s[s > tol]\n\n # z = x - A.T inv(A A.T) A x\n def null_space(x):\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n z = x - A.T.dot(v)\n\n # Iterative refinement to improve roundoff\n # errors described in [2]_, algorithm 5.1.\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n # v = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(z)\n aux2 = 1/s*aux1\n v = U.dot(aux2)\n # z_next = z - A.T v\n z = z - A.T.dot(v)\n k += 1\n\n return z\n\n # z = inv(A A.T) A x\n def least_squares(x):\n # z = U 1/s V.T x = inv(A A.T) A x\n aux1 = Vt.dot(x)\n aux2 = 1/s*aux1\n z = U.dot(aux2)\n return z\n\n # z = A.T inv(A A.T) x\n def row_space(x):\n # z = V 1/s U.T x\n aux1 = U.T.dot(x)\n aux2 = 1/s*aux1\n z = Vt.T.dot(aux2)\n return z\n\n return null_space, least_squares, row_space", "def qr(in_A):\n # input checks\n Ndim = numpy.ndim(in_A)\n assert Ndim == 2\n N,M = numpy.shape(in_A)\n assert N==M\n D,P = in_A[0,0].data.shape\n\n # prepare R and QT\n R = in_A.copy()\n QT = numpy.array([[UTPS(numpy.zeros((D,P))) for c in range(N)] for r in range(N) ])\n for n in range(N):\n QT[n,n].data[0,:] = 1\n\n # main algorithm\n for n in range(N):\n for m in range(n+1,N):\n a = R[n,n]\n b = R[m,n]\n r = numpy.sqrt(a**2 + b**2)\n c = a/r\n s = b/r\n\n for k in range(N):\n Rnk = R[n,k]\n \n R[n,k] = c*Rnk + s*R[m,k]\n R[m,k] =-s*Rnk + c*R[m,k];\n\n QTnk = QT[n,k]\n QT[n,k] = c*QTnk + s*QT[m,k]\n QT[m,k] =-s*QTnk + c*QT[m,k];\n # #print 'QT:\\n',QT\n # #print 'R:\\n',R\n # #print '-------------'\n\n return QT.T,R" ]
[ "0.606377", "0.591735", "0.57787323", "0.57064146", "0.5663122", "0.563016", "0.5629043", "0.5616047", "0.5532304", "0.5528181", "0.55007565", "0.54783535", "0.5475328", "0.5461482", "0.5454432", "0.5375918", "0.5353961", "0.5350844", "0.53398156", "0.53277934", "0.5286591", "0.52738565", "0.5270936", "0.5265748", "0.5254883", "0.52508765", "0.52484256", "0.5236953", "0.52296275", "0.52265453", "0.52246964", "0.5224633", "0.52151227", "0.52148515", "0.52118397", "0.52022654", "0.5197023", "0.51951975", "0.5187899", "0.5183565", "0.51787734", "0.51767063", "0.5162273", "0.5153595", "0.51526195", "0.51514506", "0.51502854", "0.51450765", "0.51319146", "0.5129425", "0.51237273", "0.5108595", "0.5103289", "0.5100525", "0.50961435", "0.50945014", "0.5093197", "0.5085116", "0.50844395", "0.508311", "0.50814575", "0.50812626", "0.5081145", "0.50759965", "0.50657064", "0.5063418", "0.50617987", "0.5054108", "0.50462615", "0.50374943", "0.5033858", "0.50286406", "0.5028461", "0.50274736", "0.5026233", "0.50259674", "0.50146186", "0.5007075", "0.50040287", "0.49964103", "0.49915424", "0.4989607", "0.49851325", "0.49810013", "0.49774274", "0.4972365", "0.49670717", "0.49612683", "0.49568877", "0.49547833", "0.49510038", "0.49490464", "0.49394992", "0.49344516", "0.49256805", "0.49255544", "0.4923251", "0.49229595", "0.49196276", "0.49169004", "0.4912055" ]
0.0
-1
This function dictates whether the main GMRES while loop will proceed.
def gmres_krylov_loop_condition(gmres_carry: GmresCarryType) -> bool:
  gmres_constants, gmres_variables = gmres_carry
  tol = gmres_constants[0]
  k = gmres_variables[0]
  err = gmres_variables[4]
  n_kry = gmres_constants[4]

  def is_iterating(k, n_kry):
    return k < n_kry

  def not_converged(args):
    err, tol = args
    return err >= tol

  return jax.lax.cond(is_iterating(k, n_kry),  # Predicate.
                      not_converged,           # Called if True.
                      lambda x: False,         # Called if False.
                      (err, tol))              # Arguments to calls.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nanny(self): \n while not self.started and not self.failed:\n eventlet.sleep(.1)\n return not self.failed", "def infinite_loop():\n return True", "def runnable(self):\n if \"calculations\" not in self.ctx:\n return True # if no calculations have run\n return self.ctx.running_calc < 2 and self.can_restart()", "async def checkNewLoop(self):\n pass", "def is_up(self):\n self.loop = file_to_loop(self.loopFile)\n if len(self.loop) == 0:\n return False\n return True", "def stopCond(self):\n\t\treturn False", "def player_loop(self):\n\n while True:\n # send message to game that you are ready\n msg = self.receiver()\n if msg[\"game_over\"]:\n return", "def loop():\n global loop_idx\n sys.stdout.write('loop index %d/%d\\r\\n' % (loop_idx, _LOOPS))\n time.sleep(0.5)\n loop_idx += 1\n return loop_idx > _LOOPS", "def should_continue():\n\n return LoopContinueEvent()", "def do_run(self):\n return not self._do_exit.isSet()", "def loop(self):\n while self.dispatch(True) is not QUIT:\n pass", "def _in_while_loop(control_flow_node_map, op_name):\n return op_name in control_flow_node_map and \"LoopCond\" in control_flow_node_map[op_name]", "def running(self) -> bool:", "def isFinished():", "def isFinished():", "def isFinished():", "def check_finish(self):\r\n return not self.proc.is_alive()", "def _keep_running():\n return True", "def __bool__(self):\n return self.wait(0)", "def while_(self):\n if self.line.startswith('wh'):\n if self.line.endswith('while') is False:\n return True", "def is_done():\n return False", "def poll(self):\n return False", "def go_again(self):\n return False", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()", "def _continue_running(self):\n if self._signal_recieved == signal.SIGINT:\n return False\n\n return True", "def Continue():\n # adjust this to take as many steps as you need\n return warp.top.it <= 500", "async def should_handle(self):\n local_controller = self.controller\n workers_total = len(local_controller.workers)\n geysers = local_controller.extractors\n drones_in_queue = local_controller.already_pending(DRONE)\n if (\n not local_controller.close_enemies_to_base\n and local_controller.can_train(DRONE)\n and not local_controller.counter_attack_vs_flying\n ):\n if workers_total == 12 and not drones_in_queue:\n return True\n if (\n workers_total in (13, 14, 15)\n and len(local_controller.overlords) + local_controller.already_pending(OVERLORD) > 1\n ):\n return True\n optimal_workers = min(\n sum(x.ideal_harvesters for x in local_controller.townhalls | geysers), 90 - len(geysers)\n )\n return (\n workers_total + drones_in_queue < optimal_workers\n and np.sum(\n np.array(\n [\n len(local_controller.zerglings),\n len(local_controller.hydras),\n len(local_controller.ultralisks),\n ]\n )\n * np.array([1, 2, 3])\n )\n > 15\n )\n return False", "def can_run(self):\n\t\treturn self._start is None", "def running(self):\n return self.more() or not self.stopped", "def loop(self):\n while not rospy.is_shutdown():\n\n rospy.logdebug(\"Loop\")\n state = self.move_base.get_state()\n\n self.counter +=1\n if(self.counter>6 or state==3):\n rospy.logdebug(\"-------------------------\")\n rospy.logdebug(\"Recalculate Frontriers ! 
\")\n rospy.logdebug(\"-------------------------\")\n\n self.counter = 0\n frontiers_num = self.update()\n\n #break condition\n if frontiers_num==0 :\n rospy.logdebug(\"---------------------------------------\")\n rospy.logdebug(\"---------------------------------------\")\n rospy.logdebug(\"NO FRONTIERS FOUND EXPLORATION COMPLETE\")\n rospy.logdebug(\"---------------------------------------\")\n rospy.logdebug(\"---------------------------------------\")\n break\n\n\n\n rate.sleep()", "def run(self):\n #=======================================================================\n #\n # TODO: Replace this do-nothing code with some which does something.\n # Don't worry about looping (though you can), since this will be called\n # over and over again by the main appliance loop.\n #\n #=======================================================================\n self.logger.info('Nothing to do; sleeping for a while.')\n sleep(10)\n\n # Return something truthy to continue, anything else to exit.\n return True", "async def loop(self, ctx: commands.Context) -> Optional[bool]:\n\n self.queue[ctx.guild.id].loop = (\n Loops.LOOP if self.queue[ctx.guild.id].loop != Loops.LOOP else Loops.NO_LOOP\n )\n return self.queue[ctx.guild.id].loop == Loops.LOOP", "def _run(self):\n while(self._loop):\n pass", "def go_again(self):\n return True", "def _termination(self):\n if self._never_terminate:\n return False\n\n if self._counter >= self._max_steps:\n return True\n\n return self.is_fallen() # terminates automatically when in fallen state", "def is_next_run(self, local_time):\n return local_time <= self.stop_time", "def waiting(self) -> bool: # pylint: disable=W0221\n return True", "def loop(self):\n while not self.should_exit:\n self._run_once()\n\n self.on_exit()", "def is_done(self):\n\n return not self.thread.is_alive()", "def KeepAdvancingSolutionLoop(self):\n return self.step < self.nsteps", "def is_ready_to_reap(self):\n self.calc_progress()\n return self._num_results > 0 and (\n self._num_results == self.num_sown_batches\n )", "def isDone(self):\n if self.current_turn >= self.MAX_TURNS: return True\n if self.last_user_action[\"action\"] == \"END\": return True\n return False", "def isstarted():", "def burn_in_finished():\n global trials\n if trials <= 0:\n return True\n trials -= 1\n return False", "def has_finished():", "def _server_poll_expcompleted_(self):\n #print \"class Princeton_CCD function _server_poll_expcompleted_\" \n try:\n last_state = self.polled_running\n except (AttributeError,UnboundLocalError):\n self.polled_running = False\n last_state = False\n self.polled_running = self.query_running()\n if (not bool(last_state) and bool(self.polled_running)):\n self.begin_acq_time = time.time()\n #print self.query_running(), last_state\n #if ((last_state == True) and (self.polled_running == False)): CP\n if (bool(last_state) and not bool(self.polled_running)):\n self.end_acq_time = time.time()\n return True\n else:\n return False", "def not_converging(self):\n if len(self.rundir) >= int(self.settings[\"run_limit\"]):\n return True\n return False", "def _do_iteration(self):\n return True", "def test_run_loop_success(self):\n found = False\n pyint = Interpreter(limit=15)\n try:\n pyint.run(code=BF_CODE_LOOP_TWICE)\n except SystemExit: \n found = True\n self.assertFalse(found)", "def do_simulation_loop(self) -> tuple[bool, bool]:\n running, reset = True, False\n for event in pygame.event.get():\n if check_for_quit(event):\n running = False\n elif check_for_reset(event):\n running = False\n reset = True\n 
self.simulation.do_step()\n self.update_visualization()\n self.simple_pygame.loop()\n return running, reset", "def running(self):\r\n return self.__maxlen__ > 0", "def process(self):\n return False", "def looping(self):\n\n pretty_print(\"To Exit enter: 101\", \":\")\n pretty_print(\"To continue press any number key:\", \":\")\n decision = get_int_input()\n\n if decision == 101:\n self.again = False", "def do_exit(self):\n self._loop = False\n print('exiting')\n return True", "def wait_forever(self):\r\n while True:\r\n time.sleep(0.5)", "def block_while_running():\n runs = is_running()\n while runs:\n runs = is_running()\n time.sleep(10)", "def isFinished(self):\n return False", "def status_callback():\n if args['retire_idle']:\n return False\n\n return True", "def loop_exit_on_q(self, stats_period):\n start_time = time.time() # This is the only way I managed to make a curse application with\n while time.time() - start_time <= stats_period: # screen refreshing exit on key pressed: make window.getch()\n key = self.myscreen.getch() # non blocking with curses.nodelay(1) (otherwise main loop is interrupted)\n if key == ord('q'): # and check it every [10-50]ms to be responsive.\n curses.endwin()\n hacked_print(\"Monitoring ended by user\") # cf hacked_print method\n return 1\n curses.napms(self.GETCH_REFRESH_MS)", "def should_reschedule(self, iteration):\n if not self.max_iterations:\n return True\n return iteration < self.max_iterations", "def pending_work(self) -> bool:\n return len(self.ongoing) > 0", "def run_until_stop(self):\n while self.commands[self.pointer] != END:\n # Get the cmd\n cmd = self.commands[self.pointer]\n opcode = cmd % 100\n modes = cmd // 100\n \n vals, locs, self.pointer = get_vals_and_locs(opcode, modes, self.pointer, self.commands)\n \n if opcode == ADD:\n self.commands[locs[2]] = vals[0] + vals[1]\n elif opcode == MUL:\n self.commands[locs[2]] = vals[0] * vals[1]\n elif opcode == INP:\n if self.inputs:\n self.commands[locs[0]] = self.inputs.pop(0)\n else:\n # Put the pointer back, so we run this opcode again\n self.pointer -= 2\n return False\n elif opcode == OUT:\n self.outputs.append(vals[0])\n elif opcode == JIT:\n if vals[0] != 0:\n self.pointer = vals[1]\n elif opcode == JIF:\n if vals[0] == 0:\n self.pointer = vals[1]\n elif opcode == LT:\n self.commands[locs[2]] = 1 if vals[0] < vals[1] else 0\n elif opcode == EQ:\n self.commands[locs[2]] = 1 if vals[0] == vals[1] else 0\n else:\n print(\"FAIL????\")\n\n return True", "def has_loop(self) -> bool:\n try:\n list(self)\n return False\n except ContainsLoopError:\n return True", "def run(self):\n while not self.turn_over:\n self.go()", "def _loop(self):\n while True:\n if GameLoop.getInstance()._cancelation_token==True:\n break\n self._update_signal.notify_all()\n sleep(1/60)", "def running_loop(self, run_check_ms=None):\r\n if self.board.area.down_click_call is None:\r\n raise SelectError(\"board.area.down_click_call is not set\")\r\n if self.numgame is not None and self.ngame >= self.numgame:\r\n SlTrace.lg(f\"running_loop: ngame={self.ngame} > numgame {self.numgame}\")\r\n self.running = False\r\n self.run = False\r\n return\r\n \r\n self.running = True # Still in game\r\n self.run = True # progressing (not paused)\r\n self.first_time = True \r\n self.game_start_ts = SlTrace.getTs(6)\r\n self.game_control_updates()\r\n if run_check_ms is not None:\r\n self.run_check_ms = run_check_ms\r\n BlinkerMultiState.enable()\r\n \r\n while self.running:\r\n SlTrace.lg(\"running_loop\", \"running_loop\")\r\n 
self.mw.update()\r\n if ActiveCheck.not_active():\r\n break\r\n SlTrace.lg(\"running_loop active\", \"running_loop\")\r\n self.mw.update_idletasks()\r\n if self.event_check():\r\n continue # Gobble up pending events\r\n \r\n if (self.cmd_stream is not None\r\n and not self.cmd_stream.is_eof()):\r\n self.run_file()\r\n self.first_time = False # Assume file did that\r\n continue # Check if more\r\n else:\r\n if self.first_time:\r\n if not self.start_game():\r\n break\r\n self.first_time = False\r\n if not self.make_move():\r\n break \r\n \r\n SlTrace.lg(\"running_loop after loop\", \"running_loop\")\r\n BlinkerMultiState.disable()\r\n \r\n if self.on_end is not None:\r\n SlTrace.lg(\"running_loop doing on_end\", \"running_loop\")\r\n self.mw.after(0, self.on_end) # After run processing\r", "def should_poll(self):\r\n return False", "async def should_handle(self):\n local_controller = self.controller\n self.selected_pools = local_controller.pools.ready.idle\n return (\n local_controller.can_upgrade(ZERGLINGATTACKSPEED, RESEARCH_ZERGLINGADRENALGLANDS, self.selected_pools)\n and local_controller.hives\n )", "def is_done(self):\n\n # Robosuite envs always rollout to fixed horizon.\n return False", "def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass", "def recurrent(self):\n return False", "def loop_running(self):\r\n if self.is_running:\r\n if self.first_time:\r\n self.time = time.time()\r\n self.first_time = False\r\n else:\r\n self._loop_end() # Finish the previous loop.\r\n self._loop_begin()\r\n else:\r\n self._loop_end()\r\n self.destroy()\r\n\r\n return self.is_running", "def wait_for_fingerscore(self):\n while True:\n self.recv_event()\n if self.last_event_code == DEFS.EF_FPFTR:\n return self.parse_score_fp_event()", "def ready(self):\n return self.counter > 0", "def verify_ending(self):\n self._fast_forward_to_penultimate_play()\n if self.game_status.game_over:\n # Game shouldn't be over quite yet!\n self.reset()\n return False\n\n self.apply_next_event()\n game_over = self.game_status.game_over\n excess_outs = self.game_status.excess_outs\n self.reset()\n return game_over and not excess_outs", "def running(self):\n\t\treturn self._start is not None", "def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True", "def has_more_work(self):\n return self.done_counter < self.N", "async def hold_loop(self, *args) -> bool:\n raise NotImplementedError", "def _is_working():\n global _worker\n return _worker is not None and _worker.is_alive()", "def main_loop(self) -> None:\n while True:\n player = self._players[self._current_player]\n hit = True\n while hit:\n self.select_square(player)\n if self.menu_called: # go to menu\n self.menu_called = False\n return\n hit = player.shoot()\n if player.has_won():\n self.display_manager.display_end_game_message(player)\n self.game_over = True\n return\n self._current_player = (self._current_player + 1) % len(self._players)", "def loop_forever(self):\n while self.running:\n time.sleep(0.01)", "def is_idle(self) -> bool:", "def main_loop(self):\n while self.game_manager.game_state != GameState.Quit:\n\n self.handle_events()\n self.handle_ui_response()\n #in menu\n if self.game_manager.game_state == GameState.Menu: \n self.display.clear()\n\n #in game\n elif self.game_manager.game_state == GameState.Running:\n self.game_manager.move_players()\n\n #after game\n elif self.game_manager.game_state == GameState.Finished:\n if self.game_manager.winner == None:\n self.game_manager.player1.decay()\n 
self.game_manager.player2.decay() \n else:\n self.game_manager.loser.decay()\n self.game_manager.loser.draw()\n\n #perform game manager actions\n self.game_manager.act()\n #do all the rendering stuff\n self.render_scene()\n #control FPS\n self.clock.tick(self.FPS)", "def _allow_reset(self):\r\n return (self.child_state == self.DONE and self.child_attempts < self.max_attempts)", "def loop(self):\r\n self._initialize()\r\n if self._waitEvConnected(None):\r\n return self._handleEvConnected()\r\n else:\r\n return False", "def run(self):\n while True:\n if self.is_game_over():\n break\n self.run_turn()", "def waitForScanToFinish(self):\n while True:\n answers = self.readandparseCAM()\n if answers is not None:\n for a in answers:\n if 'inf' in a.keys():\n if a['inf'] == \"scanfinished\":\n return", "def detect_loop(self):\n tortoise = self.head\n hare = self.head\n while hare:\n tortoise = tortoise.next\n hare = hare.next.next\n if tortoise == hare:\n return True\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False", "def should_poll(self):\n return False" ]
[ "0.6914823", "0.6818568", "0.66382414", "0.66197145", "0.64698076", "0.64450336", "0.64174724", "0.64160615", "0.6401387", "0.6371226", "0.6364635", "0.6352241", "0.6348979", "0.632865", "0.632865", "0.632865", "0.629387", "0.6276963", "0.62566316", "0.6251263", "0.6192301", "0.61825", "0.6166445", "0.6146366", "0.61372954", "0.6137002", "0.61368763", "0.613027", "0.61238205", "0.61173147", "0.6111277", "0.61081153", "0.6096044", "0.6094392", "0.608188", "0.6060652", "0.60490894", "0.6045701", "0.6044866", "0.6016928", "0.6012062", "0.6006728", "0.6005925", "0.60017556", "0.59955454", "0.59947324", "0.5989399", "0.5988785", "0.5988687", "0.5985509", "0.5981219", "0.59791595", "0.5978368", "0.59747875", "0.5957138", "0.595547", "0.59551895", "0.595261", "0.5943803", "0.5942328", "0.5933992", "0.59330004", "0.59252053", "0.5923364", "0.5913479", "0.59054655", "0.59042716", "0.590152", "0.5891736", "0.5877224", "0.58748233", "0.5873897", "0.58721983", "0.58718926", "0.5870108", "0.58692586", "0.586894", "0.5868718", "0.58661294", "0.58562297", "0.58536255", "0.5848474", "0.58327943", "0.582857", "0.58277017", "0.58193237", "0.5818164", "0.58025557", "0.58002234", "0.57981914", "0.57981914", "0.57981914", "0.57981914", "0.57981914", "0.57981914", "0.57981914", "0.57981914", "0.57981914", "0.57981914", "0.57981914", "0.57981914" ]
0.0
-1
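A minimal runnable sketch (an editorial illustration, not a dataset row) of how a predicate/body pair like the loop-condition document above and the gmres_krylov_work document in the next record drives jax.lax.while_loop; the gmres_krylov snippet among the negatives below wires them together the same way. The carry layout (k, err), the constants (tol, max_steps), and the error that halves each step are invented for the toy; only jax.lax.while_loop and jax.lax.cond are real APIs:

import jax
import jax.numpy as jnp

def loop_condition(carry):
    # Mirrors gmres_krylov_loop_condition: keep going while there are
    # steps left AND the residual estimate still exceeds the tolerance.
    (k, err), (tol, max_steps) = carry
    return jax.lax.cond(k < max_steps,           # Predicate.
                        lambda a: a[0] >= a[1],  # Called if True.
                        lambda a: False,         # Called if False.
                        (err, tol))

def loop_body(carry):
    # Mirrors gmres_krylov_work: advance the counter, shrink the error.
    (k, err), constants = carry
    return ((k + 1, err / 2.0), constants)

variables = (0, jnp.asarray(1.0))   # (k, err)
constants = (1e-3, 50)              # (tol, max_steps)
(k, err), _ = jax.lax.while_loop(loop_condition, loop_body,
                                 (variables, constants))
print(k, err)  # k == 10: the first step at which err < tol

Run as written this halts after ten halvings (1/2**10 < 1e-3), well before the max_steps cap: the same two-way exit (iteration budget versus convergence) that the real condition implements.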
Performs a single iteration of gmres_krylov. See that function for a more detailed description.
def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:
  gmres_variables, gmres_constants = gmres_carry
  k, V, R, beta_vec, err, givens = gmres_variables
  tol, A_mv, A_args, b_norm, _ = gmres_constants

  V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)
  R_col, givens = apply_givens_rotation(H[:, k], givens, k)
  R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])

  # Update the residual vector.
  cs, sn = givens[:, k] * beta_vec[k]
  beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)
  beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)
  err = jnp.abs(sn) / b_norm
  gmres_variables = (k + 1, V, R, beta_vec, err, givens)
  return (gmres_variables, gmres_constants)
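For context (an editorial aside, not part of the record): the rotation returned by apply_givens_rotation above is the 2x2 Givens transform chosen to annihilate the subdiagonal entry of the new Hessenberg column. The same rotation is applied to the residual coordinate beta_vec[k], and the magnitude that spills into beta_vec[k + 1] is exactly the new residual norm, which is why err = |sn| / b_norm tracks the relative residual without ever forming it. A tiny NumPy illustration with made-up entries:

import numpy as np

# Hypothetical last two entries of a Hessenberg column, H[k:k+2, k].
a, b = 3.0, 4.0
r = np.hypot(a, b)       # 5.0
cs, sn = a / r, b / r    # Givens coefficients

G = np.array([[cs, sn],
              [-sn, cs]])
print(G @ np.array([a, b]))  # -> [5., 0.]: subdiagonal entry zeroed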
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)", "def gmres_krylov_loop_condition(gmres_carry: GmresCarryType) -> bool:\n gmres_constants, gmres_variables = gmres_carry\n tol = gmres_constants[0]\n k = gmres_variables[0]\n err = gmres_variables[4]\n n_kry = gmres_constants[4]\n\n def is_iterating(k, n_kry):\n return k < n_kry\n\n def not_converged(args):\n err, tol = args\n return err >= tol\n return jax.lax.cond(is_iterating(k, n_kry), # Predicate.\n not_converged, # Called if True.\n lambda x: False, # Called if False.\n (err, tol)) # Arguments to calls.", "def run(self):\n i = 0\n try:\n for i in range(0, self._iters):\n if self._verbose:\n print(\" Inner CG Iteration \" + repr(i))\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k, alpha_k,\n self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n if self._verbose:\n print(\"Converged at Iteration \" + str(i) + \".\")\n self.converged = True\n self.iteration = i+1\n return\n\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n self._rho_k = rho_k_plus_1\n\n if self._verbose >= 3:\n print(\" Residual=\" + repr(rho_k_t))\n except KeyboardInterrupt:\n raise\n finally:\n self.iteration = i+1", "def gmres_m(A_mv: Callable, A_args: Sequence,\n b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,\n atol: float, num_krylov_vectors: int,\n maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:\n num_krylov_vectors = min(num_krylov_vectors, b.size)\n x = x0\n b_norm = jnp.linalg.norm(b)\n tol = max(tol * b_norm, atol)\n for n_iter in range(maxiter):\n done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,\n b_norm)\n if done:\n break\n return x, beta, n_iter, done", "def run(self):\n if not self._no_progress and 
self._verbose:\n from progressbar import ProgressBar\n progress = ProgressBar()\n iter_range = progress(range(self._iters))\n else:\n iter_range = range(self._iters)\n\n if self._no_progress and self._time_iters:\n from time import time\n\n i = 0\n try:\n for i in iter_range:\n if self._verbose and self._no_progress:\n print(\"Iteration \" + repr(i))\n\n if self._no_progress and self._time_iters:\n start = time()\n\n self.iteration += 1\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n if self._double:\n update_m_double(self._m, alpha_k, self._p_k)\n sub_scaled_vector_double(self._residual_k,\n self._residual_k,\n alpha_k, self._v_k)\n else:\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k,\n alpha_k, self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n print(\"Converged.\")\n self.converged = True\n break\n\n if self._double:\n add_scaled_vector_double(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n else:\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k, self._p_k)\n\n self._rho_k = rho_k_plus_1\n\n if self._noisy:\n print(\" Residual=\" + str(rho_k_t))\n\n if self._no_progress and self._time_iters:\n print(\"Elapsed time for iteration \" + str(i) + \": \" +\n str(time() - start) + \" seconds\")\n\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, i, self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, i)\n except KeyboardInterrupt:\n print(\"Reconstruction aborted (CTRL-C) at iteration \" + str(i))\n finally:\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, \"result\", self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, \"result\")\n self.iteration = i+1\n return (self._m.get().reshape(self._data.nX1, self._data.nX2),\n self.iteration)", "def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,\n tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:\n r, beta = gmres_residual(A_mv, A_args, b, x)\n k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,\n x0, r, beta, tol, b_norm)\n x = gmres_update(k, V, R, beta_vec, x0)\n done = k < num_krylov_vectors - 1\n return done, beta, x", "def main():\n feature_extraction_model = \"HOG\"\n dimension_reduction_model = \"PCA\"\n k_value = get_input_k(\"k\")\n K_value = get_input_k(\"K\")\n folder = get_input_folder(\"Folder\")\n dim_k_value = 40\n\n query_images = get_input_image_list(folder)\n start = time.time()\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value, folder_metadata=folder,\n metadata_collection=\"labelled\")\n obj_feat = dim_red.get_object_feature_matrix()\n features_list = np.array(obj_feat['featureVector'].tolist())\n images_list = np.array(obj_feat['imageId'])\n cos_sim = cosine_similarity(features_list)\n\n sim_graph = sim_graph_from_sim_max(cos_sim, images_list, k_value)\n results = ppr(sim_graph, images_list, query_images)\n results = 
results[:K_value]\n\n print(\"Top {} images from Personalized page Rank are:\".format(K_value))\n for r in results:\n r[\"path\"] = os.path.abspath(os.path.join(folder, r['imageId']))\n print(r)\n\n query_images_list = [os.path.abspath(os.path.join(folder, img)) for img in query_images]\n title = {\"Model\": \"Personalized Page Rank\", \"k\": k_value, \"K\": K_value}\n show_images_ppr(query_images_list, title, results)\n print(\"Execution time: {} seconds\".format(time.time() - start))", "def question27():\n global conv_residuals\n def catch(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n def iterate(rk):\n \"\"\" Preconditioner Function for GMRES.\"\"\"\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk\n\n\n N_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_N = np.zeros(N_search.size)\n\n fig271 = plt.figure(figsize=(13, 8))\n\n for i, n in enumerate(N_search):\n n2 = n**2\n A = construct_matrix_A(n)\n b = np.random.randn(n2)\n M, N = construct_M_N(n)\n mu_max = scipy.sparse.linalg.eigs(M, k=1, which='LM', return_eigenvectors=False)[0].real\n mu_min = scipy.sparse.linalg.eigs(M, k=1, which='SM', return_eigenvectors=False)[0].real\n gamma = np.sqrt(mu_max*mu_min)\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n2, n2), format=\"csr\")\n P1 = gammaI + M\n P2 = gammaI - N\n P3 = gammaI + N\n P4 = gammaI - M\n M = scipy.sparse.linalg.LinearOperator((n2, n2), matvec=iterate)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, M=M, callback=catch)\n steps_till_conv_N[i] += len(conv_residuals)\n n_steps = len(conv_residuals)\n plt.semilogy(range(n_steps), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Steps Required for Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 271 - GMRES + Preconditioner Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(f\"figures/figure271.png\")\n plt.show()\n\n\n fig270 = plt.figure(figsize=(13, 8))\n plt.plot(N_search, steps_till_conv_N)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps until convergence\")\n plt.title(\"Figure 270 - GMRES + Preconditioner Convergence Required for Varying N\", fontsize=13)\n plt.grid()\n plt.savefig(f\"figures/figure270.png\")\n plt.show()\n return", "def grid_search(train_labels: str, \n test_labels: str, \n output:str, \n res:tuple=(120, 160), \n lazy:bool=True, \n batch_size:int=16, \n epochs:int=20):\n\n # Data\n print(\"=> Loading data.\")\n train = FLIRDataset(train_labels, res=res, batch_size=batch_size)\n test = FLIRDataset(test_labels, res=res, batch_size=batch_size)\n\n # In eager loading mode, train on everything.\n if not lazy:\n X_train, y_train = train.get_all()\n X_test, y_test = test.get_all()\n X_train = np.concatenate([X_train, X_test], axis=0)\n y_train = np.concatenate([y_train, y_test], axis=0)\n\n\n def net(x, num_classes=1):\n x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x)\n x = K.layers.Flatten()(x)\n x = K.layers.Dense(num_classes, activation=\"softmax\")(x)\n return x\n\n print(\"\\n=> Training model.\")\n input_tensor = K.layers.Input((160, 120, 1))\n output_tensor = net(input_tensor, num_classes=train.num_classes())\n model = K.Model(input_tensor, output_tensor)\n\n 
model.compile(optimizer=\"sgd\",\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"])\n\n # Train model\n if lazy:\n model.fit(x=train, \n epochs=epochs, \n validation_data=train, \n verbose=2)\n else:\n model.fit(x=X_train, \n y=y_train, \n epochs=epochs, \n batch_size=batch_size, \n verbose=2)\n\n # Save weights\n model.save_weights(os.path.join(output, \"flir_pretrained_weights.h5\"))", "def convergence_gmres_A():\n global conv_residuals\n def compute_residuals(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n n_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_n = np.zeros(n_search.size)\n\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n # To average, we loop over 10 times\n for j in range(10):\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n steps_till_conv_n[i] += len(conv_residuals)\n\n # Divide by 10 to take the average:\n steps_till_conv_n /= 10\n\n fig220 = plt.figure(figsize=(13, 8))\n plt.plot(n_search, steps_till_conv_n)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps Taken to Converge\")\n plt.title(\"Figure 220 - Steps Taken for GMRES to Converge for Varying N\",\n fontsize=13)\n plt.grid()\n plt.savefig(\"figures/figure220.png\")\n plt.show()\n\n n_search = np.array([10, 50, 100, 150])\n\n fig221 = plt.figure(figsize=(13, 8))\n for i, n in enumerate(n_search):\n A = construct_matrix_A(n)\n b = np.random.randn(n**2)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, callback=compute_residuals)\n plt.semilogy(range(len(conv_residuals)), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Step Taken to Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 221 - GMRES Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(\"figures/figure221.png\")\n plt.show()\n return", "def main():\n base_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir,\n )\n default_output_path = os.path.join(base_dir, \"output\", \"out.png\")\n default_texture_path = os.path.join(base_dir, \"textures\", \"grid.png\")\n\n default_options = {\n \"resolution\": (1512, 762),\n \"texture_path\": default_texture_path,\n \"output_path\": default_output_path,\n \"iterations\": 200, # Increase this for good results\n \"camera_position\": [3.1, 1.570796, 0.],\n \"num_processes\": multi.cpu_count(),\n \"chunk_size\": 9000,\n \"gain\": 1,\n \"normalize\": 0,\n \"spin\": 0.7,\n }\n args = parse_args(default_options)\n\n output_path = os.path.dirname(args.output_path)\n if not os.path.exists(output_path):\n print(\"Error: Output path does not exist at:\")\n print(args.output_path)\n print(\"Create the directory or change the path then try again.\")\n print_help_and_exit()\n\n\n try:\n texture = spm.imread(args.texture_path)\n except FileNotFoundError as error:\n print(error)\n print(\"Error: Texture file not found at:\")\n print(args.texture_path)\n print_help_and_exit()\n\n # Convert to float to work in linear colour space\n texture = convert_image_to_float(texture)\n if not args.no_srgb:\n # Convert to sRGB before resizing for correct results\n srgbtorgb(texture)\n\n texture = convert_image_to_float(\n spm.imresize(texture, 2.0, interp=\"bicubic\"),\n )\n\n black_hole = KerrBlackHole(args.spin)\n raytracer = KerrRaytracer(\n black_hole,\n 
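        # (added note) the remaining positional arguments are the camera
        # position (presumably Boyer-Lindquist r, theta, phi, given the
        # default [3.1, 1.570796, 0.]), the texture array, the output
        # resolution, the iteration budget, the worker-process count and
        # the per-process chunk size.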
args.camera_position,\n texture,\n args.resolution,\n args.iterations,\n args.num_processes,\n args.chunk_size,\n shuffle=not args.disable_shuffle,\n )\n raytracer.generate_image()\n print(\"Raytracing Completed Succesfully.\")\n print(\n \"Total raytracing time:\",\n datetime.timedelta(seconds=(time.time() - raytracer.start_time)),\n )\n\n colour = post_process(raytracer.colour_buffer_preproc, args.gain, args.normalize)\n\n save_to_img(\n colour,\n args.output_path,\n args.resolution,\n srgb_out=not args.no_srgb,\n )", "def eval_one_iteration(sess, model, cv_num_batch, iteration):\n counter = 0\n cv_g_mse_loss = 0.0\n cv_g_l2_loss = 0.0\n cv_g_loss = 0.0\n for batch in range(int(cv_num_batch/FLAGS.num_gpu)):\n g_mse_losses, g_l2_losses, \\\n g_losses = sess.run([model.g_mse_losses,\n model.g_l2_losses,\n model.g_losses])\n g_mse_loss = np.mean(g_mse_losses)\n g_l2_loss = np.mean(g_l2_losses)\n g_loss = np.mean(g_losses)\n counter += FLAGS.num_gpu\n cv_g_mse_loss += g_mse_loss\n cv_g_l2_loss += g_l2_loss\n cv_g_loss += g_loss\n\n _summaries = sess.run(model.summaries)\n model.writer.add_summary(_summaries, iteration * cv_num_batch)\n\n cv_g_mse_loss = cv_g_mse_loss / counter * FLAGS.num_gpu\n cv_g_l2_loss = cv_g_l2_loss / counter * FLAGS.num_gpu\n cv_g_loss = cv_g_loss / counter * FLAGS.num_gpu\n\n return cv_g_mse_loss, cv_g_l2_loss, cv_g_loss", "def __call__(self, results):\n\n for key in results.get('seg_fields', []):\n if self.scale_factor != 1:\n results[key] = general_ocr.imrescale(\n results[key],\n self.scale_factor,\n interpolation='nearest',\n backend=self.backend)\n return results", "def compute(self, X, Y, n):\n inner_cv = KFold(5, shuffle=True, random_state=1673)\n\n print('-> grid searching and cross validation ...')\n for training, validation, j in self._k_fold_cross_validation(X, 5, n):\n\n x, y, valid_x, valid_y = X.loc[training, :], Y[training], X.loc[validation, :], Y[validation]\n x_features, valid_features = self.sat_features.loc[training, :], self.sat_features.loc[validation, :]\n\n if 'kNN' in self.model_list:\n parameters = {'n_neighbors': range(1, 18, 2)}\n model = KNeighborsRegressor(weights='distance')\n self.kNN = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n\n res = self.kNN.fit(x, y).predict(valid_x)\n self.results['kNN'].append(list(res))\n self.scores['kNN'].append(R2(valid_y, res))\n\n if 'Kriging' in self.model_list:\n parameters = {\"kernel\": [RBF(l) for l in [[1, 1]]]}\n model = GaussianProcessRegressor(alpha=0.1, n_restarts_optimizer=0)\n self.Kriging = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n\n res = self.Kriging.fit(x, y).predict(valid_x)\n self.results['Kriging'].append(list(res))\n self.scores['Kriging'].append(R2(valid_y, res))\n\n if 'RmSense' in self.model_list:\n parameters = {\"alpha\": [0.001, 0.01, 0.1, 1, 10, 100, 1000]}\n model = Ridge()\n self.RmSense = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n #print('INFO: best alpha - ', self.RmSense.fit(x_features, y).best_params_)\n\n res = self.RmSense.fit(x_features, y).predict(valid_features)\n self.results['RmSense'].append(list(res))\n self.scores['RmSense'].append(R2(valid_y, res))\n\n if 'Ensamble' in self.model_list:\n res = (self.RmSense.predict(valid_features) + self.kNN.predict(valid_x)) / 2.\n self.results['Ensamble'].append(list(res))\n self.scores['Ensamble'].append(R2(valid_y, res))\n\n for m in self.model_list:\n print('score {}: {}'.format(m, np.mean(self.scores[m])))", "def 
get_all_results(pred_root, meta_results):\r\n results_all = {}\r\n for key in tqdm(meta_results, desc='Generating results ..'):\r\n persons = meta_results[key]\r\n\r\n global_seg = cv2.imread(pred_root + 'global_seg/{}.png'.format(key),\r\n cv2.IMREAD_UNCHANGED)\r\n global_tag = cv2.imread(pred_root + 'global_tag/{}.png'.format(key),\r\n cv2.IMREAD_UNCHANGED)\r\n\r\n results = {}\r\n dets, masks = [], []\r\n for p_id, score in persons:\r\n mask = (global_tag == p_id)\r\n if np.sum(mask) == 0:\r\n continue\r\n seg = mask * global_seg\r\n ys, xs = np.where(mask > 0)\r\n x1, y1, x2, y2 = xs.min(), ys.min(), xs.max(), ys.max()\r\n dets.append((x1, y1, x2, y2, score))\r\n masks.append(seg)\r\n\r\n # Reuiqred Field of each result: a list of masks,\r\n # each is a multi-class masks for one person.\r\n # It can also be sparsified to\r\n # [scipy.sparse.csr_matrix(mask) for mask in masks]\r\n # to save memory cost\r\n results['MASKS'] = masks if not Sparse \\\r\n else [scipy.sparse.csr_matrix(m) for m in masks]\r\n # Reuiqred Field of each result,\r\n # a list of detections corresponding to results['MASKS'].\r\n results['DETS'] = dets\r\n\r\n if cache_pkl:\r\n results_cache_add = cache_pkl_path + key + '.pklz'\r\n pickle.dump(results, gzip.open(results_cache_add, 'w'))\r\n results_all[key] = results_cache_add\r\n else:\r\n results_all[key] = results\r\n\r\n if PLOT:\r\n import pylab as plt\r\n plt.figure('seg')\r\n plt.imshow(global_seg)\r\n print('Seg unique:' + str(np.unique(global_seg)))\r\n plt.figure('tag')\r\n plt.imshow(global_tag)\r\n print('Tag unique:' + str(np.unique(global_tag)))\r\n plt.show()\r\n\r\n return results_all", "def solve(self):\n\n # Set up display header if verbose operation enabled\n if self.opt['Verbose']:\n hdr = 'Itn DFidX PriResX DuaResX DFidG' + \\\n ' ResG '\n print(hdr)\n print('-' * len(hdr))\n\n # Main iteration loop\n for n in range(self.opt['MaxMainIter']):\n\n # At start of 2nd iteration, set the numbers of inner\n # iterations for the X and G solvers from the options\n # object for the outer solver\n if n == 1:\n self.slvX.opt['MaxMainIter'] = self.opt['XslvIter']\n self.slvG.opt['MaxMainIter'] = self.opt['GslvIter']\n\n # Run the configured number of iterations of the X (CSC)\n # solver and assign the result to X\n self.X = self.slvX.solve()\n\n # Compute the sum of the subpixel shifts of X\n Xhs = np.sum(fftconv(self.H, self.X.squeeze(), axes=(0, 1)),\n axis=-1)\n\n # Set the convolution kernel in the deconvolution solver\n # to the sum of the subpixel shifts of X\n self.slvG.setG(Xhs)\n # Run the configured number of iterations of the G\n # (deconvolution) solver and crop the result to obtain the\n # updated g\n self.g = self.slvG.solve()[0:self.gshp[0], 0:self.gshp[1]]\n\n # Construct a new dictionary for the X (CSC) solver from\n # the updated psf g\n self.D, self.dn = self.getD(self.g)\n self.slvX.setdict(self.D[..., np.newaxis, np.newaxis, :])\n\n # Display iteration statistics if verbose operation enabled\n if self.opt['Verbose']:\n itsX = self.slvX.getitstat()\n itsG = self.slvG.getitstat()\n fmt = '%3d %.3e %.3e %.3e %.3e %.3e'\n tpl = (n, itsX.DFid[-1], itsX.PrimalRsdl[-1],\n itsX.DualRsdl[-1], itsG.DFid[-1], itsG.Rsdl[-1])\n print(fmt % tpl)\n\n # Return the (normalised) psf estimate g\n return self.g / np.linalg.norm(self.g)", "def iterate_over_hkl_compute(self, max_hkl=6):\n \n # r will contain the return value, an array with rows that contain:\n # h, k, l, qhkl, qhkl_vector\n r = []\n \n for h in range(-max_hkl,max_hkl+1):\n for k in 
range(-max_hkl,max_hkl+1):\n for l in range(-max_hkl,max_hkl+1):\n \n # Don't put a reflection at origin\n if not (h==0 and k==0 and l==0):\n qhkl, qhkl_vector = self.q_hkl_exp(h,k,l)\n r.append( [ h, k, l, qhkl, qhkl_vector ] )\n \n return r", "def inner_loop(model, optim, img, rays_o, rays_d, bound, num_samples, raybatch_size, inner_steps):\n pixels = img.reshape(-1, 3)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n\n num_rays = rays_d.shape[0]\n for step in range(inner_steps):\n indices = torch.randint(num_rays, size=[raybatch_size])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n pixelbatch = pixels[indices] \n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n num_samples, perturb=True)\n \n optim.zero_grad()\n rgbs, sigmas = model(xyz)\n colors = volume_render(rgbs, sigmas, t_vals)\n loss = F.mse_loss(colors, pixelbatch)\n loss.backward()\n optim.step()", "def loop(self, *, l_img=True, r_img=False, depth_map=False, depth_map_img=False, point_cloud=False,\n\t\t\t\t\tod_bbox=False, od_img=False, ss_pred=False, ss_img=False,\n\t\t\t\t\tdist_to_col=False, dist_to_col_img=False,\n\t\t\t\t\tis_close=False, min_dist=False, is_close_simple=False, min_dist_simple=False):\n\t\tis_c = is_close or min_dist\n\t\tis_c_s = is_close_simple or min_dist_simple\n\t\td2c = dist_to_col or dist_to_col_img or is_c\n\t\tod = od_bbox or od_img or d2c\n\t\tss = ss_pred or ss_img\n\t\t\n\t\truntime_parameters = sl.RuntimeParameters()\n\n\t\twhile True:\n\t\t\tcache = []\n\t\t\tif self.zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:\n\t\t\t\tif l_img or od or ss or d2c:\n\t\t\t\t\t_l_img = sl.Mat()\n\t\t\t\t\tself.zed.retrieve_image(_l_img, sl.VIEW.LEFT)\n\t\t\t\t\tif l_img:\n\t\t\t\t\t\tcache.append(_l_img.get_data())\n\n\t\t\t\tif r_img:\n\t\t\t\t\t_r_img = sl.Mat()\n\t\t\t\t\tself.zed.retrieve_image(_r_img, sl.VIEW.RIGHT)\n\t\t\t\t\tcache.append(_r_img.get_data())\n\n\t\t\t\tif depth_map or is_c_s:\n\t\t\t\t\t_depth_map = sl.Mat()\n\t\t\t\t\tself.zed.retrieve_measure(_depth_map, sl.MEASURE.DEPTH)\n\t\t\t\t\tcache.append(_depth_map.get_data())\t\t\n\n\t\t\t\tif depth_map_img:\n\t\t\t\t\t_depth_map_img = sl.Mat()\n\t\t\t\t\tself.zed.retrieve_image(_depth_map_img, sl.VIEW.DEPTH)\n\t\t\t\t\tcache.append(_depth_map_img.get_data())\n\n\t\t\t\tif point_cloud or d2c:\n\t\t\t\t\t_point_cloud = sl.Mat()\n\t\t\t\t\tself.zed.retrieve_measure(_point_cloud, sl.MEASURE.XYZRGBA)\n\t\t\t\t\tif point_cloud:\n\t\t\t\t\t\tcache.append(_point_cloud.get_data())\n\n\t\t\t\tif od:\n\t\t\t\t\t_od_bbox = self.object_detection(_l_img, return_image=od_img)\n\t\t\t\t\tif od_img:\n\t\t\t\t\t\t_od_bbox, _od_img = _od_bbox\n\t\t\t\t\tif od_bbox:\n\t\t\t\t\t\tcache.append(_od_bbox)\n\t\t\t\t\tif od_img:\n\t\t\t\t\t\tcache.append(_od_img)\n\n\t\t\t\tif ss:\n\t\t\t\t\t_ss_pred = self.semantic_segmentation(_l_img, return_image=ss_img)\n\t\t\t\t\tif ss_img:\n\t\t\t\t\t\t_ss_pred, _ss_img = _ss_pred\n\t\t\t\t\tif ss_pred:\n\t\t\t\t\t\tcache.append(_ss_pred)\n\t\t\t\t\tif ss_img:\n\t\t\t\t\t\tcache.append(_ss_img)\n\n\t\t\t\tif d2c:\n\t\t\t\t\t_dist_to_col = self.distance_to_collision(_od_bbox, _point_cloud, image=_l_img, return_image=dist_to_col_img)\n\t\t\t\t\tif dist_to_col_img:\n\t\t\t\t\t\t_dist_to_col, _dist_to_col_img = _dist_to_col\n\t\t\t\t\tif dist_to_col:\n\t\t\t\t\t\tcache.append(_dist_to_col)\n\t\t\t\t\tif dist_to_col_img:\n\t\t\t\t\t\tcache.append(_dist_to_col_img)\n\n\t\t\t\tif is_c:\n\t\t\t\t\t_is_close = self.is_close_to_collision(_od_bbox, _dist_to_col, 
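					# (added note) with return_min_dist set, this call returns a
					# (flag, min_distance) tuple that is unpacked just below.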
return_min_dist=min_dist)\n\t\t\t\t\tif min_dist:\n\t\t\t\t\t\t_is_close, _min_dist = _is_close\n\t\t\t\t\tif is_close:\n\t\t\t\t\t\tcache.append(_is_close)\n\t\t\t\t\tif min_dist:\n\t\t\t\t\t\tcache.append(_min_dist)\n\n\t\t\t\tif is_c_s:\n\t\t\t\t\t_is_close = self.is_close_to_collision_simple(_depth_map, return_min_dist=min_dist_simple)\n\t\t\t\t\tif min_dist_simple:\n\t\t\t\t\t\t_is_close, _min_dist = _is_close\n\t\t\t\t\tif is_close_simple:\n\t\t\t\t\t\tcache.append(_is_close)\n\t\t\t\t\tif min_dist_simple:\n\t\t\t\t\t\tcache.append(_min_dist)\n\n\t\t\tif cache:\n\t\t\t\tyield cache\n\t\t\telse:\n\t\t\t\traise StopIteration\n\n\t\t\t# use 'q' to quit the loop\n\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\traise StopIteration", "def main(logger, resultsDict):\n\n print(\"=\" * 30)\n print(\"Main function of overlayMasks.\")\n print(\"=\" * 30)\n\n # Get parameters from .json files.\n full_img_dir = config_overlay[\"full_img_dir\"]\n y_true_dir = config_overlay[\"y_true_dir\"]\n y_pred_dir = config_overlay[\"y_pred_dir\"]\n extension = config_overlay[\"extension\"]\n target_size = (config_overlay[\"target_size\"], config_overlay[\"target_size\"])\n save_maskoverlay_dir = config_overlay[\"save_maskoverlay_dir\"]\n save_fulloverlay_dir = config_overlay[\"save_fulloverlay_dir\"]\n\n # ------------\n\n # Get paths.\n full_img_paths_list = []\n y_true_paths_list = []\n y_pred_paths_list = []\n\n for full in os.listdir(full_img_dir):\n if full.endswith(extension):\n full_img_paths_list.append(os.path.join(full_img_dir, full))\n\n for full in os.listdir(y_true_dir):\n if full.endswith(extension):\n y_true_paths_list.append(os.path.join(y_true_dir, full))\n\n for full in os.listdir(y_pred_dir):\n if full.endswith(extension):\n y_pred_paths_list.append(os.path.join(y_pred_dir, full))\n\n full_img_paths_list.sort()\n y_true_paths_list.sort()\n y_pred_paths_list.sort()\n\n # ------------\n\n # Load full_img.\n full_img_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in full_img_paths_list\n ]\n\n # Load y_true masks.\n y_true_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in y_true_paths_list\n ]\n\n # Load y_pred masks.\n y_pred_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in y_pred_paths_list\n ]\n\n print(full_img_arrays[0].min(), full_img_arrays[0].max())\n print(y_true_arrays[0].min(), y_true_arrays[0].max())\n print(y_pred_arrays[0].min(), y_pred_arrays[0].max())\n\n # ------------\n\n # Stack to create RGB version of grayscale images.\n full_img_rgb = [np.stack([img, img, img], axis=-1) for img in full_img_arrays]\n\n # Green true mask. Note OpenCV uses BGR.\n y_true_rgb = [\n np.stack([np.zeros_like(img), img, np.zeros_like(img)], axis=-1)\n for img in y_true_arrays\n ]\n\n # Red predicted mask. 
Note OpenCV uses BGR.\n y_pred_rgb = [\n np.stack([np.zeros_like(img), np.zeros_like(img), img], axis=-1)\n for img in y_pred_arrays\n ]\n\n # ------------\n\n for i in range(len(full_img_rgb)):\n\n # First overlay true and predicted masks.\n overlay_masks = cv2.addWeighted(\n src1=y_true_rgb[i], alpha=0.5, src2=y_pred_rgb[i], beta=1, gamma=0\n )\n\n # Then overlay full_img and masks.\n overlay_all = cv2.addWeighted(\n src1=full_img_rgb[i], alpha=1, src2=overlay_masks, beta=0.5, gamma=0\n )\n\n # Save.\n\n # Get patient ID from y_true masks.\n filename = os.path.basename(y_true_paths_list[i])\n filename_split = filename.split(\"_\")\n patientID = \"_\".join([filename_split[i] for i in range(4)])\n\n masks_filename = patientID + \"___MasksOverlay.png\"\n all_filename = patientID + \"___AllOverlay.png\"\n\n save_path_masks = os.path.join(save_maskoverlay_dir, masks_filename)\n save_path_all = os.path.join(save_fulloverlay_dir, all_filename)\n\n print(save_path_masks)\n print(save_path_all)\n\n cv2.imwrite(filename=save_path_masks, img=overlay_masks)\n cv2.imwrite(filename=save_path_all, img=overlay_all)", "def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()", "def run_kohonen(data, size_k: int=6, sigma: float=2.0, eta: int=0.9, \n tmax: int=5000, convergence=0):\n dim = 28*28\n data_range = 255.0\n dy, dx = data.shape\n \n #convergence criteria\n eps = 1E-6\n eps_2 = 0.1\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n #error for convergence criterion\n error = [np.inf]\n \n print('start iteration')\n for t, i in enumerate(i_random):\n old_centers = copy(centers)\n som_step(centers, data[int(i),:],neighbor,eta,sigma)\n \n if t % 1E4 == 0:\n print('iteration {}'.format(t))\n \n 
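        # (added note) two convergence diagnostics follow: mode 1 records the
        # quantization error (distance from each sample to its best-matching
        # prototype), mode 2 records the norm of the prototype update between
        # steps. The break tests themselves are left commented out in this
        # snippet; a minimal relative criterion would be:
        #   if np.abs((error[-2] - error[-1]) / error[1]) < eps:
        #       break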
if convergence == 1:\n #convergence: distance between samples and best matching prototypes \n error.append(calculate_error(centers,data))\n# if np.abs((error[-2]-error[-1])/error[1]) < eps :\n# break\n \n elif convergence == 2:\n #convergence: non significant weight update\n err = np.linalg.norm(centers-old_centers)\n error.append(err)\n# if err < eps_2:\n# break\n \n \"\"\" # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw() \"\"\"\n \n print('Total iteration : {}'.format(t))\n return centers, error[1:]", "def getResults(solver, minBit, maxBit, saveFile, noResults):\n\n for k in range(minBit, maxBit + 1, 2):\n for i in range(noResults):\n\n keys = generate_RSA.KeyGen(k) # initialise keys\n keys.generateKeys() # generate keys\n\n solver.setN(keys.n) # setup solver\n solver.setE(keys.e)\n\n solver.solve() # solve problem\n\n if solver.d == keys.d: # if we got it right\n resTime = resTime_C # update correct dictionaries\n resCount = resCount_C\n resSpace = resSpace_C\n else:\n resTime = resTime_W # else update wrong dictionaries\n resCount = resCount_W\n resSpace = resSpace_W\n\n if k not in resTime: # if we've not yet had a result for k\n resTime[k] = [solver.time, 1] # then set\n resSpace[k] = [solver.space, 1] # then set\n resCount[k] = [solver.count, 1]\n else:\n oldT, oldC = resTime[k] # keeps a running average\n newC = oldC + 1 # increment count\n newT = ((oldT * oldC) + solver.time) / newC # get new averagae\n resTime[k] = [newT, newC] # without storing all variables\n\n oldS, oldC = resSpace[k] # keeps a running average\n newS = ((oldS * oldC) + solver.space) / newC\n resSpace[k] = [newS, newC] # without storing all variables\n\n oldCount, oldC = resCount[k] # keeps a running average\n newCount = ((oldCount * oldC) + solver.count) / newC\n resCount[k] = [newCount, newC] # without storing all variables\n\n if i % 10 == 0:\n saveResults(saveFile) # every ten results save again", "def kmeans_004():\n crops = [200] # Should probably also add 250\n scales = [30, 50] # Scaling is probably the most important part here\n\n scores = []\n for s in scales:\n crop = 200\n n_centroids = 1600\n n_patches = 400000\n # rf_size = int(round(s * .2))\n rf_size = 10\n logger.info(\"Training with crop {}, scale {}, patch size {}, patches {}, centroids {}\".format(crop, s, rf_size, n_patches, n_centroids))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n # spherical generator\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_004_scale_{}_rf_{}'.format(s, rf_size),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n logger.info(\"Images ndarray shape: {}\".format(images.shape))\n patches = patch_extractor.transform(images)\n logger.info(\"Patches ndarray shape: {}\".format(patches.shape))\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_004_scale_{}_rf_{}.npy'.format(s, rf_size), memmap=True)\n train_y = 
classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n logger.info(\"Train X ndarray shape: {}\".format(train_x.shape))\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n scores.append((s, wrapper.cv_scores))\n del wrapper\n gc.collect()", "def __call__(self, img_ori, objs, **kvs):\n # Crop image, forward to get the param\n param_lst = []\n roi_box_lst = []\n\n crop_policy = kvs.get('crop_policy', 'box')\n for obj in objs:\n if crop_policy == 'box':\n # by face box\n roi_box = parse_roi_box_from_bbox(obj)\n elif crop_policy == 'landmark':\n # by landmarks\n roi_box = parse_roi_box_from_landmark(obj)\n else:\n raise ValueError(f'Unknown crop policy {crop_policy}')\n\n roi_box_lst.append(roi_box)\n self.img_crop = crop_img(img_ori, roi_box)\n img = cv2.resize(self.img_crop, dsize=(self.size, self.size), interpolation=cv2.INTER_LINEAR)\n inp = self.transform(img).unsqueeze(0)\n\n if self.gpu_mode:\n inp = inp.cuda(device=self.gpu_id)\n\n # if kvs.get('timer_flag', False):\n if True:\n end = time.time()\n param = self.model(inp)\n elapse = f'Inference: {(time.time() - end) * 1000:.1f}ms'\n print(elapse)\n else:\n param = self.model(inp)\n\n param = param.squeeze().cpu().numpy().flatten().astype(np.float32)\n param = param * self.param_std + self.param_mean # re-scale\n # print('output', param)\n param_lst.append(param)\n\n return param_lst, roi_box_lst", "def do_pnp(pts3d_for_pnp, pts2d_for_pnp, K, iterations=200, reprojThresh=5):\n list_pts3d_for_pnp = pts3d_for_pnp\n list_pts2d_for_pnp = pts2d_for_pnp\n pts3d_for_pnp = np.array(pts3d_for_pnp)\n # pts2d_for_pnp = np.expand_dims(np.squeeze(np.array(pts2d_for_pnp)), axis=1)\n # print(pts3d_for_pnp)\n # print(pts2d_for_pnp.shape)\n num_pts = len(pts3d_for_pnp)\n print(num_pts)\n highest_inliers = 0\n for j in range(iterations):\n pt_idxs = np.random.choice(num_pts, 6, replace=False)\n pts3 = np.array([pts3d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n # print(\"pts\",pts3)\n pts2 = np.array([pts2d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n _, rvec, tvec = cv2.solvePnP(pts3, pts2, K, distCoeffs=np.array([]), flags=cv2.SOLVEPNP_ITERATIVE)\n R, _ = cv2.Rodrigues(rvec)\n pnp_errors, projpts, avg_err, perc_inliers = test_reproj_pnp_points(list_pts3d_for_pnp, list_pts2d_for_pnp, R, tvec, K, rep_thresh=reprojThresh)\n if highest_inliers < perc_inliers:\n highest_inliers = perc_inliers\n best_R = R\n best_tvec = tvec\n R = best_R\n tvec = best_tvec\n # print('rvec:', rvec,'\\n\\ntvec:', tvec)\n print(\"avg\",avg_err)\n print(\"inlier\",perc_inliers)\n return R, tvec", "def voc_pred_process(pred_data, val_cls, recs):\n num_classes = config.num_classes\n cls_img_ids = {}\n cls_bboxes = {}\n cls_scores = {}\n classes = {}\n cls_npos = {}\n for cls in val_cls:\n if cls == 'background':\n continue\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == cls]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n cls_npos[cls] = npos\n classes[cls] = class_recs\n cls_img_ids[cls] = []\n cls_bboxes[cls] = []\n cls_scores[cls] = []\n\n for sample in pred_data:\n pred_boxes = sample['boxes']\n box_scores = sample['box_scores']\n 
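        # (added note) pred_boxes holds normalized [ymin, xmin, ymax, xmax]
        # corners (rescaled by [h, w, h, w] further down) and box_scores has
        # one confidence column per class, with column 0 the background
        # class, which is why the class loop below starts at c = 1.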
img_id = sample['img_id']\n h, w = sample['image_shape']\n\n final_boxes = []\n final_label = []\n final_score = []\n\n for c in range(1, num_classes):\n class_box_scores = box_scores[:, c]\n score_mask = class_box_scores > config.min_score\n class_box_scores = class_box_scores[score_mask]\n class_boxes = pred_boxes[score_mask] * [h, w, h, w]\n\n if score_mask.any():\n nms_index = apply_nms(class_boxes, class_box_scores, config.nms_threshold, config.max_boxes)\n class_boxes = class_boxes[nms_index]\n class_box_scores = class_box_scores[nms_index]\n\n final_boxes += class_boxes.tolist()\n final_score += class_box_scores.tolist()\n final_label += [c] * len(class_box_scores)\n\n for loc, label, score in zip(final_boxes, final_label, final_score):\n cls_img_ids[val_cls[label]].append(img_id)\n cls_bboxes[val_cls[label]].append([loc[1], loc[0], loc[3], loc[2]])\n cls_scores[val_cls[label]].append(score)\n return classes, cls_img_ids, cls_bboxes, cls_scores, cls_npos", "def iteration(self):\n T = self.generate_T()\n R = self.reproduce(T)\n self.P = self.choose_mi_best(R)\n #print(self.P)", "def kmeans_006():\n n_centroids_vals = [1000, 2000, 2500, 3000]\n scores = []\n\n for n_centroids in n_centroids_vals:\n s = 15\n crop = 150\n n_patches = 400000\n rf_size = 5\n logger.info(\"Training with n_centroids {}\".format(n_centroids))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n test_x_crop_scale = CropScaleImageTransformer(training=False,\n result_path='data/data_test_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_006_centroids_{}'.format(n_centroids),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n\n patches = patch_extractor.transform(images)\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_006_centroids_{}.npy'.format(n_centroids), memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n\n score = (n_centroids, wrapper.cv_scores)\n logger.info(\"Scores: {}\".format(score))\n scores.append(score)\n\n del wrapper\n gc.collect()", "def run(self):\n self.errList = []\n self.initialize()\n\n for i in range(self.maxiter):\n # update U\n for k in range(self.K):\n [p, sigma, q] = np.linalg.svd(self.H.dot(self.S[:, :, k]).dot(self.V.T).dot(self.X[k].T),\n full_matrices=False)\n self.U[k] = q.T.dot(p.T)\n self.U[k] = self.U[k].real\n\n # calculate temporal variable y\n y = np.zeros([self.rank, self.L, self.K])\n for k in range(self.K):\n y[:, :, k] = self.U[k].T.dot(self.X[k])\n\n # get H, V, and temps by running a single iteration of CP_ALS\n if i == 0:\n [cp, rec] = pyten.method.cp_als(pyten.tenclass.Tensor(y),\n self.rank, tol=self.tol, maxiter=1, printitn=0)\n else:\n [cp, rec] = pyten.method.cp_als(pyten.tenclass.Tensor(y),\n self.rank, tol=self.tol, maxiter=1,\n init=[self.H, self.V, temps], 
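                # (added note) warm start: from the second sweep onward,
                # CP-ALS is seeded with the H, V factors and diagonal
                # scalings from the previous outer iteration, so maxiter=1
                # performs a single ALS refinement per PARAFAC2 step.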
printitn=0)\n self.H = cp.Us[0]\n self.V = cp.Us[1]\n temps = cp.Us[2].dot(np.diag(cp.lmbda))\n\n # update S\n for k in range(self.K):\n self.S[:, :, k] = np.diag(temps[k, :])\n\n # checking the stop criteria\n # error = 0\n for k in range(self.K):\n temp = self.U[k].dot(self.H).dot(self.S[:, :, k]).dot(self.V.T)\n self.sigma_new += np.linalg.norm(temp - self.X[k]) ** 2\n\n error = abs(self.sigma_new - self.sigma_old) #/ self.sigma_old\n self.errList.append(error)\n if (i + 1) % self.printitn == 0:\n print 'PARAFAC2: iterations={0}, difference={1}, fit_difference={2}'.format(i + 1, self.errList[-1],\n self.sigma_new)\n elif error < self.tol:\n print 'PARAFAC2: iterations={0}, difference={1}, fit_difference={2}'.format(i + 1, self.errList[-1],\n self.sigma_new)\n\n if error < self.tol:\n break\n else:\n self.sigma_old = self.sigma_new\n self.sigma_new = 0\n\n for k in range(self.K):\n self.fit[k] = self.U[k].dot(self.H).dot(self.S[:, :, k]).dot(self.V.T)", "def main():\n feature_extraction_model = \"HOG\"\n # feature_extraction_models = [\"CM\", \"HOG\"]\n feature_extraction_model_1 = \"CM\"\n dimension_reduction_model = \"PCA\"\n k_value = 10\n dim_k_value = 40\n # K_value = 20\n # lab_folder = \"Dataset3/Labelled/Set1\"\n # unlab_folder = \"Dataset3/Unlabelled/Set 2\"\n lab_folder = get_input_folder(\"Labelled Folder\")\n unlab_folder = get_input_folder(\"Classify\")\n start = time.time()\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab = dim_red.get_object_feature_matrix()\n features_list_lab = np.array(obj_feat_lab['featureVector'].tolist())\n images_list_lab = np.array(obj_feat_lab['imageId'])\n # filtering the labelled set\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab = dim_red.get_object_feature_matrix()\n features_list_unlab = np.array(obj_feat_unlab['featureVector'].tolist())\n images_list_unlab = np.array(obj_feat_unlab['imageId'])\n\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab_1 = dim_red.get_object_feature_matrix()\n features_list_lab_1 = np.array(obj_feat_lab_1['featureVector'].tolist())\n # images_list_lab = np.array(obj_feat_lab_1['imageId'])\n # filtering the labelled set\n\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab_1 = dim_red.get_object_feature_matrix()\n features_list_unlab_1 = np.array(obj_feat_unlab_1['featureVector'].tolist())\n # images_list_unlab = np.array(obj_feat_unlab['imageId'])\n features_list_lab = np.concatenate((features_list_lab, features_list_lab_1), axis=1)\n features_list_unlab = np.concatenate((features_list_unlab, features_list_unlab_1), axis=1)\n\n # 
================================================================================================================\n\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n features_list = np.concatenate((features_list_lab, features_list_unlab))\n images_list = np.concatenate((images_list_lab, images_list_unlab))\n images_list = list(images_list)\n # Finding Similarity Matrix\n cos_sim = cosine_similarity(features_list)\n sim_graph = np.empty((0, len(cos_sim)))\n for row in cos_sim:\n k_largest = np.argsort(-np.array(row))[1:k_value + 1]\n sim_graph_row = [d if i in k_largest else 0 for i, d in enumerate(row)]\n sim_graph = np.append(sim_graph, np.array([sim_graph_row]), axis=0)\n\n row_sums = sim_graph.sum(axis=1)\n sim_graph = sim_graph / row_sums[:, np.newaxis]\n idx = 0\n results_dorsal = ppr(sim_graph, images_list, dorsal_list)\n results_palmar = ppr(sim_graph, images_list, palmar_list)\n final_results = {}\n\n for img in images_list_unlab:\n if results_dorsal[img] < results_palmar[img]:\n final_results[img] = \"dorsal\"\n else:\n final_results[img] = \"palmar\"\n\n actual_labels = fetch_actual_labels(images_list_unlab)\n print(\"Classification\")\n no_correct = 0\n correctly_classified = []\n incorrectly_classified = []\n print(\"| ImageId | Prediction | Actual |\")\n for r in final_results:\n print(\"| {} | {} | {} |\".format(r, final_results[r], actual_labels[r]))\n if final_results[r] == actual_labels[r]:\n correctly_classified.append(r)\n no_correct += 1\n else:\n incorrectly_classified.append(r)\n\n print(\"Correctly classified: {}\\n\".format(correctly_classified))\n print(\"InCorrectly classified: {}\\n\".format(incorrectly_classified))\n\n print(\"Classification Accuracy: {}%\".format(no_correct / len(images_list_unlab) * 100))\n print(\"Execution time: {} seconds\".format(time.time() - start))", "def iterate(rk):\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk", "def miniBatchKSVRG(loss,X,C,y,yC,kernel,la,Nepochs,mratio=2,tau = None,Kmax = 1,option = 1,om1 = None,memToUse = None,useGPU = None,cobj = dp.cobjK()):\n cobj.start()\n \n ################################################################################################\n #Creating Kernel matrices and functions\n ################################################################################################\n \n n = X.size(0)\n m = C.size(0)\n d = X.size(1)\n if isinstance(useGPU,type(None)):\n useGPU = torch.cuda.is_available()\n if useGPU :\n torch.cuda.empty_cache()\n if isinstance(memToUse,type(None)):\n memToUse = 0.9*psutil.virtual_memory().available\n print(\"no memory limit specified. 
At most {} GB of \\\n RAM will be used\".format(memToUse/10**9))\n \n factKnmP, kern, freeDoubles, freeGPU = nm.computeMemory(memToUse, kernel, d, n, m, useGPU)\n \n print(\"there is {} GiB free on the GPU \".format(freeGPU*8/1024**3))\n \n T = nm.createT(kern, C, freeGPU)\n cholT,cholTt = lambda x : nm.tr_solve(x,T,freeGPU),\\\n lambda x: nm.tr_solve(x,T,freeGPU,transpose = True)\n \n KnmP = factKnmP(X,C)\n \n l_fun,l_grad = l_fg(loss,n)\n KnmP_fun,KnmP_grad = lambda u,lobj : KnmP(u,l_fun,lobj), \\\n lambda u,lobj : KnmP(u,l_grad,lobj), \\\n \n \n \n ################################################################################################\n #Setting parameters of the method \n ################################################################################################\n \n #batch size\n if isinstance(tau,type(None)):\n tau = m\n \n #number of iterations (divide by batch size)\n niterBatch = (mratio*n)//tau + 1\n print(\"--- m = {}, tau = {} ---\".format(m,tau))\n \n #Smoothness constant\n if isinstance(loss.Lmax,type(None)):\n Lmax = Kmax\n else:\n Lmax = loss.Lmax*Kmax\n \n #om1 and om2, parameters of Katyusha acceleration\n om2 = 1/(2*tau)\n if isinstance(om1,type(None)):\n if m >= tau:\n om1 = float(min(np.sqrt((8*la*m*tau)/(3*Lmax)),1)*om2)\n else:\n om1 = float(min(np.sqrt((2*la)/(3*Lmax)),1/(2*m)))\n \n #Stepsize \n eta = 1/(3*om1*Lmax)\n \n #Theta\n theta = 1 + min(eta*la,1/(4*m))\n \n cobj.keepInfo(loss,X,C,y,yC,kernel,la,freeDoubles,freeGPU,cholT,KnmP_fun,mratio,om1,om2,niterBatch)\n \n beta_prev = torch.zeros(m,1, dtype = dtype)\n x = torch.zeros(m,1,dtype = dtype)\n z = torch.zeros(m,1, dtype = dtype)\n yy = torch.zeros(m,1, dtype = dtype)\n \n for epoch in range(Nepochs):\n \n cobj.cbIterates(beta_prev,yy)\n \n #Computing big gradient \n lobj = [y,torch.zeros(n,1,dtype = dtype)]\n grad = cholTt(KnmP_grad(cholT(beta_prev),lobj))\n d_stock = lobj[1]\n \n beta = torch.zeros(m,1,dtype = dtype)\n for t in range(niterBatch):\n S = np.random.choice(n,tau,replace = True)\n x = om1*z + om2*beta_prev + (1-om1-om2)*yy\n \n KtaumP = factKnmP(X[S,:],C)\n l_grad_tau = lgtau(loss,tau)\n KtaumP_grad = lambda u,lobj : KtaumP(u,l_grad_tau,lobj) \n \n \n lobjS = [y[S,:],d_stock[S,:]]\n grad_proxy = cholTt(KtaumP_grad(cholT(x),lobjS)) + grad\n\n dz = (1/(1 + la*eta))*(z - eta*grad_proxy) - z\n if option == 1:\n yy = (1/(1+la/(3*Lmax)))*(x - (1/(3*Lmax))*grad_proxy)\n if option == 2:\n yy = x + om1*dz\n z = z+dz\n \n beta = (theta - 1)*((theta**t)/(theta**(t+1) - 1))*yy + (theta**t - 1)/(theta**(t+1) - 1) * beta\n \n beta_prev = beta\n \n cobj.cbIterates(beta_prev,yy)\n \n alpha = makeFinal(om1,om2,niterBatch,beta_prev,yy)\n return cholT(alpha)", "def run(self) -> None:\n\t\tprint(\"Running...\\n\")\n\t\tfor count, observation in enumerate(self.observationProduct):\n\t\t\tH = np.array([[int(n == m) for n in range(3)] for m in range(3) if observation[m]])\n\t\t\tprinted = -1\t\t\n\t\t\tprint(\"Generating with observation: \" + str(observation))\n\n\t\t\tfor t in range(self.iterations):\n\t\t\t\tif int(100*(t+1)/self.iterations)%5==0 and int(100*(t+1)/self.iterations) != printed:\n\t\t\t\t\tprint(str(t+1) + \"/\" + str(self.iterations) + \" \" + str(int(100*(t+1)/self.iterations)) + \"%\")\n\t\t\t\t\tprinted = int(100*(t+1)/self.iterations)\n\n\t\t\t\ty = H @ self.truth[t]\n\t\t\t\tself.kalmanFilter.predict(self.beta)\n\t\t\t\tself.kalmanFilter.update(y, H, self.alpha)\n\n\t\t\t\tself.states[t] = self.kalmanFilter.m_hat\n\t\t\t\tself.covariances[t] = 
self.kalmanFilter.C_hat\n\t\t\tself.RMSE[count] = RMSE(self.states, self.truth)\n\t\t\n\t\t\tprint(\"\\nDone!\")\n\t\t\n\t\t\t# plotting\n\t\t\tvarMap = {0: \"x\", 1: \"y\", 2: \"z\"} \t\t\t\n\n\t\t\tfig, axes = plt.subplots(5, sharex=True, gridspec_kw={\"hspace\": 0.25})\n\t\t\tuncertainties = np.zeros((self.iterations, 3))\n\t\t\tfor i in range(3):\n\t\t\t\tpredictions = self.states[:,i]\n\n\t\t\t\tselector = np.array([int(i==j) for j in range(3)])\n\t\t\t\tuncertainty = np.array([selector.T @ cov @ selector for cov in self.covariances])\n\t\t\t\n\t\t\t\tfor j in range(self.iterations):\n\t\t\t\t\tuncertainties[j][i] = uncertainty[j]\n\t\t\t\t\n\t\t\t\taxes[i].plot(self.t, self.truth[:,i], c=\"cyan\", linewidth=4, label=\"truth\")\n\t\t\t\taxes[i].plot(self.t, predictions, \".\", ms=6, c=\"red\", label=\"prediction\")\n\n\t\t\t\taxes[i].title.set_text(\"Component: \" + varMap[i])\n\t\t\t\taxes[i].set_ylabel(\"f(t)\")\n\t\t\t\taxes[i].legend(loc=\"upper right\")\n\t\t\t\taxes[i].grid()\t\n\t\t\t\n\t\t\terror = np.array([dist(self.states[i], self.truth[i]) for i in range(self.iterations)])\n\t\t\tuncertainty = np.array([dist(np.zeros(3), uncertainties[i]) for i in range(self.iterations)])\n\n\t\t\taxes[3].plot(self.t, error, c=\"green\", linewidth=2.5)\n\t\t\taxes[3].set_ylabel(\"2-Norm\")\n\t\t\taxes[3].title.set_text(\"Error\")\n\t\t\taxes[3].grid()\n\n\t\t\taxes[4].plot(self.t, uncertainty, c=\"magenta\", linewidth=2.5)\n\t\t\taxes[4].set_xlabel(\"Time\")\n\t\t\taxes[4].set_ylabel(\"2-Norm\")\n\t\t\taxes[4].title.set_text(\"Uncertainty\")\n\t\t\taxes[4].grid()\n\n\t\t\tplt.tight_layout()\t\n\t\t\tfig.set_size_inches(15, 15)\n\t\t\t#plt.savefig(\"figures/\" + str(observation[0]) + str(observation[1]) + str(observation[2]), bbox_inches=\"tight\")\t\t\n\t\n\t\t\tself.kalmanFilter = EnKF(self.lorenz, self.y0, self.gamma, members=self.members)\n\t\tdata = {}\n\t\tfor key, val in zip(self.observationProduct, self.RMSE):\n\t\t\ttemp = tuple([varMap[c] for c, val in enumerate(key) if val])\n\t\t\tind = \"(\"\n\t\t\tfor el in temp:\n\t\t\t\tind+=el+\", \"\n\t\t\tind = ind[:-2]\n\t\t\tind+=\")\"\n\t\t\tdata[ind] = val \n\t\tkeys = np.array(list(data.keys()))\n\t\tvalues = np.array(list(data.values())) \n\n\t\torder = values.argsort()\n\t\tvalues = values[order]\n\t\tkeys = keys[order]\n\t\t \n\t\tfig = plt.figure(figsize = (10, 5)) \n\t\tplt.bar(keys, values, color=\"red\", width = 0.4) \n\t\t \n\t\tplt.xlabel(\"Observations\") \n\t\tplt.ylabel(\"RMSE\") \n\t\tplt.grid()\n\t\tplt.title(\"RMSE vs Variables Observed\") \n\n\t\tself.plot()\n\t\tplt.show()\n\t\t#plt.savefig(\"figures/\" + \"RMSE\", bbox_inches=\"tight\")", "def evaluate_iterations(predictions, gold, result_collector, condition):\n for iteration_id, texts in predictions.items():\n texts_in_iteration = sorted(texts)\n gold_trees = [gold[tid] for tid in texts_in_iteration]\n pred_trees = [texts[tid] for tid in texts_in_iteration]\n for level, scores in eval_prediction(gold_trees, pred_trees):\n result_collector.add_result(condition, iteration_id, level, scores)", "def resnet50():\n\n X = K.Input(shape=(224, 224, 3))\n init = K.initializers.he_normal(seed=None)\n\n conv1 = K.layers.Conv2D(\n filters=64,\n kernel_size=(\n 7,\n 7),\n padding='same',\n strides=2,\n kernel_initializer=init)(X)\n\n bn1 = K.layers.BatchNormalization(axis=3)(conv1)\n\n activation1 = K.layers.Activation('relu')(bn1)\n\n maxpool1 = K.layers.MaxPooling2D(\n pool_size=(\n 3, 3), strides=(\n 2, 2), padding='same',)(activation1)\n\n Projection1 = 
projection_block(maxpool1, [64, 64, 256], s=1)\n IdenBlock1 = identity_block(Projection1, [64, 64, 256])\n IdenBlock2 = identity_block(IdenBlock1, [64, 64, 256])\n\n Projection2 = projection_block(IdenBlock2, [128, 128, 512])\n IdenBlock3 = identity_block(Projection2, [128, 128, 512])\n IdenBlock4 = identity_block(IdenBlock3, [128, 128, 512])\n IdenBlock5 = identity_block(IdenBlock4, [128, 128, 512])\n\n Projection3 = projection_block(IdenBlock5, [256, 256, 1024])\n IdenBlock6 = identity_block(Projection3, [256, 256, 1024])\n IdenBlock7 = identity_block(IdenBlock6, [256, 256, 1024])\n IdenBlock8 = identity_block(IdenBlock7, [256, 256, 1024])\n IdenBlock9 = identity_block(IdenBlock8, [256, 256, 1024])\n IdenBlock10 = identity_block(IdenBlock9, [256, 256, 1024])\n\n Projection4 = projection_block(IdenBlock10, [512, 512, 2048])\n IdenBlock11 = identity_block(Projection4, [512, 512, 2048])\n IdenBlock12 = identity_block(IdenBlock11, [512, 512, 2048])\n\n avgpool = K.layers.AveragePooling2D(\n pool_size=(\n 1, 1), strides=(\n 7, 7), padding='same',)(IdenBlock12)\n\n SoftMax = K.layers.Dense(\n units=1000,\n kernel_initializer=init,\n activation='softmax',\n )(avgpool)\n\n Keras = K.Model(inputs=X, outputs=SoftMax)\n\n return Keras", "def algorithm_loop(self):", "def run_kohonen_dynamicLearningRate(data,fun,size_k: int=6, eta: float=0.1, tmax: int=5000, convergence=0):\n dim = 28*28\n data_range = 255.0\n dy, dx = data.shape\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n #error for convergence criterion\n error = [np.inf]\n\n for t, i in enumerate(i_random):\n old_centers = copy(centers)\n sigma = fun(t)\n som_step(centers, data[i,:],neighbor,eta,sigma)\n \n if t % 1E4 == 0:\n print('iteration {}'.format(t))\n \n if convergence == 1:\n #convergence: distance between samples and best matching prototypes \n error.append(calculate_error(centers,data))\n# if np.abs((error[-2]-error[-1])/error[1]) < eps :\n# break\n \n elif convergence == 2:\n #convergence: non significant weight update\n err = np.linalg.norm(centers-old_centers)\n error.append(err)\n# if err < eps_2:\n# break\n\n \"\"\" # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw() \"\"\"\n return centers, error[1:]", "def run_inference_true_path(self, r, xr, yr):\n self.tc.reset()\n\n em_data = {}\n\n print('Running Image Optimization using True Eye Path\\n')\n\n for u in range(self.n_itr):\n t0 = self.n_t * u / self.n_itr\n tf = self.n_t * (u + 1) / self.n_itr\n print('Iteration: {} | Running up to time {}'.format(u, tf))\n\n self.run_m_true_path(t0, tf, r, xr, yr, n_g_itr=self.n_g_itr)\n\n iteration_data = {\n 'time_steps': tf,\n 'image_est': self.tc.image_est(),\n 'coeff_est': self.tc.get_A()}\n\n em_data[u] = iteration_data\n em_data['mode'] = 'path_given'\n\n if self.save_mode:\n self.data['EM_data'] = em_data", "def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + 
V[:, :q] @ y\n return x", "def report_result(args, model, imgs, poses, hwf, bound):\n ray_origins, ray_directions = get_rays_shapenet(hwf, poses)\n\n view_psnrs = []\n for img, rays_o, rays_d in zip(imgs, ray_origins, ray_directions):\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n t_vals, xyz = sample_points(rays_o, rays_d, bound[0], bound[1],\n args.num_samples, perturb=False)\n \n synth = []\n num_rays = rays_d.shape[0]\n with torch.no_grad():\n for i in range(0, num_rays, args.test_batchsize):\n rgbs_batch, sigmas_batch = model(xyz[i:i+args.test_batchsize])\n color_batch = volume_render(rgbs_batch, sigmas_batch,\n t_vals[i:i+args.test_batchsize],\n white_bkgd=True)\n synth.append(color_batch)\n synth = torch.cat(synth, dim=0).reshape_as(img)\n error = F.mse_loss(img, synth)\n psnr = -10*torch.log10(error)\n view_psnrs.append(psnr)\n \n scene_psnr = torch.stack(view_psnrs).mean()\n return scene_psnr", "def perform_eval(model, k=100, dataset_name='PIR', batch_size=256):\n with tf.device('/gpu:0'):\n\n logging.info('=== Reading Datasets ===')\n if dataset_name == 'PIR':\n file_pattern_query = 'Insert-Path-To-Query-Dataset'\n file_pattern_index = 'Insert-Path-To-Index-Dataset'\n elif dataset_name == 'copydays10k':\n file_pattern_query = 'Insert-Path-To-Query-Dataset'\n file_pattern_index = 'Insert-Path-To-Index-Dataset'\n elif dataset_name == 'copydays10k-strong':\n file_pattern_query = 'Insert-Path-To-Query-Dataset'\n file_pattern_index = 'Insert-Path-To-Index-Dataset'\n\n query_df, query_embeddings = get_df_emb(\n model,\n read_dataset(\n file_pattern_query,\n dataset_name=dataset_name,\n batch_size=batch_size),\n dataset_name=dataset_name)\n index_df, index_embeddings = get_df_emb(\n model,\n read_dataset(\n file_pattern_index,\n dataset_name=dataset_name,\n batch_size=batch_size),\n dataset_name=dataset_name)\n\n logging.info('=== Reading Datasets DONE ===')\n # Performn exact knn search\n logging.info('=== KNN Search ===')\n knn_search = query_embeddings @ index_embeddings.T\n retrieved_rank, similarities = knn_search.argsort()[:, -k:], np.sort(\n knn_search)[:, -k:]\n # sorting on desceding order, so index 0 is the nearsted item from the query\n retrieved_rank = np.flip(retrieved_rank, axis=1)\n similarities = np.flip(similarities, axis=1)\n\n def rank_relevance(qid, retrieved_rank):\n \"\"\"Create Rank Relevenace.\"\"\"\n rank = index_df.iloc[retrieved_rank]\n relevant_ids = rank[rank['image_name'] == query_df.iloc[qid]\n ['relevant']].values\n\n gnd = np.zeros(len(rank), dtype='int')\n for index, tp in enumerate(rank.image_name):\n if tp in relevant_ids:\n gnd[index] = 1\n return gnd\n\n # Identify the relevant items from the retrieved rank\n y_true = [\n rank_relevance(qid, retrieved_rank[qid])\n for qid in range(len(query_df))\n ]\n y_true = np.array(y_true)\n\n # Calculate the uAP\n precision, recall, _ = precision_recall(y_true, similarities, len(y_true))\n # pylint: disable=invalid-name\n uAP = average_precision(recall, precision)\n\n # Calculate the mAP\n mAP = 0\n for rank, rename_me in enumerate(y_true):\n rank_true = rename_me\n rank_sim = similarities[rank]\n p, r, _ = precision_recall(rank_true, rank_sim, 1)\n mAP += average_precision(r, p)\n\n mAP /= len(y_true)\n # pylint: enable=invalid-name\n\n # Calculate recalls\n y_cumsum_true = y_true.cumsum(axis=1)\n r_1 = y_true[:, 0].sum() / len(y_true)\n r_10 = y_cumsum_true[:, 9].sum() / len(y_true)\n r_100 = y_cumsum_true[:, 99].sum() / len(y_true)\n\n return uAP, mAP, r_1, r_10, r_100", "def 
gmres_wrapper(jax: types.ModuleType):\n jnp = jax.numpy\n\n def gmres_m(A_mv: Callable, A_args: Sequence,\n b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,\n atol: float, num_krylov_vectors: int,\n maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:\n \"\"\"\n Solve A x = b for x using the m-restarted GMRES method. This is\n intended to be called via jax_backend.gmres.\n\n Given a linear mapping with (n x n) matrix representation\n A = A_mv(*A_args) gmres_m solves\n Ax = b (1)\n where x and b are length-n vectors, using the method of\n Generalized Minimum RESiduals with M iterations per restart (GMRES_M).\n\n Args:\n A_mv: A function v0 = A_mv(v, *A_args) where v0 and v have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The b in A @ x = b.\n x0: Initial guess solution.\n tol, atol: Solution tolerance to achieve,\n norm(residual) <= max(tol * norm(b), atol).\n tol is also used to set the threshold at which the Arnoldi factorization\n terminates.\n num_krylov_vectors: Size of the Krylov space to build at each restart.\n maxiter: The Krylov space will be repeatedly rebuilt up to this many\n times.\n Returns:\n x: The approximate solution.\n beta: Norm of the residual at termination.\n n_iter: Number of iterations at termination.\n converged: Whether the desired tolerance was achieved.\n \"\"\"\n num_krylov_vectors = min(num_krylov_vectors, b.size)\n x = x0\n b_norm = jnp.linalg.norm(b)\n tol = max(tol * b_norm, atol)\n for n_iter in range(maxiter):\n done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol,\n b_norm)\n if done:\n break\n return x, beta, n_iter, done\n\n def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray,\n tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]:\n \"\"\"\n A single restart of GMRES.\n\n Args:\n A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and\n `v` have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The `b` in `A @ x = b`.\n x: Initial guess solution.\n tol: Solution tolerance to achieve,\n num_krylov_vectors : Size of the Krylov space to build.\n Returns:\n done: Whether convergence was achieved.\n beta: Magnitude of residual (i.e. 
the error estimate).\n x: The approximate solution.\n \"\"\"\n r, beta = gmres_residual(A_mv, A_args, b, x)\n k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors,\n x0, r, beta, tol, b_norm)\n x = gmres_update(k, V, R, beta_vec, x0)\n done = k < num_krylov_vectors - 1\n return done, beta, x\n\n @jax.jit\n def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray,\n x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]:\n \"\"\"\n Computes the residual vector r and its norm, beta, which is minimized by\n GMRES.\n\n Args:\n A_mv: A function v0 = A_mv(v, *A_args) where v0 and\n v have the same shape.\n A_args: A list of positional arguments to A_mv.\n b: The b in A @ x = b.\n x: Initial guess solution.\n Returns:\n r: The residual vector.\n beta: Its magnitude.\n \"\"\"\n r = b - A_mv(x, *A_args)\n beta = jnp.linalg.norm(r)\n return r, beta\n\n def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n \"\"\"\n Updates the solution in response to the information computed by the\n main GMRES loop.\n\n Args:\n k: The final iteration which was reached by GMRES before convergence.\n V: The Arnoldi matrix of Krylov vectors.\n R: The R factor in H = QR where H is the Arnoldi overlap matrix.\n beta_vec: Stores the Givens factors used to map H into QR.\n x0: The initial guess solution.\n Returns:\n x: The updated solution.\n \"\"\"\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x\n\n @functools.partial(jax.jit, static_argnums=(2,))\n def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Builds the Arnoldi decomposition of (A, v), where v is the normalized\n residual of the current solution estimate. The decomposition is\n returned as V, R, where V is the usual matrix of Krylov vectors and\n R is the upper triangular matrix in H = QR, with H the usual matrix\n of overlaps.\n\n Args:\n A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and\n `v` have the same shape.\n A_args: A list of positional arguments to A_mv.\n n_kry: Size of the Krylov space to build; this is called\n num_krylov_vectors in higher level code.\n x0: Guess solution.\n r: Residual vector.\n beta: Magnitude of r.\n tol: Solution tolerance to achieve.\n b_norm: Magnitude of b in Ax = b.\n Returns:\n k: Counts the number of iterations before convergence.\n V: The Arnoldi matrix of Krylov vectors.\n R: From H = QR where H is the Arnoldi matrix of overlaps.\n beta_vec: Stores Q implicitly as Givens factors.\n \"\"\"\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. 
\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # jax.lax.while_loop repeatedly applies gmres_krylov_work to the carry\n # until gmres_krylov_loop_condition reports convergence, or until the\n # Krylov space is exhausted.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)\n\n VarType = Tuple[int, jax.ShapedArray, jax.ShapedArray, jax.ShapedArray,\n float, jax.ShapedArray]\n ConstType = Tuple[float, Callable, Sequence, jax.ShapedArray, int]\n GmresCarryType = Tuple[VarType, ConstType]\n\n @jax.jit\n def gmres_krylov_loop_condition(gmres_carry: GmresCarryType) -> bool:\n \"\"\"\n This function dictates whether the main GMRES while loop will proceed.\n It is equivalent to:\n if k < n_kry and err >= tol:\n return True\n else:\n return False\n where k, n_kry, err, and tol are unpacked from gmres_carry.\n\n Args:\n gmres_carry: The gmres_carry from gmres_krylov.\n Returns:\n (bool): Whether to continue iterating.\n \"\"\"\n # The carry is packed as (variables, constants); unpack in that order.\n gmres_variables, gmres_constants = gmres_carry\n tol = gmres_constants[0]\n k = gmres_variables[0]\n err = gmres_variables[4]\n n_kry = gmres_constants[4]\n\n def is_iterating(k, n_kry):\n return k < n_kry\n\n def not_converged(args):\n err, tol = args\n return err >= tol\n return jax.lax.cond(is_iterating(k, n_kry), # Predicate.\n not_converged, # Called if True.\n lambda x: False, # Called if False.\n (err, tol)) # Arguments to calls.
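\n\n # Editorial sketch (added in editing; illustrative only, never called by\n # the solver): the nested jax.lax.cond above emulates a short-circuit\n # `and`, since a Python `and` on traced values cannot be used under jit.\n # An eager-Python reference of the same predicate reads:\n def _loop_condition_reference(k, n_kry, err, tol):\n  \"\"\"Hypothetical helper for exposition; mirrors the cond above.\"\"\"\n  return (k < n_kry) and (err >= tol)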
\n\n @jax.jit\n def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:\n \"\"\"\n Performs a single iteration of gmres_krylov. See that function for a more\n detailed description.\n\n Args:\n gmres_carry: The gmres_carry from gmres_krylov.\n Returns:\n gmres_carry: The updated gmres_carry.\n \"\"\"\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n tol, A_mv, A_args, b_norm, _ = gmres_constants\n\n V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)\n R_col, givens = apply_givens_rotation(H[:, k], givens, k)\n R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])\n\n # Update the residual vector.\n cs, sn = givens[:, k] * beta_vec[k]\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)\n err = jnp.abs(sn) / b_norm\n gmres_variables = (k + 1, V, R, beta_vec, err, givens)\n return (gmres_variables, gmres_constants)\n\n @jax.jit\n def _gs_step(r: jax.ShapedArray,\n v_i: jax.ShapedArray) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Performs one iteration of the stabilized Gram-Schmidt procedure, with\n r to be orthonormalized against {v} = {v_0, v_1, ...}.\n\n Args:\n r: The new vector which is not in the initially orthonormal set.\n v_i: The i'th vector in that set.\n Returns:\n r_i: The updated r, now orthogonal to v_i.\n h_i: The overlap of r with v_i.\n \"\"\"\n h_i = jnp.vdot(v_i, r)\n r_i = r - h_i * v_i\n return r_i, h_i\n\n @jax.jit\n def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence,\n V: jax.ShapedArray, H: jax.ShapedArray,\n tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Performs the kth iteration of the Arnoldi reduction procedure.\n Args:\n k: The current iteration.\n A_mv, A_args: A function A_mv(v, *A_args) performing a linear\n transformation on v.\n V: A matrix of size (n, K + 1), K > k such that each column in\n V[n, :k+1] stores a Krylov vector and V[:, k+1] is all zeroes.\n H: A matrix of size (K, K), K > k with H[:, k] all zeroes.\n Returns:\n V, H: With their k'th columns respectively filled in by a new\n orthogonalized Krylov vector and new overlaps.\n \"\"\"\n v = A_mv(V[:, k], *A_args)\n v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T)\n v_norm = jnp.linalg.norm(v_new)\n # Normalize v, unless it is the zero vector (within tol); then keep zero.\n r_new = jax.lax.cond(v_norm > tol,\n lambda x: x[0] / x[1],\n lambda x: 0.*x[0],\n (v_new, v_norm)\n )\n H = jax.ops.index_update(H, jax.ops.index[:, k], H_k)\n H = jax.ops.index_update(H, jax.ops.index[k+1, k], v_norm)\n V = jax.ops.index_update(V, jax.ops.index[:, k+1], r_new)\n return V, H\n\n####################################################################\n# GIVENS ROTATIONS\n####################################################################\n @jax.jit\n def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> jax.ShapedArray:\n \"\"\"\n Successively applies each of the rotations stored in givens to H_col.\n\n Args:\n H_col : The vector to be rotated.\n givens: 2 x K, K > k matrix of rotation factors.\n k : Iteration number.\n Returns:\n H_col : The rotated vector.\n \"\"\"\n rotation_carry = (H_col, 0, k, givens)\n\n def loop_condition(carry):\n i = carry[1]\n k = carry[2]\n return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)\n\n def apply_ith_rotation(carry):\n H_col, i, k, givens = carry\n cs = givens[0, i]\n sn = givens[1, i]\n H_i = cs * H_col[i] - sn * H_col[i + 1]\n H_ip1 = sn * H_col[i] + cs * H_col[i + 1]\n H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i)\n H_col = jax.ops.index_update(H_col, 
jax.ops.index[i + 1], H_ip1)\n return (H_col, i + 1, k, givens)\n\n rotation_carry = jax.lax.while_loop(loop_condition,\n apply_ith_rotation,\n rotation_carry)\n H_col = rotation_carry[0]\n return H_col\n\n @jax.jit\n def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n \"\"\"\n Applies the Givens rotations stored in givens to the vector\n H_col. Then constructs a new Givens rotation that eliminates H_col's\n k'th element, yielding the corresponding column of the R in H's QR\n decomposition. Returns the new column of R along with the new Givens\n factors.\n\n Args:\n H_col : The column of H to be rotated.\n givens: A matrix representing the cosine and sine factors of the\n previous GMRES Givens rotations, in that order\n (i.e. givens[0, :] -> the cos factor).\n k : Iteration number.\n Returns:\n R_col : The column of R obtained by transforming H_col.\n givens_k: The new elements of givens that zeroed out the k+1'th element\n of H_col.\n \"\"\"\n # This call successively applies each of the\n # Givens rotations stored in givens[:, :k] to H_col.\n H_col = apply_rotations(H_col, givens, k)\n\n cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1])\n givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k)\n givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k)\n\n r_k = cs_k * H_col[k] - sn_k * H_col[k + 1]\n R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k)\n R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.)\n return R_col, givens\n\n @jax.jit\n def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n \"\"\"\n Given scalars v1 and v2, computes cs = cos(theta) and sn = sin(theta)\n so that [cs -sn] @ [v1] = [r]\n [sn cs] [v2] [0]\n Args:\n v1, v2: The scalars.\n Returns:\n cs, sn: The rotation factors.\n \"\"\"\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn\n\n fnames = [\n \"gmres_m\", \"gmres_residual\", \"gmres_krylov\", \"gs_step\",\n \"kth_arnoldi_step\", \"givens_rotation\"\n ]\n functions = [\n gmres_m, gmres_residual, gmres_krylov, _gs_step, kth_arnoldi_step,\n givens_rotation\n ]\n\n class Functions:\n\n def __init__(self, fun_dict):\n self.dict = fun_dict\n\n def __getattr__(self, name):\n return self.dict[name]\n\n return Functions(dict(zip(fnames, functions)))
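", "# Editorial sketch (added in editing; not part of the original corpus).\n# It demonstrates, in plain numpy, the rotation convention used by\n# givens_rotation and apply_givens_rotation above: the factors cs = v1/t,\n# sn = -v2/t rotate the pair (v1, v2) onto (t, 0).\ndef givens_rotation_demo():\n import numpy as np\n v1, v2 = 3.0, 4.0\n t = np.sqrt(v1**2 + v2**2) # 5.0\n cs, sn = v1 / t, -v2 / t # same factors givens_rotation(v1, v2) returns\n r = cs * v1 - sn * v2 # -> 5.0, the surviving magnitude\n zeroed = sn * v1 + cs * v2 # -> 0.0, the eliminated component\n return r, zeroed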
", "def compute_residuals(r):\n global conv_residuals\n conv_residuals.append(r)\n return", "def evaluate(self, batch):\n images, labels, projs, planes = [], [], [], []\n for serialized in batch:\n example = tf.train.Example.FromString(serialized)\n image, label = self.encoder.parse_example(example)\n images.append(image)\n labels.append(label)\n proj, _ = self.encoder.parse_camera(example)\n projs.append(proj)\n plane = self.encoder.parse_plane(example)\n planes.append(plane)\n\n\n #pred = self.model.predict(np.asarray(images), batch_size=len(batch))\n results = self.predict(np.asarray(images), batch_size=len(batch))\n \n # Creating some fake results for testing, as well as an example of what\n # the results should look like.\n # results = []\n # for label in labels:\n # instances = label['2d_instance']\n # instances_3d = label['3d_instance']\n # boxes = []\n # for i in range(len(instances)):\n # point_2d = np.copy(instances[i])\n # point_3d = np.copy(instances_3d[i])\n # for j in range(9):\n # # Translating the box in 3D, this will have a large impact on 3D IoU.\n # point_3d[j] += np.array([0.01, 0.02, 0.5])\n # boxes.append((point_2d, point_3d))\n # results.append(boxes)\n\n for boxes, label, plane in zip(results, labels, planes): \n instances = label['2d_instance']\n instances_3d = label['3d_instance']\n visibilities = label['visibility']\n num_instances = 0\n for instance, instance_3d, visibility in zip(\n instances, instances_3d, visibilities):\n if (visibility > self._vis_thresh and\n self._is_visible(instance[0]) and instance_3d[0, 2] < 0):\n num_instances += 1\n # We don't have negative examples in evaluation.\n if num_instances == 0:\n continue\n\n iou_hit_miss = metrics.HitMiss(self._iou_thresholds)\n azimuth_hit_miss = metrics.HitMiss(self._azimuth_thresholds)\n polar_hit_miss = metrics.HitMiss(self._polar_thresholds)\n pixel_hit_miss = metrics.HitMiss(self._pixel_thresholds)\n\n num_matched = 0\n for box in boxes:\n box_point_2d, box_point_3d = box\n index = self.match_box(box_point_2d, instances, visibilities)\n if index >= 0:\n num_matched += 1\n pixel_error = self.evaluate_2d(box_point_2d, instances[index])\n\n # If you only compute the 3D bounding boxes from RGB images, \n # your 3D keypoints may be up to scale. However, the ground truth\n # is at metric scale. There is a hack to re-scale your box using \n # the ground planes (assuming your box is sitting on the ground).\n # However, many models learn to predict depths and scale correctly.\n #scale = self.compute_scale(box_point_3d, plane)\n #box_point_3d = box_point_3d * scale\n azimuth_error, polar_error, iou = self.evaluate_3d(box_point_3d, instances_3d[index])\n iou_hit_miss.record_hit_miss(iou)\n pixel_hit_miss.record_hit_miss(pixel_error, greater=False)\n azimuth_hit_miss.record_hit_miss(azimuth_error, greater=False)\n polar_hit_miss.record_hit_miss(polar_error, greater=False)\n\n if num_matched > 0:\n self._iou_ap.append(iou_hit_miss, num_instances)\n self._pixel_ap.append(pixel_hit_miss, num_instances)\n self._azimuth_ap.append(azimuth_hit_miss, num_instances)\n self._polar_ap.append(polar_hit_miss, num_instances)\n self._matched += num_matched", "def sweep_image_model():\n for c1 in [4, 8, 16]:\n for c2 in [2, 4]:\n for c3 in [2, 4]:\n for c4 in [1, 2]:\n flags = flag_reader.read_flag()\n print(c1)\n flags.channel_list = c1 * np.array([1, c2, c2*c3, c2*c3*c4])\n print('channel list = ', flags.channel_list)\n flags.last_dim = flags.channel_list[-1]\n flags.model_name = flags.data_set + '_channel_' + str(flags.channel_list).replace('[','').replace(']','').replace(' ','_') + \\\n '_dim_last_' + str(flags.last_dim) + '_ind_' + str(flags.comp_ind) + \\\n '_lr_{}_decay_{}_reg_{}_bs_{}'.format(flags.lr, flags.lr_decay_rate, flags.reg_scale, flags.batch_size)\n print(flags.model_name)\n training_from_flag(flags)", "def residLike(self):\n\n # --------------------------------------------------------------------------------------------- #\n # Compute the residuals\n if self.csys == 'GAL':\n # Redo some file computations with this coordinate system\n self.outbinexp = os.path.join(self.workpath, 'BinExpMapGAL'+self.suffix+'.fits')\n self.outbincub = os.path.join(self.workpath, 'BinCubeGAL'+self.suffix+'.fits')\n self.outsrcmap = os.path.join(self.workpath, 'SrcMapsGAL'+self.suffix+'.fits')\n self.outresid = os.path.join(self.workpath, 'ResidGAL'+self.suffix+'.fits')\n self.outresig = os.path.join(self.workpath, 'ResSigmaGAL'+self.suffix+'.fits')\n\n self._gtExpmap()\n self._gtBincube()\n self._gtSrcmap()\n else:\n # Nothing to add\n pass\n \n self._gtBinmap()\n self._gtModel()\n # Create the residual count map (count_map - model_map)\n if not os.path.isfile(self.outresid):
os.popen(\"farith {} {} {} ops=SUB\".format(self.outbinmap, self.outgtmod,\n self.outresid))\n # Create the sigma-residual map (residual_map/sqrt(model_map))\n if not os.path.isfile(self.outresig):\n os.popen(\"ftpixcalc {} '(a-b)/sqrt(b)' a={} b={}\".format(self.outresig,\n self.outbinmap, self.outgtmod))\n\n # --------------------------------------------------------------------------------------------- #\n # Get the sources to overplot\n srcs = self.getSrc()\n srcs = srcs[(srcs['Separation'] <= 3.) & ([not i.endswith('c') for i in srcs['Name']])]\n # Plot the residuals\n resplt1 = FermiMap()\n resplt1.savepath = self.workpath\n resplt1.image = self.outresig\n resplt1.figname = 'ResSigma.pdf'\n dmin, dmax = np.abs(resplt1.datamin), resplt1.datamax\n resplt1.datamin = - min(dmin, dmax)\n resplt1.datamax = + min(dmin, dmax)\n resplt1.cbarlabel = r'Residual $\\sigma$/pixel'\n resplt1.mapSky()\n resplt1.srcSky(srcs['RA'], srcs['DEC'], srcs['Name'])\n resplt1.save()\n print(\"\\t=== Figure '{}' created ===\".format( os.path.join(resplt1.savepath, resplt1.figname) ))\n\n resplt2 = FermiMap()\n resplt2.savepath = self.workpath\n resplt2.image = self.outresid\n resplt2.figname = 'Residuals.pdf'\n dmin, dmax = np.abs(resplt2.datamin), resplt2.datamax\n resplt2.datamin = - min(dmin, dmax)\n resplt2.datamax = + min(dmin, dmax)\n resplt2.cbarlabel = r'Residual counts/pixel'\n resplt2.mapSky()\n resplt2.srcSky(srcs['RA'], srcs['DEC'], srcs['Name'])\n resplt2.save()\n print(\"\\t=== Figure '{}' created ===\".format( os.path.join(resplt2.savepath, resplt2.figname) ))\n return", "def _iter_images(self):\n raise NotImplementedError", "def regPyramidLK( iImgFix, iImgMov, iMaxIter, iNumScales, iVerbose=True ):\n # pretvori vhodne slike v numpy polja tipa float\n iImgFix = np.array( iImgFix, dtype='float' )\n iImgMov = np.array( iImgMov, dtype='float' )\n # pripravi piramido slik\n iPyramid = [ (iImgFix, iImgMov) ]\n for i in range(1,iNumScales):\n # decimiraj fiksno in premicno sliko za faktor 2\n iImgFix_2 = decimateImage2D( iImgFix, i )\n iImgMov_2 = decimateImage2D( iImgMov, i )\n # dodaj v seznam\n iPyramid.append( (iImgFix_2,iImgMov_2) )\n # doloci zacetne parametre preslikave\n oPar = np.array( (0,0) ) \n # izvedi poravnavo od najmanjse do najvecje locljivosti slik\n for i in range(len(iPyramid)-1,-1,-1):\n if iVerbose: print('PORAVNAVA Z DECIMACIJO x%d' % 2*i)\n # posodobi parametre preslikave\n oPar = oPar * 2.0\n # izvedi poravnavo pri trenutni locljivosti\n oPar, oImgReg = regLucasKanade( iPyramid[i][0], iPyramid[i][1], \\\n iMaxIter, oPar, iVerbose=iVerbose )\n # vrni koncne parametre in poravnano sliko\n return oPar, oImgReg", "def iterative_ridge(z0, x, weight, alpha=1.0, tol=1e-5, tikhonov=1e-4, eps=None,\n maxiter=10, line_search=True, cg=False, cg_options=None,\n verbose=False):\n if tikhonov < 1e-5:\n warnings.warn('small regularization value %0.4e may lead to '\n 'inprecise results.' % tikhonov)\n if cg and cg_options is None:\n cg_options = {}\n if eps is None:\n eps = torch.finfo(weight.dtype).eps\n tol = z0.numel() * tol\n\n def f(z):\n x_hat = torch.mm(z, weight.T)\n loss = 0.5 * (x_hat - x).pow(2).sum() + alpha * z.abs().sum()\n return loss\n\n # initialize\n z = z0\n fval = f(z)\n if verbose:\n print('initial fval: %0.4f' % fval)\n\n # right hand side of the residual sum of squares (RSS) problem. [B,D]\n rhs = torch.mm(x, weight) # [B,D] = [B,K] @ [K,D]\n\n # batch gram matrix W^T @ W. 
", "def iterative_ridge(z0, x, weight, alpha=1.0, tol=1e-5, tikhonov=1e-4, eps=None,\n maxiter=10, line_search=True, cg=False, cg_options=None,\n verbose=False):\n if tikhonov < 1e-5:\n warnings.warn('small regularization value %0.4e may lead to '\n 'imprecise results.' % tikhonov)\n if cg and cg_options is None:\n cg_options = {}\n if eps is None:\n eps = torch.finfo(weight.dtype).eps\n tol = z0.numel() * tol\n\n def f(z):\n x_hat = torch.mm(z, weight.T)\n loss = 0.5 * (x_hat - x).pow(2).sum() + alpha * z.abs().sum()\n return loss\n\n # initialize\n z = z0\n fval = f(z)\n if verbose:\n print('initial fval: %0.4f' % fval)\n\n # right hand side of the residual sum of squares (RSS) problem. [B,D]\n rhs = torch.mm(x, weight) # [B,D] = [B,K] @ [K,D]\n\n # batch gram matrix W^T @ W. [D,D]\n A = torch.mm(weight.T, weight)\n\n if not cg:\n A = A.expand(z.size(0), -1, -1) # [B,D,D]\n\n for k in range(1, maxiter + 1):\n # compute diagonal factor\n zmag = z.abs()\n is_zero = zmag < eps\n diag = (alpha / zmag).masked_fill(is_zero, 0)\n rhs_k = rhs.masked_fill(is_zero, 0.)\n\n # solve ridge sub-problem\n if cg:\n # use conjugate gradient method\n def Adot(v):\n Av = torch.mm(v.masked_fill(is_zero, 0.), A)\n Av.masked_fill_(is_zero, 0.)\n Av.addcmul_(diag + tikhonov, v)\n return Av\n dot = lambda u, v: torch.sum(u*v, 1, keepdim=True)\n z_sol = conjgrad(rhs_k, Adot, dot, **cg_options)\n else:\n # use cholesky factorization\n A_k = A.masked_fill((is_zero.unsqueeze(1) | is_zero.unsqueeze(2)), 0.)\n A_k.diagonal(dim1=1, dim2=2).add_(diag + tikhonov)\n z_sol = batch_cholesky_solve(rhs_k, A_k) # [B,D]\n\n if line_search:\n # line search optimization\n p = z_sol - z\n line_obj = lambda t: float(f(z.add(p, alpha=t)))\n res = minimize_scalar(line_obj, bounds=(0,10), method='bounded')\n t = res.x\n fval = torch.tensor(res.fun)\n update = p.mul(t)\n z = torch.where(is_zero, z, z + update)\n else:\n # fixed step size\n update = z_sol - z\n z = torch.where(is_zero, z, z_sol)\n fval = f(z)\n\n if verbose:\n print('iter %3d - fval: %0.4f' % (k, fval))\n\n # check for convergence\n if update.abs().sum() <= tol:\n msg = _status_message['success']\n break\n\n # check for NaN\n if (fval.isnan() | update.isnan().any()):\n msg = _status_message['nan']\n break\n\n else:\n msg = \"Warning: \" + _status_message['maxiter']\n\n if verbose:\n print(msg)\n print(\" Current function value: %f\" % fval)\n print(\" Iterations: %d\" % k)\n\n return z", "def calc_k_dot_r(self):\n\t\n\tself.k_dot_r = self.k[0]*self.rij[0,:,:,:] + self.k[1]*self.rij[1,:,:,:] + self.k[2]*self.rij[2,:,:,:]\n\t\n\treturn self.k_dot_r", "def compute_results(path, swatch, print_to):\r\n\tfile_urls = get_images(path)\r\n\tfor file_url in file_urls:\r\n\t\t# gets the fdxcolorextractor object containing color palette\r\n\t\tcolor_palette = FdxColorExtractor(file_url, swatch).extract()\r\n\r\n\t\t# gets the dictionary part of the object\r\n\t\tcolor_palette_dict = color_palette.__dict__\r\n\r\n\t\t# dumps to json asking the encoder to take dict form of every object\r\n\t\tcolor_palette_jsondump = json.dumps(color_palette_dict, default=lambda o: o.__dict__)\r\n\r\n\t\tprint(color_palette_jsondump, file=print_to)", "def _krls_evaluate(self, dAldKRLS):\n \n # Get the needed data from the dictionary with data\n mDict = dAldKRLS['mDict']\n vAlpha = dAldKRLS['vAlpha']\n \n (iRowsDict, _) = mDict.shape # Get the number of rows from the dictionary\n if iRowsDict > 0:\n vX = np.dot(vAlpha.T, mDict)\n else:\n vX = np.zeros((iRowsDict,1))\n \n return vX", "def knn_prediction(X, y, x, k):\n ## homework:start\n # Editorial completion (the original left this blank as an exercise);\n # one straightforward solution, assuming numpy as np: a majority vote\n # among the labels of the k nearest training rows.\n distances = np.linalg.norm(X - x, axis=1)\n nearest = np.argsort(distances)[:k]\n labels, counts = np.unique(y[nearest], return_counts=True)\n result = labels[np.argmax(counts)]\n ## homework:end\n return result", "def run(self, inputVolume, labelColorsList, markupsList, marginMask, distance, gamma, regularizationDiameter, threshold): \n if not self.isValidInputOutputData(inputVolume):\n slicer.util.errorDisplay('Input volume is the same as output volume. 
Choose a different output volume.')\n return False\n \n volumesLogic = slicer.modules.volumes.logic()\n clonedVolumeNode = volumesLogic.CloneVolume(slicer.mrmlScene, inputVolume, \"clone\")\n voxels = slicer.util.arrayFromVolume(clonedVolumeNode)\n tmpVoxels = np.copy(voxels)\n \n seeds = self.getSeedsFromMarkups(markupsList, len(labelColorsList))\n seeds = self.getIJKSeeds(inputVolume, seeds) \n \n start_time = time.time()\n R = np.copy(tmpVoxels) # Regularization map\n\n regularizationFile = self.globalPath + \"Regularizations/\" + inputVolume.GetName() + \"_\" + str(regularizationDiameter) + \".nii.gz\"\n if os.path.isfile(regularizationFile): # If regularization exists, load this one\n print(\"--- Alredy existing regularization\")\n regularizationVolumeNode = slicer.util.loadVolume(regularizationFile)\n voxels = slicer.util.arrayFromVolume(regularizationVolumeNode)\n R = np.copy(voxels)\n slicer.mrmlScene.RemoveNode(regularizationVolumeNode)\n else: # Else, create a new one\n print(\"- Creating new regularization start: \")\n R = regularization(voxels, int(regularizationDiameter/2))\n print(\"- Creating new regularization end\")\n regularizationVolumeNode = volumesLogic.CloneVolume(slicer.mrmlScene, inputVolume, inputVolume.GetName() + \"_regularization_\" + str(regularizationDiameter))\n slicer.util.updateVolumeFromArray(regularizationVolumeNode, R)\n slicer.util.saveNode(regularizationVolumeNode, regularizationFile)\n\n regularization_time = time.time() - start_time\n print(\"- Regularization time: %s seconds -\" % regularization_time)\n\n # def segmentation(globalPath, volume, voxels, seeds, marginMask, distance, regDiameter):\n self.imgLabel, self.imgDist = segmentation(inputVolume, tmpVoxels, R, seeds, \n len(labelColorsList), marginMask, distance, gamma, regularizationDiameter, threshold) \n \n tmpVoxels[:] = self.imgLabel[:]\n slicer.util.updateVolumeFromArray(clonedVolumeNode, tmpVoxels)\n outputVolume = slicer.vtkSlicerVolumesLogic().CloneVolume(slicer.mrmlScene, clonedVolumeNode, inputVolume.GetName() + \"_segmentation\", True)\n \n displaySegmentationMap(clonedVolumeNode, self.imgLabel, labelColorsList, self.removeLastSegmentation, self.showBackGround)\n \n if slicer.mrmlScene:\n slicer.mrmlScene.RemoveNode(clonedVolumeNode) \n \n slicer.util.setSliceViewerLayers(background=inputVolume)\n return outputVolume", "def evaluation(eval_dict, faster_rcnn, test_num=Config.eval_num):\n bboxes, labels, scores = list(), list(), list()\n gt_bboxes, gt_labels, gt_difficult = list(), list(), list()\n for i, [img_dir, img_info] in tqdm(enumerate(eval_dict.items())):\n img, img_info = rescale_image(img_dir, img_info, flip=False)\n img_tensor = create_img_tensor(img)\n box, score, label = faster_rcnn.predict(img_tensor)\n gt_bbox = np.array(img_info['objects'])[:, 1:5].astype(np.float32)\n gt_label = np.array(img_info['objects'])[:, 0]\n difficult = np.array(img_info['difficult'])\n\n # label from text to number\n gt_label = text_to_num(gt_label)\n\n bboxes.append(box)\n labels.append(label)\n scores.append(score)\n gt_bboxes.append(gt_bbox)\n gt_labels.append(gt_label)\n gt_difficult.append(difficult)\n if i == test_num:\n break\n\n AP, mAP = calc_map(bboxes, labels, scores, gt_bboxes, gt_labels, gt_difficult, use_07_metric=True)\n\n return mAP", "def _evaluate(model):\n _recompile(model)\n if isinstance(eval_dataset, tuple):\n eval_images, eval_labels = eval_dataset\n return model.evaluate(\n eval_images, eval_labels, verbose=verbose, return_dict=True)\n else:\n return 
model.evaluate(eval_dataset, verbose=verbose, return_dict=True)", "def evaluate(vqg, data_loader, criterion, l2_criterion, args):\n vqg.eval()\n total_gen_loss = 0.0\n total_kl = 0.0\n total_recon_image_loss = 0.0\n total_steps = len(data_loader)\n if args.eval_steps is not None:\n total_steps = min(len(data_loader), args.eval_steps)\n start_time = time.time()\n for iterations, (images, questions, qindices) in enumerate(data_loader):\n\n # Set mini-batch dataset\n if torch.cuda.is_available():\n images = images.cuda()\n questions = questions.cuda()\n qindices = qindices.cuda()\n\n # Forward, Backward and Optimize\n image_features = vqg.encode_images(images)\n mus, logvars = vqg.encode_into_z(image_features)\n zs = vqg.reparameterize(mus, logvars)\n (outputs, _, other) = vqg.decode_questions(\n image_features, zs, questions=questions,\n teacher_forcing_ratio=1.0)\n\n # Reorder the questions based on length.\n questions = torch.index_select(questions, 0, qindices)\n\n # Ignoring the start token.\n questions = questions[:, 1:]\n qlengths = process_lengths(questions)\n\n # Convert the output from MAX_LEN list of (BATCH x VOCAB) ->\n # (BATCH x MAX_LEN x VOCAB).\n outputs = [o.unsqueeze(1) for o in outputs]\n outputs = torch.cat(outputs, dim=1)\n outputs = torch.index_select(outputs, 0, qindices)\n\n # Calculate the loss.\n targets = pack_padded_sequence(questions, qlengths,\n batch_first=True)[0]\n outputs = pack_padded_sequence(outputs, qlengths,\n batch_first=True)[0]\n gen_loss = criterion(outputs, targets)\n total_gen_loss += gen_loss.data.item()\n\n # Get KL loss if it exists.\n kl_loss = gaussian_KL_loss(mus, logvars)\n total_kl += kl_loss.item()\n\n\n\n # Quit after eval_steps.\n if args.eval_steps is not None and iterations >= args.eval_steps:\n break\n\n # Print logs\n if iterations % args.log_step == 0:\n delta_time = time.time() - start_time\n start_time = time.time()\n logging.info('Time: %.4f, Step [%d/%d], gen loss: %.4f, '\n 'KL: %.4f'\n % (delta_time, iterations, total_steps,\n total_gen_loss/(iterations+1),\n total_kl/(iterations+1)))\n total_info_loss = total_recon_image_loss \n return total_gen_loss / (iterations+1), total_info_loss / (iterations + 1)", "def generate_images_pred(self, inputs, outputs):\n for scale in self.scales:\n disp = outputs[(\"disp\", scale)]\n disp = F.interpolate(\n disp, [self.height, self.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.min_depth, self.max_depth)\n\n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.frame_ids[1:]):\n\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n # from the authors of https://arxiv.org/abs/1712.00175\n # mean-normalized inverse depth from [62] to discourage shrinking of the estimated depth\n\n axisangle = outputs[(\"axisangle\", 0, frame_id)]\n translation = outputs[(\"translation\", 0, frame_id)]\n\n inv_depth = 1 / depth\n mean_inv_depth = inv_depth.mean(3, True).mean(2, True)\n\n T = transformation_from_parameters(\n axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0)\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\")\n\n 
outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]", "def __call__(self, img: Image):\n if self.K <= 1:\n return self.transform(img)\n else:\n return [self.transform(img) for _ in range(self.K)]", "def residmapComparison():\n srcmap001 = fits.open('dataFiles/6gev_srcmap_001.fits')\n srcmap03 = fits.open('dataFiles/6gev_srcmap_03.fits')\n\n image_data = fits.getdata('dataFiles/6gev_image.fits')\n filename = get_pkg_data_filename('dataFiles/6gev_image.fits')\n hdu = fits.open(filename)[0]\n wcs = WCS(hdu.header)\n\n #Given the results of the fit, calculate the model\n modelData001 = np.zeros(srcmap001[0].shape)\n modelData03 = np.zeros(srcmap03[0].shape)\n\n file = open('plotsData/fitResults001.pk1','rb')\n fit001 = pickle.load(file)\n file.close()\n\n file = open('plotsData/fitResults03.pk1','rb')\n fit03 = pickle.load(file)\n file.close()\n\n\n for source in fit001:\n the_index = srcmap001.index_of(source)\n\n modelData001 += fit001[source][:, None, None]*srcmap001[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap001[the_index].data, axis=2), axis=1)[:-1, None, None]\n for source in fit03:\n the_index = srcmap03.index_of(source)\n modelData03 += fit03[source][:, None, None]*srcmap03[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap03[the_index].data, axis=2), axis=1)[:-1, None, None]\n\n fig = plt.figure(figsize=[12, 4.5])\n\n vmin = -25.0\n vmax = 25.0\n cbStep = 5.0\n ax = fig.add_subplot(121, projection=wcs)\n ax=plt.gca()\n ax.tick_params(direction='in')\n c = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax.get_transform('galactic'))\n ax.add_patch(c)\n mappable=plt.imshow((image_data-np.sum(modelData001,axis=0)),cmap='seismic',origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')#\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('GC Point Source ($>6$ GeV)')\n cb = plt.colorbar(mappable, label='Residual counts per pixel', pad=0.01,ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb.ax.tick_params(width=0)\n\n\n ax2=fig.add_subplot(122, projection=wcs)\n ax2 = plt.gca()\n c2 = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax2.get_transform('galactic'))\n ax2.add_patch(c2)\n mappable2 = plt.imshow((image_data-np.sum(modelData03,axis=0)), cmap='seismic',origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('GC Extended Source ($>6$ GeV)')\n cb2 = plt.colorbar(mappable2, label='Residual counts per pixel', pad=0.01, ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb2.ax.tick_params(width=0)\n fig.tight_layout()\n plt.subplots_adjust(wspace = 0.13, left=0.04, bottom=0.13, top=0.92)\n #plt.savefig('plots/residComparison.pdf',bbox_inches='tight')\n plt.show()", "def multiple_shape_learning(ratio_result, X_train_list, y_train_list, X_test_list):\n\n # MTL GP learning\n kernel = GPy.kern.Matern32(input_dim=6, ARD=True)\n icm = GPy.util.multioutput.ICM(input_dim=6, num_outputs=len(X_train_list), kernel=kernel)\n model = GPy.models.SparseGPCoregionalizedRegression(X_list=X_train_list, Y_list=y_train_list, Z_list=[], kernel=icm)\n model.optimize('bfgs')\n X_test = np.concatenate(X_test_list)\n X_test = np.hstack((X_test, np.ones((len(X_test), 1))))\n noise_dict = {'output_index': X_test[:, -1:].astype(int)}\n y_pred, y_var = model.predict(X_test, Y_metadata=noise_dict)\n ratio_result['MTL_GP'] = 
y_pred\n\n X_train = np.concatenate(X_train_list)\n y_train = np.concatenate(y_train_list)\n y_train = y_train.reshape((len(y_train),))\n X_test = np.concatenate(X_test_list)\n\n model1 = linear_model.LinearRegression()\n model1.fit(X_train, y_train)\n y_pred = model1.predict(X_test)\n ratio_result['multiple_linear_regression'] = y_pred\n\n model2 = svm.SVR()\n model2.fit(X_train, y_train)\n y_pred = model2.predict(X_test)\n ratio_result['multiple_SVM'] = y_pred\n\n model3 = neural_network.MLPRegressor(solver='lbfgs', max_iter=1000, learning_rate_init=0.005)\n model3.fit(X_train, y_train)\n y_pred = model3.predict(X_test)\n ratio_result['multiple_NN'] = y_pred\n\n kernel = GPy.kern.Matern32(input_dim=6, ARD=True)\n m_full = GPy.models.SparseGPRegression(X_train, y_train.reshape(len(y_train), 1), kernel)\n m_full.optimize('bfgs')\n y_pred, y_var = m_full.predict(X_test)\n ratio_result['multiple_GP'] = y_pred\n\n return ratio_result", "def do_pnp(pts3d_for_pnp, pts2d_for_pnp, K, iterations=200, reprojThresh=5):\n list_pts3d_for_pnp = pts3d_for_pnp\n list_pts2d_for_pnp = pts2d_for_pnp\n pts3d_for_pnp = np.squeeze(np.array(pts3d_for_pnp))\n pts2d_for_pnp = np.expand_dims(np.squeeze(np.array(pts2d_for_pnp)), axis=1)\n num_pts = len(pts3d_for_pnp)\n\n highest_inliers = 0\n for i in range(iterations):\n pt_idxs = np.random.choice(num_pts, 6, replace=False)\n pts3 = np.array([pts3d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n pts2 = np.array([pts2d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n _, rvec, tvec = cv2.solvePnP(pts3, pts2, K, distCoeffs=np.array([]), flags=cv2.SOLVEPNP_ITERATIVE)\n R, _ = cv2.Rodrigues(rvec)\n pnp_errors, projpts, avg_err, perc_inliers = test_reproj_pnp_points(list_pts3d_for_pnp, list_pts2d_for_pnp, R, tvec, K, rep_thresh=reprojThresh)\n if highest_inliers < perc_inliers:\n highest_inliers = perc_inliers\n best_R = R\n best_tvec = tvec\n R = best_R\n tvec = best_tvec\n print('rvec:', rvec,'\\n\\ntvec:', tvec)\n\n return R, tvec", "def custom_train_loop(sess, train_targets, **loop_params):\n print('Calling custom training loop...')\n train_results = sess.run(train_targets)\n for i, result in enumerate(train_results):\n print('Model {} has loss {}'.format(i, result['loss']))\n return train_results", "def _rescore_instances(self, classes, scores, keypoint_scores):\n batch, max_detections, total_num_keypoints = (\n shape_utils.combined_static_and_dynamic_shape(keypoint_scores))\n classes_tiled = tf.tile(classes[:, :, tf.newaxis],\n multiples=[1, 1, total_num_keypoints])\n # TODO(yuhuic): Investigate whether this function will create subgraphs in\n # tflite that will cause the model to run slower at inference.\n for kp_params in self._kp_params_dict.values():\n if not kp_params.rescore_instances:\n continue\n class_id = kp_params.class_id\n keypoint_indices = kp_params.keypoint_indices\n kpt_mask = tf.reduce_sum(\n tf.one_hot(keypoint_indices, depth=total_num_keypoints), axis=0)\n kpt_mask_tiled = tf.tile(kpt_mask[tf.newaxis, tf.newaxis, :],\n multiples=[batch, max_detections, 1])\n class_and_keypoint_mask = tf.math.logical_and(\n classes_tiled == class_id,\n kpt_mask_tiled == 1.0)\n class_and_keypoint_mask_float = tf.cast(class_and_keypoint_mask,\n dtype=tf.float32)\n visible_keypoints = tf.math.greater(\n keypoint_scores, kp_params.rescoring_threshold)\n keypoint_scores = tf.where(\n visible_keypoints, keypoint_scores, tf.zeros_like(keypoint_scores))\n num_visible_keypoints = tf.reduce_sum(\n class_and_keypoint_mask_float *\n tf.cast(visible_keypoints, 
tf.float32), axis=-1)\n num_visible_keypoints = tf.math.maximum(num_visible_keypoints, 1.0)\n scores_for_class = (1./num_visible_keypoints) * (\n tf.reduce_sum(class_and_keypoint_mask_float *\n scores[:, :, tf.newaxis] *\n keypoint_scores, axis=-1))\n scores = tf.where(classes == class_id,\n scores_for_class,\n scores)\n return scores", "def gen_img_settings_quality(l):\n \n lhalf = 0.5*l\n \n ### sphere radius\n \n sphere_radius = 0.7\n #sphere_rgbcolor = [0.25,0.65,0.65]\n \n ### RESOLUTION\n \n img_widthpx = 1024\n img_heightpx = 1024\n\n ### includes and defaults\n\n povray_includes = [\"colors.inc\", \"textures.inc\", \"shapes.inc\"]\n povray_defaults = [vapory.Finish( 'ambient', 0.1,\n\t \t\t\t 'diffuse', 0.65,\n\t\t \t\t 'specular', 0.5,\n\t\t\t \t 'shininess', 0.53,\n\t\t\t\t 'opacity', 1.0)]\n\n\n ### light sources\n\n sun1 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', 'White')\n sun2 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', [0.7, 0.7, 0.7])\n\n ### background\n\n background = vapory.Background('color', [1,1,1])\n\n ### camera\n\n #povray_cam = vapory.Camera('angle', 75, 'location', [-15 , 15.0+0.5,15.0-0.25],'look_at', [0.25 , 15.0+0.5, 15.0-0.25])\n povray_cam = vapory.Camera('location', [lhalf, lhalf, -1.01*lhalf], 'look_at', [lhalf,lhalf,0], 'angle', 90)\n\n ### text\n # If desired include this in the povray_objects - array declared in the loop\n #text1 = vapory.Text( 'ttf', '\"timrom.ttf\"' ,'\"Division:\"', 0.01, 0.0, 'scale', [0.5,0.5,0.5],'rotate', [0,90,0], 'translate' , [0.0 , 15.0+2.75-1 , 15.0+1.5], vapory.Pigment('Black') ) \n\n ### render quality\n\n quality = 10\n \n return sphere_radius, img_widthpx, img_heightpx, povray_includes, povray_defaults, sun1, sun2, background, povray_cam, quality", "def fast_rcnn_detection(self):\n\n\n # (batch_size, num_proposal, 7, 7, channels)\n pooled_feature = self.get_rois(self.rpn_proposals_boxes)\n fast_rcnn_predict_boxes, fast_rcnn_predict_scores = self.fast_rcnn_net(pooled_feature, False)\n\n with tf.variable_scope(\"fast_rcnn_detection\"):\n\n fast_rcnn_softmax_scores = slim.softmax(fast_rcnn_predict_scores) # [-1, num_classes]\n\n # gain the highest category and score and bounding box\n fast_rcnn_categories = tf.argmax(fast_rcnn_softmax_scores, axis=2, output_type=tf.int32) # (N,)\n row_index = tf.range(0, tf.shape(fast_rcnn_categories)[1])\n row_index = tf.expand_dims(row_index, 0)\n multi_row_index = tf.tile(row_index, [self.config.PER_GPU_IMAGE, 1])\n multi_row_index = tf.expand_dims(multi_row_index, axis=-1)\n expand_fast_rcnn_categories = tf.expand_dims(fast_rcnn_categories, axis=-1)\n index = tf.concat([multi_row_index, expand_fast_rcnn_categories], axis=-1)\n fast_rcnn_categories_bboxs = boxes_utils.batch_slice([fast_rcnn_predict_boxes, index],\n lambda x, y: tf.gather_nd(x, y),\n self.config.PER_GPU_IMAGE)\n\n fast_rcnn_categories_scores = tf.reduce_max(fast_rcnn_softmax_scores, axis=2, keepdims=False)# (N,)\n\n detections = self.fast_rcnn_proposals(self.rpn_proposals_boxes,\n fast_rcnn_categories_bboxs,\n fast_rcnn_categories,\n fast_rcnn_categories_scores,\n self.window)\n\n return detections", "def put_cmyk(self, path, res, mode, color_correction):\n\n ### Stratasys_J750\n if mode == \"stratasys\":\n _c_ = (0, 90, 158, 255)\n _m_ = (166, 33, 98, 255)\n _y_ = (200, 189, 3, 255)\n _k_ = (26, 26, 29, 255)\n\n ### cmyk\n else:\n _c_ = (0, 255, 255, 255)\n _m_ = (255, 0, 255, 255)\n _y_ = (255, 255, 0, 255)\n \n\n\n ut = util.UTIL()\n \n image = Image.open(path)\n image_size = 
image.size\n\n clr_list = self.get_color_to_memory(image)\n\n # print(\"Memory : \", ut.ll_size(clr_list))\n\n new_image = self.up_scale(image, res)\n new_image_size = new_image.size\n \n # print(\"PIL : \", image_size)\n # print(new_image_size)\n\n vectors = ut.set_vector(res)\n\n for i in range(image_size[0]):\n for j in range(image_size[1]):\n\n pt = (i * res, j * res)\n rgb = clr_list[i][j]\n new_vectors = random.sample(vectors, len(vectors))\n\n cmyk = self.calc_rgb_cmyk(rgb)\n\n\n ### ========== CMYK ==========\n # cc, mm, yy, kk = self.calc_cmyk_count(cmyk, res)\n # new_length = cc + mm + yy + kk\n ### ========== CMYK ==========\n\n\n ### ========== CMY ==========\n cc, mm, yy = self.calc_cmy_count(cmyk, res, color_correction)\n _length = cc + mm + yy\n ### ========== CMY ==========\n\n\n if _length > (res * res):\n new_length = (res * res)\n else:\n new_length = _length\n\n new_pt = []\n\n for k in range(new_length):\n\n new_pt = ut.pt2d_add(pt, new_vectors[k])\n # print(new_pt)\n\n if k < cc:\n new_image.putpixel(new_pt, (_c_))\n elif k < (cc + mm):\n new_image.putpixel(new_pt, (_m_))\n elif k < (cc + mm + yy):\n new_image.putpixel(new_pt, (_y_))\n # else:\n # new_image.putpixel(new_pt, (_k_))\n \n return new_image", "def run(self, r, niters=10000):\n validator.validate_type(r, rng, param_name='r')\n validator.validate_positive(niters, param_name='niters')\n model = bind(self._latent, self._view)\n for _ in xrange(niters):\n for name, config in self._kernel_config:\n if name == 'assign':\n gibbs.assign(model, r)\n elif name == 'assign_resample':\n gibbs.assign_resample(model, config['m'], r)\n elif name == 'grid_feature_hp':\n gibbs.hp(model, config, r)\n elif name == 'slice_feature_hp':\n slice.hp(model, r, hparams=config['hparams'])\n elif name == 'slice_cluster_hp':\n slice.hp(model, r, cparam=config['cparam'])\n elif name == 'theta':\n slice.theta(model, r, tparams=config['tparams'])\n else:\n assert False, \"should not be reach\"", "def loop_over_files(self, files_dir, files_opt, results_path, wavelength_idx=None,\n configuration_idx=None, surface=None, spaxels_per_slice=51):\n\n # We want the result to produce as output: the RMS WFE array, and the RayTrace at both Object and Focal plane\n results_names = ['RMS_WFE', 'OBJ_XY', 'FOC_XY', 'GLOB_XY']\n # we need to give the shapes of each array to self.run_analysis\n results_shapes = [(spaxels_per_slice,), (spaxels_per_slice, 2), (spaxels_per_slice, 2), (spaxels_per_slice, 2)]\n\n metadata = {}\n metadata['Spaxels per slice'] = spaxels_per_slice\n metadata['Configurations'] = 'All' if configuration_idx is None else configuration_idx\n metadata['Wavelengths'] = 'All' if wavelength_idx is None else wavelength_idx\n\n\n # read the file options\n file_list, sett_list = create_zemax_file_list(which_system=files_opt['which_system'],\n AO_modes=files_opt['AO_modes'], scales=files_opt['scales'],\n IFUs=files_opt['IFUs'], grating=files_opt['grating'])\n\n # Loop over the Zemax files\n results = []\n for zemax_file, settings in zip(file_list, sett_list):\n\n list_results = self.run_analysis(analysis_function=self.analysis_function_rms_wfe,\n files_dir=files_dir, zemax_file=zemax_file, results_path=results_path,\n results_shapes=results_shapes, results_names=results_names,\n wavelength_idx=wavelength_idx, configuration_idx=configuration_idx,\n surface=surface, spaxels_per_slice=spaxels_per_slice)\n\n results.append(list_results)\n rms_wfe, obj_xy, foc_xy, global_xy, wavelengths = list_results\n\n # Post-Processing the results\n file_name = 
zemax_file.split('.')[0]\n results_dir = os.path.join(results_path, file_name)\n settings['surface'] = 'IMG' if surface is None else surface\n\n self.save_hdf5(analysis_name='RMS_WFE', analysis_metadata=metadata, list_results=list_results, results_names=results_names,\n file_name=file_name, file_settings=settings, results_dir=results_path)\n\n\n return results", "def sgd(iterations):\n for iteration in range(0,iterations):\n error = []\n for user_id in range(0,latent_user_preferences.shape[0]):\n for item_id in range(0,latent_item_features.shape[0]):\n rating = user_ratings[user_id][item_id]\n if rating != 99:\n err = train(user_id, item_id, rating)\n error.append(err)\n mse = (np.array(error) ** 2).mean() \n if(iteration%1 == 0):#000 == 0 ):\n print(mse)\n return error", "def _kuhn_munkres_algorithm(true_lab, pred_lab):\n nclass = len(set(true_lab))\n nobs = len(true_lab)\n wmat = np.zeros((nclass, nclass))\n for lab in range(nclass):\n for plab in range(lab, nclass):\n n_intersec = len(set(np.transpose(np.argwhere(true_lab == lab))[0]).intersection(\n set(np.transpose(np.argwhere(pred_lab == plab))[0])))\n w = (nobs - n_intersec) / nobs\n if lab == plab:\n wmat[lab, plab] = w\n else:\n wmat[lab, plab] = w\n n_intersec = len(set(np.transpose(np.argwhere(true_lab == plab))[0]).intersection(\n set(np.transpose(np.argwhere(pred_lab == lab))[0])))\n w = (nobs - n_intersec) / nobs\n wmat[plab, lab] = w\n new_pred_lab = list(linear_sum_assignment(wmat)[1])\n # print(f'Recode: {new_pred_lab}')\n # print(f'From matrix: {wmat}')\n pred_perm = np.array([new_pred_lab.index(i) for i in pred_lab])\n\n return pred_perm", "def __call__(self, results):\n img = results['img']\n polys = results[self.instance_key]\n x_min, y_min, x_max, y_max = self._random_crop(img, polys)\n kept_idx = []\n for idx, poly in enumerate(polys):\n if np.all((poly[0::2] >= x_min) & (poly[1::2] >= y_min) & \\\n (poly[0::2] <= x_max) & (poly[1::2] <= y_max)):\n kept_idx.append(idx)\n kept_idx = np.array(kept_idx)\n # crop img\n results['img'] = img[y_min : y_max, x_min : x_max, :]\n results['img_shape'] = results['img'].shape\n # crop mask\n for key in results.get('mask_fields', []):\n results[key] = results[key].crop(np.array([x_min, y_min, x_max, y_max]))\n # crop box\n for key in results.get('bbox_fields', []):\n bboxes = []\n for box in results[key]:\n box = np.array(box)\n if np.all((np.min(box[0::2]) >= x_min) & (np.min(box[1::2]) >= y_min) & \\\n (np.max(box[0::2]) <= x_max) & (np.max(box[1::2]) <= y_max)):\n box[0::2] = (box[0::2] - x_min)\n box[1::2] = (box[1::2] - y_min)\n bboxes.append(box)\n # no valid box in img\n if len(bboxes) == 0:\n if key == 'gt_bboxes':\n bboxes = np.zeros((0, 4), dtype=np.float32)\n else:\n bboxes = np.zeros((0, 8), dtype=np.float32)\n results[key] = bboxes\n # calculate the kept text and label\n for key in ['gt_labels', 'gt_texts']:\n if key in results:\n results[key] = [results[key][idx] for idx in kept_idx]\n # calculate the kept mask\n for key in ['gt_masks']:\n if key in results:\n ori_mask = results[key].masks\n kept_mask = [ori_mask[idx] for idx in kept_idx]\n if len(kept_mask) > 0:\n kept_mask = np.stack(kept_mask)\n else:\n kept_mask = np.empty((0, results[key].height, results[key].width), dtype=np.float32)\n results[key] = BitmapMasks(kept_mask, results[key].height, results[key].width)\n return results", "def vimeo90k(mode):\n #### configurations\n read_all_imgs = False # whether real all images to memory with multiprocessing\n # Set False for use limited memory\n BATCH = 5000 # 
After BATCH images, lmdb commits, if read_all_imgs = False\n if mode == 'GT':\n img_folder = '../../datasets/vimeo90k/vimeo_septuplet/sequences'\n lmdb_save_path = '../../datasets/vimeo90k/vimeo90k_train_GT.lmdb'\n txt_file = '../../datasets/vimeo90k/vimeo_septuplet/sep_trainlist.txt'\n H_dst, W_dst = 256, 448\n elif mode == 'LR':\n img_folder = '../../datasets/vimeo90k/vimeo_septuplet_matlabLRx4/sequences'\n lmdb_save_path = '../../datasets/vimeo90k/vimeo90k_train_LR7frames.lmdb'\n txt_file = '../../datasets/vimeo90k/vimeo_septuplet/sep_trainlist.txt'\n H_dst, W_dst = 64, 112\n elif mode == 'flow':\n img_folder = '../../datasets/vimeo90k/vimeo_septuplet/sequences_flowx4'\n lmdb_save_path = '../../datasets/vimeo90k/vimeo90k_train_flowx4.lmdb'\n txt_file = '../../datasets/vimeo90k/vimeo_septuplet/sep_trainlist.txt'\n H_dst, W_dst = 128, 112\n else:\n raise ValueError('Wrong dataset mode: {}'.format(mode))\n n_thread = 40\n ########################################################\n if not lmdb_save_path.endswith('.lmdb'):\n raise ValueError(\"lmdb_save_path must end with \\'lmdb\\'.\")\n if osp.exists(lmdb_save_path):\n print('Folder [{:s}] already exists. Exit...'.format(lmdb_save_path))\n sys.exit(1)\n\n #### read all the image paths to a list\n print('Reading image path list ...')\n with open(txt_file) as f:\n train_l = f.readlines()\n train_l = [v.strip() for v in train_l]\n all_img_list = []\n keys = []\n for line in train_l:\n folder = line.split('/')[0]\n sub_folder = line.split('/')[1]\n all_img_list.extend(glob.glob(osp.join(img_folder, folder, sub_folder, '*')))\n if mode == 'flow':\n for j in range(1, 4):\n keys.append('{}_{}_4_n{}'.format(folder, sub_folder, j))\n keys.append('{}_{}_4_p{}'.format(folder, sub_folder, j))\n else:\n for j in range(7):\n keys.append('{}_{}_{}'.format(folder, sub_folder, j + 1))\n all_img_list = sorted(all_img_list)\n keys = sorted(keys)\n if mode == 'GT': # only read the 4th frame for the GT mode\n print('Only keep the 4th frame.')\n all_img_list = [v for v in all_img_list if v.endswith('im4.png')]\n keys = [v for v in keys if v.endswith('_4')]\n\n if read_all_imgs:\n #### read all images to memory (multiprocessing)\n dataset = {} # store all image data. 
list cannot keep the order, use dict\n print('Read images with multiprocessing, #thread: {} ...'.format(n_thread))\n pbar = util.ProgressBar(len(all_img_list))\n\n def mycallback(arg):\n \"\"\"get the image data and update pbar\"\"\"\n key = arg[0]\n dataset[key] = arg[1]\n pbar.update('Reading {}'.format(key))\n\n pool = Pool(n_thread)\n for path, key in zip(all_img_list, keys):\n pool.apply_async(read_image_worker, args=(path, key), callback=mycallback)\n pool.close()\n pool.join()\n print('Finish reading {} images.\\nWrite lmdb...'.format(len(all_img_list)))\n\n #### write data to lmdb\n data_size_per_img = cv2.imread(all_img_list[0], cv2.IMREAD_UNCHANGED).nbytes\n print('data size per image is: ', data_size_per_img)\n data_size = data_size_per_img * len(all_img_list)\n env = lmdb.open(lmdb_save_path, map_size=data_size * 10)\n txn = env.begin(write=True)\n pbar = util.ProgressBar(len(all_img_list))\n for idx, (path, key) in enumerate(zip(all_img_list, keys)):\n pbar.update('Write {}'.format(key))\n key_byte = key.encode('ascii')\n data = dataset[key] if read_all_imgs else cv2.imread(path, cv2.IMREAD_UNCHANGED)\n if 'flow' in mode:\n H, W = data.shape\n assert H == H_dst and W == W_dst, 'different shape.'\n else:\n H, W, C = data.shape\n assert H == H_dst and W == W_dst and C == 3, 'different shape.'\n txn.put(key_byte, data)\n if not read_all_imgs and idx % BATCH == 0:\n txn.commit()\n txn = env.begin(write=True)\n txn.commit()\n env.close()\n print('Finish writing lmdb.')\n\n #### create meta information\n meta_info = {}\n if mode == 'GT':\n meta_info['name'] = 'Vimeo90K_train_GT'\n elif mode == 'LR':\n meta_info['name'] = 'Vimeo90K_train_LR'\n elif mode == 'flow':\n meta_info['name'] = 'Vimeo90K_train_flowx4'\n channel = 1 if 'flow' in mode else 3\n meta_info['resolution'] = '{}_{}_{}'.format(channel, H_dst, W_dst)\n key_set = set()\n for key in keys:\n if mode == 'flow':\n a, b, _, _ = key.split('_')\n else:\n a, b, _ = key.split('_')\n key_set.add('{}_{}'.format(a, b))\n meta_info['keys'] = list(key_set)\n pickle.dump(meta_info, open(osp.join(lmdb_save_path, 'meta_info.pkl'), \"wb\"))\n print('Finish creating lmdb meta info.')", "def map_all_sig_p(limitregion=False, region=\"allsky\"):\n \n # Get ids of all pixels that contain RHT data\n rht_cursor, tablename = get_rht_cursor(region = region)\n all_ids = get_all_rht_ids(rht_cursor, tablename)\n \n planck_tqu_db = sqlite3.connect(\"planck_TQU_gal_2048_db.sqlite\")\n planck_tqu_cursor = planck_tqu_db.cursor()\n planck_cov_db = sqlite3.connect(\"planck_cov_gal_2048_db.sqlite\")\n planck_cov_cursor = planck_cov_db.cursor()\n \n if limitregion is True:\n print(\"Loading all allsky data points that are in the SC_241 region\")\n # Get all ids that are in both allsky data and SC_241\n all_ids_SC = pickle.load(open(\"SC_241_healpix_ids.p\", \"rb\"))\n all_ids = list(set(all_ids).intersection(all_ids_SC))\n \n all_sigpGsq = np.zeros(len(all_ids))\n\n update_progress(0.0)\n for i, hp_index in enumerate(all_ids):\n #likelihood = Likelihood(_id[0], planck_tqu_cursor, planck_cov_cursor, p0_all, psi0_all, adaptivep0 = adaptivep0)\n (hp_index, T, Q, U) = planck_tqu_cursor.execute(\"SELECT * FROM Planck_Nside_2048_TQU_Galactic WHERE id = ?\", hp_index).fetchone()\n (hp_index, TT, TQ, TU, TQa, QQ, QU, TUa, QUa, UU) = planck_cov_cursor.execute(\"SELECT * FROM Planck_Nside_2048_cov_Galactic WHERE id = ?\", (hp_index,)).fetchone()\n \n # sigma_p as defined in arxiv:1407.0178v1 Eqn 3.\n sigma_p = np.zeros((2, 2), np.float_) # [sig_Q^2, sig_QU // sig_QU, 
UU]\n sigma_p[0, 0] = (1.0/T**2)*QQ #QQ\n sigma_p[0, 1] = (1.0/T**2)*QU #QU\n sigma_p[1, 0] = (1.0/T**2)*QU #QU\n sigma_p[1, 1] = (1.0/T**2)*UU #UU\n \n # det(sigma_p) = sigma_p,G^4\n det_sigma_p = np.linalg.det(sigma_p)\n sigpGsq = np.sqrt(det_sigma_p)\n \n all_sigpGsq[i] = sigpGsq\n \n update_progress((i+1.0)/len(all_ids), message='Calculating: ', final_message='Finished Calculating: ')\n \n # Place into healpix map\n hp_sigpGsq = make_hp_map(all_sigpGsq, all_ids, Nside = 2048, nest = True)\n \n out_root = \"/disks/jansky/a/users/goldston/susan/Wide_maps/\"\n if limitregion:\n hp.fitsfunc.write_map(out_root + \"planck_sigpGsq_SC_241.fits\", hp_sigpGsq, coord = \"G\", nest = True) \n else:\n hp.fitsfunc.write_map(out_root + \"planck_sigpGsq_DR2sky.fits\", hp_sigpGsq, coord = \"G\", nest = True)", "def im_detections(model, im, anchors):\n k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL\n A = cfg.RETINANET.SCALES_PER_OCTAVE * len(cfg.RETINANET.ASPECT_RATIOS)\n inputs = {}\n inputs['data'], inputs['im_info'] = _get_image_blob(im)\n cls_probs, box_preds = [], []\n for lvl in range(k_min, k_max + 1):\n suffix = 'fpn{}'.format(lvl)\n cls_probs.append(core.ScopedName('retnet_cls_prob_{}'.format(suffix)))\n box_preds.append(core.ScopedName('retnet_bbox_pred_{}'.format(suffix)))\n for k, v in inputs.items():\n workspace.FeedBlob(core.ScopedName(k), v.astype(np.float32, copy=False))\n\n workspace.RunNet(model.net.Proto().name)\n scale = inputs['im_info'][0, 2]\n cls_probs = workspace.FetchBlobs(cls_probs)\n box_preds = workspace.FetchBlobs(box_preds)\n\n # here the boxes_all are [x0, y0, x1, y1, score]\n boxes_all = defaultdict(list)\n\n cnt = 0\n for lvl in range(k_min, k_max + 1):\n # create cell anchors array\n stride = 2. ** lvl\n cell_anchors = anchors[lvl]\n\n # fetch per level probability\n cls_prob = cls_probs[cnt]\n box_pred = box_preds[cnt]\n cls_prob = cls_prob.reshape((\n cls_prob.shape[0], A, int(cls_prob.shape[1] / A),\n cls_prob.shape[2], cls_prob.shape[3]))\n box_pred = box_pred.reshape((\n box_pred.shape[0], A, 4, box_pred.shape[2], box_pred.shape[3]))\n cnt += 1\n\n if cfg.RETINANET.SOFTMAX:\n cls_prob = cls_prob[:, :, 1::, :, :]\n\n cls_prob_ravel = cls_prob.ravel()\n # In some cases [especially for very small img sizes], it's possible that\n # candidate_ind is empty if we impose threshold 0.05 at all levels. This\n # will lead to errors since no detections are found for this image. 
Hence,\n # for lvl 7 which has small spatial resolution, we take the threshold 0.0\n th = cfg.RETINANET.INFERENCE_TH if lvl < k_max else 0.0\n candidate_inds = np.where(cls_prob_ravel > th)[0]\n if (len(candidate_inds) == 0):\n continue\n\n pre_nms_topn = min(cfg.RETINANET.PRE_NMS_TOP_N, len(candidate_inds))\n inds = np.argpartition(\n cls_prob_ravel[candidate_inds], -pre_nms_topn)[-pre_nms_topn:]\n inds = candidate_inds[inds]\n\n inds_5d = np.array(np.unravel_index(inds, cls_prob.shape)).transpose()\n classes = inds_5d[:, 2]\n anchor_ids, y, x = inds_5d[:, 1], inds_5d[:, 3], inds_5d[:, 4]\n scores = cls_prob[:, anchor_ids, classes, y, x]\n\n boxes = np.column_stack((x, y, x, y)).astype(dtype=np.float32)\n boxes *= stride\n boxes += cell_anchors[anchor_ids, :]\n\n if not cfg.RETINANET.CLASS_SPECIFIC_BBOX:\n box_deltas = box_pred[0, anchor_ids, :, y, x]\n else:\n box_cls_inds = classes * 4\n box_deltas = np.vstack(\n [box_pred[0, ind:ind + 4, yi, xi]\n for ind, yi, xi in zip(box_cls_inds, y, x)]\n )\n pred_boxes = (\n box_utils.bbox_transform(boxes, box_deltas)\n if cfg.TEST.BBOX_REG else boxes)\n pred_boxes /= scale\n pred_boxes = box_utils.clip_tiled_boxes(pred_boxes, im.shape)\n box_scores = np.zeros((pred_boxes.shape[0], 5))\n box_scores[:, 0:4] = pred_boxes\n box_scores[:, 4] = scores\n\n for cls in range(1, cfg.MODEL.NUM_CLASSES):\n inds = np.where(classes == cls - 1)[0]\n if len(inds) > 0:\n boxes_all[cls].extend(box_scores[inds, :])\n\n # Combine predictions across all levels and retain the top scoring by class\n detections = []\n for cls, boxes in boxes_all.items():\n cls_dets = np.vstack(boxes).astype(dtype=np.float32)\n # do class specific nms here\n keep = box_utils.nms(cls_dets, cfg.TEST.NMS)\n cls_dets = cls_dets[keep, :]\n out = np.zeros((len(keep), 6))\n out[:, 0:5] = cls_dets\n out[:, 5].fill(cls)\n detections.append(out)\n\n detections = np.vstack(detections)\n # sort all again\n inds = np.argsort(-detections[:, 4])\n detections = detections[inds[0:cfg.TEST.DETECTIONS_PER_IM], :]\n boxes = detections[:, 0:4]\n scores = detections[:, 4]\n classes = detections[:, 5]\n return boxes, scores, classes", "def objective(hyperparams): \n global iteration #necessary with a global variable because of implementation from hyperopt. \n iteration += 1\n\n result = run_model(hyperparams, iteration)\n loss = -result #transform to loss in order to minimize\n\n return {'loss': loss, 'hyperparams': hyperparams, 'iteration': iteration, 'status': STATUS_OK}", "def update_kkrimp_params(self):\n\n decrease_mixing_fac = False\n switch_agressive_mixing = False\n switch_higher_accuracy = False\n initial_settings = False\n\n # only do something other than simple mixing after first kkr run\n if self.ctx.loop_count != 1:\n # first determine if previous step was successful (otherwise try to find some rms value and decrease mixing to try again)\n if not self.ctx.kkr_step_success:\n decrease_mixing_fac = True\n message = 'INFO: last KKR calculation failed. Trying decreasing mixfac'\n self.report(message)\n\n convergence_on_track = self.convergence_on_track()\n\n # check if calculation was on its way to converge\n if not convergence_on_track:\n decrease_mixing_fac = True\n message = 'INFO: Last KKR did not converge. 
Trying decreasing mixfac'\n self.report(message)\n # reset last_remote to last successful calculation\n last_calcs_list = list(range(len(self.ctx.calcs))) # needs to be list to support slicing\n if len(last_calcs_list) > 1:\n last_calcs_list = array(last_calcs_list)[::-1] # make sure to go from latest calculation backwards\n for icalc in last_calcs_list:\n message = f\"INFO: last calc success? {icalc} {self.ctx.KKR_steps_stats['success'][icalc]}\"\n self.report(message)\n if self.ctx.KKR_steps_stats['success'][icalc]:\n if self.ctx.KKR_steps_stats['last_rms'][icalc] < self.ctx.KKR_steps_stats['first_rms'][icalc]:\n self.ctx.last_remote = self.ctx.calcs[icalc].outputs.remote_folder\n break # exit loop if last_remote was found successfully\n else:\n self.ctx.last_remote = None\n else:\n self.ctx.last_remote = None\n # now cover case when last_remote needs to be set to initial remote folder (from input)\n if self.ctx.last_remote is None:\n if 'kkrimp_remote' in self.inputs:\n message = 'INFO: no successful and converging calculation to take RemoteData from. Reuse RemoteData from input instead.'\n self.report(message)\n self.ctx.last_remote = self.inputs.kkrimp_remote\n elif 'impurity_info' in self.inputs or 'remote_data' in self.inputs:\n self.ctx.last_remote = None\n # check if last_remote has finally been set and abort if this is not the case\n if self.ctx.last_remote is None:\n message = 'ERROR: last remote not found'\n self.report(message)\n return self.exit_codes.ERROR_SETTING_LAST_REMOTE # pylint: disable=no-member\n\n # check if mixing strategy should be changed\n last_mixing_scheme = self.ctx.last_params.get_dict()['IMIX']\n if last_mixing_scheme is None:\n last_mixing_scheme = 0\n\n if convergence_on_track:\n last_rms = self.ctx.last_rms_all[-1]\n if last_rms < self.ctx.threshold_aggressive_mixing and last_mixing_scheme == 0:\n switch_agressive_mixing = True\n message = 'INFO: rms low enough, switch to agressive mixing'\n self.report(message)\n\n # check if switch to higher accuracy should be done\n if not self.ctx.kkr_higher_accuracy:\n if self.ctx.kkr_converged: # or last_rms < self.ctx.threshold_switch_high_accuracy:\n switch_higher_accuracy = True\n # self.report(\"INFO: rms low enough, switch to higher accuracy settings\")\n else:\n initial_settings = True\n self.ctx.kkr_step_success = True\n\n if self.ctx.loop_count > 1:\n last_rms = self.ctx.last_rms_all[-1]\n\n # extract values from host calculation\n host_GF_calc = self.inputs.remote_data.get_incoming(node_class=CalcJobNode).first().node\n host_GF_outparams = host_GF_calc.outputs.output_parameters.get_dict()\n host_GF_inparams = host_GF_calc.inputs.parameters.get_dict()\n nspin = host_GF_outparams.get('nspin')\n non_spherical = host_GF_inparams.get('INS')\n if non_spherical is None:\n non_spherical = kkrparams.get_KKRcalc_parameter_defaults()[0].get('INS')\n self.ctx.spinorbit = host_GF_outparams.get('use_newsosol')\n\n # if needed update parameters\n if decrease_mixing_fac or switch_agressive_mixing or switch_higher_accuracy or initial_settings or self.ctx.mag_init:\n if initial_settings:\n label = 'initial KKR scf parameters'\n description = 'initial parameter set for scf calculation'\n else:\n label = ''\n description = ''\n\n # step 1: extract info from last input parameters and check consistency\n para_check = kkrparams(params_type='kkrimp')\n para_check.get_all_mandatory()\n message = 'INFO: get kkrimp keywords'\n self.report(message)\n\n # init new_params dict where updated params are collected\n new_params = 
{}\n\n # step 1.2: check if all mandatory keys are there and add defaults if missing\n missing_list = para_check.get_missing_keys(use_aiida=True)\n if missing_list != []:\n kkrdefaults = kkrparams.get_KKRcalc_parameter_defaults()[0]\n kkrdefaults_updated = []\n for key_default, val_default in list(kkrdefaults.items()):\n if key_default in missing_list:\n new_params[key_default] = kkrdefaults.get(key_default)\n kkrdefaults_updated.append(key_default)\n if len(kkrdefaults_updated) > 0:\n self.report('ERROR: no default param found')\n return self.exit_codes.ERROR_MISSING_PARAMS # pylint: disable=no-member\n else:\n message = f'updated KKR parameter node with default values: {kkrdefaults_updated}'\n self.report(message)\n\n # step 2: change parameter (contained in new_params dictionary)\n last_mixing_scheme = para_check.get_value('IMIX')\n if last_mixing_scheme is None:\n last_mixing_scheme = 0\n\n strmixfac = self.ctx.strmix\n aggrmixfac = self.ctx.aggrmix\n nsteps = self.ctx.nsteps\n\n # TODO: maybe add decrease mixing factor option as in kkr_scf wc\n # step 2.1 fill new_params dict with values to be updated\n if decrease_mixing_fac:\n if last_mixing_scheme == 0:\n self.report(f'(strmixfax, mixreduce)= ({strmixfac}, {self.ctx.mixreduce})')\n self.report(f'type(strmixfax, mixreduce)= {type(strmixfac)} {type(self.ctx.mixreduce)}')\n strmixfac = strmixfac * self.ctx.mixreduce\n self.ctx.strmix = strmixfac\n label += f'decreased_mix_fac_str (step {self.ctx.loop_count})'\n description += f'decreased STRMIX factor by {self.ctx.mixreduce}'\n else:\n self.report(f'(aggrmixfax, mixreduce)= ({aggrmixfac}, {self.ctx.mixreduce})')\n self.report(f'type(aggrmixfax, mixreduce)= {type(aggrmixfac)} {type(self.ctx.mixreduce)}')\n aggrmixfac = aggrmixfac * self.ctx.mixreduce\n self.ctx.aggrmix = aggrmixfac\n label += 'decreased_mix_fac_bry'\n description += f'decreased AGGRMIX factor by {self.ctx.mixreduce}'\n\n if switch_agressive_mixing:\n last_mixing_scheme = self.ctx.type_aggressive_mixing\n label += ' switched_to_agressive_mixing'\n description += f' switched to agressive mixing scheme (IMIX={last_mixing_scheme})'\n\n # add number of scf steps, spin\n new_params['SCFSTEPS'] = nsteps\n new_params['NSPIN'] = nspin\n new_params['INS'] = non_spherical\n\n # add ldos runoption if dos_run = True\n if self.ctx.dos_run:\n if self.ctx.lmdos:\n runflags = new_params.get('RUNFLAG', []) + ['lmdos']\n else:\n runflags = new_params.get('RUNFLAG', []) + ['ldos']\n new_params['RUNFLAG'] = runflags\n new_params['SCFSTEPS'] = 1\n\n # turn on Jij calculation if jij_run == True\n if self.ctx.jij_run:\n new_params['CALCJIJMAT'] = 1\n\n # add newsosol\n if self.ctx.spinorbit:\n testflags = new_params.get('TESTFLAG', []) + ['tmatnew']\n new_params['TESTFLAG'] = testflags\n new_params['SPINORBIT'] = 1\n new_params['NCOLL'] = 1\n # TODO add deprecation warning and remove these lines (can be set with params_overwrite instead)\n if self.ctx.mesh_params.get('RADIUS_LOGPANELS', None) is not None:\n new_params['RADIUS_LOGPANELS'] = self.ctx.mesh_params['RADIUS_LOGPANELS']\n if self.ctx.mesh_params.get('NCHEB', None) is not None:\n new_params['NCHEB'] = self.ctx.mesh_params['NCHEB']\n if self.ctx.mesh_params.get('NPAN_LOG', None) is not None:\n new_params['NPAN_LOG'] = self.ctx.mesh_params['NPAN_LOG']\n if self.ctx.mesh_params.get('NPAN_EQ', None) is not None:\n new_params['NPAN_EQ'] = self.ctx.mesh_params['NPAN_EQ']\n new_params['CALCORBITALMOMENT'] = 1\n else:\n new_params['SPINORBIT'] = 0\n new_params['NCOLL'] = 0\n 
new_params['CALCORBITALMOMENT'] = 0\n new_params['TESTFLAG'] = []\n\n # set mixing schemes and factors\n if last_mixing_scheme > 2:\n new_params['ITDBRY'] = self.ctx.broyden_num\n new_params['IMIX'] = last_mixing_scheme\n new_params['MIXFAC'] = aggrmixfac\n new_params['NSIMPLEMIXFIRST'] = self.ctx.nsimplemixfirst\n elif last_mixing_scheme == 0:\n new_params['IMIX'] = last_mixing_scheme\n new_params['MIXFAC'] = strmixfac\n\n # add mixing scheme to context\n self.ctx.last_mixing_scheme = last_mixing_scheme\n\n if switch_higher_accuracy:\n self.ctx.kkr_higher_accuracy = True\n\n # add convergence settings\n if self.ctx.loop_count == 1 or self.ctx.last_mixing_scheme == 0:\n new_params['QBOUND'] = self.ctx.threshold_aggressive_mixing\n else:\n new_params['QBOUND'] = self.ctx.convergence_criterion\n\n # initial magnetization\n if initial_settings and self.ctx.mag_init:\n if self.ctx.hfield[0] <= 0.0 or self.ctx.hfield[1] == 0:\n self.report(\n '\\nWARNING: magnetization initialization chosen but hfield is zero. Automatically change back to default value (hfield={})\\n'\n .format(self._wf_default['hfield'])\n )\n self.ctx.hfield = self._wf_default['hfield']\n new_params['HFIELD'] = self.ctx.hfield\n elif self.ctx.mag_init and self.ctx.mag_init_step_success: # turn off initialization after first (successful) iteration\n new_params['HFIELD'] = [0.0, 0]\n elif not self.ctx.mag_init:\n self.report(\"INFO: mag_init is False. Overwrite 'HFIELD' to '0.0' and 'LINIPOL' to 'False'.\")\n # reset mag init to avoid re-initializing\n new_params['HFIELD'] = [0.0, 0]\n\n # set nspin to 2 if mag_init is used\n if self.ctx.mag_init:\n nspin_in = nspin\n if nspin_in is None:\n nspin_in = 1\n if nspin_in < 2:\n self.report('WARNING: found NSPIN=1 but for maginit needs NSPIN=2. 
Overwrite this automatically')\n new_params['NSPIN'] = 2\n message = f'new_params: {new_params}'\n self.report(message)\n\n # overwrite values from additional input node\n if 'params_overwrite' in self.inputs:\n print('use params_overwrite', self.inputs.params_overwrite.get_dict())\n self._overwrite_parameters_from_input(new_params)\n\n # step 2.2 update values\n try:\n for key, val in new_params.items():\n para_check.set_value(key, val, silent=True)\n except:\n message = 'ERROR: failed to set some parameters'\n self.report(message)\n return self.exit_codes.ERROR_PARAMETER_UPDATE # pylint: disable=no-member\n\n # step 3:\n message = f'INFO: update parameters to: {para_check.get_set_values()}'\n self.report(message)\n updatenode = Dict(para_check.get_dict())\n updatenode.label = label\n updatenode.description = description\n paranode_new = updatenode #update_params_wf(self.ctx.last_params, updatenode)\n self.ctx.last_params = paranode_new\n else:\n message = 'INFO: reuse old settings'\n self.report(message)\n\n message = 'INFO: done updating kkr param step'\n self.report(message)", "def show_predictions(model, test_set, val_set, image_guess, img_res, data='OSNR', GRAY=True):\n \n ## Uses model to predict some amount of images\n predict = model.predict_classes(test_set, batch_size=5, verbose=1)\n \n ## Initialises variables for loop\n correctly_guessed = 0\n\n ## Defines figure dimensions\n fig = plt.figure(figsize=(20,30))\n\n ## Begins loop to find correct predictions and relay results to user\n ## Searches through the prediction array and compares it to the actual array.\n ## Displays image with the prediction and answer on the title\n for i in range(image_guess):\n correct = False\n actual = np.argmax(val_set[i])\n\n if predict[i] == actual:\n correctly_guessed += 1\n correct = True\n\n plt.subplot(6,3,i+1)\n fig.subplots_adjust(left=0.01,\n right=0.7,\n bottom=0.1,\n top=1.2,\n wspace=0.5,\n hspace=0.2\n )\n if GRAY == False:\n plt.imshow(test_set[i].reshape(img_res,img_res,3))\n else:\n plt.imshow(test_set[i].reshape(img_res,img_res), cmap='gray')\n\n if correct == True:\n if data == 'disp':\n plt.title('Correct! \\nPrediction = {}ps/nm Truth = {}ps/nm'\n .format((10+10*predict[i]), (10+10*(actual))), fontsize=15)\n \n if data == 'disp-short':\n plt.title('Correct! \\nPrediction = {} ~ {}ps/nm Truth = {} ~{}ps/nm'\n .format(100*(predict[i]), (100+100*predict[i]), 100*(actual), (100+100*(actual)), fontsize=15))\n \n if data == 'OSNR':\n plt.title('Correct! 
\\nPrediction = {}dB Truth = {}dB'\n .format((12+0.5*predict[i]), (12+0.5*(actual))), fontsize=15)\n \n \n else:\n if data == 'disp':\n plt.title('\\nPrediction = {}ps/nm Truth = {}ps/nm'\n .format((10+10*predict[i]), (10+10*(actual))), fontsize=15)\n \n if data == 'disp-short':\n plt.title('\\nPrediction = {} ~ {}ps/nm Truth = {} ~{}ps/nm'\n .format(100*(predict[i]), (100+100*predict[i]), 100*(actual), (100+100*(actual)), fontsize=15))\n \n if data == 'OSNR':\n plt.title('\\nPrediction = {}dB Truth = {}dB'\n .format((12+0.5*predict[i]), (12+0.5*(actual))), fontsize=15)\n\n ## Returns amount of predictions that were correct\n print('Correctly guessed = ', correctly_guessed)\n print('Inorrectly guessed = ', (image_guess-correctly_guessed))", "def report_result(model, img, rays_o, rays_d, bound, num_samples, raybatch_size):\n pixels = img.reshape(-1, 3)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n\n t_vals, xyz = sample_points(rays_o, rays_d, bound[0], bound[1],\n num_samples, perturb=False)\n \n synth = []\n num_rays = rays_d.shape[0]\n with torch.no_grad():\n for i in range(0, num_rays, raybatch_size):\n rgbs_batch, sigmas_batch = model(xyz[i:i+raybatch_size])\n color_batch = volume_render(rgbs_batch, sigmas_batch, t_vals[i:i+raybatch_size])\n synth.append(color_batch)\n synth = torch.cat(synth, dim=0)\n error = F.mse_loss(synth, pixels)\n psnr = -10*torch.log10(error)\n \n return psnr", "def NM08_model_loop(root, run_dict, res_dict, dual_list, perm_tup, machine,\n decimate=100, i=1, verbose=False):\n if machine == 'laptop':\n fz_file_pat = '/home/chet/gmt/data/NZ/wells/feedzones/' \\\n 'NM08_feedzones_?.csv'\n T_file = '/home/chet/data/mrp_data/Steve_Sewell_MRP_PhD_Data/' \\\n 'Natural_State_Temperatures/NM08_profile_pyfehm_comma.txt'\n excel_file = '/home/chet/data/mrp_data/well_data/flow_rates/' \\\n 'July_2017_final/Merc_Ngatamariki.xlsx'\n elif machine == 'server':\n fz_file_pat = '/Users/home/hoppche/data/merc_data/wells/' \\\n 'NM08_feedzones_?.csv'\n T_file = '/Users/home/hoppche/data/merc_data/temps/' \\\n 'NM08_profile_pyfehm_comma.txt'\n excel_file = '/Users/home/hoppche/data/merc_data/flows/' \\\n 'Merc_Ngatamariki.xlsx'\n # Make the directory for this object\n print('Making grid')\n # Extract just floats and exponent from perms\n work_dir = '{}/run_{}'.format(root, i)\n dat = make_NM08_grid(work_dir=work_dir, log_base=3, max_range=15)\n print('Assigning reservoir parameters')\n dat = reservoir_params(dat, temp_file=T_file, reservoir_dict=res_dict,\n show=False)\n print('Defining well nodes')\n dat = define_well_nodes(\n dat, well_file_pattern=fz_file_pat,\n well_name='NM08', type='injection', surf_loc=[1500., 1500.])\n print('Running initial condition')\n dat = run_initial_conditions(dat)\n dat = set_well_boundary(\n dat, excel_file=excel_file, sheet_name='NM08 Stimulation',\n well_name='NM08', dates=[datetime(2012, 6, 7), datetime(2012, 7, 12)],\n t_step='day', decimate=decimate, debug=0)\n dat = set_stress(dat)\n dat = set_dual(dat, zonelist=['tahorakuri'], dual_list=dual_list)\n if perm_tup:\n dat = set_permmodel(dat, zonelist=['tahorakuri'], index=perm_tup[0],\n permmodel_dict=perm_tup[1])\n model_run(dat, run_dict, verbose=verbose)\n return", "def iterations(self):\n i = 0\n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n n = len(self.model.stateVector)\n self.answer = None\n \n while ((i < self.maxiter) \n and (stateVectorConv > self.stateVectorConvThreshold)\n ):\n \n F, K = self.model()\n \n if np.any(np.isnan(F)) or 
np.any(np.isnan(K)):\n m = \"Iteration {0} failure of model.\"\n raise OptimalEstimationException(m.format(i))\n \n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n m = \"Iteration {0} failure in decomposition.\"\n raise OptimalEstimationException(m.format(i))\n \n statevectorOffset = (self.V.T * self.priorSinvh * \n np.matrix(np.array(self.model.stateVector) - np.array(self.model.prior) ).T)\n measurementOffset = (self.U.T * self.errSinvh * \n np.matrix(self.model.observation - F).T)\n \n newState = np.matrix((self.w * \n (measurementOffset.A1 + \n self.w * statevectorOffset.A1))/(self.w**2+1.0)).T\n newState = self.priorSh * self.V * newState\n newState = newState.A1 + self.model.prior\n \n stateVectorConv = ((np.matrix(newState - self.model.stateVector) * \n self.Sinv * np.matrix(newState - self.model.stateVector).T)/n)[0,0]\n self.model.stateVector = newState\n\n if i == 0:\n \n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n \n print('cost Function for iteration {}:'.format(i), self.costFunction)\n\n i += 1\n \n F, K = self.model()\n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n raise OptimalEstimationException(\"Failure in decomposition.\")\n \n Wplus2 = np.matrix(np.diag(1.0/(self.w**2+1.0)))\n self.model.covariance = (self.priorSh * self.V * Wplus2 * \n self.V.T * self.priorSh)\n \n\n \n return i, stateVectorConv", "def error_analysis(predictions, gold, result_collector):\n # scores = defaultdict(list)\n for iteration_id, texts in predictions.items():\n # map iteration id to fold\n fold = str(int(iteration_id) / 5)\n for tid, pred_tree in texts.items():\n gold_tree = gold[tid]\n print(iteration_id, fold, tid)\n print(gold_tree.get_triples())\n print(pred_tree.get_triples())\n for level, scores in eval_prediction([gold_tree], [pred_tree]):\n result_collector.add_result(tid, fold, level, scores)\n print(\"Done.\")", "def iterate(self, u, z, **kwrags):\n self.predict(u=u)\n self.update(z=z)\n self._mat_desc.update(kwrags)", "def run_iterations(game, solver, start_iteration=0):\n for i in range(int(FLAGS.iterations / 2)):\n solver.run_iteration()\n policy = solver.average_policy()\n exploitability = pyspiel.exploitability(game, policy)\n\n # We also compute NashConv to highlight an important API feature:\n # when using Monte Carlo sampling, the policy\n # may not have a table entry for every info state.\n # Therefore, when calling nash_conv, ensure the third argument,\n # \"use_state_get_policy\" is set to True\n # See https://github.com/deepmind/open_spiel/issues/500\n nash_conv = pyspiel.nash_conv(game, policy, True)\n\n print(\"Iteration {} nashconv: {:.6f} exploitability: {:.6f}\".format(\n start_iteration + i, nash_conv, exploitability))", "def evaluate_regressor(sess, model_info, val_image_feats, val_text_tags, w2v_model, k=5, verbose=False):\n val_pred = sess.run(model_info['prediction'], feed_dict={model_info['input']:val_image_feats})\n\n w2ind = {}\n reverse_w2v_model = {}\n wordmatrix = np.zeros((len(w2v_model), len(w2v_model[w2v_model.keys()[0]])))\n for i, word in enumerate(w2v_model):\n w2ind[word] = i\n wordmatrix[i, :] = w2v_model[word]\n reverse_w2v_model[i] = word\n\n ground_truth_one_hot = np.zeros((len(val_text_tags), len(w2v_model)))\n num_skipped = 0\n total = 0\n skipped = set()\n for i, tags in enumerate(val_text_tags):\n for tag in tags:\n try:\n total += 1\n 
ground_truth_one_hot[i, w2ind[tag]] = 1\n except KeyError:\n skipped.add(tag)\n num_skipped +=1\n\n if verbose:\n print('Skipped {} of {} total'.format(num_skipped, total))\n\n predictions_one_hot = np.zeros((len(val_text_tags), len(w2v_model)))\n for i in range(val_pred.shape[0]):\n normalized_val = val_pred[i, :]/np.linalg.norm(val_pred[i, :])\n # np.dot(wordmatrix, normalized_val) gets the similarity between the two vectors\n # argpartition gets the topk (where k=5)\n indices = np.argpartition(np.dot(wordmatrix,normalized_val), -1*k)[-1*k:]\n for index in indices:\n predictions_one_hot[i, index] = 1\n\n evaluator = Evaluation(ground_truth_one_hot, predictions_one_hot)\n\n return evaluator", "def loop_over_files(self, files_dir, files_opt, results_path, wavelength_idx=None,\n configuration_idx=None, N_rays=500, box_size=2, monte_carlo=False):\n\n # We want the result to produce as output: Ensquared Energy, Object coords, Image Slicer and Detector centroids\n results_names = ['EE', 'OBJ_XY', 'SLI_XY', 'DET_XY']\n N_waves = 23 if wavelength_idx is None else len(wavelength_idx)\n # we need to give the shapes of each array to self.run_analysis\n results_shapes = [(N_waves,), (2,), (N_waves, 2,), (N_waves, 2,)]\n\n metadata = {}\n metadata['N_rays'] = N_rays\n metadata['Box Size Spaxels'] = box_size\n metadata['Configurations'] = 'All' if configuration_idx is None else configuration_idx\n metadata['Wavelengths'] = 'All' if wavelength_idx is None else wavelength_idx\n\n # read the file options\n if monte_carlo is False:\n file_list, sett_list = create_zemax_file_list(which_system=files_opt['which_system'],\n AO_modes=files_opt['AO_modes'], scales=[files_opt['SPAX_SCALE']],\n IFUs=[files_opt['IFU_PATH']], grating=[files_opt['GRATING']])\n elif monte_carlo is True:\n file_list, sett_list = create_zemax_filename_MC(AO_mode=files_opt['AO_MODE'], scale=files_opt['SPAX_SCALE'],\n IFUpath=files_opt['IFU_PATH'], grating=files_opt['GRATING'],\n FPRS_MC_instance=files_opt['FPRS_MC'],\n IPO_MC_instance=files_opt['IPO_MC'],\n IFU_MC_instance=files_opt['IFU_MC'],\n ISP_MC_instance=files_opt['ISP_MC'])\n\n results = []\n for zemax_file, settings in zip(file_list, sett_list):\n\n # Generate a set of random pupil rays\n px, py = define_pupil_sampling(r_obsc=0.2841, N_rays=N_rays, mode='random')\n print(\"Using %d rays\" % N_rays)\n\n list_results = self.run_analysis(analysis_function=self.analysis_function_ensquared,\n files_dir=files_dir, zemax_file=zemax_file, results_path=results_path,\n results_shapes=results_shapes, results_names=results_names,\n wavelength_idx=wavelength_idx, configuration_idx=configuration_idx,\n px=px, py=py, box_size=box_size)\n\n results.append(list_results)\n\n # Post-Processing the results\n file_name = zemax_file.split('.')[0]\n settings['surface'] = 'DETECTOR'\n # self.save_hdf5(analysis_name='ENSQ_ENERG', analysis_metadata=metadata, list_results=list_results,\n # results_names=results_names, file_name=file_name, file_settings=settings, results_dir=results_path)\n\n return results", "def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, 
channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))", "def go():\n ##########\n #\n # MB19284\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n target = 'mb19284'\n sci_files = ['i200822_a011{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)]\n sky_files = ['i200822_a018{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [917.75, 1033.5] # This is the target\n # Alternative star to try (bright star to bottom of target): [1015, 581.9]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=0, weight='strehl', submaps=3, instrument=osiris)\n\n ##########\n #\n # KB200101\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n # -- If you have more than one position angle, make sure to\n # clean them seperatly.\n # -- Strehl and Ref src should be the pixel coordinates of a bright\n # (but non saturated) source in the first exposure of sci_files.\n # -- If you use the OSIRIS image, you must include the full filename in the list. 
\n target = 'kb200101'\n sci_files = ['i200822_a014{0:03d}_flip'.format(ii) for ii in range(2, 28+1)]\n sci_files += ['i200822_a015{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a016{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sky_files = ['i200822_a017{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [975, 1006] # This is the target\n # Alternative star to try (bright star to right of target): [1158, 994]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=1, weight='strehl', submaps=3, instrument=osiris)", "def im_detect_keypoints(model, im_scales, boxes):\n assert len(im_scales) == 1, \\\n 'Only single-image / single-scale batch implemented'\n\n M = cfg.KRCNN.HEATMAP_SIZE\n if boxes.shape[0] == 0:\n pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)\n return pred_heatmaps\n\n inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scales)}\n\n # Add multi-level rois for FPN\n if cfg.FPN.MULTILEVEL_ROIS:\n _add_multilevel_rois_for_test(inputs, 'keypoint_rois')\n\n for k, v in inputs.items():\n workspace.FeedBlob(core.ScopedName(k), v)\n workspace.RunNet(model.keypoint_net.Proto().name)\n\n pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()\n\n # In case of 1\n if pred_heatmaps.ndim == 3:\n pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)\n\n return pred_heatmaps", "def callParallelReducedGA(region):\n year = 2000\n while(year <= 2005):\n execParallelReducedGAModel(year, region)\n year+=1", "def GMRES_1(A, b, x0, max_iterations=50):\n\n last_x = x0\n curr_x = last_x\n last_r = b - A @ x0\n curr_iter = 0\n residual_queue = []\n while curr_iter < max_iterations:\n Ar = A @ last_r\n alpha = (last_r.transpose() @ Ar) / (Ar.transpose() @ Ar)\n curr_x = last_x + alpha * last_r\n curr_r = last_r - alpha * Ar\n c = np.linalg.norm(A @ curr_x - b, 2) / np.linalg.norm(b, 2)\n residual_queue.append(np.linalg.norm(A @ curr_x - b, 2))\n if curr_iter == max_iterations - 1:\n print_graph(residual_queue, curr_iter, \"residual\", \"GMRES(1)\")\n last_x = curr_x\n last_r = curr_r\n curr_iter += 1\n print(\"Number of Iterations: \" + str(curr_iter))\n\n return curr_x", "def _optimization_loop(self, iteration=0):\n self.logger.print_optimization_header()\n\n while iteration < self.iterations:\n try:\n self._execute_experiment()\n except RepeatedExperimentError:\n # G.debug_(F'Skipping repeated Experiment: {_ex!s}\\n')\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n self.skipped_iterations += 1\n continue\n except StopIteration:\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n # G.debug_(f'Re-initializing hyperparameter grid after testing {len(self.tested_keys)} keys')\n self._set_hyperparameter_space()\n continue\n\n self.logger.print_result(\n self.current_hyperparameters_list,\n self.current_score,\n experiment_id=self.current_experiment.experiment_id,\n )\n\n if (\n (self.best_experiment is None) # First evaluation\n or (self.do_maximize and (self.best_score < self.current_score)) # New best max\n or (not self.do_maximize and (self.best_score > 
self.current_score)) # New best min\n ):\n self.best_experiment = self.current_experiment.experiment_id\n self.best_score = self.current_score\n\n iteration += 1", "def compute(self, result_file_dict):\r\n for part in self.parts:\r\n #=====================Need to change, temporal=========================\r\n if part == 'train':\r\n continue # because the train not have the label\r\n #=======================================================================\r\n gt = self.gt_dict[part]\r\n result_file = result_file_dict[part]\r\n # import ipdb; ipdb.set_trace()\r\n for key, item in result_file.items():\r\n self._result_name = item\r\n # score_records, num_videos = self.load_results(result_file)\r\n score_records, num_videos = self.load_results(item)\r\n logger.info(f'Compute Metric of {item}')\r\n assert num_videos == len(gt), f'the number of saved videos does not match the ground truth, {num_videos} != {len(gt)}'\r\n temp_result = self.eval_method(score_records, gt, str(key))\r\n if temp_result > self.optimal_resulst:\r\n self.optimal_resulst = temp_result\r\n \r\n return self.optimal_resulst", "def epoch_recon(models,datapath,img_size=100,adjust=False,epochs=120,max_diff=0.1,step_size=0.01):\n target = set_creation(datapath,img_size=img_size,nrand=1000,adjust=adjust) # Load 1000 images\n out,results = success_rate(model,target,img_size,args.discrepancy)\n acc_size = numpy.arange(0,max_diff,step_size) # Define discrepancy ranges\n results = numpy.zeros((epochs,len(acc_size))) # Initialize results array (epoch vs. reconstruction accuracy)\n for epoch in range(epochs): # Loop over epochs\n model_epoch = models[epoch+1] # Load epoch model\n model_epoch.eval() # Set model to evaluation mode\n out, _ = model_epoch(target.float()) # Execute trained model to data\n for j in range(len(out)): # Loop over all output data\n out[j][0] = (out[j][0]-out[j][0].min())/(out[j][0].max()-out[j][0].min()) # Normalized outputs\n diff = abs(out-target).reshape(len(out),img_size,img_size).data.numpy() # Calculate difference between original and output images\n acc = numpy.array([[len(var[numpy.where((i<=var)&(var<i+step_size))]) for var in diff] for i in acc_size]) # Find how many pixels are found in each discrepancy range \n acc = acc/img_size**2*100 # Convert the values to percentages\n results[epoch] = numpy.mean(acc,axis=1) # Calculate mean percentage accross all images\n plt.style.use('seaborn') # Set seaborn style\n fig = plt.figure(figsize=(10,6),dpi=80) # Initialize figure\n ax1 = fig.add_axes([0.10,0.10,0.83,0.69]) # Main plot\n ax2 = fig.add_axes([0.95,0.10,0.03,0.69]) # Colorbar\n ax3 = fig.add_axes([0.10,0.82,0.83,0.15],sharex=ax1) # Histogram\n img = ax1.imshow(results.T[::-1],aspect='auto',cmap='summer',extent=[0,epochs,0,max_diff])\n ax1.set_xlabel('Epochs')\n ax1.set_ylabel('Discrepancy threshold')\n plt.colorbar(img,label='Percentage of pixels',cax=ax2) # Plot colorbar\n y = [sum(results[i]) for i in range(epochs)] # Sum all percentages for each epoch\n x = numpy.arange(epochs)\n ax3.bar(x,y,width=1,align='edge',color='lightgrey')\n ax3.set_facecolor('white')\n ax3.set_ylim(min(y)-1,max(y)+1)\n ax3.set_title('Reconstruction accuracy')\n plt.setp(ax3.get_xticklabels(), visible=False)\n plt.show()", "def REDS(mode):\n #### configurations\n read_all_imgs = False # whether real all images to memory with multiprocessing\n # Set False for use limited memory\n BATCH = 5000 # After BATCH images, lmdb commits, if read_all_imgs = False\n if mode == 'train_sharp':\n img_folder = 
'../../datasets/REDS/train_sharp'\n lmdb_save_path = '../../datasets/REDS/train_sharp_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_sharp_bicubic':\n img_folder = '../../datasets/REDS/train_sharp_bicubic'\n lmdb_save_path = '../../datasets/REDS/train_sharp_bicubic_wval.lmdb'\n H_dst, W_dst = 180, 320\n elif mode == 'train_blur_bicubic':\n img_folder = '../../datasets/REDS/train_blur_bicubic'\n lmdb_save_path = '../../datasets/REDS/train_blur_bicubic_wval.lmdb'\n H_dst, W_dst = 180, 320\n elif mode == 'train_blur':\n img_folder = '../../datasets/REDS/train_blur'\n lmdb_save_path = '../../datasets/REDS/train_blur_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_blur_comp':\n img_folder = '../../datasets/REDS/train_blur_comp'\n lmdb_save_path = '../../datasets/REDS/train_blur_comp_wval.lmdb'\n H_dst, W_dst = 720, 1280\n elif mode == 'train_sharp_flowx4':\n img_folder = '../../datasets/REDS/train_sharp_flowx4'\n lmdb_save_path = '../../datasets/REDS/train_sharp_flowx4.lmdb'\n H_dst, W_dst = 360, 320\n n_thread = 40\n ########################################################\n if not lmdb_save_path.endswith('.lmdb'):\n raise ValueError(\"lmdb_save_path must end with \\'lmdb\\'.\")\n if osp.exists(lmdb_save_path):\n print('Folder [{:s}] already exists. Exit...'.format(lmdb_save_path))\n sys.exit(1)\n\n #### read all the image paths to a list\n print('Reading image path list ...')\n all_img_list = data_util._get_paths_from_images(img_folder)\n keys = []\n for img_path in all_img_list:\n split_rlt = img_path.split('/')\n folder = split_rlt[-2]\n img_name = split_rlt[-1].split('.png')[0]\n keys.append(folder + '_' + img_name)\n\n if read_all_imgs:\n #### read all images to memory (multiprocessing)\n dataset = {} # store all image data. 
list cannot keep the order, use dict\n print('Read images with multiprocessing, #thread: {} ...'.format(n_thread))\n pbar = util.ProgressBar(len(all_img_list))\n\n def mycallback(arg):\n '''get the image data and update pbar'''\n key = arg[0]\n dataset[key] = arg[1]\n pbar.update('Reading {}'.format(key))\n\n pool = Pool(n_thread)\n for path, key in zip(all_img_list, keys):\n pool.apply_async(read_image_worker, args=(path, key), callback=mycallback)\n pool.close()\n pool.join()\n print('Finish reading {} images.\\nWrite lmdb...'.format(len(all_img_list)))\n\n #### create lmdb environment\n data_size_per_img = cv2.imread(all_img_list[0], cv2.IMREAD_UNCHANGED).nbytes\n print('data size per image is: ', data_size_per_img)\n data_size = data_size_per_img * len(all_img_list)\n env = lmdb.open(lmdb_save_path, map_size=data_size * 10)\n\n #### write data to lmdb\n pbar = util.ProgressBar(len(all_img_list))\n txn = env.begin(write=True)\n for idx, (path, key) in enumerate(zip(all_img_list, keys)):\n pbar.update('Write {}'.format(key))\n key_byte = key.encode('ascii')\n data = dataset[key] if read_all_imgs else cv2.imread(path, cv2.IMREAD_UNCHANGED)\n if 'flow' in mode:\n H, W = data.shape\n assert H == H_dst and W == W_dst, 'different shape.'\n else:\n H, W, C = data.shape\n assert H == H_dst and W == W_dst and C == 3, 'different shape.'\n txn.put(key_byte, data)\n if not read_all_imgs and idx % BATCH == 0:\n txn.commit()\n txn = env.begin(write=True)\n txn.commit()\n env.close()\n print('Finish writing lmdb.')\n\n #### create meta information\n meta_info = {}\n meta_info['name'] = 'REDS_{}_wval'.format(mode)\n channel = 1 if 'flow' in mode else 3\n meta_info['resolution'] = '{}_{}_{}'.format(channel, H_dst, W_dst)\n meta_info['keys'] = keys\n pickle.dump(meta_info, open(osp.join(lmdb_save_path, 'meta_info.pkl'), \"wb\"))\n print('Finish creating lmdb meta info.')", "def ROOMSELECTION_LOOP():\n pass", "def evaluate():\n global dictionary, wv\n count = 0\n # To save the scores by distance and similarity\n scores = np.zeros(6)\n similar = np.zeros(6)\n itr = len(dictionary)\n logging.info('running evaluation for {0} samples'.format(itr))\n for key in dictionary:\n progress = (count / itr) * 100\n d = dictionary[key].split('resource/')\n d = [idx.split()[0].translate(table).lower() for idx in d[1:]]\n try:\n r = np.array(list(map(lambda x: wv.get_vector(x), d)),\n dtype=np.float32)\n except KeyError:\n itr -= 1\n continue\n if np.any(np.isnan(r)):\n itr -= 1\n continue\n else:\n if r.ndim == 2:\n try:\n # Mean of vector containing all word vectors\n # obtained from abstract.\n r = r.mean(axis=0).reshape(1, -1)\n \n # Obtain the vectors for the entity\n mean_vec = mean_encoder(dictionary[key])\n mean_vec = mean_vec.reshape(1, -1) / norm(mean_vec)\n mean_dist_vec = distance_encoder(dictionary[key])\n mean_dist_vec = mean_dist_vec.reshape(1, -1)\n mean_dist_vec = mean_dist_vec / norm(mean_dist_vec)\n title_vec = title_mean(key)\n title_vec = title_vec.reshape(1, -1) / norm(title_vec)\n abstract_vec = abstract_encoder(key)\n abstract_vec = abstract_vec.reshape(1, -1)\n abstract_vec = abstract_vec / norm(abstract_vec)\n random_vec = np.random.randn(100).reshape(1, -1)\n zero_vec = np.zeros(100).reshape(1, -1)\n \n # Score the entity vectors\n scores[0] += norm(r - mean_vec)\n scores[1] += norm(r - mean_dist_vec)\n scores[2] += norm(r - title_vec)\n scores[3] += norm(r - abstract_vec)\n scores[4] += norm(r - random_vec)\n scores[5] += norm(r - zero_vec)\n similar[0] += cosine_similarity(r, mean_vec)\n 
similar[1] += cosine_similarity(r, mean_dist_vec)\n similar[2] += cosine_similarity(r, title_vec)\n similar[3] += cosine_similarity(r, abstract_vec)\n similar[4] += cosine_similarity(r, random_vec)\n similar[5] += cosine_similarity(r, zero_vec)\n count += 1\n print(count, end='\\r')\n except (ValueError, KeyError) as _:\n itr -= 1\n continue\n else:\n itr -= 1\n continue\n # Normalize the scores to get a better\n # comparison against the baselines.\n scores = scores / norm(scores)\n similar = similar / norm(similar)\n print_summary(scores, similar)", "def check(self, test_set):\n for id in test_set.keys():\n image = test_set[id]\n answers = {}\n for mood, perceptron in self._perceptrons.items():\n answers[perceptron.check(image)] = mood\n final_answer = answers[max(answers.keys())]\n print('Image{} {}'.format(id, final_answer.value))", "def evaluate_reco_param(self):\n evals = self.input_binning['true_energy'].weighted_centers.magnitude\n n_e = len(self.input_binning['true_energy'].weighted_centers.magnitude)\n n_cz = len(self.input_binning['true_coszen'].weighted_centers.magnitude)\n eval_dict = deepcopy(self.param_dict)\n for flavintgroup, dim_dict in eval_dict.items():\n for dim, dist_list in dim_dict.items():\n for dist_prop_dict in dist_list:\n for dist_prop in dist_prop_dict.keys():\n if dist_prop == 'dist':\n continue\n if callable(dist_prop_dict[dist_prop]):\n func = dist_prop_dict[dist_prop]\n vals = func(evals)\n dist_prop_dict[dist_prop] =\\\n np.repeat(vals,n_cz).reshape((n_e,n_cz))\n elif isinstance(dist_prop_dict[dist_prop], dict):\n assert dist_prop == 'kwargs'\n for kwarg in dist_prop_dict['kwargs'].keys():\n func = dist_prop_dict['kwargs'][kwarg]\n vals = func(evals)\n dist_prop_dict['kwargs'][kwarg] =\\\n np.repeat(vals,n_cz).reshape((n_e,n_cz))\n # Now check for consistency, to not have to loop over all dict\n # entries again at a later point in time\n self.check_reco_dist_consistency(dist_list)\n return eval_dict" ]
[ "0.62931263", "0.601181", "0.5886399", "0.57915413", "0.56589055", "0.56554955", "0.55133504", "0.53384787", "0.53033984", "0.52407485", "0.5225457", "0.5210151", "0.5209773", "0.52093935", "0.518312", "0.51578075", "0.51371425", "0.5087555", "0.50848186", "0.50783587", "0.50739896", "0.5054838", "0.5033058", "0.5004067", "0.49799156", "0.49574396", "0.49483347", "0.49300227", "0.49252197", "0.49153504", "0.49135187", "0.49085692", "0.49074164", "0.49014455", "0.4896459", "0.4894919", "0.48912907", "0.48844787", "0.4877504", "0.4870724", "0.48629028", "0.48552608", "0.48516986", "0.48513556", "0.48310652", "0.48222885", "0.4816635", "0.4813183", "0.4808244", "0.4805704", "0.4793809", "0.4792715", "0.47922027", "0.47913396", "0.47879055", "0.4782894", "0.47644174", "0.47622705", "0.47617233", "0.47541568", "0.47455552", "0.47438523", "0.47400042", "0.47379678", "0.47370616", "0.4736397", "0.47204474", "0.47195467", "0.47187588", "0.4716284", "0.4714454", "0.47028327", "0.46987858", "0.46981266", "0.46832272", "0.46777403", "0.46717992", "0.4670577", "0.4668394", "0.46672043", "0.4664368", "0.46616063", "0.46611613", "0.46608236", "0.46602744", "0.4660115", "0.4652832", "0.46407622", "0.46359164", "0.4634909", "0.4633825", "0.4633158", "0.4626948", "0.46205935", "0.46181303", "0.46058765", "0.4597961", "0.45977542", "0.45976728", "0.4595237" ]
0.6446188
0
Performs one iteration of the stabilized Gram-Schmidt procedure, with r to be orthonormalized against {v} = {v_0, v_1, ...}.
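(Editorial note, not part of the record: written out, the step the query describes computes a projection coefficient and removes the v_i component from r,

    h_i = \langle v_i, r \rangle, \qquad r \leftarrow r - h_i \, v_i ,

so that after sweeping over every v_i the remainder is orthogonal to span{v_0, v_1, ...} and can be normalized to extend the basis.)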
def _gs_step(r: jax.ShapedArray, v_i: jax.ShapedArray) -> Tuple[jax.ShapedArray, jax.ShapedArray]:
    h_i = jnp.vdot(v_i, r)
    r_i = r - h_i * v_i
    return r_i, h_i
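(Editorial sketch, not part of the record: because _gs_step returns the (carry, output) pair that jax.lax.scan expects, a full modified Gram-Schmidt sweep against a stack of basis vectors can be written as a scan. The helper name orthonormalize and the (k, n) row layout of vs are assumptions for illustration, not taken from the record.

import jax
import jax.numpy as jnp

def _gs_step(r, v_i):
    # same arithmetic as the record's _gs_step, type annotations dropped
    h_i = jnp.vdot(v_i, r)
    return r - h_i * v_i, h_i

def orthonormalize(r, vs):
    # hypothetical helper: vs is a (k, n) stack of orthonormal rows, r an (n,)
    # vector; scan threads r through _gs_step once per row and collects the
    # coefficients h, e.g. one Hessenberg column in an Arnoldi/GMRES step
    r, h = jax.lax.scan(_gs_step, r, vs)
    return r / jnp.linalg.norm(r), h

In the stabilized (modified) variant this sequential dependence matters: each projection uses the already-updated r, which is exactly what the scan carry provides.)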
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stabilizer_vector(v, g, n):\n vg = v.copy()\n w = v.copy()\n for i in range(1, n):\n vg *= g \n w += vg\n assert v == vg * g\n if (w['B'] == 0).all():\n return None\n return w", "def rothesstri(A, b):\n n = shape(A)[0]\n A = hstack([A, b])\n for k in range(n-1):\n r = linalg.norm([ A[k , k] , A[k + 1, k] ])\n if r>0:\n c=A[k, k]/r; s=A[k + 1, k]/r\n A[[k, k + 1],(k + 1):(n + 1)]=[[c, s],[-s, c]]*A[[k, k + 1],(k + 1):(n + 1)]\n A[k, k] = r; A[k+1,k] = 0\n z = A[:, n].copy()\n rbacksolve(A[:, :n], z, n)\n return z", "def stirling(k, r) :\n\n return sum((-1)**(r-i)*binomial(r, i)*i**k for i in range(r+1)) / math.factorial(r)", "def minmod_rusanov_step(q, g, flux):\n \n q_rhs = numpy.zeros_like(q)\n f_rusanov = numpy.zeros_like(q)\n q_L = numpy.zeros_like(q)\n q_R = numpy.zeros_like(q)\n \n for i in range(g.ngz - 1, g.nx + g.ngz + 1):\n for k in range(q.shape[1]):\n sigma_up = q[i+1, k] - q[i, k]\n sigma_do = q[i, k] - q[i-1, k]\n sigma_bar = minmod(sigma_up, sigma_do)\n q_R[i, k] = q[i, k] - 0.5 * sigma_bar\n q_L[i+1, k] = q[i, k] + 0.5 * sigma_bar\n \n f_L = flux(q_L)\n f_R = flux(q_R)\n for i in range(g.ngz, g.nx + g.ngz + 1):\n f_rusanov[i, :] = (f_L[i, :] + f_R[i, :] + g.dx / g.dt * (q_L[i, :] - q_R[i, :])) / 2\n \n for i in range(g.ngz, g.nx + g.ngz):\n q_rhs[i, :] = 1.0 / g.dx * (f_rusanov[i, :] - f_rusanov[i+1, :])\n\n return q_rhs", "def compute_gae(V, s, ss, r, absorbing, last, gamma, lam):\n v = V(s)\n v_next = V(ss)\n gen_adv = np.empty_like(v)\n for rev_k in range(len(v)):\n k = len(v) - rev_k - 1\n if last[k] or rev_k == 0:\n gen_adv[k] = r[k] - v[k]\n if not absorbing[k]:\n gen_adv[k] += gamma * v_next[k]\n else:\n gen_adv[k] = r[k] + gamma * v_next[k] - v[k] + gamma * lam * gen_adv[k + 1]\n return gen_adv + v, gen_adv", "def SOR_Solve_Opt(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n omega = 1\n l = 5\n p = 2\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n x_new[row] = (1.0-omega) * x[row] + omega*x_new[row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n #record change after iteration k\n if (l==iteration):\n dxl = np.linalg.norm(x_new-x)\n if (l + p == iteration):\n dxlp = np.linalg.norm(x_new-x)\n omega = 2.0/(1.0+np.sqrt(1-(dxlp/dxl)**(1.0/p)))\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def lwr_recursion(r):\r\n\r\n # r is (P+1, nc, nc)\r\n nc = r.shape[1]\r\n P = r.shape[0] - 1\r\n\r\n a = np.zeros((P, nc, nc)) # ar coefs\r\n b = np.zeros_like(a) # lp coefs\r\n sigb = np.zeros_like(r[0]) # forward prediction error covariance\r\n sigf = np.zeros_like(r[0]) # backward prediction error covariance\r\n delta = np.zeros_like(r[0])\r\n\r\n # initialize\r\n idnt = np.eye(nc)\r\n sigf[:] = r[0]\r\n sigb[:] = r[0]\r\n\r\n # iteratively find sequences A_{p+1}(i) and B_{p+1}(i)\r\n for p in range(P):\r\n\r\n # calculate delta_{p+1}\r\n # delta_{p+1} = r(p+1) + sum_{i=1}^{p} a(i)r(p+1-i)\r\n delta[:] = r[p + 1]\r\n for i in range(1, p + 1):\r\n delta += np.dot(a[i - 1], r[p + 1 - i])\r\n\r\n # intermediate values XXX: should turn these into solution-problems\r\n ka 
= np.dot(delta, linalg.inv(sigb))\r\n kb = np.dot(delta.conj().T, linalg.inv(sigf))\r\n\r\n # store a_{p} before updating sequence to a_{p+1}\r\n ao = a.copy()\r\n # a_{p+1}(i) = a_{p}(i) - ka*b_{p}(p+1-i) for i in {1,2,...,p}\r\n # b_{p+1}(i) = b_{p}(i) - kb*a_{p}(p+1-i) for i in {1,2,...,p}\r\n for i in range(1, p + 1):\r\n a[i - 1] -= np.dot(ka, b[p - i])\r\n for i in range(1, p + 1):\r\n b[i - 1] -= np.dot(kb, ao[p - i])\r\n\r\n a[p] = -ka\r\n b[p] = -kb\r\n\r\n sigf = np.dot(idnt - np.dot(ka, kb), sigf)\r\n sigb = np.dot(idnt - np.dot(kb, ka), sigb)\r\n\r\n return a, sigf", "def value_iteration(r,terminal_states_mask, not_slipping_prob=1,\n initial_policy=None,\n eps=0.1):\n V = np.copy(r).astype(np.float)\n for _ in range(200):\n Vdet_u = np.copy(V)\n Vdet_r = np.copy(V)\n Vdet_l = np.copy(V)\n Vdet_d = np.copy(V)\n\n Vdet_u[1:, :] = V[:-1, :]\n Vdet_r[:, :-1] = V[:, 1:]\n Vdet_l[:, 1:] = V[:, :-1]\n Vdet_d[:-1, :] = V[1:, :]\n\n nsp = not_slipping_prob\n Vnext = np.zeros((4, )+r.shape, dtype=float)\n Vnext[UP, :, :] = nsp * Vdet_u + 0.5 * (1-nsp) * Vdet_l + 0.5 * (\n 1-nsp) * Vdet_r\n Vnext[DOWN, :, :] = nsp * Vdet_d + 0.5 * (1-nsp) * Vdet_l + 0.5 * (\n 1-nsp) * Vdet_r\n Vnext[LEFT, :, :] = nsp * Vdet_l + 0.5 * (1-nsp) * Vdet_u + 0.5 * (\n 1-nsp) * Vdet_d\n Vnext[RIGHT, :, :] = nsp * Vdet_r + 0.5 * (1-nsp) * Vdet_u + 0.5 * (\n 1-nsp) * Vdet_d\n if initial_policy is None:\n policy = np.argmax(r[None, :, :] + Vnext, axis=0)\n else:\n policy = np.copy(initial_policy)\n\n Vnew = np.copy(V)\n for a in [UP, DOWN, LEFT, RIGHT]:\n ind = np.logical_and(policy == a, ~ terminal_states_mask)\n Vnew[ind] = r[ind] + Vnext[a][ind]\n if np.linalg.norm(V - Vnew, ord=np.inf) < eps:\n break\n V[:, :] = Vnew\n return policy, V", "def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False):\n if base is None:\n base = []\n if gens is None:\n gens = self.generators[:]\n degree = self.degree\n id_af = list(range(degree))\n # handle the trivial group\n if len(gens) == 1 and gens[0].is_Identity:\n if slp_dict:\n return base, gens, {gens[0]: [gens[0]]}\n return base, gens\n # prevent side effects\n _base, _gens = base[:], gens[:]\n # remove the identity as a generator\n _gens = [x for x in _gens if not x.is_Identity]\n # make sure no generator fixes all base points\n for gen in _gens:\n if all(x == gen._array_form[x] for x in _base):\n for new in id_af:\n if gen._array_form[new] != new:\n break\n else:\n assert None # can this ever happen?\n _base.append(new)\n # distribute generators according to basic stabilizers\n strong_gens_distr = _distribute_gens_by_base(_base, _gens)\n strong_gens_slp = []\n # initialize the basic stabilizers, basic orbits and basic transversals\n orbs = {}\n transversals = {}\n slps = {}\n base_len = len(_base)\n for i in range(base_len):\n transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i],\n _base[i], pairs=True, af=True, slp=True)\n transversals[i] = dict(transversals[i])\n orbs[i] = list(transversals[i].keys())\n # main loop: amend the stabilizer chain until we have generators\n # for all stabilizers\n i = base_len - 1\n while i >= 0:\n # this flag is used to continue with the main loop from inside\n # a nested loop\n continue_i = False\n # test the generators for being a strong generating set\n db = {}\n for beta, u_beta in list(transversals[i].items()):\n for j, gen in enumerate(strong_gens_distr[i]):\n gb = gen._array_form[beta]\n u1 = transversals[i][gb]\n g1 = _af_rmul(gen._array_form, u_beta)\n slp = [(i, g) for g in slps[i][beta]]\n 
slp = [(i, j)] + slp\n if g1 != u1:\n # test if the schreier generator is in the i+1-th\n # would-be basic stabilizer\n y = True\n try:\n u1_inv = db[gb]\n except KeyError:\n u1_inv = db[gb] = _af_invert(u1)\n schreier_gen = _af_rmul(u1_inv, g1)\n u1_inv_slp = slps[i][gb][:]\n u1_inv_slp.reverse()\n u1_inv_slp = [(i, (g,)) for g in u1_inv_slp]\n slp = u1_inv_slp + slp\n h, j, slp = _strip_af(schreier_gen, _base, orbs, transversals, i, slp=slp, slps=slps)\n if j <= base_len:\n # new strong generator h at level j\n y = False\n elif h:\n # h fixes all base points\n y = False\n moved = 0\n while h[moved] == moved:\n moved += 1\n _base.append(moved)\n base_len += 1\n strong_gens_distr.append([])\n if y is False:\n # if a new strong generator is found, update the\n # data structures and start over\n h = _af_new(h)\n strong_gens_slp.append((h, slp))\n for l in range(i + 1, j):\n strong_gens_distr[l].append(h)\n transversals[l], slps[l] =\\\n _orbit_transversal(degree, strong_gens_distr[l],\n _base[l], pairs=True, af=True, slp=True)\n transversals[l] = dict(transversals[l])\n orbs[l] = list(transversals[l].keys())\n i = j - 1\n # continue main loop using the flag\n continue_i = True\n if continue_i is True:\n break\n if continue_i is True:\n break\n if continue_i is True:\n continue\n i -= 1\n\n strong_gens = _gens[:]\n\n if slp_dict:\n # create the list of the strong generators strong_gens and\n # rewrite the indices of strong_gens_slp in terms of the\n # elements of strong_gens\n for k, slp in strong_gens_slp:\n strong_gens.append(k)\n for i in range(len(slp)):\n s = slp[i]\n if isinstance(s[1], tuple):\n slp[i] = strong_gens_distr[s[0]][s[1][0]]**-1\n else:\n slp[i] = strong_gens_distr[s[0]][s[1]]\n strong_gens_slp = dict(strong_gens_slp)\n # add the original generators\n for g in _gens:\n strong_gens_slp[g] = [g]\n return (_base, strong_gens, strong_gens_slp)\n\n strong_gens.extend([k for k, _ in strong_gens_slp])\n return _base, strong_gens", "def res(self, t, y, yd, sw):\n if sw[0]:\n# print('state 1')\n G = scipy.array([\n [0, 1, 0],\n [0, 0, 1]\n ])\n gvec = y[3:5]\n elif sw[1]:\n# print('state 2')\n G = scipy.array([\n [0, self.hS, 0],\n [1, self.rS, 0]\n ])\n gvec = scipy.array([self.rS - self.r0 + self.hS * y[1],\n y[5] + self.rS * y[6]])\n elif sw[2]:\n# print('state 3, phi_bp: ', y[7], 'res: ', self.hB * y[2] - self.lS - self.lG + self.lB + self.r0)\n G = scipy.array([\n [0, - self.hS, 0],\n [1, self.rS, 0]\n ])\n gvec = scipy.array([self.rS - self.r0 - self.hS * y[1],\n y[5] + self.rS * y[6]])\n\n ff = scipy.array([- (self.mS + self.mB) * self.g,\n self.cp * (y[2] - y[1]) - self.mB * self.lS * self.g,\n self.cp * (y[1] - y[2]) - self.mB * self.lG * self.g])\n\n res_1 = yd[0:5] - y[5:10]\n res_2 = scipy.dot(self.M, yd[5:8]) - ff + scipy.dot(G.T, y[3:5])\n res_3 = gvec\n\n return scipy.hstack((res_1, res_2, res_3)).flatten()", "def generate_good(self, m, n, rank, mu=2, ka=2):\n sr = random.random()\n s = []\n s.append(sr)\n for r in range(rank-1):\n newele = s[-1] * (1 + ka * random.random() / (rank-1))\n s.append(newele)\n s.reverse()\n \n # best_u = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # A = np.random.rand(m,m)\n # A = scipy.linalg.orth(A)\n # u = A[:, :rank]\n # mu0 = self.compute_mu(u, m, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_u = u\n # print(\"mu0 for u:\", best_mu0)\n # # print(u.T @ u)\n \n # best_v = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # B = 
np.random.rand(n,n)\n # B = scipy.linalg.orth(B)\n # v = B[:, :rank]\n # mu0 = self.compute_mu(v, n, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_v = v\n # print(\"mu0 for v:\", best_mu0)\n # u = best_u\n # v = best_v\n\n for _ in range(100):\n A = np.random.rand(m,m)\n A = scipy.linalg.orth(A)\n u = A[:, :rank]\n mu0 = self.compute_mu(u, m, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for u:\", mu0) \n\n for _ in range(10):\n B = np.random.rand(n,n)\n B = scipy.linalg.orth(B)\n v = B[:, :rank]\n mu0 = self.compute_mu(v, n, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for both:\", mu0)\n\n matrix = np.dot(u*s, v.T)\n \n kappa = s[0] / s[-1]\n print(\"kappa=\", kappa)\n \n ss = np.copy(s)\n for k in range(rank):\n ss[k] = s[k] / s[0]\n \n max_entry = np.max(np.abs(np.outer(u[:,:rank], v.T[:rank,:])))\n mu1 = max_entry * math.sqrt(m * n / rank)\n print(\"mu1=\", mu1)\n \n return matrix", "def shapley(self, R, t):\n\t n = self.nodes\n\n\t # phi contains the shapley values of nodes\n\t phi = [0 for i in range(n)]\n\n\t # MC, i.e., marginal contribution of each node\n\t # which reflects the change in coverage due to\n\t # the addition of node i in the set of initilly\n\t # activated nodes\n\t MC = [0 for i in range(n)]\n\n\t # randomly select t permutations from n! possible\n\t # permutations of nodes\n\t for j in range(t):\n\t temp = [0 for i in range(n)]\n\n\t # repeat the experiment R times (take the average)\n\t for r in range(R):\n\t self.theta = nprnd.random_sample((n,))\n\t self.deactivate_all()\n\t k = nprnd.permutation(n)\n\t for i in k:\n\t temp[i] += self.v(i)\n\n\t # Add the contribution for each permuation\n\t for i in range(n):\n\t MC[i] += temp[i]*1.00/R\n\n\t for i in range(n):\n\t phi[i] = (MC[i]*1.00)/t\n\n\t x = {i: phi[i] for i in range(n)}\n\t self.shapley_rank = sorted(x.items(), key=operator.itemgetter(1), reverse=True)\n\n\t return self.shapley_rank", "def alternative_iterative_method(x0, n, gamma, b):\n # Parameters:\n MAX_ITER = 1000\n n2 = n**2\n\n # Creating NxN versions of vector for easier indexing during iteration\n b = b.copy().reshape(n, n)\n b_transposed = b.copy().T\n x0 = x0.copy().reshape(n, n)\n x0_transposed = x0.copy().T\n x1 = x0.copy()\n x1_transposed = x0_transposed.copy()\n\n # No need for M, N, only a smaller tridiagonal system:\n H = scipy.sparse.diags((-1, 2, -1), (-1, 0, 1), shape=(n, n), format=\"csr\")\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n, n), format=\"csr\")\n M1 = gammaI + H # Corresponds to both (gI + M) & (gI + N) in equations\n M2 = gammaI - H # Corresponds to both (gI - M) & (gI - N) in equations\n\n # Preallocating RHS of equations\n RHS7 = np.zeros((n, n), dtype=np.float64)\n RHS8 = np.zeros((n, n), dtype=np.float64)\n\n k = 0\n while k < MAX_ITER:\n for i in range(n): # Loading RHS values for Equation (7):\n RHS7[:, i] = scipy.sparse.csr_matrix.dot(M2, x0_transposed[i]) + b_transposed[i]\n for i in range(n): # Solving N independent tridig mat systems related to Eq(7):\n x1[i] = scipy.sparse.linalg.spsolve(M1, RHS7[i])\n RHS8[i] = scipy.sparse.csr_matrix.dot(M2, x1[i]) + b[i] # Loading RHS values for Equation (8):\n for i in range(n): # Solving N independent tridig mat systems related to Eq(8):\n x1_transposed[i] = scipy.sparse.linalg.spsolve(M1, RHS8[:, i])\n\n k += 1\n if np.allclose(x1_transposed, x0_transposed, rtol=1e-8):\n break\n x0_transposed = x1_transposed.copy()\n\n res = x1_transposed.T.reshape(n2)\n return 
res, k", "def run_sparring_algorithm(means, horizon):\n\n # The number of arms\n n_arms = len(means)\n\n # Shuffling the means vector.\n random.shuffle(means)\n\n # Assigning Bernoulli arms\n arms = map(lambda (mu): BernoulliArm(mu), means)\n\n # Assigning the black-boxes with the UCB 1 algorithm\n left_black_box = UCB1([], [])\n right_black_box = UCB1([], [])\n\n # Initializing the black-boxes.\n left_black_box.initialize(n_arms)\n right_black_box.initialize(n_arms)\n\n # Initializing rewards and regrets\n average_reward = [0]*horizon\n\n regret = [0]*horizon\n\n cumulative_average_reward = [0]*horizon\n\n cumulative_regret = [0]*horizon\n\n for t in range(horizon):\n\n # Using the black-boxes to select the arms\n left_arm = left_black_box.select_arm()\n right_arm = right_black_box.select_arm()\n\n # Acquiring the rewards\n left_reward = arms[left_arm].draw()\n\n right_reward = arms[right_arm].draw()\n\n b = observe_b_t(left_reward, right_reward)\n b_not = 1 - b\n\n # Updating the black-boxes\n left_black_box.update(left_arm, b_not)\n right_black_box.update(right_arm, b)\n\n # Assigning the average reward.\n average_reward[t] = float(right_reward + left_reward) / 2\n\n # Assigning the regret\n regret[t] = max(means) - average_reward[t]\n\n # Assigning the cumulative regret and rewards\n if t == 1:\n cumulative_average_reward[t] = average_reward[t]\n\n cumulative_regret[t] = regret[t]\n else:\n cumulative_average_reward[t] = average_reward[t] + cumulative_average_reward[t-1]\n\n cumulative_regret[t] = regret[t] + cumulative_regret[t-1]\n\n # Returning the average regret.\n return cumulative_regret", "def gramschmidt(A):\r\n _, k = A.shape\r\n\r\n # first basis vector\r\n Q = A[:, [0]] / np.linalg.norm(A[:, 0])\r\n for j in range(1, k):\r\n # orthogonal projection, loop-free implementation\r\n q = A[:, j] - np.dot(Q, np.dot(Q.T, A[:, j]))\r\n\r\n # check premature termination\r\n nq = np.linalg.norm(q)\r\n if nq < 1e-9 * np.linalg.norm(A[:, j]):\r\n break\r\n # add new basis vector as another column of Q\r\n Q = np.column_stack([Q, q / nq])\r\n return Q", "def robust_value_iteration(robust_algorithm, P, nS, nA, gamma=0.9, max_iteration=20, tol=1e-3, tol2=1.0):\n V = np.zeros(nS)\n V.fill(1000.)\n sigma = np.zeros((nS, nA))\n policy = np.zeros(nS, dtype=int)\n for iter_count in range(max_iteration):\n # print(iter_count)\n\n # Need to estimate sigma, which is of dimension |nS|*|nA|\n # This can simply be p^T V for now.\n # SigmaLikelihood(P, V, nS, nA, sigma, tol2, iter_count)\n # SigmaEntropy(P, V, nS, nA, sigma, tol2)\n robust_algorithm(P, V, nS, nA, sigma, tol2)\n\n newV = np.zeros(nS)\n for state in range(nS):\n BV = np.zeros(nA)\n for action in range(nA):\n BV[action] = RobustBellmanOp(P, sigma, state, action, gamma)\n newV[state] = BV.min()\n # Calculate difference of the value functions.\n Vdiff = np.max(np.abs(newV - V))\n V = newV\n if Vdiff < tol:\n break\n # Calculate the policy.\n for state in range(nS):\n BV = np.zeros(nA)\n for action in range(nA):\n BV[action] = RobustBellmanOp(P, sigma, state, action, gamma)\n policy[state] = np.argmin(BV)\n return V, policy, sigma", "def model_gutenberg(r):\n\n\timport numpy as np\n\n\t#- initialisations ----------------------------------------------------------------------------\n\n\t#- depth intervals\n\td = np.array([0.0, 19.0, 38.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 125.0, 150.0, 175.0, 200.0, 225.0, 250.0, 300.0, 350.0, 400.0, 450.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0])\n\td = 6371000.0 - 1000.0 * d\n\n\t#- 
density\n\trho_array = np.array([2.74, 3.00, 3.32, 3.34, 3.35, 3.36, 3.37, 3.38, 3.39, 3.41, 3.53, 3.46, 3.48, 3.50, 3.53, 3.58, 3.62, 3.69, 3.82, 4.01, 4.21, 4.40, 4.56, 4.63])\n\n\t#- vp\n\tvp_array = np.array([6.14, 6.58, 8.20, 8.17, 8.14, 8.10, 8.07, 8.02, 7.93, 7.85, 7.89, 7.98, 8.10, 8.21, 8.38, 8.62, 8.87, 9.15, 9.45, 9.88, 10.30, 10.71, 11.10, 11.35])\n\n\t#- vs\n\tvs_array = np.array([3.55, 3.80, 4.65, 4.62, 4.57, 4.51, 4.46, 4.41, 4.37, 4.35, 4.36, 4.38, 4.42, 4.46, 4.54, 4.68, 4.85, 5.04, 5.21, 5.45, 5.76, 6.03, 6.23, 6.32])\n\n\t#- determine vp, vs and rho for the relevant layer --------------------------------------------\n\n\tfor k in np.arange(len(rho_array)):\n\t\tif (r <= d[k]) & (r > d[k+1]):\n\t\t\trho = rho_array[k]\n\t\t\tvpv = vp_array[k]\n\t\t\tvph = vpv\n\t\t\tvsv = vs_array[k]\n\t\t\tvsh = vsv\n\t\t\teta = 1.0\n\t\t\tcontinue\n\t\telif r <= 5371000.0:\n\t\t\trho = 4.63\n\t\t\tvpv = 11.35\n\t\t\tvph = vpv\n\t\t\tvsv = 6.32\n\t\t\tvsh = vsv\n\t\t\teta = 1.0\n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N", "def v(resistances, r_i, applied_voltages, **kwargs):\n if r_i.word_line > 0 or r_i.bit_line > 0:\n g = fill.g(resistances, r_i)\n i = fill.i(applied_voltages, resistances, r_i)\n\n utils.message('Started solving for v.', **kwargs)\n v_matrix = linalg.spsolve(g.tocsc(), i)\n utils.message('Solved for v.', **kwargs)\n\n # if `num_examples == 1`, it can result in 1D array.\n if v_matrix.ndim == 1:\n v_matrix = v_matrix.reshape(v_matrix.shape[0], 1)\n\n # if one of the interconnect resistances is zero, only half of the\n # matrix_v had to be solved. 
The other half can be filled without\n # solving because the node voltages are known.\n if r_i.word_line == 0:\n new_v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n new_v_matrix[:resistances.size, ] = np.repeat(\n applied_voltages, resistances.shape[1], axis=0)\n new_v_matrix[resistances.size:, ] = v_matrix\n v_matrix = new_v_matrix\n if r_i.bit_line == 0:\n new_v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n new_v_matrix[:resistances.size, ] = v_matrix\n v_matrix = new_v_matrix\n else:\n # if both interconnect resistances are zero, all node voltages are\n # known.\n v_matrix = np.zeros(\n (2*resistances.size, applied_voltages.shape[1]))\n v_matrix[:resistances.size, ] = np.repeat(\n applied_voltages, resistances.shape[1], axis=0)\n\n return v_matrix", "def optimal_shu_osher_form(self):\n m=len(self)\n r = self.absolute_monotonicity_radius()\n v, alpha = self.canonical_shu_osher_form(r)\n beta = alpha / r\n if self.is_explicit():\n for i in range(1,len(self)+1):\n alpha[i,0]=1.-np.sum(alpha[i,1:])\n return alpha, beta", "def SOR_Solve(A,b,tol=1.0e-6,omega=1,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n x_new[row] = (1.0-omega) * x[row] + omega*x_new[row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def L1Uv2(A, d):\n n = shape(A)[0]\n for k in range(1,n):\n km = array([0, k - d]).max() # First index of r we need to update\n for r in range(km, k - 1):\n A[k, r] /= A[r, r]\n uk = array([k, r + d + 1]).min() # last index not included\n A[k, (r + 1):uk] -= A[r, (r + 1):uk]*A[k, r]\n A[k, k - 1] /= A[k - 1,k - 1] \n for r in range(km, k):\n uk = array([k + 1, r + d + 1]).min() # last index not included\n A[(r + 1):uk, k] -= A[(r + 1):uk, r]*A[r, k]", "def rforwardsolve(A, b, d):\n n = len(b)\n b[0] /= A[0, 0]\n for k in range(1,n):\n lk = array([0,k-d]).max()\n b[k] = b[k] - dot(A[k, lk:k],b[lk:k])\n b[k] /= A[k, k]", "def model_prem(r):\n\n\t#- normalised radius\n\tx = r / 6371000.0\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\t#- upper crust\n\tif (r >= 6356000.0):\n\t\trho = 2.6\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 3.2\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- lower crust\n\telif (r >= 6346000.6) & (r < 6356000.0):\n\t\trho = 2.9\n\t\tvpv = 6.8\n\t\tvph = vpv\n\t\tvsv = 3.9\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- LID\n\telif (r >= 6291000.0) & (r < 6346000.6):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- LVZ\n\telif (r >= 6151000.0) & (r < 6291000.0):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- Transition zone 1\n\telif (r >= 5971000.0) & (r < 6151000.0):\n\t\trho = 7.1089 - 3.8045 * x\n\t\tvpv = 
20.3926 - 12.2569 * x\n\t\tvph = vpv\n\t\tvsv = 8.9496 - 4.4597 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 2\n\telif (r >= 5771000.0) & (r < 5971000.0):\n\t\trho = 11.2494 - 8.0298 * x\n\t\tvpv = 39.7027 - 32.6166 * x\n\t\tvph = vpv\n\t\tvsv = 22.3512 - 18.5856 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 3\n\telif (r >= 5701000.0) & (r < 5771000.0):\n\t\trho = 5.3197 - 1.4836 * x\n\t\tvpv = 19.0957 - 9.8672 * x\n\t\tvph = vpv\n\t\tvsv = 9.9839 - 4.9324 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 1\n\telif (r >= 5600000.0) & (r < 5701000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 29.2766 - 23.6027 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 22.3459 - 17.2473 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- Lower mantle 2\n\telif (r >= 3630000.0) & (r < 5600000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 24.9520 - 40.4673 * x + 51.4832 * x**2 - 26.6419 * x**3\n\t\tvph = vpv\n\t\tvsv = 11.1671 - 13.7818 * x + 17.4575 * x**2 - 9.2777 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 3\n\telif (r >= 3480000.0) & (r < 3630000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 15.3891 - 5.3181 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 6.9254 + 1.4672 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Outer core\n\telif (r >= 1221000.5) & (r < 3480000.0):\n\t\trho = 12.5815 - 1.2638 * x - 3.6426 * x**2 - 5.5281 * x**3\n\t\tvpv = 11.0487 - 4.0362 * x + 4.8023 * x**2 - 13.5732 * x**3\n\t\tvph = vpv\n\t\tvsv = 0.0\n\t\tvsh = 0.0\n\t\teta = 1.0\n\n\t#- Inner Core\n\telif (r >= 0.0) & (r < 1221000.5):\n\t\trho = 13.0885 - 8.8381 * x**2\n\t\tvpv = 11.2622 - 6.3640 * x**2\n\t\tvph = vpv\n\t\tvsv = 3.6678 - 4.4475 * x**2\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N", "def Gram_Schmidt1(vecs, row_wise_storage=True):\n from numpy.linalg import inv\n from math import sqrt\n\n vecs = asarray(vecs) # transform to array if list of vectors\n m, n = vecs.shape\n basis = array(transpose(vecs))\n eye = identity(n).astype(float)\n\n basis[:,0] /= sqrt(dot(basis[:,0], basis[:,0]))\n for i in range(1, m):\n v = basis[:,i]/sqrt(dot(basis[:,i], basis[:,i]))\n U = basis[:,:i]\n P = eye - dot(U, dot(inv(dot(transpose(U), U)), transpose(U)))\n basis[:, i] = dot(P, v)\n basis[:, i] /= sqrt(dot(basis[:, i], basis[:, i]))\n\n return transpose(basis) if row_wise_storage else basis", "def gram_schmidt(vects):\n\n res = [vects[:,0] / np.linalg.norm(vects[:,0])]\n for i in xrange(1, vects.shape[1]):\n curr = vects[:,i] - reduce(lambda x,y: x+y, map(lambda x: proj(x,vects[:,i]), res))\n res.append(curr / np.linalg.norm(curr))\n return np.stack(res, axis=1)", "def random_iteration(r, w):\n density = 1.225\n blade_number = 6\n wind_v = round(-168.75*r**2-36.75*r+20.05 , 2)\n blade_v = r*w\n rel_v = round(math.sqrt(blade_v**2 + wind_v**2),2)\n arctan = round(math.degrees(math.atan2(wind_v, blade_v)), 2)\n # cl is a random float in range 1.0 to 1.7\n cl = uniform(1.0, 1.7)\n # aoa is a random float in range 10 to 15\n aoa = uniform(10, 15)\n treeHit = 0\n Re = get_re(r, cl, 
aoa, arctan, rel_v, blade_number)[0]\n new_cl, new_a, new_cd = get_max_cl(Re, r)\n new_Re, new_chord = get_re(r, round(new_cl, 2), round(new_a, 2), arctan, rel_v, blade_number)\n re_devi = abs((new_Re - Re) / Re)\n # iterate until Re_deviation goes under 5%\n while re_devi > 0.05:\n Re = new_Re\n new_cl, new_a, new_cd = get_max_cl(new_Re, r)\n new_Re, new_chord = get_re(r, new_cl, new_a, arctan, rel_v, blade_number)\n re_devi = abs((new_Re - Re) / Re)\n treeHit += 1\n # stop iteration over 10 times\n if treeHit > 10:\n break\n force_reference = 0.5 * density * rel_v**2 * 0.0125 * round(new_chord, 3)\n return {\n \"r\": r,\n \"arctan\": arctan, \n \"chord\": round(new_chord, 3), \n \"aoa\": new_a, \n \"cl\": new_cl, \n \"cd\": new_cd, \n \"Re\": new_Re,\n \"lift\": new_cl * force_reference, \n \"drag\": new_cd * force_reference,\n \"torque\": r * (new_cl * force_reference * math.sin(math.radians(arctan - new_a)) - new_cd * force_reference * math.cos(math.radians(arctan - new_a)))\n }", "def birank(W, u0=None, v0=None,\n alpha=0.85, beta=0.85, max_iter=200, tol=1.0e-4, verbose=False):\n # default initial vectors: 1/|U|, 1/|V|\n if u0 is None:\n u0=np.repeat(1 / W.shape[0], W.shape[0])\n if v0 is None:\n v0=np.repeat(1 / W.shape[1], W.shape[1])\n\n\n W = W.astype('float', copy=False)\n WT = W.T\n\n Ku = np.array(W.sum(axis=1)).flatten()\n Kv = np.array(W.sum(axis=0)).flatten()\n # avoid divided by zero issue\n Ku[np.where(Ku==0)] += 1\n Kv[np.where(Kv==0)] += 1\n\n Ku_ = spa.diags(1/Ku)\n Kv_ = spa.diags(1/Kv)\n\n Ku_bi = spa.diags(1/np.sqrt(Ku))\n Kv_bi = spa.diags(1/np.sqrt(Kv))\n Sv = Kv_bi.dot(WT).dot(Ku_bi)\n Su = Sv.T\n\n\n u_last = u0.copy()\n v_last = v0.copy()\n\n for i in range(max_iter):\n u = alpha * (Su.dot(v_last)) + (1-alpha) * u0\n v = beta * (Sv.dot(u_last)) + (1-beta) * v0\n\n err_u = np.absolute(u - u_last).sum()\n err_v = np.absolute(v - v_last).sum()\n \n if verbose:\n print(\n \"Iteration : {}; top error: {}; bottom error: {}\".format(\n i, err_u, err_v\n )\n )\n if err_v < tol and err_u < tol:\n break\n u_last = u\n v_last = v\n\n return u, v", "def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # 
print('The inversion algorithm has failed')\n            return uhat, tt, flag_vr\n        else:\n            if rhat == m:\n                # ((rhat==m)&(rhat==phat)):\n                # if this condition is true, then the algorithm is done. We can calculate the signal u\n                flag = 2\n                # calculating the inverse of the feedforward matrix\n                # E=np.linalg.inv(Dhat)\n                E = np.linalg.pinv(Dhat)\n            else:\n                # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n                A = Ahat\n                B = Bhat\n                C = Chat\n                D = Dhat\n                y = yhat\n                v = vhat\n    # after the reduction procedure is done, then the system can be inverted\n\n    # calculating the dynamic matrix of the inverse system\n    Ainv = Ahat - Bhat @ E @ Chat\n    # eigenvalues of the inverse system's dynamic matrix\n    wv, v = np.linalg.eig(Ainv)  # w=eigenvalues, v=eigenvectors\n    # calculating the input matrix of the inverse system\n    Binv = Bhat @ E\n    # calculating the output matrix of the inverse system\n    Cinv = -E @ Chat\n    # calculating the feedforward matrix of the inverse system\n    Dinv = E\n\n    # test if the inverse dynamic system is stable (use eigenvalue magnitudes, since they may be complex)\n    wbool = np.abs(wv) > 1\n    wsum = np.sum(wbool)\n    # test if any eigenvalue lies outside the unit circle\n    if wsum > 0:\n        # if wsum is greater than 0, then the inverse system is unstable, so we end the execution of the algorithm\n        # print('The inverse system is unstable')\n        flag_vr = 2\n        return uhat, tt, flag_vr\n    else:\n        # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n        # calculate the first value for the output (t=0)\n        uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n        # calculate the states and the output of the inverse system\n        for k in range(0, N - 1 - kround):\n            xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n            uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n    return uhat, tt, flag_vr", "def discount_rewards(r, gamma):\n    #print(\"r\", r)\n    discounted_r = np.zeros_like(r)\n    running_add = 0\n    for t in reversed(range(0, r.size)):\n        #print(\"t\", t)\n        running_add = running_add * gamma + r[t]\n        discounted_r[t] = running_add\n    return discounted_r", "def canonical_shu_osher_form(self,r):\n        s=len(self)\n        K=np.vstack([self.A,self.b])\n        K=np.hstack([K,np.zeros([s+1,1])])\n        I=snp.eye(s+1)\n        P=r*snp.solve(I+r*K,K)\n        d=(I-P).sum(1)\n        return d,P", "def loss_rewards(r):\n    discounted_r = np.zeros_like(r)\n    running_add = 0\n    for t in reversed(range(0, r.size)):\n        #running_add = running_add * gamma + r[t]\n        #discounted_r[t] = running_add\n        discounted_r[t] = 0\n    return discounted_r", "def valueIteration(P,R,gamma,theta,initial_v,max_iter=1e8):\n    print('Running value iteration ...')\n\n    def one_step_lookahead(s, V):\n        \"\"\"\n        :param state: current state\n        :param v: current value estimator\n        :return: A, list of optimal action values under current value estimator\n        \"\"\"\n        num_a = num_actions\n        num_S = num_states\n\n        A = np.zeros(num_a)\n\n        for a in range(num_a):\n            for s_prime in range(num_S):\n                A[a] += P[s, a, s_prime] * (R[s, a, s_prime] + gamma * V[s_prime])\n        return A\n    \n    # initialization\n    v = initial_v \n    num_states, num_actions = P.shape[:2]\n    k = 0 \n    best_actions = [0] * num_states\n    delta = 1000\n\n    while delta > theta and k <= max_iter:\n        delta = 0\n        k += 1\n        for s in range(num_states):\n            action_values = one_step_lookahead(s, v)\n            best_action_value = np.max(action_values)\n            delta = max(delta, np.abs(best_action_value - v[s]))\n            v[s] = best_action_value\n        print(delta)\n\n    for s in range(num_states):\n        A = one_step_lookahead(s, v)\n        best_actions[s] = np.argmax(A)\n\n\n    
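# Report how many sweeps were needed before the value function converged, then return the greedy policy and values.\n    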
print('number of iterations:', k)\n    return best_actions, v", "def LeapFrog(self,r,v,dt):\n        \n        rhalf = r + np.asarray(v)*(dt/2) #Taking a half step forward with positional vector\n        # predict the final velocity at the next timestep using the acceleration field at the rhalf position \n        vnew = v + self.M31Accel(rhalf)*dt\n        # predict the final position using the average of the current velocity and the final velocity\n        rnew = r + 0.5*(v+vnew)*dt\n        \n        return rnew,vnew", "def _fit_stochastic(self, R, U, V, seed):\n        Rhat = self._estimate_ratings(U=U, V=V)  # Get Rhat, the estimate of R\n        np.random.seed(seed)  # set random seed\n        for _ in range(self.n_iter):  # until we've reached n_iter...\n            n = np.random.choice(self.N)  # randomly choose a user\n            m = np.random.choice(self.M)  # randomly choose an item\n            un_grad = -(R[n,m] - Rhat[n,m])*V[m]  # gradient with respect to u_n vector\n            vm_grad = -(R[n,m] - Rhat[n,m])*U[n]  # gradient with respect to v_m vector\n            U[n] -= self.lr*un_grad  # update \n            V[m] -= self.lr*vm_grad  # update\n            Rhat = self._estimate_ratings(U=U, V=V)  # form new estimates\n        self.Rhat, self.U, self.V = Rhat, U, V", "def bfgs_method(x0, eps=1e-6, H0=np.eye(18),c1=1e-4):\n    k = 0  # initialize num of outer iterations.\n    inner_k = 0  # initialize inner k iteration.\n    old_xk = None\n    alpha_original = 1\n    alpha = np.copy(alpha_original)\n    xk = x0  # initialize x.\n    Hk = H0  # initialize H, positive definite matrix.\n    I = np.eye(len(x0))  # identity matrix matching the problem dimension.\n\n    alpha_vec = []\n    f_vec = []\n    grad_vec = []\n    inner_k = []\n    conv_c = []\n\n    while np.linalg.norm(rosen_der(xk)) > eps:\n        pk = -Hk @ rosen_der(xk)\n\n        xk_next = xk + alpha * pk\n        ink = 0\n        print(xk)\n        while rosen(xk_next) > rosen(xk) + c1 * alpha * (pk.T @ rosen_der(xk)):\n            \"\"\" find a step size that will satisfy Armijo-Goldstein inequality. Modify alpha. \"\"\"\n            alpha = 0.1 * alpha\n            xk_next = xk + alpha * pk\n            ink += 1\n\n        inner_k.append(abs(int(ink)))\n\n        xk_next = xk + alpha * pk\n\n        sk = xk_next - xk\n\n        yk = rosen_der(xk_next) - rosen_der(xk)\n\n        rho = 1 / (yk.T @ sk)\n\n        # BFGS update needs rank-one outer products; with 1-D arrays, sk @ yk.T collapses to a scalar and gives the wrong update\n        Hk = np.copy((I - rho * np.outer(sk, yk)) @ Hk @ (I - rho * np.outer(yk, sk)) + rho * np.outer(sk, sk))\n\n        old_xk = np.copy(xk)\n        xk = np.copy(xk_next)\n\n        alpha_vec.append(alpha)\n        f_vec.append(rosen(xk))\n        grad_vec.append(np.linalg.norm(rosen_der(xk)))\n        alpha = np.copy(alpha_original)\n        print(f_vec[-1])\n\n        k += 1\n\n    return xk, k, inner_k, alpha_vec, f_vec, grad_vec", "def gs(self, k=50):\n        # a. initialize V1 to Vk as a matrix of zeros\n        Vs = np.zeros((k, self.ATA.shape[0]), dtype=float)\n\n        # initialize u_n as first eigen vector?\n        # un = self.eigen_vectors[0]\n\n        # looking for k largest eigenvalues and associated eigenvectors\n        # of ATA\n        # b. for i = 1 to k\n        for i in tqdm(range(len(Vs))):\n            print(\"Doing i\")\n\n            # i. randomly generated vector of size m\n            # (length of latitudes, in this case?)\n            # scale entire vector by its magnitude, to make magnitude = 1\n            u1 = scale_mag_1(np.random.rand(self.ATA.shape[0]))\n            un = u1  # at first, u_n is u_1 and random\n\n            diff = 1  # set initial diff too high to trip while loop\n            while diff > 1e-3:\n\n                print(\"Doing ii\")\n                # ii. u_(n+1) = A^T*A*u_n\n                u1more = np.dot(self.ATA, un)\n\n                print(\"Doing iii\")\n                # iii. u_(n+1) = u_(n+1) - Sigma_j^(i-1)(u_(n+1)^T * V_j) * V_j\n                if i > 0:\n                    # sum the projections as vectors (axis=0), not into a single scalar\n                    u1more = u1more - np.sum([\n                        np.dot(np.dot(u1more.T, Vs[j]), Vs[j]) for j in range(i)\n                    ], axis=0)\n\n                print(\"Doing iv\")\n                # iv. 
u_(n+1) = u_(n+1) / || u_(n+1) ||\n                # just norm mag\n                u1more = scale_mag_1(u1more)\n\n                diff = mag(u1more - un)\n                print(\"Diff:\", diff)\n\n                un = u1more\n\n            Vs[i] = un", "def rforwardsolve(A, b, d):\n    \n    \n\n    n = len(b)\n    if np.iscomplexobj(A) or np.iscomplexobj(b):\n        A = A.astype('complex128')\n        b = b.astype('complex128')\n    x = b.copy()\n    x[0] = x[0] / A[0, 0]\n    for k in range(1, n):\n        lk = max(0, k-d)\n        x[k] = b[k] - np.dot(A[k, lk : k], x[lk : k])\n        x[k] = x[k] / A[k, k] \n    return x", "def solve_R(R, b):\n    n = b.size\n    assert R.shape == (n,n)\n    x = zeros(n, dtype=R.dtype)\n    for i in range(n-1,-1,-1):\n        x[i] = (b[i] - dot(x[i+1:], R[i,i+1:])) / R[i,i]\n        if not numpy.isfinite(x[i]):\n            x[i] = 0.0\n    return x", "def rk_adaptive(accel,m,r,h,v,recur,emin=10**-12,emax=10**-8,hmax=.1,hmin=.01,recurmax=100):\n    k1v = accel(m,r)\n    k1r = v\n    k2v = accel(m,r + 0.25*k1r*h)\n    k2r = v + (0.25*k1v)*h\n    k3v = accel(m,r + (3/32.*k1r + 9/32.*k2r)*h)\n    k3r = v + (3/32.*k1v + 9/32.*k2v)*h\n    k4v = accel(m,r + (1932/2197.*k1r - 7200/2197.*k2r + 7296/2197.*k3r)*h)\n    k4r = v + (1932/2197.*k1v - 7200/2197.*k2v + 7296/2197.*k3v)*h\n    k5v = accel(m,r + (439/216.*k1r - 8*k2r + 3680/513.*k3r - 845/4104.*k4r)*h)\n    k5r = v + (439/216.*k1v - 8*k2v + 3680/513.*k3v - 845/4104.*k4v)*h\n    k6v = accel(m,r - (8/27.*k1r + 2*k2r - 3544/2565.*k3r + 1859/4104.*k4r - 11/40.*k5r)*h)\n    k6r = v - (8/27.*k1v + 2*k2v - 3544/2565.*k3v + 1859/4104.*k4v - 11/40.*k5v)*h\n\n    # 4th order calculation\n    new_v4 = v + h*(25/216.*k1v + 1408/2565.*k3v + 2197/4104.*k4v - 1/5.*k5v)\n    new_r4 = r + h*(25/216.*k1r + 1408/2565.*k3r + 2197/4104.*k4r - 1/5.*k5r)\n    \n    # 5th order calculation\n    new_v5 = v + h*(16/135.*k1v + 6656/12825.*k3v+28561/56430.*k4v - 9/50.*k5v + 2/55.*k6v) \n    new_r5 = r + h*(16/135.*k1r + 6656/12825.*k3r+28561/56430.*k4r - 9/50.*k5r + 2/55.*k6r) \n\n    # Calculate truncation error between 5th and 4th order\n    eps = np.abs( (np.max(np.abs(new_r5)) - np.max(np.abs(new_r4))) / np.max(np.abs(new_r4)))\n    \n    # Compare eps to emin and emax and update h accordingly\n    if np.max(eps) < emin:\n        if h*2.0 < hmax:\n            h *= 2.0\n        new_v = new_v5\n        new_r = new_r5 \n        \n    if np.max(eps) > emax:\n        if h/2.0 > hmin:\n            h /= 2.0\n        print(h)\n        # Error too large, retry the step with the smaller h and propagate its result\n        if recur < recurmax:\n            recur += 1\n            return rk_adaptive(accel,m,r,h,v,recur)\n        new_v = new_v5\n        new_r = new_r5\n        \n    else:\n        new_v = new_v5\n        new_r = new_r5\n        \n    return new_v, new_r, h", "def gains_vector(m):\n\n    n_ant, n_chan, n_dir, _ = m.shape\n    row_shape = n_ant * n_chan * n_dir\n    g = np.zeros((2*row_shape), dtype=np.complex128)\n\n    for nu in range(n_chan):\n        for s in range(n_dir):\n            for a in range(n_ant):\n                row = a + n_ant * s + n_ant * n_dir * nu \n                g[row] = m[a, nu, s, 0]\n                g[row + row_shape] = m[a, nu, s, 1]\n    \n    return g", "def run_rwr(g, R, max_iters):\n\n    A = nx.adjacency_matrix(g, weight='weight')\n    m, n = A.shape\n\n    d = A.sum(axis=1)\n    d = np.asarray(d).flatten()\n    d = np.maximum(d, np.ones(n))\n\n    invd = spdiags(1.0 / d, 0, m, n)\n    T = invd.dot(A)\n\n    rwr_fn = partial(rwr, T=T, R=R, max_iters=max_iters)\n\n    aff = [rwr_fn(x) for x in np.identity(m)]\n    aff = np.array(aff)\n    return aff", "def SYR_forward(b, alpha, V, s0, y0, T=100):\n    n = len(y0)\n\n    du = np.zeros(n+1)\n    u0 = np.zeros(n+1)\n    u0[0] = s0\n    u0[1:] = y0\n    \n    def f(t,u):\n        s = u[0]\n        y = u[1:]\n        force = np.dot(y,b)  # Force of infection\n        du[0] = - s*force\n        du[1:] = s*force*alpha - np.dot(V,y)\n        return du\n\n    times = np.linspace(0,T,10000)\n    solution = 
solve_ivp(f,[0,T],u0,t_eval=times,method='RK23',max_step=0.1)\n s = solution.y[0,:]\n y = solution.y[1:,:]\n t = solution.t\n \n return s, y, t", "def s(self, r, mu):\n\n assert(mu <= 1)\n\n if r < self.R:\n return r * mu + self.R * self.g(r, mu)\n else:\n if self.mu_star(r) <= mu:\n return 2. * self.R * self.g(r, mu)\n else:\n return 0", "def value_iteration(self):\n #Create a utility function of the environment shape\n gamma = 0.9\n epsilon = 0.01\n iteration = 0\n\n #create a utility function that matches the size of the number of states\n u = np.zeros(self.env.observation_space.n, dtype=float)\n\n u_copy = u.copy()\n\n #Create the reward grid\n reward = np.array([state_map.get(sublist) for state in frozen_lake.MAPS[self.env.spec._kwargs.get('map_name')] for sublist in state])\n\n T = self.frozen_transition()\n\n graph_list = list()\n\n #keep track of the convergence\n policy_convergence = list()\n\n while True:\n delta = 0\n iteration += 1\n u = u_copy.copy()\n graph_list.append(u)\n start_time = time()\n for s in range(self.env.observation_space.n):\n r = reward[s]\n v = np.zeros((1, self.env.observation_space.n), dtype=float)\n v[0, s] = 1.0\n u_copy[s] = self.return_state_utility(v, T, u, r, gamma)\n delta = max(delta, np.abs(u_copy[s] - u[s]))\n policy_convergence.append({'iter': iteration, 'delta': delta})\n if delta < epsilon * (1 - gamma) / gamma:\n print(\"Total Iterations: {}\".format(iteration))\n print(\"=================== VALUE ITERATION RESULT ==================\")\n print(\"Iterations: \" + str(iteration))\n print(\"Delta: \" + str(delta))\n print(\"Gamma: \" + str(gamma))\n print(\"Epsilon: \" + str(epsilon))\n print(\"Time to converge: {} seconds\".format(time() - start_time))\n print(\"===================================================\")\n utility_reshape = np.reshape(u, (int(np.sqrt(self.env.observation_space.n)), int(np.sqrt(self.env.observation_space.n))))\n print (np.array(utility_reshape, dtype=float))\n print(\"===================================================\")\n break\n\n return u", "def force ( box, strain, r ):\n\n import numpy as np\n from itertools import product\n import math\n \n # It is assumed that positions are in units where box = 1\n # Forces are calculated in units where sigma = 1 and epsilon = 1\n # Lees-Edwards boundaries, in sliding brick arrangement\n # Flow/gradient/vorticity directions are x/y/z == 0/1/2\n # Uses neighbour lists\n\n n = r.shape[0]\n\n # Set up vectors to half the cells in neighbourhood of 3x3x3 cells in cubic lattice\n # The cells are chosen so that if (d0,d1,d2) appears, then (-d0,-d1,-d2) does not.\n # The last three cells are extra ones, to cope with the sheared system\n d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 0, 1], [-1, 0, 1], [ 0, 0, 1], # 5 cells with d1=0\n [ 1, 1, -1], [ 1, 1, 0], [ 1, 1, 1], # 3 cells with d0= 1, d1=1\n [ 0, 1, -1], [ 0, 1, 0], [ 0, 1, 1], # 3 cells with d0= 0, d1=1\n [-1, 1, -1], [-1, 1, 0], [-1, 1, 1], # 3 cells with d0=-1, d1=1\n [-2, 1, -1], [-2, 1, 0], [-2, 1, 1] ] ) # 3 cells with d0=-2, d1=1\n\n r[:,0] = r[:,0] - np.rint(r[:,1])*strain # Extra correction in box=1 units\n r = r - np.rint(r) # Ensure all atoms in periodic box\n \n sr2_ovr = 1.77 # Overlap threshold (pot > 100)\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n # Initialize\n f = np.zeros_like(r)\n total = PotentialType ( pot=0.0, vir=0.0, pyx=0.0, lap=0.0, ovr=False )\n\n # Calculate cell index triplets\n sc = math.floor(box/r_cut) # Number of cells along box edge\n assert sc >= 3, 
'System is too small for cells' # Guard against box being too small\n c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms\n assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic \"guard\" against roundoff\n\n shift = math.floor(strain*sc) # Strain measured in cell lengths\n\n if fast:\n \n # Build list of arrays, each array holding positions of atoms in a cell\n # At the same time, define a matching set of force arrays in each cell\n # i and j number the atoms in each cell; we do not refer explicitly to indices in r\n rc, fc = [], [] # Initially empty lists of positions and forces\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n rc.append(r[mask,:]) # Copy atom coordinates into array, add to list\n fc.append(np.zeros_like(rc[-1])) # Zero corresponding forces, add to list\n\n for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n if rci.size==0: # Handle empty cell\n continue\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d.copy() # Standard list copied, including extra 3 cells\n dd[5:,0] = d[5:,0] - shift # All those looking up need adjustment in the x direction\n else: # i-cell is not in top layer\n dd = d[:-3,:].copy() # Last three extra cells are not needed; shift is not needed\n \n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert j-cell to single-index\n rcj = rc[cj1] # Get atoms in j-cell as an array\n if rcj.size==0: # Handle empty cell\n continue\n\n rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j\n rij[:,:,0] = rij[:,:,0] - np.rint(rij[:,:,1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # PBCs in box=1 units\n rij_sq = np.sum(rij**2,axis=2) # Squared separations\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n\n if ci1==cj1:\n np.fill_diagonal(in_range,False) # Eliminate i==j when i-cell==j-cell\n np.fill_diagonal(rij_sq,1.0) # Avoid divide-by-zero below\n\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = np.where ( in_range, pot+0.25, 0.0 ) # WCA LJ pair potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = vir * sr2 # LJ scalar part of forces\n fij = rij * fij[:,:,np.newaxis] # LJ pair forces\n pyx = rij[:,:,1]*fij[:,:,0] # Off-diagonal element of pressure tensor\n\n if ci1==cj1: # Correct for double-counting ij and ji when i-cell==j-cell\n fij = fij / 2\n total = total + PotentialType ( pot=np.sum(pot)/2, vir=np.sum(vir)/2, \n pyx=np.sum(pyx)/2, lap=np.sum(lap)/2, ovr=np.any(ovr) )\n else:\n total = total + PotentialType ( pot=np.sum(pot), vir=np.sum(vir), \n pyx=np.sum(pyx), lap=np.sum(lap), ovr=np.any(ovr) )\n\n fc[ci1][:,:] = fc[ci1][:,:] + np.sum(fij,axis=1) # Aggregate force on atoms in i-cell\n fc[cj1][:,:] = fc[cj1][:,:] - np.sum(fij,axis=0) # Aggregate force on atoms in j-cell\n\n # Copy forces from list of cell arrays to main force array\n for ci in 
product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n ci1 = np.ravel_multi_index(ci,(sc,sc,sc),mode='wrap') # Single-index\n f[mask,:] = fc[ci1] # Copy atom forces from correct cell\n\n else:\n \n # Build list of arrays, each array holding indices of atoms in a cell\n # ki and kj are atom indices in the r array; i and j number the atoms in each cell\n k_array = np.arange(n) # Atom indices 0..N-1\n kc = [] # Initially empty list of indices\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n kc.append(k_array[mask]) # Copy atom indices into array, add to list\n\n for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d # Standard list copied, including extra 3 cells\n dd[5:,0] = dd[5:,0] - shift # All those looking up need adjustment in the x direction\n else:\n dd = d[:-3,:] # Last three extra cells are not needed; shift is not needed\n\n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index\n kcj = kc[cj1] # Get indices of atoms in j-cell as an array\n\n for i, ki in enumerate(kci): # Loop over individual atoms in i-cell\n j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell\n if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case \n continue # where j-cell==i-cell and i is last atom\n\n for kj in kcj[j0:]: # Loop over individual atoms in j-cell\n rij = r[ki,:]-r[kj,:] # Separation vector\n rij[0] = rij[0] - np.rint(rij[1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n\n if rij_sq < r_cut_box_sq: # Check within cutoff\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = 1.0 / rij_sq # (sigma/rij)**2\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = pot + 0.25 # WCA LJ potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = rij * vir * sr2 # LJ forces\n pyx = rij[1]*fij[0] # Off-diagonal element of pressure tensor\n total = total + PotentialType ( pot=pot, vir=vir, pyx=pyx, lap=lap, ovr=ovr )\n f[ki,:] = f[ki,:] + fij\n f[kj,:] = f[kj,:] - fij\n\n # Multiply results by numerical factors\n f = f * 24.0 # 24*epsilon\n total.pot = total.pot * 4.0 # 4*epsilon\n total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3\n total.pyx = total.pyx * 24.0 # 24*epsilon\n total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji\n \n return total, f", "def forward_substitution(self):\r\n for col in range(0, self.SIZE):\r\n self.check_solvability(self.matrix[col][col], self.result[col])\r\n self.result[col] = self.divide(self.result[col], self.matrix[col][col])\r\n for row in range(col + 1, self.SIZE):\r\n self.result[row] -= (self.result[col] * self.matrix[row][col])\r\n return self.result", "def qri_mgs_piv( A, alpha=0.5 ):\n \n Q = numpy.array(A, dtype=float)\n m,n = Q.shape\n R = numpy.zeros( (n,n) )\n Qnorms = numpy.zeros( n )\n piv = numpy.zeros( n )\n P = 
numpy.eye( n )\n\n for k in range( 0, n ) :\n # step 0\n for j in range ( k, n ) :\n Qnorms[j] = numpy.linalg.norm( Q[:,j] )\n #print Qnorms\n j = numpy.where(Qnorms == max(Qnorms[k:n]))[0][0]\n Qnorms[k] = 0\n #print Q\n #print R\n #piv[k] = j\n if (j != k) :\n #print \"switching columns\", k, \"and\", j\n P[:, [j, k]] = P[:, [k, j]]\n Q[:, [j, k]] = Q[:, [k, j]]\n #if (k > 0) :\n # R[0:k, [j, k]] = R[0:k, [k, j]]\n R[:, [j, k]] = R[:, [k, j]]\n #print Q\n #print R\n\n # step 1\n vl2norm = numpy.linalg.norm( Q[:,k] )\n ii = 0\n while True : # iterate\n for i in range( 0, k ) :\n s = numpy.dot( Q[:,i], Q[:,k] )\n Q[:,k] = Q[:,k] - s * Q[:,i]\n R[i,k] = R[i,k] + s\n\n ii = ii + 1\n vlnorm = vl2norm\n vl2norm = numpy.linalg.norm( Q[:,k] )\n if (vl2norm > alpha * vlnorm) :\n #print \"on column\", k, \"used\", ii, \"orthogonalizations\"\n break\n \n # step 2\n R[k,k] = numpy.linalg.norm( Q[:,k] )\n Q[:,k] = Q[:,k] / R[k,k]\n\n # step 3\n if (k == n) :\n break\n else :\n for j in range( k+1, n ) :\n R[k,j] = numpy.dot( Q[:,k], Q[:,j] )\n Q[:,j] = Q[:,j] - R[k,j] * Q[:,k]\n\n # step 4\n #Qhat = Q[:,k]\n #Qhat2 = Qhat\n for j in range( k+1, n ) :\n ii = 0\n vl2norm = numpy.linalg.norm( Q[:,j] )\n while True : # iterate\n s = numpy.dot( Q[:,j], Q[:,k] )\n R[k,j] = R[k,j] + s\n Q[:,j] = Q[:,j] - s * Q[:,k]\n \n ii = ii + 1\n vlnorm = vl2norm\n vl2norm = numpy.linalg.norm( Q[:,j] )\n if (vl2norm > alpha * vlnorm) :\n #print \"on column\", j, \"used\", ii, \"orthogonalizations\"\n break\n \n return Q,R,P", "def update(self, state_sequence, reward_sequence):\n\n for i in range(reward_sequence.shape[0]):\n\n trajt_1 = state_sequence[:,i][:,np.newaxis]\n Vt_1 = self.get_value_function(trajt_1)[0]\n trajt = state_sequence[:,i+1][:,np.newaxis]\n Vt = self.get_value_function(trajt)[0]\n k_t_1 = self.kernel(self.D, trajt_1)\n k_t = self.kernel(self.D, trajt)\n ktt = self.kernel(trajt, trajt)\n at = np.dot(self.K_inv, k_t)\n et = (ktt - np.dot(k_t.T, at))\n delk_t_1 = k_t_1 - self.gamma*k_t\n\n if ((et - self.nu) > 10**(-4)) and (abs(Vt_1 - self.gamma*Vt - reward_sequence[i]) > 2*abs(reward_sequence[i])):\n self.D = np.concatenate((self.D, trajt), axis=1)\n self.V_D = np.concatenate((self.V_D, self.V_mu(state_sequence[:,i+1][:,np.newaxis])), axis=0)\n\n at_by_et = at/et\n self.K_inv = np.concatenate((self.K_inv + np.dot(at, at.T)/et, -at_by_et), axis=1)\n self.K_inv = np.concatenate((self.K_inv, \\\n np.concatenate((-at_by_et.T, 1/et), axis=1)), axis=0)\n\n c_t = np.dot(self.C_, delk_t_1) - self.A\n\n delktt = np.dot(self.A.T, delk_t_1 - self.gamma*k_t) + (self.gamma**2)*ktt\n s_t = self.sigma0**2 + delktt - np.dot(delk_t_1.T, np.dot(self.C_, delk_t_1))\n\n diff_r = np.dot(delk_t_1.T, self.alpha_)[0,0] - reward_sequence[i]\n self.alpha_ = np.concatenate((self.alpha_ + c_t/s_t*diff_r, self.gamma/s_t*diff_r), axis=0)\n\n gc_t_by_s_t = (self.gamma/s_t)*c_t\n self.C_ = np.concatenate((self.C_ + np.dot(c_t, c_t.T)/s_t, gc_t_by_s_t), axis=1) \n self.C_ = np.concatenate((self.C_, \\\n np.concatenate((gc_t_by_s_t.T, self.gamma**2/s_t), axis=1)), axis=0)\n\n self.A = np.zeros((self.A.shape[0]+1, self.A.shape[1]), dtype=np.float64, order='C')\n self.A[-1, 0] = 1\n\n self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)\n\n else:\n\n ct = np.dot(self.C_, delk_t_1) - (self.A - self.gamma*at)\n st = self.sigma0**2 - np.dot(ct.T, delk_t_1)\n\n diff_r = np.dot(delk_t_1.T, self.alpha_)[0,0] - reward_sequence[i]\n self.alpha_ = self.alpha_ + ct/st*diff_r\n\n self.C_ = self.C_ + np.dot(ct, ct.T)/st\n\n self.A = 
at\n\n self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)\n\n assert (not np.isnan(self.alpha_).any()), \"Check alpha for NaN values\"", "def one_step(self):\r\n assert (self.uv_vol is not None)\r\n assert (self.guv_vol is not None)\r\n assert (self.uv_bound is not None)\r\n assert (self.vf_vect_bound is not None)\r\n assert (self.vF_vect_vol is not None)\r\n # Shape checks\r\n assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])\r\n assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])\r\n assert (self.vF_vect_vol.shape == self.vf_vect_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.uv_vol.shape[1])\r\n assert (self.uv_vol.shape == self.guv_vol.shape)\r\n assert (self.uv_vol.shape == self.uv_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.vF_vect_vol.shape[0])\r\n \r\n if self.step == 0:\r\n self.check_k_matrix_stability()\r\n # print(\"Epsilon is :\"+str(self.Epsilon))\r\n # print(\"Beta is :\"+str(self.Beta))\r\n\r\n # Form \"Stiffness\" matrix:\r\n K = self.make_k_matrix()\r\n # Form \"Force\" vector: \r\n f = self.vF_vect_vol + (self.Epsilon / self.Beta) * self.vf_vect_bound\r\n\r\n # print(\"FORCE VECTOR:\")\r\n # print(f)\r\n # print(\"STIFFNESS MATRIX\")\r\n # print(K)\r\n # print(\"UV_VOL\")\r\n # print(self.uv_vol)\r\n # print(\"EPSILON * GUV_VOL\")\r\n # print(self.Epsilon * self.guv_vol)\r\n # print(\"UV_BOUND * COEFF\")\r\n # print((self.Epsilon / self.Beta) * self.uv_bound)\r\n sol = scipy_sparse_linsolve(K, f)\r\n # print(\"SOLUTION\")\r\n # print(sol)\r\n return sol", "def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! 
RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history", "def update_r(self):\n self.gamma_r = self.gamma_s - self.gamma_q\n self.Sigma_r = self.Sigma_s - self.Sigma_q", "def modified_gram_schmidt_step_arnoldi(j, vals):\n vector, krylov_vectors, n, H = vals\n v = krylov_vectors[j, :]\n h = jax.numpy.vdot(v, vector)\n H = jax.ops.index_update(H, jax.ops.index[j, n], h)\n vector = vector - h * jax.numpy.reshape(v, vector.shape)\n return [vector, krylov_vectors, n, H]", "def SolveSCP(self):\n\n t0 = time()\n\n # Some predicates\n Lu_min = 0.\n niters_max = self._maxiters\n maxfracchange = self._maxfracchange\n\n # initialization, resetting ...\n self.reset_all() # including _u_naught(), first application\n scp_min = self.greedy()\n\n # re-initialization iteration; col fixing ignored for the moment\n niters = 0\n f_change = _largenumber\n while (f_change>maxfracchange) and (niters<niters_max):\n # re-initialize u\n if (np.mod(niters, 2)==0): \n self.reset_u(random=True)\n else:\n self.reset_u()\n u_tmp, Lu_tmp = self.subgradient() # find a near-optimal solution \n u, Lu = self.subgradient() # rerun subgradient to get a set of Lagrangian multipliers\n\n scp_all = np.zeros(self._subg_nsteps)\n for i in np.arange(self._subg_nsteps):\n #self.reset_s()\n self.s = np.copy(self.f)\n scp_all[i] = self.greedy(u=u[:,i])\n\n # check if the solution is gettting better\n imin_tmp = (np.where(scp_all==np.amin(scp_all)))[0]\n imin = imin_tmp[np.argmax(Lu[imin_tmp])]\n imax = np.argmax(Lu)\n if (np.mod(niters, 5)==0):\n print(\"This Best solution: UB={0}, LB={1}, UB1={2}, LB1={3}\".format(scp_all[imin], Lu[imin], scp_all[imax], Lu[imax]))\n if (niters==0) or ((scp_all[imin]<=scp_min) and ((Lu[imin]-Lu_min)>-(np.fabs(Lu_min)*self._LB_maxfracchange))):\n scp_min = scp_all[imin]\n u_min = np.copy(u[:,imin])\n Lu_min = Lu[imin]\n self.stepsize = _stepsize\n\n LB = Lu_min\n\n # final step, needs to get u_min back\n self.u = np.copy(u_min)\n self.s = np.copy(self.f)\n UB = self.greedy()\n\n # Which is better? absolute change or fractional change? 
\n # Both are fine, but cost should be normalized over the mean/median.\n GAP = (UB-LB)/np.fabs(UB)\n f_change = GAP\n if (np.mod(niters, 5)==0):\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n niters = niters + 1\n if (niters == niters_max): \n #warnings.warn(\"Iteration reaches maximum = {0}\".format(niters))\n print(\"Iteration in re-initialization reaches maximum number = {0}\".format(niters))\n\n # Need to remove redundant columns\n # self.remove_redundant() # this itself is NP-hard ...\n\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n print(\"Final Best solution: {0}\".format(UB))\n time_used = (time()-t0)/60.\n print(\"Took {0:.3f} minutes to reach current solution.\".format(time_used))\n\n return (UB,time_used)", "def _random_walk(self, preference_vectors: sps.csr_matrix) -> np.ndarray:\n similarity_rank_vertices = preference_vectors\n nb_iteration = 0\n while True:\n previous_similarity_rank_vertices = similarity_rank_vertices\n if self.verbose:\n print(\"Step: {}\".format(nb_iteration + 1))\n\n similarity_rank_vertices = self.damping_factor * similarity_rank_vertices.dot(self._transition_matrix) + (\n 1 - self.damping_factor) * preference_vectors\n\n diff = np.sum(\n np.abs(similarity_rank_vertices - previous_similarity_rank_vertices))\n if nb_iteration > 0 and diff < self.minimal_random_walk_change_difference_value:\n if self.verbose:\n print(\"Converged with error: {:.6f}\".format(diff))\n break\n\n nb_iteration += 1\n\n if nb_iteration > self.max_iterations:\n if self.verbose:\n print(\"Random walk did not converge, current error: {:.6f}\".format(\n diff))\n break\n return similarity_rank_vertices.toarray()", "def rk4(accel,m,r,h,v): \n k1v = accel(m,r) \n k1r = v \n k2v = accel(m,r + h*0.5*k1r) \n k2r = v+k1v*h*0.5 \n k3v = accel(m,r + h*0.5*k2r) \n k3r = v+k2v*h*0.5\n k4v = accel(m,r + h*k3r) \n k4r = v+k3v*h\n new_v = v + h*(k1v + 2*k2v + 2*k3v + k4v)/float(6)\n new_r = r + h*(k1r + 2*k2r + 2*k3r + k4r)/float(6)\n return new_v,new_r", "def left_rotate_s4(arr, d):\n n = len(arr)\n g = gcd(d, n)\n for i in range(g):\n\n # move i-th values of blocks\n temp = arr[i]\n j = i\n while 1:\n k = j + d\n # print(\"K >= n : {} >= {}\".format(k, n), end=\"\\n\")\n if k >= n:\n k = k - n\n # print(\"K == i : {} == {}\".format(k, i), end=\"\\n\")\n if k == i:\n break\n # print(\"i: {}, j: {}, k: {}\".format(i, j, k), end=\"\\n\")\n arr[j] = arr[k]\n j = k\n\n arr[j] = temp", "def discount_rewards(r):\r\n discounted_r = np.zeros_like(r)\r\n running_add = 0\r\n for t in reversed(xrange(0, r.size)):\r\n running_add = running_add * gamma + r[t]\r\n discounted_r[t] = running_add\r\n return discounted_r", "def first_lga(r, r_m, p, q):\n\n\t# Computation of the S/C velocity in the HRV frame\n\tP_HRV_ECI = np.linalg.inv(P_ECI_HRV())\n\tv_HRV = P_HRV_ECI.dot(r[3:])\n\n\t# Moon's velocity in the HRV\n\tv_M_HRV = np.array([0, 0, cst.V_M])\n\n\tv_inf, phi_m, theta_m = cart2sph(v_HRV - v_M_HRV)\n\n\t# S/C distance to the Earth [km]\n\td = np.linalg.norm(r[:3])\n\n\t# 1 - Computation of the polar angle of the S/C excess velocity after LGA to enter in p:q resonance with the Moon [rad]\n\t# ---------------------------------------------------------------------------------------------------------------------\n\t# S/C velocity after LGA to enter in a p:q resonance with the Moon [km/s]\n\tv = np.sqrt( 2*cst.mu_E/d - (2*np.pi * cst.mu_E / (cst.T_M * p/q))**(2/3) )\n\n\t# Polar 
angle of the S/C velocity at infinity after LGA [rad]\n\ttheta_p = np.arccos( (v**2 - cst.V_M**2 - v_inf**2) / (2 * cst.V_M * v_inf) )\n\n\n\t# 2 - Computation of the admissible longitude angles of the S/C velocity at infinity after LGA to enter in p:q resonance with the Moon [rad]\n\t# ------------------------------------------------------------------------------------------------------------------------------------------\n\t# Computation of the maximum rotation [rad]\n\tdelta_max = 2 * np.arcsin( cst.mu_M/(cst.R_M+r_m) / (v_inf**2 + cst.mu_M/(cst.R_M+r_m)) )\n\n\t# Possible longitude angles [rad]\n\tphi_p_arr = np.linspace(-np.pi, np.pi, 100)\n\n\t# Admissible longitude angles [rad]\n\tphi_p_adm = np.array([])\n\n\tdef admissible_longitude(phi_p):\n\t\treturn (np.cos(phi_m)*np.sin(theta_m)*np.sin(theta_p))*np.cos(phi_p) + \\\n\t\t\t (np.sin(phi_m)*np.sin(theta_m)*np.sin(theta_p))*np.sin(phi_p) + \\\n\t\t\t\tnp.cos(theta_m)*np.cos(theta_p) - np.cos(delta_max)\n\n\tfor phi_p in phi_p_arr:\n\t\tif admissible_longitude(phi_p) >= 0:\n\t\t\tphi_p_adm = np.append(phi_p_adm, phi_p)\n\n\n\tr_fs = np.ndarray(shape=(0, 8))\n\n\tt_span = [0, p/q * cst.T_M]\n\tt_eval = np.linspace(t_span[0], t_span[-1], 10000)\n\n\tfig = plt.figure()\n\tax = fig.gca(projection='3d')\n\n\tfor k, phi_p in enumerate(phi_p_adm):\n\n\t\t# Computation of the post-LGA S/C velocity in the ECI frame\n\t\tv_HRV_p = sph2cart([v_inf, phi_p, theta_p]) + v_M_HRV\n\t\tv_ECI_p = P_ECI_HRV().dot(v_HRV_p)\n\t\tr0 = np.concatenate((r[:3], v_ECI_p))\n\n\t\tprint(\"HRV : [{}, {}, {}]\".format(v_inf, phi_p*180/np.pi, theta_p*180/np.pi))\n\t\tprint(\"Velocity : {} km/s\".format(np.linalg.norm(v_M_HRV)))\n\t\tinput()\n\n\t\tsolution = solve_ivp(fun=kepler, t_span=t_span, t_eval=t_eval, y0=r0, rtol=1e-12, atol=1e-12)\n\t\tr_f = solution.y[:, -1]\n\n\t\tr_fs = np.append(r_fs, np.concatenate(([phi_p], [theta_p], r_f)))\n\n\t\tax.plot(solution.y[0], solution.y[1], solution.y[2], '-', color='blue', linewidth=1)\n\n\tplot_env_3D(ax)\n\tplt.show()\n\n\tr_fs = r_fs.reshape(int(len(r_fs)/8), 8)\n\n\treturn r_fs, solution.t", "def relu(self, v):\n if v > 0:\n solution = v\n else:\n solution = 0\n return solution", "def sorm(func, dist_list, init_search_point, alg): \n def SLSQP(func, dist_list, init_search_point):\n \n dim = len(dist_list)\n current_beta = 0\n new_beta = 1\n sig = np.empty((1, dim))\n mu = np.empty((1, dim))\n new_search_point = np.array(init_search_point).reshape((1, dim))\n \n def f_l(x_l):\n return(func([x_l[i,:]*sig[0,i] + mu[0,i] for i in range(0, dim)]))\n \n while abs(current_beta-new_beta) > 0.001:\n current_search_point = new_search_point\n current_beta = new_beta\n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n mu[0,i], sig[0, i] = Rosenblatt_Transform(dist_list[i][0], current_search_point[0,i])\n else:\n mu[0,i], sig[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n dist_fun = lambda u: np.linalg.norm(u) \n \n alg = 'SLSQP'\n \n H = lambda u: f_l(u)\n cons = ({'type': 'eq', 'fun': lambda u: -(H(u.reshape(-1,1)))})\n \n result = scipy.optimize.minimize(dist_fun, x0 = current_search_point, constraints = cons, method=alg)\n \n new_beta = result.fun\n u = np.array(result.x).reshape((1,dim))\n \n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = mu[0,i] + u[0,i]*sig[0,i]\n \n beta_value = new_beta \n p_f = sst.norm.cdf(-beta_value)\n iterations = result.nit\n u = result.x\n x = u[:]*sig[0,:] + mu[0,:]\n grad_val = scipy.optimize.approx_fprime(x, func, 0.00000001)\n 
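# approx_fprime gives the finite-difference gradient of the limit-state function at the design point; it is reshaped below and scaled by the standard deviations to form the direction cosines.\n            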
grad_val = grad_val.reshape((1, dim))\n \n sum1 = np.sum((grad_val[0,:]**2)*(sig[0,:]**2))\n cosines = np.empty((1, dim))\n \n for i in range(0, dim):\n cosines[0,i] = grad_val[0,i]*sig[0,i]/np.sqrt(sum1) \n \n return(beta_value, p_f, x, u, mu, sig, cosines, iterations) \n \n def HL_R(func, dist_list, init_search_point):\n \n iterations = 0\n cur_beta = 3\n new_beta = 0\n dim = len(dist_list)\n global_mean_arr = np.empty((1, dim))\n global_std_arr = np.empty((1, dim))\n new_search_point = np.array(init_search_point).reshape((1, dim))\n \n while abs(cur_beta - new_beta) > 0.001:\n cur_beta = new_beta\n cur_cosines = np.zeros((1, dim))\n new_cosines = np.ones((1, dim))\n \n while max((abs(cur_cosines - new_cosines))[0]) > 0.005:\n \n cur_cosines = new_cosines\n \n cur_search_point = new_search_point\n \n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n global_mean_arr[0, i], global_std_arr[0, i] = Rosenblatt_Transform(dist_list[i][0], cur_search_point[0,i])\n else:\n global_mean_arr[0, i], global_std_arr[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n \n grad_val = scipy.optimize.approx_fprime(cur_search_point[0], func, 0.00000001)\n grad_val = grad_val.reshape((1, dim))\n \n sum1 = np.sum((grad_val[0,:]**2)*(global_std_arr[0,:]**2))\n cosines = np.empty((1, dim))\n \n for i in range(0, dim):\n cosines[0,i] = grad_val[0,i]*global_std_arr[0,i]/np.sqrt(sum1)\n \n new_cosines = cosines\n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = global_mean_arr[0,i] - new_cosines[0,i]*global_std_arr[0,i]*cur_beta\n \n iterations = iterations + 1\n \n \n B = Symbol('B')\n coordinates = []\n for i in range(0, dim):\n coordinates.append(global_mean_arr[0, i] - new_cosines[0,i]*global_std_arr[0, i]*B)\n new_beta = float(solve(func(coordinates), B)[0])\n \n cosines = new_cosines \n beta_value = new_beta\n p_f = sst.norm.cdf(-new_beta)\n x = new_search_point\n u = (x[0,:] - global_mean_arr[0,:])/global_std_arr\n \n return(beta_value, p_f, x, u, global_mean_arr, global_std_arr, cosines, iterations)\n \n def HL_RF(func, dist_list, init_search_point):\n\n cur_beta = 3\n new_beta = 0\n dim = len(dist_list)\n\n new_search_point = np.array(init_search_point).reshape((1, dim))\n iterations = 0\n while abs(cur_beta - new_beta) > 0.001 and abs(func(new_search_point[0])) > 0.001:\n global_mean_arr = np.empty((1, dim))\n global_std_arr = np.empty((1, dim))\n cur_beta = new_beta\n cur_search_point = new_search_point\n \n for i in range(0, dim):\n if dist_list[i][1] != 'norm':\n global_mean_arr[0,i], global_std_arr[0, i] = Rosenblatt_Transform(dist_list[i][0], cur_search_point[0,i])\n else:\n global_mean_arr[0,i], global_std_arr[0, i] = dist_list[i][0].mean(), dist_list[i][0].std()\n \n f_val = func(cur_search_point[0])\n \n x_ast = np.empty((1, dim))\n for i in range(0, dim):\n x_ast[0,i] =(cur_search_point[0,i] - global_mean_arr[0,i])/global_std_arr[0,i]\n\n grad_val = scipy.optimize.approx_fprime(cur_search_point[0], func, 0.000001)\n grad_val = grad_val.reshape((1, dim)) \n \n grad_val_ast = np.empty(grad_val.shape)\n for i in range(0, dim):\n grad_val_ast[0,i] = grad_val[0,i]*global_std_arr[0,i]\n \n t1 = 1/np.sum(grad_val_ast[0,:]**2)\n\n t2 = sum(grad_val_ast[0,:]*x_ast[0,:]) - f_val\n \n t3 = t1*t2\n \n new_x_ast = np.empty(x_ast.shape)\n for i in range(0, dim):\n new_x_ast[0,i] = t3*grad_val_ast[0,i]\n u = new_x_ast\n new_beta = np.linalg.norm(new_x_ast)\n \n new_search_point = np.empty((1, dim))\n for i in range(0, dim):\n new_search_point[0,i] = 
new_x_ast[0,i]*global_std_arr[0,i] + global_mean_arr[0,i]\n iterations = iterations + 1\n \n grad_val_ast_sum = sum(grad_val_ast[0,:]**2)\n cosines = grad_val_ast/(grad_val_ast_sum**0.5)\n beta_value = new_beta\n x = new_search_point\n p_f = sst.norm.cdf(-beta_value)\n \n return(beta_value, p_f, x, u, global_mean_arr, global_std_arr, cosines, iterations)\n \n if alg == 'slsqp':\n beta_value, p_f, x, u, mu, sig, cosines, iterations = SLSQP(func, dist_list, init_search_point)\n elif alg == 'HL-R':\n beta_value, p_f, x, u, mu, sig, cosines, iterations = HL_R(func, dist_list, init_search_point)\n elif alg == 'HL-RF':\n beta_value, p_f, x, u, mu, sig, cosines, iterations = HL_RF(func, dist_list, init_search_point)\n \n d = len(dist_list)\n\n R0 = np.eye(d)\n \n for i in range(0, d):\n R0[-1,i] = cosines[0,i]\n \n Q, R = scipy.linalg.rq(R0)\n \n def f_l(x_l):\n return(func([x_l[i]*sig[0,i] + mu[0,i] for i in range(0, d)]))\n \n x = np.array(x).reshape((1, -1))\n u = x[0,:]*sig[0,:] + mu[0,:]\n \n H = nd.Hessian(f_l)(u)\n \n grad_val_standard = (scipy.optimize.approx_fprime(x[0], func, 0.00000001)[:])*(sig[0,:])\n \n dist_standard = np.linalg.norm(grad_val_standard)\n \n A_1 = 1/dist_standard\n R_transp = np.transpose(R)\n A_2 = R.dot(H)\n A_3 = A_2.dot(R_transp)\n \n A = A_3.dot(A_1)\n \n A = A[0:-1, 0:-1]\n \n k = np.linalg.eig(A)[0]\n \n prod_arr = np.empty((1, len(k)))\n for i in range(0, len(k)):\n prod_arr[0,i] = (1 + beta_value*k[i])**-0.5\n \n p_f_sorm = p_f*np.prod(prod_arr)\n beta_sorm = -1*scipy.stats.norm.ppf(p_f_sorm)\n \n print('-------------------------')\n print('Second-Order Reliability Analysis')\n print('Algorithm:',alg,'solver')\n print('Iterations: {}\\nReliability index = {}\\nProbability of failure = {}'.format(iterations, beta_sorm, p_f_sorm))\n print('-------------------------')\n \n return(beta_sorm, p_f_sorm)", "def Rfun(U,V,Q,Phi,Phibar, taudrag):\n \n Qclone=Q.copy()\n Qclone[Q<0]=0\n\n Ru=np.divide(np.multiply(-U,Qclone),Phi+Phibar)\n Rv=np.divide(np.multiply(-V,Qclone),Phi+Phibar)\n \n #reset to 0 if losing mass\n Ru[Q<0]=0\n Rv[Q<0]=0\n \n #if taudrag is infinity, only have the R componen \n if taudrag!=-1:\n F=Ru-(U/taudrag)\n G=Rv-(V/taudrag)\n \n else:\n F=Ru\n G=Rv\n \n return F, G", "def GramSchmidt(A):\r\n n = len(A)\r\n # Finds the number of lists in the list, which is also the number of rows\r\n m = len(A[0])\r\n # Finds the number of elements in list one, which is also the number of columns\r\n V = A\r\n R = [[0]*n for i in range(n)]\r\n # creates an empty list R with dimensions of n rows and n columns\r\n Q = [[0]*m for i in range(n)]\r\n # creates an empty list Q with dimensions of n rows and m columns\r\n inputStatus = True\r\n # inputStatus is true at this point until proven otherwise\r\n for i in range(n):\r\n for j in range(m):\r\n if ((type(A[i][j]) != int) and (type(A[i][j]) != float) and (type(A[i][j]) != complex)):\r\n inputStatus = False\r\n print(\"Invalid Input\")\r\n # this checks each value in the matrix A to make sure it is some time of number, if it isnt a number then the input status will be false \r\n # if the input status is false then an error message will be displayed stating that this is an invalid input\r\n if inputStatus == True:\r\n # if the given list does not fall under the previous if statement then the input status will continue to be true and we can continue to find the QR factorization \r\n for i in range(n):\r\n # for loop which continues as long as there are still lists in A \r\n R[i][i] = norm(V[i])\r\n # Creates the 
border for the upper triangle matrix R, where each value in the diagonal is the 2 norm of the corresponding vector in the original matrix A \r\n Q[i] = unit(V[i])\r\n # Each vector in Q is the unit vector of the corresponding vector in A \r\n for j in range(i+1,n):\r\n # the position j will be 1 more than the position i \r\n R[j][i] = dot(Q[i],V[j])\r\n # The element in R[i+1][i] is the dot product of Q[i] and V[i+1] \r\n temp = scalarmul(R[j][i],Q[i])\r\n # This is the scalar multiplication of R[i+1][i] and Q[i] which will be labeled as temp \r\n V[j] = subtract(V[j],temp)\r\n # V[j] is the difference between the original V[j] and temp \r\n return[Q,R]", "def sgd(self):\n for i, j, r in self.samples:\n # Computer prediction and error\n if (self.type=='bias'):\n prediction = self.get_rating_bias(i, j)\n elif(self.type=='nonbias') :\n prediction = self.get_rating(i, j)\n # print(i, j, r,prediction)\n e = (r - prediction)\n\n # Update biases\n self.b_u[i] =self.b_u[i]+ self.alpha * (e - self.beta * self.b_u[i])\n self.b_i[j] = self.b_i[j] + self.alpha * (e - self.beta * self.b_i[j])\n\n # Create copy of row of P since we need to update it but use older values for update on Q\n P_i = self.P[i, :][:]\n\n # Update user and item latent feature matrices\n # print(self.alpha * (e * self.Q[j, :] - self.beta * self.P[i, :]))\n # print(self.P[i, :])\n self.P[i, :] =self.P[i, :] + self.alpha * (e * self.Q[j, :] - self.beta * self.P[i, :])\n # print(self.P[i, :],\"&&&&&&\")\n self.Q[j, :] = self.Q[j, :] + self.alpha * (e * P_i - self.beta * self.Q[j, :])\n # print(self.Q[j, :])", "def _reconstruct(x, y, r1, r2, ll, gamma, rho, sigma):\n V_r1 = gamma * ((ll * y - x) - rho * (ll * y + x)) / r1\n V_r2 = -gamma * ((ll * y - x) + rho * (ll * y + x)) / r2\n V_t1 = gamma * sigma * (y + ll * x) / r1\n V_t2 = gamma * sigma * (y + ll * x) / r2\n return [V_r1, V_r2, V_t1, V_t2]", "def propagate_state(s,t0,tf):\n\n return rk4(s,t0,tf)", "def _gv_linear(self, r, t):\n mv1t = torch.matmul(self.mv1.weight, t.T) # [k, b]\n mv2r = torch.matmul(self.mv2.weight, r.T) # [k, b]\n return (mv1t + mv2r + self.bv.weight).T # [b, k]", "def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)", "def discount_rewards(r):\r\n discounted_r = np.zeros_like(r)\r\n running_add = 0\r\n for t in reversed(range(0, r.size)):\r\n running_add = running_add * gamma + r[t]\r\n discounted_r[t] = running_add\r\n return discounted_r", "def iterate_value(self):\n self.V = 
np.zeros(self.stateCount)\n for i in range(self.maxIter):\n last_V = np.copy(self.V)\n for state_index in range(self.stateCount):\n current_state = self.env.statespace[state_index]\n for action in self.env.actionspace:\n next_state = self.env.next_state(current_state,action)\n reward = self.env.compute_reward(next_state)\n next_state_index = self.env.stateDict[next_state]\n self.Q[state_index][action] = reward + self.gamma*last_V[next_state_index]\n if self.mode == 'debug':\n print(\"Q(s={}):{}\".format(current_state,self.Q[state_index]))\n self.V[state_index] = max(self.Q[state_index])\n if np.sum(np.fabs(last_V - self.V)) <= self.th:\n print (\"Convergence Achieved in {}th iteration. \"\n \"Breaking V_Iteration loop!\".format(i))\n break", "def main():\n\n from argparse import ArgumentParser, RawDescriptionHelpFormatter\n from textwrap import dedent\n parser = ArgumentParser(description=dedent(main.__doc__),\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('--version', action='version', version='%(prog)s 1.0')\n args = parser.parse_args()\n\n import itertools\n from numpy import zeros, matrix, linalg, array\n\n # Create a file ('oa' for orientational average)\n fh_pol = open('oa_raman.data', 'w')\n fh_hpol = open('oa_hyperraman.data', 'w')\n fh_2hpol = open('oa_secondhyperraman.data', 'w')\n\n # +++++ Polarizability +++++\n\n # For the polarizability, we are concerned with the average:\n # <alpha_ij^2> = sum_{ab,cd}[ <T_ia*T_jb*T_ic*T_jd> alpha_ab*alpha_cd ]\n #\n # For Raman scattering measured in a perpendicular orientation, we need\n # the averages <alpha_ii^2> and <alpha_ij^2>. For averaging of the 4th\n # rank tensor on the right side of the equation, only two circumstances\n # give nonzero averages:\n # 1. a = b = c = d\n # 2. 
a = b, c = d\n # These are stored in the lists below.\n #laaaa = ['a', 'a', 'a', 'a']\n #laabb = ['a', 'a', 'b', 'b']\n laaaa = [1, 1, 1, 1]\n laabb = [1, 1, 2, 2]\n\n saaaa = set()\n saabb = set()\n\n genaaaa = itertools.permutations(laaaa,4)\n genaabb = itertools.permutations(laabb,4)\n\n txt = 'Polarizability Averaging Indices'\n print(len(txt)*'+', file=fh_pol) \n print(txt, file=fh_pol)\n print(len(txt)*'+', file=fh_pol)\n\n # Size of the basis set and number of linearly independent terms\n r4nn, r4qn = fullpermutations(4)\n print('', file=fh_pol)\n txt = 'For a tensor of rank 4'\n print('*'*2*len(txt), file=fh_pol)\n print(txt, file=fh_pol)\n print('*'*2*len(txt), file=fh_pol)\n txt = 'Size of basis set = ' + str(r4nn)\n print(txt, file=fh_pol)\n txt = 'Number of linearly independent terms = ' + str(r4qn)\n print(txt, file=fh_pol)\n print('', file=fh_pol)\n\n # Terms with aa,aa\n txt = 'Indices for aa,aa terms'\n print(len(txt)*'=', file=fh_pol)\n print(txt, file=fh_pol)\n print(len(txt)*'=', file=fh_pol)\n for i in genaaaa:\n if i not in saaaa:\n print(i, file=fh_pol)\n saaaa.add(i)\n\n print('', file=fh_pol)\n # Terms with aa,bb (all permutations)\n txt = 'Indices for aa,bb terms'\n print(len(txt)*'=', file=fh_pol)\n print(txt, file=fh_pol)\n print(len(txt)*'=', file=fh_pol)\n for i in genaabb:\n if i not in saabb:\n print(i, file=fh_pol)\n saabb.add(i)\n\n print('', file=fh_pol)\n print('~'*30, file=fh_pol)\n print('Number of aa,aa terms', len(saaaa), file=fh_pol)\n print('Number of aa,bb terms', len(saabb), file=fh_pol)\n print('~'*30, file=fh_pol)\n print('', file=fh_pol)\n\n # Terms for Mathematica\n print('%'*30, file=fh_pol)\n print('Mathematica style output', file=fh_pol)\n print('%'*30, file=fh_pol) \n print('', file=fh_pol)\n\n # Basis vectors in the experimental reference frame\n r4exp, r4mol = vectors_exp_mol(4)\n print('Experimental reference frame basis vectors', file=fh_pol)\n for item in r4exp:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n\n # Matrix for generating orientational averages\n smat, vexp, vmol = generate_smat_and_vecs(r4nn,4,False)\n print('S matrix', file=fh_pol)\n print(smat, file=fh_pol)\n print('', file=fh_pol)\n \n # Basis vectors in the molecular reference frame\n print('Molecular reference frame basis vectors', file=fh_pol)\n for item in r4mol:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n\n # Experimental vector containing basis vectors\n print('Experimental total vector', file=fh_pol)\n print(vexp, file=fh_pol)\n print('', file=fh_pol)\n\n # Molecular vector containing basis vectors\n print('Molecular total vector', file=fh_pol)\n print(vmol, file=fh_pol)\n print('', file=fh_pol)\n\n # Index equivalence for molecular reference frame data\n data, avg_alphaii, avg_alphaij = pol_mathematica(saaaa, saabb) \n\n print('Index equivalence for molecular reference frame vectors', file=fh_pol)\n for item in data:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n\n print('Polarizability Average Terms', file=fh_pol)\n print('<alpha_ii^2> term', file=fh_pol)\n for item in avg_alphaii:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n print('<alpha_ij^2> term', file=fh_pol)\n for item in avg_alphaij:\n print(item, file=fh_pol)\n\n # +++++ First Hyperpolarizability +++++\n\n # For the first hyperpolarizability, we are concerned with the average:\n # <beta_ijk^2> \n # = sum_{abc,def}[ <T_ia*T_jb*T_kc*T_id*T_je*T_kf> beta_abc*beta_def ]\n #\n # For hyper-Raman scattering measured in a perpendicular orientation, we need\n # the averages 
<beta_iii^2> and <beta_ijj^2>. For averaging of the 6th\n # rank tensor on the right side of the equation, three circumstances\n # give nonzero averages:\n # 1. a = b = c = d = e = f\n # 2. a = b = c = d, e = f\n # 3. a = b, c = d, e = f\n # These are stored in the lists below.\n #laaaaaa = ['a', 'a', 'a', 'a', 'a', 'a']\n #laaaabb = ['a', 'a', 'a', 'a', 'b', 'b']\n #laabbcc = ['a', 'a', 'b', 'b', 'c', 'c']\n laaaaaa = [1, 1, 1, 1, 1, 1]\n laaaabb = [1, 1, 1, 1, 2, 2]\n laabbcc = [1, 1, 2, 2, 3, 3]\n\n saaaaaa = set()\n saaaabb = set()\n saabbcc = set()\n\n genaaaaaa = itertools.permutations(laaaaaa,6)\n genaaaabb = itertools.permutations(laaaabb,6)\n genaabbcc = itertools.permutations(laabbcc,6)\n\n txt = 'First hyperpolarizability Averaging Indices'\n print(len(txt)*'+', file=fh_hpol) \n print(txt, file=fh_hpol)\n print(len(txt)*'+', file=fh_hpol)\n\n # Size of the basis set and number of linearly independent terms\n r6nn, r6qn = fullpermutations(6)\n print('', file=fh_hpol)\n txt = 'For a tensor of rank 6'\n print('*'*2*len(txt), file=fh_hpol)\n print(txt, file=fh_hpol)\n print('*'*2*len(txt), file=fh_hpol)\n txt = 'Size of basis set = ' + str(r6nn)\n print(txt, file=fh_hpol)\n txt = 'Number of linearly independent terms = ' + str(r6qn)\n print(txt, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Terms with aaa,aaa\n txt = 'Indices for aaa,aaa terms'\n print(len(txt)*'=', file=fh_hpol)\n print(txt, file=fh_hpol)\n print(len(txt)*'=', file=fh_hpol)\n for i in genaaaaaa:\n if i not in saaaaaa:\n print(i, file=fh_hpol)\n saaaaaa.add(i)\n\n print('', file=fh_hpol)\n # Terms with aaa,abb (all permutations)\n txt = 'Indices for aaa,abb terms'\n print(len(txt)*'=', file=fh_hpol)\n print(txt, file=fh_hpol)\n print(len(txt)*'=', file=fh_hpol)\n for i in genaaaabb:\n if i not in saaaabb:\n print(i, file=fh_hpol)\n saaaabb.add(i)\n\n print('', file=fh_hpol)\n # Terms with aab,bcc (all permutations)\n # Here, we need to be careful that we don't overcount terms. It\n # is very easy to come up with an overcomplete basis.\n txt = 'Indices for aab,bcc terms'\n print(len(txt)*'=', file=fh_hpol)\n print(txt, file=fh_hpol)\n print(len(txt)*'=', file=fh_hpol)\n\n # This will generate all combinations of the aab,bcc terms. However,\n # it requires more prior knowledge than I'd like. \n #count1 = 0\n #count2 = 0\n #count3 = 0\n #count4 = 0\n #count5 = 0\n #for i in genaabbcc:\n # if i not in saabbcc:\n # if i[1] == 1:\n # count1 +=1\n # if count1 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[2] == 1:\n # count2 +=1\n # if count2 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[3] == 1:\n # count3 +=1\n # if count3 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[4] == 1:\n # count4 +=1\n # if count4 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[5] == 1:\n # count5 +=1\n # if count5 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # Generate all combinations of aab,bcc terms. 
We remove the redundant\n # elements next.\n for i in genaabbcc:\n if i not in saabbcc:\n saabbcc.add(i)\n\n # Basis functions of Kronecker delta products\n f61m = \"krond(a,b)*krond(c,d)*krond(e,f)\"\n f62m = \"krond(a,b)*krond(c,e)*krond(d,f)\"\n f63m = \"krond(a,b)*krond(c,f)*krond(d,e)\"\n f64m = \"krond(a,c)*krond(b,d)*krond(e,f)\"\n f65m = \"krond(a,c)*krond(b,e)*krond(d,f)\"\n f66m = \"krond(a,c)*krond(b,f)*krond(d,e)\"\n f67m = \"krond(a,d)*krond(b,c)*krond(e,f)\"\n f68m = \"krond(a,d)*krond(b,e)*krond(c,f)\"\n f69m = \"krond(a,d)*krond(b,f)*krond(c,e)\"\n f610m = \"krond(a,e)*krond(b,c)*krond(d,f)\"\n f611m = \"krond(a,e)*krond(b,d)*krond(c,f)\"\n f612m = \"krond(a,e)*krond(b,f)*krond(c,d)\"\n f613m = \"krond(a,f)*krond(b,c)*krond(d,e)\"\n f614m = \"krond(a,f)*krond(b,d)*krond(c,e)\"\n f615m = \"krond(a,f)*krond(b,e)*krond(c,d)\"\n\n lmol = [ f61m, f62m, f63m, f64m, f65m, \n f66m, f67m, f68m, f69m, f610m,\n f611m, f612m, f613m, f614m, f615m ]\n\n # Temporary set for checking uniqueness\n stmp = set()\n # This set stores the elements of saabbcc that are redundant when \n # we insert values of the indices.\n sintersect = set()\n # Loop through the elements of saabbcc\n for item in saabbcc:\n # Assign values to the indices\n a = item[0]\n b = item[1]\n c = item[2]\n d = item[3]\n e = item[4]\n f = item[5]\n # Temporary list for storing vectors with values\n tmp = []\n for vec in lmol:\n # Evaluate the value of the Kronecker delta products\n v = eval_krond(vec, a, b, c, d, e, f, 0, 0)\n tmp.append(v)\n # We need immutable objects to add in a set\n tmp = tuple(tmp)\n # Add to sintersect if the item is in stmp\n if tmp in stmp:\n sintersect.add(item)\n # Add to stmp if it isn't present\n else:\n stmp.add(tmp)\n # This function removes elements of saabbcc that intersect with\n # elements of sintersect. 
The result is a set containing only the \n # unique elements.\n saabbcc.difference_update(sintersect)\n\n # Print elements of saabbcc.\n for i in saabbcc:\n print(i, file=fh_hpol)\n\n print('', file=fh_hpol)\n print('~'*30, file=fh_hpol)\n print('Number of aaa,aaa terms', len(saaaaaa), file=fh_hpol)\n print('Number of aaa,abb terms', len(saaaabb), file=fh_hpol)\n print('Number of aab,bcc terms', len(saabbcc), file=fh_hpol)\n print('~'*30, file=fh_hpol)\n print('', file=fh_hpol)\n\n print('%'*30, file=fh_hpol)\n print('Mathematica style output', file=fh_hpol)\n print('%'*30, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Basis vectors in the experimental reference frame\n r6exp, r6mol = vectors_exp_mol(6)\n print('Experimental reference frame basis vectors', file=fh_hpol)\n for item in r6exp:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Matrix for generating orientational averages\n smat, vexp, vmol = generate_smat_and_vecs(r6nn,6,False)\n print('S matrix', file=fh_hpol)\n print(smat, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Basis vectors in the molecular reference frame\n print('Molecular reference frame basis vectors', file=fh_hpol)\n for item in r6mol:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Experimental vector containing basis vectors\n print('Experimental total vector', file=fh_hpol)\n print(vexp, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Molecular vector containing basis vectors\n print('Molecular total vector', file=fh_hpol)\n print(vmol, file=fh_hpol)\n print('', file=fh_hpol)\n\n data, avg_betaiii, avg_betaijj = hpol_mathematica(saaaaaa, saaaabb, saabbcc)\n\n print('Set up molecular reference frame vectors', file=fh_hpol)\n for item in data:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n\n print('Hyperpolarizability Average Terms', file=fh_hpol)\n print('<beta_iii^2> term', file=fh_hpol)\n for item in avg_betaiii:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n print('<beta_ijj^2> term', file=fh_hpol)\n for item in avg_betaijj:\n print(item, file=fh_hpol)\n\n # +++++ Second Hyperpolarizability +++++\n\n # For the second hyperpolarizability, we are concerned with the average:\n # <gamma_ijkl^2> \n # = sum_{abcd,efgh}[ <T_ia*T_jb*T_kc*T_ld*T_ie*T_jf*T_kg*T_lh> gamma_abcd*gamma_efgh ]\n #\n # For second hyper-Raman scattering measured in a perpendicular orientation, we need\n # the averages <gamma_iiii^2> and <gamma_ijjj^2>. For averaging of the 8th\n # rank tensor on the right side of the equation, four circumstances\n # give nonzero averages:\n # 1. a = b = c = d = e = f = g = h\n # 2. a = b = c = d = e = f, g = h\n # 3. a = b = c = d, e = f = g = h\n # 4. a = b = c = d, e = f, g = h\n # These are stored in the lists below.\n #laaaaaaaa = ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']\n #laaaaaabb = ['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b']\n #laaaabbbb = ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b']\n #laaaabbcc = ['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c']\n laaaaaaaa = [1, 1, 1, 1, 1, 1, 1, 1]\n laaaaaabb = [1, 1, 1, 1, 1, 1, 2, 2]\n laaaabbbb = [1, 1, 1, 1, 2, 2, 2, 2]\n laaaabbcc = [1, 1, 1, 1, 2, 2, 3, 3]\n # This type of average is actually equivalent to the fourth term,\n # because the indices can only be x, y, or z. 
\n #laabbccdd = ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd']\n\n saaaaaaaa = set()\n saaaaaabb = set()\n saaaabbbb = set()\n saaaabbcc = set()\n #saabbccdd = set()\n\n genaaaaaaaa = itertools.permutations(laaaaaaaa,8)\n genaaaaaabb = itertools.permutations(laaaaaabb,8)\n genaaaabbbb = itertools.permutations(laaaabbbb,8)\n genaaaabbcc = itertools.permutations(laaaabbcc,8)\n #genaabbccdd = itertools.permutations(laabbccdd,8)\n\n txt = 'Second hyperpolarizability Averaging Indices'\n print(len(txt)*'+', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'+', file=fh_2hpol)\n\n # Size of the basis set and number of linearly independent terms\n r8nn, r8qn = fullpermutations(8)\n print('', file=fh_2hpol)\n txt = 'For a tensor of rank 8'\n print('*'*2*len(txt), file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print('*'*2*len(txt), file=fh_2hpol)\n txt = 'Size of basis set = ' + str(r8nn)\n print(txt, file=fh_2hpol)\n txt = 'Number of linearly independent terms = ' + str(r8qn)\n print(txt, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Terms with aaaa,aaaa\n txt = 'Indices for aaaa,aaaa terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n for i in genaaaaaaaa:\n if i not in saaaaaaaa:\n print(i, file=fh_2hpol)\n saaaaaaaa.add(i)\n\n print('', file=fh_2hpol)\n # Terms with aaaa,aabb (all permutations)\n txt = 'Indices for aaaa,aabb terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n for i in genaaaaaabb:\n if i not in saaaaaabb:\n print(i, file=fh_2hpol)\n saaaaaabb.add(i)\n\n print('', file=fh_2hpol)\n # Terms with aaaa,bbbb (all permutations)\n txt = 'Indices for aaaa,bbbb terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n for i in genaaaabbbb:\n if i not in saaaabbbb:\n print(i, file=fh_2hpol)\n saaaabbbb.add(i)\n\n print('', file=fh_2hpol)\n # Terms with aaaa,bbcc (all permutations)\n txt = 'Indices for aaaa,bbcc terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n # Temporarily, we store all permutations. There are 420 permutations,\n # but only 210 of them are unique.\n for i in genaaaabbcc:\n if i not in saaaabbcc:\n #print(i, file=fh_2hpol)\n saaaabbcc.add(i)\n\n # Set up the Kronecker delta products as strings. 
\n f81m = 'krond(a,b)*krond(c,d)*krond(e,f)*krond(g,h)'\n f82m = 'krond(a,b)*krond(c,d)*krond(e,g)*krond(f,h)'\n f83m = 'krond(a,b)*krond(c,d)*krond(e,h)*krond(f,g)'\n f84m = 'krond(a,b)*krond(c,e)*krond(d,f)*krond(g,h)'\n f85m = 'krond(a,b)*krond(c,e)*krond(d,g)*krond(f,h)'\n f86m = 'krond(a,b)*krond(c,e)*krond(d,h)*krond(f,g)'\n f87m = 'krond(a,b)*krond(c,f)*krond(d,e)*krond(g,h)'\n f88m = 'krond(a,b)*krond(c,f)*krond(d,g)*krond(e,h)'\n f89m = 'krond(a,b)*krond(c,f)*krond(d,h)*krond(e,g)'\n f810m = 'krond(a,b)*krond(c,g)*krond(d,e)*krond(f,h)'\n f811m = 'krond(a,b)*krond(c,g)*krond(d,f)*krond(e,h)'\n f812m = 'krond(a,b)*krond(c,g)*krond(d,h)*krond(e,f)'\n f813m = 'krond(a,b)*krond(c,h)*krond(d,e)*krond(f,g)'\n f814m = 'krond(a,b)*krond(c,h)*krond(d,f)*krond(e,g)'\n f815m = 'krond(a,b)*krond(c,h)*krond(d,g)*krond(e,f)'\n f816m = 'krond(a,c)*krond(b,d)*krond(e,f)*krond(g,h)'\n f817m = 'krond(a,c)*krond(b,d)*krond(e,g)*krond(f,h)'\n f818m = 'krond(a,c)*krond(b,d)*krond(e,h)*krond(f,g)'\n f819m = 'krond(a,c)*krond(b,e)*krond(d,f)*krond(g,h)'\n f820m = 'krond(a,c)*krond(b,e)*krond(d,g)*krond(f,h)'\n f821m = 'krond(a,c)*krond(b,e)*krond(d,h)*krond(f,g)'\n f822m = 'krond(a,c)*krond(b,f)*krond(d,e)*krond(g,h)'\n f823m = 'krond(a,c)*krond(b,f)*krond(d,g)*krond(e,h)'\n f824m = 'krond(a,c)*krond(b,f)*krond(d,h)*krond(e,g)'\n f825m = 'krond(a,c)*krond(b,g)*krond(d,e)*krond(f,h)'\n f826m = 'krond(a,c)*krond(b,g)*krond(d,f)*krond(e,h)'\n f827m = 'krond(a,c)*krond(b,g)*krond(d,h)*krond(e,f)'\n f828m = 'krond(a,c)*krond(b,h)*krond(d,e)*krond(f,g)'\n f829m = 'krond(a,c)*krond(b,h)*krond(d,f)*krond(e,g)'\n f830m = 'krond(a,c)*krond(b,h)*krond(d,g)*krond(e,f)'\n f831m = 'krond(a,d)*krond(b,c)*krond(e,f)*krond(g,h)'\n f832m = 'krond(a,d)*krond(b,c)*krond(e,g)*krond(f,h)'\n f833m = 'krond(a,d)*krond(b,c)*krond(e,h)*krond(f,g)'\n f834m = 'krond(a,d)*krond(b,e)*krond(c,f)*krond(g,h)'\n f835m = 'krond(a,d)*krond(b,e)*krond(c,g)*krond(f,h)'\n f836m = 'krond(a,d)*krond(b,e)*krond(c,h)*krond(f,g)'\n f837m = 'krond(a,d)*krond(b,f)*krond(c,e)*krond(g,h)'\n f838m = 'krond(a,d)*krond(b,f)*krond(c,g)*krond(e,h)'\n f839m = 'krond(a,d)*krond(b,f)*krond(c,h)*krond(e,g)'\n f840m = 'krond(a,d)*krond(b,g)*krond(c,e)*krond(f,h)'\n f841m = 'krond(a,d)*krond(b,g)*krond(c,f)*krond(e,h)'\n f842m = 'krond(a,d)*krond(b,g)*krond(c,h)*krond(e,f)'\n f843m = 'krond(a,d)*krond(b,h)*krond(c,e)*krond(f,g)'\n f844m = 'krond(a,d)*krond(b,h)*krond(c,f)*krond(e,g)'\n f845m = 'krond(a,d)*krond(b,h)*krond(c,g)*krond(e,f)'\n f846m = 'krond(a,e)*krond(b,c)*krond(d,f)*krond(g,h)'\n f847m = 'krond(a,e)*krond(b,c)*krond(d,g)*krond(f,h)'\n f848m = 'krond(a,e)*krond(b,c)*krond(d,h)*krond(f,g)'\n f849m = 'krond(a,e)*krond(b,d)*krond(c,f)*krond(g,h)'\n f850m = 'krond(a,e)*krond(b,d)*krond(c,g)*krond(f,h)'\n f851m = 'krond(a,e)*krond(b,d)*krond(c,h)*krond(f,g)'\n f852m = 'krond(a,e)*krond(b,f)*krond(c,d)*krond(g,h)'\n f853m = 'krond(a,e)*krond(b,f)*krond(c,g)*krond(d,h)'\n f854m = 'krond(a,e)*krond(b,f)*krond(c,h)*krond(d,g)'\n f855m = 'krond(a,e)*krond(b,g)*krond(c,d)*krond(f,h)'\n f856m = 'krond(a,e)*krond(b,g)*krond(c,f)*krond(d,h)'\n f857m = 'krond(a,e)*krond(b,g)*krond(c,h)*krond(d,f)'\n f858m = 'krond(a,e)*krond(b,h)*krond(c,d)*krond(f,g)'\n f859m = 'krond(a,e)*krond(b,h)*krond(c,f)*krond(d,g)'\n f860m = 'krond(a,e)*krond(b,h)*krond(c,g)*krond(d,f)'\n f861m = 'krond(a,f)*krond(b,c)*krond(d,e)*krond(g,h)'\n f862m = 'krond(a,f)*krond(b,c)*krond(d,g)*krond(e,h)'\n f863m = 'krond(a,f)*krond(b,c)*krond(d,h)*krond(e,g)'\n f864m = 
'krond(a,f)*krond(b,d)*krond(c,e)*krond(g,h)'\n f865m = 'krond(a,f)*krond(b,d)*krond(c,g)*krond(e,h)'\n f866m = 'krond(a,f)*krond(b,d)*krond(c,h)*krond(e,g)'\n f867m = 'krond(a,f)*krond(b,e)*krond(c,d)*krond(g,h)'\n f868m = 'krond(a,f)*krond(b,e)*krond(c,g)*krond(d,h)'\n f869m = 'krond(a,f)*krond(b,e)*krond(c,h)*krond(d,g)'\n f870m = 'krond(a,f)*krond(b,g)*krond(c,d)*krond(e,h)'\n f871m = 'krond(a,f)*krond(b,g)*krond(c,e)*krond(d,h)'\n f872m = 'krond(a,f)*krond(b,g)*krond(c,h)*krond(d,e)'\n f873m = 'krond(a,f)*krond(b,h)*krond(c,d)*krond(e,g)'\n f874m = 'krond(a,f)*krond(b,h)*krond(c,e)*krond(d,g)'\n f875m = 'krond(a,f)*krond(b,h)*krond(c,g)*krond(d,e)'\n f876m = 'krond(a,g)*krond(b,c)*krond(d,e)*krond(f,h)'\n f877m = 'krond(a,g)*krond(b,c)*krond(d,f)*krond(e,h)'\n f878m = 'krond(a,g)*krond(b,c)*krond(d,h)*krond(e,f)'\n f879m = 'krond(a,g)*krond(b,d)*krond(c,e)*krond(f,h)'\n f880m = 'krond(a,g)*krond(b,d)*krond(c,f)*krond(e,h)'\n f881m = 'krond(a,g)*krond(b,d)*krond(c,h)*krond(e,f)'\n f882m = 'krond(a,g)*krond(b,e)*krond(c,d)*krond(f,h)'\n f883m = 'krond(a,g)*krond(b,e)*krond(c,f)*krond(d,h)'\n f884m = 'krond(a,g)*krond(b,e)*krond(c,h)*krond(d,f)'\n f885m = 'krond(a,g)*krond(b,f)*krond(c,d)*krond(e,h)'\n f886m = 'krond(a,g)*krond(b,f)*krond(c,e)*krond(d,h)'\n f887m = 'krond(a,g)*krond(b,f)*krond(c,h)*krond(d,e)'\n f888m = 'krond(a,g)*krond(b,h)*krond(c,d)*krond(e,f)'\n f889m = 'krond(a,g)*krond(b,h)*krond(c,e)*krond(d,f)'\n f890m = 'krond(a,g)*krond(b,h)*krond(c,f)*krond(d,e)'\n f891m = 'krond(a,h)*krond(b,c)*krond(d,e)*krond(f,g)'\n f892m = 'krond(a,h)*krond(b,c)*krond(d,f)*krond(e,g)'\n f893m = 'krond(a,h)*krond(b,c)*krond(d,g)*krond(e,f)'\n f894m = 'krond(a,h)*krond(b,d)*krond(c,e)*krond(f,g)'\n f895m = 'krond(a,h)*krond(b,d)*krond(c,f)*krond(e,g)'\n f896m = 'krond(a,h)*krond(b,d)*krond(c,g)*krond(e,f)'\n f897m = 'krond(a,h)*krond(b,e)*krond(c,d)*krond(f,g)'\n f898m = 'krond(a,h)*krond(b,e)*krond(c,f)*krond(d,g)'\n f899m = 'krond(a,h)*krond(b,e)*krond(c,g)*krond(d,f)'\n f8100m = 'krond(a,h)*krond(b,f)*krond(c,d)*krond(e,g)'\n f8101m = 'krond(a,h)*krond(b,f)*krond(c,e)*krond(d,g)'\n f8102m = 'krond(a,h)*krond(b,f)*krond(c,g)*krond(d,e)'\n f8103m = 'krond(a,h)*krond(b,g)*krond(c,d)*krond(e,f)'\n f8104m = 'krond(a,h)*krond(b,g)*krond(c,e)*krond(d,f)'\n f8105m = 'krond(a,h)*krond(b,g)*krond(c,f)*krond(d,e)'\n\n # Molecular vector of basis functions\n lmol = [ f81m, f82m, f83m, f84m, f85m,\n f86m, f87m, f88m, f89m, f810m,\n f811m, f812m, f813m, f814m, f815m,\n f816m, f817m, f818m, f819m, f820m,\n f821m, f822m, f823m, f824m, f825m,\n f826m, f827m, f828m, f829m, f830m,\n f831m, f832m, f833m, f834m, f835m,\n f836m, f837m, f838m, f839m, f840m,\n f841m, f842m, f843m, f844m, f845m,\n f846m, f847m, f848m, f849m, f850m,\n f851m, f852m, f853m, f854m, f855m,\n f856m, f857m, f858m, f859m, f860m,\n f861m, f862m, f863m, f864m, f865m,\n f866m, f867m, f868m, f869m, f870m,\n f871m, f872m, f873m, f874m, f875m,\n f876m, f877m, f878m, f879m, f880m,\n f881m, f882m, f883m, f884m, f885m,\n f886m, f887m, f888m, f889m, f890m,\n f891m, f892m, f893m, f894m, f895m,\n f896m, f897m, f898m, f899m, f8100m,\n f8101m, f8102m, f8103m, f8104m, f8105m ]\n\n # Temporary set for checking uniqueness\n stmp = set()\n # This set stores the elements of saaaabbcc that are redundant when \n # we insert values of the indices.\n sintersect = set()\n # Loop through the elements of saaaabbcc\n for item in saaaabbcc:\n # Assign values to the indices\n a = item[0]\n b = item[1]\n c = item[2]\n d = item[3]\n e = item[4]\n f = item[5]\n g = 
item[6]\n h = item[7]\n # Temporary list for storing vectors with values\n tmp = []\n for vec in lmol:\n # Evaluate the value of the Kronecker delta products\n v = eval_krond(vec, a, b, c, d, e, f, g, h)\n tmp.append(v)\n # We need immutable objects to add in a set\n tmp = tuple(tmp)\n # Add to sintersect if the item is in stmp\n if tmp in stmp:\n sintersect.add(item)\n # Add to stmp if it isn't present\n else:\n stmp.add(tmp)\n # This function removes elements of saaaabbcc that intersect with\n # elements of sintersect. The result is a set containing only the \n # unique elements.\n saaaabbcc.difference_update(sintersect)\n\n # Print elements of saaaabbcc.\n for i in saaaabbcc:\n print(i, file=fh_2hpol)\n\n print('', file=fh_2hpol)\n print('~'*30, file=fh_2hpol)\n print('Number of aaaa,aaaa terms', len(saaaaaaaa), file=fh_2hpol)\n print('Number of aaaa,aabb terms', len(saaaaaabb), file=fh_2hpol)\n print('Number of aaaa,bbbb terms', len(saaaabbbb), file=fh_2hpol)\n print('Number of aaaa,bbcc terms', len(saaaabbcc), file=fh_2hpol)\n print('~'*30, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n print('%'*30, file=fh_2hpol)\n print('Mathematica style output', file=fh_2hpol)\n print('%'*30, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Basis vectors in the experimental reference frame\n r8exp, r8mol = vectors_exp_mol(8)\n print('Experimental reference frame basis vectors', file=fh_2hpol)\n for item in r8exp:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Matrix for generating orientational averages\n smat, vexp, vmol = generate_smat_and_vecs(r8nn,8,False)\n print('S matrix', file=fh_2hpol)\n print(smat, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Basis vectors in the molecular reference frame\n print('Molecular reference frame basis vectors', file=fh_2hpol)\n for item in r8mol:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Experimental vector containing basis vectors\n print('Experimental total vector', file=fh_2hpol)\n print(vexp, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Molecular vector containing basis vectors\n print('Molecular total vector', file=fh_2hpol)\n print(vmol, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n data, avg_gammaiiii, avg_gammaijjj = secondhpol_mathematica(saaaaaaaa, saaaaaabb, saaaabbbb, saaaabbcc)\n\n print('Set up molecular reference frame vectors', file=fh_2hpol)\n for item in data:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n print('Second Hyperpolarizability Average Terms', file=fh_2hpol)\n print('<gamma_iiii^2> term', file=fh_2hpol)\n for item in avg_gammaiiii:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n print('<gamma_ijjj^2> term', file=fh_2hpol)\n for item in avg_gammaijjj:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Print out the irreducible bases\n red_expbasis, red_molbasis = reduced_basis_2hpol()\n\n print('Irreducible experimental reference frame basis vectors', file=fh_2hpol)\n for item in red_expbasis:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n print('Irreducible molecular reference frame basis vectors', file=fh_2hpol)\n for item in red_molbasis:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Generate the S matrix and total vectors in the irreducible bases\n smat, vexp, vmol = generate_smat_and_vecs(r8qn,8,True)\n \n # Irreducible S matrix\n print('Irreducible S matrix', file=fh_2hpol)\n print(smat, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Irreducible experimental vector containing basis vectors\n print('Irreducible experimental 
total vector', file=fh_2hpol)\n print(vexp, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Irreducible molecular vector containing basis vectors\n print('Irreducible molecular total vector', file=fh_2hpol)\n print(vmol, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Close the files\n fh_pol.close()\n fh_hpol.close()\n fh_2hpol.close()", "def magic_sample(self, ys):\n\n #for each non-zero element in y\n #we want to multiply the initial state by HGate(i) SGate(i) HGate(i)\n #this turns out to be equivalent to multiplying the whole final state by\n #U H_k S_k H_k U^\\dagger\n #but H_k S_k H_k = e^{i\\pi/4} \\frac{1}{\\sqrt{2}} (I -i X_k)\n #so now we evolve identity forward by U (trivial)\n #and evolve X_k forward by U (using the AGState)\n #then we have to send the resulting Pauli through UC and UH\n #giving a third Pauli\n #then the state is of the form (we^{i\\pi/4}) UC UH (I + i^d P)/sqrt(2) |s>\n #then we apply Bravyi et al's prop. 4 to turn this into a new ch form\n \n\n chCopy = deepcopy(self.chState) #we update this copy as we go\n\n for i, y in enumerate(ys):\n if y:\n #we want to know what U_c^\\dagger U X_i U^\\dagger U_c is\n #firstly we use the A-G info\n # U X_i U^\\dagger is the i'th destabiliser\n x = self.agState.x[self.n+i]\n z = self.agState.z[self.n+i]\n r = self.agState.r[self.n+i]\n\n #print(x,z,r)\n x_col = np.array([x]).T\n z_col = np.array([z]).T\n \n #now we apply U_c to this using the CH-form info\n x_mat = chCopy.F * x_col\n z_mat = (chCopy.M * x_col + chCopy.G*z_col) % np.uint8(2)\n r = (r + util.sort_pauli_string(x_mat, z_mat)) % np.uint8(2)\n\n u = (x @ chCopy.F) % np.uint8(2)\n h = (x @ chCopy.M + z @ chCopy.G) % np.uint8(2)\n\n g = (x @ (z + chCopy.g)) % np.uint8(4)\n\n #now U_c^dag U X_i U^dag U_C = (-1)^r i^g prod_j Z_j^{h_j} X_j^{u_j}\n #we want to conjugate this by U_H\n #everywhere chCopy.v == 1 we flip a z to an x and an x to a z\n #everywhere chCopy.v == 1 and u == 1 and h == 1 we need to swap the order of our x and z so we get a minus sign\n\n u2 = u*(np.uint8(1) ^ chCopy.v) ^ (h*chCopy.v)\n h2 = (u*chCopy.v) ^ (h*(np.uint8(1) ^ chCopy.v))\n\n r = (r + (u*h*chCopy.v).sum()) % np.uint8(2)\n \n \n #now U_H^dag U_c^dag U X_i U^dag U_C U_H = (-1)^r i^g prod_j Z_j^{h2_j} X_j^{u2_j}\n\n t = u2 ^ chCopy.s\n r = (r + h2 @ t) % np.uint8(2)\n\n #now we have w UC UH |s> = w (-1)^r (i)^g UC UH |t>\n\n if all(t == chCopy.s):\n chCopy.w *= np.exp(1j*np.pi/4) * (1 + (1j)**(g+2*r -1) )/ np.sqrt(2)\n else:\n phase, VCList, v, s = util.desuperpositionise(chCopy.s, t, (g+2*r -1)%np.uint8(4), chCopy.v)\n\n chCopy.w *= phase*np.exp(1j*np.pi/4)/np.sqrt(2)\n chCopy.v = v\n chCopy.s = s\n\n for gate in VCList:\n gate.rightMultiplyC(chCopy)\n \n return chCopy", "def trust_region_solver(M, g, d_max, max_iter=2000, stepsize=1.0e-3):\n x = g / np.linalg.norm(g) * d_max\n for _ in range(max_iter):\n # gradient ascent\n x = x + stepsize * (M @ x + g)\n # projection to sphere\n x = x / np.linalg.norm(x) * d_max\n ## debug\n #loss = 0.5 * x.T @ M @ x + g.T @ x\n #print(f'Loss: {loss}')\n return x", "def Gram_Schmidt(vecs, row_wise_storage=True, tol=1E-10,\n normalize=False, remove_null_vectors=False,\n remove_noise=False):\n # The algorithm below views vecs as a matrix A with the vectors\n # stored as columns:\n vecs = asarray(vecs) # transform to array if list of vectors\n if row_wise_storage:\n A = transpose(vecs).copy()\n else:\n A = vecs.copy()\n\n m, n = A.shape\n V = zeros((m,n))\n\n for j in range(n):\n v0 = A[:,j]\n v = v0.copy()\n for i in range(j):\n vi = V[:,i]\n\n if 
(abs(vi) > tol).any():\n v -= (vdot(v0,vi)/vdot(vi,vi))*vi\n V[:,j] = v\n\n if remove_null_vectors:\n indices = [i for i in range(n) if (abs(V[:,i]) < tol).all()]\n V = V[ix_(list(range(m)), indices)]\n\n if normalize:\n for j in range(V.shape[1]):\n V[:,j] /= linalg.norm(V[:,j])\n\n if remove_noise:\n V = cut_noise(V, tol)\n\n return transpose(V) if row_wise_storage else V", "def housegen(x):\n a = linalg.norm(x)\n if a == 0:\n u=x; u[0]=sqrt(2); return u, a\n if x[0] == 0:\n r = 1\n else:\n r =x[0]/abs(x[0])\n u = conj(r)*x/a\n u[0]=u[0]+1\n u=u/sqrt(u[0])\n a=-r*a\n return u, a", "def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n n = r.size\n err = beta\n v = r / beta\n\n # These will store the Givens rotations used to update the QR decompositions\n # of the Arnoldi matrices.\n # cos : givens[0, :]\n # sine: givens[1, :]\n givens = jnp.zeros((2, n_kry), dtype=x0.dtype)\n beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)\n beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)\n V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)\n V = jax.ops.index_update(V, jax.ops.index[:, 0], v)\n R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)\n\n # The variable data for the carry call. Each iteration modifies these\n # values and feeds the results to the next iteration.\n k = 0\n gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need.\n givens) # < Modified between iterations.\n gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)\n gmres_carry = (gmres_variables, gmres_constants)\n # The 'x' input for the carry call. Each iteration will receive an ascending\n # loop index (from the jnp.arange) along with the constant data\n # in gmres_constants.\n gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,\n gmres_krylov_work,\n gmres_carry)\n gmres_variables, gmres_constants = gmres_carry\n k, V, R, beta_vec, err, givens = gmres_variables\n return (k, V, R, beta_vec)", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(xrange(0, r.size)):\n running_add = running_add * gamma_099 + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def compute_advantage_montecarlo(V, s, ss, r, absorbing, gamma):\n r = r.squeeze()\n q = np.zeros(len(r))\n v = V(s).squeeze()\n\n q_next = V(ss[-1]).squeeze().item()\n for rev_k in range(len(r)):\n k = len(r) - rev_k - 1\n q_next = r[k] + gamma * q_next * (1. 
- absorbing[k])\n q[k] = q_next\n\n adv = q - v\n return q[:, np.newaxis], adv[:, np.newaxis]", "def update_parameters_with_adam(parameters, grads, v, s, t, lambd, learning_rate, mini_batch_size, beta1, beta2, epsilon):\n\n L = len(parameters) // 2 \n v_corrected = {}\n s_corrected = {}\n\n for l in range(L):\n v[\"dW\" + str(l + 1)] = beta1 * v[\"dW\" + str(l + 1)] + (1 - beta1) * grads['dW' + str(l + 1)]\n v[\"db\" + str(l + 1)] = beta1 * v[\"db\" + str(l + 1)] + (1 - beta1) * grads['db' + str(l + 1)]\n v_corrected[\"dW\" + str(l + 1)] = v[\"dW\" + str(l + 1)] / (1 - np.power(beta1, t))\n v_corrected[\"db\" + str(l + 1)] = v[\"db\" + str(l + 1)] / (1 - np.power(beta1, t))\n\n s[\"dW\" + str(l + 1)] = beta2 * s[\"dW\" + str(l + 1)] + (1 - beta2) * np.power(grads['dW' + str(l + 1)], 2)\n s[\"db\" + str(l + 1)] = beta2 * s[\"db\" + str(l + 1)] + (1 - beta2) * np.power(grads['db' + str(l + 1)], 2)\n s_corrected[\"dW\" + str(l + 1)] = s[\"dW\" + str(l + 1)] / (1 - np.power(beta2, t))\n s_corrected[\"db\" + str(l + 1)] = s[\"db\" + str(l + 1)] / (1 - np.power(beta2, t))\n parameters[\"W\" + str(l + 1)] = (1-learning_rate*(lambd/mini_batch_size))*parameters[\"W\" + str(l + 1)] \n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * v_corrected[\"dW\" + str(l + 1)] / np.sqrt(s_corrected[\"dW\" + str(l + 1)] + epsilon)\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * v_corrected[\"db\" + str(l + 1)] / np.sqrt(s_corrected[\"db\" + str(l + 1)] + epsilon)\n\n\n return parameters, v, s", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(xrange(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def sample_states_r(r):\n\n print('<', end='')\n\n states_r = states[r, :, :].copy()\n\n # shuffle the loads that get sampled together\n shuffled_load_IDs = np.random.permutation(num_load)\n\n for g in range(0, num_load, num_together):\n\n IDs = shuffled_load_IDs[g:g+num_together]\n\n loglhoodmtx = np.zeros((num_combos, num_data))\n for unique_combo in np.unique(degenerate_combos, axis=0):\n \"\"\"\n We save computation time by calculating the log likelihood\n only for unique brightness states and then assigning them\n to the corresponding states afterwards. For example if we\n have two states: dark and bright, and we are sampling the\n joint phototrajectory for fluorophores A and B, then\n A-bright-B-dark and A-dark-B-bright would have the same\n log likelihood. 
Rather than compute this twice we calculate\n the log likelihood for one-fluorophore-bright and assign it\n to both the above joint states.\n \"\"\"\n idx = (degenerate_combos == unique_combo).all(axis=1)\n for i in range(num_together):\n states_r[IDs[i], :] = unique_combo[i]\n brightness = mu_flor @ states_to_pops(states_r, num_states) + mu_back[r]\n loglhoodmtx[idx, :] = stats.gamma.logpdf(data[r,:], a=brightness, scale=gain)\n\n # demand final state is photobleached\n loglhoodmtx[:-num_end:, -1] = -np.inf\n loglhoodmtx[-1, -1] = 0\n\n # softmax the log likelihood matrix to take it out of log space\n lhoodmtx = np.exp(loglhoodmtx - np.max(loglhoodmtx, axis=0))\n lhoodmtx += (loglhoodmtx > -np.inf) * 1e-300 # for numerical stability\n\n # run forward-filter-backwards-sample algorithm using numba\n trajectory = FFBS(lhoodmtx, pi_comb)\n\n # convert from combined state space to regular state space\n states_r[IDs, :] = state_combos[trajectory, :].T\n\n print('>', end='')\n\n return states_r", "def GMRES_1(A, b, x0, max_iterations=50):\n\n last_x = x0\n curr_x = last_x\n last_r = b - A @ x0\n curr_iter = 0\n residual_queue = []\n while curr_iter < max_iterations:\n Ar = A @ last_r\n alpha = (last_r.transpose() @ Ar) / (Ar.transpose() @ Ar)\n curr_x = last_x + alpha * last_r\n curr_r = last_r - alpha * Ar\n c = np.linalg.norm(A @ curr_x - b, 2) / np.linalg.norm(b, 2)\n residual_queue.append(np.linalg.norm(A @ curr_x - b, 2))\n if curr_iter == max_iterations - 1:\n print_graph(residual_queue, curr_iter, \"residual\", \"GMRES(1)\")\n last_x = curr_x\n last_r = curr_r\n curr_iter += 1\n print(\"Number of Iterations: \" + str(curr_iter))\n\n return curr_x", "def generate_schreier_sims(self, af=False):\n\n n = self._degree\n u = self.basic_transversals\n basic_orbits = self._basic_orbits\n if len(u) == 0:\n for x in self.generators:\n if af:\n yield x._array_form\n else:\n yield x\n return\n if len(u) == 1:\n for i in basic_orbits[0]:\n if af:\n yield u[0][i]._array_form\n else:\n yield u[0][i]\n return\n\n u = list(reversed(u))\n basic_orbits = basic_orbits[::-1]\n # stg stack of group elements\n stg = [list(range(n))]\n posmax = [len(x) for x in u]\n n1 = len(posmax) - 1\n pos = [0]*n1\n h = 0\n while 1:\n # backtrack when finished iterating over coset\n if pos[h] >= posmax[h]:\n if h == 0:\n return\n pos[h] = 0\n h -= 1\n stg.pop()\n continue\n p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1])\n pos[h] += 1\n stg.append(p)\n h += 1\n if h == n1:\n if af:\n for i in basic_orbits[-1]:\n p = _af_rmul(u[-1][i]._array_form, stg[-1])\n yield p\n else:\n for i in basic_orbits[-1]:\n p = _af_rmul(u[-1][i]._array_form, stg[-1])\n p1 = _af_new(p)\n yield p1\n stg.pop()\n h -= 1", "def a(self, x, t0):\n dummy = 0.0 * t0\n x_1 = x[0]\n x_2 = x[1]\n x_3 = x[2]\n x_4 = x[3]\n x_5 = x[4]\n x_6 = x[5]\n x_7 = x[6]\n # Load parameters.\n e_1_tilde = self.e_1_tilde\n e_2_tilde = self.e_2_tilde\n r = self.rr\n z_1 = self.z_1\n z_1_tilde = self.z_1_tilde\n z_2_tilde = self.z_2_tilde\n alpha_1 = self.alpha_1\n alpha_2 = self.alpha_2\n alpha_2_tilde = self.alpha_2_tilde\n alpha_11_tilde = self.alpha_11_tilde\n alpha_12_tilde = self.alpha_12_tilde\n mu_H_tilde = self.mu_H_tilde\n mu_1_tilde = self.mu_1_tilde\n mu_2_tilde = self.mu_2_tilde\n sigma_H = self.sigma_H\n sigma_1_tilde = self.sigma_1_tilde\n sigma_2_tilde = self.sigma_2_tilde\n sigma_11 = self.sigma_11\n sigma_12 = self.sigma_12\n #\n # Right hand side of model.\n #\n f_1 = z_1 * alpha_1 * x_3 * (1.0 - x_1) - mu_H_tilde * x_1\n f_2 = (1.0 - 
x_2) * (\n x_3 * (z_1_tilde * alpha_11_tilde + z_2_tilde * alpha_12_tilde)\n + z_1_tilde * alpha_2_tilde * x_6) - mu_1_tilde * x_2\n f_3 = (z_1 * sigma_H * x_1 +\n (z_1_tilde * sigma_11 + z_2_tilde * sigma_12) * x_2) \\\n * (x_4 - x_3) - (e_1_tilde + x_4) * x_3\n f_4 = x_4 * (1 - x_4)\n f_5 = z_2_tilde * alpha_2 * x_6 * (1.0 - x_5) - mu_2_tilde * x_5\n f_6 = (z_1_tilde * sigma_1_tilde * x_2\n + z_2_tilde * sigma_2_tilde * x_5) * (x_7 - x_6) \\\n - (e_2_tilde + r * x_7) * x_6\n f_7 = r * x_7 * (1.0 - x_7)\n r = np.array([f_1, f_2, f_3, f_4, f_5, f_6, f_7])\n return r", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def test_numbers_can_substitute_vectors(free_alg, full_balance):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n y = IndexedBase('y')\n r = p.R\n i, j, k, l = symbols('i j k l')\n v = p.v\n w = Vec('w')\n\n orig = dr.sum((i, r), (j, r), x[i, j] * v[i] * w[j] + y[i, j] * v[i] * v[j])\n\n res = orig.subst(v[k], 0, full_balance=full_balance).simplify()\n assert res == 0\n res = orig.subst(v[i], 1, full_balance=full_balance).simplify()\n assert res == dr.sum((i, r), (j, r), x[j, i] * w[i] + y[i, j])", "def housegen(x):\n\n a = np.linalg.norm(x)\n if a == 0:\n u = x\n u[0] = np.sqrt(2)\n return u, a\n \n if x[0] == 0:\n r = 1\n else:\n r = x[0] / abs(x[0])\n\n u = np.conj(r) * x / a\n u[0] = u[0] + 1\n u = u / np.sqrt(u[0])\n \n a = -r*a\n\n return u, a", "def rbacksolve(A, b, d):\n n = len(b)\n b[n - 1] /= A[n - 1,n - 1]\n for k in range(n-2,-1,-1):\n uk = array([n, k + d + 1]).min()\n b[k] = b[k] - dot(A[k,(k+1):uk], b[(k+1):uk])\n b[k] /= A[k,k]", "def rk4_sde(self, x, rv_n):\n a21 = 2.71644396264860\n a31 = - 6.95653259006152\n a32 = 0.78313689457981\n a41 = 0.0\n a42 = 0.48257353309214\n a43 = 0.26171080165848\n a51 = 0.47012396888046\n a52 = 0.36597075368373\n a53 = 0.08906615686702\n a54 = 0.07483912056879\n\n q1 = 2.12709852335625\n q2 = 2.73245878238737\n q3 = 11.22760917474960\n q4 = 13.36199560336697\n\n n = self.mp.params[0]; k = self.mp.params[1];\n gamma = self.mp.params[2]; dt = self.mp.params[3];\n\n if x.get_shape()[1] > 1:\n evolve_fun = self.evolve_system\n else:\n evolve_fun = self.evolve\n\n x1 = x\n k1 = dt * evolve_fun(x1, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x2 = x1 + a21 * k1\n k2 = dt * evolve_fun(x2, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x3 = x1 + a31 * k1 + a32 * k2\n k3 = dt * evolve_fun(x3, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x4 = x1 + a41 * k1 + a42 * k2\n k4 = dt * evolve_fun(x4, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x_new = x1 + a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4\n\n return tf.cast(x_new, tf.float32)", "def lambert(k, r0, r, tof, short=True, numiter=35, rtol=1e-8):\n k_ = k.to(u.km ** 3 / u.s ** 2).value\n r0_ = r0.to(u.km).value\n r_ = r.to(u.km).value\n tof_ = tof.to(u.s).value\n\n v0, v = vallado_fast(k_, r0_, r_, tof_, short, numiter, rtol)\n\n yield v0 << kms, v << kms", "def _fit_ridge_alpha(trn_fs,trn_data,val_fs,val_data,alphas=DEFAULT_ALPHAS,\n chunk_sz=5000,is_efficient=True,dtype=np.single, is_verbose=False, pthr=0.005,\n square_alpha=False,return_resids=False): \n n_tps,n_voxels = trn_data.shape\n n_chunks = np.ceil(n_voxels/np.float(chunk_sz)).astype(np.int32)\n cc = np.zeros((n_voxels,len(alphas)),dtype=dtype)\n if return_resids:\n resids = np.zeros((n_tps,n_voxels,len(alphas)),dtype=dtype)\n pred_A = []\n if is_efficient:\n # Efficient 
Ridge regression from A. Huth, Part (1):\n # Full multiplication for validation (here, random split of\n # training data) prediction is: \n # pred = (Xval*Vx) * Dx * (pinv(Ux)*Ychunk) # NOTE: pinv(Ux) = Ux'\n # We will pre-compute the first and third terms in parentheses:\n # pred = XvalVx * Dx * UxYchunk\n if is_verbose: \n print('->Doing SVD of stimulus design matrix')\n t0 = time.time()\n #time.sleep(.01); # To ensure printing?\n m,n = trn_fs.shape\n if m>n:\n Ux,Sx,Vx = _utils._svd(trn_fs,full_matrices=False)\n else:\n Vx,Sx,Ux = _utils._svd(trn_fs.T,full_matrices=False)\n # Switcheroo of Vx and Ux due to transpose of input matrix\n Ux = Ux.T\n Vx = Vx.T\n\n if is_verbose:\n t1 = time.time()\n print('->Done with SVD in %0.2f sec'%(t0-t1))\n # For more efficient computation:\n #k = len(Sx) \n ## OR: \n ## singcutoff = (XX);\n ## k = sum(sx > singcutoff);\n ## sx = sx(1:k);\n XvalVx = val_fs.dot(Vx.T) # NOTE: IN MATLAB, No Vx', because Matlab leaves V in transposed form!\n else:\n raise NotImplementedError(\"Sorry, not done yet!\")\n\n for iChunk in range(n_chunks):\n print('Running chunk %d of %d...\\n'%(iChunk+1,n_chunks))\n ChIdx = np.arange(chunk_sz) + chunk_sz*iChunk\n ChIdx = ChIdx[ChIdx<n_voxels] # clip extra voxels in last run.\n Ychunk = trn_data[:,ChIdx]\n\n # Fit model with all lambdas (for subset of voxels)\n if not is_efficient:\n raise Exception('LAME! no slow reliable ridge implemented.')\n #[Wt L] = ridgemulti(X,Ychunk,params.lambdas);\n else:\n # Efficient Ridge regression from A. Huth, part (2)\n # NOTE: weights are never explicitly computed!\n UxYchunk = Ux.T.dot(Ychunk)\n \n if is_verbose:\n print('Checking model predictions...')\n for iA,A in enumerate(alphas):\n if not is_efficient:\n pred = np.cast(np.single)[Xval.dot(Wt[:,:,iA])]\n else:\n # Efficient Ridge regression from A. Huth, part (3)\n # Normalize lambda by Frobenius norm for stim matrix\n aX = A # * norm(X,'fro'); # ... 
or not\n # Need to decide for final whether aX**2 or not\n if square_alpha:\n Dx = Sx/(Sx**2 + aX**2) \n else:\n Dx = Sx/(Sx**2 + aX) \n # Compute predictions (XvalVx and UxYchunk computed above)\n # (mult diag is slightly faster than matrix multiplication in timing tests)\n pred = _utils.mult_diag(Dx, XvalVx, left=False).dot(UxYchunk) \n # Compute prediction accuracy (correlations)\n cc[ChIdx,iA]=_sutils.column_corr(pred,val_data[:,ChIdx])\n if return_resids:\n resids[:,ChIdx,iA] = val_data[:,ChIdx]-pred\n if return_resids:\n return cc,resids\n else:\n return cc", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def gram_schmidt(mat_a):\n # NOTE: We will use the same variable names as the ones in the\n # pseudo code for clarity\n rows_count = mat_a.shape[0]\n\n u = mat_a.copy()\n r = np.zeros_like(u)\n q = np.zeros_like(u)\n for i in range(rows_count):\n u_i = u[:, i]\n r[i, i] = np.linalg.norm(u_i)\n q[:, i] = u_i / r[i, i] if r[i, i] != 0 else 0\n q_i = q[:, i]\n\n r[i, i + 1:] = q_i.T.dot(u[:, i + 1:])\n # np.outer will multiply q_i by each number in r[i, i + 1:], and create\n # a matrix in which each column is a result of that multiplication\n u[:, i + 1:] -= np.outer(q_i, r[i, i + 1:])\n\n return q, r", "def test_batch_vector_substitutions(\n free_alg, full_balance, simplify\n):\n\n dr = free_alg\n p = dr.names\n\n a = IndexedBase('a')\n x = IndexedBase('x')\n y = IndexedBase('y')\n i, j = p.i, p.j\n v = p.v\n v_dag = Vec('v', indices=(CR,))\n\n #\n # Spin flipping\n #\n\n orig1 = dr.sum((i, p.R), (j, p.R), a[i, j] * v[i, UP] * v[j, DOWN])\n defs1 = [\n dr.define(v[i, UP], v[i, DOWN]), dr.define(v[i, DOWN], v[i, UP])\n ]\n\n # Sequentially apply the definitions of the substitutions\n expected_sequential = dr.sum(\n (i, p.R), (j, p.R), a[i, j] * v[i, UP] * v[j, UP]\n )\n res = orig1.subst_all(\n defs1, simult_all=False, full_balance=full_balance, simplify=simplify\n )\n assert res == expected_sequential\n\n # Simultaneously apply the definitions of the substitutions\n expected_simutaneous = dr.sum(\n (i, p.R), (j, p.R), a[i, j] * v[i, DOWN] * v[j, UP]\n )\n res = orig1.subst_all(\n defs1, simult_all=True, full_balance=full_balance, simplify=simplify\n )\n assert res == expected_simutaneous\n\n #\n # In-place BCS transformation\n #\n\n orig2 = dr.einst(\n a[i, j] * v_dag[i, UP] * v[j, UP] +\n a[i, j] * v_dag[i, DOWN] * v[j, DOWN]\n )\n defs2 = [\n dr.define(v_dag[i, UP], x[i] * v_dag[i, UP] - y[i] * v[i, DOWN]),\n dr.define(v_dag[i, DOWN], x[i] * v_dag[i, DOWN] + y[i] * v[i, UP]),\n dr.define(v[i, UP], x[i] * v[i, UP] - y[i] * v_dag[i, DOWN]),\n dr.define(v[i, DOWN], x[i] * v[i, DOWN] + y[i] * v_dag[i, UP]),\n ]\n\n # Sequentially apply the definitions of the substitutions\n expected_sequential = orig2\n for def_ in defs2:\n expected_sequential = def_.act(expected_sequential)\n expected_sequential = expected_sequential.simplify()\n res = orig2.subst_all(\n defs2, simult_all=False, full_balance=full_balance, simplify=simplify\n ).simplify()\n assert res == expected_sequential\n\n # Simultaneously apply the definitions of the substitutions\n expected_simutaneous = dr.sum(\n (i, p.R), (j, p.R), a[i, j] * (\n (x[i] * v_dag[i, UP] - y[i] * v[i, DOWN])\n * (x[j] * v[j, UP] - y[j] * v_dag[j, DOWN])\n + (x[i] * v_dag[i, DOWN] + y[i] * v[i, UP])\n * (x[j] * v[j, DOWN] + y[j] * v_dag[j, UP])\n )\n ).simplify()\n res = 
orig2.subst_all(\n defs2, simult_all=True, full_balance=full_balance, simplify=simplify\n ).simplify()\n assert res == expected_simutaneous", "def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray,\n beta_vec: jax.ShapedArray,\n x0: jax.ShapedArray) -> jax.ShapedArray:\n q = min(k, R.shape[1])\n y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q])\n x = x0 + V[:, :q] @ y\n return x", "def fAVM(RHOB,Dw,Ds,Df,Dc1,PHIc1,Ck,Dk,PHIk,RSK):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,Dw,Ds,Df,Dc1,PHIc1,Ck,Dk,PHIk,RSK):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n\tVk=0.000 # Initially assume no kerogen\n\tDh=Df\n#\n#\t5.1.3 Start iterative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.5.3.1 Organic and Inorganic Component Density Values:\n# -------------------------------------------------------\n\t\tDBI=(1-PHIc1)*Dc1+(PHIc1*Dw) # Bulk Density of Inorganic Component\n\t\tDBO=(1-PHIk)*Dk+(PHIk*Dh)# Bulk Density of Organic Component\n#\n# 5.1.3.2 Compute Volume of Organic and Inorganic Component:\n# ----------------------------------------------------------\n\t\tVOR=(DBI-RHOB)/(DBI-DBO)\n\t\tVOR=ImposeLimits(VOR,0,1)\n\t\tVIN=(1-VOR)\n#\n# 5.1.3.3 Compute Volumetrics, Total & Effective Porosity and Total & Effective Water Saturation:\n# ---------------------------------------\t-------------------------------------------------------\n\t\tVc1=VIN*(1-PHIc1)\n\t\tVc2=0.000\n\t\tVc3=0.000\n\t\tVk=VOR*(1-PHIk)\n\t\tPHIt=VIN*PHIc1+VOR*PHIk\n\t\tPHIe=VOR*PHIk\n\t\tSwt=1-((VOR*PHIk)/PHIt)\n\t\tSwt=ImposeLimits(Swt,0,1)\n\t\tSwe=0.000\n\t\tSxot=Swt\n\t\tSxoe=Swe\n#\n# 5.1.3.4 Compute Bulk Volume of Water, Hydrocarbon Pore Volume and Pore Space Fluid Properties:\n# ---------------------------------------\t------------------------------------------------------\n\t\tBVW=PHIe*Swe\n\t\tHCPV=PHIe*(1-Swe)\n\t\tVs=RSK*Vk # Estimate volume of adsorbed (sorbed) hydrocarbon\n\t\tVs=ImposeLimits(Vs,0,HCPV)\n\t\tVf=(HCPV-Vs)\n\t\tVf=ImposeLimits(Vf,0,(HCPV-Vs))\n#\n# 5.1.3.5 Recompute hydrocarbon properties in the pore space:\n# -----------------------------------------------------------\n\t\tSum=Vs+Vf\n\t\tif(Sum<=0.000):\n\t\t\tDh=Df\n\t\telse:\n\t\t\tDh=(Ds*Vs+Df*Vf)/(Vs+Vf)\n#\n# 5.1.4 Test for iterative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Vs,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Vs,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.1.6 Preoutput computations:\n# 
------------------------------\n\tQc=MissingValue\n\tDc2=0.00\n\tDc3=0.00\n\tCBW=PHIt-PHIe # The assumption is that all microporosity can be considered to be clay bound water.\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw)\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw)\n#\n# 5.5.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def func(r, *data):\n R, M, N = data # trans. rates, system size, particle number\n n1 = get_n1(r,N) # occupation number of groundstate\n n = np.zeros(M) # vector of mean occupation numbers\n n[0] = n1 \n n[1:] = r \n func = np.zeros(M) # implement all M equations at first\n A = R - np.transpose(R) # rate asymmetry matrix\n func = np.dot(A,n)*n + np.dot(R,n) - R.sum(axis=0) * n\n \n return func[1:] # slice away the last equation", "def runIteration(self, task, Sol, Fitness, xb, fxb, A, r, S, Q, v, **dparams):\n\t\tfor i in range(self.NP):\n\t\t\tA[i], r[i] = self.selfAdaptation(A[i], r[i])\n\t\t\tQ[i] = self.Qmin + (self.Qmax - self.Qmin) * self.uniform(0, 1)\n\t\t\tv[i] += (Sol[i] - xb) * Q[i]\n\t\t\tif self.rand() > r[i]: S[i] = self.localSearch(best=xb, A=A[i], task=task, i=i, Sol=Sol)\n\t\t\telse: S[i] = task.repair(Sol[i] + v[i], rnd=self.Rand)\n\t\t\tFnew = task.eval(S[i])\n\t\t\tif (Fnew <= Fitness[i]) and (self.rand() < (self.A_l - A[i]) / self.A): Sol[i], Fitness[i] = S[i], Fnew\n\t\t\tif Fnew <= fxb: xb, fxb = S[i].copy(), Fnew\n\t\treturn Sol, Fitness, xb, fxb, {'A': A, 'r': r, 'S': S, 'Q': Q, 'v': v}", "def compute_normalization_binary_search(activations, t, num_iters=10):\n mu = tf.math.reduce_max(activations, -1, keepdims=True)\n normalized_activations = activations - mu\n shape_activations = tf.shape(activations)\n effective_dim = tf.cast(\n tf.math.reduce_sum(\n tf.cast(\n tf.greater(normalized_activations, -1.0 / (1.0 - t)), tf.int32),\n -1,\n keepdims=True), tf.float32)\n shape_partition = tf.concat([shape_activations[:-1], [1]], 0)\n lower = tf.zeros(shape_partition)\n upper = -log_t(1.0 / effective_dim, t) * tf.ones(shape_partition)\n\n def iter_body(i, lower, upper):\n logt_partition = (upper + lower) / 2.0\n sum_probs = tf.math.reduce_sum(exp_t(\n normalized_activations - logt_partition, t), -1, keepdims=True)\n update = tf.cast(tf.less(sum_probs, 1.0), tf.float32)\n lower = tf.reshape(lower * update + (1.0 - update) * logt_partition,\n shape_partition)\n upper = tf.reshape(upper * (1.0 - update) + update * logt_partition,\n shape_partition)\n return [i + 1, lower, upper]\n\n _, lower, upper = for_loop(num_iters, iter_body, [0, lower, upper])\n logt_partition = (upper + lower) / 2.0\n return logt_partition + mu", "def r_combinations(n,r):\n return r_permutations(n,r) / math.factorial(r)", "def davidson_solver(ax_function, preconditioner, guess, e_conv=1.0E-8, r_conv=None, no_eigs=1, max_vecs_per_root=10, maxiter=100):\n\n if r_conv == None:\n r_conv = e_conv * 100\n d_tol = 1.0E-8\n\n # using the shape of the guess vectors to set the dimension of the matrix\n N = guess.shape[0]\n\n #sanity check, guess subspace must be at least equal to number of eigenvalues\n nli = guess.shape[1]\n if nli < no_eigs:\n raise ValueError(\"Not enough guess vectors provided!\")\n\n nl = nli\n converged=False\n count = 0\n sub_count = nli\n A_w_old = np.ones(nli)\n max_ss_size = nli * max_vecs_per_root\n B = 
np.zeros((N,N))\n B[:,:nli] = guess\n\n ### begin loop\n while count < maxiter:\n active_mask = [True for x in range(nl)]\n # Apply QR decomposition on B to orthogonalize the new vectors w.r.t. all other subspace vectors\n ## orthogonalize preconditioned residuals against all other vectors in the search subspace\n B, r = np.linalg.qr(B)\n\n # compute sigma vectors corresponding to the new vectors sigma_i = A B_i\n sigma = np.zeros((N,nl))\n for i in range(nl):\n bvec = B[:,i]\n sigma[:,i] = ax_function(B[:,i])\n\n # compute subspace matrix A_b = Btranspose sigma\n A_b = np.dot(B[:,:nl].T, sigma)\n\n # solve eigenvalue problem for subspace matrix; choose n lowest eigenvalue eigpairs\n A_w, A_v = np.linalg.eig(A_b)\n\n # sorting eigenvalues and corresponding eigenvectors\n A_v = A_v[:, A_w.argsort()]\n A_w = A_w[A_w.argsort()]\n\n # here, check if no residuals > max no residuals, if so, collapse subspace\n sub_count = A_v.shape[0]\n if sub_count >= max_ss_size:\n print(\"Subspace too big. Collapsing.\\n\")\n Bnew = np.zeros((N,N))\n Bnew[:,:nli] = np.dot(B[:,:nl], A_v[:,:nli])\n B = Bnew\n nl = nli\n continue\n # else, build residual matrix\n ## residual_i = sigma * eigvec - eigval * B * eigvec\n norm = np.zeros(nli)\n for i in range(0, nli):\n mat = A - A_w[i] * np.identity(N) \n residual = np.dot(mat, np.dot(B[:,:sub_count], A_v[:,i]))\n\n ## check for convergence by norm of residuals\n norm[i] = np.linalg.norm(residual)\n ##apply the preconditioner (A_ii - A_v_i)^-1\n precon_resid = preconditioner(residual, i, A, A_w)\n\n ## normalize and add to search subspace if they're larger than a threshold\n if np.linalg.norm(precon_resid) > d_tol:\n B[:,nl+1] = precon_resid\n nl += 1\n\n # check for convergence by diff of eigvals and residual norms\n check = norm < r_conv\n eig_norm = np.linalg.norm(A_w[:no_eigs] - A_w_old[:no_eigs])\n A_w_old = A_w\n if(check.all() == True and eig_norm < e_conv):\n converged = True\n break\n count += 1 \n\n if converged:\n print(\"Davidson converged at iteration number {}. \\n Eigenvalues: {} \\n Eigenvectors: {}\".format(count, A_w[:no_eigs], A_v[:,:no_eigs]))\n else:\n print(\"Davidson did not converge. Max iterations exceeded.\")" ]
[ "0.5948944", "0.5672769", "0.55784595", "0.5571189", "0.544097", "0.5343572", "0.5310347", "0.5287816", "0.5228561", "0.5202186", "0.5167944", "0.5159567", "0.5158173", "0.5155729", "0.51552105", "0.51497984", "0.5149065", "0.51292133", "0.5123503", "0.5118129", "0.510955", "0.5108631", "0.5101021", "0.5090278", "0.5085091", "0.5066961", "0.50533134", "0.50024366", "0.49970433", "0.49951166", "0.49887958", "0.49874768", "0.4981745", "0.49736103", "0.49642232", "0.49466377", "0.4943535", "0.4928568", "0.49249506", "0.4922128", "0.49162045", "0.4911453", "0.49039307", "0.49020365", "0.48937356", "0.48930892", "0.48824695", "0.48793972", "0.48726547", "0.48713288", "0.48569766", "0.48564562", "0.48562092", "0.4855489", "0.48501253", "0.48486626", "0.4848494", "0.48473203", "0.484134", "0.48377126", "0.48369303", "0.48362562", "0.4833943", "0.48315763", "0.4831106", "0.48310006", "0.48301962", "0.4828882", "0.4827368", "0.48227957", "0.48207834", "0.481878", "0.4816783", "0.48163214", "0.4806658", "0.48046005", "0.48041812", "0.4799683", "0.47977927", "0.47965378", "0.4795326", "0.4794527", "0.47844708", "0.47840598", "0.4783123", "0.47803465", "0.47798374", "0.47795388", "0.4775487", "0.47752172", "0.47743514", "0.47679165", "0.4765992", "0.47591507", "0.47561994", "0.47549462", "0.47548503", "0.4754445", "0.4752354", "0.47511482" ]
0.50629514
26
Performs the kth iteration of the Arnoldi reduction procedure.
def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence, V: jax.ShapedArray, H: jax.ShapedArray, tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n  v = A_mv(V[:, k], *A_args)\n  v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T)\n  v_norm = jnp.linalg.norm(v_new)\n  r_new = v_new / v_norm\n  # Normalize v unless it is the zero vector.\n  r_new = jax.lax.cond(v_norm > tol,\n                       lambda x: x[0] / x[1],\n                       lambda x: 0.*x[0],\n                       (v_new, v_norm))\n  H = jax.ops.index_update(H, jax.ops.index[:, k], H_k)\n  H = jax.ops.index_update(H, jax.ops.index[k+1, k], v_norm)\n  V = jax.ops.index_update(V, jax.ops.index[:, k+1], r_new)\n  return V, H
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fn(k):\n if k <= 1: return 1\n return fn(k-1) + fn(k-2)", "def fn(n, k):\n if n == 1: return k # base case \n return sum(fn(n-1, kk) for kk in range(1, k+1))", "def count_k(n, k):\n if n == 0:\n return 1\n elif n < 0:\n return 0\n else:\n total = 0\n i = 1\n while i <= k:\n total += count_k(n - i, k)\n i += 1\n return total", "def iterate(rk):\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk", "def fn(n, k):\n if n == k: return 1\n if k == 0: return 0\n return ((n-1)*fn(n-1, k) + fn(n-1, k-1)) % 1_000_000_007", "def choose(n, k):\n ans, k = 1, min(k, n-k)\n for i in range(k):\n ans *= n-i\n ans //= i+1\n return ans", "def factorial(k):\n fact = 1\n for i in range(1, k + 1):\n fact *= i\n return fact", "def get_kth_ugly_number(k):\n count = 0; i = 0\n while count < k:\n i += 1\n if is_ugly(i):\n count += 1\n return i", "def dynamic_iteration(k, n):\n # If only one egg remains, n attempts must be made to find the correct floor.\n if k == 1:\n return n\n # Lookup table for previous solutions.\n W = [[0 for y in range(n + 1)] for x in range(k)]\n # Initialize the first row.\n for i in range(n + 1):\n W[0][i] = i\n # Start on second row, working downward.\n for i in range(1, k):\n # Calculate values for each cell.\n for j in range(1, n + 1):\n W[i][j] = min((max(W[i][j - x], W[i - 1][x - 1]) for x in range(1, j + 1))) + 1\n # Return the result.\n return W[k - 1][n]", "def count_func(k, x_k, arg):\n if k == 0:\n return arg\n else:\n return x_k + 1", "def combin(n, k):\n\tif k > n//2:\n\t\tk = n-k\n\tx = 1\n\ty = 1\n\ti = n-k+1\n\twhile i <= n:\n\t\tx = (x*i)//y\n\t\ty += 1\n\t\ti += 1\n\treturn x", "def n_choose_k(N,K):\n return factorial(N) // (factorial(N - K) * factorial(K))", "def fn(n, k):\n if k == 0: return 1 \n if n <= 0 or k < 0: return 0 \n return fn(n-1, k) + fn(n, k-1) - fn(n-1, k-n)", "def fn(k, i, j):\n if not (0 <= i < N and 0 <= j < N): return 0\n if k == 0: return 1 \n return 1/8*sum(fn(k-1, i+ii, j+jj) for ii, jj in ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)))", "def fn(m, k):\n if m == 0 or k == 0: return 0 \n return 1 + fn(m-1, k-1) + fn(m-1, k)", "def dimension_reduction(X, k=10):\n cov = cov_generation(X)\n eig, eigv, _, _ = jacobi_loop(cov)\n sort_args = np.argsort(np.abs(eig))[::-1]\n projection_matrix = eigv[sort_args][:, :k]\n reduce_x = np.dot(X, projection_matrix)\n \n return projection_matrix, reduce_x", "def adjust_k(self, ):\n self.iteration += 1\n\n if self.max_violation:\n self.k = 1\n return 1.\n\n self.k = (1.-self.beta**np.float(self.iteration))\n return self.k", "def adjust_k(self, ):\n self.iteration += 1\n\n if self.max_violation:\n self.k = 1\n return 1.\n\n self.k = (1.-self.beta**np.float(self.iteration))\n return self.k", "def __init__(self, k):\n self.k = k\n self.N = 2**self.k", "def _kth_to_last_iterative(self, head, k): #\n node1 = head\n node2 = None \n flag = False \n i = 0 \n while node1 is not None:\n i += 1 \n node1 = node1.next_node \n if flag: \n node2 = node2.next_node \n if i == k:\n flag = True \n node2 = head \n return node2", "def n_choose_k(n: int, k: int) -> int:\n # Edge case, no possible way to choose.\n if k > n or k < 0 or n < 0: return 0\n # We choose the min of k or n - k\n # since nCk == nC(n - k).\n k = min(k, n - k)\n # The numerator represents the product\n # n * (n - 1) * (n - 2) * ... 
* (n - k - 1)\n numerator = reduce(mul, range(n, n - k, -1), 1)\n # The denominator represents the product\n # 1 * 2 * ... * k\n denominator = reduce(mul, range(1, k + 1), 1)\n # return the result as an integer.\n return numerator // denominator", "def KRC(self, ik, ipd, ipl, t):\n idx = ik - 1\n\n den1 = 1 - self.delta[idx] * self.coca.PK(ik, t)\n num1 = self.delta[idx] * self.thetak[idx]\n ins = num1 / den1\n\n for l in np.arange(0, self.L):\n pl = self.coca.PL(l, t)\n ins += ((self.thetal[l] * self.gamma[l][idx]) / (1 - pl))\n\n ans = ipd * np.exp(t * ipl) * ins\n\n return ans", "def combinln(n, k):\n return (special.gammaln(n + 1) - (special.gammaln(k + 1) +\n special.gammaln(n - k + 1)))", "def a_mix_partial_k(s, p, k=1, phase='x'): # (Validated)\n amix = a_mix(s, p, phase)\n\n Term1 = (1 - 1/p.m['r'] - 1/p.m['s']) * amix\n Term2 = 1/p.m['r'] * amix**(1 - 1/p.m['r'])\n\n # CT1 = SUM^n_i=1 x_i * a_ik^s * [SUM^n_j=1 (x_j * a_ij^s)^(r/s - 1)]\n Sigma2 = 0\n for i in range(1, p.m['n']+1):\n # SUM^n_j=1 x_j * a_ij ^ s\n Sigma1 = 0\n for j in range(1, p.m['n']+1):\n Sigma1 += s.c[j][phase] * a_ij(s, p, i, j)**p.m['s']\n\n # SUM^n_j=1 (x_j * a_ij^s)^(r/s - 1)\n Sigma1rs = Sigma1**(p.m['r']/p.m['s'] - 1)\n\n # x_i * a_ik^s * [SUM^n_j=1 (x_j * a_ij^s)^(r/s - 1)]\n Sigma2 += s.c[i][phase] * (a_ij(s, p, i, k)**p.m['s']) * Sigma1rs\n\n CT1 = (p.m['r']/p.m['s']) * Sigma2\n\n # CT2 = (SUM^n_j=1 (x_j * a_ij^s)^(r/s)\n Sigma3 = 0\n for j in range(1, p.m['n']+1):\n Sigma3 += s.c[j][phase] * a_ij(s, p, k, j)**p.m['s']\n\n CT2 = Sigma3**(p.m['r']/p.m['s'])\n\n return Term1 + Term2 * (CT1 + CT2)", "def kth_smallest_alt(arr1, arr2, k):\n pass", "def __kappa_mle(self, k, R):\n return (iv(1, k) / iv(0, k)) - R", "def combination(n, k):\n if (k > n) or (n < 0) or (k < 0):\n return 0\n val = 1\n for j in range(min(k, N - k)):\n val = (val * (N - j)) // (j + 1)\n return val", "def k(self):\n return add(self.k_b(), self.k_m())", "def nchoosek(n, k):\n if n < k:\n return 0\n return partition(n, [k, n - k])", "def mult(self, p, k):\n res = None\n while k != 0:\n if k % 2 == 1:\n res = self.add(res, p)\n p = self.add(p, p)\n k //= 2\n return res", "def choose(n, k):\n\n if n == k:\n return 1\n elif k == 1:\n return n\n elif k == 2:\n return n * (n - 1) // 2\n else:\n return fact(n) // (fact(n - k) * fact(k))", "def falling(n, k):\n total, i = 1, 0\n if k==0: \n \treturn 1\n else: \n \twhile i < k: \n \t\ttotal = total * n \n \t\tn = n-1\n \t\ti = i+1\n \treturn total", "def permutations(k: int) -> int:\n return factorial(k)", "def C(n,k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0", "def k_rank_approximate(doc_matrix, k):\n return []", "def cdf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n ans = 0\n for i in range(0, k + 1):\n ans += self.pmf(i)\n return ans", "def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:\n\n if not nums:\n return 0\n\n if k <= 1:\n return 0\n\n count = 0\n lo = 0\n product = 1\n for hi in range(len(nums)):\n product *= nums[hi]\n while product >= k:\n product /= nums[lo]\n lo += 1\n count += hi - lo + 1\n return count", "def fn(i, k):\n if k < 0: return inf # impossible \n if i == 0: return 0 \n return min(ceil((fn(i-1, k) + dist[i-1])/speed) * speed, dist[i-1] + fn(i-1, k-1))", "def nCk(n, k):\n return factorial(n)//factorial(k)//factorial(n-k)", "def choose(n: int, k: int) -> int:\n return permute(n, k) // factorial(k)", "def main(l, k):\n 
S = 0\n T = product(xrange(2), repeat=k)\n for ts in T:\n tmp = []\n\n for t, c in zip(ts, cs):\n tmp.append(((-1)*c)**t)\n\n S += (sum(tmp)**l)\n val = (sum(tmp)**l)\n print val\n return S / float(2**(k))", "def main(l, k):\n S = 0\n T = product(xrange(2), repeat=k)\n for ts in T:\n tmp = []\n\n for t, c in zip(ts, cs):\n tmp.append(((-1)*c)**t)\n\n S += (sum(tmp)**l)\n val = (sum(tmp)**l)\n print val\n return S / float(2**(k))", "def get_result_k(att_trees, data):\n data_back = copy.deepcopy(data)\n all_ncp = []\n all_rtime = []\n all_pollution = []\n deletion_all_ncp = []\n deletion_all_rtime = []\n # for k in range(5, 105, 5):\n for k in [2, 5, 10, 25, 50, 100]:\n if __DEBUG:\n print '#' * 30\n print \"K=%d\" % k\n print \"Enhanced Mondrian\"\n _, eval_result = mondrian(att_trees, data, k)\n data = copy.deepcopy(data_back)\n all_ncp.append(round(eval_result[0], 2))\n all_rtime.append(round(eval_result[1], 2))\n all_pollution.append(round(eval_result[2], 2))\n if __DEBUG:\n print \"NCP %0.2f\" % eval_result[0] + \"%\"\n print \"Running time %0.2f\" % eval_result[1] + \"seconds\"\n print \"Missing Pollution = %.2f %%\" % eval_result[2]\n print \"Mondrian\"\n _, eval_result = mondrian_delete_missing(att_trees, data, k)\n data = copy.deepcopy(data_back)\n if __DEBUG:\n print \"NCP %0.2f\" % eval_result[0] + \"%\"\n print \"Running time %0.2f\" % eval_result[1] + \"seconds\"\n deletion_all_ncp.append(round(eval_result[0], 2))\n deletion_all_rtime.append(round(eval_result[1], 2))\n print \"Mondrian\"\n print \"All NCP\", deletion_all_ncp\n print \"All Running time\", deletion_all_rtime\n print \"Enhanced Mondrian\"\n print \"All NCP\", all_ncp\n print \"All Running time\", all_rtime\n print \"Missing Pollution\", all_pollution", "def nchoosek(n, k):\n if (n, k) in known:\n return known[(n,k)]\n if k == 0:\n return 1\n if n == k:\n return 1\n if n < k:\n return \"n must be greater than k\"\n result = nchoosek(n - 1, k - 1) + nchoosek(n - 1, k)\n known[(n,k)] = result\n return result", "def find_k(i, j):\n\n result = ((i * i) + (j * j)) ** 0.5\n return result", "def learn_ICA(X, k):\n\n # TODO: YOUR CODE HERE", "def fn(n, k):\n if n <= k: return 0 # one mailbox for each house\n if k == 1: return mdist[0][n-1]\n ans = inf \n for nn in range(k-1, n): \n ans = min(ans, fn(nn, k-1) + mdist[nn][n-1])\n return ans", "def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)", "def kpnext(self, k: int) -> int:\n result = self._read_inline(f\"kpnext({k})\")\n return int(result)", "def all_kmers(k):\n for i in range(0, 4 ** k):\n res = number_to_kmer(i, k)\n yield res", "def transform(i, j, k):\n return i * N * N + j * N + k + 1", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def ALRA(X, k=None, n_iter=10):\n if k is None:\n k = choose_k(X)\n log.info(f\"No `k` given. 
Automatically determined `k={k}`.\")\n\n # Compute the SVD and compute the rank-k reconstruction\n U, s, Va = pca(X, k=k, n_iter=n_iter, raw=True)\n X_rank_k = U * s @ Va\n\n X_rank_k = np.ma.masked_array(X_rank_k)\n\n # Find the absolute values of the minimum expression levels for each gene\n minimum_expressions = np.abs(np.min(X_rank_k, axis=0))\n # Zero out all expressions with values below the gene minimum value\n X_rank_k[X_rank_k <= minimum_expressions] = np.ma.masked\n\n # Rescale the expressions so the first two moments match the original matrix\n X_mean, X_std = nonzero_mean(X, axis=0), nonzero_std(X, axis=0, ddof=1)\n X_rk_mean, X_rk_std = X_rank_k.mean(axis=0), X_rank_k.std(axis=0, ddof=1)\n\n scale = X_std / X_rk_std\n translate = -X_rk_mean * scale + X_mean\n\n scale_columns = ~np.isnan(X_std) & ~np.isnan(X_rk_std)\n X_rank_k[:, scale_columns] *= scale[scale_columns]\n X_rank_k[:, scale_columns] += translate[scale_columns]\n\n # Values can become negative during rescaling, so we zero those out\n X_rank_k[X_rank_k < 0] = np.ma.masked\n\n # Restore potentially zeroed out expression values which appeared in the\n # original expression matrix. Where both values are non-zero, prefer the\n # rank-k approximation\n zeroed_out_indices = find_zeroed_indices(X_rank_k, X)\n X_rank_k[zeroed_out_indices] = X[zeroed_out_indices]\n\n log.info(\n f\"{len(zeroed_out_indices[0])} original expression values were \"\n f\"zeroed out during imputation and restored to original values.\"\n )\n\n X_rank_k = X_rank_k.filled(0)\n\n return X_rank_k", "def tri_recursion(k):\r\n if(k>0):\r\n result = k + tri_recursion(k-1)\r\n # print(result)\r\n else:\r\n result = 0\r\n\r\n return result", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)", "def ref_main(l, k):\n S = sum(binomial(k, z)*(2*z-k)**l for z in range(k+1))\n return S / 2**k", "def ref_main(l, k):\n S = sum(binomial(k, z)*(2*z-k)**l for z in range(k+1))\n return S / 2**k", "def falling(n, k):\n \"*** YOUR CODE HERE ***\"\n result, i = 1, 0\n while i < k:\n result = result * n\n n -= 1\n i += 1\n return result", "def choose(n, k):\r\n if 0 <= k <= n:\r\n ntok = 1\r\n ktok = 1\r\n for t in range(1, min(k, n - k) + 1):\r\n ntok *= n\r\n ktok *= t\r\n n -= 1\r\n return ntok // ktok\r\n else:\r\n return 0", "def permute(n: int, k: int) -> int:\n\n # no possible permutations if k > n\n if n < k:\n return 0\n\n # if faster, compute n! and (n - k)! and return their quotient\n fact_count = len(_factorial_sequence)\n if n - fact_count <= k:\n return factorial(n) // factorial(n - k)\n\n # compute the product (n - k + 1) * (n - k + 2) * ... 
* n\n return seqs.arithmetic_product(n - k + 1, k)", "def choose(n, k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0", "def choose(n, k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0", "def fn(n, k):\n if k == 1: return n \n if n == 0: return 0 \n ans = inf \n for x in range(1, n+1): \n ans = min(ans, 1 + max(fn(x-1, k-1), fn(n-x, k)))\n return ans", "def nCkarray(*k_values):\n result = 1\n for i, j in enumerate((m for k in k_values for m in range(1, k+1)), 1):\n result = (result * i) // j\n return result", "def kmedian_procedure(data_list, k, alpha=1, max_iter=100, groups_list=None, distances=None,\n client_list=None, remaining_k=-1):\n if not distances:\n distances = calc_distances(data_list)\n if not client_list:\n client_list = range(len(data_list))\n if remaining_k == -1:\n remaining_k = k\n num = len(data_list)\n random_begin = [client_list[random.randint(0, len(client_list)-1)] for x in range(remaining_k)]\n cur_acc = cal_dis(data_list, random_begin, distances)\n cur_state = random_begin\n flag = True\n cnt = 0\n while flag and cnt < max_iter:\n cnt += 1\n flag = False\n for i in client_list:\n for x in range(remaining_k):\n temp = cur_state\n temp[x] = i\n local_move_acc = cal_dis(data_list, temp, distances)\n if cur_acc > local_move_acc:\n cur_acc = local_move_acc\n cur_state = temp\n flag = True\n if flag:\n break\n if flag:\n break\n return cur_state, cur_acc", "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def f_exact(n, k):\n def fact(m):\n return math.factorial(m)\n\n partition = part(n, k)\n\n total = 0\n for p in partition:\n product = 1\n nodes_left = n\n counts = dict([(x, len(list(y))) for x, y in itertools.groupby(p)])\n for num in p:\n product *= fact(num - 1) * comb(nodes_left, num)\n nodes_left -= num\n for num in counts:\n product /= fact(counts[num])\n\n total += product\n return int(total)", "def fn(x):\n while mp.get(x, 0) < k: \n mp[x] = 1 + mp.get(x, 0)\n fn(x[1:] + str(mp[x]-1))\n if not ans: ans.append(x)\n else: ans.append(x[0])", "def choose(n, k):\n # http://stackoverflow.com/a/3025547/313967\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0", "def calc(k):\n n = factorial(4*k) * (1103.0 + 26390.0*k)\n d = factorial(k)**4 * 396.0**(4.0*k)\n z = n/d\n return z", "def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def idcg(k):\n res = sum([1.0 / math.log(i + 2, 2) for i in range(k)])\n if not res:\n return 1.0\n else:\n return res", "def rotate3(self, nums, k) -> None:\n k = k % len(nums)\n count = 0\n for i in range(len(nums)):\n if count >= len(nums):\n break\n current = i\n previous = nums[i]\n while True:\n next = (current + k) % len(nums)\n temp = nums[next]\n nums[next] = previous\n previous = temp\n current = next\n count += 1\n if(i == current):\n break", "def combinations(n, k):\n return factorial(n) / (factorial(k) * factorial(n - k))", "def _MStep(x, z, k):\n dim = x.shape[1]\n centers = np.repeat(np.reshape(x.mean(0), (1, dim)), k, 0)\n for q in range(k):\n if np.sum(z == q) == 0:\n pass\n else:\n centers[q] = np.mean(x[z == q], 0)\n return centers", "def partitions(n, k):\n if k == 1:\n yield (n,)\n return\n for i in range(1, n):\n for p in 
partitions(n-i, k-1):\n yield (i,) + p", "def MCS(n,k):\n\tglobal dict_all\n\tdict_val=copy.deepcopy(dict_all)\n\t#start_time = time.time()\n\tfinal = {}\t\t\t\t\t # Store all result with the count as key. For example final[1]=[[1,0,0],[0,1,1]]\n\tseq = []\t\t\t\t\t\t# Store the count with no duplication\n\tfor i in range(n):\n\t\tleaf={}\t\t\t\t\t\t# leaf is the dictionary to store the random value of each leaf\n\t\t#count=0\n\t\tfor i in leaves:\n\t\t\tleaf[i] = choice([0,1])\n\t\t\tdict_val[i]=leaf[i]\n\t\t\t#count += leaf[i]\n\t\tresult = Cal_FT(dict_val)\t\n\t\t'''\n\t\tif result:\n\t\t\tcutset = []\n\t\t\tfor i in leaves:\n\t\t\t\tcutset.append(str(leaf[i]))\n\t\t\tcutset=\"\".join(cutset)\n\t\t\tif cutset not in final:\n\t\t\t\tfinal[cutset]=count\n\tfinal_sorted=sorted(zip(final.values(),final.keys())) \t\t\t\t#Order the cutset by its count\n\tfor i in range(k):\t\t\t\t\t\t\t\t\t\t\t\t\t#Print the first k result\n\t\tcutset=list(final_sorted[i][1])\n\t\tresult=[]\n\t\tfor index in range(len(cutset)):\n\t\t\tif cutset[index] is \"1\":\n\t\t\t\tresult.append(leaves[index])\n\t\tprint result\n\t#end_time=time.time()\n\t#print \"Running time is\", end_time-start_time\n\t'''", "def perms(n, k):\n if n < k:\n return 0\n return partition(n, [n - k])", "def stirling(k, r) :\n\n return sum((-1)**(r-i)*binomial(r, i)*i**k for i in range(r+1)) / math.factorial(r)", "def F(N,k=0) :\n accum = 0.0\n for i in xrange(1,N+1-k) :\n accum += (1.0+F(N-1,k+i-1))/N\n return accum", "def recall_at_k(r, max_rel, k = None):\n assert k is None or k >= 1\n r = r[:k]\n r = np.asarray(r) != 0\n if np.sum(r) > max_rel:\n raise ValueError('Number of relevant documents retrieved > max_rel')\n return np.sum(r) / max_rel", "def n_choose_kv(newK):\n values = np.zeros((1,newK+1))\n ks = np.arange(newK+1)\n \n for i in range(newK+1):\n values[i] = scipy.misc.comb(newK, ks[i])\n\n return values", "def _K(self, n, s, t):\n if min(len(s), len(t)) < n:\n return 0\n else:\n part_sum = 0\n for j in range(1, len(t)):\n if t[j] == s[-1]:\n part_sum += self._K1(n - 1, s[:-1], t[:j])\n result = self._K(n, s[:-1], t) + self.lambda_decay ** 2 * part_sum\n return result", "def m_step(data, p_k_x):\r\n N = data.shape[0]\r\n D = data.shape[1]\r\n K = p_k_x.shape[1]\r\n\r\n Nk = np.sum(p_k_x, axis=0)\r\n p_i_j_new = np.empty((K, D))\r\n\r\n for k in range(K):\r\n p_i_j_new[k] = np.sum(p_k_x[:, k][:, np.newaxis] * data, axis=0) / Nk[k]\r\n\r\n return Nk / N, p_i_j_new", "def nCr(n, k):\n if n < k:\n return 0\n f = math.factorial\n return f(n) / f(k) / f(n - k)", "def rotate1(self, nums, k) -> None:\n foo = None\n previous = None\n for i in range(k):\n previous = nums[len(nums)-1]\n for j in range(len(nums)):\n foo = nums[j]\n nums[j] = previous\n previous = foo", "def pr_at_k(rels, expected_count, k):\n k = min(k, len(rels))\n TP = sum(rels[:k])\n FP = k - TP\n FN = expected_count - TP\n TN = len(rels[k:]) - sum(rels[k:])\n assert TN >= 0.0\n return TP / (TP + FP), TP / (TP + FN), TP / (TP + TN) if TP + TN > 0 else 0", "def iteration(self, A, b, n, mode, l):\n # Step 0, qr factorization of C, and create Q_1, -inverse(rows+lI)\n rows, cols = A.shape\n\n C = self.concatMatrix(A, ma.pow(l, 0.5), cols)\n q, r = np.linalg.qr(C)\n q1 = q[0:rows, 0:cols]\n r1 = r[0:rows, 0:cols]\n\n # coe second approach is: (R^tR)^-1\n coe = - np.linalg.inv(np.dot(np.transpose(r), r))\n\n # Step 1, x_k = inv(R) * transpose(Q_1) * b\n inv_r = np.linalg.inv(r1)\n trans_q1 = np.transpose(q1)\n xk = np.dot(np.dot(inv_r, trans_q1), b)\n term = []\n 
term.append(self.to_array(xk))\n\n # Step 2, iteration\n if mode == 1:\n sk = xk\n for k in range(1, n + 1):\n sk = np.dot(coe, sk)\n tk = ma.pow(-1, k) * ma.pow(l, k) * sk\n xk = xk + tk\n term.append(self.to_array(xk))\n\n if mode == 2:\n tk = xk\n for k in range(1, n + 1):\n t = l * tk\n tk = np.dot(-coe, t)\n xk = xk + tk\n term.append(self.to_array(xk))\n\n return xk", "def _kshape(x, k, n_init=1, max_iter=100, n_jobs = 1, random_state=None,normalize=True ):\r\n #print \"n jobs run in parallel: \" + str(cpu_count() ) \r\n random_state = check_random_state(random_state)\r\n best_tot_dist,best_centroids,best_idx = None,None,None\r\n \r\n if n_jobs ==1:\r\n\r\n for i_init in range(n_init): \r\n # n_init is the number of random starting points\r\n # pdb.set_trace()\r\n \r\n idx, centroids,tot_dist = _kshape_single(x, k, max_iter=max_iter, random_state= random_state,normalize=normalize) \r\n if best_tot_dist is None or tot_dist < best_tot_dist:\r\n best_idx = idx.copy()\r\n best_centroids = centroids.copy()\r\n best_tot_dist = tot_dist\r\n else: # n_jobs not =1 # if -1, all CPUs are used\r\n # parallelisation of kshape runs\r\n seeds = random_state.randint(np.iinfo(np.int32).max,size=n_init)\r\n results = Parallel(n_jobs=n_jobs, verbose=0)(\r\n delayed(_kshape_single)(x,k,max_iter=max_iter, random_state=seed, normalize=normalize)\r\n for seed in seeds )\r\n # Get results with the lowest distances\r\n idx, centroids,tot_dist, iterations = zip(*results)\r\n best = np.argmin(tot_dist) \r\n best_idx = idx[best]\r\n best_centroids = centroids[best]\r\n best_tot_dist = tot_dist[best]\r\n sys.stdout.write(\"Done: k=\"+str(k)+\"\\n\")\r\n return {'centroids':best_centroids, 'labels':best_idx, 'distance':best_tot_dist,'centroids_all':centroids,'labels_all':idx,'distance_all':tot_dist,'iterations':iterations}", "def iter_strings_k(n, k, m):\n # initial state -- all zeros\n state = np.zeros((n,), dtype=int)\n\n if k == 0:\n # that was it (!)\n return\n\n while True:\n #print(f\"next state is {state=}\")\n yield state\n\n # Update to next state. Idea is to count and carry as usual, except if\n # there are already k nonzeros in which case we count and carry by\n # ignoring all the trailing zeros. This is the algorithm described here\n # - https://stackoverflow.com/a/10458380/1694896 - adapted from bits to\n # base-m \"mits\"\n if np.count_nonzero(state) < k:\n _add_and_carry_in_place(state, m)\n continue\n\n # there are k nonzeros already, find first nonzero from least\n # significant end. 
See https://stackoverflow.com/a/52911347/1694896\n last_nonzero = np.max(np.nonzero(state))\n # and increment that one\n _add_and_carry_in_place(state, m, last_nonzero)\n if not np.any(state):\n # end of iteration reached, as we've gone back to the all-zero\n # state.\n return", "def combinations(n, k):\r\n return exp(gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1))", "def p_y_x_knn(y, k):\n result = np.zeros((len(y), 4))\n for i in range(len(y)):\n for j in range(k):\n result[i, y[i, j]] = result[i, y[i, j]] + 1\n return 1 / k * result", "def _K_computations(self, X, X2):\r\n if self.ARD:\r\n pass\r\n else:\r\n if X2 is None:\r\n self._K_inner_prod = np.dot(X,X.T)\r\n self._K_numer = self._K_inner_prod*self.weight_variance+self.bias_variance\r\n vec = np.diag(self._K_numer) + 1.\r\n self._K_denom = np.sqrt(np.outer(vec,vec))\r\n self._K_asin_arg = self._K_numer/self._K_denom\r\n self._K_dvar = four_over_tau*np.arcsin(self._K_asin_arg)\r\n else:\r\n self._K_inner_prod = np.dot(X,X2.T)\r\n self._K_numer = self._K_inner_prod*self.weight_variance + self.bias_variance\r\n vec1 = (X*X).sum(1)*self.weight_variance + self.bias_variance + 1.\r\n vec2 = (X2*X2).sum(1)*self.weight_variance + self.bias_variance + 1.\r\n self._K_denom = np.sqrt(np.outer(vec1,vec2))\r\n self._K_asin_arg = self._K_numer/self._K_denom\r\n self._K_dvar = four_over_tau*np.arcsin(self._K_asin_arg)", "def J (self, n):", "def get_k_of_each(y, k):\n if len(y.shape) != 2:\n raise ValueError(\"This function expects a 2D array.\")\n\n ixes = []\n ymax = np.argmax(y, axis=1)\n\n for i in range(y.shape[1]):\n ixes_i = np.where(ymax == i)[0]\n ixes.append(npr.choice(ixes_i, min(len(ixes_i), k), replace=False))\n\n return np.concatenate(ixes)", "def rotate(self, nums: List[int], k: int) -> None:\n k = k%len(nums)\n \n l, r = 0, len(nums)-1\n while l < r:\n nums[l], nums[r] = nums[r], nums[l]\n l += 1\n r -= 1\n l, r = 0, k-1\n while l < r:\n nums[l], nums[r] = nums[r], nums[l]\n l += 1\n r -= 1\n l, r = k, len(nums)-1\n while l < r:\n nums[l], nums[r] = nums[r], nums[l]\n l += 1\n r -= 1", "def one_arm_irreps(self, *k: Array) -> Array:\n # Convert k to reciprocal lattice vectors\n k = _ensure_iterable(k)\n # Little-group irrep factors\n # Phase factor for non-symmorphic symmetries is exp(-i w_g . p(k))\n point_group_factors = self._little_group_irreps(k) * np.exp(\n -1j * (self.point_group_.translations() @ k)\n )\n # Translational factors\n trans_factors = []\n for axis in range(self.lattice.ndim):\n n_trans = self.lattice.extent[axis] if self.lattice.pbc[axis] else 1\n factors = np.exp(-1j * k[axis] * np.arange(n_trans))\n shape = [1] * axis + [n_trans] + [1] * (self.lattice.ndim - 1 - axis)\n trans_factors.append(factors.reshape(shape))\n trans_factors = reduce(np.multiply, trans_factors).ravel()\n\n # Multiply the factors together\n # Translations are more major than point group operations\n result = np.einsum(\"ig, t -> itg\", point_group_factors, trans_factors).reshape(\n point_group_factors.shape[0], -1\n )\n return prune_zeros(result)", "def test_k_rank_approximate(corpus):\n return", "def apk(y_true, y_pred, k):\n \n # initialize p@k list of values\n pk_values = []\n \n # loop over all k. 
from 1 to k+1\n for i in range(1, k + 1):\n # calculate p@i and append to list\n pk_values.append(pk(y_true, y_pred, i))\n \n # if we have no values in the list, return 0\n if len(pk_values) == 0:\n return 0\n # else, we return the sum of list over length of list\n return sum(pk_values) / len(pk_values)", "def binomial_coefficient3(n, k):\n return reduce(lambda a, b: a * (n - b) / (b + 1), xrange(k), 1)", "def gn(i, j, k):\n if i+j < n: return anti[i+j][i+k] - anti[i+j][i]\n return anti[i+j][n-1-j+k] - anti[i+j][n-1-j]" ]
[ "0.6492725", "0.6413587", "0.63328785", "0.62609035", "0.61912435", "0.6108325", "0.61041903", "0.60753584", "0.60625505", "0.6060784", "0.60554206", "0.60479176", "0.6025259", "0.60064083", "0.60026884", "0.6001473", "0.59914064", "0.59914064", "0.5979783", "0.5960043", "0.5935852", "0.59205496", "0.5893979", "0.5887268", "0.5886487", "0.587572", "0.58695173", "0.5864606", "0.5862545", "0.58426476", "0.58263415", "0.580343", "0.5799256", "0.5786899", "0.5770547", "0.5766491", "0.57539386", "0.57500386", "0.5749318", "0.57445365", "0.5741642", "0.5741642", "0.5741539", "0.5740796", "0.57349247", "0.5730505", "0.57295746", "0.5728878", "0.57137144", "0.57060176", "0.570188", "0.5698985", "0.56986964", "0.56843853", "0.56762683", "0.56759", "0.56759", "0.5673987", "0.5671203", "0.5669989", "0.56519216", "0.56519216", "0.56446123", "0.564337", "0.56399137", "0.5633922", "0.5629378", "0.5625778", "0.56229097", "0.5617501", "0.56169224", "0.560973", "0.5608274", "0.5607661", "0.5592382", "0.55778193", "0.55767417", "0.5562207", "0.5551399", "0.55490285", "0.5528607", "0.55262184", "0.55234945", "0.5519704", "0.5508862", "0.5507381", "0.5506613", "0.5505655", "0.5499285", "0.5490905", "0.5489301", "0.54883", "0.54861736", "0.54827636", "0.5472742", "0.5471829", "0.5470349", "0.5470258", "0.5463555", "0.5451921", "0.54492676" ]
0.0
-1
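Note on the kth_arnoldi_step record above: it leans on module state that is out of scope here (_gs_step, the Gram-Schmidt body scanned over the basis, and jnp for jax.numpy) and uses the legacy jax.ops.index_update API, which newer JAX releases replace with the .at[...].set(...) syntax. A rough, self-contained sketch of the same iteration driven for m steps follows; the helper names make_A_mv and arnoldi are illustrative assumptions, not taken from the record, and it uses single-pass classical Gram-Schmidt for brevity where the record's lax.scan variant subtracts projections sequentially.

    import jax.numpy as jnp

    def make_A_mv(A):
        # Hypothetical helper: wrap a dense matrix as the A_mv callable.
        def A_mv(v):
            return A @ v
        return A_mv

    def arnoldi(A_mv, v0, m, tol=1e-12):
        # Run m Arnoldi steps: V is n x (m+1) with orthonormal columns and
        # H is the (m+1) x m upper-Hessenberg matrix with A @ V[:, :m] = V @ H.
        n = v0.shape[0]
        V = jnp.zeros((n, m + 1)).at[:, 0].set(v0 / jnp.linalg.norm(v0))
        H = jnp.zeros((m + 1, m))
        for k in range(m):
            v = A_mv(V[:, k])
            h = V.T @ v     # Gram-Schmidt coefficients; unbuilt (zero) columns contribute 0
            v = v - V @ h   # orthogonalize against the current basis
            v_norm = jnp.linalg.norm(v)
            H = H.at[:, k].set(h).at[k + 1, k].set(v_norm)
            # Normalize v unless it is (numerically) the zero vector.
            V = V.at[:, k + 1].set(jnp.where(v_norm > tol, v / v_norm, 0.0))
        return V, H

    # Usage sketch: V, H = arnoldi(make_A_mv(A), b / jnp.linalg.norm(b), m=5),
    # after which jnp.allclose(A @ V[:, :5], V @ H) should hold (the Arnoldi relation).

In a GMRES solver, the small Hessenberg H built this way is then triangularized one column at a time, which is exactly what the Givens-rotation routine in the next record does.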
Successively applies each of the rotations stored in givens to H_col.
def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray, k: int) -> jax.ShapedArray:\n  rotation_carry = (H_col, 0, k, givens)\n\n  def loop_condition(carry):\n    i = carry[1]\n    k = carry[2]\n    return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)\n\n  def apply_ith_rotation(carry):\n    H_col, i, k, givens = carry\n    cs = givens[0, i]\n    sn = givens[1, i]\n    H_i = cs * H_col[i] - sn * H_col[i + 1]\n    H_ip1 = sn * H_col[i] + cs * H_col[i + 1]\n    H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i)\n    H_col = jax.ops.index_update(H_col, jax.ops.index[i + 1], H_ip1)\n    return (H_col, i + 1, k, givens)\n\n  rotation_carry = jax.lax.while_loop(loop_condition, apply_ith_rotation, rotation_carry)\n  H_col = rotation_carry[0]\n  return H_col
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n # This call successively applies each of the\n # Givens rotations stored in givens[:, :k] to H_col.\n H_col = apply_rotations(H_col, givens, k)\n\n cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1])\n givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k)\n givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k)\n\n r_k = cs_k * H_col[k] - sn_k * H_col[k + 1]\n R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k)\n R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.)\n return R_col, givens", "def cw_rotate(self):\n self.grid = [list(x) for x in zip(*self.grid[::-1])]\n self.find_edges()", "def update_H(self):\n self.grid.H[self.loc] -= (\n self.grid.courant_number\n * self.grid.inverse_permeability[self.loc]\n * self.phi_H\n )", "def correct_rotation(k_rotations):\n\n for key, value in Chunk.global_piece_rotations.items():\n Chunk.global_piece_rotations[key] = (k_rotations + value) % 4\n # Should I correct it for the side rotations also?", "def test_givens_rotate(shape, indices, row, left):\n matrix = np.random.rand(*shape) * 1j + np.random.rand(*shape)\n unitary, (i, j) = matrix.copy(), indices\n if row:\n a, b = matrix[indices, j - 1]\n grot_mat = _givens_matrix(a, b, left)\n unitary[indices] = grot_mat @ unitary[indices]\n res = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0\n if left:\n assert np.isclose(unitary[i, j - 1], 0.0) and np.isclose(unitary[j, j - 1], res)\n else:\n assert np.isclose(unitary[i, j - 1], res) and np.isclose(unitary[j, j - 1], 0.0)\n else:\n a, b = matrix[j - 1, indices].T\n grot_mat = _givens_matrix(a, b, left)\n unitary[:, indices] = unitary[:, indices] @ grot_mat.T\n res = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0\n if left:\n assert np.isclose(unitary[j - 1, i], 0.0) and np.isclose(unitary[j - 1, j], res)\n else:\n assert np.isclose(unitary[j - 1, indices[0]], res) and np.isclose(\n unitary[j - 1, indices[1]], 0.0\n )", "def transform_to_rotating_frame(H, U, D):\n \n #Determine the effective hamiltonian in the rotating frame\n Heff = lambda t: np.conj(U(t).T) @ H(t) @ U(t) + D\n \n return Heff", "def update_H(self):\n gamma = self.get_gamma()\n delta = self.get_delta()\n summand2 = ((1 + (gamma.transpose().dot(self.H).dot(gamma) /\n delta.transpose().dot(gamma))) *\n delta.dot(delta.transpose()) / delta.transpose().dot(gamma)\n )\n summand3 = - ((delta.dot(gamma.transpose()).dot(self.H) +\n self.H.dot(gamma).dot(delta.transpose())) /\n delta.transpose().dot(gamma))\n self.H = self.H + summand2 + summand3", "def update_rotations(pieces_to_update, k_rotations):\n # A lot of spaghetti on the next line\n for key in pieces_to_update:\n old_value = Chunk.global_piece_rotations[key]\n new_value = (k_rotations + old_value) % 4\n Chunk.global_piece_rotations[key] = new_value\n\n for piece in pieces_to_update:\n new_side_locations = list(\n map(lambda side_num: (side_num - k_rotations) % 4, Chunk.global_side_location[piece])\n )\n Chunk.global_side_location[piece] = new_side_locations", "def rotate_components(phi, gamma = 1.0, q = 50, tol = 1e-6):\n p,k = phi.shape\n r = np.eye(k)\n d = 0\n cnt = 0\n for i in np.arange(q):\n cnt = cnt + 1\n d_old = d\n Lambda = np.dot(phi, r)\n u,s,vh = np.linalg.svd(np.dot(\n phi.T,np.asarray(Lambda)**3 - (gamma/p) * np.dot(\n Lambda, np.diag(np.diag(np.dot(Lambda.T,Lambda))))))\n print(\"Matrix u: \")\n print(u)\n print(\"Matrix s: 
\")\n print(s)\n print(\"Matrix vh: \")\n print(vh)\n r = np.dot(u, vh)\n d = np.sum(s)\n if d_old != 0 and d / d_old < 1 + tol:\n break\n print(\"Trace rotate_components_START\")\n print(\"Rotation matrix: \")\n print(r)\n print(\"Loop number: \" + str(cnt))\n print(\"Trace rotate_components_END\")\n return np.dot(phi, r)", "def _inverse_ops(self, Yl, Yh):\n a = len(Yh) # No of levels.\n device = self.device\n\n # If biort has 6 elements instead of 4, then it's a modified\n # rotationally symmetric wavelet\n # FIXME: there's probably a nicer way to do this\n if len(self.biort) == 4:\n h0o, g0o, h1o, g1o = self.biort\n elif len(self.biort) == 6:\n h0o, g0o, h1o, g1o, h2o, g2o = self.biort\n else:\n raise ValueError('Biort wavelet must have 6 or 4 components.')\n\n # If qshift has 12 elements instead of 8, then it's a modified\n # rotationally symmetric wavelet\n # FIXME: there's probably a nicer way to do this\n if len(self.qshift) == 8:\n h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b = self.qshift\n elif len(self.qshift) == 12:\n h0a, h0b, g0a, g0b, h1a, h1b, \\\n g1a, g1b, h2a, h2b, g2a, g2b = self.qshift\n else:\n raise ValueError('Qshift wavelet must have 12 or 8 components.')\n\n level = a - 1\n Z = Yl\n\n # This ensures that for level 1 we never do the following\n while level >= 1:\n if self.complex:\n lh = c2q(tf.real(Yh[level][:,:,0:6:5]),\n tf.imag(Yh[level][:,:,0:6:5]))\n hl = c2q(tf.real(Yh[level][:,:,2:4:1]),\n tf.imag(Yh[level][:,:,2:4:1]))\n hh = c2q(tf.real(Yh[level][:,:,1:5:3]),\n tf.imag(Yh[level][:,:,1:5:3]))\n else:\n lh = c2q(Yh[level].real[:,:,0:6:5],\n Yh[level].imag[:,:,0:6:5])\n hl = c2q(Yh[level].real[:,:,2:4:1],\n Yh[level].imag[:,:,2:4:1])\n hh = c2q(Yh[level].real[:,:,1:5:3],\n Yh[level].imag[:,:,1:5:3])\n\n # Do even Qshift filters on columns.\n y1 = colifilt(Z, g0b, g0a, device=device, name='l%d_ll_col_low' % level) + \\\n colifilt(lh, g1b, g1a, device=device, name='l%d_lh_col_high' % level)\n\n if len(self.qshift) >= 12:\n y2 = colifilt(hl, g0b, g0a, device=device, name='l%d_hl_col_low' % level)\n y2bp = colifilt(hh, g2b, g2a, device=device, name='l%d_hh_col_bp' % level)\n\n # Do even Qshift filters on rows.\n Z = rowifilt(y1, g0b, g0a, device=device, name='l%d_ll_row_low' % level) + \\\n rowifilt(y2, g1b, g1a, device=device, name='l%d_hl_row_high' % level) + \\\n rowifilt(y2bp, g2b, g2a, device=device, name='l%d_hh_row_bp' % level)\n else:\n y2 = colifilt(hl, g0b, g0a, device=device, name='l%d_hl_col_low' % level) + \\\n colifilt(hh, g1b, g1a, device=device, name='l%d_hh_col_high' % level)\n\n # Do even Qshift filters on rows.\n Z = rowifilt(y1, g0b, g0a, device=device, name='l%d_ll_row_low' % level) + \\\n rowifilt(y2, g1b, g1a, device=device, name='l%d_hl_row_high' % level)\n\n # Check size of Z and crop as required\n Z_r, Z_c = Z.get_shape().as_list()[-2:]\n S_r, S_c = Yh[level-1].get_shape().as_list()[-2:]\n # check to see if this result needs to be cropped for the rows\n if Z_r != S_r * 2:\n Z = Z[:,:, 1:-1, :]\n # check to see if this result needs to be cropped for the cols\n if Z_c != S_c * 2:\n Z = Z[:,:, :, 1:-1]\n\n # Assert that the size matches at this stage\n Z_r, Z_c = Z.get_shape().as_list()[-2:]\n if Z_r != S_r * 2 or Z_c != S_c * 2:\n raise ValueError(\n 'Sizes of highpasses {}x{} are not '.format(Z_r, Z_c) +\n 'compatible with {}x{} from next level'.format(S_r, S_c))\n\n level = level - 1\n\n if level == 0:\n if self.complex:\n lh = c2q(tf.real(Yh[0][:,:,0:6:5]),\n tf.imag(Yh[0][:,:,0:6:5]))\n hl = c2q(tf.real(Yh[0][:,:,2:4:1]),\n 
tf.imag(Yh[0][:,:,2:4:1]))\n hh = c2q(tf.real(Yh[0][:,:,1:5:3]),\n tf.imag(Yh[0][:,:,1:5:3]))\n else:\n lh = c2q(Yh[0].real[:,:,0:6:5],\n Yh[0].imag[:,:,0:6:5])\n hl = c2q(Yh[0].real[:,:,2:4:1],\n Yh[0].imag[:,:,2:4:1])\n hh = c2q(Yh[0].real[:,:,1:5:3],\n Yh[0].imag[:,:,1:5:3])\n\n # Do odd top-level filters on columns.\n y1 = colfilter(Z, g0o, device=device, name='l0_ll_col_low') + \\\n colfilter(lh, g1o, device=device, name='l0_lh_col_high')\n\n if len(self.biort) >= 6:\n y2 = colfilter(hl, g0o, device=device, name='l0_hl_col_low')\n y2bp = colfilter(hh, g2o, device=device, name='l0_hh_col_bp')\n\n # Do odd top-level filters on rows.\n Z = rowfilter(y1, g0o, device=device, name='l0_ll_row_low') + \\\n rowfilter(y2, g1o, device=device, name='l0_hl_row_high') + \\\n rowfilter(y2bp, g2o, device=device, name='l0_hh_row_bp')\n else:\n y2 = colfilter(hl, g0o, device=device, name='l0_hl_col_low') + \\\n colfilter(hh, g1o, device=device, name='l0_hh_col_high')\n\n # Do odd top-level filters on rows.\n Z = rowfilter(y1, g0o, device=device, name='l0_ll_row_low') + \\\n rowfilter(y2, g1o, device=device, name='l0_hl_row_high')\n\n return Z", "def M_g(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating g\", file=self.logfile)\n M_mu1 = np.lib.stride_tricks.as_strided(self.mu_pad,\n shape=[self.P+1, self.L_h],\n strides=[self.mu_pad.strides[-1], self.mu_pad.strides[-1]])\n\n M_mu1 = M_mu1[::-1,:]\n M_mu2 = np.transpose(M_mu1[1:,:])\n M_mu1 = M_mu1*self.e2\n\n M_mu = np.dot(M_mu1, M_mu2)\n v_mu = M_mu[0,:]\n M_mu = M_mu[1:,:]\n\n M_R = np.zeros((self.P,self.P+1))\n for p in range(1,self.P+1):\n for q in range(0,self.P+1):\n M_R[p-1,q] = np.sum(np.diag(self.R, q-p)[:self.L_h-max(p,q)]*self.e2[max(p,q):self.L_h])\n\n v_R = M_R[:,0]\n M_R = M_R[:,1:]\n\n self.alpha_g = np.dot(np.linalg.inv(M_mu + M_R), v_mu+v_R)\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self._propagate_A()", "def iterate_over_hkl_compute(self, max_hkl=6):\n \n # r will contain the return value, an array with rows that contain:\n # h, k, l, qhkl, qhkl_vector\n r = []\n \n for h in range(-max_hkl,max_hkl+1):\n for k in range(-max_hkl,max_hkl+1):\n for l in range(-max_hkl,max_hkl+1):\n \n # Don't put a reflection at origin\n if not (h==0 and k==0 and l==0):\n qhkl, qhkl_vector = self.q_hkl_exp(h,k,l)\n r.append( [ h, k, l, qhkl, qhkl_vector ] )\n \n return r", "def _rotate(self):\n \r\n if self.clr == 1: # (default rotation) \r\n # o o o o \r\n # o x x o x o o x\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0]] #\r\n elif self.clr == 2:\r\n # o o o o \r\n # o x o x x o x o\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0], [-1, 0, 0, 1]] #\r\n _rowOffsets = [[-1, 0, 0, 1], [-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0]] #\n \r\n elif self.clr == 3: # \r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\n \r\n _colOffsets = [[-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0], [ 1, 1, 0,-1]] #\r\n _rowOffsets = [[ 1, 1, 0,-1], [-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0]] #\n \r\n elif self.clr == 4:\r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\r\n _colOffsets = [[-1, 0, 0, 0], [1, 1, 0, -1], [1, 0, 0,0], [-1, -1, 0,1]]\n _rowOffsets = [[-1,-1, 0, 1], [-1,0, 0, 0], [1,1, 0,-1], [1,0, 0, 0]]\n \r\n elif self.clr == 5: # o o\r\n # o x \r\n # x o x o o o o o x o\r\n # o o \r\n _colOffsets = [[ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0], [-2,-1, 0, 1]] #\r\n _rowOffsets = [[-2,-1, 0, 1], 
[ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0]] #\r\n elif self.clr == 6: #\r\n # o o o \r\n # o x o x o x o o x o\r\n # o o o \r\n _colOffsets = [[ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0]] #\r\n elif self.clr == 7: # \r\n # o o o o o o o o\r\n # o x o x o x o x\r\n # \r\n _colOffsets = [[-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0]] #@@\r\n _rowOffsets = [[ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1]] #@@\n \r\n self._colOffsets = _colOffsets[self._rot] #@@\r\n self._rowOffsets = _rowOffsets[self._rot] #@@\r\n self._update() #@@\r", "def systematize_algorithm(H: np.array) -> Tuple[np.array, np.array, np.array]:\n n, c = H.shape\n m = np.abs(n-c)\n\n G_s = np.zeros((m, c), dtype=int)\n G_s[:, :m] = np.identity(m)\n\n H_s, permutation = systematize_matrix(H, post_system=True)\n\n rev_permutation = reverse_permutation(permutation)\n\n P = H_s[:, :m]\n\n G_s[:, m:] = P.T\n\n G = G_s[:, rev_permutation]\n\n return G, G_s, H_s", "def commutator(self, G, H):\n ggens = G.generators\n hgens = H.generators\n commutators = []\n for ggen in ggens:\n for hgen in hgens:\n commutator = rmul(hgen, ggen, ~hgen, ~ggen)\n if commutator not in commutators:\n commutators.append(commutator)\n res = self.normal_closure(commutators)\n return res", "def G_permutation(self, W):\n Wsh = W.get_shape().as_list()\n cayley = self.cayleytable\n U = []\n for i in range(24):\n perm_mat = self.get_permutation_matrix(cayley, i)\n w = W[:,:,:,:,:,:,i]\n w = tf.transpose(w, [0,1,2,3,5,4])\n w = tf.reshape(w, [-1, 24])\n w = w @ perm_mat\n w = tf.reshape(w, Wsh[:4]+[-1,24])\n U.append(tf.transpose(w, [0,1,2,3,5,4]))\n return U", "def rotate(self, matrix: List[List[int]]) -> None:\n height=len(matrix)\n for h in range(math.ceil(height/2)):\n for i in range(h,height-h-1):\n # print((h,i), (height-i-1,h))\n temp=matrix[h][i]\n matrix[h][i] = matrix[height-i-1][h]\n matrix[height-i-1][h] = matrix[height-h-1][height-i-1]\n matrix[height-h-1][height-i-1] = matrix[i][height-h-1]\n matrix[i][height-h-1] = temp", "def htm0_3(joint_rotations):\n # H0_1\n r0_1 = np.dot(rot_x(90), rot_y(joint_rotations[0]))\n d0_1 = transl(0, 0, a1)\n h0_1 = htm(r0_1, d0_1)\n\n # H1_2\n r1_2 = rot_z(joint_rotations[1])\n x1_2 = a2*np.cos(np.radians(joint_rotations[1]))\n y1_2 = a2*np.sin(np.radians(joint_rotations[1]))\n z1_2 = 0\n d1_2 = transl(x1_2, y1_2, z1_2)\n h1_2 = htm(r1_2, d1_2)\n\n # H2_3\n r2_3 = rot_z(joint_rotations[2])\n x2_3 = a3*np.cos(np.radians(joint_rotations[2]))\n y2_3 = a3*np.sin(np.radians(joint_rotations[2]))\n z2_3 = 0\n d2_3 = transl(x2_3, y2_3, z2_3)\n h2_3 = htm(r2_3, d2_3)\n\n # H0_3\n h0_2 = np.dot(h0_1, h1_2)\n h0_3 = np.dot(h0_2, h2_3)\n return h0_3", "def givens_rotation_matrix(i, j, theta, N):\n R = np.identity(N)\n c = np.cos(theta)\n s = np.sin(theta)\n R[i, i] = c\n R[j, j] = c\n R[i, j] = -s\n R[j, i] = s\n return R", "def update_H(self):\n self.grid.H[:, -1, :, :] = self.grid.H[:, 0, :, :]", "def update_E(self, curl_H):\n loc = (self.x, self.y)\n self.grid.E[loc] += (\n self.grid.courant_number / self.permittivity * curl_H[loc]\n )", "def update_H(self):\n self.grid.H[:, :, -1, :] = self.grid.H[:, :, 0, :]", "def apply_fhd(self, gfhd):\n for bl in self.data.keys():\n i,j = bl\n p1,p2 = self.pol\n G = gfhd[p1][i]*gfhd[p2][j].conj()\n ind = np.where(G != 0)[0]\n self.data[bl][self.pol][:,ind] /= G[ind]", "def harzburgite():\n\n rho = 3200.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 226.5; C[0,1] 
= 75.34; C[0,2] = 74.73; C[0,3] = -0.27; C[0,4] = -2.00; C[0,5] = 1.85\n C[1,0] = C[0,1]; C[1,1] = 242.8; C[1,2] = 73.68; C[1,3] = -3.6; C[1,4] = -1.91; C[1,5] = 4.14\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.; C[2,3] = -4.36; C[2,4] = -4.27; C[2,5] = -0.27\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.75; C[3,4] = 1.81; C[3,5] = -2.19\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 76.94; C[4,5] = -1.88\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.15\n\n return C, rho", "def Green_func(self):\n if self.bc == True:\n size = self.grid_size\n else:\n size = 2*self.grid_size\n self.Green = np.zeros([size, size])\n for x in range(len(self.Green[0])):\n for y in range(len(self.Green[1])):\n radius = np.sqrt(x**2 + y**2) \n if radius < self.soften: \n radius = self.soften\n self.Green[x, y]=1/(4 * np.pi * radius)\n if self.grid_size%2 == 0: \n self.Green[: size//2, size//2 : ] = np.flip(self.Green[: size//2, : size//2], axis = 1) # an intermittent step - the original grid has only been flipped once (2 x the original size)\n self.Green[ size//2 : , :] = np.flip(self.Green[: size//2, :], axis = 0)\n else: \n print(\"Exiting - Grid size is currently odd. Pleaset set to an even value.\")", "def set_rotation_matrices(self):\r\n for i in range(len(self.vertices)):\r\n self.vertices[i].meta['C'] = rotation_matrix(self.vertices[i].meta['axis'][0], \r\n self.vertices[i].meta['axis'][1], \r\n self.vertices[i].meta['axis'][2], \r\n self.vertices[i].meta['axis_order'],\r\n degrees=True)\r\n # Todo: invert this by applying angle operations in reverse order\r\n self.vertices[i].meta['Cinv'] = np.linalg.inv(self.vertices[i].meta['C'])", "def accumulate_homographies(H_succesive, m):\n if len(H_succesive) == 0:\n return H_succesive\n\n H2m = [np.eye(HOMOGRAPHY_RAD)]\n for i in range(m, 0, -1):\n temp_H = H2m[0].dot(H_succesive[i - 1])\n H2m.insert(0, temp_H/temp_H[2, 2])\n for i in range(m, len(H_succesive)):\n temp_H = H2m[i].dot(np.linalg.inv(H_succesive[i]))\n H2m.append(temp_H/temp_H[2, 2])\n\n return H2m", "def rotate_along(axis: Tensor) -> Tensor:\n W = torch.einsum('ijk,j->ik', levi_civita.to(axis), axis)\n return expm(W)", "def lherzolite():\n\n rho = 3270.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2\n C[1,0] = C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; C[2,4] = 2.38; C[2,5] = -0.12\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83\n\n return C, rho", "def forward(self, t, h):\n if self.i == 0:\n self.A = self.beta * (self.B - self.B.transpose(1, 0)) + (\n 1 - self.beta) * (self.B +\n self.B.transpose(1, 0)) - self.gamma * self.I\n self.W = self.beta * (self.C - self.C.transpose(1, 0)) + (\n 1 - self.beta) * (self.C +\n self.C.transpose(1, 0)) - self.gamma * self.I\n\n return torch.matmul(\n h, self.A) + self.tanh(torch.matmul(h, self.W) + self.z)", "def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )", "def compute_rot(v):\n if v[0] >= 0:\n M = nd.eye(len(v))\n else:\n M = - nd.eye(len(v))\n for i in range(1, len(v)):\n if v[i] == 0:\n continue\n 
rot_minus_theta = nd.eye(len(v))\n temp = nd.dot(M, v)\n\n theta = nd.arctan(temp[i]/temp[0])\n c = nd.cos(theta)\n s = nd.sin(theta)\n\n rot_minus_theta[0,0] = c\n rot_minus_theta[i,i] = c\n rot_minus_theta[0,i] = s\n rot_minus_theta[i,0] = -s\n\n M = nd.dot(rot_minus_theta, M)\n return M", "def accumulate_homographies(H_succesive, m):\n\n # todo think about indexes problems with that m == M//2\n\n\n H_to_ret = H_succesive.copy()\n H_to_ret.insert(m,np.eye(3))\n # todo check that it is a list and not np array\n\n for i in range(m-1):\n H_to_ret[i] = np.linalg.multi_dot(H_succesive[i:m][::-1])\n H_to_ret[i] = H_to_ret[i]/H_to_ret[i][2][2]\n\n for i in range(m+1,len(H_to_ret)):\n H_to_ret[i] = np.linalg.inv(H_to_ret[i])\n\n for i in range(m+2,len(H_to_ret)):\n H_to_ret[i] = np.linalg.multi_dot(H_to_ret[i-1:i+1])\n H_to_ret[i] = H_to_ret[i]/H_to_ret[i][2][2]\n\n return H_to_ret", "def EquatorialToGalactic(Equatorial):\n \n # ra, dec, s => l,b,s\n ra = Equatorial[:,0]\n dec = Equatorial[:,1]\n s = Equatorial[:,2]\n cd = np.cos(dec)\n sd = np.sin(dec)\n b = np.arcsin(np.sin(decgp)*sd+np.cos(decgp)*cd*np.cos(ra-ragp))\n l = lcp-np.arctan2(cd*np.sin(ra-ragp),np.cos(decgp)*sd-np.sin(decgp)*cd*np.cos(ra-ragp))\n l[l<0] += 2.*np.pi; \n if(len(Equatorial[0,:])==3):\n Galactic = np.column_stack((l,b,s))\n else:\n # vlos, muracos(dec), mudec => vlos, mulcosb, mub\n vlos = Equatorial[:,3]\n muracosd = Equatorial[:,4]\n mudec = Equatorial[:,5]\n cb = np.cos(b)\n sb = np.sin(b)\n A11 = (np.sin(decgp)*cd-np.cos(decgp)*sd*np.cos(ra-ragp))/cb\n A12 = -np.cos(decgp)*np.sin(ra-ragp)/cb\n A21 = (np.cos(decgp)*cd+np.sin(decgp)*sd*np.cos(ra-ragp)+sb*np.cos(lcp-l)*A11)/np.sin(lcp-l)\n A22 = (np.sin(decgp)*np.sin(ra-ragp)+sb*np.cos(lcp-l)*A12)/np.sin(lcp-l) \n index = np.where(np.fabs(np.cos(lcp-l)) > np.fabs(np.sin(lcp-l)))\n A21[index] = (sd[index]*np.sin(ra[index]-ragp)-sb[index]*np.sin(lcp-l[index])*A11[index])/np.cos(lcp-l[index])\n A22[index] = -(np.cos(ra[index]-ragp)+sb[index]*np.sin(lcp-l[index])*A12[index])/np.cos(lcp-l[index])\n \n Galactic = np.column_stack((l,b,s,vlos,A21*mudec+A22*muracosd,A11*mudec+A12*muracosd))\n \n return Galactic", "def angles(self,compass=0,vertical=0,roll=0):\n self.matrix = makeMatrix(compass,vertical,roll)", "def rotates(self, maze, game_display):\n if self.lidars[0].get_sense() <= self.lidars[0].radius // 3:\n if uniform(0, 1) > 0.7:\n self.rotate_right(angle=45, maze=maze, game_display=game_display)\n else:\n self.rotate_left(angle=45, maze=maze, game_display=game_display)\n # fix to left.\n if self.lidars[1].get_sense() <= 2 * self.lidars[1].radius // 3:\n self.rotate_left(angle=10, maze=maze, game_display=game_display)\n # fix to right.\n if self.lidars[2].get_sense() <= 2 * self.lidars[0].radius // 3:\n self.rotate_right(angle=10, maze=maze, game_display=game_display)", "def transform(self,G):\n\n n = len(self.G_train_)\n nt = len(G)\n #Ks = sp.zeros((n,1))\n kernel_matrix = sp.zeros((nt,n))\n \n# for j in range(n):\n# Ks[j] = sp.sqrt(aGMKernel(self.G_train_[j],self.G_train_[j],self.alpha,self.gamma))\n# \n# for i in range(nt):\n# Kts = sp.sqrt(aGMKernel(G[i],G[i],self.alpha,self.gamma))\n# for j in range(n):\n# kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha,self.gamma)/Kts/Ks[j]\n \n for i in range (nt):\n for j in range(n):\n kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha, self.gamma)\n \n \n return kernel_matrix", "def rotations4(polycube, axis):\r\n for i in range(4):\r\n yield rot90(polycube, i, axis)", "def Rot_layer(self, w):\n 
for idx, element in enumerate(w):\n qml.Rot(element[0], element[1], element[2], wires=idx)", "def gha(self):\n return np.mod(self.gmst*self.turndeg +\n self.turndeg*self.T*self.century +\n self.turndeg/2.0, self.turndeg)", "def apply_gilt_cubes(As, pars, leg=None, Rps=None):\n # Permute and rotate the tensors, so that they are arranged as if\n # leg=\"FN\".\n As = permute_As(As, leg=leg, Rps=Rps)\n env = build_gilt_cube_env(As, pars)\n As[7], As[3], done, err = apply_gilt_FNenv(env, As[7], As[3], pars,\n Rps=Rps)\n # Invert the permutations done in the beginning.\n As = permute_As(As, leg=leg, inverse=True, Rps=Rps)\n return As, done, err", "def _matvec(self, h: np.ndarray) -> np.ndarray:\n return convolve(self.x, h, mode='valid', method=self.method)", "def accumulate_homographies(H_succesive, m):\n\n h2m = [np.eye(3)]\n\n if len(H_succesive) == 0:\n return h2m\n\n [h2m.append( h2m[-1].dot(H_succesive[i]) )\n for i in range(m-1,-1,-1)]\n h2m.reverse()\n\n [h2m.append( np.linalg.inv(H_succesive[i]).dot(h2m[-1]) )\n for i in range(m, len(H_succesive))]\n\n for i in range(len(h2m)):\n h2m[i] /= h2m[i][2,2]\n\n return h2m", "def update_H(self):\n self.grid.H[-1, :, :, :] = self.grid.H[0, :, :, :]", "def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here", "def cache_gH(self):\n self.gH.fill(0.)\n for lag, Wl in enumerate(self.W):\n self.gH += s_dot(Wl.T, self.resids, -lag)\n return self.gH", "def rotate90(self):", "def forward(self, h, r, t):\n h_e, r_e, t_e = self.embed(h, r, t)\n norm_h = F.normalize(h_e, p=2, dim=-1)\n norm_r = F.normalize(r_e, p=2, dim=-1)\n norm_t = F.normalize(t_e, p=2, dim=-1)\n\n return torch.sum(self._gu_bilinear(norm_h, norm_r) * self._gv_bilinear(norm_r, norm_t), -1)", "def rotate(p,q,A,V): \n n = A.shape[0]\n App, Aqq, Apq = A[p,p], A[q,q], A[p,q] #Initial values\n phi = 0.5*math.atan2(2*Apq, Aqq-App) #Find the rotation value\n c, s = math.cos(phi), math.sin(phi) #Calculate sin and cos\n\n #Update the matrix diagonal elements\n A[p,p] = c*c*App + s*s*Aqq - 2*s*c*Apq \n A[q,q] = s*s*App + c*c*Aqq + 2*s*c*Apq\n A[p,q] = 0 #This is zero by construction\n \n \n #Iterate over and update remaining off-diagonal elements\n for i in range(p):\n Aip, Aiq = A[i,p], A[i,q]\n A[i,p] = c*Aip - s*Aiq\n A[i,q] = c*Aiq + s*Aip\n \n for i in range(p+1,q):\n Api, Aiq = A[p,i], A[i,q]\n A[p,i] = c*Api - s*Aiq\n A[i,q] = c*Aiq + s*Api\n \n for i in range(q+1,n):\n Api, Aqi = A[p,i], A[q,i]\n A[p,i] = c*Api - s*Aqi\n A[q,i] = c*Aqi + s*Api\n \n #Update eigenvectors in matrix V\n for i in range(n):\n Vip, Viq = V[i,p], V[i,q]\n V[i,p] = c*Vip - s*Viq\n V[i,q] = s*Vip + c*Viq\n \n return A, V", "def global_fn(self, h, e, g_state):\n N, K, _ = h.shape\n\n # Concatenate all relevant inputs.\n h = h.sum(dim=1)\n\n mask = torch.ones(1, K, K, 1)\n if torch.cuda.is_available():\n mask = mask.cuda()\n for kx1 in range(K):\n for kx2 in range(K):\n if kx1 != kx2-1:\n mask[:, kx1, kx2, :] = 0.\n #if kx1 == kx2:\n # mask[:, kx1, kx2, :] = 0.\n e = torch.sum(e*mask, dim=[1,2])\n\n x = torch.cat([h, e], dim=1).view(-1, 1, 2*self.n_hidden)\n output, g_n = self.G_gru(input=x,\n hx=g_state)\n output = output.view(N, self.n_hidden)\n return output, g_n", "def apply_rotations(images, rotations):\r\n for angle in rotations:\r\n assert angle % 90 == 0, 'atm only n * 90° angle are supported'\r\n return 
np.array([np.rot90(img, angle // 90) for img in images for angle in rotations])", "def test_givens_matrix(a, b, left):\n\n grot_mat = _givens_matrix(a, b, left)\n assert np.isreal(grot_mat[0, 1]) and np.isreal(grot_mat[1, 1])\n\n rotated_vector = grot_mat @ np.array([a, b]).T\n result_element = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0\n rvec = np.array([0.0, result_element]).T if left else np.array([result_element, 0.0]).T\n assert np.allclose(rotated_vector, rvec)\n\n res1 = np.round(grot_mat @ grot_mat.conj().T, 5)\n res2 = np.round(grot_mat.conj().T @ grot_mat, 5)\n assert np.all(res1 == res2) and np.all(res1 == np.eye(2))", "def set_exp_vertical_hkl(self, h, k, l ):\n \n # TODO\n pass", "def olivine():\n\n rho = 3355.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 320.5; C[0,1] = 68.15; C[0,2] = 71.6; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 196.5; C[1,2] = 76.8; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 233.5; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 64.; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 77.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 78.7\n\n return C, rho", "def alg(c):\n return c[0]*G[0] + c[1]*G[1] + c[2]*G[2]", "def vec_rotate_r2g(al, be, ga, lon, lat, urot, vrot, flag):\n\n # first get another coordinate\n if flag == 1:\n (rlon, rlat) = scalar_g2r(al, be, ga, lon, lat)\n else:\n rlon = lon\n rlat = lat\n (lon, lat) = scalar_r2g(al, be, ga, rlon, rlat)\n\n # then proceed...\n rad = mt.pi / 180\n al = al * rad\n be = be * rad\n ga = ga * rad\n\n rotate_matrix = np.zeros(shape=(3, 3))\n rotate_matrix[0, 0] = np.cos(ga) * np.cos(al) - np.sin(ga) * np.cos(be) * np.sin(al)\n rotate_matrix[0, 1] = np.cos(ga) * np.sin(al) + np.sin(ga) * np.cos(be) * np.cos(al)\n rotate_matrix[0, 2] = np.sin(ga) * np.sin(be)\n rotate_matrix[1, 0] = -np.sin(ga) * np.cos(al) - np.cos(ga) * np.cos(be) * np.sin(\n al\n )\n rotate_matrix[1, 1] = -np.sin(ga) * np.sin(al) + np.cos(ga) * np.cos(be) * np.cos(\n al\n )\n rotate_matrix[1, 2] = np.cos(ga) * np.sin(be)\n rotate_matrix[2, 0] = np.sin(be) * np.sin(al)\n rotate_matrix[2, 1] = -np.sin(be) * np.cos(al)\n rotate_matrix[2, 2] = np.cos(be)\n\n rotate_matrix = np.linalg.pinv(rotate_matrix) \n \n rlat = rlat * rad\n rlon = rlon * rad\n lat = lat * rad\n lon = lon * rad\n\n # vector in rotated Cartesian\n txg = -vrot * np.sin(rlat) * np.cos(rlon) - urot * np.sin(rlon)\n tyg = -vrot * np.sin(rlat) * np.sin(rlon) + urot * np.cos(rlon)\n tzg = vrot * np.cos(rlat)\n\n # vector in geo Cartesian\n txr = (\n rotate_matrix[0, 0] * txg\n + rotate_matrix[0, 1] * tyg\n + rotate_matrix[0, 2] * tzg\n )\n tyr = (\n rotate_matrix[1, 0] * txg\n + rotate_matrix[1, 1] * tyg\n + rotate_matrix[1, 2] * tzg\n )\n tzr = (\n rotate_matrix[2, 0] * txg\n + rotate_matrix[2, 1] * tyg\n + rotate_matrix[2, 2] * tzg\n )\n\n # vector in geo coordinate\n v = (\n -np.sin(lat) * np.cos(lon) * txr\n - np.sin(lat) * np.sin(lon) * tyr\n + np.cos(lat) * tzr\n )\n u = -np.sin(lon) * txr + np.cos(lon) * tyr\n\n u = np.array(u)\n v = np.array(v)\n\n return (u, v)", "def get_l(GW_glitch,i,j):\n\t\t \n\ttemp = np.einsum('nmk,nmk->k', GW_glitch.r_outer_r[:,:,i,j,:], GW_glitch.Hij[:,:,i,j,:])\n\t\t \n\treturn temp", "def _r270(self,m):\n return np.rot90(m,3)", "def computeV(H):\n # Pseudo-inverse of H\n #V = np.linalg.inv(H) # 
Inverse\n V = np.linalg.pinv(H) # Pseudo-inverse\n \n # Normalise columns\n [m,n] = V.shape\n for i in range(n):\n V[:,i] = V[:,i]/np.linalg.norm(V[:,i])\n \n return V", "def gyration_tensor(coors):\n coors -= np.mean(coors,axis=0)\n S = np.sum([np.einsum('i,j->ij',c,c) for c in coors], axis=0)/(len(coors))\n L = np.linalg.eig(S)[0]\n\treturn np.sqrt(L)", "def MPinv(list_of_ch,direction, angle, azimuth):\n\n\n \"\"\"~~~~~~~~~~~ Input conditions ~~~~~~~~~~~~~~\"\"\"\n ch_list = list_of_ch\n direction_deg = float(direction) #inclined direction of wellbore from North\n angle_deg = float(angle) # inclined angle of well \n azimuth_deg = float(azimuth) # core orientation from North or inclined direction \n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n azimuth_deg = azimuth_deg - 45\n\n \"\"\"~~~~~~~~~~~ Allocate numbers to each direction (for example, xx => 0, xy => 3 etc...) ~~~~~~~~~~~~~~\"\"\"\n ch_col = ch_list.columns.values\n\n if \"xx\" in ch_col: ch_list.at[\"ch_no\",\"xx\"] =0\n if \"yy\" in ch_col: ch_list.at[\"ch_no\",\"yy\"] =1\n if \"zz\" in ch_col: ch_list.at[\"ch_no\",\"zz\"] =2\n if \"xy\" in ch_col: ch_list.at[\"ch_no\",\"xy\"] =3\n if \"yx\" in ch_col: ch_list.at[\"ch_no\",\"yx\"] =4\n if \"yz\" in ch_col: ch_list.at[\"ch_no\",\"yz\"] =5\n if \"zy\" in ch_col: ch_list.at[\"ch_no\",\"zy\"] =6\n if \"zx\" in ch_col: ch_list.at[\"ch_no\",\"zx\"] =7\n if \"xz\" in ch_col: ch_list.at[\"ch_no\",\"xz\"] =8\n\n ch = ch_list.loc[\"ch_no\",:].values\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n Number_of_vector = len(ch)\n No_v = Number_of_vector\n direction_rad = direction_deg*pi*180**(-1) \n angle_rad = angle_deg*pi*180**(-1) \n azimuth_rad = azimuth_deg*pi*180**(-1) \n\n\n \"\"\"~~~~~~~~ Create matrix of Direction Cosine vectors~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n c=np.cos(0.25*pi)\n s=np.sin(0.25*pi)\n n = np.zeros((3,9))\n\n n[:,0] = np.array([1,0,0])\n n[:,1] = np.array([0,1,0])\n n[:,2] = np.array([0,0,1])\n n[:,3] = np.array([c,s,0])\n n[:,4] = np.array([c,-s,0])\n n[:,5] = np.array([0,c,s])\n n[:,6] = np.array([0,c,-s])\n n[:,7] = np.array([c,0,s])\n n[:,8] = np.array([-c,0,s])\n\n\n \"\"\"~~~~~~~~~~~~~~ coordinate transformation from 'ASR local co-ordinate' to 'Geological co-ordinate' ~~~~~~~~~~~~~~~~~\"\"\"\n cdr = np.cos(direction_rad)\n sdr = np.sin(direction_rad)\n\n caz = np.cos(azimuth_rad)\n saz = np.sin(azimuth_rad)\n\n can = np.cos(angle_rad)\n san = np.sin(angle_rad)\n\n Rdr = np.array([[cdr, sdr, 0],[-sdr, cdr, 0],[0, 0, 1]]) #counter_clockwise\n Ran = np.array([[1, 0, 0],[0, can, san],[0, -san, can]])\n Raz = np.array([[caz, -saz, 0],[saz, caz, 0],[0, 0, 1]])\n\n R1 = Ran.dot(Rdr)\n R2 = Raz.dot(R1)\n\n for i in range(0,9):\n n[:,i] = R2.dot(n[:,i])\n n= np.round(n,6)\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n\n\n \"\"\"~~~~~~~~ Create matrix A (b = Ax: b;Observed normal strain data, x;strain tensor component which we have to determine) ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n X = np.empty((No_v,6))\n\n for i in range(0,No_v):\n cc = ch[i]\n X[i,:] = np.array([n[0,cc]**2, n[1,cc]**2, n[2,cc]**2, 2*n[0,cc]*n[1,cc], 2*n[1,cc]*n[2,cc], 2*n[2,cc]*n[0,cc]])\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 
\"\"\"\n\n\n X_inv = np.linalg.pinv(X) # Calculate Moore-Penrose inverse matrix\n\n return X_inv", "def update_H(self):", "def rotate(self, matrix: list[list[int]]) -> None:", "def CalcaUVW(Galactic,R0):\n l = Galactic[:,0]\n b = Galactic[:,1]\n s = Galactic[:,2]\n vlos = Galactic[:,3]\n mulcosb = Galactic[:,4]\n mub = Galactic[:,5]\n alpha = np.arctan2((s*np.sin(l)*np.cos(b)),(R0-s*np.cos(l)*np.cos(b)))\n U = pm2vel*(-np.sin(b)*np.cos(l)*s*mub - np.sin(l)*s*mulcosb) + np.cos(b)*np.cos(l)*vlos\n V = pm2vel*(-np.sin(b)*np.sin(l)*s*mub + np.cos(l)*s*mulcosb) + np.cos(b)*np.sin(l)*vlos\n W = pm2vel*np.cos(b)*s*mub + np.sin(b)*vlos\n \n return(alpha,U,V,W)", "def _rotate_cw(self, table):\n return [ [ table[1][0], table[0][0] ],\n [table[1][1], table[0][1] ] ]", "def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass", "def apply_rotation_y(self, phi=0.0 ):\n \n phi = radians(phi)\n new_rotation_matrix = [[ +cos(phi) , 0 , +sin(phi) ],\n [ 0 , 1 , 0 ],\n [ -sin(phi) , 0 , +cos(phi) ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def pairing(self, g, t=1):\n # Have to make test of g!!!\n # raise NotImplementedError,\" Need a proper class of holomorphic vector-valued forms!\"\n # # We need to obtain the coefficients of the inverse Shimura rep. first\n # # if we want to apply this to a scalar holomorphic form g\n # First locate the maximum discriminant we need\n Dmax = 0\n PP = self._principal_part\n for (r, n) in PP:\n D = self._space.D_from_rn((r, n))\n if(abs(D) > abs(Dmax)):\n Dmax = D\n sig = sign(Dmax)\n Dmax = 10 # abs(Dmax)\n print(\"Dmax={0}\".format(sig * Dmax))\n # t=1 # if this doesn't work we have to choose another t\n # I also assume that g and G have trivial characters\n syst = matrix(ZZ, Dmax, Dmax)\n rhs = matrix(ZZ, Dmax, 1)\n k = Integer(g.weight() / Integer(2))\n for n in range(1, Dmax + 1):\n rhs[n - 1, 0] = g[n]\n for d in range(1, Dmax + 1):\n if(n % d != 0):\n continue\n # I use the character d -> (4N / d)\n # chi=(kronecker(-1,d)**k)*kronecker(t,d)*kronecker(d,F.space.WR.level())\n chi = kronecker(t, d) # *kronecker(d,F.space.WR.level)\n am = chi * d**(k - 1)\n # print \"am[\",n,d,\"]=\",am\n syst[n - 1, n / d - 1] = am\n X = syst.solve_right(rhs)\n C = dict()\n for j in range(1, Dmax + 1):\n C[t * j**2] = X[j - 1, 0]\n print(\"C[{0}={1}\".format(t * j**2, X[j - 1, 0]))\n return C\n\n summa = 0\n\n PP = self._principal_part\n for (r, n) in PP:\n summa = summa + PP[(r, n)] * g.coeff((r, n))\n return summa", "def acc_grav(j,rA,mA,hA,W=kernel.W_M4star):\n assert rA.shape[0] == mA.shape[0] == hA.shape[0], \"arrays are mismatched\"\n N = len(mA)\n\n tot = 0\n for i in range(N):\n if i != j:\n\n r_ij = rA[j,:] - rA[i,:]\n r_ij1 = np.linalg.norm(r_ij)\n m_i = mA[i]\n h_ij = 0.5 * (hA[i] + hA[j])\n\n tot += m_i * (r_ij/r_ij**3) * W(r_ij1,h_ij)\n\n return - tot * G", "def Ham_gen(self,kx,ky):\n temp=np.zeros((self.NL*2,self.NL*2),dtype=complex) # for storage of Hamiltonian matrix\n for i in range(self.NL):\n #Diagonal terms are purely layer specific.\n # DIAG A\n temp[2*i ,2*i ]=self.layers[i].H1(kx,ky) + self.layers[i].Hz(kx,ky)\n # LOWER OFF-DIAG BA\n temp[2*i+1,2*i ]=self.layers[i].Hx(kx,ky) + 
1.j*self.layers[i].Hy(kx,ky)\n # UPPER OFF-DIAG AB\n temp[2*i ,2*i+1]=self.layers[i].Hx(kx,ky) - 1.j*self.layers[i].Hy(kx,ky)\n # DIAG B\n temp[2*i+1,2*i+1]=self.layers[i].H1(kx,ky) - self.layers[i].Hz(kx,ky)\n\n # Next update the couplings between the layers.\n if i<self.NL-1:\n temp[2*i ,2*i+2]=self.couplings[i]\n temp[2*i+1,2*i+3]=self.couplings[i]\n temp[2*i+2,2*i ]=self.couplings[i]\n temp[2*i+3,2*i+1]=self.couplings[i]\n\n return temp", "def forward(self, input_tensor):\n last = input_tensor\n for module in self.projection:\n projection = module(last)\n last = torch.cat((last, projection), -1)\n projection = last\n\n intermediate = self.seed(projection)\n intermediate = intermediate.view((-1, 512, 3, 3))\n\n projection_2d = projection.view((-1, self.projection_dim, 1, 1))\n projection_2d = self.projection_upscaler(projection_2d)\n\n for i, (conv, upscaling) in enumerate(zip(self.conv, self.upscaling)):\n if i + 1 != len(self.upscaling):\n if i > 0:\n intermediate = torch.cat((intermediate, projection_2d), 1)\n intermediate = torch.nn.functional.pixel_shuffle(intermediate, 2)\n intermediate = conv(intermediate)\n projection_2d = upscaling(projection_2d)\n\n r_space = self.colourspace_r(projection)\n r_space = r_space.view((-1, 16, 1, 1))\n r_space = self.colourspace_upscaler(r_space)\n r_space = intermediate * r_space\n r_space = torch.sum(r_space, dim=1, keepdim=True)\n\n g_space = self.colourspace_g(projection)\n g_space = g_space.view((-1, 16, 1, 1))\n g_space = self.colourspace_upscaler(g_space)\n g_space = intermediate * g_space\n g_space = torch.sum(g_space, dim=1, keepdim=True)\n\n b_space = self.colourspace_b(projection)\n b_space = b_space.view((-1, 16, 1, 1))\n b_space = self.colourspace_upscaler(b_space)\n b_space = intermediate * b_space\n b_space = torch.sum(b_space, dim=1, keepdim=True)\n\n output = torch.cat((r_space, g_space, b_space), dim=1)\n\n return output", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)", "def rotateHue ( self, hue_inc ):\n if isinstance( hue_inc, int ):\n hue_inc /= 360.0\n newhue = self.h + hue_inc\n if newhue > 1.0:\n newhue, whole = math.modf(newhue) # Keep decimal part\n if newhue < 0.0:\n newhue, whole = math.modf(newhue) # Keep decimal part\n newhue = 1.0 + newhue\n self.h = newhue\n self.hsl[0] = self.h\n self.hsla[0] = self.h\n self.updateFromHsl()", "def le_func(rn, g, h):\n le = np.copy(rn)\n le -= g\n le -= h\n return le", "def func2(y, j, h, add_u = 0):\n y_temp = y[j] + add_u\n N = xsize\n k = np.zeros(xsize)\n for i in range(xsize):\n k[i] = -(1/4.)*(1./h)*(y_temp[(i+1)%N]**2-y_temp[(i-1)%N]**2)\n return k", "def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v", "def _apply_array_spin1234(self, h1e: 'Nparray', h2e: 'Nparray',\n h3e: 'Nparray', h4e: 'Nparray') -> 'Nparray':\n norb = self.norb()\n tno = 2 * norb\n assert h4e.shape == (tno, tno, tno, tno, tno, tno, tno, tno)\n lena = self.lena()\n lenb = self.lenb()\n\n nh1e = numpy.copy(h1e)\n nh2e = numpy.copy(h2e)\n nh3e = numpy.copy(h3e)\n\n if fqe.settings.use_accelerated_code:\n _make_nh123(norb, h4e, nh1e, nh2e, nh3e)\n else:\n for i in range(norb * 2):\n for j in range(norb * 2):\n 
for k in range(norb * 2):\n nh1e[:, :] -= h4e[:, j, i, k, j, i, k, :]\n for l in range(norb * 2):\n nh2e[i, j, :, :] += (h4e[j, l, i, k, l, k, :, :] +\n h4e[i, j, l, k, l, k, :, :] +\n h4e[i, l, k, j, l, k, :, :] +\n h4e[j, i, k, l, l, k, :, :] +\n h4e[i, k, j, l, k, :, l, :] +\n h4e[j, i, k, l, k, :, l, :] +\n h4e[i, j, k, l, :, k, l, :])\n nh3e[i, j, k, :, :, :] += (\n h4e[k, i, j, l, l, :, :, :] +\n h4e[j, i, l, k, l, :, :, :] +\n h4e[i, l, j, k, l, :, :, :] +\n h4e[i, k, j, l, :, l, :, :] +\n h4e[i, j, l, k, :, l, :, :] +\n h4e[i, j, k, l, :, :, l, :])\n\n (dveca, dvecb) = self.calculate_dvec_spin()\n evecaa = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n evecab = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n evecba = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n evecbb = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n for i in range(norb):\n for j in range(norb):\n tmp = self._calculate_dvec_spin_with_coeff(dveca[i, j, :, :])\n evecaa[:, :, i, j, :, :] = tmp[0][:, :, :, :]\n evecba[:, :, i, j, :, :] = tmp[1][:, :, :, :]\n\n tmp = self._calculate_dvec_spin_with_coeff(dvecb[i, j, :, :])\n evecab[:, :, i, j, :, :] = tmp[0][:, :, :, :]\n evecbb[:, :, i, j, :, :] = tmp[1][:, :, :, :]\n\n out = self._apply_array_spin123(nh1e, nh2e, nh3e, (dveca, dvecb),\n (evecaa, evecab, evecba, evecbb))\n\n def ncon(A, B):\n \"\"\"Tensor contraction and transposition corresponding with\n einsum 'ikmojlnp,mnopxy->ijklxy'\n \"\"\"\n return numpy.transpose(numpy.tensordot(A,\n B,\n axes=((2, 6, 3, 7), (0, 1, 2,\n 3))),\n axes=(0, 2, 1, 3, 4, 5))\n\n n = norb # shorter\n nevecaa = ncon(h4e[:n, :n, :n, :n, :n, :n, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[:n, :n, :n, n:, :n, :n, :n, n:], evecab) \\\n + ncon(h4e[:n, :n, n:, n:, :n, :n, n:, n:], evecbb)\n\n nevecab = ncon(h4e[:n, n:, :n, :n, :n, n:, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[:n, n:, :n, n:, :n, n:, :n, n:], evecab) \\\n + ncon(h4e[:n, n:, n:, n:, :n, n:, n:, n:], evecbb)\n\n nevecbb = ncon(h4e[n:, n:, :n, :n, n:, n:, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[n:, n:, :n, n:, n:, n:, :n, n:], evecab) \\\n + ncon(h4e[n:, n:, n:, n:, n:, n:, n:, n:], evecbb)\n\n dveca2 = numpy.zeros(dveca.shape, dtype=self._dtype)\n dvecb2 = numpy.zeros(dvecb.shape, dtype=self._dtype)\n for i in range(norb):\n for j in range(norb):\n dveca[:, :, :, :] = nevecaa[i, j, :, :, :, :]\n dvecb[:, :, :, :] = nevecab[i, j, :, :, :, :]\n cvec = self._calculate_coeff_spin_with_dvec((dveca, dvecb))\n dveca2[i, j, :, :] += cvec[:, :]\n\n dveca[:, :, :, :] = nevecab[:, :, i, j, :, :]\n dvecb[:, :, :, :] = nevecbb[i, j, :, :, :, :]\n cvec = self._calculate_coeff_spin_with_dvec((dveca, dvecb))\n dvecb2[i, j, :, :] += cvec[:, :]\n\n out += self._calculate_coeff_spin_with_dvec((dveca2, dvecb2))\n return out", "def H_all(self, q = np.zeros(2) , R_index = 0 ): \n \n R = self.R[ int(R_index) ]\n B = np.dot( self.jacobian_actuators( q ).T , R.T ) # Transfor to rotor space \n \n H_all = self.H( q ) + np.dot( B , np.dot( self.Ia , B.T ) ) \n \n return H_all", "def H_all(self, q = np.zeros(2) , R_index = 0 ): \n \n R = self.R[ int(R_index) ]\n B = np.dot( self.jacobian_actuators( q ).T , R.T ) # Transfor to rotor space \n \n H_all = self.H( q ) + np.dot( B , np.dot( self.Ia , B.T ) ) \n \n return H_all", "def H(self) -> BaseMatrix:", "def H(self) -> BaseMatrix:", "def gauss_seidel(self):\n for i in range(1,self.size[0]-1):\n for j in range(1,self.size[1]-1):\n for k in range(1,self.size[2]-1):\n 
self.A[(i,j,k)] = ((1/6)*(self.A[(i+1,j,k)] + self.A[(i-1,j,k)] + self.A[(i,j+1,k)] + self.A[(i,j-1,k)] + self.A[(i,j,k+1)] + self.A[(i,j,k-1)] + self.J[(i,j,k)]) - self.A[(i,j,k)])*self.omega + self.A_0[(i,j,k)]", "def compute_rotation(self):\n if self.predictions[self.iteration][0] == 90.0 or self.predictions[self.iteration][0] == 270.0:\n self.rotation = 20\n self.initial_adjust = True\n return\n\n if self.iteration == 0 or (self.iteration == 1 and self.initial_adjust):\n self.rotation = rotate.get_90_deg_rotation(self.predictions[self.iteration])\n elif self.iteration == 1 or (self.iteration == 2 and self.initial_adjust):\n self.rotation = rotate.get_45_deg_rotation(self.predictions, self.current_position)\n elif self.iteration >= 2 or (self.iteration > 2 and self.initial_adjust):\n self.rotation = rotate.get_fine_rotation(self.iteration)", "def gilt_culg(As, culg, pars, square=False, Rps=None):\n # The philosophy here is to always perform the Gilt the same way,\n # but first permute the tensors into a suitable order according to\n # the culg, and then revert the permutation.\n cube, leg = culg\n # Permute to correct cube.\n As = permute_As(As, cube=cube, Rps=Rps)\n\n if square:\n As, done, err = apply_gilt_squares(As, pars, leg=leg, Rps=Rps)\n else:\n As, done, err = apply_gilt_cubes(As, pars, leg=leg, Rps=Rps)\n\n # Reverse permutation.\n As = permute_As(As, cube=cube, inverse=True, Rps=Rps)\n return As, done, err", "def get_Grotations(self, x):\n xsh = x.get_shape().as_list()\n angles = [0.,np.pi/2.,np.pi,3.*np.pi/2.]\n rx = []\n for i in range(4):\n # Z4 rotations about the z axis\n perm = [1,0,2,3]\n y = tf.transpose(x, perm=perm)\n y = tf.contrib.image.rotate(y, angles[i])\n y = tf.transpose(y, perm=perm)\n # Rotations in the quotient space (sphere S^2)\n # i) Z4 rotations about y axis\n for j in range(4):\n perm = [2,1,0,3]\n z = tf.transpose(y, perm=perm)\n z = tf.contrib.image.rotate(z, angles[-j])\n z = tf.transpose(z, perm=perm)\n \n rx.append(z)\n # ii) 2 rotations to the poles about the x axis\n perm = [0,2,1,3]\n z = tf.transpose(y, perm=perm)\n z = tf.contrib.image.rotate(z, angles[3])\n z = tf.transpose(z, perm=perm)\n rx.append(z)\n\n z = tf.transpose(y, perm=perm)\n z = tf.contrib.image.rotate(z, angles[1])\n z = tf.transpose(z, perm=perm)\n rx.append(z)\n\n return rx", "def reduce_kcol_to_3col(G, k):\n\n G, H = prepare_grid(G)\n print(\"grid prepared\")\n N = len(G)\n H = create_kgrid(H, N, k)\n print(\"grid created\")\n H = add_pheripherals_per_edge(G.edges, H, k)\n print(\"peripherals added\")\n\n return H", "def apply_rotation(self, eta=0.0, phi=0.0, theta=0.0):\n \n new_rotation_matrix = self.rotation_elements( eta, phi, theta )\n \n #self.rotation_matrix_exp = np.dot( self.rotation_matrix_exp , new_rotation_matrix )\n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "def xform_homog( self , xfrmMatx ):\r\n for i in xrange( 0 , len( self.vertices ) , 3 ):\r\n self.vertX[ i : i+4 ] = apply_homog( xfrmMatx , self.vertices[ i : i+4 ] )", "def update_(self, k):\n for z in range(self.sweeps_per_update):\n new_u_grid = self.u_grid.copy()\n new_v_grid = self.v_grid.copy()\n for i in range(self.N):\n for j in range(self.N):\n\n deltaU = (self.D1*self.dt) * (self.laplacian_(self.u_grid, i, j))\\\n - self.dt * self.u_grid[i][j]*self.v_grid[i][j]**2 \\\n + self.dt * self.F*(1-self.u_grid[i][j])\n new_u_grid[i][j] += deltaU\n deltaV = (self.D2*self.dt) * (self.laplacian_(self.v_grid, i, j))\\\n + 
self.dt*self.u_grid[i][j]*self.v_grid[i][j]**2 \\\n - self.dt*(self.F+self.k)*self.v_grid[i][j]\n new_v_grid += deltaV\n self.u_grid = new_u_grid.copy()\n self.v_grid = new_v_grid.copy()\n if self.animation:\n self.fig.clear()\n plt.imshow(self.u_grid, interpolation='nearest',\n cmap='coolwarm', origin='lower')\n plt.colorbar()", "def encode(self, G, v, X, L, W0, W1, W2, W3, U1, U2, U3):\n h = [None] * (L+1)\n h[0] = np.zeros((L + 1, len(X)))\n for i in range(1, L+1):\n h[i] = np.zeros((L + 1, self.embedding_dimension))\n\n \"\"\"hN - embeddings do neighborhood dos nodes nas camadas 2..L (tem a mesma dimensão por uma questao de simplicidade,\n os 2 primeiros elementos vão ficar a 0) \"\"\"\n hN = np.zeros((L + 1, self.embedding_dimension))\n\n h[0] = np.transpose(X)\n\n self.H[0][v] = h[0]\n\n for node in range(self.nNodes):\n self.H[1][node] = ed.ReLU(np.matmul(W0, np.transpose(self.H[0][node])))\n if self.H[1][node].any(): # se nao for um vetor de zeros\n self.H[1][node] = self.H[1][node] / la.norm(self.H[1][node], 2)\n\n h[1] = self.H[1][v]\n\n for l in range(2, L + 1):\n for node in range(self.nNodes):\n \"\"\"AGGREGATE\"\"\"\n self.HN[l, node] = self.aggregateNeighborhood(G, node, G.get_neighbors(node), l)\n \"\"\"COMBINE\"\"\"\n self.H[l][node] = self.GRUCell(self.H[l - 1][node], self.HN[l, node], W1, W2, W3, U1, U2, U3)\n\n self.H[l][v] = self.H[l][v] / la.norm(self.H[l][v], 2)\n h[l] = self.H[l][v]\n\n \"\"\"z sera o embedding final, obtido atraves da funcao maxpool\"\"\"\n z = self.maxPool(h[1:], self.embedding_dimension)\n return [z]", "def rotate(u, w, th):\n ur = np.cos(th) * u + np.sin(th) * w\n wr = -np.sin(th) * u + np.cos(th) * w\n return ur, wr", "def perform_gauss_jordan_elimination_(m, show):\n if show:\n print(\"Initial State\")\n print_matrix(m)\n \n r = 0\n c = 0\n rows, cols = len(m), len(m[0])\n\n if show:\n print(\"rows: %s cols: %s\"%(rows, cols))\n\n while True:\n if show:\n print(\"r %s c %s\"%(r, c))\n\n ## Check Pivot\n _swap = False\n if m[r,c] == 0:\n for i in range(r+1,rows):\n if m[i,c] == 1:# If new pivot found... 
swap\n if show:\n print(\"Swapping %s %s and %s %s\"%(r, m[r], i, m[i]))\n m[[i,r]] = m[[r,i]] ## Swap\n _swap = True\n if show:\n print_matrix(m)\n break # No more swapping in this column\n if not _swap: ## No swap, move to the next column, same row\n c+=1\n\n if m[r,c] == 1:\n ## XOR\n for i in range(rows):\n indexes = np.setdiff1d(np.where(m[:,c] == 1),r) # Get all the ones to XOR in the same column\n for i in indexes:\n m[i] = np.bitwise_xor(m[i],m[r]) # Bitwise XOR\n if show:\n print(\"XOR Row %s: %s into Row %s: %s\"%(r, m[r], i, m[i]))\n if show:\n print_matrix(m)\n\n ## Increase row and column\n r+=1\n c+=1\n\n ## break condition if all rows or all columns (except the augmented column) are treated\n if r == rows or c >= cols-1:\n break\n\n if show:\n print(\"Final State\")\n print_matrix(m)\n \n return m", "def _AffineGrothendieck(self, w,m):\n return sum(self._G_to_km_on_basis_single_level(w,j) for j in range(w.length(),m+1))", "def FormG():\n for i in range(2):\n for j in range(2):\n G[i, j] = 0.0\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])", "def Gram_Schmidt1(vecs, row_wise_storage=True):\n from numpy.linalg import inv\n from math import sqrt\n\n vecs = asarray(vecs) # transform to array if list of vectors\n m, n = vecs.shape\n basis = array(transpose(vecs))\n eye = identity(n).astype(float)\n\n basis[:,0] /= sqrt(dot(basis[:,0], basis[:,0]))\n for i in range(1, m):\n v = basis[:,i]/sqrt(dot(basis[:,i], basis[:,i]))\n U = basis[:,:i]\n P = eye - dot(U, dot(inv(dot(transpose(U), U)), transpose(U)))\n basis[:, i] = dot(P, v)\n basis[:, i] /= sqrt(dot(basis[:, i], basis[:, i]))\n\n return transpose(basis) if row_wise_storage else basis", "def jacking_calculations(dictionary, view):\n f_list = []\n s_list = []\n corners = ['Left Front', 'Right Front', 'Left Rear', 'Right Rear']\n for corner in corners:\n # Establishing Instant Center Left Front\n ic_direction, ic_point = plane_intersection_line(\n plane_equation(dictionary[corner]['Upper Fore'],\n dictionary[corner]['Upper Aft'],\n dictionary[corner]['Upper Out']),\n plane_equation(dictionary[corner]['Lower Fore'],\n dictionary[corner]['Lower Aft'],\n dictionary[corner]['Lower Out']),\n dictionary[corner]['Upper Fore'],\n dictionary[corner]['Lower Fore'])\n axis = plot_line(ic_direction, ic_point, np.linspace(0, 2, 2))\n # Establishing Side View Instant Center\n ic_xz = three_d_vector_plane_intersection((axis[0][0], axis[1][0], axis[2][0]),\n (axis[0][1], axis[1][1], axis[2][1]),\n dictionary[corner]['Wheel Center'],\n np.add(np.array(dictionary[corner]\n ['Wheel Center']), np.array([1, 0, 0])),\n np.add(np.array(dictionary[corner]\n ['Wheel Center']), np.array([0, 0, 1])))\n # Establishing Front View Instant Center\n ic_yz = three_d_vector_plane_intersection((axis[0][0], axis[1][0], axis[2][0]),\n (axis[0][1], axis[1][1], axis[2][1]),\n dictionary[corner]['Wheel Center'],\n np.add(np.array(dictionary[corner]\n ['Wheel Center']), np.array([0, 1, 0])),\n np.add(np.array(dictionary[corner]\n ['Wheel Center']), np.array([0, 0, 1])))\n # Establishing Jacking Height\n y_val = dictionary['Performance Figures']['Center of Gravity'][1]\n cg_plane_points = [[1, y_val, 1], [-1, y_val, 4], [-3, y_val, 6]]\n wheel_center_ground = [(dictionary[corner]['Wheel Center'][0]),\n (dictionary[corner]['Wheel Center'][1]), 0]\n np.array(wheel_center_ground)\n jacking_height = three_d_vector_plane_intersection(wheel_center_ground,\n ic_yz, cg_plane_points[0], cg_plane_points[1],\n 
cg_plane_points[2])\n # Establishing Jacking Coefficient\n wc_jh = np.subtract(jacking_height, wheel_center_ground)\n jacking_coeff = -abs(wc_jh[2] / wc_jh[1])\n # Establishing Pitch Coefficient\n wc_icxz = np.subtract(ic_xz, dictionary[corner]['Wheel Center'])\n wc_cg = np.subtract(dictionary['Performance Figures']['Center of Gravity'],\n dictionary[corner]['Wheel Center'])\n pitch_coeff = (wc_icxz[2] / wc_icxz[0]) / (wc_cg[2] / wc_cg[0])\n if view == 'Front':\n f_list.append(jacking_coeff)\n elif view == 'Side':\n s_list.append(pitch_coeff)\n else:\n print 'Wtf, you want an isometric or something?'\n return\n if view == 'Front':\n return f_list\n elif view == 'Side':\n return s_list\n else:\n print 'view does not equal Front or Side'\n return", "def vec_rotate_g2r(al, be, ga, lon, lat, ugeo, vgeo, flag):\n\n # first get another coordinate\n if flag == 1:\n (rlon, rlat) = scalar_g2r(al, be, ga, lon, lat)\n else:\n rlon = lon\n rlat = lat\n (lon, lat) = scalar_r2g(al, be, ga, rlon, rlat)\n\n # then proceed...\n rad = mt.pi / 180\n al = al * rad\n be = be * rad\n ga = ga * rad\n\n rotate_matrix = np.zeros(shape=(3, 3))\n rotate_matrix[0, 0] = np.cos(ga) * np.cos(al) - np.sin(ga) * np.cos(be) * np.sin(al)\n rotate_matrix[0, 1] = np.cos(ga) * np.sin(al) + np.sin(ga) * np.cos(be) * np.cos(al)\n rotate_matrix[0, 2] = np.sin(ga) * np.sin(be)\n rotate_matrix[1, 0] = -np.sin(ga) * np.cos(al) - np.cos(ga) * np.cos(be) * np.sin(\n al\n )\n rotate_matrix[1, 1] = -np.sin(ga) * np.sin(al) + np.cos(ga) * np.cos(be) * np.cos(\n al\n )\n rotate_matrix[1, 2] = np.cos(ga) * np.sin(be)\n rotate_matrix[2, 0] = np.sin(be) * np.sin(al)\n rotate_matrix[2, 1] = -np.sin(be) * np.cos(al)\n rotate_matrix[2, 2] = np.cos(be)\n\n #rotate_matrix = np.linalg.pinv(rotate_matrix) \n \n rlat = rlat * rad\n rlon = rlon * rad\n lat = lat * rad\n lon = lon * rad\n \n # vector in Cartesian \n txg = -vgeo * np.sin(lat) * np.cos(lon) - ugeo * np.sin(lon)\n tyg = -vgeo * np.sin(lat) * np.sin(lon) + ugeo * np.cos(lon)\n tzg = vgeo * np.cos(lat)\n\n # vector in rotated Cartesian\n txr = (\n rotate_matrix[0, 0] * txg\n + rotate_matrix[0, 1] * tyg\n + rotate_matrix[0, 2] * tzg\n )\n tyr = (\n rotate_matrix[1, 0] * txg\n + rotate_matrix[1, 1] * tyg\n + rotate_matrix[1, 2] * tzg\n )\n tzr = (\n rotate_matrix[2, 0] * txg\n + rotate_matrix[2, 1] * tyg\n + rotate_matrix[2, 2] * tzg\n )\n\n # vector in rotated coordinate\n v = (\n -np.sin(rlat) * np.cos(rlon) * txr\n - np.sin(rlat) * np.sin(rlon) * tyr\n + np.cos(rlat) * tzr\n )\n u = -np.sin(rlon) * txr + np.cos(rlon) * tyr\n\n u = np.array(u)\n v = np.array(v)\n\n return (u, v)", "def houghTransform(img):\n\n #initializing the values:\n theta = np.deg2rad(np.arange(-90, 90, 1)) #initializing a vector of angles in radians\n sinTheta = np.sin(theta)\n cosinTheta = np.cos(theta)\n imgWidth = img.shape [0]\n imgHeight = img.shape [1]\n imgDiagnal = int(math.sqrt(imgWidth * imgWidth + imgHeight * imgHeight)) #get the diagonal length of the image for initializing rho\n rho = np.linspace(-imgDiagnal, imgDiagnal, imgDiagnal*2) #initializing the rho values\n\n accumulator = np.zeros((2*imgDiagnal, len(theta)))\n points = [ [ 0] * len(theta)] * (2* imgDiagnal)\n\n\n are_edges = img > 5 if True else img < value_threshold\n yXis, xXis = np.nonzero(are_edges)\n\n\n\n\n #doing hough transform\n for i in range(len(xXis)):\n currentX = xXis[i]\n currentY = yXis[i]\n\n #loop through all possible angles\n\n currentRhos = [] #have a rhos to check duplicate x, y\n for j in range(len(theta)):\n currentRho = 
imgDiagnal + int(currentX * cosinTheta[j] + currentY*sinTheta[j])\n\n\n if points[currentRho][j] == 0 :\n points[currentRho][j] = [ ] * len(theta)\n\n if not currentRho in currentRhos:\n currentRhos.append(currentRho)\n points[currentRho][j].append([currentX, currentY])\n\n\n accumulator[currentRho, j] += 1\n\n\n return accumulator, points, theta, rho", "def htm4(joint_rotations):\n # H0_3\n h0_3 = htm0_3(joint_rotations)\n\n # H3_4\n r3_4 = rot_z(joint_rotations[3])\n x3_4 = a4 * np.cos(np.radians(joint_rotations[3]))\n y3_4 = a4 * np.sin(np.radians(joint_rotations[3]))\n z3_4 = 0\n d3_4 = transl(x3_4, y3_4, z3_4)\n h3_4 = htm(r3_4, d3_4)\n h0_4 = np.dot(h0_3, h3_4)\n return h0_4" ]
[ "0.73926467", "0.5681619", "0.549413", "0.5357701", "0.53283906", "0.5293677", "0.520022", "0.519057", "0.5088467", "0.50847656", "0.50769705", "0.50462455", "0.50274307", "0.49993315", "0.49852484", "0.4983559", "0.4973589", "0.49634326", "0.49592367", "0.49521154", "0.49075732", "0.49026072", "0.48892424", "0.48640722", "0.48505297", "0.48495096", "0.48490933", "0.4829783", "0.48280364", "0.48143855", "0.48109263", "0.47829124", "0.47795656", "0.4776198", "0.47615218", "0.47566462", "0.47477996", "0.47464088", "0.47443572", "0.47434446", "0.47401235", "0.47155166", "0.47067806", "0.47057557", "0.46946964", "0.4690946", "0.467776", "0.4670128", "0.46675545", "0.46657962", "0.46624312", "0.4659712", "0.46479756", "0.46454945", "0.46435854", "0.46332765", "0.463198", "0.46231765", "0.4621002", "0.46172643", "0.4616753", "0.46106848", "0.4601205", "0.46004027", "0.4599053", "0.45966223", "0.45916033", "0.45876288", "0.45814964", "0.45747462", "0.45738983", "0.45728537", "0.45728537", "0.45714095", "0.45686874", "0.45679307", "0.45540744", "0.45462638", "0.45457053", "0.45457053", "0.45454264", "0.45454264", "0.4543472", "0.45429075", "0.4542333", "0.4539325", "0.453787", "0.4537361", "0.45290568", "0.452429", "0.45190534", "0.45181888", "0.45134357", "0.45128316", "0.4506107", "0.4503916", "0.44965458", "0.449514", "0.44926152", "0.44906428" ]
0.7330481
1
Applies the Givens rotations stored in the vectors cs and sn to the vector H_col. Then constructs a new Givens rotation that zeroes out the subdiagonal entry H_col[k + 1], yielding the corresponding column of the R factor in H's QR decomposition. Returns the new column of R along with the updated Givens factors.
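(Editorial note, not part of the dataset row: written out, the 2-by-2 rotation this description refers to acts on a pair of neighboring entries (v_1, v_2) as below, annihilating the second entry. The c and s shown here are exactly the cs and sn values computed by the helper givens_rotation that the snippet below calls.)

$$\begin{pmatrix} c & -s \\ s & c \end{pmatrix}\begin{pmatrix} v_1 \\ v_2 \end{pmatrix} = \begin{pmatrix} c\,v_1 - s\,v_2 \\ s\,v_1 + c\,v_2 \end{pmatrix} = \begin{pmatrix} \sqrt{v_1^2 + v_2^2} \\ 0 \end{pmatrix}, \qquad c = \frac{v_1}{\sqrt{v_1^2 + v_2^2}}, \quad s = \frac{-v_2}{\sqrt{v_1^2 + v_2^2}}.$$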
def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray, k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]: # This call successively applies each of the # Givens rotations stored in givens[:, :k] to H_col. H_col = apply_rotations(H_col, givens, k) cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1]) givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k) givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k) r_k = cs_k * H_col[k] - sn_k * H_col[k + 1] R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k) R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.) return R_col, givens
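(Editorial note, not part of the dataset row: a minimal usage sketch of the function above. It assumes the helpers apply_rotations and givens_rotation from the same source, which appear among the retrieved snippets in this sample, are in scope, and it targets the older JAX releases that still ship jax.ops.index_update, matching the API used throughout this row; the size n and the random test matrix are illustrative assumptions.)

import jax
import jax.numpy as jnp

n = 3
key = jax.random.PRNGKey(0)
# A random (n + 1) x n upper-Hessenberg matrix: entries below the first
# subdiagonal are zero, as produced by an Arnoldi-style process.
H = jnp.triu(jax.random.normal(key, (n + 1, n)), k=-1)

givens = jnp.zeros((2, n))  # row 0 holds the cs factors, row 1 the sn factors
R = jnp.zeros((n + 1, n))
for k in range(n):
    # Rotate away the subdiagonal entry of column k; the new (cs, sn)
    # pair is recorded in `givens` and reused on later columns.
    R_col, givens = apply_givens_rotation(H[:, k], givens, k)
    R = jax.ops.index_update(R, jax.ops.index[:, k], R_col)
# The top n x n block of R is now the upper-triangular QR factor of H.

Updating the factorization one column at a time like this avoids re-running a full QR decomposition as the Hessenberg matrix grows.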
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> jax.ShapedArray:\n rotation_carry = (H_col, 0, k, givens)\n\n def loop_condition(carry):\n i = carry[1]\n k = carry[2]\n return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)\n\n def apply_ith_rotation(carry):\n H_col, i, k, givens = carry\n cs = givens[0, i]\n sn = givens[1, i]\n H_i = cs * H_col[i] - sn * H_col[i + 1]\n H_ip1 = sn * H_col[i] + cs * H_col[i + 1]\n H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i)\n H_col = jax.ops.index_update(H_col, jax.ops.index[i + 1], H_ip1)\n return (H_col, i + 1, k, givens)\n\n rotation_carry = jax.lax.while_loop(loop_condition,\n apply_ith_rotation,\n rotation_carry)\n H_col = rotation_carry[0]\n return H_col", "def chi_rs_gmat(self, nu_s, temperature, vs=3, js=70, branches=(0,),\n del_Tv=0.):\n # Construct the v-branch-independent relaxation rate matrix\n gamma_mat = self.relax_mat(temperature, js)\n\n # For different v-branch combinations\n _js = np.arange(js)\n chi_rs = np.zeros_like(nu_s, dtype='complex128')\n for _branch in branches:\n for _v in np.arange(vs):\n # Calculate line positions\n nu_raman = self.ls_factors.line_pos(_v, _js, branch=_branch)\n\n # Construct the K_mat\n K_mat = np.diag(nu_raman) + gamma_mat*1j\n\n # Solve eigenvalue problem of K_mat\n eigvals, eigvec = np.linalg.eig(K_mat)\n eigvec_inv = np.linalg.inv(eigvec)\n\n # Compute the resonant intensity\n del_pop = self.ls_factors.pop_factor(temperature, _v, _js,\n branch=_branch,\n del_Tv=del_Tv)\n d = (self.trans_amp(_v, _js, branch=_branch))**0.5\n _term_l = d @ eigvec\n _term_r = eigvec_inv @ np.diag(del_pop) @ d\n _term = _term_l*_term_r\n\n for _j in _js:\n _term_b = ((-nu_s + np.real(eigvals[_j]))**2\n + np.imag(eigvals[_j])**2)\n # A 1/2 factor is necessary to match the magnitude from\n # isolated line assumption\n chi_rs += 1/2*_term[_j]*np.conj(\n -nu_s + eigvals[_j])/_term_b\n\n # A factor of c [cm/s] needs to be considered to convert cm^-1 to s^-1\n # by 2*pi*c\n return chi_rs/2/np.pi/self.C", "def compute_S_r_kick(self, r, z, qOc, **kwargs):\n # Calculate the convolution quantities we need\n kr_cross_r = einsum('r, p -> rp', self.kr, r)\n # z does not change between S_r and S_r-inverse, so only need to compute once\n if kwargs['inverse'] == False:\n self.kz_cross_z = einsum('z, p -> zp', self.kz, z)\n self.convolved_sin = einsum('zp, z -> zp', sin(self.kz_cross_z), self.shape_function_z)\n self.d_convolved_sin_dz = einsum('zp, z -> zp', cos(self.kz_cross_z), self.kz*self.shape_function_z)\n # same here\n self.delta_r = np.ones(np.size(r)) * self.ptcl_width_r\n self.delta_u = einsum('r, p -> rp', self.kr, self.delta_r)\n\n self.tanhz = -np.tanh(((z-self.z_mean)**2 - self.z_mean**2)*self.tanh_width**2)\n\n j1 = self.convolved_j1(kr_cross_r, self.delta_u)\n int_j1 = einsum('rp, r -> rp', self.int_convolved_j1(kr_cross_r, self.delta_u), self.oneOkr)\n\n # Calculate Q_r for each mode\n modeQr = self.omegaOtwokz * (self.dc_coords[:,:,1] - self.omega_coords[:,:,1])\n\n # We dress the charge instead of the fields proper\n dressed_charge = self.tanhz*qOc\n\n kick_z = einsum('zr, rp, zp, p -> p', modeQr, int_j1, self.d_convolved_sin_dz, dressed_charge)\n kick_r = einsum('zr, rp, zp, p -> p', modeQr, j1, self.convolved_sin, dressed_charge)\n dFrdQ = einsum('rp, zp, p -> zr', int_j1, self.convolved_sin, dressed_charge)\n\n kick_Q0 = dFrdQ*self.omegaOtwokz\n kick_Qomega = -dFrdQ*self.omegaOtwokz\n\n return kick_z, kick_r, kick_Q0, kick_Qomega", "def gsis(snp_mat, qr_smy_mat, 
proj_mat):\n\n # Set up\n n, g = snp_mat.shape\n\n # calculate the hat matrix\n zx_mat = np.dot(proj_mat, snp_mat).T\n # inv_q_zx = np.sum(zx_mat*zx_mat, axis=1)**(-1)\n q_zx = np.sum(zx_mat*zx_mat, axis=1)\n if np.min(q_zx) == 0:\n q_zx = q_zx + 0.000001\n inv_q_zx = q_zx**(-1)\n\n w, v = eig(qr_smy_mat)\n w = np.real(w)\n w[w < 0] = 0\n w_diag = np.diag(w**(1/2))\n sq_qr_smy_mat = np.dot(np.dot(v, w_diag), v.T)\n sq_qr_smy_mat = np.real(sq_qr_smy_mat)\n g_stat = np.sum(np.dot(zx_mat, sq_qr_smy_mat)**2, axis=1)*inv_q_zx\n\n # approximate of chi2 distribution\n k1 = np.mean(g_stat)\n k2 = np.var(g_stat)\n k3 = np.mean((g_stat-k1)**3)\n a = k3/(4*k2)\n b = k1-2*k2**2/k3\n d = 8*k2**3/k3**2\n g_pv = 1-chi2.cdf((g_stat-b)/a, d)\n g_pv_log10 = -np.log10(g_pv)\n\n return g_pv_log10, g_stat", "def cer(r: list, h: list):\n # initialisation\n import numpy\n d = numpy.zeros((len(r) + 1) * (len(h) + 1), dtype=numpy.uint16)\n d = d.reshape((len(r) + 1, len(h) + 1))\n for i in tqdm(range(len(r) + 1)):\n for j in range(len(h) + 1):\n if i == 0:\n d[0][j] = j\n elif j == 0:\n d[i][0] = i\n # computation\n for i in tqdm(range(1, len(r) + 1)):\n for j in range(1, len(h) + 1):\n if r[i - 1] == h[j - 1]:\n d[i][j] = d[i - 1][j - 1]\n else:\n substitution = d[i - 1][j - 1] + 1\n insertion = d[i][j - 1] + 1\n deletion = d[i - 1][j] + 1\n d[i][j] = min(substitution, insertion, deletion)\n return d[len(r)][len(h)] / float(len(r))", "def kuzmin_rotation(R,c,M,G=astronomicalG):\n return np.sqrt(2*G*np.power(10.,M)*R*R*np.power(c*c+R*R,-1.5))", "def gram_schmidt(S, start_col=0):\n Q = S.copy()\n k = S.shape[1]\n assert k > 1 and start_col >= 0\n start_col = min(S.shape[1], start_col)\n if Q.dtype != np.float32 and Q.dtype != np.float64:\n Q = Q.astype(np.float64)\n\n if start_col == 0:\n Q[:, 0] = normalize_vector(Q[:, 0])\n\n uu = []\n for i in range(start_col + 1, k):\n Q[:, i] = S[:, i]\n for j in range(0, i):\n u = Q[:, j]\n v = Q[:, i]\n if len(uu) <= j:\n uu.append(u.T.dot(u))\n Q[:, i] -= u * (u.T.dot(v) / uu[j])\n\n Q[:, i] = normalize_vector(Q[:, i])\n # Re-project Q[:, i] to the orthogonal complement of Q[:, :i] to make sure they stay orthogonal.\n Q[:, i] = Q[:, i] - Q[:, :i].dot(Q[:, :i].T.dot(Q[:, i]))\n\n return Q", "def matrix_K2(l, omega, S, cn, csn, rhos, rho):\n zt = omega * S / cn['t']\n xt = omega * S / csn['t']\n row1 = np.array((- w21(l, xt), d23(l, xt)))\n row2 = np.array((- w41(l, xt, zt, rhos, rho), d43(l, xt, zt, rhos, rho)))\n return np.array((row1, row2))", "def rotate_components(phi, gamma = 1.0, q = 50, tol = 1e-6):\n p,k = phi.shape\n r = np.eye(k)\n d = 0\n cnt = 0\n for i in np.arange(q):\n cnt = cnt + 1\n d_old = d\n Lambda = np.dot(phi, r)\n u,s,vh = np.linalg.svd(np.dot(\n phi.T,np.asarray(Lambda)**3 - (gamma/p) * np.dot(\n Lambda, np.diag(np.diag(np.dot(Lambda.T,Lambda))))))\n print(\"Matrix u: \")\n print(u)\n print(\"Matrix s: \")\n print(s)\n print(\"Matrix vh: \")\n print(vh)\n r = np.dot(u, vh)\n d = np.sum(s)\n if d_old != 0 and d / d_old < 1 + tol:\n break\n print(\"Trace rotate_components_START\")\n print(\"Rotation matrix: \")\n print(r)\n print(\"Loop number: \" + str(cnt))\n print(\"Trace rotate_components_END\")\n return np.dot(phi, r)", "def lherzolite():\n\n rho = 3270.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2\n C[1,0] = C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; 
C[2,4] = 2.38; C[2,5] = -0.12\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83\n\n return C, rho", "def compute_S_z_kick(self, r, z, qOc, **kwargs):\n # Calculate the convolution quantities we need\n\n kz_cross_z = einsum('z, p -> zp', self.kz, z)\n convolved_cos = einsum('zp, z -> zp', cos(kz_cross_z), self.shape_function_z)\n int_convolved_cos_dz = einsum('zp, z -> zp', sin(kz_cross_z), self.shape_function_z*self.oneOkz)\n\n # r does not change between S_z and S_z-inverse, so only need to compute once\n if kwargs['inverse'] == False:\n self.kr_cross_r = einsum('r, p -> rp', self.kr, r)\n self.delta_r = np.ones(np.size(r)) * self.ptcl_width_r\n self.delta_u = einsum('r, p -> rp', self.kr, self.delta_r)\n self.j0 = self.convolved_j0(self.kr_cross_r, self.delta_u)\n self.d_convolved_j0_dr = einsum('rp, r -> rp',\n -self.convolved_j1(self.kr_cross_r, self.delta_u), self.kr)\n\n # Calculate Q_z for each mode\n modeQz = self.omegaOtwokr * (self.dc_coords[:,:,1] + self.omega_coords[:,:,1])\n\n self.tanhz = -np.tanh(((z - self.z_mean) ** 2 - self.z_mean ** 2) * self.tanh_width ** 2)\n # We dress the charge instead of the fields proper\n dressed_charge = self.tanhz*qOc\n\n kick_z = einsum('zr, rp, zp -> p', modeQz, self.j0, convolved_cos)*dressed_charge\n kick_r = einsum('zr, rp, zp -> p', modeQz, self.d_convolved_j0_dr, int_convolved_cos_dz)*dressed_charge\n\n dFzdQ = einsum('rp, zp, p -> zr', self.j0, int_convolved_cos_dz, dressed_charge)\n\n kick_Q0 = dFzdQ*self.omegaOtwokr\n kick_Qomega = dFzdQ*self.omegaOtwokr\n\n return kick_z, kick_r, kick_Q0, kick_Qomega", "def systematize_algorithm(H: np.array) -> Tuple[np.array, np.array, np.array]:\n n, c = H.shape\n m = np.abs(n-c)\n\n G_s = np.zeros((m, c), dtype=int)\n G_s[:, :m] = np.identity(m)\n\n H_s, permutation = systematize_matrix(H, post_system=True)\n\n rev_permutation = reverse_permutation(permutation)\n\n P = H_s[:, :m]\n\n G_s[:, m:] = P.T\n\n G = G_s[:, rev_permutation]\n\n return G, G_s, H_s", "def transform_to_rotating_frame(H, U, D):\n \n #Determine the effective hamiltonian in the rotating frame\n Heff = lambda t: np.conj(U(t).T) @ H(t) @ U(t) + D\n \n return Heff", "def build_Rdiagnol_block(self, R):\n N = self.N # number of MPC steps\n num_output = self.num_output\n \n row_list = [] # reocrd the every row in B_hat\n zero = Variable(torch.zeros(num_output, num_output*(N-1)))\n zero = self.vari_gpu(zero)\n row_long = torch.cat([zero, R, zero],1) # [0 0 ... 
Q 0 0 ...]\n \n for i in range(N, 0, -1):\n row_list.append(row_long[:, (i-1)*num_output : (i+N-1)*num_output])\n return torch.cat(row_list,0)", "def computeV(H):\n # Pseudo-inverse of H\n #V = np.linalg.inv(H) # Inverse\n V = np.linalg.pinv(H) # Pseudo-inverse\n \n # Normalise columns\n [m,n] = V.shape\n for i in range(n):\n V[:,i] = V[:,i]/np.linalg.norm(V[:,i])\n \n return V", "def test_squeezing(self, tol):\n r = 0.543\n phi = 0.123\n S = symplectic.squeezing(r, phi)\n out = S @ S.T\n\n # apply to an identity covariance matrix\n rotation = np.array(\n [[np.cos(phi / 2), -np.sin(phi / 2)], [np.sin(phi / 2), np.cos(phi / 2)]]\n )\n expected = rotation @ np.diag(np.exp([-2 * r, 2 * r])) @ rotation.T\n assert np.allclose(out, expected, atol=tol, rtol=0)", "def harzburgite():\n\n rho = 3200.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 226.5; C[0,1] = 75.34; C[0,2] = 74.73; C[0,3] = -0.27; C[0,4] = -2.00; C[0,5] = 1.85\n C[1,0] = C[0,1]; C[1,1] = 242.8; C[1,2] = 73.68; C[1,3] = -3.6; C[1,4] = -1.91; C[1,5] = 4.14\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.; C[2,3] = -4.36; C[2,4] = -4.27; C[2,5] = -0.27\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.75; C[3,4] = 1.81; C[3,5] = -2.19\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 76.94; C[4,5] = -1.88\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.15\n\n return C, rho", "def _r270(self,m):\n return np.rot90(m,3)", "def givens_rotation_matrix(i, j, theta, N):\n R = np.identity(N)\n c = np.cos(theta)\n s = np.sin(theta)\n R[i, i] = c\n R[j, j] = c\n R[i, j] = -s\n R[j, i] = s\n return R", "def quartz():\n\n rho = 2649.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 86.9; C[0,1] = 7.6; C[0,2] = 12.; C[0,3] = 17.8; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 86.9; C[1,2] = 12.; C[1,3] = -17.8; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 106.4; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 59.5; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 59.5; C[4,5] = -17.8\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 39.6\n\n return C, rho", "def _rmat_s_helper(chi=None, omes=None, out=None):\n if chi is not None:\n cx = np.cos(chi)\n sx = np.sin(chi)\n else:\n cx = 1.0\n sx = 0.0\n\n if omes is not None:\n # omes is an array (vector): output is as many rotation matrices as omes entries.\n n = len(omes)\n out = out if out is not None else np.empty((n,3,3), dtype=omes.dtype)\n\n if chi is not None:\n # ome is array and chi is a value... 
compute output\n cx = np.cos(chi)\n sx = np.sin(chi)\n for i in range(n):\n cw = np.cos(omes[i])\n sw = np.sin(omes[i])\n out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw\n out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw\n out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw\n else:\n # omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations.\n # cx IS 1.0, sx IS 0.0\n for i in range(n):\n cw = np.cos(omes[i])\n sw = np.sin(omes[i])\n out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw\n out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.\n out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw\n else:\n # omes is None, results should be equivalent to an array with a single element 0.0\n out = out if out is not None else np.empty((1, 3, 3))\n if chi is not None:\n # ome is 0.0. cw is 1.0 and sw is 0.0\n cx = np.cos(chi)\n sx = np.sin(chi)\n out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.\n out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx\n out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx\n else:\n # both omes and chi are None... return a single identity matrix.\n out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.\n out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0.\n out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1.\n\n\n return out", "def GramSchmidt(A):\r\n n = len(A)\r\n # Finds the number of lists in the list, which is also the number of rows\r\n m = len(A[0])\r\n # Finds the number of elements in list one, which is also the number of columns\r\n V = A\r\n R = [[0]*n for i in range(n)]\r\n # creates an empty list R with dimensions of n rows and n columns\r\n Q = [[0]*m for i in range(n)]\r\n # creates an empty list Q with dimensions of n rows and m columns\r\n inputStatus = True\r\n # inputStatus is true at this point until proven otherwise\r\n for i in range(n):\r\n for j in range(m):\r\n if ((type(A[i][j]) != int) and (type(A[i][j]) != float) and (type(A[i][j]) != complex)):\r\n inputStatus = False\r\n print(\"Invalid Input\")\r\n # this checks each value in the matrix A to make sure it is some time of number, if it isnt a number then the input status will be false \r\n # if the input status is false then an error message will be displayed stating that this is an invalid input\r\n if inputStatus == True:\r\n # if the given list does not fall under the previous if statement then the input status will continue to be true and we can continue to find the QR factorization \r\n for i in range(n):\r\n # for loop which continues as long as there are still lists in A \r\n R[i][i] = norm(V[i])\r\n # Creates the border for the upper triangle matrix R, where each value in the diagonal is the 2 norm of the corresponding vector in the original matrix A \r\n Q[i] = unit(V[i])\r\n # Each vector in Q is the unit vector of the corresponding vector in A \r\n for j in range(i+1,n):\r\n # the position j will be 1 more than the position i \r\n R[j][i] = dot(Q[i],V[j])\r\n # The element in R[i+1][i] is the dot product of Q[i] and V[i+1] \r\n temp = scalarmul(R[j][i],Q[i])\r\n # This is the scalar multiplication of R[i+1][i] and Q[i] which will be labeled as temp \r\n V[j] = subtract(V[j],temp)\r\n # V[j] is the difference between the original V[j] and temp \r\n return[Q,R]", "def calc_vcirc(r,menc,G=1.):\n if G is None: G = 1.\n return np.sqrt(G*menc/r)", "def toRot(q):\n R = SX.zeros(3, 3)\n qi = q[0]; qj = q[1]; qk = q[2]; qr = q[3]\n R[0, 0] = 1. - 2. * (qj * qj + qk * qk);\n R[0, 1] = 2. 
* (qi * qj - qk * qr);\n R[0, 2] = 2. * (qi * qk + qj * qr)\n R[1, 0] = 2. * (qi * qj + qk * qr);\n R[1, 1] = 1. - 2. * (qi * qi + qk * qk);\n R[1, 2] = 2. * (qj * qk - qi * qr)\n R[2, 0] = 2. * (qi * qk - qj * qr);\n R[2, 1] = 2. * (qj * qk + qi * qr);\n R[2, 2] = 1. - 2. * (qi * qi + qj * qj)\n\n return R", "def reduce_kcol_to_3col(G, k):\n\n G, H = prepare_grid(G)\n print(\"grid prepared\")\n N = len(G)\n H = create_kgrid(H, N, k)\n print(\"grid created\")\n H = add_pheripherals_per_edge(G.edges, H, k)\n print(\"peripherals added\")\n\n return H", "def hornblende():\n\n rho = 3200.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 116.; C[0,1] = 49.9; C[0,2] = 61.4; C[0,3] = 0.; C[0,4] = 4.3; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 159.7; C[1,2] = 65.5; C[1,3] = 0.; C[1,4] = -2.5; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 191.6; C[2,3] = 0.; C[2,4] = 10.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 57.4; C[3,4] = 0.; C[3,5] = -6.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 31.8; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 36.8\n\n return C, rho", "def gramschmidt(A):\r\n _, k = A.shape\r\n\r\n # first basis vector\r\n Q = A[:, [0]] / np.linalg.norm(A[:, 0])\r\n for j in range(1, k):\r\n # orthogonal projection, loop-free implementation\r\n q = A[:, j] - np.dot(Q, np.dot(Q.T, A[:, j]))\r\n\r\n # check premature termination\r\n nq = np.linalg.norm(q)\r\n if nq < 1e-9 * np.linalg.norm(A[:, j]):\r\n break\r\n # add new basis vector as another column of Q\r\n Q = np.column_stack([Q, q / nq])\r\n return Q", "def rotation(X, Y, C, S) :\n Xrot = X*C + Y*S \n Yrot = Y*C - X*S \n return Xrot, Yrot", "def svd_compress_gs(mat, k):\n U, singular_vals, V = np.linalg.svd(mat)\n rank = len(singular_vals)\n print(\"Image rank %r\" % rank)\n if k > rank:\n print(\"k is larger than rank of image %r\" % rank)\n return mat\n # take columns less than k from U\n U_p = U[:, :k]\n # take rows less than k from V\n V_p = V[:k, :]\n # build the new S matrix with top k diagnal elements\n S_p = np.zeros((k, k), mat.dtype)\n for i in range(k):\n S_p[i][i] = singular_vals[i]\n print(\"U_p shape {0}, S_p shape {1}, V_p shape {2}\".format(\n U_p.shape, S_p.shape, V_p.shape))\n compressed = np.dot(np.dot(U_p, S_p), V_p)\n ss = ssim(mat, compressed,\n dynamic_range=compressed.max() - compressed.min())\n print(\"Strucural similarity: %r\" % ss)\n return U_p, S_p, V_p, ss", "def get_su_eig(self, delcc):\n pc = SimpleNamespace()\n h = self.h\n if self.rbsize:\n self._inv_mrb()\n if h:\n pc.G = h\n pc.A = h * h / 3\n pc.Ap = h / 2\n if self.unc:\n pv = self._el\n else:\n pv = np.ix_(self._el, self._el)\n if self.m is not None:\n self.m = self.m[pv]\n self.k = self.k[pv]\n self.b = self.b[pv]\n self.kdof = self.nonrf[self._el]\n self.ksize = self.kdof.size\n\n self._el = np.arange(self.ksize) # testing ...\n self._rb = np.arange(0)\n\n if self.elsize:\n self._inv_m()\n A = self._build_A()\n eig_info = eigss(A, delcc)\n pc.wn = eig_info.wn\n pc.zeta = eig_info.zeta\n pc.eig_success = eig_info.eig_success\n if h:\n self._get_complex_su_coefs(pc, eig_info.lam, h)\n self._add_partition_copies(pc, eig_info.lam, eig_info.ur, eig_info.ur_inv)\n return pc", "def svd_compress_gs(mat, k):\n U, singular_vals, V = np.linalg.svd(mat)\n rank = len(singular_vals)\n print(\"Image rank %r\" % rank)\n if k > rank:\n print(\"k is larger than rank of image %r\" % rank)\n return mat\n # take columns less 
than k from U\n U_p = U[:, :k]\n # take rows less than k from V\n V_p = V[:k, :]\n # build the new S matrix with top k diagnal elements\n S_p = np.zeros((k, k), mat.dtype)\n for i in range(k):\n S_p[i][i] = singular_vals[i]\n print(\"U_p shape {0}, S_p shape {1}, V_p shape {2}\".format(\n U_p.shape, S_p.shape, V_p.shape))\n compressed = np.dot(np.dot(U_p, S_p), V_p)\n ss = ssim(mat, compressed,\n dynamic_range=compressed.max() - compressed.min())\n print(\"Strucural similarity: %r\" % ss)\n return U_p, S_p, V_p", "def get_rcs():\n kk = np.loadtxt(source+\"/kids_data/rcslens2.csv\", delimiter=\",\",\n skiprows=1, max_rows=sample)\n global maxra\n maxra = max(kk[:sample, 0])\n global minra\n minra = min(kk[:sample, 0])\n global maxdec\n maxdec = max(kk[:sample, 1])\n global mindec\n mindec = min(kk[:sample, 1])\n global bsize\n bsize = abs(max(maxra, maxdec) - min(mindec, minra))\n coords = np.column_stack([kk[:sample, 0], kk[:sample, 1]])\n global SIZE\n SIZE = len(coords)\n print(maxra, maxdec, minra, mindec, SIZE)\n ctree = cKDTree(coords)\n # gamma_shear = -k[:,2]*np.cos\n return ctree, kk[:sample, 2], kk[:sample,\n 3], kk[:sample, 4], kk[:sample, 5]", "def wer(r, h):\n # initialisation\n import numpy\n d = numpy.zeros((len(r)+1)*(len(h)+1), dtype=numpy.uint8)\n d = d.reshape((len(r)+1, len(h)+1))\n for i in range(len(r)+1):\n for j in range(len(h)+1):\n if i == 0:\n d[0][j] = j\n elif j == 0:\n d[i][0] = i\n\n # computation\n for i in range(1, len(r)+1):\n for j in range(1, len(h)+1):\n if r[i-1] == h[j-1]:\n d[i][j] = d[i-1][j-1]\n else:\n substitution = d[i-1][j-1] + 1\n insertion = d[i][j-1] + 1\n deletion = d[i-1][j] + 1\n d[i][j] = min(substitution, insertion, deletion)\n\n return d[len(r)][len(h)]", "def test_squeezing_no_phi(self, tol):\n r = 0.543\n phi = 0.0\n S = symplectic.squeezing(r)\n out = S @ S.T\n\n # apply to an identity covariance matrix\n rotation = np.array(\n [[np.cos(phi / 2), -np.sin(phi / 2)], [np.sin(phi / 2), np.cos(phi / 2)]]\n )\n expected = rotation @ np.diag(np.exp([-2 * r, 2 * r])) @ rotation.T\n assert np.allclose(out, expected, atol=tol, rtol=0)", "def qri_mgs_piv( A, alpha=0.5 ):\n \n Q = numpy.array(A, dtype=float)\n m,n = Q.shape\n R = numpy.zeros( (n,n) )\n Qnorms = numpy.zeros( n )\n piv = numpy.zeros( n )\n P = numpy.eye( n )\n\n for k in range( 0, n ) :\n # step 0\n for j in range ( k, n ) :\n Qnorms[j] = numpy.linalg.norm( Q[:,j] )\n #print Qnorms\n j = numpy.where(Qnorms == max(Qnorms[k:n]))[0][0]\n Qnorms[k] = 0\n #print Q\n #print R\n #piv[k] = j\n if (j != k) :\n #print \"switching columns\", k, \"and\", j\n P[:, [j, k]] = P[:, [k, j]]\n Q[:, [j, k]] = Q[:, [k, j]]\n #if (k > 0) :\n # R[0:k, [j, k]] = R[0:k, [k, j]]\n R[:, [j, k]] = R[:, [k, j]]\n #print Q\n #print R\n\n # step 1\n vl2norm = numpy.linalg.norm( Q[:,k] )\n ii = 0\n while True : # iterate\n for i in range( 0, k ) :\n s = numpy.dot( Q[:,i], Q[:,k] )\n Q[:,k] = Q[:,k] - s * Q[:,i]\n R[i,k] = R[i,k] + s\n\n ii = ii + 1\n vlnorm = vl2norm\n vl2norm = numpy.linalg.norm( Q[:,k] )\n if (vl2norm > alpha * vlnorm) :\n #print \"on column\", k, \"used\", ii, \"orthogonalizations\"\n break\n \n # step 2\n R[k,k] = numpy.linalg.norm( Q[:,k] )\n Q[:,k] = Q[:,k] / R[k,k]\n\n # step 3\n if (k == n) :\n break\n else :\n for j in range( k+1, n ) :\n R[k,j] = numpy.dot( Q[:,k], Q[:,j] )\n Q[:,j] = Q[:,j] - R[k,j] * Q[:,k]\n\n # step 4\n #Qhat = Q[:,k]\n #Qhat2 = Qhat\n for j in range( k+1, n ) :\n ii = 0\n vl2norm = numpy.linalg.norm( Q[:,j] )\n while True : # iterate\n s = numpy.dot( Q[:,j], 
Q[:,k] )\n R[k,j] = R[k,j] + s\n Q[:,j] = Q[:,j] - s * Q[:,k]\n \n ii = ii + 1\n vlnorm = vl2norm\n vl2norm = numpy.linalg.norm( Q[:,j] )\n if (vl2norm > alpha * vlnorm) :\n #print \"on column\", j, \"used\", ii, \"orthogonalizations\"\n break\n \n return Q,R,P", "def rebuild_svd(self, U, S, V, k): # [5pts]\n \n N,D = U.shape[0],V.shape[0]\n \n \n if U.ndim == 3:\n Xrebuild = np.zeros((N,D,3))\n for i in range(3):\n U_temp = U[:,0:k,i]\n S_temp = S[:,i]\n S_temp = np.diag(S_temp[0:k])\n V_temp = V[0:k,:,i]\n Xrebuild_temp = U_temp@S_temp@V_temp\n Xrebuild[:,:,i] = Xrebuild_temp\n else:\n U_new = U[:,0:k]\n S_new = np.diag(S[0:k])\n V_new = V[0:k,:]\n Xrebuild = U_new@S_new@V_new\n\n return Xrebuild", "def jac(self, s, heff):\n if len(s)%3!=0:\n raise Exception(\"Not 3D spins\" + str(len(s)))\n N = int(len(s)/3)\n j = np.zeros((len(s),len(s)))\n for x in range(3):\n y = (x + 1) %3\n z = (x + 2) %3\n for i in range(N):\n iplus1 = (i + 1) % N\n iminus1 = (i - 1) % N\n j[N*x + i, N*y +i] = heff[N*z + i]\n j[N*x + i, N*z +i] = - heff[N*y + i]\n j[N*x + i, N*y + iplus1] = s[N*z + i]\n j[N*x + i, N*y + iminus1] = s[N*z + i]\n j[N*x + i, N*z + iplus1] = -s[N*y + i]\n j[N*x + i, N*z + iminus1] = - s[N*y + i]\n return j", "def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )", "def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho", "def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn", "def wer(r, h):\n #build the matrix\n d = numpy.zeros((len(r)+1)*(len(h)+1), dtype=numpy.uint8).reshape((len(r)+1, len(h)+1))\n for i in range(len(r)+1):\n for j in range(len(h)+1):\n if i == 0: d[0][j] = j\n elif j == 0: d[i][0] = i\n for i in range(1, len(r)+1):\n for j in range(1, len(h)+1):\n if r[i-1] == h[j-1]:\n d[i][j] = d[i-1][j-1]\n else:\n substitute = d[i-1][j-1] + 1\n insert = d[i][j-1] + 1\n delete = d[i-1][j] + 1\n d[i][j] = min(substitute, insert, delete)\n result = float(d[len(r)][len(h)]) / len(r) * 100\n result = str(\"%.2f\" % result) + \"%\"\n return d[len(r)][len(h)]\n #find out the manipulation steps", "def MakeMatrixSCS(matrix,k,l,p,q):\n # initializes and fills in matrix\n # rows for p, columns for q\n matrix = [[0 for _ in range(l+1)] for __ in range(k+1)]\n matrix[0] = [x for x in range(len(matrix[0]))]\n i = -1\n for row in matrix:\n i += 1\n row[0] = i\n \n # Go through matrix row by row, comparing p & q\n for i in range(1,k+1):\n for j in range(1,l+1):\n left = matrix[i-1][j]\n north = matrix[i][j-1]\n diag = matrix[i-1][j-1]\n \n if p[i-1] == q[j-1]:\n matrix[i][j] = diag + 1\n else:\n matrix[i][j] = min([left,north]) + 1\n \n return matrix", "def WakeVorticityFromCirculation_Cont(r_cp,Gamma_cp,R,U0,Omega,bSwirl,method='analytical',bHighThrustCorr=True):\n r_cp = np.asarray(r_cp).ravel()\n Gamma_cp = np.asarray(Gamma_cp).ravel()\n if r_cp[0]==0:\n r_cp[0]=r_cp[1]*0.5;\n # Non dimensional parameters\n k = 
Omega*Gamma_cp/(np.pi*U0**2)\n vr_bar = r_cp/R \n lambda_r = Omega*r_cp/U0\n # Finding inductions\n a,a_prime,misc= InductionsFromCirculation_VC_Cont(vr_bar,lambda_r,k,bSwirl,method=method,bHighThrustCorr=bHighThrustCorr)\n # Computing convection\n misc['Vz'] = U0*(1-2*a)\n misc['h'] = misc['Vz']*2*np.pi/(Omega*(1+2*a_prime))\n misc['a'] = a\n misc['a_prime'] = a_prime\n misc['Gamma_cp'] = Gamma_cp\n misc['r_cp'] = r_cp\n # Vortex intensities\n Gamma_tilde = Gamma_cp - np.concatenate((Gamma_cp[1:],[0])) #Gamma_tilde = Gamma_i-Gamma_{i+1}\n gamma_t = - Gamma_tilde/misc['h']\n if bSwirl:\n gamma_l = Gamma_tilde/(2*np.pi*r_cp)\n Gamma_r = - Gamma_cp[0]\n else:\n gamma_l = 0\n Gamma_r = 0\n return gamma_t,gamma_l,Gamma_r,misc", "def smith_nf(matrix):\n\n A=np.copy(matrix)\n if (np.around(A) != A).any():\n raise Exception('This function requires integer input.')\n\n # This looks much like an SVD algorithm that first bidiagonalizes\n # A by Givens rotations and then chases zeros, except for\n # the construction of the 2 by 2 elementary transformation.\n\n m, n = A.shape\n\n S = A\n U = np.eye(m)\n V = np.eye(n)\n\n # Bidiagonalize S with elementary Hermite transforms.\n for j in range(min(m, n)):\n # Zero column j below the diagonal.\n for i in range(j+1, m):\n if S[i, j]:\n # Construct an elementary Hermite transformation E\n # to zero S(i,j) by combining rows i and j.\n E = ehermite(S[j, j], S[i, j])\n # Apply the transform to S and U.\n S[[j, i], :] = np.dot(E, S[[j, i], :])\n # U[:, [j, i]] = U[:, [j, i]] / E\n U[:, [j, i]] = left_matrix_division(U[:, [j, i]], E) # solving the left matrix division\n\n # % Zero row j after the superdiagonal.\n for i in range(j+2, n):\n if S[j, i]:\n # Construct an elementary Hermite transformation E\n # to zero S(j,i) by combining columns j+1 and i.\n E = ehermite(S[j, j+1], S[j, i])\n # Apply the transform to S and V.\n S[:, [j+1, i]] = np.dot(S[:, [j+1, i]], E.T)\n # V[:, [j+1, i]] = V[:, [j+1, i]] / E\n V[:, [j+1, i]] = left_matrix_division(V[:, [j+1, i]], E) # solving the left matrix division\n\n # Now S is upper bidiagonal.\n # Chase the superdiagonal nonzeros away.\n\n D = np.diag(S, 1)\n while any(D):\n b = min(np.where(D))[0]\n # Start chasing bulge at first nonzero superdiagonal element.\n # To guarantee reduction in S(b,b), first make S(b,b) positive\n # and make S(b,b+1) nonnegative and less than S(b,b).\n if S[b, b] < 0:\n S[b, :] = -S[b, :]\n U[:, b] = -U[:, b]\n\n q = np.floor(S[b, b+1] / S[b, b])\n E = np.array([[1, 0], [-q, 1]])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E) # solving the left matrix division\n\n if S[b, b+1]:\n # Zero the first nonzero superdiagonal element\n # using columns b and b+1, to start the bulge at S(b+1,b).\n E = ehermite(S[b, b], S[b, b+1])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E)\n\n for j in range(min(m, n)):\n if j+1 < m:\n # Zero S(j+1,j) using rows j and j+1.\n E = ehermite(S[j, j], S[j+1, j])\n S[[j, j+1], :] = np.dot(E, S[[j, j+1], :])\n # U[:, [j, j+1]] = U[:, [j, j+1]] / E\n U[:, [j, j+1]] = left_matrix_division(U[:, [j, j+1]], E)\n if j+2 < n:\n # Zero S(j,j+2) using columns j+1 and j+2.\n E = ehermite(S[j, j+1], S[j, j+2])\n S[:, [j+1, j+2]] = np.dot(S[:, [j+1, j+2]], E.T)\n # V[:, [j+1, j+2]] = V[:, [j+1, j+2]] / E\n V[:, [j+1, j+2]] = left_matrix_division(V[:, [j+1, j+2]], E)\n D = np.diag(S, 1)\n\n # 
Now S is diagonal. Make it nonnegative.\n\n for j in range(min(m, n)):\n if S[j, j] < 0:\n S[j, :] = -S[j, :]\n U[:, j] = -U[:, j]\n\n # Squeeze factors to lower right to enforce divisibility condition.\n\n for i in range(min(m, n)):\n for j in range(i+1, min(m, n)):\n # Replace S(i,i), S(j,j) by their gcd and lcm respectively.\n a = S[i, i]\n b = S[j, j]\n [c, d, g] = extgcd(a, b)\n E = np.array([[1, d], [-b/g, a*c/g]])\n F = np.array([[c, 1], [-b*d/g, a/g]])\n S[np.ix_([i, j], [i, j])] = np.dot(np.dot(E, S[:, [i, j]][[i, j], :]), F.T)\n # S[i, i] = tmp_arr[0, 0]\n # S[i, j] = tmp_arr[0, 1]\n # S[j, i] = tmp_arr[1, 0]\n # S[j, j] = tmp_arr[1, 1]\n U[:, [i, j]] = left_matrix_division(U[:, [i, j]], E)\n V[:, [i, j]] = left_matrix_division(V[:, [i, j]], F)\n\n U = np.around(U)\n V = np.around(V)\n return U, S, V", "def calc_C(h, x0, nu_C, W):\n M = len(nu_C)\n C = np.zeros((W, M), dtype=float)\n N = len(h)\n B = np.zeros((2 * N + 2, W))\n x = x0 * np.arange(0, N + 1, dtype=float) / N\n h_ext = np.concatenate(([1.0], h))\n rhs = np.r_[np.ones(N + 1, dtype=float), np.zeros(N + 1, dtype=float)]\n rhs[0] = rhs[0] / np.sqrt(2.0)\n rhs[N] = rhs[N] / np.sqrt(2.0)\n for m, nu_val in enumerate(nu_C):\n for r in range(W):\n k = r - (W / 2) + 1\n B[:N + 1, r] = h_ext * np.cos(2 * np.pi * (k - nu_val) * x)\n B[N + 1:, r] = h_ext * np.sin(2 * np.pi * (k - nu_val) * x)\n B[0, :] = B[0, :] / np.sqrt(2.0)\n B[N, :] = B[N, :] / np.sqrt(2.0)\n B[N + 1, :] = B[N + 1, :] / np.sqrt(2.0)\n B[2 * N + 1, :] = B[2 * N + 1, :] / np.sqrt(2.0)\n q, r = np.linalg.qr(B)\n C[:, m] = solve_triangular(r, np.dot(q.transpose(), rhs))\n # C[:,m] = np.linalg.lstsq(B, rhs)[0]\n return C", "def clinopyroxene_92():\n\n rho = 3327.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 257.3; C[0,1] = 85.9; C[0,2] = 76.2; C[0,3] = 0.; C[0,4] = 7.1; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 216.2; C[1,2] = 71.8; C[1,3] = 0.; C[1,4] = 13.3; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 260.2; C[2,3] = 0.; C[2,4] = 33.7; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.2; C[3,4] = 0.; C[3,5] = 10.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 70.6; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 85.8\n\n return C, rho", "def InductionsFromCirculation_VC_Cont(vr_bar,lambda_r,k,bSwirl,method='analytical',bHighThrustCorr=True,Cq=None):\n # Critical values for high-thrust Spera correction\n ac = 0.34\n Ct_c = 4*ac*(1-ac)\n misc={}\n # Safety\n if lambda_r[0]==0:\n lambda_r[0]=lambda_r[1]*0.01;\n # --- Finding Cylinders intensities\n if method.lower()=='analytical':\n if bSwirl:\n a_prime = k/(4*lambda_r**2) # NOTE: entirely determined from Gamma! 
Eq.(38)\n Ir=np.arange(len(k)-1,-1,-1)\n Ct_rot = -8*sciint.cumtrapz(lambda_r[Ir]**2*a_prime[Ir]**2/vr_bar[Ir], vr_bar[Ir])# Eq.(6)\n Ct_rot = np.concatenate(([0],Ct_rot))\n Ct_rot=Ct_rot[Ir];\n else:\n a_prime = k*0\n Ct_rot = k*0\n Ct_KJ = k*(1+a_prime)\n Ct_eff = Ct_KJ-Ct_rot \n a=np.zeros(vr_bar.shape)\n if bHighThrustCorr:\n Icorr=Ct_eff> Ct_c\n Inorm=Ct_eff<=Ct_c\n # Spera correction\n a[Icorr]=(Ct_eff[Icorr]-4*ac**2)/(4*(1-2*ac));\n a[Inorm]=1/2*(1-np.sqrt(1-Ct_eff[Inorm]))\n else:\n a=1/2*(1-np.sqrt(1-Ct_eff));\n\n # Using Cq\n if Cq is not None:\n a_2 = 1 - lambda_r*Cq/k\n k2 = Cq*lambda_r/(1-a)\n a_prime = k2/(4*lambda_r**2)\n misc['k_Cq'] = k2\n else:\n raise NotImplementedError('')\n misc['Ct_KJ'] = Ct_KJ\n misc['Ct_eff'] = Ct_eff\n misc['Ct_rot'] = Ct_rot\n return a,a_prime,misc", "def _cramers_v_compute(confmat: Tensor, bias_correction: bool) ->Tensor:\n confmat = _drop_empty_rows_and_cols(confmat)\n cm_sum = confmat.sum()\n chi_squared = _compute_chi_squared(confmat, bias_correction)\n phi_squared = chi_squared / cm_sum\n n_rows, n_cols = confmat.shape\n if bias_correction:\n phi_squared_corrected, rows_corrected, cols_corrected = _compute_bias_corrected_values(phi_squared, n_rows, n_cols, cm_sum)\n if torch.min(rows_corrected, cols_corrected) == 1:\n _unable_to_use_bias_correction_warning(metric_name=\"Cramer's V\")\n return torch.tensor(float('nan'), device=confmat.device)\n cramers_v_value = torch.sqrt(phi_squared_corrected / torch.min(rows_corrected - 1, cols_corrected - 1))\n else:\n cramers_v_value = torch.sqrt(phi_squared / min(n_rows - 1, n_cols - 1))\n return cramers_v_value.clamp(0.0, 1.0)", "def project_p2c_points(R, C, H): #---- project to cylindrical\r\n Rc= H[0]/2; Cc= H[1]/2; # center coordinate\r\n phi = H[3]; S= H[4]; # rotation angle and sizing\r\n Tv = H[5]; Tu= H[6]; # displacement\r\n COSF= np.cos(phi); SINF= np.sin(phi); # \r\n U = Tu + S*( COSF*(C- Cc)- SINF*(R- Rc) ); \r\n V = Tv + S*( SINF*(C- Cc)+ COSF*(R- Rc) );\r\n return V, U", "def get_l(GW_glitch,i,j):\n\t\t \n\ttemp = np.einsum('nmk,nmk->k', GW_glitch.r_outer_r[:,:,i,j,:], GW_glitch.Hij[:,:,i,j,:])\n\t\t \n\treturn temp", "def qcd_cc( s, m, r, u ):\n\n l2_min = r\n l3_min = u\n l1_min = (m+1)/2\n l1_max = l2_max = l3_max = (3*s+m+2*r+2*u)/2\n\n S = 0\n for l1 in range(l1_min, l2_max+1):\n for l2 in range(l2_min, l2_max+1):\n for l3 in range(l3_min, l3_max+1):\n n1 = 2*l1 + l2 + l3 - 2*s - m - r - u\n n2_t2 = -2*(l1+l2+l3) + 3*s + m + 2*r + 2*u\n n3 = l2-r\n n4 = l3-u\n if n2_t2%2 != 0:\n continue\n n2 = n2_t2/2\n if n1 < 0 or n2 < 0 or n3 < 0 or n4 < 0:\n continue\n\n denom = factorial(n1)*factorial(n2)*factorial(n3)*factorial(n4)*factorial(3)**n1*factorial(4)**n2*factorial(m)*factorial(r)**2*factorial(u)**2\n\n nom = double_factorial(2*l1-1)*factorial(l2)*factorial(l3)\n S+= Fraction(nom, denom)\n\n return S", "def normal_modes_gHST(R, NL, KL, params, dispersion=[], spin_dir=[], sublattice_labels=[], b='hang', spring='auto',\n pin='auto'):\n try:\n NP, NN = np.shape(NL)\n except:\n '''There is only one particle.'''\n NP = 1\n NN = 0\n\n M1 = np.zeros((2 * NP, 2 * NP))\n M2 = np.zeros((2 * NP, 2 * NP))\n if spring == 'auto':\n spring = params['k'] * params['l'] ** 2 / (params['I3'] * np.abs(params['w3']))\n # If there is more than one particle, and if the speeds vary from particle to particle,\n # then make spring the same length as a dynamical matrix column\n if len(spring) > 0:\n if (abs(spring - spring[0]) > 1e-9).any():\n # The rotation rates vary from particle to particle, so reshape\n 
spring_new = np.zeros_like(spring)\n dmyi = 0 # a new index ('dummy i')\n for ii in range(NP):\n # Since 2 dof for position of pivot of gHST, double the size\n spring_new[dmyi] = spring[ii]\n spring_new[dmyi + 1] = spring[ii]\n dmyi += 2\n else:\n # the elements are all identical, so just keep the first one\n spring = spring[0]\n\n if pin == 'auto':\n gn = params['Mm'] * params['g']\n pin = params['l'] * gn / (params['I3'] * np.abs(params['w3']))\n # If there is more than one particle, and if the speeds vary from particle to particle,\n # then make pin the same length as a dynamical matrix column\n if len(pin) > 0:\n if (abs(pin - pin[0]) > 1e-9).any():\n # The rotation rates vary from particle to particle, so reshape\n pin_new = np.zeros_like(pin)\n dmyi = 0 # a new index ('dummy i')\n for ii in range(NP):\n # Since 2 dof for position of pivot of gHST, double the size\n pin_new[dmyi] = pin[ii]\n pin_new[dmyi + 1] = pin[ii]\n dmyi += 2\n else:\n # the elements are all identical, so just keep the first one\n pin = pin[0]\n\n m2_shape = np.shape(M2)\n\n if b == 'hang':\n b = np.zeros(NP)\n elif b == 'stand':\n b = np.ones(NP)\n\n if spin_dir == []:\n '''Assume antialigned with a, aligned with body axis 3'''\n spin_dir = np.ones(NP)\n\n print 'Constructing dynamical matrix...'\n for i in range(NP):\n for nn in range(NN):\n\n ni = NL[i, nn] # the number of the gyroscope i is connected to (particle j)\n k = KL[i, nn] # true connection?\n\n if len(dispersion) > 1:\n disp = 1. / (1. + dispersion[i])\n else:\n disp = 1.\n\n diffx = R[ni, 0] - R[i, 0]\n diffy = R[ni, 1] - R[i, 1]\n alphaij = 0.\n\n rij_mag = np.sqrt(diffx ** 2 + diffy ** 2)\n\n if k != 0:\n alphaij = np.arctan2(diffy, diffx)\n\n # for periodic systems, KL is -1 for particles on opposing boundaries\n if KL[i, nn] == -1:\n alphaij = (np.pi + alphaij) % (2 * pi)\n\n # What is this for?\n if KL[i, nn] == -2: # will only happen on first or last gyro in a line\n if i == 0 or i == (NP - 1):\n print i, '--> NL=-2 for this particle'\n yy = np.where(KL[i] == 1)\n dx = R[NL[i, yy], 0] - R[NL[i, yy], 0]\n dy = R[NL[i, yy], 1] - R[NL[i, yy], 1]\n al = (np.arctan2(dy, dx)) % (2 * pi)\n alphaij = np.pi - al\n if i == 1:\n alphaij = np.pi - ((90 / 2) * np.pi / 180.)\n else:\n alphaij = - ((90 / 2) * np.pi / 180.)\n\n Cos = np.cos(alphaij)\n Sin = np.sin(alphaij)\n\n if abs(Cos) < 10E-8:\n Cos = 0.0\n\n if abs(Sin) < 10E-8:\n Sin = 0\n\n Cos2 = Cos ** 2\n Sin2 = Sin ** 2\n CosSin = Cos * Sin\n\n # -1 for aligned with a, 1 for aligned with 3.\n # dir factor :== 1/(-1)^c = (-1)^c\n dir_factor = spin_dir[i]\n\n if len(sublattice_labels) > 0:\n if sublattice_labels[i] == 1:\n extra_factor = 1. 
* del_A_B\n # print self.del_A_B\n elif sublattice_labels[i] == 0:\n extra_factor = 1.\n else:\n extra_factor = 1.\n else:\n extra_factor = 1.\n\n M1[2 * i, 2 * i] += -disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dxi - dxi\n M1[2 * i, 2 * i + 1] += -disp * k * Sin2 * ((-1) ** b[i]) * dir_factor # dxi - dyi\n M1[2 * i, 2 * ni] += disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dxi - dxj\n M1[2 * i, 2 * ni + 1] += disp * k * Sin2 * ((-1) ** b[i]) * dir_factor # dxi - dyj\n\n # (y components)\n M1[2 * i + 1, 2 * i] += disp * k * Cos2 * ((-1) ** b[i]) * dir_factor # dyi - dxi\n M1[2 * i + 1, 2 * i + 1] += disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dyi - dyi\n M1[2 * i + 1, 2 * ni] += -disp * k * Cos2 * ((-1) ** b[i]) * dir_factor # dyi - dxj\n M1[2 * i + 1, 2 * ni + 1] += -disp * k * CosSin * ((-1) ** b[i]) * dir_factor # dyi - dyj\n\n # if i==0:\n # print '\\n --- \\n added M1[2*i+1, 2*i] = ',disp*k*Cos2 *((-1)**b[i]) *dir_factor\n # print 'dir_factor = ', dir_factor\n # print 'k = ', k\n # print 'else =', ((-1)**b[i]) *dir_factor\n\n # pinning/gravitational matrix\n M2[2 * i, 2 * i + 1] = (1.) * disp * dir_factor * extra_factor\n M2[2 * i + 1, 2 * i] = -(1.) * disp * dir_factor * extra_factor\n\n # self.pin_array.append(2*pi*1*extra_factor)\n # Assumes:\n # (-1)**c adot = - spring* (-1)**b SUM{ z x nij*(nij.(dri-drj)) } + pin\n matrix = - (-spring * M1 + pin * M2)\n\n return matrix", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... 
and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def blueschist_felsic():\n\n rho = 2970.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 149.85; C[0,1] = 38.7; C[0,2] = 32.59; C[0,3] = -0.15; C[0,4] = -1.; C[0,5] = -0.19\n C[1,0] = C[0,1]; C[1,1] = 163.55; C[1,2] = 30.03; C[1,3] = 1.05; C[1,4] = -1.81; C[1,5] = -1.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 121.62; C[2,3] = 0.22; C[2,4] = -0.95; C[2,5] = -0.13\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 48.03; C[3,4] = -0.63; C[3,5] = -1.14\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 48.62; C[4,5] = -0.01\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 58.42\n\n return C, rho", "def gilt_culg(As, culg, pars, square=False, Rps=None):\n # The philosophy here is to always perform the Gilt the same way,\n # but first permute the tensors into a suitable order according to\n # the culg, and then revert the permutation.\n cube, leg = culg\n # Permute to correct cube.\n As = permute_As(As, cube=cube, Rps=Rps)\n\n if square:\n As, done, err = apply_gilt_squares(As, pars, leg=leg, Rps=Rps)\n else:\n As, done, err = apply_gilt_cubes(As, pars, leg=leg, Rps=Rps)\n\n # Reverse permutation.\n As = permute_As(As, cube=cube, inverse=True, Rps=Rps)\n return As, done, err", "def intermediateJacRot2Polar(self,x):\n r = cNorm(x[:2,:],kd=False)\n x0overr = x[0,:]/r\n x1overr = x[1,:]/r\n\n Jac = Idn(x.shape[1],x.shape[0])\n Jac[:,0,0] = -x1overr\n Jac[:,0,1] = x0overr\n Jac[:,1,0] = x0overr\n Jac[:,1,1] = x1overr\n \n return Jac", "def intermediateJacPol2Rot(self,x):\n allS = np.sin(x[0,:])\n allC = np.cos(x[0,:])\n allR = x[1,:]\n \n Jac = Idn(x.shape[1],self._dim)\n Jac[:,0,0] = -allS*allR\n Jac[:,0,1] = allC\n Jac[:,1,0] = allC*allR\n Jac[:,1,1] = allS\n return Jac", "def _hess_j(C_j, I_j, b_j, b_j_norm, a_1_j, a_2_j):\n D_j = torch.ger(b_j, b_j)\n return C_j + (a_1_j / b_j_norm) * (I_j - D_j / (b_j_norm ** 2)) + a_2_j * I_j", "def matrix_K1(l, omega, S, cn, csn, rhos, rho):\n zt = omega * S / cn['t']\n xt = omega * S / csn['t']\n row1 = np.array((- d21(l, zt), d23(l, xt)))\n row2 = np.array((- d41(l, zt), d43(l, xt, zt, rhos, rho)))\n return np.array((row1, row2))", "def spec_helm_decomp(k,Cu,Cv,GM=False):\n dk = k[1]-k[0]\n s = np.log(k)\n\n Fphi = np.zeros_like(Cu)\n Fpsi = np.zeros_like(Cu)\n Cphi = np.zeros_like(Cu)\n Cpsi = np.zeros_like(Cu)\n\n # assume GM for decomposing into wave and vortex\n if GM:\n gm = np.load(\"/Users/crocha/Projects/dp_spectra/GM/gm_omega_star.npz\")\n f2omg2 = gm['rgm']\n ks = gm['k']*1.e3\n\n for i in range(s.size-1):\n\n ds = np.diff(s[i:])\n\n sh = sinh(s[i]-s[i:])\n ch = cosh(s[i]-s[i:])\n\n # the function to integrate\n Fp = Cu[i:]*sh + Cv[i:]*ch\n Fs = Cv[i:]*sh + Cu[i:]*ch\n\n # integrate using Simpson's rule\n Fpsi[i] = integrate.simps(Fs,s[i:])\n Fphi[i] = integrate.simps(Fp,s[i:])\n\n # zero out unphysical values\n Fpsi[Fpsi < 0.] = 0.\n Fphi[Fphi < 0.] 
= 0.\n\n # compute rotational and divergent components\n Cpsi = Fpsi - Fphi + Cu\n Cphi = Fphi - Fpsi + Cv\n\n if GM:\n\n f2omg2i = np.interp(k,ks,f2omg2)\n\n Cv_w = f2omg2i*Fphi - Fpsi + Cv\n Cv_v = Cv - Cv_w\n \n kdkromg = diff_central(ks, f2omg2)\n kdkromg = np.interp(k,ks[1:-1],kdkromg)\n\n dFphi = diff_central(k, Fphi)\n #dFphi = np.gradient(Fphi,k)\n dFphi = np.interp(k,k[1:-1],dFphi.real)\n E_w = Fphi - k*dFphi\n\n Cu_w = -k*kdkromg*Fphi + f2omg2i*(-Fpsi+Cv) + Fphi\n Cu_v = Cu - Cu_w\n\n Cb_w = E_w - (Cu_w + Cv_w)/2.\n\n return Cpsi,Cphi, Cu_w,Cv_w, Cu_v,Cv_v, E_w, Cb_w\n\n else:\n return Cpsi,Cphi", "def row_echelon(self):\n # TODO: This can be refactored for better efficiency\n if all([all([self[i, j] == 0 for j in range(self.n)])\n for i in range(self.m)]):\n return Matrix.makeZero(self.m, self.n)\n res = deepcopy(self)\n i, j = 0, 0\n while i < res.m and j < res.n:\n # Use R2 to make pivot non-zero\n if res[i, j] == 0:\n found_non_zero = False\n for k in range(i, res.m):\n if res[k, j] != 0:\n found_non_zero = True\n break\n if not found_non_zero:\n j += 1\n continue\n res.data[i], res.data[k] = res.data[k], res.data[i]\n # Use R3 to make pivot one\n if res[i, j] != 1:\n if any([elem % res[i, j] != 0 for elem in res.data[i]]):\n raise ValueError\n res.data[i] = [elem / res[i, j] for elem in res.data[i]]\n # Use R1 to eliminate entries below the pivot\n for k in range(i + 1, res.m):\n if res[k, j] != 0:\n constant = res[k, j] / res[i, j]\n res.data[k] = [elem_k - elem_i * constant\n for elem_i, elem_k in\n zip(res.data[i], res.data[k])]\n i, j = i + 1, j + 1\n return res", "def get_mgc_rotation(side_a, side_b):\n # Can be reused when building the MST\n k_rotations_a = 0\n k_rotations_b = 0\n mgc_specific_relation = None\n piece_swap = False\n\n # No rotation required as MGC works with Right -> Left and Bottom -> Top relations correctly\n if side_a == RIGHT:\n k_rotations_a = 0\n mgc_specific_relation = RIGHT_LEFT\n k_rotations_b = k_rotational[side_a][side_b]\n if side_a == BOTTOM:\n k_rotations_a = 0\n mgc_specific_relation = BOTTOM_TOP\n k_rotations_b = k_rotational[side_a][side_b]\n\n if side_a == LEFT:\n if side_b == RIGHT:\n # Pretty much switch positions and that will be all\n piece_swap = True\n k_rotations_a = 0\n k_rotations_b = 0\n else:\n # Make the LEFT to be RIGHT\n # Adjust side_b to become LEFT\n k_rotations_a = 2\n k_rotations_b = k_rotational[side_a][side_b]\n mgc_specific_relation = RIGHT_LEFT\n if side_a == TOP:\n if side_b == BOTTOM:\n # Pretty much switch positions and that will be all\n piece_swap = True\n k_rotations_a = 0\n k_rotations_b = 0\n else:\n # Make the TOP side to be BOTTOM\n # Adjust side_b to become TOP\n k_rotations_a = 2\n k_rotations_b = k_rotational[side_a][side_b]\n mgc_specific_relation = BOTTOM_TOP\n return k_rotations_a, k_rotations_b, mgc_specific_relation, piece_swap", "def format_GS_HC_rec(res, s_hc, GS_HC):\n\n # GS - HC x-y mean position per HC unit.\n gs_hc_pos = pd.concat({i: pd.DataFrame(kv, index=['x', 'y'], columns=s_hc)\n for i, kv in res['gs_hc_pos'].items()}).unstack()\n\n # GS - HC entropy per HC unit.\n gs_hc_h = pd.Series([utils.entropy(gs_hc.flatten()) for gs_hc in GS_HC],\n index=s_hc)\n\n # GS - HC maximum value per HC unit.\n gs_hc_max = pd.Series([gs_hc.max() for gs_hc in GS_HC], index=s_hc)\n\n return gs_hc_pos, gs_hc_h, gs_hc_max", "def Hamiltonian(self):\n Vmat = sparse.spdiags([self.U], [0], len(self.U), len(self.U))\n Kmat = -self.KE * Schrodinger.D2mat(numpts=len(self.x), delta=self.x[1] - self.x[0], 
periodic=self.periodic,\n q=self.q)\n return Kmat + Vmat", "def convert_rgb_hsv(rcol, gcol, bcol):\n\n mxi = max(rcol, gcol, bcol)\n mni = min(rcol, gcol, bcol)\n\n d_f = mxi-mni\n if mxi == mni:\n hcol = 0\n elif mxi == rcol:\n hcol = (60 * ((gcol-bcol)/d_f) + 360) % 360\n elif mxi == gcol:\n hcol = (60 * ((bcol-rcol)/d_f) + 120) % 360\n elif mxi == bcol:\n hcol = (60 * ((rcol-gcol)/d_f) + 240) % 360\n if mxi == 0:\n scol = 0\n else:\n scol = d_f/mxi\n vcol = mxi\n return hcol, scol, vcol", "def compute_optimal_subspace_projection(G,X,k):\n # center the data\n # G = G - np.mean(G,axis=0)\n # compute the k largest eigens of G\n # U,S,VT = np.linalg.svd(G)\n \n U,S,VT = np.linalg.svd(G/np.sqrt(G.shape[0]))\n # truncated score matrix\n #G = U[:,:k] @ np.diag(S[:k])\n G = G @ (VT.T)[:,:k]\n # project X as well\n X = X @ (VT.T[:,:k])\n print(\"Singular values\", S[:20])\n return G,X, (VT.T[:,:k])", "def volterra_BM_path_chol(grid_points, M, H, T,rho):\n\n assert 0<H<1.0\n\n ## Step1: create partition\n\n X=np.linspace(0, T, num=grid_points)\n\n # get rid of starting point\n X=X[1:grid_points]\n\n ## Step 2: compute covariance matrix\n size=2*(grid_points-1)\n Sigma=np.zeros([size,size])\n #Sigma(1,1)\n for j in range(grid_points-1):\n for i in range(grid_points-1):\n if i==j:\n Sigma[i,j]=np.power(X[i],2*H)/2/H\n else:\n s=np.minimum(X[i],X[j])\n t=np.maximum(X[i],X[j])\n Sigma[i,j]=np.power(t-s,H-0.5)/(H+0.5)*np.power(s,0.5+H)*special.hyp2f1(0.5-H, 0.5+H, 1.5+H, -s/(t-s))\n #Sigma(1,2) and Sigma (2,1)\n for j in range(grid_points-1):\n for i in range(grid_points-1):\n Sigma[i,j+((grid_points-1))]=rho/(H+0.5)*(np.power(X[i],H+0.5)-np.power(X[i]-np.minimum(X[i],X[j]),H+0.5))\n Sigma[i+(grid_points-1),j]=rho/(H+0.5)*(np.power(X[j],H+0.5)-np.power(X[j]-np.minimum(X[i],X[j]),H+0.5))\n #Sigma(2,2)\n for j in range(grid_points-1):\n for i in range(grid_points-1):\n Sigma[i+(grid_points-1),j+(grid_points-1)]=np.minimum(X[i],X[j])\n\n ## Step 3: compute Cholesky decomposition\n P=np.linalg.cholesky(Sigma)\n\n ## Step 4: draw Gaussian rv\n\n Z=np.random.normal(loc=0.0, scale=1.0, size=[M,2*(grid_points-1)])\n\n ## Step 5: get (V,W) and add 0's in the beginning\n\n V=np.zeros((M,grid_points))\n W=np.zeros((M,grid_points))\n for i in range(M):\n aux=np.dot(P,Z[i,:])\n V[i,1:grid_points]=aux[0:(grid_points-1)]\n W[i,1:grid_points]=aux[(grid_points-1):2*(grid_points-1)]\n\n return V, W", "def _gu_bilinear(self, h, r):\n mu1h = torch.matmul(self.mu1.weight, h.T) # [k, b]\n mu2r = torch.matmul(self.mu2.weight, r.T) # [k, b]\n return (mu1h * mu2r + self.bu.weight).T # [b, k]", "def row_echelon(self):\n # TODO: This can be refactored for better efficiency\n if all([all([self[i, j] == 0 for j in range(self.n)])\n for i in range(self.m)]):\n return Matrix.makeZero(self.m, self.n)\n res = deepcopy(self)\n i, j = 0, 0\n while i < res.m and j < res.n:\n # Use R2 to make pivot non-zero\n if res[i, j] == 0:\n found_non_zero = False\n for k in range(i, res.m):\n if res[k, j] != 0:\n found_non_zero = True\n break\n if not found_non_zero:\n j += 1\n continue\n res.data[i], res.data[k] = res.data[k], res.data[i]\n # Use R3 to make pivot one\n if res[i, j] != 1:\n res.data[i] = [elem / res[i, j] for elem in res.data[i]]\n # Use R1 to eliminate entries below the pivot\n for k in range(i + 1, res.m):\n if res[k, j] != 0:\n constant = res[k, j] / res[i, j]\n res.data[k] = [elem_k - elem_i * constant\n for elem_i, elem_k in\n zip(res.data[i], res.data[k])]\n i, j = i + 1, j + 1\n return res", "def update_H(self):\n 
self.grid.H[self.loc] -= (\n self.grid.courant_number\n * self.grid.inverse_permeability[self.loc]\n * self.phi_H\n )", "def r_2c(t9, rho = 1, sc = 1):\n # q = 13.933 MeV\n t9a = t9 / (1 + 0.0396 * t9)\n t9am13 = 1 / cbrt(t9a)\n t9a56 = t9a * t9am13\n t932 = t9 * np.sqrt(t9)\n t9m32 = 1 / t932\n t9m1 = 1 / t9\n t93 = t932**2\n r24 = (4.27e+26 * t9a56 * t9m32 *\n np.exp(-84.165 * t9am13 - 0.00212 * t93))\n f = (0.5 * sc) * rho * r24\n rev = 7.2517952463e10 * t932 * np.exp(-161.6858 * t9m1)\n r = rev * r24\n return f, r", "def Gram_Schmidt(vecs, row_wise_storage=True, tol=1E-10,\n normalize=False, remove_null_vectors=False,\n remove_noise=False):\n # The algorithm below views vecs as a matrix A with the vectors\n # stored as columns:\n vecs = asarray(vecs) # transform to array if list of vectors\n if row_wise_storage:\n A = transpose(vecs).copy()\n else:\n A = vecs.copy()\n\n m, n = A.shape\n V = zeros((m,n))\n\n for j in range(n):\n v0 = A[:,j]\n v = v0.copy()\n for i in range(j):\n vi = V[:,i]\n\n if (abs(vi) > tol).any():\n v -= (vdot(v0,vi)/vdot(vi,vi))*vi\n V[:,j] = v\n\n if remove_null_vectors:\n indices = [i for i in range(n) if (abs(V[:,i]) < tol).all()]\n V = V[ix_(list(range(m)), indices)]\n\n if normalize:\n for j in range(V.shape[1]):\n V[:,j] /= linalg.norm(V[:,j])\n\n if remove_noise:\n V = cut_noise(V, tol)\n\n return transpose(V) if row_wise_storage else V", "def get_shear_matrix2d(center: Tensor, sx: Tensor | None = None, sy: Tensor | None = None) -> Tensor:\n sx = tensor([0.0]).repeat(center.size(0)) if sx is None else sx\n sy = tensor([0.0]).repeat(center.size(0)) if sy is None else sy\n\n x, y = torch.split(center, 1, dim=-1)\n x, y = x.view(-1), y.view(-1)\n\n sx_tan = torch.tan(sx)\n sy_tan = torch.tan(sy)\n ones = torch.ones_like(sx)\n shear_mat = stack([ones, -sx_tan, sx_tan * y, -sy_tan, ones + sx_tan * sy_tan, sy_tan * (sx_tan * y + x)], -1).view(\n -1, 2, 3\n )\n\n shear_mat = convert_affinematrix_to_homography(shear_mat)\n return shear_mat", "def lizardite():\n\n rho = 2610.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 245.; C[0,1] = 50.; C[0,2] = 31.; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 245.; C[1,2] = 31.; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 23.; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 11.6; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 11.6; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 97.5\n\n return C, rho", "def bfgs_method(x0, eps=1e-6, H0=np.eye(18),c1=1e-4):\n k = 0 # initialize num of outer iterations.\n inner_k = 0 # initialize inner k iteration.\n old_xk = None\n alpha_original = 1\n alpha = np.copy(alpha_original)\n xk = x0 # intitialize x.\n Hk = H0 # initialize H, positive definite matrix.\n I = np.eye(len(x0)) # idenitity matrix of 2 by 2.\n\n alpha_vec = []\n f_vec = []\n grad_vec = []\n inner_k = []\n conv_c = []\n\n while np.linalg.norm(rosen_der(xk)) > eps:\n pk = -Hk @ rosen_der(xk)\n\n xk_next = xk + alpha * pk\n ink = 0\n print(xk)\n while rosen(xk_next) > rosen(xk) + c1 * alpha * (pk.T @ rosen_der(xk)):\n \"\"\" find a step size that will satisfy Armijo-Goldstein inequality. Modify alpha. 
\"\"\"\n alpha = 0.1* alpha\n xk_next = xk + alpha * pk\n ink += 1\n\n inner_k.append(abs(int(ink)))\n\n xk_next = xk + alpha * pk\n\n sk = xk_next - xk\n\n yk = rosen_der(xk_next) - rosen_der(xk)\n\n rho = 1 / (yk.T @ sk)\n\n Hk = np.copy((I - rho * sk @ yk.T) @ Hk @ (I - rho * yk @ sk.T) + rho * sk @ sk.T)\n\n old_xk = np.copy(xk)\n xk = np.copy(xk_next)\n\n alpha_vec.append(alpha)\n f_vec.append(rosen(xk))\n grad_vec.append(np.linalg.norm(rosen_der(xk)))\n alpha = np.copy(alpha_original)\n print(f_vec[-1])\n\n k += 1\n\n return xk, k, inner_k, alpha_vec, f_vec, grad_vec", "def cl_tomo(self,cosmo_h=None,cosmo_params=None,pk_params=None,pk_func=None,\n corrs=None,bias_kwargs={},bias_func=None,stack_corr_indxs=None):\n\n l=self.l\n if corrs is None:\n corrs=self.corrs\n\n #tracers=[j for i in corrs for j in i]\n tracers=np.unique([j for i in corrs for j in i])\n\n corrs2=corrs\n if self.do_cov:#make sure we compute cl for all cross corrs necessary for covariance\n #FIXME: If corrs are gg and ll only, this will lead to uncessary gl. This\n # is an unlikely use case though\n corrs2=[]\n for i in np.arange(len(tracers)):\n for j in np.arange(i,len(tracers)):\n corrs2+=[(tracers[i],tracers[j])]\n\n if cosmo_h is None:\n cosmo_h=self.Ang_PS.PS.cosmo_h\n\n self.SN={}\n # self.SN[('galaxy','shear')]={}\n if 'shear' in tracers:\n# self.lensing_utils.set_zs_sigc(cosmo_h=cosmo_h,zl=self.Ang_PS.z)\n self.tracer_utils.set_kernel(cosmo_h=cosmo_h,zl=self.Ang_PS.z,tracer='shear')\n self.SN[('shear','shear')]=self.tracer_utils.SN['shear']\n if 'galaxy' in tracers:\n if bias_func is None:\n bias_func='constant_bias'\n bias_kwargs={'b1':1,'b2':1}\n# self.galaxy_utils.set_zg_bias(cosmo_h=cosmo_h,zl=self.Ang_PS.z,bias_func=bias_func,\n# bias_kwargs=bias_kwargs)\n# self.SN[('galaxy','galaxy')]=self.galaxy_utils.SN\n self.tracer_utils.set_kernel(cosmo_h=cosmo_h,zl=self.Ang_PS.z,tracer='galaxy')\n self.SN[('galaxy','galaxy')]=self.tracer_utils.SN['galaxy']\n\n self.Ang_PS.angular_power_z(cosmo_h=cosmo_h,pk_params=pk_params,pk_func=pk_func,\n cosmo_params=cosmo_params)\n\n out={}\n cl={}\n cov={}\n cl_b={}\n for corr in corrs2:\n corr2=corr[::-1]\n cl[corr]={}\n cl[corr2]={}\n corr_indxs=self.corr_indxs[(corr[0],corr[1])]#+self.cov_indxs\n for (i,j) in corr_indxs:\n # out[(i,j)]\n cl[corr][(i,j)]=delayed(self.calc_cl)(zs1_indx=i,zs2_indx=j,corr=corr)\n\n cl[corr2][(j,i)]=cl[corr][(i,j)]#useful in gaussian covariance calculation.\n cl_b[corr]=delayed(self.combine_cl_tomo)(cl[corr],corr=corr,Win=self.Win.Win)\n # cl_b[corr2]=cl_b[corr]\n print('cl dict done')\n if self.do_cov:\n start_j=0\n corrs_iter=[(corrs[i],corrs[j]) for i in np.arange(len(corrs)) for j in np.arange(i,len(corrs))]\n for (corr1,corr2) in corrs_iter:\n cov[corr1+corr2]={}\n cov[corr2+corr1]={}\n\n corr1_indxs=self.corr_indxs[(corr1[0],corr1[1])]\n corr2_indxs=self.corr_indxs[(corr2[0],corr2[1])]\n\n if corr1==corr2:\n cov_indxs_iter=[ k for l in [[(i,j) for j in np.arange(i,\n len(corr1_indxs))] for i in np.arange(len(corr2_indxs))] for k in l]\n else:\n cov_indxs_iter=[ k for l in [[(i,j) for i in np.arange(\n len(corr1_indxs))] for j in np.arange(len(corr2_indxs))] for k in l]\n\n for (i,j) in cov_indxs_iter:\n indx=corr1_indxs[i]+corr2_indxs[j]\n cov[corr1+corr2][indx]=delayed(self.cl_cov)(cls=cl, zs_indx=indx,Win=self.Win.Win,\n tracers=corr1+corr2)\n indx2=corr2_indxs[j]+corr1_indxs[i]\n cov[corr2+corr1][indx2]=cov[corr1+corr2][indx]\n\n out_stack=delayed(self.stack_dat)({'cov':cov,'cl_b':cl_b,'est':'cl_b'},corrs=corrs,\n 
corr_indxs=stack_corr_indxs)\n return {'stack':out_stack,'cl_b':cl_b,'cov':cov,'cl':cl}", "def q_hkl_rectangular(self, h, k, l):\n \n # NOTE: This is assuming cubic/rectangular only!\n qhkl_vector = ( 2*pi*h/(self.lattice_spacing_a), \\\n 2*pi*k/(self.lattice_spacing_b), \\\n 2*pi*l/(self.lattice_spacing_c) ) \n qhkl = sqrt( qhkl_vector[0]**2 + qhkl_vector[1]**2 + qhkl_vector[2]**2 )\n \n return (qhkl, qhkl_vector)", "def project_p2c_image(src, H): #---- project p to c (whole image)\r\n Z = H[2]; phi= H[3]; S= H[4]; TV= H[5]; TU= H[6];\r\n rows= src.shape[0]; cols= src.shape[1]; # get image size info\r\n diag= np.sqrt(rows**2+cols**2); # diagnol length\r\n radi= int(diag*S/2*1.1); # radius of new plot should be larger\r\n dest= np.zeros((radi*2,radi*2,3)) # projection result\r\n cosf= np.cos(phi); sinf= np.sin(phi); # rotation parameters\r\n u0 = radi-(TU-np.floor(TU)); # only process fractional part\r\n v0 = radi-(TV-np.floor(TV)); # of TU and TV\r\n kv = np.arange(0,radi*2); # \r\n #--- ---\r\n srcx= src.copy();\r\n srcx[0,:,:]=0; srcx[rows-2:rows,:,:]=0; \r\n srcx[:,0,:]=0; srcx[:,cols-2:cols,:]=0;\r\n #--- mapping ---\r\n for ku in range(0,radi*2): # scan each column\r\n UP = (ku-u0)/S; VP= (kv-v0)/S; # correct tu,tv,s\r\n RP =-sinf*UP + cosf*VP;\r\n CP = cosf*UP + sinf*VP; # correct rotation phi\r\n theta= CP/Z; # horizontal angle\r\n C = Z*np.tan(theta) + cols/2;\r\n R = RP/np.cos(theta) + rows/2;\r\n #--- interpolation ---\r\n C = np.minimum(np.maximum(C, 0), cols-2);\r\n R = np.minimum(np.maximum(R, 0), rows-2); \r\n C0 = np.floor(C).astype(int); C1= C-C0; \r\n R0 = np.floor(R).astype(int); R1= R-R0; \r\n for m in range(0,3):\r\n pixel = srcx[R0 ,C0 ,m]*(1-R1)*(1-C1);\r\n pixel+= srcx[R0 ,C0+1,m]*(1-R1)*( C1);\r\n pixel+= srcx[R0+1,C0 ,m]*( R1)*(1-C1);\r\n pixel+= srcx[R0+1,C0+1,m]*( R1)*( C1);\r\n dest[kv,ku,m]= pixel; \r\n return dest", "def Cijkl(C):\n c = np.zeros(shape=(3, 3, 3, 3))\n CC = np.zeros(shape=(9, 9))\n CC[0:6, 0:6] = C[0:6, 0:6]\n CC[6:9, 6:9] = C[3:6, 3:6]\n CC[0:6, 6:9] = C[0:6, 3:6]\n CC[6:9, 0:6] = C[3:6, 0:6]\n\n c[0, 0, 0, 0] = CC[0, 0]\n c[0, 0, 1, 1] = CC[0, 1]\n c[0, 0, 2, 2] = CC[0, 2]\n c[0, 0, 1, 2] = CC[0, 3]\n c[0, 0, 2, 0] = CC[0, 4]\n c[0, 0, 0, 1] = CC[0, 5]\n c[0, 0, 2, 1] = CC[0, 6]\n c[0, 0, 0, 2] = CC[0, 7]\n c[0, 0, 1, 0] = CC[0, 8]\n\n c[1, 1, 0, 0] = CC[1, 0]\n c[1, 1, 1, 1] = CC[1, 1]\n c[1, 1, 2, 2] = CC[1, 2]\n c[1, 1, 1, 2] = CC[1, 3]\n c[1, 1, 2, 0] = CC[1, 4]\n c[1, 1, 0, 1] = CC[1, 5]\n c[1, 1, 2, 1] = CC[1, 6]\n c[1, 1, 0, 2] = CC[1, 7]\n c[1, 1, 1, 0] = CC[1, 8]\n\n c[2, 2, 0, 0] = CC[2, 0]\n c[2, 2, 1, 1] = CC[2, 1]\n c[2, 2, 2, 2] = CC[2, 2]\n c[2, 2, 1, 2] = CC[2, 3]\n c[2, 2, 2, 0] = CC[2, 4]\n c[2, 2, 0, 1] = CC[2, 5]\n c[2, 2, 2, 1] = CC[2, 6]\n c[2, 2, 0, 2] = CC[2, 7]\n c[2, 2, 1, 0] = CC[2, 8]\n\n c[1, 2, 0, 0] = CC[3, 0]\n c[1, 2, 1, 1] = CC[3, 1]\n c[1, 2, 2, 2] = CC[3, 2]\n c[1, 2, 1, 2] = CC[3, 3]\n c[1, 2, 2, 0] = CC[3, 4]\n c[1, 2, 0, 1] = CC[3, 5]\n c[1, 2, 2, 1] = CC[3, 6]\n c[1, 2, 0, 2] = CC[3, 7]\n c[1, 2, 1, 0] = CC[3, 8]\n\n c[2, 0, 0, 0] = CC[4, 0]\n c[2, 0, 1, 1] = CC[4, 1]\n c[2, 0, 2, 2] = CC[4, 2]\n c[2, 0, 1, 2] = CC[4, 3]\n c[2, 0, 2, 0] = CC[4, 4]\n c[2, 0, 0, 1] = CC[4, 5]\n c[2, 0, 2, 1] = CC[4, 6]\n c[2, 0, 0, 2] = CC[4, 7]\n c[2, 0, 1, 0] = CC[4, 8]\n\n c[0, 1, 0, 0] = CC[5, 0]\n c[0, 1, 1, 1] = CC[5, 1]\n c[0, 1, 2, 2] = CC[5, 2]\n c[0, 1, 1, 2] = CC[5, 3]\n c[0, 1, 2, 0] = CC[5, 4]\n c[0, 1, 0, 1] = CC[5, 5]\n c[0, 1, 2, 1] = CC[5, 6]\n c[0, 1, 0, 2] = CC[5, 7]\n c[0, 1, 1, 0] = CC[5, 8]\n\n c[2, 
1, 0, 0] = CC[6, 0]\n c[2, 1, 1, 1] = CC[6, 1]\n c[2, 1, 2, 2] = CC[6, 2]\n c[2, 1, 1, 2] = CC[6, 3]\n c[2, 1, 2, 0] = CC[6, 4]\n c[2, 1, 0, 1] = CC[6, 5]\n c[2, 1, 2, 1] = CC[6, 6]\n c[2, 1, 0, 2] = CC[6, 7]\n c[2, 1, 1, 0] = CC[6, 8]\n\n c[0, 2, 0, 0] = CC[7, 0]\n c[0, 2, 1, 1] = CC[7, 1]\n c[0, 2, 2, 2] = CC[7, 2]\n c[0, 2, 1, 2] = CC[7, 3]\n c[0, 2, 2, 0] = CC[7, 4]\n c[0, 2, 0, 1] = CC[7, 5]\n c[0, 2, 2, 1] = CC[7, 6]\n c[0, 2, 0, 2] = CC[7, 7]\n c[0, 2, 1, 0] = CC[7, 8]\n\n c[1, 0, 0, 0] = CC[8, 0]\n c[1, 0, 1, 1] = CC[8, 1]\n c[1, 0, 2, 2] = CC[8, 2]\n c[1, 0, 1, 2] = CC[8, 3]\n c[1, 0, 2, 0] = CC[8, 4]\n c[1, 0, 0, 1] = CC[8, 5]\n c[1, 0, 2, 1] = CC[8, 6]\n c[1, 0, 0, 2] = CC[8, 7]\n c[1, 0, 1, 0] = CC[8, 8]\n return c", "def sph(vs, ucs, A, C, x, y, z, rotx=0, roty=0, rotz=0, mode=\"C\"):\n sph = []\n\n if mode == \"A\":\n nA = (A-0.07)/(0.12-0.07)\n nC = (C+0.1)/(0.31+0.1)\n elif mode == \"C\":\n nA = (A-0.07)/(0.12-0.07)\n nC = C/0.307\n elif mode == \"AC\":\n nA = (A-0.07)/(0.12-0.07)\n nC = (C+0.1)/(0.31+0.1)\n else:\n nA = (A-0.07)/(0.12-0.07)\n nC = C/0.307\n\n if (type(nA) is np.float64):\n nA = np.full(len(vs), nA)\n if (type(nC) is np.float64):\n nC = np.full(len(vs), nC)\n\n for v, uc, a, c in zip(vs, ucs, nA, nC):\n if mode == \"A\":\n H0 = a\n L0 = 1/(1+np.exp((-2.8*((a-0.52)*2.4))))\n # R0, G0, B0 = colorsys.hls_to_rgb(0.02+H0*0.35, 0.5-L0*0.4, 1.0)\n R0, G0, B0 = colorsys.hls_to_rgb(1.0-H0*0.40, 0.5-L0*0.4, 1.0)\n\n elif mode == \"C\":\n H0 = c\n L0 = 1/(1+np.exp((-2.8*((c-0.52)*2.4))))\n R0, G0, B0 = colorsys.hls_to_rgb(0.02+H0*0.35, 0.5-L0*0.4, 1.0)\n\n elif mode == \"AC\":\n R0 = a*1.0\n # G0 = max(0.8-(max(a+c, 0)), 0)\n G0 = 0.0\n B0 = c*1.0\n\n else:\n R0 = 0.3\n G0 = 0.2\n B0 = 0.2\n\n R1 = 1.0 - R0\n G1 = 1.0 - G0\n B1 = 1.0 - B0\n\n sph.append(Sphere(v, 0.022,\n Texture(Pigment('color',\n [R0+uc*R1, G0+uc*G1, B0+uc*B1]),\n Finish('phong', 0.7,\n 'specular', 0.2,\n 'diffuse', 0.9,\n 'ambient', 0.1)),\n 'rotate', [rotx, 0, 0],\n 'rotate', [0, roty, 0],\n 'rotate', [0, 0, rotz],\n 'translate', [x, y, z],\n 'no_shadow'))\n\n return sph", "def cw_rotate(self):\n self.grid = [list(x) for x in zip(*self.grid[::-1])]\n self.find_edges()", "def canonical_shu_osher_form(self,r):\n s=len(self)\n K=np.vstack([self.A,self.b])\n K=np.hstack([K,np.zeros([s+1,1])])\n I=snp.eye(s+1)\n P=r*snp.solve(I+r*K,K)\n d=(I-P).sum(1)\n return d,P", "def cr_v(self, v, k):\n\n return self.cr(v[:, 0], v[:, 1], v[:, 2], k).T", "def get_R_h(E, M, l, g_h, T_h):\n # transform energy-vector into matrices\n mat_E_x, mat_E_y = np.meshgrid(E,E) \n mat_diff = mat_E_y - mat_E_x # matrix representing: E_i - E_j\n \n # leave out the sine-terms at first\n R_h = np.ones((M,M))*g_h**2 * T_h*4 # matrix for transition rates\n ind = np.abs(mat_diff) > 0 # indices of the non-divergent elements\n # fill in just those elements without divergences 1/0\n # the rest is set to the correct limit\n R_h[ind] = g_h**2 *4* mat_diff[ind]/(np.exp(mat_diff[ind]/T_h)-1)\n \n # multiply the sine-terms\n sin = np.sin(l*np.arange(1,M+1)*np.pi/(M+1))**2 # vector with sine-values sin(li)**2\n # transform sine-vectors into matrices\n mat_sin_x, mat_sin_y = np.meshgrid(sin,sin)\n R_h *= mat_sin_x * mat_sin_y\n \n return R_h", "def decompose_shear(coords, gamma1, gamma2):", "def transformation_matrix_bishop(self, kappa, phi, s):\n if kappa == 0.0:\n # See Design and Kinematic Modeling of Constant Curvature Continuum Robots: A Review\n # the entries (limits) of the 4th column in case kappa = 0 can be calculated by using 
L'Hopital's rule\n return np.array([[cos(phi)**2*(cos(kappa*s)-1)+1, sin(phi)*cos(phi)*(cos(kappa*s)-1), cos(phi)*sin(kappa*s), 0],\n [sin(phi)*cos(phi)*(cos(kappa*s)-1), cos(phi)**2*(1-cos(kappa*s))+cos(kappa*s), sin(phi)*sin(kappa*s), 0],\n [-cos(phi)*sin(kappa*s), -sin(phi)*sin(kappa*s), cos(kappa*s), s],\n [0, 0, 0, 1]])\n else:\n return np.array([[cos(phi)**2*(cos(kappa*s)-1)+1, sin(phi)*cos(phi)*(cos(kappa*s)-1), cos(phi)*sin(kappa*s), cos(phi)*(1-cos(kappa*s))/kappa],\n [sin(phi)*cos(phi)*(cos(kappa*s)-1), cos(phi)**2*(1-cos(kappa*s))+cos(kappa*s), sin(phi)*sin(kappa*s), sin(phi)*(1-cos(kappa*s))/kappa],\n [-cos(phi)*sin(kappa*s), -sin(phi)*sin(kappa*s), cos(kappa*s), sin(kappa*s)/kappa],\n [0, 0, 0, 1]])", "def rotate(u, w, th):\n ur = np.cos(th) * u + np.sin(th) * w\n wr = -np.sin(th) * u + np.cos(th) * w\n return ur, wr", "def hw_c(box):\n\n w = box[:, 2] - box[:, 0]\n h = box[:, 3] - box[:, 1]\n w_c = box[:, 0] + 0.5 * w\n h_c = box[:, 1] + 0.5 * h\n return h, w, h_c, w_c", "def get_UBmat(self, i, j, hi, hj):\n h1c = (self.Bmat * vec(*hi)).T\n h2c = (self.Bmat * vec(*hj)).T\n\n t1c = norm_vec(h1c)\n t3c = norm_vec(np.cross(h1c, h2c))\n t2c = norm_vec(np.cross(h1c, t3c))\n Tc = np.concatenate((t1c, t2c, t3c)).T\n\n g1 = self.Gvec(self.xp[i], self.yp[i], self.zp[i]).T\n g2 = self.Gvec(self.xp[j], self.yp[j], self.zp[j]).T\n\n t1g = norm_vec(g1)\n t3g = norm_vec(np.cross(g1, g2))\n t2g = norm_vec(np.cross(g1, t3g))\n Tg = np.concatenate((t1g, t2g, t3g)).T\n\n return Tg * np.linalg.inv(Tc)", "def sfm(points):\n # Construct the required W/Rh/Sh matrices.\n\t\n # Get ih/jh from Rh and use them to find Q.\n\n # Use Q, Rh, and Sh to get R and S.\n\n # Extract the F 2x3 rotation matrices from R and form an (F,2,3) array of\n # rotation matrices.\n\n # Build an orthonormal matrix that rotates the first R matrix into an\n # identity matrix.\n\n # Apply the computed rotation matrix to the rotation matrices and the\n # points in S.\n\n # Return the R matrices and an ** Nx3 ** matrix containing the\n # reconstructed 3D points (note that S is 3xN).\n return None", "def rotacija_pravouglog_trougla_oko_hipotenuze(s2, s1):\r\n c = math.sqrt(s2 * s2 + s1 * s1)\r\n povrsina_trougla= (s2 * s1) / 2\r\n hc = (2 * povrsina_trougla) / c\r\n H1 = math.sqrt(s1 * s1 - hc * hc)\r\n H2 = math.sqrt(s2 * s2 - hc * hc)\r\n pi= 3.14\r\n povrsina = hc * pi * (s1 + s2)\r\n zapremina = (hc * hc * pi * (H1 + H2)) / 3\r\n return povrsina, zapremina", "def homog_rot_mtx(angle_rads: float, axis: str) -> numpy.array:\n cosang = numpy.cos(angle_rads)\n sinang = numpy.sin(angle_rads)\n\n if \"z\" == axis:\n return numpy.array(\n (\n (cosang, -sinang, 0, 0),\n (sinang, cosang, 0, 0),\n (0, 0, 1, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n elif \"y\" == axis:\n return numpy.array(\n (\n (cosang, 0, sinang, 0),\n (0, 1, 0, 0),\n (-sinang, 0, cosang, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )\n else:\n return numpy.array(\n (\n (1, 0, 0, 0),\n (0, cosang, -sinang, 0),\n (0, sinang, cosang, 0),\n (0, 0, 0, 1),\n ),\n dtype=numpy.float64,\n )", "def Hamiltonian(self):\n U = self.U.flatten()\n Vmat = sparse.spdiags([U], [0], len(U), len(U))\n Kmat = sparse.kron(-self.KEy * Schrodinger.D2mat(len(self.y), self.y[1] - self.y[0], self.periodic_y, self.qy),\n sparse.identity(len(self.x))) + \\\n sparse.kron(sparse.identity(len(self.y)),\n -self.KEx * Schrodinger.D2mat(len(self.x), self.x[1] - self.x[0], self.periodic_x, self.qx))\n return Kmat + Vmat", "def get_shear_matrix3d(\n center: Tensor,\n sxy: Tensor | None 
= None,\n sxz: Tensor | None = None,\n syx: Tensor | None = None,\n syz: Tensor | None = None,\n szx: Tensor | None = None,\n szy: Tensor | None = None,\n) -> Tensor:\n sxy = tensor([0.0]).repeat(center.size(0)) if sxy is None else sxy\n sxz = tensor([0.0]).repeat(center.size(0)) if sxz is None else sxz\n syx = tensor([0.0]).repeat(center.size(0)) if syx is None else syx\n syz = tensor([0.0]).repeat(center.size(0)) if syz is None else syz\n szx = tensor([0.0]).repeat(center.size(0)) if szx is None else szx\n szy = tensor([0.0]).repeat(center.size(0)) if szy is None else szy\n\n x, y, z = torch.split(center, 1, dim=-1)\n x, y, z = x.view(-1), y.view(-1), z.view(-1)\n # Prepare parameters\n sxy_tan = torch.tan(sxy)\n sxz_tan = torch.tan(sxz)\n syx_tan = torch.tan(syx)\n syz_tan = torch.tan(syz)\n szx_tan = torch.tan(szx)\n szy_tan = torch.tan(szy)\n\n # compute translation matrix\n m00, m10, m20, m01, m11, m21, m02, m12, m22 = _compute_shear_matrix_3d(\n sxy_tan, sxz_tan, syx_tan, syz_tan, szx_tan, szy_tan\n )\n\n m03 = m01 * y + m02 * z\n m13 = m10 * x + m11 * y + m12 * z - y\n m23 = m20 * x + m21 * y + m22 * z - z\n\n # shear matrix is implemented with negative values\n sxy_tan, sxz_tan, syx_tan, syz_tan, szx_tan, szy_tan = -sxy_tan, -sxz_tan, -syx_tan, -syz_tan, -szx_tan, -szy_tan\n m00, m10, m20, m01, m11, m21, m02, m12, m22 = _compute_shear_matrix_3d(\n sxy_tan, sxz_tan, syx_tan, syz_tan, szx_tan, szy_tan\n )\n\n shear_mat = stack([m00, m01, m02, m03, m10, m11, m12, m13, m20, m21, m22, m23], -1).view(-1, 3, 4)\n shear_mat = convert_affinematrix_to_homography3d(shear_mat)\n\n return shear_mat", "def serpentinite_37():\n\n rho = 3000.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 205.52; C[0,1] = 66.36; C[0,2] = 62.29; C[0,3] = -0.1; C[0,4] = -1.48; C[0,5] = 3.86\n C[1,0] = C[0,1]; C[1,1] = 195.79; C[1,2] = 62.53; C[1,3] = -0.37; C[1,4] = 0.2; C[1,5] = 1.54\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 193.30; C[2,3] = -1.78; C[2,4] = -0.24; C[2,5] = 0.83\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 66.17; C[3,4] = 1.47; C[3,5] = -0.57\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 64.70; C[4,5] = -0.84\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 67.83\n\n return C, rho", "def rotate(p,q,A,V): \n n = A.shape[0]\n App, Aqq, Apq = A[p,p], A[q,q], A[p,q] #Initial values\n phi = 0.5*math.atan2(2*Apq, Aqq-App) #Find the rotation value\n c, s = math.cos(phi), math.sin(phi) #Calculate sin and cos\n\n #Update the matrix diagonal elements\n A[p,p] = c*c*App + s*s*Aqq - 2*s*c*Apq \n A[q,q] = s*s*App + c*c*Aqq + 2*s*c*Apq\n A[p,q] = 0 #This is zero by construction\n \n \n #Iterate over and update remaining off-diagonal elements\n for i in range(p):\n Aip, Aiq = A[i,p], A[i,q]\n A[i,p] = c*Aip - s*Aiq\n A[i,q] = c*Aiq + s*Aip\n \n for i in range(p+1,q):\n Api, Aiq = A[p,i], A[i,q]\n A[p,i] = c*Api - s*Aiq\n A[i,q] = c*Aiq + s*Api\n \n for i in range(q+1,n):\n Api, Aqi = A[p,i], A[q,i]\n A[p,i] = c*Api - s*Aqi\n A[q,i] = c*Aqi + s*Api\n \n #Update eigenvectors in matrix V\n for i in range(n):\n Vip, Viq = V[i,p], V[i,q]\n V[i,p] = c*Vip - s*Viq\n V[i,q] = s*Vip + c*Viq\n \n return A, V", "def score_scene(sr, hr, clearhr, norm, num_crop=6):\n zSR = []\n max_x, max_y = np.array(hr.shape) - num_crop\n sr_ = sr[num_crop//2:-num_crop//2, num_crop//2:-num_crop//2]\n \n np.place(clearhr, clearhr==0, np.nan)\n \n zSR = np.zeros((num_crop + 1, num_crop + 1), np.float64)\n for x_off in prange(0, num_crop+1):\n 
for y_off in prange(0, num_crop+1):\n \n clearHR_ = clearhr[x_off : x_off + max_x, y_off : y_off + max_y]\n\n hr_ = hr[x_off:x_off + max_x, y_off:y_off + max_y]\n\n diff = (hr_- sr_)* clearHR_\n\n b = np.nanmean(diff)\n\n\n ## compute cMSE\n cMSE = np.nanmean( (diff-b)**2) \n\n cPSNR = -10.0*np.log10(cMSE)\n \n zSR[x_off, y_off] = norm/cPSNR\n\n return zSR.min()", "def obtain_Q(self):\n \n #create the initial triangular matrix as a copy of the m x n - matrix A\n \n v_list = Householder.vector(self)\n n_v = len(v_list) # number of vectors, not equal to number of columns in R\n q_m = len(v_list[0]) # longest vector, should determine the shape of Q\n \n H_list = []\n for i in list(range(n_v)):\n \n gamma = ((np.linalg.norm(v_list[i]))**2)/2\n vvtrans = v_list[i] * np.transpose(v_list[i])\n H = np.identity((q_m-i)) - (vvtrans/gamma)\n \n print(H.shape)\n\n m_H, n_H = H.shape\n if m_H < q_m:\n I = np.identity(q_m)\n x = y = i\n I [ x:x+H.shape[0], y:y+H.shape[1]] = H\n H = I\n H_list.append(H)\n \n # The transpose of Q is the result of the dot product H(n-1)...H1 \n \n len_H = len(H_list)\n\n H_temp = H_list[-1]\n \n for i in np.arange(len_H-1,0,-1):\n \n H_temp = np.matmul(H_temp, H_list[i-1])\n \n Q = np.transpose(H_temp)\n \n return(Q)", "def Rotation_EQJ_ECL():\n # ob = mean obliquity of the J2000 ecliptic = 0.40909260059599012 radians.\n c = 0.9174821430670688 # cos(ob)\n s = 0.3977769691083922 # sin(ob)\n return RotationMatrix([\n [ 1, 0, 0],\n [ 0, +c, -s],\n [ 0, +s, +c]\n ])", "def clinopyroxene_98():\n\n rho = 3190.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 237.8; C[0,1] = 83.5; C[0,2] = 80.; C[0,3] = 0.; C[0,4] = 9.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 183.6; C[1,2] = 59.9; C[1,3] = 0.; C[1,4] = 9.5; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 229.5; C[2,3] = 0.; C[2,4] = 48.1; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 76.5; C[3,4] = 0.; C[3,5] = 8.4\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 73.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 81.6\n\n return C, rho" ]
[ "0.6138148", "0.5132266", "0.5044118", "0.50204897", "0.49985307", "0.49635363", "0.4903446", "0.48277012", "0.48053998", "0.47816756", "0.4768826", "0.4737764", "0.4733043", "0.4726679", "0.47147375", "0.4713533", "0.47095165", "0.4691508", "0.46743643", "0.46470118", "0.46460894", "0.4645053", "0.46360397", "0.46185982", "0.46155247", "0.45997173", "0.4592768", "0.45862383", "0.45861313", "0.45806757", "0.4580016", "0.45746186", "0.45661378", "0.45548245", "0.4547586", "0.45442474", "0.45411342", "0.45383328", "0.45242023", "0.45212024", "0.45074815", "0.45020184", "0.45000103", "0.4495653", "0.44901872", "0.44817114", "0.4481346", "0.44779563", "0.44647005", "0.44645506", "0.44642538", "0.44597685", "0.44491783", "0.44440934", "0.44391978", "0.44374835", "0.4428915", "0.44278884", "0.4426155", "0.44110784", "0.44110107", "0.43977895", "0.43867552", "0.43782935", "0.43766958", "0.4365484", "0.4363013", "0.43629", "0.43616208", "0.43606874", "0.4359547", "0.43517444", "0.4350539", "0.4350265", "0.43491632", "0.4348693", "0.43486536", "0.43466765", "0.43430564", "0.4341054", "0.43401685", "0.4337077", "0.43265146", "0.43262583", "0.43256772", "0.43249235", "0.43248034", "0.43229613", "0.43158665", "0.43143162", "0.43129265", "0.43107268", "0.43076178", "0.43056104", "0.43037507", "0.42999297", "0.4291908", "0.42862758", "0.4280638", "0.42773956" ]
0.6756806
0
Given scalars v1 and v2, computes cs = cos(theta) and sn = sin(theta) so that [[cs, -sn], [sn, cs]] @ [v1, v2]^T = [r, 0]^T
def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n    t = jnp.sqrt(v1**2 + v2**2)\n    cs = v1 / t\n    sn = -v2 / t\n    return cs, sn
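A minimal usage sketch for the row above (assumptions: jax.numpy is available as jnp; the matrix form [[cs, -sn], [sn, cs]] from the query): with cs = v1/t and sn = -v2/t, the rotation maps (v1, v2) to (r, 0).

import jax.numpy as jnp

# Build the rotation from the document's cs = v1/t, sn = -v2/t and
# check that [[cs, -sn], [sn, cs]] maps (v1, v2) to (r, 0).
v1, v2 = 3.0, 4.0
t = jnp.sqrt(v1**2 + v2**2)            # r = 5.0
cs, sn = v1 / t, -v2 / t
G = jnp.array([[cs, -sn], [sn, cs]])
print(G @ jnp.array([v1, v2]))         # ~[5., 0.]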
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cos_sim(v1, v2):\r\n return np.inner(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def get_angle(v1, v2):\n return np.arccos(np.dot(v1, v2))", "def cos_sim(v1: Union[np.ndarray, np.iterable, int, float], v2: Union[np.ndarray, np.iterable, int, float]) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def compute_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n angle = np.arctan2(sinang, cosang)\n return angle", "def vector_angle(v1, v2):\n cos_theta = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)\n # Clip ensures that cos_theta is within -1 to 1 by rounding say -1.000001 to -1 to fix numerical issues\n angle = np.arccos(np.clip(cos_theta, -1, 1))\n\n return angle", "def getCosRateBetweenSegments(seg1, seg2):\n return 1-(np.cos(np.deg2rad(get_north_azimut(seg1)-get_north_azimut(seg2)))/2+0.5)", "def f2(self,k1,k2,cosTheta):\n ans = (1.0 + self.mu) +(k1/k2 + k2/k1)*cosTheta + (1.0 - self.mu)*cosTheta**2\n return ans", "def get_angle(v1,v2) :\n\n if (np.linalg.norm(v1)*np.linalg.norm(v2)) != 0 : \n cosangle = np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))\n cosangle = np.maximum(-1,np.minimum(1, cosangle))\n angle = np.arccos(cosangle) \n if np.cross(v1,v2) < 0 :\n angle = 2*np.pi - angle \n return angle\n return None", "def w2v_sim(self, s1, s2):\n v1 = self.word2vec.get_centroid_vector(s1)\n v2 = self.word2vec.get_centroid_vector(s2)\n return self.__cos_sim(v1, v2)", "def orient(ps, origin, v1, v2):\r\n \r\n ps = np.vstack((v1, v2, ps))\r\n ps -= origin\r\n if ps[0][1] == 0:\r\n a = 0\r\n else:\r\n a = np.arcsin(np.fabs(ps[0][1]) / np.sqrt(ps[0][1] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][1] < 0 <= ps[0][2]) or (ps[0][1] > 0 > ps[0][2]):\r\n a = 2 * np.pi - a\r\n if (ps[0][1] * np.sin(a) + ps[0][2] * np.cos(a)) < 0:\r\n a = np.pi + a \r\n ps = rotate(a, ps, 0)\r\n if ps[0][0] == 0:\r\n b = 0\r\n else:\r\n b = np.arcsin(np.fabs(ps[0][0]) / np.sqrt(ps[0][0] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][0] < 0 and ps[0][2] < 0) or (ps[0][0] > 0 and ps[0][2] > 0):\r\n b = 2 * np.pi - b\r\n if (ps[0][2] * np.cos(b) - ps[0][0] * np.sin(b)) < 0:\r\n b = np.pi + b\r\n ps = rotate(b, ps, 1)\r\n if ps[1][1] == 0:\r\n c = 0\r\n else:\r\n c = np.arcsin(np.fabs(ps[1][1]) / np.sqrt(ps[1][0]**2 + ps[1][1]**2))\r\n if (ps[1][0] < 0 and ps[1][1] < 0) or (ps[1][0] > 0 and ps[1][1] > 0):\r\n c = 2 * np.pi - c\r\n if (ps[1][0] * np.cos(c) - ps[1][1] * np.sin(c)) < 0:\r\n c = np.pi + c\r\n ps = rotate(c, ps, 2)\r\n return ps[2:]", "def angle(self, v1, v2):\r\n cosang = np.dot(v1, v2)\r\n sinang = np.linalg.norm(np.cross(v1, v2))\r\n return np.arctan2(sinang, cosang)", "def cos1m2_2(self,k1,k2,k12,cos12):\n return (k1*cos12 - k2)/k12", "def angle(v1, v2):\n return acos(np.clip(v1.dot(v2) / (length(v1) * length(v2)), -1.0, 1.0))", "def py_ang(self,v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)", "def coscurv(s, x, y):\n length = x.size\n cur = np.zeros(length)\n for i in range(1, length-1):\n a = np.array([x[i+1]-x[i], y[i+1]-y[i]])\n b = np.array([x[i]-x[i-1], y[i]-y[i-1]])\n c = np.array([1, 0])\n flag = 1\n if flag == 1 and a[1] < 0:\n flag = -1\n elif flag == -1 and a[1] <= 0:\n flag = 1\n angle_cos = flag \\\n *(np.arccos(np.vdot(a, c)/np.linalg.norm(a)/np.linalg.norm(c)) \\\n - np.arccos(np.vdot(b, c)/np.linalg.norm(b)/np.linalg.norm(c)))\n cur[i] = angle_cos/(s[i+1]-s[i-1])*2\n if np.abs(cur[i]) < ZERO:\n cur[i] = 0\n for i in range(1, length-1):\n ave = 
(cur[i-1]+cur[i+1])/2\n if np.abs(cur[i]-ave) > 5*np.abs(cur[i-1]-cur[i+1]):\n cur[i] = ave\n return cur", "def py_ang(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)", "def cos_sim(vec1, vec2):\n if len(vec1) != len(vec2):\n print 'dimension does not agree.'\n numerator_sum = 0 \n for i in range(len(vec1)):\n numerator_sum = numerator_sum + vec1[i]*vec2[i]\n \n denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)\n \n return numerator_sum/denom", "def _arccosine(self, s1, s2, tf_embs):\n tf_pi = tf.constant(np.pi, dtype=tf.float64)\n mat1 = tf.gather(tf_embs, s1)\n mat2 = tf.gather(tf_embs, s2)\n tf_norms = tf.constant(self.norms, dtype=tf.float64, name='norms')\n norms1 = tf.gather(tf_norms, s1)\n norms2 = tf.gather(tf_norms, s2)\n dot = tf.matmul(mat1, tf.transpose(mat2))\n norms = tf.matmul(norms1, tf.transpose(norms2))\n # We clip values due to numerical errors\n # which put some values outside the arccosine range.\n cosine = tf.clip_by_value(dot / norms, -1, 1)\n angle = tf.acos(cosine)\n # The 0 vector has norm 0, which generates a NaN.\n # We catch these NaNs and replace them with pi,\n # which ends up returning 0 similarity.\n angle = tf.select(tf.is_nan(angle), tf.ones_like(angle) * tf_pi, angle)\n return 1 - (angle / tf_pi)", "def vec_angle_rad(v1,v2):\r\n \r\n c = np.dot(v1,v2)/(vector_len(v2)* vector_len(v2))\r\n return math.acos(c)", "def vector_cosine_angle(vec_1:tuple, vec_2:tuple)->float:\n if is_zero_vector(vec_1) or is_zero_vector(vec_2):\n return None\n return dot_product(vec_1, vec_2) / (magnitude(vec_1) * magnitude(vec_2))", "def cosine(vector_1, vector_2):\n\n def _norm(_v):\n return np.sqrt(sum([x ** 2 for x in _v.values()]))\n\n numerator = dot_product(vector_1, vector_2)\n denominator = _norm(vector_1) * _norm(vector_2)\n if denominator == 0:\n return -1\n return numerator / denominator", "def law_of_cosines(lat1, lon1, lat2, lon2):\n\n return np.arccos(np.sin(lat1)*np.sin(lat2)+np.cos(lat1)*np.cos(lat2)*np.cos(lon2-lon1))", "def cosinesimilarity_cal(CTRDM1, CTRDM2):\n\n # get number of conditions\n n_cons = np.shape(CTRDM1)[0]\n\n # calculate the number of value above the diagonal in RDM\n n = n_cons * (n_cons - 1)\n\n # initialize two vectors to store the values above the diagnal of two RDMs\n v1 = np.zeros([n], dtype=np.float64)\n v2 = np.zeros([n], dtype=np.float64)\n\n # assignment\n nn = 0\n for i in range(n_cons):\n for j in range(n_cons):\n if i != j:\n v1[nn] = CTRDM1[i, j]\n v2[nn] = CTRDM2[i, j]\n nn = nn + 1\n\n # calculate the Cosine Similarity\n V1 = np.mat(v1)\n V2 = np.mat(v2)\n num = float(V1 * V2.T)\n denom = np.linalg.norm(V1) * np.linalg.norm(V2)\n cos = num / denom\n similarity = 0.5 + 0.5 * cos\n\n return similarity", "def vector_arc_distance(v_1, v_2):\n delta = math.sqrt(\n (v_2[0] - v_1[0]) ** 2 + (v_2[1] - v_1[1]) ** 2 + (v_2[2] - v_1[2]) ** 2\n )\n return 2 * 1 * delta / 2 / 1 # assuming unit circle so R = 1", "def angle_between(v1, v2):\n return np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))", "def angle(v1, v2, acute=True):\n angle = np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))\n if acute == True:\n return angle\n else:\n return 2 * np.pi - angle", "def vector_angle_finder(vect_1, vect_2):\n theta = np.arccos(np.dot(vect_1, vect_2) / (magnitude_vect(vect_1) * magnitude_vect(vect_2)))\n angle = theta * 180 / math.pi\n return angle", "def rotate2p(v1: vect2d, v2: vect2d, angle: float) -> vect2d:\n dx = v2.x - v1.x\n dy = v2.y - 
v1.y\n vector = vect2d((dx * math.cos(angle) - dy * math.sin(angle)),\n (dx * math.sin(angle) + dx * math.cos(angle)))\n vector += v1\n\n return vector", "def angle_between(v1, v2):\n v = np.array(v1)\n w = np.array(v2)\n\n norm_v = norm(v)\n norm_w = norm(w)\n\n cos_angle = np.around(np.dot(v, w) / norm_v / norm_w, PRECISION)\n\n if not -1 <= cos_angle <= 1:\n return None\n else:\n return np.around(np.arccos(cos_angle) * 360 / 2 / np.pi, PRECISION)", "def cosine_similarity(v1, v2):\n sim = np.sum(v1*v2)/np.sqrt(np.sum(v1**2))/np.sqrt(np.sum(v2**2))\n return sim", "def angle2vecs(vec1, vec2):\n # vector a * vector b = |a|*|b|* cos(angle between vector a and vector b)\n dot = np.dot(vec1, vec2)\n vec1_modulus = np.sqrt((vec1*vec1).sum())\n vec2_modulus = np.sqrt((vec2*vec2).sum())\n if (vec1_modulus * vec2_modulus) == 0:\n cos_angle = 1\n else: cos_angle = dot / (vec1_modulus * vec2_modulus)\n return math.degrees(np.arccos(cos_angle))", "def angle(v1,v2, deg = False):\n # v1.v2 = ||v1||||v2|| cos(angle) => angle = arcos(v1.v2/||v1||||v2||)\n # see more: http://www.wikihow.com/Find-the-Angle-Between-Two-Vectors\n # tested with http://codereview.stackexchange.com/a/54413\n if deg: return np.rad2deg(np.arccos(old_div(np.dot(v1,v2),(anorm(v1)*anorm(v2))))) # *180.0/np.pi\n return np.arccos(old_div(np.dot(v1,v2),(anorm(v1)*anorm(v2))))", "def R2(theta):\n\n DCM = np.array([[np.cos(theta), 0, -np.sin(theta)], \n [0, 1, 0], \n [np.sin(theta), 0, np.cos(theta)]])\n\n return DCM", "def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))", "def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def GetCorr_and_RMSE(v1, v2):\n\treturn pearsonr(v1,v2)[0], math.sqrt(np.mean([(a-b)**2 for a,b in zip(v1,v2)]))", "def __cos_sim(self, v1, v2):\n if np.count_nonzero(v1) == 0 or np.count_nonzero(v2) == 0:\n # whenever at least one of the vectors is all zeros, spatial.distance.cosine will fail by returning nan\n ret = 0\n else:\n ret = 1 - spatial.distance.cosine(v1, v2)\n return ret", "def cossim(vA, vB):\n return np.dot(vA, vB) / (np.sqrt(np.dot(vA, vA)) * np.sqrt(np.dot(vB, vB)))", "def _cosd(v):\n return math.cos(math.radians(v))", "def compute_cosine_sim(vec1, vec2):\r\n\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.dot(vec1, vec2)/(norm(vec1) * norm(vec2))", "def cos1(self,k1,k2,cos12):\n return (-k1 - k2*cos12)/self.k3Length(k1, k2, cos12)", "def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))", "def addVectors(r1, r2):\n \"\"\" [0] = angle, [1] = lenght \"\"\"\n x = (math.sin(r1[0]) * r1[1]) + (math.sin(r2[0]) * r2[1])\n y = (math.cos(r1[0]) * r1[1]) + (math.cos(r2[0]) * r2[1])\n \n angle = 0.5 * math.pi - math.atan2(y, x)\n length = math.hypot(x, y)\n\n return (angle, length)", "def cal_angle_between_two_vectors(vec_1, vec_2):\n unit_vec_1 = vec_1 / np.linalg.norm(vec_1)\n unit_vec_2 = vec_2 / np.linalg.norm(vec_2)\n dot_product = np.dot(unit_vec_1, unit_vec_2)\n \n return np.arccos(dot_product) / np.pi * 180", "def _add_vectors(v1, v2):\n x = math.cos(v1[1]) * v1[0] + math.cos(v2[1]) * v2[0]\n y = math.sin(v1[1]) * v1[0] + math.sin(v2[1]) * v2[0]\n\n angle = 0.5 * math.pi - math.atan2(y, x)\n length = math.hypot(x, y)\n return (length, angle)", "def intersect_2_lines(P1, V1, P2, V2):\n Vx = np.cross(V1, V2)\n s = np.dot(np.cross(P2 - P1, V1), Vx)/np.dot(Vx, Vx)\n return s", "def 
angle_btw(v1, v2):\n cos_ang = np.dot(v1, v2)\n sin_ang = np.linalg.norm(np.cross(v1, v2))\n return np.arctan2(sin_ang, cos_ang) * 180 / math.pi", "def arc_length_sq(Y1, Y2):\n assert Y1.shape == Y2.shape\n\n s = scipy.linalg.svdvals(Y1.transpose() @ Y2)\n\n # handle numerical imprecision\n s[np.isclose(s, 1)] = 1\n s[np.isclose(s, -1)] = -1\n theta = np.arccos(s)\n\n return (theta ** 2).sum()", "def get_cosine(vec1, vec2):\n OPS = get_current_ops()\n v1 = OPS.to_numpy(OPS.asarray(vec1))\n v2 = OPS.to_numpy(OPS.asarray(vec2))\n return numpy.dot(v1, v2) / (numpy.linalg.norm(v1) * numpy.linalg.norm(v2))", "def get_angle(vert1, vert2):\n x_axis = np.array([1, 0])\n input_axis = vert2 - vert1\n input_axis = input_axis / np.linalg.norm(input_axis)\n return math.degrees(np.arccos(np.dot(x_axis, input_axis)))", "def calcP2(thetaS, r1, r2, ds, a):\n v = (2*(r2*thetaS + ds*r1*(1 + r1 + r2 + a*thetaS))* \\\n (r2*Power(thetaS,2) + Power(ds,2)*(1 + r1 + r2 + a*thetaS)* \\\n (2*Power(r1,2) + r2 + 3*r1*r2 + Power(r2,2) + a*(r1 + 2*r2)*thetaS) - \\\n ds*thetaS*(2*r2 + Power(r1 + r2,2) + a*(r1 + 3*r2)*thetaS)))/ \\\n (Power(r1 + r2,2)*(1 + 2*r1 + r2 + 2*a*thetaS)* \\\n (-(thetaS*(r1 - r2 + a*thetaS)) + ds*(2*r1 + a*thetaS)* \\\n (1 + r1 + r2 + a*thetaS)))\n return v", "def vincenty(lon0, lat0, a1, s):\n\n lon0 = np.deg2rad(lon0)\n lat0 = np.deg2rad(lat0)\n a1 = np.deg2rad(a1)\n s = np.deg2rad(s)\n\n sina = np.cos(lat0) * np.sin(a1)\n\n num1 = np.sin(lat0) * np.cos(s) + np.cos(lat0) * np.sin(s) * np.cos(a1)\n den1 = np.sqrt(\n sina**2 + (np.sin(lat0) * np.sin(s) - np.cos(lat0) * np.cos(a1)) ** 2\n )\n lat = np.rad2deg(np.arctan2(num1, den1))\n\n num2 = np.sin(s) * np.sin(a1)\n den2 = np.cos(lat0) * np.cos(s) - np.sin(lat0) * np.sin(s) * np.cos(a1)\n L = np.arctan2(num2, den2)\n lon = np.rad2deg(lon0 + L)\n\n return lon, lat", "def rotacija_pravouglog_trougla_oko_hipotenuze(s2, s1):\r\n c = math.sqrt(s2 * s2 + s1 * s1)\r\n povrsina_trougla= (s2 * s1) / 2\r\n hc = (2 * povrsina_trougla) / c\r\n H1 = math.sqrt(s1 * s1 - hc * hc)\r\n H2 = math.sqrt(s2 * s2 - hc * hc)\r\n pi= 3.14\r\n povrsina = hc * pi * (s1 + s2)\r\n zapremina = (hc * hc * pi * (H1 + H2)) / 3\r\n return povrsina, zapremina", "def vincenty(lat1, lon1, lat2, lon2,\n r_major=6378.1370, r_minor=6356.752314, r_sphere=None):\n lat1 = m.radians(lat1)\n lat2 = m.radians(lat2)\n lon1 = m.radians(lon1)\n lon2 = m.radians(lon2)\n \n if (r_sphere is not None):\n r_major = r_sphere\n r_minor = r_sphere\n f = 0.0\n else:\n f = (r_major-r_minor)/r_major\n \n U1 = m.atan((1.0-f) * m.tan(lat1))\n U2 = m.atan((1.0-f) * m.tan(lat2))\n L = lon2 - lon1\n \n epsilon = 1E-12 # Accuracy (10E-12 -> ~ 0.06mm)\n max_iter = 500\n lam = L\n \n cU1 = m.cos(U1)\n cU2 = m.cos(U2)\n sU1 = m.sin(U1)\n sU2 = m.sin(U2)\n \n for i in range(max_iter):\n lam_old = lam\n sLam = m.sin(lam)\n cLam = m.cos(lam)\n sin_sig = m.sqrt((cU2*sLam)**2 + (cU1*sU2 - sU1*cU2*cLam)**2)\n cos_sig = sU1*sU2 + cU1*cU2*cLam\n sig = m.atan2(sin_sig,cos_sig)\n sin_alp = (cU1*cU2*sLam) / sin_sig\n cos2_alp = 1.0 - sin_alp**2\n if (cos2_alp == 0.0):\n # equitorial line\n cos_2sigm = 100\n C = 0.0\n else:\n cos_2sigm = cos_sig - (2.0*sU1*sU2)/cos2_alp\n C = f/16.0 * cos2_alp * (4.0 + f*(4.0-3.0*cos2_alp))\n lam = L + (1.0 - C) * f * sin_alp * \\\n (sig + C * sin_sig * (cos_2sigm + C * cos_sig * \\\n (-1.0 + 2.0 * cos_2sigm**2)))\n if ((m.fabs(lam - lam_old)) <= epsilon):\n # Found a solution in i iters...\n break\n elif (i == max_iter):\n # Catch the out of iters case, never seen this.\n raise 
Exception(\"Failed to solve for distance\")\n \n usq = cos2_alp * ((r_major**2 - r_minor**2) / r_minor**2)\n A = 1 + usq/16384 * (4096 + usq*(-768 + usq*(320 - 175*usq)))\n B = usq/1024 * (256 + usq*(-128 + usq*(74 - 47*usq)))\n del_sig = B * sin_sig * (cos_2sigm + 0.25*B*(cos_sig*( \\\n -1 + 2*cos_2sigm**2) - (1.0/6.0)*B*cos_2sigm * ( \\\n -3 + 4*sin_sig**2) * (-3 + 4 * cos_2sigm**2)))\n s = r_minor * A * (sig - del_sig)\n alp1 = m.atan2(cU2*m.sin(lam),(cU1*sU2-sU1*cU2*m.cos(lam)))\n alp2 = m.atan2(cU1*m.sin(lam),(cU1*sU2*m.cos(lam)-sU1*cU2))\n\n return (s, m.degrees(alp1), m.degrees(alp2))", "def angleBetween(v1, v2):\n v1_u = unitVector(v1)\n v2_u = unitVector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))", "def theta(a, b):\n \n \n def norm_vec(x):\n norm_out = sqrt(dot(x, x))\n return norm_out\n \n theta = acos(dot(a, b) / (norm_vec(a) * norm_vec(b))) * 180 / pi\n \n print theta", "def Misorien2FZ1(m1,m2,symtype='Cubic'):\n m2=np.matrix(m2)\n ops=GetSymRotMat(symtype)\n angle=6.3\n for op in ops:\n tmp=m1.dot(op.dot(m2.T))\n cosangle=0.5*(tmp.trace()-1)\n cosangle=min(0.9999999999,cosangle)\n cosangle=max(-0.99999999999,cosangle)\n newangle=np.arccos(cosangle)\n if newangle<angle:\n angle=newangle\n oRes=tmp\n return oRes,angle", "def cos_sim(u, v):\n return np.vdot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))", "def compute_angle_v2v(v1, v2, v3=None):\n\n alpha = math.acos(dot_product(v1, v2) / (vlength(v1)*vlength(v2)))\n if v3 is not None:\n cross = cross_product(v2, v1)\n if dot_product(cross,v3) > 0.0:\n return 2*math.pi-alpha\n\n return alpha", "def angle_between(v2, v1):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n result = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n if np.isnan(result):\n if abs(v1_u + v2_u) < .5 * (abs(v1_u) + abs(v2_u)):\n return np.pi\n else:\n return 0.0\n if Left( [v2[1],v2[3]], [0,0], [v1[1],v1[3]] ):\n return 2*np.pi - result\n return result", "def cross(v1: Vec2, v2: Vec2) -> float:\n return v1.x * v2.x + v1.y * v2.y", "def circ_dist(azimuth1, azimuth2, radius=1.0):\n return np.arccos(np.cos(azimuth1 - azimuth2))", "def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n\n #takes out if vectors are 1 or -1 (basically if they're the same direction)\n angle = math.degrees(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))\n return angle", "def angleBetweenVectors(v1, v2):\n v2Size = vectorLength(v2)\n if not v2Size:\n theta = 0.0\n else:\n theta = math.acos(dotProduct(v1, v2) / v2Size)\n return theta", "def getAngle(v1,v2,prec=1E-6):\n \n return(math.acos((np.dot(v1,v2))/np.linalg.norm(v1)/np.linalg.norm(v2)))", "def angle(o1,o2):\n\n o1 = np.array(o1)\n o2 = np.array(o2)\n\n o1a = o1[0:3]\n o1b = o1[3:6]\n \n o2a = o2[0:3]\n o2b = o2[3:6]\n\n norm_a = np.linalg.norm(o1a) * np.linalg.norm(o2a)\n norm_b = np.linalg.norm(o1b) * np.linalg.norm(o2b)\n\n dot_a = np.dot(o1a,o2a) / norm_a\n dot_b = np.dot(o1b,o2b) / norm_b\n \n if dot_a > 1.0 and dot_a - 1.0 <= np.finfo(dot_a.dtype).eps:\n dot_a = 1.0\n \n if dot_b > 1.0 and dot_b - 1.0 <= np.finfo(dot_b.dtype).eps:\n dot_b = 1.0\n\n angle_a = np.arccos(dot_a) * (180.0 / np.pi)\n angle_b = np.arccos(dot_b) * (180.0 / np.pi)\n\n return (angle_a, angle_b)", "def cort(s1, s2):\n num = 0.0\n sum_square_x = 0.0\n sum_square_y = 0.0\n for t in range(len(s1) - 1):\n slope_1 = s1[t + 1] - s1[t]\n slope_2 = s2[t + 1] - s2[t]\n num += slope_1 * slope_2\n sum_square_x += slope_1 * slope_1\n sum_square_y += slope_2 * slope_2\n return num / (np.sqrt(sum_square_x * 
sum_square_y))", "def cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n # return cosine_similarity(v1, v2)[0][0]", "def calcul_angle_vector(vec1, vec2):\n \n try:\n div=(vec1[0]*vec2[0]+vec1[1]*vec2[1]+vec1[2]*vec2[2])/(distance(vec1,[0,0,0])*distance(vec2,[0,0,0]))\n if div>1:\n div=1\n if div<-1:\n div=-1\n #KC#CG# tranlation to degrees\n angle=180/math.pi*math.acos(div)\n except:\n print vec1\n print vec2\n print (vec1[0]*vec2[0]+vec1[1]*vec2[1]+vec1[2]*vec2[2])/(distance(vec1,[0,0,0])*distance(vec2,[0,0,0]))\n return angle", "def segmentarc(c,u1,u2):\n\n pol1=samplearc(c,u1,polar=True)\n pol2=samplearc(c,u2,polar=True)\n sr= (c[1][3] == -2)\n if sr:\n return arc(pol1[0],pol1[1],pol2[2],pol1[2],samplereverse=True)\n else:\n return arc(pol1[0],pol1[1],pol1[2],pol2[2])", "def integral_zhao_vec(x1, x2, s0=0.08333, theta=0.242):\n return kernel_primitive_zhao_vec(x2, s0, theta) - kernel_primitive_zhao_vec(x1, s0, theta)", "def cosine_similarity(v1: Vector, v2: Vector) -> float:\n return dot_product(v1, v2) / (vector_len(v1) * vector_len(v2))", "def get_radians(g1,g2):\n unit_vector_1 = g1 / np.linalg.norm(g1) if np.linalg.norm(g1) != 0 else 0\n unit_vector_2 = g2 / np.linalg.norm(g2) if np.linalg.norm(g2) != 0 else 0\n dot_product = np.dot(unit_vector_1, unit_vector_2)\n radians = np.arccos(dot_product)\n return radians", "def angle_between_vectors(vector1,vector2):\n value = np.sum(np.multiply(vector1, vector2)) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))\n if (value<-1) | (value>1):\n value = np.sign(value)\n angle = np.arccos(value)\n return angle", "def get_cross2d(v1, v2):\n return v1[0]*v2[1] - v1[1]*v2[0]", "def b(self,k1,k2,cosTheta,c):\n return self.b1(k1, k2, cosTheta,c) + \\\n self.b1(k1, self.k3Length(k1, k2, cosTheta), \\\n self.cos1(k1, k2, cosTheta),c) +\\\n self.b1(k2, self.k3Length(k2, k1, cosTheta), \\\n self.cos1(k2, k1, cosTheta),c)", "def angle_vecs(vec1,vec2):\n angle=np.arccos(np.dot(vec1,vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2)))\n return angle", "def integral_zhao(x1, x2, s0=0.08333, theta=0.242):\n return kernel_primitive_zhao(x2, s0, theta) - kernel_primitive_zhao(x1, s0, theta)", "def great_circle_distance(theta1,phi1,theta2,phi2):\n alt1 = np.pi/2.-theta1\n alt2 = np.pi/2.-theta2\n return np.arccos(np.sin(alt1)*np.sin(alt2)+np.cos(alt1)*np.cos(alt2)*np.cos(phi1-phi2))", "def Arc( x, y0, y1, r):\n return 0.5 * r*r * ( np.arctan( (y1).astype(float)/(x).astype(float) ) - np.arctan( (y0).astype(float)/(x).astype(float) ) )", "def dist_sph(w1, w2):\n r = w1.norm(2, -1)\n theta = torch.sum((w1*w2), -1)/r**2\n return torch.acos(theta)", "def angle_hkls(self, h1, h2):\n h1v = norm_vec((vec(*h1).T * self.Bmat)).T\n h2v = norm_vec((vec(*h2).T * self.Bmat)).T\n return np.around(np.arccos(h1v.T*h2v)[0, 0] * degrees, 3)", "def vec_angle_deg(v1,v2):\r\n \r\n return math.degrees(vec_angle_rad(v1,v2))", "def covariance(self, param1: list, param2: list) -> float:\n assert len(param1) == len(param2), \"Parameter lists must be of the same length.\"\n \n n = len(param1)\n \n mean1 = np.mean(param1)\n mean2 = np.mean(param2)\n \n arr1 = np.array(param1)\n arr2 = np.array(param2)\n \n arr1_diff = arr1 - mean1\n arr2_diff = arr2 - mean2\n \n multiplied = arr1_diff * arr2_diff\n sumMultiplied = sum(multiplied)\n covar = sumMultiplied/(n - 1.0)\n \n return covar", "def d2(x0,y0,x1,y1):\n return (x0-x1)*(x0-x1) + (y0-y1)*(y0-y1)", "def cosine_similarity(v1, v2):\n # Cosine Sim:\n # Get 
the words that both have in common\n\n v1words = set(v1.keys())\n v2words = set(v2.keys())\n\n numerator_words = v1words.intersection(v2words)\n\n # Multiply and sum those counts\n numerator = 0.0\n for word in numerator_words:\n numerator += v1[word] * v2[word]\n\n\n # Divide by the sqrt of the product of the sum of the squares of the counts\n denominator = math.sqrt(math.magnitude(list(v1.values())) * math.magnitude(list(v2.values())))\n\n return numerator/denominator", "def spheredist(ra1, dec1, ra2, dec2):\n\n from numpy import radians, degrees, sin, cos, arctan2, hypot, tan\n\n # terminology from the Vicenty formula - lambda and phi and\n # \"standpoint\" and \"forepoint\"\n lambs = radians(ra1)\n phis = radians(dec1)\n lambf = radians(ra2)\n phif = radians(dec2)\n\n dlamb = lambf - lambs\n\n numera = cos(phif) * sin(dlamb)\n numerb = cos(phis) * sin(phif) - sin(phis) * cos(phif) * cos(dlamb)\n numer = hypot(numera, numerb)\n denom = sin(phis) * sin(phif) + cos(phis) * cos(phif) * cos(dlamb)\n\n theta = arctan2(sin(dlamb), cos(phis) * tan(phif) - sin(phis) * cos(dlamb))\n \n return degrees(arctan2(numer, denom)), degrees(theta)", "def get_intersect_angle(self, p0, p1, p2):\n u, v = p1-p0, p2-p0\n costheta = u.dot(v) / math.sqrt(u.dot(u) * v.dot(v))\n return math.degrees(math.acos(costheta))", "def sample_rate(P1, P2):\n v = (P1[0] - P2[0], P1[1] - P2[1], P1[2] - P2[2])\n # Project v onto the xy plane\n # xvect is a unit vector on that plane\n normalized = (1. / np.sqrt(2), 1. / np.sqrt(2), 0.)\n \n angle = np.dot(normalized, v) / modulus(v)\n \n # We need 1 / cosA\n return 1. / np.cos(angle)", "def rotation(self, e1, e2, theta):\n e1_r = e1 * numpy.cos(2 * theta) - e2 * numpy.sin(2 * theta)\n e2_r = e1 * numpy.sin(2 * theta) + e2 * numpy.cos(2 * theta)\n return e1_r, e2_r", "def dist_vincenty(lat1, lon1, lat2, lon2, iterations=20):\r\n if lat1 < -90 or lat1 > 90 or lat2 < -90 or lat2 > 90 or lon1 < -180 or lon1 > 180 or lon2 < -180 or lon2 > 180:\r\n raise ValueError(\r\n \"Latitude values shoulds range from (-90,90) and longitude from (-180,180) but one of the input values is out of bounds. Latitude_1: %f, Logitude_1: %f, Latitude_2: %f, Logitude_2: %f\" %\r\n (lat1, lon1, lat2, lon2))\r\n\r\n major, minor, f = 6378137, 6356752.314245, 1 / 298.257223563\r\n\r\n lat1, lng1, lat2, lng2 = radians(\r\n lat1), radians(lon1), radians(lat2), radians(lon2)\r\n delta_lng = lng2 - lng1\r\n reduced_lat1, reduced_lat2 = atan(\r\n (1 - f) * tan(lat1)), atan((1 - f) * tan(lat2))\r\n\r\n sin_reduced1, cos_reduced1 = sin(reduced_lat1), cos(reduced_lat1)\r\n sin_reduced2, cos_reduced2 = sin(reduced_lat2), cos(reduced_lat2)\r\n\r\n lambda_lng = delta_lng\r\n lambda_prime = 2 * pi\r\n while abs(lambda_lng - lambda_prime) > 10e-12 and iterations > 0:\r\n sin_lambda_lng, cos_lambda_lng = sin(lambda_lng), cos(lambda_lng)\r\n\r\n sin_sigma = sqrt(\r\n (cos_reduced2 * sin_lambda_lng) ** 2 +\r\n (cos_reduced1 * sin_reduced2 -\r\n sin_reduced1 * cos_reduced2 * cos_lambda_lng) ** 2\r\n )\r\n if sin_sigma == 0:\r\n return 0 # Coincident points\r\n\r\n cos_sigma = (\r\n sin_reduced1 * sin_reduced2 +\r\n cos_reduced1 * cos_reduced2 * cos_lambda_lng\r\n )\r\n sigma = atan2(sin_sigma, cos_sigma)\r\n\r\n sin_alpha = (cos_reduced1 * cos_reduced2 * sin_lambda_lng / sin_sigma)\r\n cos_sq_alpha = 1 - sin_alpha ** 2\r\n\r\n if cos_sq_alpha != 0:\r\n cos2_sigma_m = cos_sigma - 2 * \\\r\n (sin_reduced1 * sin_reduced2 / cos_sq_alpha)\r\n else:\r\n cos2_sigma_m = 0.0 # Equatorial line\r\n\r\n C = f / 16. 
* cos_sq_alpha * (4 + f * (4 - 3 * cos_sq_alpha))\r\n\r\n lambda_prime = lambda_lng\r\n lambda_lng = (\r\n delta_lng + (1 - C) * f * sin_alpha * (\r\n sigma + C * sin_sigma * (\r\n cos2_sigma_m + C * cos_sigma * (-1 + 2 * cos2_sigma_m ** 2)\r\n )\r\n )\r\n )\r\n iterations -= 1\r\n\r\n if iterations == 0:\r\n raise ValueError(\"Vincenty formula failed to converge!\")\r\n\r\n u_sq = cos_sq_alpha * (major ** 2 - minor ** 2) / minor ** 2\r\n A = 1 + u_sq / 16384. * (4096 + u_sq * (-768 + u_sq * (320 - 175 * u_sq)))\r\n B = u_sq / 1024. * (256 + u_sq * (-128 + u_sq * (74 - 47 * u_sq)))\r\n delta_sigma = B * sin_sigma * (\r\n cos2_sigma_m + B / 4. * (cos_sigma * (-1 + 2 * cos2_sigma_m ** 2) -\r\n B / 6. * cos2_sigma_m * (-3 + 4 * sin_sigma ** 2) *\r\n (-3 + 4 * cos2_sigma_m ** 2))\r\n )\r\n s = minor * A * (sigma - delta_sigma)\r\n\r\n return round(s, 3) # round to 1mm precision\r", "def cos12_1(self,k1,k2,k12,cos12):\n return (k1 + k2*cos12)/k12", "def SVD_rotate(m1, m2):\n assert m1.shape[0] == m2.shape[0]\n\n # Find the centroids of m1, m2\n centroid1 = np.mean(m1, axis=0)\n centroid2 = np.mean(m2, axis=0)\n\n # Build the covariance matrix\n H = np.dot((m1 - centroid1).T, (m2 - centroid2))\n\n U, S, V = np.linalg.svd(H)\n\n # Middle matrix is to ensure that matrix yields a rotation, not reflection\n R = np.dot(V.T, np.array([ [1,0,0] , [0,1,0], [0,0, np.linalg.det(np.dot(V.T,U.T))] ]) ) \n R = np.dot(R, U.T)\n\n # Find translation \n t = -np.dot(R, centroid1) + centroid2\n \n return (R, t)", "def addVectors((angle1, length1), (angle2, length2)):\n x = math.sin(angle1) * length1 + math.sin(angle2) * length2\n y = math.cos(angle1) * length1 + math.cos(angle2) * length2\n length = math.hypot(x,y)\n angle = 0.5 * math.pi - math.atan2(y,x)\n return (angle, length)", "def calculate_vector_angle(vector_1, vector_2):\n dot = dot_product(vector_1, vector_2)\n cos_angle = float(dot / (two_norm(vector_1) * two_norm(vector_2)))\n # Buffer for floating point errors\n if 1.2 > cos_angle > 1:\n cos_angle = 1\n elif -1.2 < cos_angle < -1:\n cos_angle = -1\n elif -1.2 > cos_angle or 1.2 < cos_angle:\n raise KeypointError(\"Ratio for angle is outside of the domain.\")\n if cos_angle > 0:\n multiplier = 1\n else:\n multiplier = -1\n angle_of_interest = (180 - math.degrees(math.acos(cos_angle))) * multiplier\n return angle_of_interest", "def cos(\r\n vec1: torch.FloatTensor, vec2: torch.FloatTensor, dim: int = -1\r\n) -> torch.FloatTensor:\r\n return torch.sum(vec1 * vec2, dim=dim) / (\r\n vec1.norm(dim=dim) * vec2.norm(dim=dim) + EPS\r\n )", "def cc_coefficient(x, y):\n cor = np.sum( (x-np.mean(x)) * (y-np.mean(y)) )\n norm = sqrt( np.sum((x-np.mean(x))**2) * np.sum((x-np.mean(x))**2) )\n r = cor/norm\n return r", "def p2c(r, t):\n if np.isnan(r) or np.isnan(t):\n return 0, 0 # return 0, 0\n else:\n return r * np.sin(t), r * np.cos(t) # x, y", "def get_arc_3D(v1, v2, points_per_radian=100, radius=1):\n\n # v1 and w become the x, y axes of the great circle\n v1_3D = ang_to_vec_coords(v1, radius=radius)\n v2_3D = ang_to_vec_coords(v2, radius=radius)\n w_axis_3D = np.cross(np.cross(v1_3D, v2_3D), v1_3D)\n # make w a vector of proper radius\n w_len = np.sqrt(square_distance([0,0,0], w_axis_3D))\n w_3D = w_axis_3D * (radius / w_len) \n arc_len = np.arccos(np.dot(v1_3D, v2_3D))\n num_points = arc_len * points_per_radian\n t = np.linspace(0, arc_len, num_points)\n u, cos_t = np.meshgrid(v1_3D, np.cos(t))\n w, sin_t = np.meshgrid(w_3D, np.sin(t))\n arc_points = u*cos_t + w*sin_t\n return arc_points", "def 
compute_cosine_sim(vec1, vec2):\n numer = np.dot(vec1.reshape((300,)), vec2.reshape((300,)))\n denom = np.sqrt(np.sum(np.square(vec1.reshape(300, )))) * np.sqrt(\n np.sum(np.square(vec2.reshape(300, ))))\n\n similarity = numer / denom\n\n return similarity" ]
[ "0.72262084", "0.7002232", "0.68205464", "0.68178624", "0.67799133", "0.67180985", "0.66046625", "0.65430385", "0.65345", "0.6518306", "0.6469074", "0.6411598", "0.6373019", "0.6347909", "0.6329347", "0.62953633", "0.6269493", "0.6250731", "0.6243701", "0.6227481", "0.62264526", "0.6213007", "0.6189849", "0.6185835", "0.61696106", "0.6134397", "0.61172503", "0.61077017", "0.6099179", "0.60944146", "0.60716766", "0.6059066", "0.60465235", "0.6032738", "0.6021339", "0.60209167", "0.6019919", "0.6004615", "0.6002627", "0.60002273", "0.59851396", "0.59791005", "0.5978645", "0.597821", "0.596284", "0.5958713", "0.5953041", "0.5952422", "0.5946972", "0.593531", "0.5933386", "0.5906572", "0.59060645", "0.58997256", "0.58958286", "0.58894634", "0.58847165", "0.58567995", "0.5848753", "0.5848061", "0.58381253", "0.5837943", "0.5835192", "0.58180237", "0.580786", "0.5795801", "0.57935953", "0.5783683", "0.57647973", "0.5757329", "0.5753471", "0.5752591", "0.5743716", "0.57302237", "0.57231176", "0.5696417", "0.5681222", "0.5663947", "0.5657518", "0.5647721", "0.5647282", "0.56266665", "0.56234604", "0.5622224", "0.56194293", "0.56090707", "0.5595262", "0.558833", "0.558364", "0.55722475", "0.55705804", "0.5562075", "0.55599153", "0.5559659", "0.55527425", "0.55495346", "0.55482066", "0.55459124", "0.5538055", "0.55336577" ]
0.6803314
4
Check if quote already exists in Nostalgiabot's memory for this Person.
def has_said(self, quote: str) -> bool:
    return any(q for q in self.quotes if q.content.lower() == quote.lower())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_term_exist(self, term):\n return term in self.postingDict", "def check_existed_did(self):\n for wallet in self.wallet_state_manager.wallets.values():\n if (\n wallet.type() == WalletType.DECENTRALIZED_ID\n and self.did_info.origin_coin.name() == wallet.did_info.origin_coin.name()\n ):\n self.log.warning(f\"DID {self.did_info.origin_coin} already existed, ignore the wallet creation.\")\n raise ValueError(\"Wallet already exists\")", "def isUnique(self, word):\n abbr = self.getAbbr(word)\n return abbr not in self.d or len(self.d[abbr]) == 1 and self.d[abbr][0] == word", "def test_repeated_calls_different_quotes(self):\n quoteSet = set()\n for i in range(5):\n quoteSet.add(getRandomJoke()[\"joke\"])\n self.assertEqual(len(quoteSet) > 1, True)", "def isUnique(self, word):\n abbr = self.gen_abbr(word)\n\n if abbr not in self.dict:\n return True\n elif len(self.dict[abbr]) == 1 and word in self.dict[abbr]:\n return True\n else:\n return False", "def is_person_identifier_used(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT personid FROM person WHERE personid =?\", (person_id,))\n person_identifier = \"\"\n is_used = True\n for row in c:\n person_identifier = row[\"personid\"]\n conn.close()\n if len(person_identifier) == 0:\n is_used = False\n if len(person_identifier) > 0:\n is_used = True\n return is_used\n except:\n return False", "def exists(self, proxy):\n return not self.database.zscore(self.key, proxy) == None", "async def _exists(self, key):\n return key in SimpleMemoryBackend._cache", "def isUnique(self, word):\n abbr = self.get_abbr(word)\n if abbr not in self.abbr:\n return True\n elif len(self.abbr[abbr]) == 1 and word == self.abbr[abbr][0]:\n return True\n else:\n return False", "def isUnique(self, word):\n if len(word) <= 1:\n n = word\n else:\n n = word[0] + str(len(word) - 2) + word[-1] #Get the abbrviation.\n if n not in self.abbrdict or (self.abbrdict[n] == 1 and word in self.origdict): #If it is not in abbrdict or the abbrevation count is 1 and the word has appeared in dictionary, return true.\n return True\n else: #Otherwise, return false.\n return False", "def exist(self):", "def exists(self):\n self.cursor.execute(f\"\"\"\n SELECT 1\n FROM {self.table_name}\n WHERE {self.lookup_type}='{self.word}'\n \"\"\")\n return True if self.cursor.fetchone() else False", "def check_person_existence(self, searched_person_id):\n self.__load_persons_from_file_into_memory()\n return super().check_person_existence(searched_person_id)", "def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())", "def exists(self):\n return True", "def exists(self):\n return True", "def checkIfExists(dbconnection, title):\n cursor = dbconnection.cursor()\n output = \"\"\n title = title.replace(\"'\", \"''\")\n try:\n cursor.execute(\"SELECT * FROM transcriptions WHERE title = '\" + title + \"';\")\n dbconnection.commit()\n output = cursor.fetchone()\n cursor.close()\n if(output is None):\n return False\n else:\n return True\n except:\n dbconnection.rollback()\n cursor.execute(\"SELECT * FROM transcriptions WHERE title = '\" + title + \"';\")\n dbconnection.commit()\n output = cursor.fetchone()\n cursor.close()\n if(output is None):\n return False\n else:\n return True", "def checkWord(word):\r\n check = word in 
cachedWordList\r\n if check:\r\n print(word + \" spelt correctly\")\r\n else:\r\n print(word + \" not found in dictionary\")\r\n return check", "def check_if_already_prepared(self, instance, product_attribute):\n attribute_exist = self.search([('ks_shopify_instance', '=', instance.id),\n ('ks_product_attribute', '=', product_attribute.id)], limit=1)\n if attribute_exist:\n return attribute_exist\n else:\n return False", "def exists_in_db(self) -> bool:\n query = '''SELECT * \n FROM ESLReceipts \n WHERE Transaction_Number=? AND Date=? AND Description=? \n AND Memo=? AND Amount_Debit=? \n AND Amount_Credit=? AND Balance=? \n AND Check_Number=? AND Fees=? \n AND Card_Type=? AND Is_Payment=? \n AND Is_Transaction=? AND User_id=?;'''\n return len(self.db.fetchall(query, values=self.to_tuple())) > 0", "def exists( identifier ):\n return note.exists(identifier)", "def exists(self, answer):\n return self.find(answer) is not None", "def party_exist(party_name: str) -> bool:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from party where name = '{}'\".format(party_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n if len(data) == 0:\n return False\n return True", "def _object_exists(name):\n conn = sqlite3.connect('/dev/input')\n try:\n cur = conn.cursor()\n sql = 'SELECT ROWID FROM object WHERE name=? AND deleted=0'\n cur.execute(sql, (name, ))\n result = cur.fetchall()\n return len(result) > 0\n finally:\n conn.close()", "def exists(self, obj):\n return False", "def exists(self):\n\n if self:\n pass", "def __contains__(self, seqno):\n self._gc()\n for item in self._queue:\n if item[0] == seqno:\n return True\n return False", "def test_check_for_duplicates_with_duplicates(self):\n quotes = [api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author2\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author3\", \"Publication\", [\"tag1, tag2\"])]\n\n with self.assertRaisesRegexp(Exception, \"a duplicate quote was found on line 2 of 'stdin'. 
\"\n \"Quote: \\\"This is an added quote.\\\".\"):\n\n api._check_for_duplicates(quotes, \"stdin\")", "def check_duplicate(triple: str, result: List[str]) -> bool:\n fields = triple.strip().split(', ')\n assert len(fields) == 13\n assert fields[9] == 'BERT'\n psuedo_triple = fields[:11]\n psuedo_triple[9] = 'RELEVANCE'\n return ', '.join(psuedo_triple) in result", "def testSynonymDuplicate(self):\n\t\t\t\tone = spinner.Word.objects.get_single('mac', True)\n\t\t\t\ttwo = spinner.Word.objects.get_single('macintosh', True)\n\t\n\t\t\t\tsyn = spinner.Synonym.objects.get_single(one, two, True)\n\t\t\t\t\n\t\t\t\tsyn2 = spinner.Synonym.objects.get_single(two, one, True)\n\n\t\t\t\tassert syn == syn2\n\n\t\t\t\tsyn.delete()\n\t\t\t\tone.delete()\n\t\t\t\ttwo.delete()", "def object_exists(self, fname):\n return False", "def test_add_quote_but_file_contains_quote_already(self):\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes1.txt\")\n quote = api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"])\n api.add_quote(path, quote)\n\n with self.assertRaisesRegexp(Exception, re.escape(\n 'the quote \"This is an added quote.\" is already in the quote file {0}.'.format(path))):\n api.add_quote(path, quote)", "def have_own_oid(self, oid):\r\n for order in self.owns:\r\n if order.oid == oid:\r\n return True\r\n return False", "def _verify_unique_instance_name(self, name):\n existing = self.instances.find_one({'name': name, 'deleted': False})\n if existing:\n raise AXApiInvalidParam(\"Fixture instance with name '{}' already exists\".format(name))", "def validate_new_person(self, person_id):\n\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Person WHERE id == %s\"\"\", (person_id,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n return False\n return True", "def _word_exists(self, word):\n try:\n self.vault[word]\n return True\n except KeyError:\n log('w', 'No vault entry for %s.' 
% word)\n return False", "def IsExtraProvide(self, token):\n namespace = tokenutil.GetStringAfterToken(token)\n\n if self.GetClosurizedNamespace(namespace) is None:\n return False\n\n if token in self._duplicate_provide_tokens:\n return True\n\n # TODO(user): There's probably a faster way to compute this.\n for created_namespace, created_identifier, _ in self._created_namespaces:\n if namespace == created_namespace or namespace == created_identifier:\n return False\n\n return True", "def object_exists(self, fname):\n return True", "def symptomExists(self, symptom):\n try:\n ret = symptom in self.symptoms_dict_by_letter[symptom[0]]\n except KeyError:\n return False\n return ret", "def __contains__(self, rq):\n return rq in self._data", "def is_article_duplicate(cls, article):\n return cls.db.hkeys(\"article_map\").count(article.link) != 0", "def exists(self):\n return self.obj is not None", "def exists(cls, ko):\n if isinstance(ko, BagDocument):\n return ko._key in cls._dbag\n else:\n return ko in cls._dbag", "def __contains__(self, ngram):\n return ngram in self._ngrams", "def exists_in_db( self, energy, n_digits ):\n if ( len(self.db_energies) == 0 ):\n self.read_db_energies()\n factor = 10**n_digits\n for eng in self.db_energies:\n diff = abs(energy-eng)\n if ( int(diff*factor) == 0 ):\n return True\n return False", "def _item_exists(self, item):\n cursor = self.conn.cursor()\n cursor.execute(\n 'SELECT * FROM Members where first_name = ?;',\n (item['first_name'])\n )\n return True if len(cursor.fetchall()) else False", "def check_unique(self):\n pass", "def mention_exists(self, mention):\n return self._entity_symbols.alias_exists(mention)", "def exists(self, word):\n result = self.find(word)\n return False if result is None else result.is_word", "def exists(self):\r\n return bool(self.bucket.lookup(self.name))", "def __contains__(self, nom_canal):\n return nom_canal in dict(self._canaux)", "def exists(self):\n query = db.session.query(Farmer.btc_addr)\n return query.filter(Farmer.btc_addr == self.btc_addr).count() > 0", "def test_random_quote(self):\n quote = Quote().print()\n self.assertTrue(type(quote) == str)", "def dexists(self, name, key):\n return key in self.db[name]", "def supplier_exist(supplier_name: str) -> bool:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from supplier where name = '{}'\".format(supplier_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n if len(data) == 0:\n return False\n return True", "def exists(self, arg):\n raise NotImplementedError", "def exists(self) -> bool:\n try:\n result = self.get()\n except KeyError:\n return False\n return True", "def _check_address(self):\n for object_ in self.objects:\n if object_.object_name.endswith(' ЕС'):\n if object_.object_address[:6].isnumeric():\n object_.object_address = \\\n object_.object_address[:7] + \\\n object_.object_fed_subj + ', ' + \\\n object_.object_address[7:]", "def search(self, word):\n for i in xrange(len(word)):\n w = word[:i] + '*' + word[i+1:]\n if w in self.dict and (len(self.dict[w]) > 1 or word[i] not in self.dict[w]): return True \n return False", "def tie_exists(self):\n return len(self.marks) == 9", "def exists(self, name):\n return name in self.cache", "def mark_garbage(row):\n\n def is_relation_deprecated():\n return row._relation.isdigit() or row._relation in DEPREC_RELS\n\n def is_postag_undefined():\n return np.all(row['subject']['postag'] == np.zeros((MAX_PHRASE_LEN, 18))) or np.all(\n row['object']['postag'] == 
np.zeros((MAX_PHRASE_LEN, 18))) or np.all(\n row['relation']['postag'] == np.zeros((MAX_PHRASE_LEN, 18)))\n\n return is_relation_deprecated() # or is_postag_undefined()", "def hexists(self):\n try:\n key = self.key\n except DoesNotExist:\n \"\"\"\n If the object doesn't exists anymore, its PK is deleted, so the\n \"self.key\" call will raise a DoesNotExist exception. We catch it\n to return False, as the field doesn't exists too.\n \"\"\"\n return False\n else:\n return self.connection.hexists(key, self.name)", "def isduplicate(self, a, b):\n open(self.mybib, 'w').write(a)\n open(self.otherbib, 'w').write(b)\n res = sp.call('papers add {} --bibtex {} --update-key --mode r --debug'.format(self.otherbib, self.mybib), shell=True)\n return res != 0", "def test_duplicate_quotes(self):\n\n # Setup\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes8.txt\")\n\n # Call function being tested\n with self.assertRaisesRegexp(Exception, re.escape(\"a duplicate quote was found on line 5 of '{}'. Quote: \\\"The depressing thing about tennis is that no matter how good I get, I'll never be as good as a wall.\\\"\".format(path))):\n api.read_quotes(path)", "def _check_registered_proposal(self, id: bytes) -> bool:\n proposal_in_bytes = self._proposal_list[id]\n return True if proposal_in_bytes else False", "def isExist(data):\n return True/False", "def exists(self, key):\n try:\n return (self.salt + str(key)) in self.DB\n except KeyError:\n return False", "def _check_add_entities(self, word: str, type_: str) -> bool:\n\n if self.check_known and self.check_unknown:\n return True\n elif self.check_known and word in self.known_words[type_]:\n return True\n elif self.check_unknown and word not in self.known_words[type_]:\n return True\n return False", "def member_in_database(uniqname, conn):\n with conn.cursor() as cur:\n cur.execute(\n 'SELECT * '\n 'FROM members '\n 'WHERE uniqname = %s',\n (uniqname,)\n )\n member_exists = cur.rowcount > 0\n\n return member_exists", "def exists(self):\n try:\n self.world.find(self.ehandle)\n except KeyError:\n return False\n else:\n return True", "def word_dict_contains (self,\r\n word):\r\n\r\n\r\n\r\n if self.using_database:\r\n aprint('WORDDICT CONTAINS')\r\n\r\n value_tuple = (notebookname, word,)\r\n db_cursor.execute(\"SELECT rowid\"\r\n +\" FROM word_to_indexes\"\r\n +\" WHERE notebook=?\"\r\n +\" AND word=?;\",\r\n value_tuple)\r\n try:\r\n return db_cursor.fetchone()[0] # MIGHT BE PROBLEMATIC\r\n except:\r\n return False\r\n\r\n return str(word) in self.word_dict", "def singularity_exists(self):\n instances = Client.instances(quiet=self.quiet)\n for instance in instances:\n if self.pid in instance.name:\n return True\n return False", "def qid_exists(self, qid):\n return self._entity_symbols.qid_exists(qid)", "def __contains__(self, word):\n if word in self.vocab:\n return True\n else:\n char_ngrams = compute_ngrams(word, self.min_n, self.max_n)\n return any(ng in self.ngrams for ng in char_ngrams)", "def is_existing_object(did):\n if not d1_gmn.app.did.is_existing_object(did):\n raise d1_common.types.exceptions.NotFound(\n 0,\n \"Identifier is {}. Expected a Persistent ID (PID) for an existing \"\n 'object. 
id=\"{}\"'.format(d1_gmn.app.did.classify_identifier(did), did),\n identifier=did,\n )", "def isUnique(self, word):\n if len(word) < 3:\n abbrev = word\n else:\n abbrev = word[0] + str(len(word) - 2) + word[-1]\n if not abbrev in self.abbrev_dict:\n return True\n elif word in self.abbrev_dict[abbrev] and len(self.abbrev_dict[abbrev]) == 1:\n return True\n else:\n return False", "def exists(self, name):\n raise NotImplementedError()", "def __contains__(self, coord):\n cr = self.__class__(coord)\n if cr.title is None:\n cr.title = self.title\n return self.issuperset(cr)", "def isspeech(phone):\n return phone not in OTHERS", "def __contains__(self, key):\n\t\treturn key in self.__dStore", "def __contains__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n return q.filter(PAW2_DBObject.key == key).count() == 1", "def check_duplicate(self, state):\n pass", "def search_done(self):\n \n if len(self.hypotheses) == 0:\n return True\n elif len(self.hypotheses[0]._sequence) >= min([\n self.input_length + 50,\n self.max_sequence_length\n ]):\n return True\n return False", "def is_product_saved(self):\n\n db.execute(\"SELECT product_id FROM Substitute WHERE product_id = %s\",\n (self.product.id,))\n product = db.fetch()\n if product:\n return True\n else:\n return False", "def can_process(self, statement):\n return self.chatbot.storage.count()", "def exists(cls, token):\n return cls.objects.filter(token=token).exists()", "def _check_for_preexisting_identifier(self, doi: Doi):\n # The database expects each field to be a list.\n query_criterias = {\"ids\": [doi.pds_identifier]}\n\n # Query database for rows with given id value.\n columns, rows = self._database_obj.select_latest_rows(query_criterias)\n\n for row in rows:\n existing_record = dict(zip(columns, row))\n\n if doi.doi != existing_record[\"doi\"]:\n raise IllegalDOIActionException(\n f\"There is already a DOI {existing_record['doi']} associated \"\n f\"with PDS identifier {doi.pds_identifier} \"\n f\"(status={existing_record['status']}).\\n\"\n f\"You cannot modify a DOI for an existing PDS identifier.\"\n )", "def check_inventory(self, check_word):\n is_there = False\n for thing in self.bag_of_holding:\n if check_word == thing.name:\n is_there = True\n\n return is_there", "def gene_exists(ensemble, methylation_type, gene):\n\n\tgene_table_name = 'gene_' + gene.replace(\".\", \"_\")\n\treturn len(db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM information_schema.tables WHERE table_name = '%s'\"%gene_table_name).fetchall()) > 0", "def __contains__(self, name):\n return name in set(self)", "def test_known_related_objects_identity_preservation(self):\n self.assertIs(self.aldous, self.brave_new_world.author)", "def contains(self, word: Iterable[Terminal]) -> bool:\n return self._get_final_state(word) is not None", "def exists(self):\n return bool(self.get())", "def entry_exists(title):\n try:\n f = default_storage.open(f\"entries/{title}.md\")\n return True\n\n except FileNotFoundError:\n return False", "def test_add_quote_but_quote_object_not_passed(self):\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes1.txt\")\n with self.assertRaisesRegexp(Exception, \"The quote parameter must be type class Quote.\"):\n api.add_quote(path, None)", "def has_chunk(self, dimension: Dimension, cx: int, cz: int) -> bool:\n key = (dimension, cx, cz)\n return key in self._chunk_cache or key in self._chunk_history", "def object_exists(self, fname):\n return self.object_exists", "def 
check_for_duplicate_subject_identifier(self):\n pass", "def __contains__(self, key):\n return key in self._get_storage()" ]
[ "0.5549986", "0.5529568", "0.55025846", "0.54980576", "0.54824877", "0.54619014", "0.5449591", "0.52898127", "0.52789867", "0.5272745", "0.5230741", "0.52132195", "0.5202484", "0.5193138", "0.5162743", "0.5162743", "0.5120887", "0.51052827", "0.5090298", "0.50702953", "0.50422996", "0.50374776", "0.5006044", "0.50045365", "0.4992264", "0.49846002", "0.4983254", "0.49743173", "0.49729666", "0.4970381", "0.49598125", "0.49589273", "0.4958251", "0.49354038", "0.49346295", "0.49291718", "0.4927164", "0.49166423", "0.49093905", "0.48920217", "0.48823988", "0.48820347", "0.4877772", "0.48733157", "0.48614058", "0.48478624", "0.48382983", "0.48230508", "0.48223093", "0.48222485", "0.4812214", "0.48087755", "0.4802464", "0.4790412", "0.47886273", "0.4787243", "0.47809377", "0.47785944", "0.4771092", "0.4768899", "0.47622168", "0.47550258", "0.47493657", "0.47460884", "0.47434005", "0.474138", "0.47403252", "0.4739011", "0.4736059", "0.47331718", "0.47315788", "0.47306192", "0.4730374", "0.47294793", "0.47231525", "0.4720974", "0.47198516", "0.47159588", "0.4715777", "0.47021875", "0.47018656", "0.46987906", "0.46971446", "0.4695307", "0.46850926", "0.46823663", "0.46779966", "0.46775863", "0.4675543", "0.4672689", "0.46717054", "0.46694762", "0.46642023", "0.46614376", "0.46608222", "0.4660413", "0.4659836", "0.4659394", "0.46583256", "0.46580294" ]
0.6199974
0
Create a state network.
def __init__(
        self,
        name_or_scope,
        output_dim,
        env_spec=None,
        observation_dim=None,
        observation_input=None,
        **kwargs):
    self.setup_serialization(locals())
    super(StateNetwork, self).__init__(name_or_scope, **kwargs)
    self.output_dim = output_dim
    assert env_spec or observation_dim
    self.observation_dim = (observation_dim
                            or env_spec.observation_space.flat_dim)
    with tf.variable_scope(self.scope_name):
        if observation_input is None:
            if not isinstance(self.observation_dim, collections.Iterable):
                observation_input = tf.placeholder(
                    tf.float32,
                    [None, self.observation_dim],
                    "_observation")
            else:
                observation_input = tf.placeholder(
                    tf.float32,
                    [None] + list(self.observation_dim),
                    "_observation")
        self.observation_input = observation_input
        self._create_network(observation_input=observation_input)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_network(layers):\r\n return NeuronNetwork(layers)", "def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)", "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def construct_network(self, n_units, n_samples=1, noise_dim=0,\n keep_p=1., nonlinearity=True, init_params=None, name=\"\"):\n print \"constructing network, n_units: \",n_units\n # TODO use kwargs for more elagant solutions to being called by this \n # base class\n assert keep_p ==1. and nonlinearity and noise_dim == 0\n\n assert init_params is None # this is implemented only in the Bayesian flow version of this function\n\n ### Define parameters of the network\n self.weights, self.biases, KL = {}, {}, 0.\n self.layers = []\n # Establish paramters of appromiate posterior over weights and\n # biases.\n for l in range(1, len(n_units)):\n with tf.variable_scope(name+'Layer_%d'%l):\n n_in, n_out = n_units[l-1], n_units[l]\n\n # use non neglidgible uncertainty if we are doing VI\n sigma_init = self.init_sigma_params\n\n w_prior_sigma, b_prior_sigma = self.w_prior_sigma, self.w_prior_sigma\n mu_init_sigma_w, mu_init_sigma_b = np.sqrt(1./(n_in)), 1.\n\n (w_mu, w_logstd), _, w_KL = utils.set_q(name+\"w_%d\"%l,\n sigma_prior=w_prior_sigma, mu_init_sigma=mu_init_sigma_w,\n sigma_init=sigma_init, n_samples=0,\n size=[n_in, n_out], save_summary=True)\n\n # We use same init_sigma for weights and biases.\n (b_mu, b_logstd), _, b_KL = utils.set_q(name+\"b_%d\"%l,\n sigma_prior=b_prior_sigma, mu_init_sigma=mu_init_sigma_b,\n sigma_init=sigma_init, n_samples=0,\n size=[n_out], save_summary=True)\n self.weights['w_%d_mu'%l], self.weights['w_%d_std'%l] = w_mu, tf.nn.softplus(w_logstd)\n self.biases['b_%d_mu'%l], self.biases['b_%d_std'%l] = b_mu, tf.nn.softplus(b_logstd)\n\n self.params += [w_mu, b_mu, w_logstd, b_logstd]\n KL += w_KL + b_KL\n\n # Add an extra dimension to correspond to samples.\n prev_layer = tf.stack([self.x]*n_samples)\n self.layers.append(prev_layer)\n # shape is [n_samples, ?, dim(x)]\n\n ### Define activations in each layer\n for l in range(1,len(n_units)):\n print \"defining activations in layer %d\"%l\n # Multiply with weight matrix and add bias\n prev_layer = tf.reshape(prev_layer, [-1, n_units[l-1]])\n layer_pre_bias = tf.matmul(prev_layer, self.weights['w_%d_mu'%l])\n layer_pre_bias = tf.reshape(layer_pre_bias, [n_samples, -1, n_units[l]])\n # Shape of layer_pre_bias is [n_samples, ?, n_units[l]]\n\n # add mean bias term\n layer = tf.add(layer_pre_bias, self.biases['b_%d_mu'%l][None, None, :])\n\n # Calculate the noise in each hidden unit.\n # must use absolute value of activation because final layer may\n # have negative values.\n layer_var = tf.matmul(tf.reshape(prev_layer**2,[-1,\n n_units[l-1]]), self.weights['w_%d_std'%l]**2)\n layer_var = tf.reshape(layer_var, [n_samples, -1, n_units[l]])\n layer_var += self.biases['b_%d_std'%l]**2\n\n # Now sample noise and add 
scaled noise.\n # This constitutes the local reparameterization trick.\n eps = tf.random_normal(name='eps_%d'%l, mean=0.,\n stddev=1.0, shape=[n_samples, 1, n_units[l]])\n layer_sigma = tf.sqrt(layer_var)\n layer += layer_sigma*eps\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_sigmas\"%l, layer_sigma)\n tf.summary.histogram(name+\"Layer_%d_activations_pre_tanh\"%l, layer)\n\n # Add tanh nonlinearity\n if l != (len(n_units) - 1): layer = tf.nn.tanh(layer)\n\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_activations_post_tanh\"%l,layer)\n\n prev_layer = layer\n self.layers.append(prev_layer)\n self.KL_BNN = KL\n return prev_layer", "def create_network(address=None, **options):\n return NetworkDefinition(address, **options)", "def _create_network(self, name):\n network = self.network(self.num_actions, self.quantile_embedding_dim,\n name=name)\n return network", "def _build_network(self):\n pass", "def test_create_state(self):\n\n # States should be abled to be identified by numbers or by strings I suppose.\n # I don't imagine that strings will ever be used.\n mdp = MDP()\n mdp.add_state(0)\n mdp.add_state(1)\n mdp.add_state(2)\n mdp.add_state(3)\n mdp.add_state(4)\n mdp.add_state(5, terminal=True)\n self.assertEqual(mdp.num_states(), 6)", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)", "def create_network():\n net = ln.models.TinyYolo(CLASSES, CONF_THRESH, NMS_THRESH)\n\n net.load(args.weight)\n net.eval()\n net.postprocess.append(ln.data.transform.TensorToBrambox(NETWORK_SIZE, LABELS))\n net = net.to(device)\n return net", "def _create_network(self):\n self.z_mean, self.z_log_sigma_sq = self._recognition_network()\n tf.add_to_collection(\"outputs\", self.z_mean)\n\n # Draw one sample z from Gaussian distribution\n eps = tf.random_normal((self.batch_size, self.output_size), 0, 1, dtype=tf.float32)\n # z = mu + sigma*epsilon\n self.z_latent = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))\n tf.add_to_collection(\"latents\", self.z_latent)\n\n # Use generator to determine mean of\n # Bernoulli distribution of reconstructed input\n self.x_decoded = self._generator_network()\n tf.add_to_collection(\"generators\", self.x_decoded)\n tf.add_to_collection(\"targets\", tf.zeros([self.batch_size], dtype=tf.int32))", "def _network_template(self, state):\n net = tf.cast(state, tf.float32)\n net = tf.div(net, 255.)\n net = slim.conv2d(\n net, int(32 * self.network_size_expansion), [8, 8], stride=4)\n net = slim.conv2d(\n net, int(64 * self.network_size_expansion), [4, 4], stride=2)\n net = slim.conv2d(\n net, int(64 * self.network_size_expansion), [3, 3], stride=1)\n net = slim.flatten(net)\n net = slim.fully_connected(net, int(512 * self.network_size_expansion))\n\n q_values = []\n for _ in range(self.number_of_gammas):\n gamma_q_value = slim.fully_connected(\n net, self.num_actions, activation_fn=None)\n q_values.append(gamma_q_value)\n\n # Estimate the hyperbolic discounted q-values\n hyp_q_value = agent_utils.integrate_q_values(q_values,\n 
self.integral_estimate,\n self.eval_gammas,\n self.number_of_gammas,\n self.gammas)\n\n return self._get_network_type()(hyp_q_value, q_values)", "def state_create(recipe, stage, devtype):\n\n # subsequent `.load_state_dict()` automatically moves to device and casts\n # `model` stage in a stage are allowed to be None\n model = get_model(recipe, **stage[\"model\"]).to(**devtype)\n\n # base lr is specified in the optimizer settings\n optim = get_optimizer(model, stage[\"optimizer\"])\n\n # name-id mapping for copying optimizer states\n mapper = {k: id(p) for k, p in model.named_parameters()}\n\n return State(model, optim, mapper)", "def _init_networks(self, state_dict: OrderedDict):\n self.dqn = Brain(self.backbone_cfg, self.head_cfg).to(self.device)\n self.dqn.load_state_dict(state_dict)\n self.dqn.eval()", "def _make_graph(self):\n # this resets the whole default graph for tensorflow\n tf.reset_default_graph()\n # inputs/outputs:\n # each input example will be two np.hstacked 3x3 matrices, flattened\n # (initial state s and final state s' after selecting action a)\n self.input = tf.placeholder(tf.float32, [None, 3 * 6])\n self.layers, self.weights, self.biases = \\\n make_fully_connected_network(\n input_layer=self.input,\n architecture=self.architecture,\n activation=self.activation\n )\n self.output = self.layers[-1]\n self.observed = tf.placeholder(tf.float32, shape=[None, 1])\n # MSE loss function\n self.loss = tf.reduce_sum(tf.square(self.output - self.observed))\n if self.penalty:\n penalty_tensor = tf.add_n([self.penalty_function(x) for x in self.weights])\n self.loss = self.loss + self.penalty * penalty_tensor\n self.optimizer = (self.optimizer_algo(learning_rate=self.learning_rate, **self.optimizer_params)\n .minimize(self.loss))", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n\n # Compute the Q-values which are used for action selection in the current\n # state.\n self._net_outputs = self.online_convnet(self.state_ph,\n self.num_quantile_samples)\n # Shape of self._net_outputs.quantile_values:\n # num_quantile_samples x num_actions.\n # e.g. 
if num_actions is 2, it might look something like this:\n # Vals for Quantile .2 Vals for Quantile .4 Vals for Quantile .6\n # [[0.1, 0.5], [0.15, -0.3], [0.15, -0.2]]\n # Q-values = [(0.1 + 0.15 + 0.15)/3, (0.5 + 0.15 + -0.2)/3].\n self._q_values = tf.reduce_mean(self._net_outputs.quantile_values, axis=0)\n self._q_argmax = tf.argmax(self._q_values, axis=0)\n self._policy_logits = tf.nn.softmax(self._q_values / self.tau, axis=0)\n self._stochastic_action = tf.random.categorical(\n self._policy_logits[None, Ellipsis],\n num_samples=1,\n dtype=tf.int32)[0][0]\n\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n self.num_tau_samples)\n # Shape: (num_tau_samples x batch_size) x num_actions.\n self._replay_net_quantile_values = self._replay_net_outputs.quantile_values\n self._replay_net_quantiles = self._replay_net_outputs.quantiles\n\n # Do the same for next states in the replay buffer.\n self._replay_net_target_outputs = self.target_convnet(\n self._replay.next_states, self.num_tau_prime_samples)\n # Shape: (num_tau_prime_samples x batch_size) x num_actions.\n vals = self._replay_net_target_outputs.quantile_values\n self._replay_net_target_quantile_values = vals\n\n # Compute Q-values which are used for action selection for the states and\n # next states in the replay buffer.\n target_next_action = self.target_convnet(self._replay.next_states,\n self.num_quantile_samples)\n target_action = self.target_convnet(self._replay.states,\n self.num_quantile_samples)\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_next_quantile_values_action = target_next_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_next_quantile_values_action = tf.reshape(\n target_next_quantile_values_action,\n [self.num_quantile_samples, self._replay.batch_size, self.num_actions])\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_quantile_values_action = target_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_quantile_values_action = tf.reshape(target_quantile_values_action,\n [self.num_quantile_samples,\n self._replay.batch_size,\n self.num_actions])\n # Shape: batch_size x num_actions.\n self._replay_next_target_q_values = tf.squeeze(tf.reduce_mean(\n target_next_quantile_values_action, axis=0))\n self._replay_target_q_values = tf.squeeze(tf.reduce_mean(\n target_quantile_values_action, axis=0))\n\n self._replay_next_qt_argmax = tf.argmax(\n self._replay_next_target_q_values, axis=1)", "def network_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_network(**kwargs)", "def create_network(self, *, name: t.Optional[str] = None) -> Network:\n network = Network(self, name=name)\n self._networks.add(network)\n return network", "def _build_networks(self):\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n if self.acting_policy == 'hyperbolic':\n self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, 
axis=1)[0]\n elif self.acting_policy == 'largest_gamma':\n self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]\n else:\n raise NotImplementedError", "def construct_network(self):\n r = 0\n n = self.nbr_0_splxs\n for k in range(n):\n self.splxs.append((0, (0, k)))\n self.nbr_splxs += 1\n r, edge = self.find_next_edge(r)\n # this while loop finds the new edge to treat and add it to the 1-splx list and then finds out if a 2-splx is created\n while edge != (-1, -1):\n # Add the new edge\n self.one_splxs.append((edge, self.nbr_splxs))\n self.splxs.append((1, self.nbr_1_splxs))\n self.nbr_1_splxs += 1\n self.nbr_splxs += 1\n self.dist_appearance.append(r)\n a, b = edge\n # find out if a 2-splx has been created\n for i in range(self.nbr_1_splxs - 1):\n c, d = self.one_splxs[i][0]\n if d == a:\n for j in range(i + 1, self.nbr_1_splxs - 1):\n e, f = self.one_splxs[j][0]\n if e == c and f == b:\n self.two_splxs.append((self.nbr_1_splxs - 1, i, j))\n self.splxs.append((2, self.nbr_2_splxs))\n self.nbr_2_splxs += 1\n self.nbr_splxs += 1\n self.dist_appearance.append(r)\n # find the next edge to treat\n r, edge = self.find_next_edge(r)\n print(\"Network created\")\n return ()", "def create_net(self, net_name, shared=\"false\"):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _net_info = {\"network\":\n {\"name\": net_name,\n \"shared\": shared,\n \"admin_state_up\": True}}\n _body = json.dumps(_net_info)\n\n response = self.request(\"POST\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating network.\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Creation of network Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Network is created successfully. 
Details : %s \" %\n output['network'])\n\n return output['network']['id']", "def _createNetwork(self,verbose):\n filename,n,rulesList = self.filename,self.n,self.rulesList\n if self.verbose:\n mult = 2\n if self.MichaelisMenten:\n mult = 4\n start,startWall = cpuTime(),wallTime()\n print(\"\")\n print(\"Creating network with \"+str(n)+\" activation sites\")\n print(\" and \"+str(len(rulesList))+\" additional rules (\" \\\n +str(mult*(n+len(rulesList)))+\" parameters).\")\n \n namesList = writeBNGL.writeBNGLnetwork(n,rulesList,filename, \\\n MichaelisMenten=self.MichaelisMenten)\n self._runBNGLfile(filename)\n \n if self.verbose:\n print(\"Network creation took \"+bothTimeStr(start,startWall))\n \n return namesList", "def prepare_state(infos: StateInfos):\n state = GraphVisualizerState()\n\n state.connected = infos.connected\n\n if infos.S1_filename:\n if not state.set_first_star(infos.S1_filename):\n print(f\"Can't load {infos.S1_filename} as S1\")\n\n if infos.S2_filename:\n if not state.set_second_star(infos.S2_filename):\n print(f\"Can't load {infos.S2_filename} as S2\")\n\n if infos.event_filename:\n if not state.set_event(infos.event_filename):\n print(f\"Can't load {infos.event_filename} as event\")\n\n state.set_distance(infos.distance)\n\n for i in range(len(infos.top_texts)):\n state.set_top_text(infos.top_texts[i], i)\n\n for i in range(len(infos.bot_texts)):\n state.set_bot_text(infos.bot_texts[i], i)\n\n return state", "def _build_network(self):\n self.new_trainable_variable(\"w0_sin\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_sin\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_sin\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n self.new_trainable_variable(\"b1_sin\", np.zeros(config.oscillators, dtype=np.float64))\n\n self.new_trainable_variable(\"w0_cos\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_cos\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_cos\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n self.new_trainable_variable(\"b1_cos\", np.zeros(config.oscillators, dtype=np.float64))\n\n def action_infer(state: np.array) -> np.array:\n \"\"\"\n Get state and return feedback.\n\n state: [f_0, f_1, ..., phi_0, phi_1, ..., t_0, t_1, ...]\n return: [phase_feedback0, phase_feedback1, ..., angle_range0, angle_range1, ...]\n\n Discrepancy for torsion spring = alpha / 2 * k * range * T * sin(phi_i)\n \"\"\"\n forces = state[:config.somites]\n phis = state[config.somites:config.somites + config.oscillators]\n tensions = state[config.somites + config.oscillators:]\n\n f_sin, f_cos = self._calc_fs(np.concatenate((forces, tensions)))\n discrepancies = -0.5 * config.caterpillar_params[\"vertical_ts_k\"] * config.caterpillar_params[\"realtime_tunable_ts_rom\"] * tensions * np.sin(phis)\n return f_sin * np.sin(phis) + f_cos * np.cos(phis) - self.get_discrep_coeffs() * discrepancies, np.ones(config.oscillators) * config.caterpillar_params[\"realtime_tunable_ts_rom\"]\n\n return action_infer", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n 
f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def create_network(self):\n from dallinger.networks import Star\n\n return Star(max_size=2)", "def _create_graph(netlist):\n G = nx.Graph()\n for t in netlist:\n G.add_edges_from([(t.name, t.drain), (t.name, t.gate), (t.name, t.source)])\n return G", "def _create_model(self, input_state, num_actions):\n with tf.name_scope('shared_layers'):\n layer = Dense(NN_WIDTH, activation = 'relu')(input_state);\n layer = Dense(NN_WIDTH, activation = 'relu')(layer);\n layer = Dense(NN_WIDTH, activation = 'relu')(layer);\n layer = Dense(NN_WIDTH, activation = 'relu')(layer);\n with tf.name_scope('policy_network'):\n policy = Dense(num_actions, activation = 'softmax')(layer);\n with tf.name_scope('value_network'):\n value = Dense(1)(layer);\n return (policy, value);", "def create_state():\n state_json = request.get_json()\n if state_json is None:\n abort(400, 'Not a JSON')\n if state_json.get('name') is None:\n abort(400, \"Missing name\")\n state = State(**state_json)\n storage.new(state)\n storage.save()\n return jsonify(state.to_dict()), 201", "def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def make_state() -> state.GameState:\r\n dung: world.Dungeon = worldgen.EmptyDungeonGenerator(20, 20).spawn_dungeon(0)\r\n p1x, p1y = dung.get_random_unblocked()\r\n p2x, p2y = dung.get_random_unblocked()\r\n while (p2x, p2y) == (p1x, p1y):\r\n p2x, p2y = dung.get_random_unblocked()\r\n ent1 = entities.Entity(1, 0, p1x, p1y, 10, 10, 2, 1, [], dict())\r\n ent2 = entities.Entity(2, 0, p2x, p2y, 10, 10, 2, 1, [], dict())\r\n return state.GameState(True, 1, 1, 2, world.World({0: dung}), [ent1, ent2])", "def gen_network(self):\n di = nx.DiGraph()\n di.add_edges_from(self.network_edges())\n di.add_nodes_from(self.network_nodes())\n self.network = di\n self.highlight_cycles()\n return self", "def create_network(options, vsm_obj):\n edge_id = get_edge(vsm_obj)\n if not edge_id:\n if not add_edge(options):\n print(\"Failed to create edge\")\n return False\n edge_id = get_edge(vsm_obj)\n\n vdn_scope 
= get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n name = get_network_name(options)\n response = virtual_wire.read_by_name(name)\n if response != \"FAILURE\":\n print(\"Found network %s already exists\" % options.name)\n return True\n\n virtual_wire_create = VirtualWireCreateSpecSchema()\n virtual_wire_create.name = name\n virtual_wire_create.tenantId = name\n virtual_wire_create.description = 'NSX network %s' % name\n\n # check if user needs to enable guest vlan tagging,\n # this is required if one needs to run vlan tests in nested\n # environment.\n if hasattr(options, 'guest_vlan'):\n if options.guest_vlan is True:\n print(\"network %s has guest vlan tagging enabled\"\\\n % options.name)\n virtual_wire_create.guestVlanAllowed = True\n\n print(\"Creating network %s\" % options.name)\n result = virtual_wire.create(virtual_wire_create)\n if (result[0].response.status != 201):\n print(\"response: %s\" % result[0].response.status)\n print(\"response: %s\" % result[0].response.reason)\n return False\n print(\"Changing security settings on the network\")\n set_network_security_policy(options)\n return add_edge_interface(options, edge_id)", "def initialise_network(self):\n raise NotImplementedError", "def newnode(self, name=None, num_states=0):\n # (const char* name, int num_states, net_bn* net)\n if num_states == 0:\n print(\"Warning: Set the number of states when using newnode() \" +\n \"or adding discrete levels won't work.\")\n\n cnetica.NewNode_bn.argtypes = [c_char_p, c_int, c_void_p]\n cnetica.NewNode_bn.restype = c_void_p\n return cnetica.NewNode_bn(ccharp(name), num_states, self.net)", "def __createNetwork__(self, amount_nodes, amount_links):\n random.seed()\n numOfNodes = 0\n linksPerIteration = (amount_links-3)/(amount_nodes-3) if amount_nodes > 3 else 1\n #generate n nodes\n while numOfNodes < amount_nodes:\n node = Node(numOfNodes)\n self.appendNode(node)\n numOfNodes += 1\n #make first three nodes fully connected\n if numOfNodes == 2:\n self.__connectNode__(numOfNodes, 1)\n if numOfNodes == 3:\n self.__connectNode__(numOfNodes, 2)\n #link following nodes\n if numOfNodes > 3:\n self.__connectNode__(numOfNodes, linksPerIteration)", "def create_network(self, context, network):\n LOG.debug(_(\"NeutronRestProxyV2: create_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n with context.session.begin(subtransactions=True):\n # Validate args\n tenant_id = self._get_tenant_id_for_create(context,\n network[\"network\"])\n\n # create network in DB\n new_net = super(NeutronRestProxyV2, self).create_network(context,\n network)\n self._process_l3_create(context, new_net, network['network'])\n mapped_network = self._get_mapped_network_with_subnets(new_net,\n context)\n\n # create network on the network controller\n self.servers.rest_create_network(tenant_id, mapped_network)\n\n # return created network\n return new_net", "def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_layer)\n network_output = keras.layers.Dense(NETWORK_OUTPUT_SIZE, kernel_initializer='random_uniform', activation='linear')(network_layer)\n network = keras.models.Model(inputs=network_input, outputs=network_output)\n network.compile(loss=\"mse\", optimizer=\"Adam\")\n return network", "def _make_network(self):\n inp = Input(shape = 
(self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n return Model(inp, out)", "def test_create_network():\n _network = Network()", "def create_state():\n state_json = request.get_json(silent=True)\n if not state_json:\n return jsonify({'error': 'Not a JSON'}), 400\n if 'name' not in state_json:\n return jsonify({'error': 'Missing name'}), 400\n state = State(**state_json)\n state.save()\n return jsonify(state.to_dict()), 201", "def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode", "def macro_network():\n # fmt: off\n tpm = np.array([\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 1.0, 1.0],\n ])\n # fmt: on\n return Network(tpm, node_labels=LABELS[:tpm.shape[1]])", "def create_tree():\n basey = 960/2\n basex = 600/2\n newstruct = defaultdict(dict)\n homenw = Utility.read_configuration(config=\"HOME_NETWORK\")\n alive, _ = srp(Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=homenw),\\\n timeout=2, verbose=0)\n\n for idx in range(0, len(alive)):\n try:\n hname, _, _ = socket.gethostbyaddr(alive[idx][1].psrc)\n hostname = hname.split(\".\")[0]\n except:\n hostname = alive[idx][1].psrc\n\n mac = alive[idx][1].hwsrc\n ipaddr = alive[idx][1].psrc\n xcoord = random.randint(0, basex)\n ycoord = random.randint(0, basey)\n\n newstruct[hostname]['ip'] = ipaddr\n newstruct[hostname]['mac'] = mac\n newstruct[hostname]['hostname'] = hostname\n newstruct[hostname]['x'] = xcoord\n newstruct[hostname]['y'] = ycoord\n\n if not ipaddr.endswith('.1'):\n newstruct[hostname]['gateway'] = \"N\"\n else:\n newstruct[hostname]['gateway'] = \"Y\"\n newstruct[hostname]['x'] = basex + 50\n newstruct[hostname]['y'] = basey + 50\n\n\n #---------------------------------#\n # New implementation with sqlite3 #\n #---------------------------------#\n HomeNetwork.add_update_rows(newstruct, init=True)", "def build_network(num_actions: int) -> hk.Transformed:\n\n def q(obs):\n network = hk.Sequential(\n [hk.Flatten(),\n nets.MLP([FLAGS.hidden_units, num_actions])])\n return network(obs)\n\n return hk.without_apply_rng(hk.transform(q, apply_rng=True))", "def _generateNetwork(self, n_actions, obs_space):\r\n \r\n import tensorflow as tf \r\n self._ALPHA = 1e-3 # learning rate \r\n RESIZED_SCREEN = 84\r\n self._STATE_FRAMES = 3 # states/images used for taking a decision\r\n \r\n # Graph for compressing the input image \r\n x, y, z = obs_space\r\n self._image_input_layer = tf.placeholder(\"float\", \r\n [None, x, y, z])\r\n image_step_size_x = int(np.ceil(float(x / RESIZED_SCREEN)))\r\n image_step_size_y = int(np.ceil(float(y / RESIZED_SCREEN)))\r\n extra_pad_x = RESIZED_SCREEN - int(x / image_step_size_x)\r\n extra_pad_y = RESIZED_SCREEN - int(y / image_step_size_y)\r\n self._image_output_layer = tf.nn.max_pool(\r\n 
self._image_input_layer, \r\n ksize=[1, image_step_size_x, image_step_size_y, 1],\r\n strides=[1, image_step_size_x, image_step_size_y, 1], \r\n padding=\"VALID\") \r\n \r\n # Function for compressing (and reshaping) the image\r\n self._compressImage = lambda obs : np.pad(\r\n self._session.run(\r\n self._image_output_layer, \r\n feed_dict={self._image_input_layer: np.array([obs])})/255.0, \r\n ((0,0), (0,extra_pad_x), (0,extra_pad_y), (0,0)),\r\n mode='constant') \r\n\r\n CONVOLUTION_FILTER_VECTOR = [6, 6, 4]\r\n CONVOLUTION_STRIDE_VECTOR = [3, 3, 2]\r\n CONVOLUTION_KERNEL_VECTOR = [16, 16, 36]\r\n CONVOLUTION_INPUT_VECTOR = ([z * self._STATE_FRAMES] + \r\n CONVOLUTION_KERNEL_VECTOR[:-1])\r\n FEED_FWD_VECTOR = [(3**2) * CONVOLUTION_KERNEL_VECTOR[-1], 64, \r\n n_actions] \r\n \r\n # The chosen activation function is the Leaky ReLU function\r\n self._activation = lambda x : tf.maximum(0.01*x, x)\r\n\r\n \r\n # Initialization parameters\r\n INITIALIZATION_STDDEV = 0.1\r\n INITIALIZATION_MEAN = 0.00\r\n INITIALIZATION_BIAS = -0.001\r\n\r\n # Convolutional layers\r\n self._input_layer = tf.placeholder(\"float\", \r\n [None, \r\n RESIZED_SCREEN, \r\n RESIZED_SCREEN, \r\n z * self._STATE_FRAMES])\r\n self._convolutional_weights = []\r\n self._convolutional_bias = []\r\n self._hidden_convolutional_layer = [self._input_layer]\r\n\r\n for i in range(len(CONVOLUTION_FILTER_VECTOR)):\r\n self._convolutional_weights.append(tf.Variable(tf.truncated_normal(\r\n [CONVOLUTION_FILTER_VECTOR[i], \r\n CONVOLUTION_FILTER_VECTOR[i], \r\n CONVOLUTION_INPUT_VECTOR[i], \r\n CONVOLUTION_KERNEL_VECTOR[i]], \r\n mean=INITIALIZATION_MEAN, \r\n stddev=INITIALIZATION_STDDEV)))\r\n self._convolutional_bias.append(tf.Variable(tf.constant(\r\n INITIALIZATION_BIAS, \r\n shape=[CONVOLUTION_KERNEL_VECTOR[i]])))\r\n self._hidden_convolutional_layer.append(\r\n self._activation(tf.nn.conv2d(\r\n self._hidden_convolutional_layer[i], \r\n self._convolutional_weights[i], \r\n strides=[1, \r\n CONVOLUTION_STRIDE_VECTOR[i],\r\n CONVOLUTION_STRIDE_VECTOR[i], \r\n 1], \r\n padding=\"VALID\") \r\n + self._convolutional_bias[i]))\r\n \r\n # Feed forward layers\r\n self._hidden_activation_layer = [tf.reshape(\r\n self._hidden_convolutional_layer[-1], \r\n [-1, FEED_FWD_VECTOR[0]])]\r\n self._feed_forward_weights = []\r\n self._feed_forward_bias = []\r\n\r\n for i in range(len(FEED_FWD_VECTOR) - 2):\r\n self._feed_forward_weights.append(tf.Variable(tf.truncated_normal(\r\n [FEED_FWD_VECTOR[i], \r\n FEED_FWD_VECTOR[i+1]], \r\n mean=INITIALIZATION_MEAN, \r\n stddev=INITIALIZATION_STDDEV)))\r\n self._feed_forward_bias.append(tf.Variable(tf.constant(\r\n INITIALIZATION_BIAS, shape=[FEED_FWD_VECTOR[i+1]])))\r\n self._hidden_activation_layer.append(self._activation(\r\n tf.matmul(self._hidden_activation_layer[i], \r\n self._feed_forward_weights[i]) \r\n + self._feed_forward_bias[i])\r\n )\r\n \r\n # The calculation of the state-action value function does not \r\n # require the neurons' activation function\r\n self._feed_forward_weights.append(tf.Variable(tf.truncated_normal(\r\n [FEED_FWD_VECTOR[-2], \r\n FEED_FWD_VECTOR[-1]], \r\n mean=INITIALIZATION_MEAN, \r\n stddev=INITIALIZATION_STDDEV)))\r\n self._feed_forward_bias.append(tf.Variable(tf.constant(\r\n INITIALIZATION_BIAS, \r\n shape=[FEED_FWD_VECTOR[-1]])))\r\n self._state_value_layer = (tf.matmul(self._hidden_activation_layer[-1], \r\n self._feed_forward_weights[-1]) \r\n + self._feed_forward_bias[-1])\r\n\r\n # Define the logic of the optimization\r\n self._action = 
tf.placeholder(\"float\", [None, n_actions])\r\n self._target = tf.placeholder(\"float\", [None])\r\n self._action_value_vector = tf.reduce_sum(tf.mul(\r\n self._state_value_layer, self._action), reduction_indices=1)\r\n self._cost = tf.reduce_sum(tf.square(\r\n self._target - self._action_value_vector))\r\n self._alpha = tf.placeholder('float')\r\n self._train_operation = tf.train.AdamOptimizer(\r\n self._alpha).minimize(self._cost)\r\n self._session = tf.Session()\r\n\r\n operation_intizializer = tf.initialize_all_variables()\r\n self._saver = tf.train.Saver()\r\n\r\n try:\r\n self._saver.restore(self._session, self._PARAMETERS_FILE_PATH)\r\n print ('Calibrated parameters SUCCESSFULLY LOADED.',\r\n flush=True)\r\n except:\r\n self._session.run(operation_intizializer)\r\n print ('It was not possible to load calibrated parameters.',\r\n flush=True)\r\n \r\n # Definition of feed_forward and optimization functions\r\n self._feedFwd = lambda state : self._session.run(\r\n self._state_value_layer, \r\n feed_dict={self._input_layer: state})\r\n \r\n self._backProp = lambda valueStates, actions, valueTarget : (\r\n self._session.run(self._train_operation, \r\n feed_dict={self._input_layer: valueStates,\r\n self._action: actions,\r\n self._target: valueTarget,\r\n self._alpha : self._ALPHA}))", "def create_network(num_subs):\n\n # Need one host for each subscriber, one for a publisher, and one for a broker\n n_hosts = num_subs + 2\n\n topo = SingleSwitchTopo(n=n_hosts)\n\n return Mininet(topo=topo, controller=OVSController)", "def run(self, network_create_args=None):\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()", "def build_graph(self):\n\t\tself.n_hidden = 100\n\t\tself.weights_hidden = tf.get_variable(\"weights_hidden\", [self.state_size, self.n_hidden], initializer = tf.random_normal_initializer())\n\t\tself.bias_hidden = tf.get_variable(\"bias_hidden\", [self.n_hidden], initializer = tf.constant_initializer(0.1))\n\n\t\tself.weights_out = tf.get_variable(\"weights_out\", [self.n_hidden, self.action_size], initializer = tf.random_normal_initializer())\n\t\tself.bias_out = tf.get_variable(\"bias_out\", [self.action_size], initializer = tf.constant_initializer(0.1))", "def create_model(self, input_state, layer1=450, layer2=350):\n # create the DQN\n self.model = Sequential()\n self.model.add(Dense(units=layer1, input_dim=input_state.nn_input.size))\n self.model.add(Activation('relu'))\n\n self.model.add(Dense(units=layer2))\n self.model.add(Activation('relu'))\n\n self.model.add(Dense(units=(input_state.size_graph+1)))\n self.model.add(Activation('linear'))\n\n self.model.compile(optimizer='rmsprop', loss='mse')\n\n self.model.predict(input_state.nn_input.reshape(1, input_state.nn_input.size), batch_size=1)", "def start_network(self):\n try:\n self.topo.build_topo()\n except:\n error('Cannot build the topology.')\n try:\n self.net = IPNet(topo=self.topo, use_v4=False, use_v6=True)\n self.net.start()\n except:\n self.stop_network()\n error('Cannot start the network.')", "def create_state():\n if not request.json:\n abort(400, \"Not a JSON\")\n if 'name' not in request.json:\n abort(400, \"Missing name\")\n state = models.state.State(name=request.json['name'])\n state.save()\n return jsonify(state.to_dict()), 201", "def create(self):\n logging.debug(\"%s create called\" % self)\n # networks = self.infra.get(\"networks\")\n notify(\"Creating network %s\" % self.name)\n self.cloudnet = cn.create(self.name, cidr=self.cidr)\n return True", "def 
build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def create_state():\n state = request.get_json()\n if type(state) is not dict:\n abort(400, {'Not a JSON'})\n elif 'name' not in state:\n abort(400, {'Missing name'})\n else:\n new_state = State(**state)\n storage.new(new_state)\n storage.save()\n return make_response(jsonify(new_state.to_dict()), 201)", "def _generate_network_initialization(self, graph, memory_manager):\n\n # TODO: To be changed if we want to support multiple outputs\n output_buffer_name = graph.outputs[0].name\n\n ops_to_ignore = ['Reshape', 'Mul']\n\n buffers_allocated = []\n\n buffer_declaration = \"\"\n buffer_declaration += \" pico_cnn::naive::Tensor **kernels;\\n\"\n buffer_declaration += \" pico_cnn::naive::Tensor **biases;\\n\"\n\n constructor_code = \"\"\n #constructor_code += \"Network::Network() {\\n\\n\"\n\n num_layers = 0\n num_kernels = 0\n num_biases = 0\n\n for node in graph.nodes:\n \"\"\"Do not count the reshape layers as the input tensor will only define the dimensions\"\"\"\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n num_layers += 1\n for num, input in enumerate(node.input_tensors):\n if input in buffers_allocated:\n continue\n else:\n tensor = node.input_tensors[input]\n buffers_allocated.append(input)\n if len(tensor.shape) == 1:\n num_biases += 1\n else:\n num_kernels += 1\n\n \"\"\"The arrays kernels and biases will be used to pass only two variables to read_binary_weights\"\"\"\n constructor_code += \" kernels = new pico_cnn::naive::Tensor*[{}]();\\n\".format(num_kernels)\n constructor_code += \" biases = new pico_cnn::naive::Tensor*[{}]();\\n\\n\".format(num_biases)\n\n pos = -1\n pos_kernel = -1\n pos_bias = -1\n\n buffers_allocated.clear()\n\n \"\"\"Iterate over all nodes in the graph and generate the corresponding allocation code.\"\"\"\n for node_id, node in enumerate(graph.nodes):\n\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n pos += 1\n\n buffer_declaration += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n constructor_code += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n\n # Allocate memory for kernels and biases\n buffer_declaration += \" // Inputs\\n\"\n constructor_code += \" // Inputs\\n\"\n for num, input in enumerate(node.input_tensors):\n\n if node.op_type in ops_to_ignore:\n continue\n\n if input in buffers_allocated:\n continue\n else:\n buffers_allocated.append(input)\n\n tensor = node.input_tensors[input]\n if len(tensor.shape) == 1:\n pos_bias += 1\n else:\n pos_kernel += 1\n\n buffer = memory_manager.get_buffer(graph, input)\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"KernelAllocation\")\n impl = functionality[0].create(buffer, pos, pos_kernel, pos_bias)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \" // Outputs\\n\"\n constructor_code += \" // Outputs\\n\"\n for num, output in enumerate(node.outputs):\n\n buffer = memory_manager.get_buffer(graph, output)\n\n if 
output == output_buffer_name:\n buffer_declaration += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n constructor_code += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n continue\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"OutputAllocation\")\n impl = functionality[0].create(buffer)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \"\\n\\n\"\n constructor_code += \"\\n\\n\"\n\n #constructor_code += \"}\\n\"\n\n self.buffer_declaration = buffer_declaration\n self.constructor_code = constructor_code", "def create_train_state(\n config, rng, learning_rate_fn, example_batch\n):\n model, variables, metric_collector = create_model(config, rng, example_batch)\n params = variables['params']\n parameter_overview.log_parameter_overview(params)\n tx = train_utils.create_optimizer(config, learning_rate_fn)\n\n state = train_state.TrainState.create(\n apply_fn=model.apply,\n params=variables['params'],\n tx=tx,\n )\n return model, state, metric_collector", "def makeState(*args,**kwargs):\n \n cells = []\n\n for item in args:\n #print item\n cells.append(item)\n \n newState = State(cells)\n #newState.printBoard()\n return newState", "def network_initial(request, SPIC_group, SPIC_id):\n SPIC_obj = get_object_or_404(SPIC, group=SPIC_group, local_id=SPIC_id)\n network_obj, created = Network.objects.get_or_create(user_id=request.user.pk, SPIC=SPIC_obj, local_id=0, deleted=False)\n\n if created is True:\n # Check if prototype exists\n prototype = get_object_or_404(Network, user_id=0, SPIC=SPIC_obj)\n network_obj.nodes_json = prototype.nodes_json\n network_obj.links_json = prototype.links_json\n network_obj.save()\n\n return network(request, SPIC_group, SPIC_id, 0)", "def test_create_state(self):\n state = State()\n self.assertTrue(isinstance(state, State))", "def init_network() -> dict:\n network = {}\n network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])\n network['b1'] = np.array([0.1, 0.2, 0.3])\n network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])\n network['b2'] = np.array([0.1, 0.2])\n network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])\n network['b3'] = np.array([0.1, 0.2])\n return network", "def _create_nn(self):\n with tf.name_scope('policy_network'):\n with tf.variable_scope(\"policy_network\"):\n model = tf.keras.Sequential(name='policy_network_model')\n model.add(tf.keras.layers.Dense(self.neurons_in_each_layer[0], activation=tf.nn.relu,\n input_shape=(1, self.neurons_in_each_layer[0])))\n for num_neurons in self.neurons_in_each_layer[1:-1]:\n model.add(tf.keras.layers.Dense(num_neurons, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(self.neurons_in_each_layer[-1], name='policy_output_layer'))\n\n return model", "def create(self):\n x = self.x\n settings = self.SETTINGS\n debug = self.DEBUG\n warm_start_nn = self.WARM_START_NN\n num_target_dims = self.NUM_TARGET_DIMS\n\n layers = [x]\n # Set the drop probability for dropout. 
The same for all layers\n if settings[\"drop_chance\"] != 0:\n drop_prob = tf.constant(settings[\"drop_chance\"], dtype=x.dtype)\n # Track if the NN is evaluated during training or testing/validation\n # Needed for dropout, only drop out during training!\n self.is_train = tf.placeholder(tf.bool)\n for ii, (activation, neurons) in enumerate(\n zip(settings[\"hidden_activation\"], settings[\"hidden_neurons\"]), start=1\n ):\n # Set the weight and bias initialization from settings. The same for all layers\n if warm_start_nn is None:\n weight_init = settings[\"weight_init\"]\n bias_init = settings[\"bias_init\"]\n else:\n if (\n warm_start_nn.layers[ii - 1]._activation == activation\n and warm_start_nn.layers[ii - 1]._weights.shape[1] == neurons\n ):\n weight_init = warm_start_nn.layers[ii - 1]._weights\n bias_init = warm_start_nn.layers[ii - 1]._biases\n activation = warm_start_nn.layers[ii - 1]._activation\n else:\n raise Exception(\"Settings file layer shape does not match warm_start_nn\")\n\n # Get the activation function for this layer from the settings dict\n if activation == \"tanh\":\n act = tf.tanh\n elif activation == \"relu\":\n act = tf.nn.relu\n elif activation == \"none\":\n act = None\n\n # Initialize the network layer. It is autoconnected to the previou one.\n layer = nn_layer(\n layers[-1],\n neurons,\n \"layer\" + str(ii),\n dtype=x.dtype,\n act=act,\n debug=debug,\n bias_init=bias_init,\n weight_init=weight_init,\n )\n # If there is dropout chance is nonzero, potentially dropout neurons\n if settings[\"drop_chance\"] != 0:\n dropout = tf.layers.dropout(layer, drop_prob, training=self.is_train)\n if debug:\n tf.summary.histogram(\"post_dropout_layer_\" + str(ii), dropout)\n layers.append(dropout)\n else:\n layers.append(layer)\n\n # Last layer (output layer) usually has no activation\n activation = settings[\"output_activation\"]\n if warm_start_nn is None:\n weight_init = bias_init = settings[\"standardization\"]\n else:\n weight_init = warm_start_nn.layers[-1]._weights\n bias_init = warm_start_nn.layers[-1]._biases\n activation = warm_start_nn.layers[-1]._activation\n\n if activation == \"tanh\":\n act = tf.tanh\n elif activation == \"relu\":\n act = tf.nn.relu\n elif activation == \"none\":\n act = None\n # Finally apply the output layer and set 'y' such that network.y\n # can be evaluated to make a prediction\n self.y = nn_layer(\n layers[-1],\n num_target_dims,\n \"layer\" + str(len(layers)),\n dtype=x.dtype,\n act=act,\n debug=debug,\n bias_init=bias_init,\n weight_init=weight_init,\n )", "def makeState(self):\n state = None\n\n if self.ai == True:\n state = AIState(self.game, self.blockSize)\n else:\n state = PlayerState(self.game, self.blockSize)\n\n return state", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def build_network(self, inputs, targets, training=False):\n raise NotImplementedError", "def copy_state_to_network(self):\n state = self.rigid_body_state\n\n state.position = self.transform.world_position.copy()\n state.orientation = self.transform.world_orientation.copy()\n state.angular = self.physics.world_angular.copy()\n state.velocity = self.physics.world_velocity.copy()\n # state.collision_group = 
self.physics.collision_group\n # state.collision_mask = self.physics.collision_mask\n self.rigid_body_time = WorldInfo.elapsed", "def _build_network(self, h_size=16, l_rate=0.001):\n with tf.variable_scope(self.net_name):\n self._X = tf.placeholder(tf.float32, [None, self.input_size], name=\"input_x\")\n net = self._X\n\n net = tf.layers.dense(net, h_size, activation=tf.nn.relu)\n net = tf.layers.dense(net, self.output_size)\n self._Qpred = net\n\n self._Y = tf.placeholder(tf.float32, shape=[None, self.output_size])\n self._loss = tf.losses.mean_squared_error(self._Y, self._Qpred)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=l_rate)\n self._train = optimizer.minimize(self._loss)", "def instantiate_network_objects(params):\n # Instantiate generator.\n generator = generators.Generator(\n input_shape=(params[\"latent_size\"]),\n kernel_regularizer=tf.keras.regularizers.l1_l2(\n l1=params[\"generator_l1_regularization_scale\"],\n l2=params[\"generator_l2_regularization_scale\"]\n ),\n bias_regularizer=None,\n name=\"generator\",\n params=params\n )\n\n # Instantiate discriminator.\n discriminator = discriminators.Discriminator(\n input_shape=(\n params[\"height\"] * params[\"width\"] * params[\"depth\"]\n ),\n kernel_regularizer=tf.keras.regularizers.l1_l2(\n l1=params[\"discriminator_l1_regularization_scale\"],\n l2=params[\"discriminator_l2_regularization_scale\"]\n ),\n bias_regularizer=None,\n name=\"discriminator\",\n params=params\n )\n\n return {\"generator\": generator, \"discriminator\": discriminator}", "def create_nn(self):\n\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(32, input_dim=self.state_size, activation='relu'))\n\t\tmodel.add(Dense(32, activation='relu'))\n\t\tmodel.add(Dense(64, activation='relu'))\n\t\tmodel.add(Dense(self.action_size, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n\t\treturn model", "def init_network(session: \"Session\", new_network_name: str) -> None:\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}\"\n _post(session, url_tail, None, params={CoordConstsV2.QP_NAME: new_network_name})", "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def state(self, nodes: typing.Sequence[uuid.UUID], tag: typing.Optional['genmod.Tag'] = None) -> State:\n return State(self._generation, nodes, tag)", "def build_and_display_network():\n bpn = NeuralNetwork.BackPropagationNetwork((input_nodes, hidden_nodes, output_nodes),[None, sigmoid, linear])\n DisplayNetwork.displayLayers(bpn.matrixDimension)\n\n return bpn", "def __init__(self, name, state_shape, n_actions, reuse=False):\n\n with tf.variable_scope(name, reuse=reuse):\n # Prepare neural network architecture\n ### Your code here: prepare any necessary layers, variables, etc.\n if os.path.exists('model.h5'):\n self.model = load_model('model.h5')\n else:\n inp = Input(state_shape)\n dense0 = Dense(64, activation='tanh', kernel_initializer='ones', bias_initializer='ones')(inp)\n dense1 = Dense(256, activation='tanh', kernel_initializer='ones', bias_initializer='ones')(dense0)\n dense2 
= Dense(128, activation='relu', kernel_initializer='ones', bias_initializer='ones')(dense1)\n dense3 = Dense(64, activation='relu', kernel_initializer='ones', bias_initializer='ones')(dense2)\n dense4 = Dense(32, activation='tanh', kernel_initializer='ones', bias_initializer='ones')(dense3)\n\n logits = Dense(n_actions, activation='linear', kernel_initializer='ones', bias_initializer='ones')(dense4)\n # probs = Activation('softmax')(logits)\n state_value = Dense(1, activation='linear', kernel_initializer='ones', bias_initializer='ones')(dense4)\n\n self.model = Model(inputs=inp, outputs=[logits, state_value])\n\n # prepare a graph for agent step\n self.state_t = tf.placeholder('float32', [None, ] + list(state_shape))\n self.agent_outputs = self.symbolic_step(self.state_t)", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)", "def _create_new_state(\n self,\n new_state: str,\n prev_state: CircuitBreakerState | None = None,\n notify: bool = False,\n ) -> CBStateType:\n state_map: dict[str, Type[CBStateType]] = {\n STATE_CLOSED: CircuitClosedState,\n STATE_OPEN: CircuitOpenState,\n STATE_HALF_OPEN: CircuitHalfOpenState,\n }\n try:\n cls = state_map[new_state]\n return cls(self, prev_state=prev_state, notify=notify)\n except KeyError:\n msg = \"Unknown state {!r}, valid states: {}\"\n raise ValueError(msg.format(new_state, \", \".join(state_map)))", "def create_state():\n data = {}\n for key in request.form.keys():\n \tfor value in request.form.getlist(key):\n \t\tdata[key] = value\n try:\n ''' Check that name key is in data '''\n if not 'name' in data:\n raise KeyError('name')\n\n ''' Check that name key is not null '''\n if not data['name']:\n raise TypeError(\"'name' cannot be NULL\")\n\n ''' Check that name key value is a string '''\n if not type_test(data['name'], 'string'):\n raise TypeError(\"'name' must be a string\")\n\n ''' Check if state already exists '''\n query = State.select().where(State.name == data['name'])\n if query.exists():\n raise ValueError('State already exists')\n\n new = State.create(\n name = data['name']\n )\n res = {}\n res['code'] = 201\n res['id'] = new.id\n res['msg'] = \"State was created successfully\"\n return res, 201\n except TypeError as e:\n response = {}\n response['code'] = 400\n response['msg'] = e.message\n return response, 400\n except ValueError as e:\n response = {}\n response['code'] = 10001\n response['msg'] = e.message\n return response, 409\n except KeyError as e:\n response = {}\n response['code'] = 40000\n response['msg'] = 'Missing parameters'\n return response, 400\n except Exception as e:\n print(e.message)\n abort(500)", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 
'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 5, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)(x)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def init_net(self):\r\n # initialize the generator network\r\n g_net = Net(\r\n self.architecture['generator'], net_name='gen',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Gen = Routine(g_net)\r\n self.Gen.add_input_layers([64, self.code_size], [0])\r\n self.Gen.seq_links(list(range(g_net.num_layers)))\r\n self.Gen.add_output_layers([g_net.num_layers - 1])\r\n\r\n # initialize the discriminator network\r\n d_net = Net(\r\n self.architecture['discriminator'], net_name='dis',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in discriminator\r\n self.Dis = Routine(d_net)\r\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\r\n self.Dis.seq_links(list(range(d_net.num_layers)))\r\n self.Dis.add_output_layers([d_net.num_layers - 1])", "def _make_q_network(states, num_actions, params, scope_name):\n kernel_regularizer = tf.contrib.layers.l2_regularizer(params.l2_scale) if params.l2_scale else None\n with tf.variable_scope(scope_name) as scope:\n hidden0 = tf.layers.dense(states, params.hidden_units[0],\n activation=params.activation,\n kernel_initializer=params.initializer,\n kernel_regularizer=kernel_regularizer)\n hidden = tf.layers.dense(hidden0, params.hidden_units[1],\n activation=params.activation,\n kernel_initializer=params.initializer,\n kernel_regularizer=kernel_regularizer)\n outputs = tf.layers.dense(hidden, num_actions,\n kernel_initializer=params.initializer,\n kernel_regularizer=kernel_regularizer)\n\n trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope=scope.name)\n trainable_vars_by_name = {var.name[len(scope.name):]: var\n for var in trainable_vars}\n # get the regularization loss\n reg_loss = tf.losses.get_regularization_loss(scope_name)\n return outputs, trainable_vars_by_name, reg_loss", "def create_model(input_state, num_actions,\n model_name='q_network'): # noqa: D103\n\n with tf.name_scope('hidden1'):\n hidden1 = Dense(256, activation='sigmoid')(input_state)\n with tf.name_scope('output'):\n output = Dense(num_actions, activation='softmax')(hidden1)\n return output", "def create_train_state(\n config,\n rng,\n data_info = None, # data_info\n):\n grid_size = config.model.grid_size\n num_features = config.model.num_features\n features_color = jax.random.uniform(\n rng,\n (grid_size, grid_size, grid_size, num_features - 1),\n dtype=jnp.float32,\n )\n features_color = (features_color - 0.5) * 2.0\n features_color = features_color * config.model.pyramid_init_noise_level\n features_density = jnp.zeros(\n (grid_size, grid_size, grid_size, 1), dtype=jnp.float32\n )\n params = jnp.concatenate([features_density, features_color], axis=-1)\n parameter_overview.log_parameter_overview({\"params\": params})\n\n adam_kwargs = dict(config.opt.args)\n adam_kwargs[\"learning_rate\"] = get_learning_rate_scheduler(config, data_info)\n optimizer = 
optax.adamw(**adam_kwargs)\n\n return (\n None,\n optimizer,\n train_state.TrainState.create(\n apply_fn=None,\n params=params,\n tx=optimizer,\n ),\n )", "def __init__(self):\n raise NotImplementedError('cannot create independent state')", "def make_network(identity):\n network = []\n for layer in range(1,len(identity)):\n network.append(array([\n [random.uniform(0.2, 0.5) for __ in range(identity[layer-1] + 1)]\n for _ in range(identity[layer])\n ]))\n return network", "def compile(self):\n logger.info('Define network with dnnet of version : %s'\\\n % dnnet.__version__)\n if self.layers.size == 0:\n msg = 'NeuralNetwork has no layer.\\n Add layers before compiling.'\n raise DNNetRuntimeError(msg)\n\n parent = self.layers[0]\n self.add(OutputLayer())\n\n for i, layer in enumerate(self.layers, 1):\n logger.debug('Add %s layer.' % layer.get_type())\n layer.set_parent(parent)\n parent = layer\n\n logger.debug('Defined network.')", "def create_neural_network(NumberOfFeatures, NumberOfClasses, optimizer_type, lr, moment, lr_decay):\n model = create_base_network(NumberOfFeatures, NumberOfClasses)\n if optimizer_type == 'sgd':\n opt = optimizers.SGD(lr=lr, momentum=moment, decay=lr_decay)\n else:\n opt = optimizer_type\n\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n print(model.summary())\n return model", "def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()", "def create_network(self, tenant_id, network):\n self.create_network_bulk(tenant_id, [network])", "def create_nodes(nd=None):\n\n if not nd:\n raise ValueError(\"No nodes data provided.\")\n\n nodes = []\n\n # Create Bus objects from buses table\n busd = {}\n\n for i, b in nd[\"buses\"].iterrows():\n if b[\"active\"]:\n bus = solph.Bus(label=b[\"label\"])\n nodes.append(bus)\n\n busd[b[\"label\"]] = bus\n if b[\"excess\"]:\n nodes.append(\n solph.Sink(\n label=b[\"label\"] + \"_excess\",\n inputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"excess costs\"]\n )\n },\n )\n )\n if b[\"shortage\"]:\n nodes.append(\n solph.Source(\n label=b[\"label\"] + \"_shortage\",\n outputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"shortage costs\"]\n )\n },\n )\n )\n\n # Create Source objects from table 'commodity sources'\n for i, cs in nd[\"commodity_sources\"].iterrows():\n if cs[\"active\"]:\n nodes.append(\n solph.Source(\n label=cs[\"label\"],\n outputs={\n busd[cs[\"to\"]]: solph.Flow(\n variable_costs=cs[\"variable costs\"]\n )\n },\n )\n )\n\n # Create Source objects with fixed time series from 'renewables' table\n for i, re in nd[\"renewables\"].iterrows():\n if re[\"active\"]:\n # set static outflow values\n outflow_args = {\n \"nominal_value\": re[\"capacity\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == re[\"label\"]:\n outflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Source(\n label=re[\"label\"],\n outputs={\n busd[re[\"to\"]]: solph.Flow(**outflow_args)\n },\n )\n )\n\n # Create Sink objects with fixed time series from 'demand' table\n for i, de in nd[\"demand\"].iterrows():\n if de[\"active\"] and not pd.isnull(de['active']):\n # set static inflow values\n inflow_args = {\n \"nominal_value\": de[\"nominal value\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == de[\"label\"]:\n inflow_args[col.split(\".\")[1]] = 
nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Sink(\n label=de[\"label\"],\n inputs={\n busd[de[\"from\"]]: solph.Flow(**inflow_args)\n },\n )\n )\n\n # Create Transformer objects from 'transformers' table\n for i, t in nd[\"transformers\"].iterrows():\n if t[\"active\"]:\n # set static inflow values\n inflow_args = {\"variable_costs\": t[\"variable input costs\"]}\n # get time series for inflow of transformer\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == t[\"label\"]:\n inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n # create\n nodes.append(\n solph.Transformer(\n label=t[\"label\"],\n inputs={busd[t[\"from\"]]: solph.Flow(**inflow_args)},\n outputs={\n busd[t[\"to\"]]: solph.Flow(nominal_value=t[\"capacity\"])\n },\n conversion_factors={busd[t[\"to\"]]: t[\"efficiency\"]},\n )\n )\n\n for i, s in nd[\"storages\"].iterrows():\n if s[\"active\"]:\n nodes.append(\n solph.components.GenericStorage(\n label=s[\"label\"],\n inputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity inflow\"],\n variable_costs=s[\"variable input costs\"],\n )\n },\n outputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity outflow\"],\n variable_costs=s[\"variable output costs\"],\n )\n },\n nominal_storage_capacity=s[\"nominal capacity\"],\n loss_rate=s[\"capacity loss\"],\n initial_storage_level=s[\"initial capacity\"],\n max_storage_level=s[\"capacity max\"],\n min_storage_level=s[\"capacity min\"],\n inflow_conversion_factor=s[\"efficiency inflow\"],\n outflow_conversion_factor=s[\"efficiency outflow\"],\n )\n )\n\n for i, p in nd[\"powerlines\"].iterrows():\n if p[\"active\"]:\n bus1 = busd[p[\"bus_1\"]]\n bus2 = busd[p[\"bus_2\"]]\n nodes.append(\n solph.custom.Link(\n label=\"powerline\" + \"_\" + p[\"bus_1\"] + \"_\" + p[\"bus_2\"],\n inputs={bus1: solph.Flow(), bus2: solph.Flow()},\n outputs={\n bus1: solph.Flow(nominal_value=p[\"capacity\"]),\n bus2: solph.Flow(nominal_value=p[\"capacity\"]),\n },\n conversion_factors={\n (bus1, bus2): p[\"efficiency\"],\n (bus2, bus1): p[\"efficiency\"],\n },\n )\n )\n\n return nodes", "def network(self):\n inp = Input((self.env_dim))\n # #\n # x = Dense(256, activation='relu')(inp)\n # x = GaussianNoise(1.0)(x)\n # #\n # x = Flatten()(x)\n # x = Dense(128, activation='relu')(x)\n # x = GaussianNoise(1.0)(x)\n # #\n # out = Dense(self.act_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n # out = Lambda(lambda i: i * self.act_range)(out)\n # #\n\n x = conv_block(inp, 32, (2, 2), 8)\n x = conv_block(x, 64, (2, 2), 4)\n x = conv_block(x, 64, (2, 2), 3)\n x = Flatten()(x)\n x = Dense(256, activation='relu')(x)\n\n x = Dense(self.act_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(x)\n\n return Model(inp, out)", "def make_model(n_dimensions, seed):\n with spa.SPA(seed=seed) as model:\n # Create the state holding element\n model.state = spa.State(dimensions=n_dimensions,\n feedback=1.0, feedback_synapse=0.01)\n\n # Create the state transitions\n actions = spa.Actions(*(\"dot(state, {}) --> state = {}\".format(x, y) for\n (x, y) in zip(\"ABCDE\", \"BCDEA\")))\n model.bg = spa.BasalGanglia(actions=actions)\n model.thal = spa.Thalamus(model.bg)\n\n # Create the input for the initial state\n model.input = spa.Input(state=lambda t: 'A' if t < 0.05 else '0')\n\n return model", "def create(self):\n\n super().create()\n\n ##########################################################\n # State Definition\n 
##########################################################\n standby_locked = State('standby_locked', patterns.standby_locked)\n\n self.add_state(standby_locked)", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n conv1_size=5, conv1_n_chan=32, conv1_n_pool=2,\n conv2_size=5, conv2_n_chan=64, conv2_n_pool=2,\n fc1_n_chan=1024, fc1_dropout=0.5, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... \")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.conv1_size = conv1_size\n self.conv1_n_chan = conv1_n_chan\n self.conv1_n_pool = conv1_n_pool\n self.conv2_size = conv2_size\n self.conv2_n_chan = conv2_n_chan\n self.conv2_n_pool = conv2_n_pool\n self.fc1_y_size = int( np.ceil( np.ceil(\n self.y_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_x_size = int( np.ceil( np.ceil(\n self.x_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_n_chan = fc1_n_chan\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.conv1_size = net_architecture['conv1_size']\n self.conv1_n_chan = net_architecture['conv1_n_chan']\n self.conv1_n_pool = net_architecture['conv1_n_pool']\n self.conv2_size = net_architecture['conv2_size']\n self.conv2_n_chan = net_architecture['conv2_n_chan']\n self.conv2_n_pool = net_architecture['conv2_n_pool']\n self.fc1_y_size = int( np.ceil( np.ceil(\n self.y_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_x_size = int( np.ceil( np.ceil(\n 
self.x_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_n_chan = net_architecture['fc1_n_chan']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] ]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n # Convert input image to tensor with channel as last dimension\n # x_image = [-1 x im-height x im-width x n-input-channels]\n x_image_temp = tf.reshape(self.x, [-1,\n self.n_input_channels,self.y_res,self.x_res])\n x_image = tf.transpose(x_image_temp, [0,2,3,1])\n\n #########################################################\n # Set up convolutional layer 1\n # W = [im-height x im-width x n-input-channels x n-output-channels])\n self.conv1_shape = [self.conv1_size, self.conv1_size,\n self.n_input_channels, self.conv1_n_chan]\n self.W_conv1 = tf.Variable( tf.truncated_normal(\n shape=self.conv1_shape, stddev=0.1))\n self.b_conv1 = tf.Variable( tf.constant(0.1,\n shape=[self.conv1_n_chan] ))\n\n # Convolve x_image with the weight tensor\n self.conv1_lin = tf.nn.conv2d( x_image, self.W_conv1,\n strides=[1, 1, 1, 1], padding='SAME' )\n\n # Add bias and apply transfer function\n self.conv1_relu = tf.nn.relu( self.conv1_lin + self.b_conv1 )\n\n # Max pooling\n self.conv1_kernel = [1, self.conv1_n_pool, self.conv1_n_pool, 1]\n self.conv1_pool = tf.nn.max_pool( self.conv1_relu,\n ksize=self.conv1_kernel, strides=self.conv1_kernel, padding='SAME')\n\n #########################################################\n # Convolutional layer 2\n self.conv2_shape = [self.conv2_size, self.conv2_size,\n self.conv1_n_chan, self.conv2_n_chan]\n self.W_conv2 = tf.Variable( tf.truncated_normal(\n shape=self.conv2_shape, stddev=0.1 ) )\n self.b_conv2 = tf.Variable( tf.constant(0.1,\n shape=[self.conv2_n_chan] ))\n\n # Convolve x_image with the weight tensor\n self.conv2_lin = tf.nn.conv2d( self.conv1_pool, self.W_conv2,\n strides=[1, 1, 1, 1], padding='SAME' )\n\n # Add bias and apply transfer function\n self.conv2_relu = tf.nn.relu( self.conv2_lin + self.b_conv2 )\n\n # Max pooling\n self.conv2_kernel = [1, self.conv2_n_pool, self.conv2_n_pool, 1]\n self.conv2_pool = tf.nn.max_pool( self.conv2_relu,\n ksize=self.conv2_kernel, strides=self.conv2_kernel, padding='SAME')\n\n\n #########################################################\n # Densely Connected Layer\n # Weights and bias\n self.fc1_shape = [self.fc1_y_size * 
self.fc1_x_size * self.conv2_n_chan,\n self.fc1_n_chan]\n self.W_fc1 = tf.Variable( tf.truncated_normal(\n shape=self.fc1_shape, stddev=0.1 ) )\n self.b_fc1 = tf.Variable( tf.constant(0.1, shape=[self.fc1_n_chan] ))\n\n # Flatten output from conv2\n self.conv2_pool_flat = tf.reshape(\n self.conv2_pool, [-1, self.fc1_shape[0]] )\n\n # Calculate network step\n self.fc1_relu = tf.nn.relu( tf.matmul( self.conv2_pool_flat,\n self.W_fc1) + self.b_fc1 )\n\n # Set up dropout option for fc1\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.fc1_relu_drop = tf.nn.dropout(self.fc1_relu, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = [self.fc1_n_chan, self.n_output_classes]\n self.W_fc_out = tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.fc1_relu_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def create_neural_network(mode, layer_sizes, use_stored_weights, weights_path):\n\n\tif verbose: print('model_tensorflow.create_neural_network() called')\n\n\tnum_layers = len(layer_sizes) - 1\n\tweights = {}\n\tbiases = {}\n\n\tif verbose: print('creating a DNN with', str(num_layers-1), 'hidden layers of size', \n\t\t\t\t\t str(layer_sizes[1:len(layer_sizes)-1]))\n\n\t# Initialise the weights\n\t# (a) Create new weights and biases\n\tif not use_stored_weights:\n\t\tfor i in range(num_layers): \n\t\t\t# Layer l has dimensions (|l-1|, |l|) for weights and (|l|) for biases\n\t\t\tw_name = 'W' + str(i+1)\n\t\t\tweights[w_name] = tf.get_variable(w_name, [layer_sizes[i], layer_sizes[i+1]], \n\t\t\t\tinitializer = tf.contrib.layers.xavier_initializer(), dtype=tf.float32)\n\t\t\tb_name = 'b' + str(i+1)\n\t\t\tbiases[b_name] = tf.get_variable(b_name, [layer_sizes[i+1]], \n\t\t\t\tinitializer = tf.zeros_initializer(), dtype=tf.float32)\n\n\t\t# Initialise all existing global variables \n\t\tsess.run(tf.global_variables_initializer())\n\n\t\t# Save weights and biases\n\t\tsaver = tf.train.Saver()\n\t\tsave_path = saver.save(sess, weights_path + 'weights/' + 'init.ckpt') \n\t# (b) Restore existing weights and biases\n\telse:\n\t\tfor i in range(num_layers):\n\t\t\t# Prepare variable\n\t\t\tw_name = 'W' + str(i+1)\n\t\t\tb_name = 'b' + str(i+1)\n\t\t\tweights[w_name] = tf.get_variable(w_name, [layer_sizes[i], layer_sizes[i+1]], \n\t\t\t\tinitializer = tf.zeros_initializer(), dtype=tf.float32)\n\t\t\tbiases[b_name] = tf.get_variable(b_name, [layer_sizes[i+1]], \n\t\t\t\tinitializer = tf.zeros_initializer(), dtype=tf.float32)\n\n\t\t# Initialise 
all existing global variables \n\t\tsess.run(tf.global_variables_initializer())\n\n\t\t# Restore weights and biases\n\t\tsaver = tf.train.Saver()\n\t\tif mode == trn:\n\t\t\tsaver.restore(sess, weights_path + 'weights/' + 'init.ckpt') \n\t\telif mode == tst or mode == app:\n\t\t\tsaver.restore(sess, weights_path + 'weights/' + 'trained.ckpt')\n\n\twb = {'weights': weights, 'biases': biases}\n\treturn wb", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph" ]
[ "0.6997173", "0.6979348", "0.6762708", "0.67333984", "0.6585905", "0.65094924", "0.64667505", "0.6448271", "0.6415188", "0.635982", "0.6349135", "0.63338166", "0.63314515", "0.62870693", "0.62643075", "0.6260604", "0.62521416", "0.6246148", "0.6229527", "0.6212824", "0.6211686", "0.6189977", "0.61879617", "0.61639124", "0.6131859", "0.6125071", "0.6117734", "0.6116808", "0.6106887", "0.61025715", "0.60874844", "0.6086121", "0.6074059", "0.6065387", "0.6040243", "0.60246783", "0.60226643", "0.6015497", "0.6007135", "0.6005757", "0.59959793", "0.59956884", "0.5993322", "0.59841615", "0.5957576", "0.59232986", "0.591489", "0.59070027", "0.59001464", "0.58941716", "0.58900076", "0.5886369", "0.58840215", "0.5864718", "0.5850912", "0.5846061", "0.5842165", "0.58418727", "0.5835645", "0.583487", "0.58201087", "0.58105075", "0.58086646", "0.5799233", "0.5797615", "0.57916385", "0.5788837", "0.5788518", "0.5780774", "0.5776918", "0.5773226", "0.5770015", "0.57698214", "0.5758354", "0.5741033", "0.5739539", "0.5733766", "0.5728505", "0.5726384", "0.57256836", "0.5723892", "0.57210386", "0.5717319", "0.57157356", "0.57104224", "0.5694934", "0.5694872", "0.56916684", "0.56885934", "0.56881684", "0.56842655", "0.5680174", "0.5676907", "0.5668862", "0.56556076", "0.5652638", "0.56501645", "0.5649309", "0.56460637", "0.5641849", "0.5633143" ]
0.0
-1
Builds a Visit object
def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix, pivot_departure_fix: GpsFix,
             detection_arrival_fix: GpsFix, detection_departure_fix: GpsFix):
    self.id_visit = id_visit
    self.id_stay_point = id_stay_point
    self.pivot_arrival_fix = pivot_arrival_fix
    self.pivot_departure_fix = pivot_departure_fix
    self.detection_arrival_fix = detection_arrival_fix
    self.detection_departure_fix = detection_departure_fix
    self.stay_time = None
    self.update_stay_time()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_visit_plan(self):\n self.visit_plan = VisitPlanner(self.detector, self.NSAMP,\n self.SAMPSEQ, self.SUBARRAY,\n self.num_orbits,\n # TEMP! to make observations sparser\n exp_overhead=3 * u.min)\n\n self.exp_start_times = self.visit_plan['exp_times'].to(\n u.day) + self.start_JD\n\n # So this is a weird thing to do, maybe the JD should be added in the\n # visit planner - used in visit trend generation\n self.visit_plan['exp_start_times'] = self.exp_start_times", "def makeVisitInfo():\n return afwImage.VisitInfo(exposureTime=10.01,\n darkTime=11.02,\n date=dafBase.DateTime(65321.1, dafBase.DateTime.MJD, dafBase.DateTime.TAI),\n ut1=12345.1,\n era=45.1*lsst.geom.degrees,\n boresightRaDec=lsst.geom.SpherePoint(23.1, 73.2, lsst.geom.degrees),\n boresightAzAlt=lsst.geom.SpherePoint(134.5, 33.3, lsst.geom.degrees),\n boresightAirmass=1.73,\n boresightRotAngle=73.2*lsst.geom.degrees,\n rotType=afwImage.RotType.SKY,\n observatory=Observatory(11.1*lsst.geom.degrees, 22.2*lsst.geom.degrees, 0.333),\n weather=Weather(1.1, 2.2, 34.5),\n )", "def accept(visitor):", "def accept(visitor):", "def _buildVisitorList(self):\n # Instance this list only once for all elements\n if self.__visitor_list is None:\n self.__visitor_list = []\n #\n # Insert each of the code producer visitors into the list of\n # visitors. The visitor must be in the list of\n # configured visitors before it is inserted. Otherwise, it is\n # considered disabled, and not placed in the list.\n #\n for inst in self.__configured_visitors:\n config = self.__configured_visitors[inst]\n # If enabled instance it and place in list\n if config.getEnabled():\n visitor = config.Instance()\n if not config.getGenerateCode():\n visitor.setGenerateEmptyFile()\n self.__visitor_list.append(visitor)\n #\n return self.__visitor_list", "def createVisit(tx, query, personId, locationId, date, startHour, endHour):\n tx.run(query, personId=personId, locationId=locationId, date=date, startHour=startHour,\n endHour=endHour)", "def accept(self, visitor):\r\n\r\n return visitor(copy.deepcopy(self))", "def _construct_visitor_obj(self, visitor_id, df_slice, generate_uuids=False, attributes=False):\n \n visitor_obj = {\"visitor_id\":visitor_id, \"attributes\":json.loads(attributes)} if attributes else {\"visitor_id\":visitor_id}\n \n events_array = []\n sig_types = ['revenue', 'value', 'tags', 'entity_id', 'timestamp']\n\n for _, row in df_slice.iterrows():\n args = {}\n for s in sig_types:\n val = False\n if s in row and not pd.isnull(row[s]):\n val = row[s]\n args[s] = val\n if generate_uuids:\n args['uuid'] = uuid.uuid1()\n else:\n args['uuid'] = row['uuid']\n events_array.append(getattr(self, '_construct_event_obj')(**args))\n \n\n visitor_obj[\"snapshots\"] = [{\"decisions\":[], \"events\":events_array}]\n\n return visitor_obj", "def build_graph(self):\n pass", "def __init__(self, visitor, base_dir, do_not_descend_map=None, private_map=None):\n self._visitor = visitor\n self._base_dir = base_dir\n self._do_not_descend_map = do_not_descend_map or {}\n self._private_map = private_map or {}", "def _build_graph(self):\n pass", "def __init__(self, *args):\n if self.__class__ == ctree_visitor_t:\n _self = None\n else:\n _self = self\n this = _ida_hexrays.new_ctree_visitor_t(_self, *args)\n try: self.this.append(this)\n except: self.this = this", "def _start_times_to_visit_info(self):\n\n self.visit_plan = {\n 'exp_start_times': self.exp_start_times,\n # for visit trends\n 'orbit_start_index': tools.detect_orbits(self.exp_start_times),\n }", "def 
build_graph(self):\n raise NotImplementedError", "def visit(self, obj):\n pass", "def __init__(self, node, depth=0, visited_by=None):\n if visited_by is None:\n visited_by = {}\n self.node = node\n self.depth = depth\n self.visited_by = visited_by", "def accept(self, visitor):\n #Triggers the visitng operation\n visitor.visit(self)", "def gen_graph(self):", "def add_visit(self,\n visit_occurrence_id,\n visit_concept_id,\n visit_start_date,\n visit_end_date,\n visit_type_concept_id,\n **kwargs):\n\n # add all optional values\n visit_d = {key: value for key, value in kwargs.items()}\n\n # Required values (Except person_id)\n visit_d[\"visit_occurrence_id\"] = visit_occurrence_id\n visit_d[\"visit_concept_id\"] = visit_concept_id\n visit_d[\"visit_start_date\"] = visit_start_date\n visit_d[\"visit_end_date\"] = visit_end_date\n visit_d[\"visit_type_concept_id\"] = visit_type_concept_id\n\n self.visits.append(visit_d)", "def _construct_graph(self):\n raise NotImplementedError", "def __init__(self, game, value, visited):\n\t\tself.type = \"robot\"\n\t\tself.game = game\n\t\tself.observations = self.game.getAllObservations()\n\t\tself.children = self.make_children()\n\t\tself.value = value\n\t\tself.visited = visited", "def Instance(self):\n if self.__type == \"ComponentCppVisitor\":\n inst = ComponentCppVisitor.ComponentCppVisitor()\n elif self.__type == \"ComponentHVisitor\":\n inst = ComponentHVisitor.ComponentHVisitor()\n elif self.__type == \"ImplCppVisitor\":\n inst = ImplCppVisitor.ImplCppVisitor()\n elif self.__type == \"ImplHVisitor\":\n inst = ImplHVisitor.ImplHVisitor()\n elif self.__type == \"TestImplCppVisitor\":\n inst = TestImplCppVisitor.TestImplCppVisitor()\n elif self.__type == \"TestImplCppHelpersVisitor\":\n inst = TestImplCppHelpersVisitor.TestImplCppHelpersVisitor()\n elif self.__type == \"TestImplHVisitor\":\n inst = TestImplHVisitor.TestImplHVisitor()\n elif self.__type == \"TestMainVisitor\":\n inst = TestMainVisitor.TestMainVisitor()\n elif self.__type == \"PortCppVisitor\":\n inst = PortCppVisitor.PortCppVisitor()\n elif self.__type == \"PortHVisitor\":\n inst = PortHVisitor.PortHVisitor()\n elif self.__type == \"SerialCppVisitor\":\n inst = SerialCppVisitor.SerialCppVisitor()\n elif self.__type == \"SerialHVisitor\":\n inst = SerialHVisitor.SerialHVisitor()\n elif self.__type == \"TopologyCppVisitor\":\n inst = TopologyCppVisitor.TopologyCppVisitor()\n elif self.__type == \"TopologyHVisitor\":\n inst = TopologyHVisitor.TopologyHVisitor()\n elif self.__type == \"InstanceTopologyCppVisitor\":\n inst = InstanceTopologyCppVisitor.InstanceTopologyCppVisitor()\n elif self.__type == \"InstanceTopologyHVisitor\":\n inst = InstanceTopologyHVisitor.InstanceTopologyHVisitor()\n elif self.__type == \"InstanceTopologyCmdHTMLVisitor\":\n inst = InstanceTopologyCmdHTMLVisitor.InstanceTopologyCmdHTMLVisitor()\n elif self.__type == \"InstanceTopologyChannelsTMLVisitor\":\n inst = (\n InstanceTopologyChannelsHTMLVisitor.InstanceTopologyChannelsHTMLVisitor()\n )\n elif self.__type == \"InstanceTopologyEventsHTMLVisitor\":\n inst = (\n InstanceTopologyEventsHTMLVisitor.InstanceTopologyEventsHTMLVisitor()\n )\n elif self.__type == \"ComponentTestHVisitor\":\n inst = ComponentTestHVisitor.ComponentTestHVisitor()\n elif self.__type == \"ComponentTestCppVisitor\":\n inst = ComponentTestCppVisitor.ComponentTestCppVisitor()\n elif self.__type == \"GTestHVisitor\":\n inst = GTestHVisitor.GTestHVisitor()\n elif self.__type == \"GTestCppVisitor\":\n inst = GTestCppVisitor.GTestCppVisitor()\n elif 
self.__type == \"CommandVisitor\":\n inst = CommandVisitor.CommandVisitor()\n elif self.__type == \"EventVisitor\":\n inst = EventVisitor.EventVisitor()\n elif self.__type == \"ChannelVisitor\":\n inst = ChannelVisitor.ChannelVisitor()\n elif self.__type == \"SerializableVisitor\":\n inst = SerializableVisitor.SerializableVisitor()\n elif self.__type == \"InstanceCommandVisitor\":\n inst = InstanceCommandVisitor.InstanceCommandVisitor()\n elif self.__type == \"InstanceEventVisitor\":\n inst = InstanceEventVisitor.InstanceEventVisitor()\n elif self.__type == \"InstanceChannelVisitor\":\n inst = InstanceChannelVisitor.InstanceChannelVisitor()\n elif self.__type == \"InstanceSerializableVisitor\":\n inst = InstanceSerializableVisitor.InstanceSerializableVisitor()\n elif self.__type == \"HtmlDocVisitor\":\n inst = HtmlDocVisitor.HtmlDocVisitor()\n elif self.__type == \"MdDocVisitor\":\n inst = MdDocVisitor.MdDocVisitor()\n elif self.__type == \"TopologyIDVisitor\":\n inst = TopologyIDVisitor.TopologyIDVisitor()\n else:\n s = f\"VisitorConfig.getInstance: unsupported visitor type ({self.__type})\"\n PRINT.info(s)\n raise ValueError(s)\n return inst", "def create_user_visit_helper(sess, request_id, user_obj_id, merchant_data):\n merchant = Merchant.get_and_check_else_new(\n sess, merchant_data[\"merchantId\"], merchant_data[\"merchantName\"]\n )\n user = User.get_and_check_else_new(sess, user_obj_id)\n visit_id = uuid.uuid4()\n timestamp = int(time.time())\n Visit.new(sess, visit_id, timestamp, user.pk, merchant.pk)\n log.info(\"[{}] New visit created for user: {}\".format(request_id, user.pk))\n return {\n \"visitId\": visit_id,\n \"timestamp\": timestamp,\n \"merchant\": {\n \"merchantId\": merchant.merchant_id,\n \"merchantName\": merchant.merchant_name,\n },\n \"user\": {\"userId\": user.user_obj_id},\n }", "def build(*args, **kwargs):\n\n\treturn Spider(*args, **kwargs)", "def setup_visit(self, start_JD, num_orbits, exp_start_times=False):\n self.start_JD = start_JD # in days\n self.num_orbits = num_orbits\n\n if exp_start_times: # i.e. 
visit plan is specified\n self.exp_start_times = exp_start_times\n self._start_times_to_visit_info()\n else:\n # TODO detector must be setup first but no indicator of this in code\n self._generate_visit_plan()", "def construct(self):\n return self.as_search().construct()", "def accept(self, visitor):\n raise NotImplementedError()", "def __init__(self):\n jinja2.visitor.NodeVisitor.__init__(self)\n self.getattr_nodes = set()", "def _build(self, prefilt=None):\n self.make_filiation()\n if prefilt is not None:\n self.prefilter(filt=prefilt)\n self.make_trees()\n return", "def accept(self, visitor):\n visitor.visit(self)", "def __init__(self):\n self.__instance = None\n self.__configured_visitors = {}", "def view(self):\n from devito.ir.iet.visitors import printAST\n return printAST(self)", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def visit(self, node):", "def visit(self, node):", "def _internal_build(self):\n self.nodes = self.__tree.Nodes()\n self.edges = self.__tree.Edges()\n self.augmentedEdges = {}\n for key, val in self.__tree.AugmentedEdges().items():\n self.augmentedEdges[key] = list(val)\n self.root = self.__tree.Root()\n\n seen = set()\n self.branches = set()\n\n # Find all of the branching nodes in the tree, degree > 1\n # That is, they appear in more than one edge\n for e1, e2 in self.edges:\n if e1 not in seen:\n seen.add(e1)\n else:\n self.branches.add(e1)\n\n if e2 not in seen:\n seen.add(e2)\n else:\n self.branches.add(e2)\n\n # The nodes that are not branches are leaves\n self.leaves = set(self.nodes.keys()) - self.branches\n self.leaves.remove(self.root)", "def _addVisitor(self, type_obj, visitor_list):\n for v in visitor_list:\n type_obj.addVisitor(v)", "def __init__(self, the_ast):\n self._ast = the_ast", "def build(c):", "def build(self):", "def build(self):", "def build(self):", "def build(self) -> None:", "def traverse(self, visit, *args, **kwargs):\n if not self.__visited:\n visit(self, *args, **kwargs)\n self.__visited = True\n for c in self.parameters:\n c.traverse(visit, *args, **kwargs)\n self.__visited = False", "def _build(self, *args, **kwargs):\n for directive in self.directives:\n alias, ip, port, server_name, location = directive[\"signature\"].split(\":\")\n\n if location not in self.locations.keys():\n handle_location = Location(**{\n \"location\" : location,\n }\n )\n self.locations = handle_location\n self.locations[location].directives = directive", "def _build(self):", "def _build(self):", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()", "def build(self):\n pass", "def build(self):\n pass", "def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n 
self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()", "def __init__(self):\n\n self.loops = []\n self.ast_util = ast_util.ASTUtil()", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", "def visitor(self) -> pd.DataFrame:\n # load the report folder visitor trips trip list\n trips = pd.read_csv(\n os.path.join(self.scenario_path,\n \"report\",\n \"visitorTrips.csv\"),\n usecols=[\"tripID\", # unique trip id\n \"tripMode\", # trip mode\n \"weightTrip\", # trip weight\n \"weightPersonTrip\", # person trip weight\n \"timeTotal\", # total trip time\n \"distanceTotal\", # total trip distance\n \"costTotal\", # total trip cost\n \"tripPurposeDestination\"]) # trip purpose\n\n trips[\"purpose\"] = trips[\"tripPurposeDestination\"]\n\n # return fields of interest\n return trips[[\"tripID\",\n \"tripMode\",\n \"weightTrip\",\n \"weightPersonTrip\",\n \"timeTotal\",\n \"distanceTotal\",\n \"costTotal\",\n \"purpose\"]]", "def create(self, the_type):\n\n # Get the list of code producing visitors. The hardwired list is\n # based on the project argument. A different set of visitors can\n # be assembled by project.\n\n project_visitor_list = self._buildVisitorList()\n\n # Instance the needed code snippet generator here.\n\n if the_type == \"initFiles\":\n code_section_generator = InitFiles.InitFiles()\n elif the_type == \"startSource\":\n code_section_generator = StartSource.StartSource()\n elif the_type == \"includes1\":\n code_section_generator = Includes1.Includes1()\n elif the_type == \"includes2\":\n code_section_generator = Includes2.Includes2()\n elif the_type == \"namespace\":\n code_section_generator = Namespace.Namespace()\n elif the_type == \"public\":\n code_section_generator = Public.Public()\n elif the_type == \"protected\":\n code_section_generator = Protected.Protected()\n elif the_type == \"private\":\n code_section_generator = Private.Private()\n elif the_type == \"finishSource\":\n code_section_generator = FinishSource.FinishSource()\n\n elif the_type == \"DictStart\":\n code_section_generator = DictStart.DictStart()\n elif the_type == \"DictHeader\":\n code_section_generator = DictHeader.DictHeader()\n elif the_type == \"DictBody\":\n code_section_generator = DictBody.DictBody()\n\n elif the_type == \"InstanceDictStart\":\n code_section_generator = InstanceDictStart.InstanceDictStart()\n elif the_type == \"InstanceDictHeader\":\n code_section_generator = InstanceDictHeader.InstanceDictHeader()\n elif the_type == \"InstanceDictBody\":\n code_section_generator = InstanceDictBody.InstanceDictBody()\n\n elif the_type == \"HtmlStart\":\n code_section_generator = HtmlStartPage.HtmlStartPage()\n elif the_type == \"HtmlDoc\":\n code_section_generator = HtmlDocPage.HtmlDocPage()\n\n elif the_type == \"MdStart\":\n code_section_generator = MdStartPage.MdStartPage()\n elif the_type == \"MdDoc\":\n code_section_generator = MdDocPage.MdDocPage()\n\n else:\n print(f\"GenFactory: unsupported code section ({the_type}).\")\n return None\n\n self._addVisitor(code_section_generator, project_visitor_list)\n\n return code_section_generator", "def make_target(self, state_index: int):\n tv = self.terminal_value(state_index)\n if tv is None:\n tv = 0\n return tv, self.child_visits[state_index]", "def addStartVisitor(self, element_name, visitor):\n self.__node_start_visit_dict[element_name] = visitor", "def buildGraph(self):\n return None", "def build(_):", "def __post_init__(self) -> 
None:\n self.current_line = Line(mode=self.mode)\n\n v = self.visit_stmt\n Ø: Set[str] = set()\n self.visit_assert_stmt = partial(v, keywords={\"assert\"}, parens={\"assert\", \",\"})\n self.visit_if_stmt = partial(\n v, keywords={\"if\", \"else\", \"elif\"}, parens={\"if\", \"elif\"}\n )\n self.visit_while_stmt = partial(v, keywords={\"while\", \"else\"}, parens={\"while\"})\n self.visit_for_stmt = partial(v, keywords={\"for\", \"else\"}, parens={\"for\", \"in\"})\n self.visit_try_stmt = partial(\n v, keywords={\"try\", \"except\", \"else\", \"finally\"}, parens=Ø\n )\n self.visit_except_clause = partial(v, keywords={\"except\"}, parens={\"except\"})\n self.visit_with_stmt = partial(v, keywords={\"with\"}, parens={\"with\"})\n self.visit_classdef = partial(v, keywords={\"class\"}, parens=Ø)\n self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)\n self.visit_return_stmt = partial(v, keywords={\"return\"}, parens={\"return\"})\n self.visit_import_from = partial(v, keywords=Ø, parens={\"import\"})\n self.visit_del_stmt = partial(v, keywords=Ø, parens={\"del\"})\n self.visit_async_funcdef = self.visit_async_stmt\n self.visit_decorated = self.visit_decorators\n\n # PEP 634\n self.visit_match_stmt = self.visit_match_case\n self.visit_case_block = self.visit_match_case", "def build():", "def _build(self, **kwargs):", "def test_01_visit(self):", "def build(self):\n raise NotImplementedError", "def build (self):\n raise NotImplementedError", "def getOrCreateLinkVisitor(self):\n return _osgAnimation.AnimationManagerBase_getOrCreateLinkVisitor(self)", "def init(self):\n self.padre = self.id\n self.sinVisitar = [] \n self.visited = False\n print (\"inicializo algoritmo\")\n for i in range (len(self.neighbors)): #De esta forma se pueden manipular las listas por aparte\n self.sinVisitar.append(self.neighbors[i])", "def build_graph(self):\n start_time = time.time()\n\n # init temp node\n for value in self.domain:\n node = TempDepNode(value)\n self.nodes[value] = node\n\n attr_data = self.graph_data[self.attr_name]\n print(f'{len(attr_data)} records in data')\n\n # init temp edge\n for source_ix, value_i in tqdm(attr_data.items()):\n visited = set()\n for target_ix, value_j in attr_data[source_ix+1:].items():\n if value_j in visited:\n continue\n else:\n visited.add(value_j)\n time_diff = self.graph_data[self.time][target_ix] - \\\n self.graph_data[self.time][source_ix]\n if time_diff > self.time_diff_threshold:\n break\n if (value_i, value_j) not in self.edges or (value_j, value_i) not in self.edges:\n self.edges[(value_i, value_j)] = TempDepEdge(value_i, value_j)\n self.edges[(value_j, value_i)] = TempDepEdge(value_j, value_i)\n self.edges[(value_i, value_j)].add_event(time_diff)\n if value_i != value_j:\n self.edges[(value_j, value_i)].add_event(time_diff)\n end_time = time.time()\n print(f'{end_time-start_time} seconds for graph building')", "def make_walk_node(self, g):\r\n start = len(self.walk)\r\n self.walk.append(g)\r\n g.visited += 1\r\n self.add_loop(start, g)\r\n\r\n i = start\r\n while i < len(self.walk):\r\n node = self.walk[i]\r\n unused = self.find_unused_connection(node)\r\n if unused is None:\r\n i += 2\r\n continue\r\n i += self.add_loop(i, node)\r\n i += 2", "def _build_chain(G, u, v, visited):\n while v not in visited:\n yield u, v\n visited.add(v)\n u, v = v, G.nodes[v]['parent']\n yield u, v", "def __init__(self, query_path, field=None, visit_counter=1):\n if not isinstance(query_path, tuple):\n raise TypeError(u'Expected query_path to be a tuple, was: '\n u'{} 
{}'.format(type(query_path).__name__, query_path))\n if field and not isinstance(field, six.string_types):\n raise TypeError(u'Expected field to be None or string, was: '\n u'{} {}'.format(type(field).__name__, field))\n\n self.query_path = query_path\n self.field = field\n\n # A single visit counter is enough, rather than a visit counter per path level,\n # because field names are unique -- one can't be at path 'X' and\n # visit 'Y' in two different ways to generate colliding 'X__Y___1' identifiers.\n self.visit_counter = visit_counter", "def build(self, A):", "def get_ga_visitor(self, *args, **kwargs):\n if not hasattr(self, 'ga_visitor') or 'force' in kwargs:\n if 'user_id' in kwargs or 'force' in kwargs:\n self.get_utma(**kwargs)\n try:\n self._utma.split('.')\n except:\n self.ga_visitor = Visitor()\n else:\n self.ga_visitor = Visitor().extract_from_utma(self._utma)\n if self._ip:\n self.ga_visitor.ip_address = self._ip\n\n return self.ga_visitor", "def build_plan(self):\n assert False, \"Not implemented.\"", "def get_visitor(self, node):\r\n method = 'visit_' + node.__class__.__name__\r\n return getattr(self, method, None)", "def visitTo(self, date):\n raise NotImplementedError()", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()+self.regularizer_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)", "def __init__(self, method_name, method_time, path, visited_nodes):\n self.name = method_name\n self.time = method_time\n self.path = path\n self.visited_nodes = visited_nodes", "def dft(self, starting_vertex):\n # create a plan to visit stack and add starting_vertex to it\n plan_to_visit = Stack()\n plan_to_visit.push(starting_vertex)\n # create a set for visited_vertices\n visited_vertices = set()\n # while the plan_to_visit stack is not Empty:\n while plan_to_visit.size() > 0:\n # pop the first vertex on the stack\n current_vertex = plan_to_visit.pop()\n # if its not been visited\n if current_vertex not in visited_vertices:\n # print the vertex\n print(current_vertex)\n # mark it as visited, (add it to visited_verticles)\n visited_vertices.add(current_vertex)\n # add all unvisited neighbors to the queue\n for neighbor in self.get_neighbors(current_vertex):\n if neighbor not in visited_vertices:\n plan_to_visit.push(neighbor)", "def visit(self, node):\n method_name = 'visit_' + type(node).__name__\n visit_method = getattr(self, method_name, self.generic_visit)\n return visit_method(node)", "def __init__(self, criterion, max_depth=10):\n self.criterion = criterion\n self.max_depth = max_depth\n self.type = -1\n self.Graph = {}\n \"\"\"\n Structure of self.Graph\n Discrete Input\n {root : {attr_name : {val1 : {...}, val2 : {...}}}}\n > root is a dummy starting point.\n > attr_name is the attribute\n > val1, val2 are values of attr_name\n > the same patterm repeats\n Real Input\n {root : 
{attr_name: {(val,0) : {...}, (val,1) : {...}}}}\n > root is a dummy starting point.\n > attr_name is the attribute\n > val is the threashold value of the attribute\n > (val,0) and (val,1) goes to the subtree when value is less or high than threashold respectively.\n \"\"\"", "def makeTree(self):\n return makeTree(self.events,self.outTree)", "def _visitor_impl(self, arg):\n method = _methods[(_qualname(type(self)), type(arg))]\n return method(self, arg)", "def __init__(self, from_node, to_node, span=None):\n self.from_node = from_node\n self.to_node = to_node\n self.span = span\n self.dummyedges = []", "def __init__(self,\n point_size: int,\n max_levels=6,\n min_levels=3,\n mutation_prob=0.5\n ) -> None:\n self.rec_refs = {}\n self.mutation_prob = mutation_prob\n\n vars1 = []\n vars2 = []\n for i in range(point_size):\n vars1 += [f\"X1[{i}]\"]\n\n for i in range(point_size):\n vars2 += [f\"X2[{i}]\"]\n\n self.grammar = {\n **{f\"<expr_{i}>\": [f\"<expr_{i+1}> <op> <expr_{i+1}>\", f\"<func> ( <expr_{i+1}> <op> <expr_{i+1}> )\"] for i in range(min_levels)},\n **{f\"<expr_{min_levels + i}>\": [f\"<expr_{min_levels + i+1}> <op> <expr_{min_levels + i+1}>\", f\"<func> ( <expr_{min_levels + i + 1}> <op> <expr_{min_levels + i + 1}> )\", \"<term>\"] for i in range(max_levels - min_levels)},\n f\"<expr_{max_levels}>\": [\"<term_1> <op> <term_2>\", \"<term_2> <op> <term_1>\"],\n \"<term>\": [\n \"<term_1>\", \"<term_2>\"\n ],\n \"<term_1>\": [\n \"<var_1>\",\n \"<pre-op> ( <var_1> )\",\n ],\n \"<term_2>\": [\n \"<var_2>\",\n \"<pre-op> ( <var_2> )\",\n ],\n \"<pre-op>\": [\n \"1/\",\n \"-\",\n \"+\",\n \"abs\",\n \"numpy.math.sqrt\"\n ],\n \"<func>\": [\n \"abs\",\n \"\"\n ],\n \"<op>\": [\n \"+\",\n \"*\",\n \"-\",\n \"/\",\n ],\n \"<var_1>\": vars1,\n \"<var_2>\": vars2,\n }\n\n self.non_terminals = sorted(self.grammar.keys())\n\n # these two lines are described in the pseudocode of the reference paper\n rec_refs = self.countRecursiveReferences()\n self.ref_count = {\n key: self.findReferences(key, *rec_refs) for key in self.grammar.keys()\n }", "def get_visits(visit_container):\r\n return visit_container.visits.all()", "def __init__(self):\n self.V = 0 # number of vertices\n self.E = 0 # number of directed edges\n self.adjacency_list = [None] # index 0 is not used\n\n # Traversal\n self.visited_node = [False]\n self.visited = []", "def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):\n self.walls = gameState.getWalls()\n self.startState = gameState.getPacmanPosition()\n if start != None: self.startState = start\n self.goal = goal\n self.costFn = costFn\n self.visualize = visualize\n if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):\n print 'Warning: this does not look like a regular search maze'\n\n # For display purposes\n self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE", "def visit(self, node, node_map):\n node_type = node.__class__.__name__\n method = 'visit_' + node_type \n visitor = getattr(self, method, self.generic_visit)\n \n return visitor(node, node_map)", "def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):\n self.walls = gameState.getWalls()\n self.startState = gameState.getPacmanPosition()\n if start != None: self.startState = start\n self.goal = goal\n self.costFn = costFn\n self.visualize = visualize\n if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):\n print('Warning: this does not look like a regular 
search maze')\n\n # For display purposes\n self._visited, self._visitedlist, self._expanded = {}, [], 0", "def make_graph(info):\n nodes = []\n edges = []\n for fd in info:\n p1 = (fd.x, fd.y)\n p2 = (fd.x2, fd.y2)\n e = p1, p2\n if p1 not in nodes:\n nodes.append(p1)\n if p2 not in nodes:\n nodes.append(p2)\n edges.append(e)\n\n g = Graph()\n g.add_nodes(nodes)\n g.add_edges(edges)\n\n # log.debug ('branch graph', len(g), g)\n return g", "def __init__(self, vertex):\n self.id = vertex\n self.neighbors = {}", "def compile(self):\n\n for v in list(self.graph.values()):\n if isinstance(v, Graph):\n v.compile()\n self._compiled = compile_graph(self.graph)\n return self._compiled", "def __init__(self, state, parent=None):\n self.state = state\n self.n_visits = 0\n self.n_a = {}\n self.q_a = {}\n self.p_a = {}\n # These will initialize value = 0 for whichever keys yet to be added.\n self.n_a = defaultdict(lambda: 0, self.n_a)\n self.q_a = defaultdict(lambda: 0, self.q_a)\n self.p_a = defaultdict(lambda: 0, self.q_a)\n self.parent = parent\n self.children = {}\n self.action_taken = None", "def build(self, trajectory):\n pass", "def _build_impl(self, input):\n self.out = self._build_impl_impl(input)\n return self.out # output", "def generic_visit(self, node):\n \n node.map_subcalls(self.visit)", "def __init__(self, tree, result, url):\n self.tree = tree\n self.result = result\n self.url = url", "def build(self):\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n self.nelements //= self.ntimes\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self.node = np.zeros(self.ntotal, dtype=idtype)\n #oxx, oyy, txy, angle, major, minor, ovm\n self.data = np.zeros((self.ntimes, self.ntotal, 8), dtype=fdtype)\n self.location = np.empty(self.ntotal, dtype='U8')\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def FindCityVisit(self, city_visit_parameters,\n city_visit_accumulator_generator):\n raise NotImplemented()", "def __init__(self, gameState, costFn=lambda x: 1, goal=(1, 1), start=None, warn=True, visualize=True):\n self.walls = gameState.getWalls()\n self.startState = gameState.getPacmanPosition()\n if start != None: self.startState = start\n self.goal = goal\n self.costFn = costFn\n self.visualize = visualize\n if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):\n print('Warning: this does not look like a regular search maze')\n\n # For display purposes\n self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE" ]
[ "0.650446", "0.6304008", "0.5627681", "0.5627681", "0.53474486", "0.53437334", "0.5272759", "0.52613014", "0.52547306", "0.52441776", "0.52312696", "0.51499623", "0.51391286", "0.50897574", "0.50847214", "0.5052277", "0.49540627", "0.49472973", "0.48501718", "0.4815049", "0.48009035", "0.4798739", "0.4795714", "0.47953826", "0.4783006", "0.47248456", "0.47211984", "0.47122", "0.46997938", "0.46870577", "0.46169835", "0.46071023", "0.45990825", "0.45886102", "0.45886102", "0.45860618", "0.45859966", "0.45847178", "0.45703402", "0.45610237", "0.45610237", "0.45610237", "0.45566443", "0.4553467", "0.4548614", "0.45474595", "0.45474595", "0.4534377", "0.4533558", "0.4533558", "0.4512593", "0.45107687", "0.45065585", "0.45043653", "0.45013547", "0.44967505", "0.4494774", "0.4486982", "0.44857737", "0.4474256", "0.4472896", "0.44710886", "0.44697323", "0.44691485", "0.4457734", "0.44554168", "0.44502732", "0.44466838", "0.44203302", "0.44021824", "0.44013417", "0.43970835", "0.439186", "0.43820298", "0.43795696", "0.4370369", "0.43696535", "0.436929", "0.43677205", "0.43663955", "0.4366259", "0.43511412", "0.43508264", "0.43503362", "0.43430787", "0.43423533", "0.4334662", "0.4333783", "0.43325046", "0.43270224", "0.4326803", "0.43253553", "0.43221956", "0.43219924", "0.43147784", "0.43144366", "0.4312413", "0.43085977", "0.43054172", "0.43025663", "0.43024683" ]
0.0
-1
Updates the stay time of the visit
def update_stay_time(self):
    # Would it not be better to simply set self.stay_time = self.get_length()?
    self.stay_time = self.get_length()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_time(self):\n pass # Do nothing", "def update(self):\n super().update()\n self.checkTimeToLive()", "def update(self):\n if not self.exists:\n return\n if AT.TIME_TO_EXPIRE in self.attributes:\n if not self.calculate_time_left():\n self.fire_trigger(TR.TIME_EXPIRED)", "def post_time(self, amt):\n amtOfTime = amt + 1\n Publisher().sendMessage(\"update\", amtOfTime)", "def _RecordVisitTime(self, mr, now=None):\n now = now or int(time.time())\n if not settings.read_only and mr.auth.user_id:\n user_pb = mr.auth.user_pb\n if (user_pb.last_visit_timestamp <\n now - framework_constants.VISIT_RESOLUTION):\n user_pb.last_visit_timestamp = now\n self.services.user.UpdateUser(mr.cnxn, user_pb.user_id, user_pb)", "def __pass_time(self):\n self.hunger += 1\n self.boredom += 1", "def update(self, dt):\n\t\tpass", "def increase_time(self,s):\n self.days += 1\n if self.disease_status > 0:\n self.time_since_infection += 1\n if self.days == 365:\n self.increase_age(s)", "def update(self, dt):", "def update(self, dt):", "def update(self, dt):\n pass", "def GAME_TIME_ADVANCE(dt):", "def setSubmitTime(t):", "def update_trip_time(trip_path, paths, stay_time, mpoi_gains, start_end, model_params, method_use, stay_offset):\n\n trip_time = 0.0\n tot_gain = 0.\n time_list = []\n stay_list = []\n gain_list = []\n\n for idx, node in enumerate(trip_path):\n next_node = trip_path[(idx+1)%trip_path.size]\n rtime = paths[node, next_node]\n trip_time += rtime\n time_list.append(rtime)\n\n # if this is start node or end node check if it is in the tour\n if next_node in start_end and not start_end[next_node]:\n # don't add stay time\n gain_list.append(0)\n stay_list.append(0)\n else:\n # compute stay time\n if method_use == method.proposed or method_use == method.personal or method_use == method.profit:\n stime, gain = find_stay_time(model_params[next_node], rtime, stay_time[next_node], mpoi_gains[next_node], stay_offset)\n else:\n stime = stay_time[next_node]\n gain = mpoi_gains[next_node]\n trip_time += stime\n tot_gain += gain\n\n stay_list.append(stime)\n gain_list.append(gain)\n \n return trip_time, tot_gain, time_list, stay_list, gain_list", "def update_timeval(self):\n self.timeval = self.get_timeval()", "def update_based_on_time(self):\n for counter, agent in enumerate(self.agents):\n if self.t >= agent.getFinishTime() and self.agent_current_task[counter] != -1: # task is finished\n task_num = self.agent_current_task[counter]\n self.finish_time_per_task_dict[task_num] = self.t\n self.is_task_finished[0][task_num] = 1\n agent.changebusy(False)\n self.update_agent_is_idle_based_on_class()", "def stay(self):\n\n pass", "def update(self) -> None:\n\n \n #If time to live is 0\n if self.ttl == 0:\n\n #Kill itself\n self.kill()\n return\n\n #Otherwise\n else:\n\n #Reduce time to live\n self.ttl -= 1\n\n #Call superclass update\n return super().update()", "def elapseTime(self, gameState):\n\n \"*** YOUR CODE HERE ***\"\n\n allPossible = util.Counter()\n\n for oldPos in self.legalPositions:\n actions = gameState.getLegalActions(agentIndex)\n successorStates = [gameState.generateSuccessor(action) for action in actions]\n newPosDist = {}\n for state in successorStates:\n position = state.getAgentPosition(agentIndex)\n prob = 1.0/len(actions)\n newPosDist[position] = prob\n\n for newPos, prob in newPosDist.items():\n allPossible[newPos] += prob * self.beliefs[oldPos]\n\n allPossible.normalize()\n self.beliefs = allPossible", "def update(self, delta_time):\n pass", "def update(self, deltatime):\n pass", "def 
update(self):\n self.age += 1\n self.starve -= 1\n if self.starve < 1:\n self.alive = False\n self.move()", "def update_isolation(self, time: int):", "async def paydaytime(self, ctx: commands.Context, seconds: int):\r\n guild = ctx.guild\r\n if await bank.is_global():\r\n await self.config.PAYDAY_TIME.set(seconds)\r\n else:\r\n await self.config.guild(guild).PAYDAY_TIME.set(seconds)\r\n await ctx.send(\r\n _(\"Value modified. At least {num} seconds must pass between each payday.\").format(\r\n num=seconds\r\n )\r\n )", "def update(self, time):\n raise NotImplementedError", "def update(self, time):\n raise NotImplementedError", "def update_activity(self, time_delta):\n if self.state == DoctorState.IN_PATIENT_EXAM_ROOM:\n return\n # We have the amount of time a doctor stays in patient room as\n # DoctorConstant.PORTION_TIME_SPENT_WITH_PATIENT\n # We have the length of time a doctor stays in patient room as\n # DoctorConstant.AVG_TIME_SPENT_WITH_PATIENT\n # Thus, for average time spent doing OTHER we use PORTION * all time\n # = TIME_SPENT_WITH_PATIENT\n total_time_unit = DoctorConstant.AVG_TIME_SPENT_WITH_PATIENT / \\\n DoctorConstant.PORTION_TIME_SPENT_WITH_PATIENT\n time_spent_doing_other = total_time_unit - \\\n DoctorConstant.AVG_TIME_SPENT_WITH_PATIENT\n chance_change_task = time_delta_to_minutes(time_delta) / \\\n time_spent_doing_other\n if self.state == DoctorState.OTHER and random.random() < \\\n chance_change_task:\n self.state = DoctorState.READY", "def change_time(self, new_time):\r\n self.when = new_time", "def _update_active_rides_fast(self, time: datetime) -> None:\n pass", "def update(self, deltaTime):\n pass", "def updatePullDate(self):\n self.startTime = datetime.now()", "def reportBallSeen(self):\r\n self.lastTimeSeen = time.time()", "def update(self, delta_time):\n self.total_time += delta_time", "def update(self, *args):\n\n gmtexpires = None\n (name, ip, expires) = args[:3]\n\n for arg in args:\n if arg.lower().startswith('expires='):\n gmtexpires = arg[8:]\n\n if gmtexpires is None:\n if len(args) == 3:\n gmtexpires = expires\n else:\n if args[2] == 'NEVER':\n gmtexpires = args[2]\n else:\n gmtexpires = args[3]\n\n self.name = name # \"www.example.com\"\n self.ip = maybe_ip_addr(ip) # IPV4Address instance, or string\n\n if self.ip == '<error>':\n self._expire()\n return\n\n fmt = \"%Y-%m-%d %H:%M:%S\"\n\n # if we already have expiry times, etc then we want to\n # properly delay our timeout\n\n oldexpires = self.expires\n\n if gmtexpires.upper() == 'NEVER':\n # FIXME can I just select a date 100 years in the future instead?\n self.expires = None\n else:\n self.expires = datetime.datetime.strptime(gmtexpires, fmt)\n self.created = datetime.datetime.utcnow()\n\n if self.expires is not None:\n if oldexpires is None:\n if self.expires <= self.created:\n diff = datetime.timedelta(seconds=0)\n else:\n diff = self.expires - self.created\n self.expiry = self.map.scheduler.callLater(diff.seconds,\n self._expire)\n\n else:\n diff = self.expires - oldexpires\n self.expiry.delay(diff.seconds)", "def update(self):\r\n \r\n self.time_to_next_fire = self.generate_fire_recurrence()\r\n return self.time_to_next_fire", "def timeSinceSeen(self):\r\n return time.time() - self.lastTimeSeen\r\n \r\n \t'''定位模块加入了再使用'''", "def important_time(self):\n\t\twork_s = self.work_time().seconds\n\t\tbreak_s = self.break_time().seconds\n\t\tif self.status():\n\t\t\tremaining_time_s = tomato(work_s, break_s)\n\t\telse:\n\t\t\tremaining_time_s = potato(work_s, break_s)\n\n\t\timp_time = datetime.now() + 
timedelta(0, remaining_time_s)\n\t\treturn imp_time", "def step(self):\n self.update(Options['update interval'])", "def set_remain_time(self, time):\n for task in self.tasks:\n task.remain_time = time", "def stale(self, now: datetime | None = None) -> bool:\n return (\n self.last_seen is None\n or (now or dt_util.utcnow()) - self.last_seen > self.consider_home\n )", "def time_passes(self):\n\n old_new = {\n StarType.SUN: StarType.RED_GIANT,\n StarType.RED_GIANT: StarType.WHITE_DWARF,\n StarType.WHITE_DWARF: StarType.SUPERNOVA,\n StarType.SUPERNOVA: StarType.DEAD,\n }\n\n self.star_type = old_new.get(self.star_type, StarType.DEAD)\n\n if self.star_type == StarType.DEAD:\n self.age_years *= 2\n self.mass_tons = 0\n else:\n self.age_years *= 2\n self.mass_tons *= 8", "def _update_time(self):\n if self.time.year != datetime.datetime.now().year or self._this_year is None:\n self._this_year = _data.this_year(self.df, 'case_timestamp')\n if self.time.month != datetime.datetime.now().month or self._this_month is None:\n self._this_month = _data.this_month(self.df, 'case_timestamp')\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._today = _data.today(self.df, 'case_timestamp')\n self.time = datetime.datetime.now()", "def update_goal(self):\n pass", "def work_refresh(self):\n now = dt.now()\n self.eisenhower_priority()\n p_week = now.isocalendar()[1] - self.work_datetime.isocalendar()[1]\n\n if (1 <= p_week) and (self.priority not in [1, 2]):\n self.time_ntf = now\n else:\n pass", "def on_action_time_changed(self, content):\n time = parse_iso_dt(content['time']).time()\n self.set_guarded(time=time)", "def set_time_available(self, new_value):\n\n self.available_at = new_value\n self.save()", "def elapseTime(self, idx):\n newBeliefs = util.Counter()\n for oldPos in self.legalPositions:\n if self.beliefs[idx][oldPos] <= 0:\n continue\n newPosDist = self.getPositionDistribution(oldPos)\n for newPos, prob in newPosDist.items():\n newBeliefs[newPos] += prob * self.beliefs[idx][oldPos]\n newBeliefs.normalize()\n self.beliefs[idx] = newBeliefs", "def update_period(self):\n return 0.1", "def do_upt(self, arg):\n self.do_timesheet('update today')", "def set_times(self):\n if self.anchor == \"P\":\n # specified pickup time, 5 minutes early.\n self.earliestPickup = tools.time_to_seconds(str(self.times)) - 300\n # given pickup time, we are 15 minutes late.\n self.latestPickup = tools.time_to_seconds(str(self.times)) + 900\n # We are given pickup time, caluclate pickup time, and are 5 min early\n self.earliestDropoff = tools.time_to_seconds(self.times) - 300 + self.time_for_travel()\n # we are given pickup time, add travel time, and are 20 minutes\n self.latestDropoff = tools.time_to_seconds(self.times) + self.time_for_travel() + 900\n else:\n # this means the dropoff time is given. calculate the time it takes to drive, and then 5 minutes early\n self.earliestPickup = tools.time_to_seconds(str(self.times)) - self.time_for_travel() - 1200\n # given dropoff time, we calucate when to arrive, and then are 15 minutes late.\n self.latestPickup = tools.time_to_seconds(str(self.times)) - self.time_for_travel()\n # we are given dropoff time. It's earliest pickup time + travel time\n self.earliestDropoff = tools.time_to_seconds(self.times) - 1200\n self.latestDropoff = tools.time_to_seconds(self.times)", "def check_day_advance(self):\n days_ago = datetime.now().toordinal() - self.start_time.toordinal()\n if days_ago:\n # New day. 
Save data for the old day.\n self.save(days_ago = days_ago)\n self.start_time = datetime.now()\n # Reset all counters back to 0:00:00.\n for rd in self.row_detail_list:\n rd.time = '0:00:00'\n self.refresh_display()", "def crawl(self) -> datetime.timedelta:\n pass", "def adjust_traveltime(self, edge):\n\n num_cars = self.graph[edge[0]][edge[1]][\"numcars\"]\n t_0 = self.graph[edge[0]][edge[1]][\"t_0\"]\n N_0 = self.graph[edge[0]][edge[1]][\"N_0\"]\n\n if num_cars == 0:\n self.graph[edge[0]][edge[1]][\"traveltime\"] = t_0\n else:\n traveltime = t_0 * N_0 * (np.exp(num_cars / N_0) - 1) / num_cars\n self.graph[edge[0]][edge[1]][\"traveltime\"] = traveltime", "def renew(self):\n remaining=self.time_left()\n if ( (remaining !=-1) and (self.update_frequency!=-1) and \n (remaining<self.update_frequency) ): \n self.create()", "def save(self, *args, **kwargs):\n if not self.pk:\n self.start_time_rent = datetime.date.today()\n self.end_time_rent = self.start_time_rent + datetime.timedelta(days=7)\n self.reservation.isrented = True\n self.reservation.save()\n return super(Rental, self).save(*args, **kwargs)", "def set_time_to_process_last_submission(self, seconds: int):\n self.snapshot['time_to_process_last_submission'] = seconds", "def update_timestamp(self):\n self._timestamp = datetime.datetime.now()", "def update_time(self, *args):\n s = int(time.time() - self.start_time)\n self.time_label.text = str(datetime.timedelta(seconds=s))", "def update_news_intime(minutes):\n while True:\n db_update.update()\n time.sleep(60 * minutes)", "def _update_transition(self, dt, time, direction): #pylint:disable-msg=C0103,C0301\r\n pass", "def test_travel_up_with_updates(self):\n travelcalculator = TravelCalculator(25, 50)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n travelcalculator.set_position(70)\n travelcalculator.start_travel(50) # 10 seconds to reach 50\n\n mock_time.return_value = 1580000005.0\n assert travelcalculator.current_position() == 60\n assert not travelcalculator.position_reached()\n # update from bus not matching calculation takes precedence (1 second faster)\n travelcalculator.update_position(58)\n assert travelcalculator.current_position() == 58\n assert not travelcalculator.position_reached()\n # position reached 1 second earlier than predicted\n mock_time.return_value = 1580000010.0 - 1\n assert travelcalculator.current_position() == 50\n assert travelcalculator.position_reached()", "def update(self):\n\n self.time_to_next_fire = self.generate_fire_recurrence()\n return self.time_to_next_fire", "def update(self,dt):\n #print self._state\n if self._state == STATE_INACTIVE:\n self._inactive()\n elif self._state == STATE_COUNTDOWN:\n self._countdown()\n elif self._state == STATE_PAUSED:\n self._paused()\n elif self._state == STATE_ACTIVE:\n self._active()\n elif self._state == STATE_RESET:\n self._reset()\n elif self._state == STATE_COMPLETE:\n self._complete()", "def updateTimeStep(self, newDt):\n self.timeStep = newDt", "def take(self):\n self.when_taken = datetime.datetime.now().timestamp()", "def time_updated(self, time_updated):\n self._time_updated = time_updated", "def update_visited(self):\n\t\tcount = self.visited\n\t\tcount = count + 1\n\t\tself.visited = count", "def update(self, current_time, *args):\n self.blockers = self.set_blockers()\n self.current_time = current_time\n state_function = self.state_dict[self.state]\n state_function()\n self.location = self.get_tile_location()", "def touch(self):\n self._timestamps['last_seen'] = 
rospy.get_rostime()", "def on_update(self, delta_time):\n pass", "def on_update(self, delta_time):\n pass", "def update(self, dt=None): #pylint: disable=invalid-name\n if dt is None:\n dt = datetime.utcnow()\n\n self.update_location(self.old_location, dt - timedelta(seconds=1))\n self.update_location(self.current_location, dt)\n self.update_location(self.future_location, dt + timedelta(seconds=1))", "def update(self,dt):\n # increment timer \n if not self.time == None:\n self.time = self.time + 1\n \n if self._state == STATE_INACTIVE:\n self._inactive()\n if self._state == STATE_NEWGAME:\n self._newgame() \n if self._state == STATE_COUNTDOWN:\n self._countdown()\n if self._state == STATE_ACTIVE:\n self._active()\n if self._state == STATE_PAUSED:\n self._paused()\n if self._state == STATE_COMPLETED:\n self._completed()", "async def _timein_refresh(self):\n\t\t\n\t\tawait self.refresh_cache()", "def _update_dates_from_history(self, keep_updated_at: bool = False):\n updated_at = self.updated_at\n state_history = self.state_history\n\n def number_of_transitions(transition_name):\n \"\"\"Return the number of times one transition happened.\"\"\"\n total = [t for t in state_history if t['transition'] == transition_name]\n return len(total)\n\n # updated refused times\n self.refused_times = number_of_transitions('refuse')\n\n def updated_if_changed(attr, t_list, first=False):\n \"\"\"Update only if changed.\"\"\"\n existing = getattr(self, attr)\n new = get_transition_date_from_history(\n t_list, state_history, first=first\n )\n if new != existing:\n setattr(self, attr, new)\n\n # Set first deliver date\n transitions = ('deliver',)\n updated_if_changed('deliver_date', transitions, True)\n\n # Set last deliver date\n transitions = ('deliver',)\n updated_if_changed('last_deliver_date', transitions, False)\n\n # Set acceptance date\n transitions = ('accept', 'refuse')\n updated_if_changed('accept_date', transitions, False)\n\n if keep_updated_at:\n self.updated_at = updated_at", "def _log_update_time(self, *_):\n import time\n if not hasattr(self, '_time'):\n setattr(self, '_time', time.time())\n _time = time.time()\n debug('Time since last call: {:.6f}s'.format(_time - getattr(self, '_time')))\n setattr(self, '_time', _time)", "def set_last_used_on(self):\n self.last_used_on = datetime.now()\n self.save()", "def saveState(self):\n e = constrain.saveState(self)\n e.attrib['timeLeft'] = str(self.timeLeft)\n return e", "def mark_seen(self):\r\n self.seen_at = now()\r\n return self", "def tick(self):\n\n if self.seconds != 59:\n self.seconds += 1\n else:\n self.seconds = 0\n\n if self.minutes != 59:\n self.minutes += 1\n else:\n self.minutes = 0\n\n if self.hours != 23:\n self.hours += 1\n else:\n self.hours = 0", "def test_travel_down_with_updates(self):\n travelcalculator = TravelCalculator(25, 50)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n travelcalculator.set_position(40)\n travelcalculator.start_travel(100) # 15 seconds to reach 100\n\n # time not changed, still at beginning\n assert travelcalculator.current_position() == 40\n assert not travelcalculator.position_reached()\n assert travelcalculator.travel_direction == TravelStatus.DIRECTION_DOWN\n\n mock_time.return_value = 1580000002.0\n assert travelcalculator.current_position() == 48\n assert not travelcalculator.position_reached()\n # update from bus matching calculation\n travelcalculator.update_position(48)\n assert travelcalculator.current_position() == 48\n assert not 
travelcalculator.position_reached()\n\n mock_time.return_value = 1580000010.0\n assert travelcalculator.current_position() == 80\n assert not travelcalculator.position_reached()\n # update from bus not matching calculation takes precedence (1 second slower)\n travelcalculator.update_position(76)\n assert travelcalculator.current_position() == 76\n assert not travelcalculator.position_reached()\n # travel time extended by 1 second due to update from bus\n mock_time.return_value = 1580000015.0\n assert travelcalculator.current_position() == 96\n assert not travelcalculator.position_reached()\n mock_time.return_value = 1580000015.0 + 1\n assert travelcalculator.current_position() == 100\n assert travelcalculator.position_reached()", "def update(self):\n if self._refreshed_at is None or (\n self._refreshed_at + self._refresh_rate <= datetime.datetime.now()):\n\n self.run()", "def edit_time_spent(entry):\n entry.time_spent = get_minutes()\n entry.save()\n input(\"Edit successful. \")\n return entry", "def update_time(cls, key):\n key.put()", "def timer_update(self):\n if self.mineboard.gamestate is not None:\n return\n time_so_far = round(time.time()-self.start_time)\n if time_so_far == 1:\n self.now.set(f\"Time so far: {time_so_far} second\")\n else:\n self.now.set(f\"Time so far: {time_so_far} seconds\")\n self.after(1000, self.timer_update) # calls this function every second", "def update(self):\n if self.account and OPTION.COMET in self.account.options:\n if self.get_conn().get_browser() not in IFRAME_BROWSERS:\n self.game_request.write(OUTPUT_END_TAG)\n self.seconds_played += 1\n self.inactivity += 1\n\n if FLAG.INGESTED in self.flags:\n return\n\n pass", "def lose_life(self):\n self.lives -= 1\n self.alive = self.calculate_alive()", "def LocalUpdate(self):\n\n # Get current timestamp in miliseconds from unix epoch\n t = int(time.time() * 1000)\n\n # Number of times refill has occured\n lstrefil = self.status['timestamp'] - (60000 - self.status['refillIn'])\n nrefil = (t - lstrefil) / 60000.0\n\n if nrefil > 1:\n self.status['tokensLeft'] += self.status['refillRate'] * \\\n int(nrefil)\n\n if self.status['tokensLeft'] > 60 * self.status['refillRate']:\n self.status['tokensLeft'] = 60 * self.status['refillRate']\n\n # Update timestamps\n self.status['timestamp'] = t\n self.status['refillIn'] = int((1 - nrefil % 1) * 60000)", "def update_news_timer():\n try:\n global last_time_user_got_news\n last_time_user_got_news = dt.datetime.timestamp(dt.datetime.now())\n return '', 204\n except Exception as e:\n return e, 500", "def test_work_time_saved(self):\n\n self.create_page.configure()\n\n from_time = '20.10.2014'\n to_time = '25.10.2014'\n days_count = 6\n\n self.create_page.ad_form.set_work_time_by_input(from_time, to_time)\n\n info_page = self.create_page.ad_form.submit()\n edit_page = info_page.edit_page()\n\n text = edit_page.ad_form.get_work_time_line_text()\n actual_days_count = int(text.split()[0])\n\n info_page.delete()\n\n self.assertEquals(days_count, actual_days_count)", "def LingerTime(self) -> int:", "def setInDownTime(self, downtime):\n self.adParams['GLIDEIN_In_Downtime'] = str(downtime)", "def update_dt(self):\r\n if self.CFL(self.dt) >= self.Cmax:\r\n # find the updated timestep\r\n ndt = self.dt*self.Cmax/self.CFL(self.dt)\r\n\r\n # update the timestep\r\n self.dt = ndt", "def noteActivity(): \r\n global lastActivity\r\n lastActivity = millis()", "def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. 
time_metrics = %s', build_metrics_times_data(time_metrics))", "def time_to_sleep():\n # daily event time\n event_time = time.struct_time(\n (now[0], now[1], now[2], DAILY_UPDATE_HOUR, 0, 0, -1, -1, now[8])\n )\n # how long is that from now?\n remaining = time.mktime(event_time) - time.mktime(now)\n # is that today or tomorrow?\n if remaining < 0: # ah its aready happened today...\n remaining += 24 * 60 * 60 # wrap around to the next day\n # return it\n return remaining", "def expireDate(self)->datetime:\n return datetime.now() + self.secondsLeft", "def time_for_travel(self):\n return great_circle(self.pickupcoords, self.dropoffcoords).miles * 3600 / 25", "def time_day_update_func(self, time, day, ride_duration):\n day = (day + ((time + ride_duration) // t)) % d\n time = (time + ride_duration) % t\n return time, day", "def setExpTime(self, exptime):\n with self.lock:\n self.exptime = exptime" ]
[ "0.6485091", "0.63547605", "0.6233127", "0.5951948", "0.59343076", "0.58443356", "0.58252096", "0.57942843", "0.5782025", "0.5782025", "0.5764802", "0.5756116", "0.57427424", "0.5739974", "0.57184714", "0.5699622", "0.56783235", "0.56636465", "0.56427634", "0.56412554", "0.56383103", "0.5631024", "0.55772746", "0.5557848", "0.5537442", "0.5537442", "0.54994017", "0.5463884", "0.54580337", "0.544741", "0.54413617", "0.54329777", "0.543075", "0.54185057", "0.53915447", "0.5388305", "0.5385107", "0.53847384", "0.53790486", "0.53741854", "0.5370701", "0.5370651", "0.53598946", "0.53582156", "0.5356204", "0.53521967", "0.5336604", "0.5324191", "0.5307574", "0.5304693", "0.5304359", "0.5272603", "0.52716255", "0.5266878", "0.5263152", "0.52400815", "0.52215105", "0.52182436", "0.521638", "0.5174936", "0.51687217", "0.5159717", "0.5152072", "0.5143457", "0.51379985", "0.5135232", "0.5134261", "0.51318353", "0.51172435", "0.51155955", "0.51155955", "0.50992876", "0.50978976", "0.5097442", "0.5095314", "0.5092837", "0.5092517", "0.50874674", "0.50860643", "0.5082324", "0.50788367", "0.5071629", "0.5069291", "0.5064057", "0.5059545", "0.50518376", "0.5050523", "0.5043743", "0.50433725", "0.50413513", "0.50410336", "0.5037235", "0.5036058", "0.50348365", "0.50297666", "0.5027905", "0.50272", "0.5011441", "0.500906", "0.5005063" ]
0.75728226
0
Gets the length of the visit in seconds
def get_length(self) -> int:
    return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def secondsPassed(self)->int:\n return self._lic.params['sessionTimeUsed'].value", "def duration(self):\n\t\tif self.status():\n\t\t\t# Currently on, return time since session was started\n\t\t\treturn self.length()\n\t\telse:\n\t\t\t# Otherwise return time until last bit of work\n\t\t\t# Check that this isn't an empty session\n\t\t\tif not self.toggles: return timedelta()\n\t\t\treturn self.toggles[-1] - self.toggles[0]", "def duration(self):\n pass", "def duration(self):\n pass", "def duration(self) -> int:\n return 0", "def duration(self) -> float:\n return time.time() - self.connect_time", "def time_length(self):\n return self._time_length", "def duration(self):\r\n return (self.end_time or time.time()) - self.start_time", "def secondsPassed(self)->int:\n return 0 if not self.used else int((datetime.utcnow() - self.firstAccessDate).total_seconds())", "def duration(self):\n return (self.fcip_doc[\"latest_timestamp\"] - self.fcip_doc[\"packet_timestamps\"][0])", "def duration( self ):\n return (self.start and time.process_time()-self.start) or 0", "def secondsPassed(self)->int:\n return self._lic.params['usedDurationInSeconds'].value", "def length(self):\n if self.running:\n return ZERO_TIME\n else:\n return self.end - self.start", "def duration_in_seconds(self):\n return self.get_data(\"duration_in_seconds\")", "def duration(self):\n if self._connected:\n return (datetime.datetime.now() - self._connected).total_seconds()\n return float('inf')", "def duration(self):\r\n return self.stop - self.start", "def duration(self):\n return total_seconds(self.timestamp - self.start_timestamp)", "def duration(self):\n return self.end_time - self.start_time", "def duration(self):\n return self._end - self._begin", "def duration(self):\n return (datetime.datetime.now() - self._when_connected).total_seconds()", "def duration():\r\n elapsed_time, duration = video_time()\r\n return duration", "def duration(self):\n return self.end - self.start", "def duration(self) -> float:\n return self.endTime()-self.startTime()", "def time_passed(self):\n return (datetime.now(timezone.utc) - self._time_run).total_seconds()", "def get_duration(self):\n\n return self.endtime - self.starttime", "def duration(self):\n return time.time() - self.socket_opened", "def fan_timer_duration(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"fan_timer_duration\"))\r\n return self._fan_timer_duration.seconds / 60", "def duration(self):\n # type: () -> int\n return self._duration", "def duration(self):\n\n ended = time.time() if self.ended is None else self.ended\n return ended - self.started", "def duration(self):\n self.wait()\n return self._duration", "def total_seconds(self):\n return 0", "def duration(self):\n return float('{0:.2f}'.format(self.end_time - self.start_time))", "def duration(self):\n return self._get(\"duration\")", "def get_duration(self):\n duration_ns = self.stream.InitialTimeToWaitGet()\n duration_ns += self.stream.NumberOfFramesGet() * self.stream.InterFrameGapGet()\n return datetime.timedelta(seconds=duration_ns / 1e9)", "def seconds_remaining(self):\n pass", "def time(self) -> int:\n pass", "def LingerTime(self) -> int:", "def get_duration(self):\n return self.duration", "def time(self):\n return self._clock() - self._starttime", "def duration(self):\n return self.end_abs - self.start", "def getTimeLeftSec(self):\n if self.sess is None: return 0\n since = self.sess.data.get('validSince')\n if not since: return 0\n\n sofar = time.time() - since\n if sofar < 0: return 
0\n out = self.sess.data.get('validLifetime', 0) - sofar\n if out < 0: out = 0\n return out", "def duration(self):\n return self._get('duration')", "def get_seconds(self):\n return self.seconds_remaining", "def duration(self):\n self._current_duration = time.perf_counter() - self._duration_start\n return round(self._current_duration, 4)", "def duration(self) -> float:\n return self._stop - self._start if self._stop is not None else None", "def runtime(self):\n return (self.time - self.start).total_seconds()", "def get_duration(self) -> int:\n return int( (self._frame_count / self._fps) * 1000 )", "def login_validity_duration(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"login_validity_duration\")", "def performance_length(self):\n first_touch = self.touches[:1].index[0].to_pydatetime()\n last_touch = self.touches[-1:].index[0].to_pydatetime()\n return (last_touch - first_touch).total_seconds()", "def get_session_length(row):\n time_delta = row['session_end'] - row['session_start']\n session_length = time_delta.total_seconds()\n return session_length", "def duration(self) -> str:\n return pulumi.get(self, \"duration\")", "def duration(self):\n return self._duration", "def duration(self):\n return self._duration", "def duration(self):\n return self._duration", "def duration(self):\n return self._duration", "def duration(self):\n return self._duration", "def duration(self):\n return self._duration", "def duration(self):\n return self._duration", "def duration(self):\n return self._duration", "def Duration(self):\r\n\t\treturn self._get_attribute('duration')", "def get_timeout(self) -> int:", "def life_time(self) -> int:\n\n return self._life_time", "def duration(self):\n if not self.started:\n return None\n start = self.started\n end = self.completed\n if not end:\n end = datetime.utcnow()\n return end - start", "def Duration(self):\n\t\treturn self._get_attribute('duration')", "def seconds(self):\n end = self.end or timezone.now()\n result = end - self.start\n return result.seconds", "def elapsed(self):\n return datetime.datetime.now() - self.start", "def duration_in_seconds(self):\n \"Should not set track length\"\n return self.duration / float(self.samplerate)", "def elapsed(self):\n return str(datetime.datetime.now() - self.start).split('.')[0]", "def duration(self):\n started = self.started_at\n finished = self.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None # can't compute yet", "def duration(self):\r\n return self.t2 - self.t1", "def get_frame_duration(self):\n return self._frame_duration", "def get_time_taken_sec(self) -> float:\n return self.time_stop - self.time_start", "def duration(self) -> float:\n return self.delta_t * len(self)", "def duration(self) -> float:\n return self.delta_t * len(self)", "def get_duration(self):\n return self._duration", "def elapsed_time():\r\n elapsed_time, duration = video_time()\r\n return elapsed_time", "def total_duration(self):\r\n # XXX: bug in duration after slicing - attr_onread should be reset\r\n # after slicing\r\n #return self.duration.sum()\r\n return (self.stop - self.start).sum()", "def get_duration(self, current_time):\n return current_time - self.slam.get_data(node_name=self.last_point_name)['time']", "def age(self):\n\t\treturn time.time() - self.sent", "def elapsed_time_in_seconds(self):\n return self._elapsed_time_in_seconds", "def service_time(self):\r\n #print self.node_monitor_address, self.completion_time - self.node_monitor_launch_time\r\n return (self.completion_time - 
self.node_monitor_launch_time)", "def round_trip_time(self):\r\n return self.completion_time - self.launch_time", "def secondsTotal(self)->int:\n return self._lic.params['maxSessionTime'].value", "def time_remaining(self) -> float:\n\n return self.event.time - time.time()", "def get_session_age(self):\n return (time.time() - self._session_login_time) // 60", "def idle(self):\n return (datetime.datetime.now() - self._last_received).total_seconds()", "def duration(self):\n if hasattr(self, 'completed_on') and hasattr(self, 'started_on'):\n diff = (self.completed_on - self.started_on)\n minutes, seconds = divmod(diff.seconds, 60)\n return \"{:0>2d}m {:0>2d}s\".format(minutes, seconds)\n else:\n return None", "def elapsed(self):\n return self.__last_time() - self.__start", "def elapsed():\n global start_time\n return time.time() - start_time", "def get_duration(self):\n frame_dur = self.get_frame_duration()\n num_frames = self.get_num_frames()\n motion_dur = frame_dur * (num_frames - 1)\n return motion_dur", "def elapsed_time(self) -> float:\n current_time = datetime.utcnow()\n start = self.start_time or current_time\n end = self.end_time or current_time\n return (end - start).total_seconds()", "def getLength(self):\n stop = 0\n if type(self.stop) is SharedCounter:\n stop = self.stop.getVal()\n else:\n stop = self.stop\n return stop - self.start", "def virtual_time(self):\n return (_time.time() - PROTOCOL_START_TIME) / ROUND_DURATION", "def length(self):\n\t\treturn datetime.now() - self.toggles[0]", "def time(self):\n return sum(self._interval) * .5", "def track_duration(self):\n return self._track_duration", "def get_duration(self):\n return float(self.time.iloc[-1] - self.time.iloc[0])", "def remaining_ms():", "def get_timed(self):\n ret = self.send(\"?T\", recv=True)\n ret = int(ret, 10)\n # FIXME: range?\n assert 1 <= ret <= 9999\n return ret", "def get_duration(self):\n return (self.stop_day - self.start_day) * (24 * 60) \\\n + (self.stop_hour - self.start_hour) * 60" ]
[ "0.73857874", "0.7209997", "0.71742225", "0.71742225", "0.7137135", "0.7124937", "0.7086354", "0.7061175", "0.7056725", "0.70382965", "0.7029751", "0.69703007", "0.6957682", "0.69457513", "0.6928", "0.6925967", "0.6889457", "0.68751544", "0.684056", "0.6830896", "0.68183225", "0.68122214", "0.68091017", "0.6802342", "0.6769146", "0.6758418", "0.6749954", "0.67275083", "0.67032063", "0.66708523", "0.6665829", "0.66651833", "0.6627158", "0.6620077", "0.66173524", "0.66089654", "0.6608021", "0.65892774", "0.6587198", "0.6580114", "0.6578147", "0.6575843", "0.6566888", "0.6565648", "0.65637606", "0.65451354", "0.65437704", "0.6533034", "0.65316755", "0.6517773", "0.6502146", "0.65004724", "0.65004724", "0.65004724", "0.65004724", "0.65004724", "0.65004724", "0.65004724", "0.65004724", "0.65001005", "0.64924365", "0.64903903", "0.6487863", "0.6482062", "0.64804286", "0.64755446", "0.6473566", "0.6436603", "0.64311117", "0.6429474", "0.64263254", "0.64179283", "0.64074975", "0.64074975", "0.6401234", "0.6397645", "0.6388592", "0.6382992", "0.6371846", "0.63667005", "0.6356755", "0.634953", "0.6349263", "0.63464004", "0.63437414", "0.6340072", "0.6335174", "0.6333558", "0.6326744", "0.63232005", "0.63023996", "0.62990785", "0.6297639", "0.6297492", "0.6297243", "0.62832594", "0.62784374", "0.6278299", "0.62551916", "0.6253613" ]
0.6827581
20
Repeats a message multiple times.
async def repeat(self,ctx, times: int, content='repeating...'): for i in range(times): await ctx.send(content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def repeat(ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "async def repeat(times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)", "async def repeat(ctx, times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)", "def async_repetitive_message(message, interval_seconds):\n repeat = ['-', '\\\\', '|', '/']\n\n for switch in itertools.cycle(repeat):\n print('\\r[{}] {}'.format(switch, message), end='')\n yield from async_sleep(interval_seconds)", "async def repeat(self, ctx, *, text):\n await ctx.send(text)", "async def ripgupta(self, ctx, count, *, message):\n int(count)\n gupta = 468209010978455552\n channel = 617525238392946699\n mloop = 0\n int(mloop) \n while mloop > count:\n await channel.send(\"{} {}\".format(gupta.mention, message))\n int(mloop)\n mloop = mloop + 1", "async def repeat(ctx, *, arg):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('repeat: ' + arg, extra={'invoker': ctx.message.author.name})\r\n await ctx.send(arg)", "async def repeat(self, ctx, times : int, content : str):\n if times < 6:\n for i in range(times):\n await ctx.send(content)\n else:\n await ctx.send(\"Please don't get me banned by Discord! (Max 5)\")", "async def do(ctx, times : int, *, command):\n msg = copy.copy(ctx.message)\n msg.content = command\n for i in range(times):\n await bot.process_commands(msg)", "async def repeat(\n text: ('str', 'The content to repeat')\n):\n if not text:\n text = 'nothing to repeat'\n \n return InteractionResponse(text, allowed_mentions = None)", "def cycle(self, message):\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n players = self.player_queue.pop_all()\n players_str = ' '.join(players)\n channel = SOCKET_ARGS['channel']\n if len(msg_list) > 1:\n credential_str = ' '.join(msg_list[1:])\n whisper_str = 'You may now join {} to play. The credentials you need are: {}'.format(\n channel, credential_str)\n self.player_queue_credentials = credential_str\n else:\n whisper_str = 'You may now join {} to play.'.format(channel)\n self.player_queue_credentials = None\n for player in players:\n self._add_to_whisper_queue(player, whisper_str)\n # self.command_queue.appendleft(('_delete_last_row', {}))\n self._add_to_chat_queue(\"Invites sent to: {} and there are {} people left in the queue\".format(\n players_str, len(self.player_queue.queue)))", "def cycle_one(self, message):\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n channel = SOCKET_ARGS['channel']\n try:\n player = self.player_queue.pop()\n if len(msg_list) > 1:\n credential_str = ' '.join(msg_list[1:])\n whisper_str = 'You may now join {} to play. The credentials you need are: {}'.format(\n channel, credential_str)\n elif self.player_queue_credentials is not None:\n credential_str = self.player_queue_credentials\n whisper_str = 'You may now join {} to play. 
The credentials you need are: {}'.format(\n channel, credential_str)\n else:\n whisper_str = 'You may now join {} to play.'.format(channel)\n self._add_to_whisper_queue(player, whisper_str)\n self._add_to_chat_queue(\"Invite sent to: {} and there are {} people left in the queue\".format(player, len(self.player_queue.queue)))\n # self.command_queue.appendleft(('_delete_last_row', {}))\n except IndexError:\n self._add_to_chat_queue('Sorry, there are no more players in the queue')", "def send_spam_msg(driver, name, message, n):\r\n\r\n for i in range(n):\r\n send_message(driver, name, message)", "def repeat(word, repetitions):\n return word * repetitions", "def repeat(self, count):\n return self.Sequence((self,) * count)", "def repeat(s):\r\n\r\n return s", "def shake(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)\n r.go(-25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)", "def repeat_string_n_times(string, count):\r\n return string * int(count)", "async def echo(ctx, *, message=None):\n message = message or \"Please provide the message to be repeated.\"\n await ctx.message.delete()\n await ctx.send(message)", "def repeat_timers(bot, chat_id, message_id):\n\n bot_collection[chat_id].timers.repeat()\n start_timer(bot, chat_id, message_id)", "def repeat(self, repeat: bool=None):\n self._select_interface(self._rc_repeat, self._http_repeat, repeat)", "async def repeat(self, msg):\n if msg.guild.id in self.player:\n if msg.voice_client.is_playing() is True:\n if self.player[msg.guild.id]['repeat'] is True:\n self.player[msg.guild.id]['repeat'] = False\n return await msg.message.add_reaction(emoji='✅')\n\n self.player[msg.guild.id]['repeat'] = True\n return await msg.message.add_reaction(emoji='✅')\n\n return await msg.send(\"No audio currently playing\")\n return await msg.send(\"Bot not in voice channel or playing music\")", "def repeat(self):\n return self._repeat", "def repeat(self, fn, *args, **kwargs):\n return repeat_n_times(self.n, fn, *args, **kwargs)", "def repeat(self, number_of_repeats):\n return \"G\" + str(number_of_repeats)", "def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:", "def sendMessage_0(self, messages):\n for message in messages:\n self.sendMessage(message)", "def message_all(self, message):\n # We copy the _clients into a list to avoid dictionary changing\n # size during iteration.\n for character in self.players.values():\n character.message(message)", "async def say(self, string, *, update=True):\r\n said = False\r\n while not said:\r\n if not self.ended:\r\n for x in range(4):\r\n try:\r\n msg = await bot.send_message(self.channel, string)\r\n said = True\r\n if update and self.player:\r\n self.player.update_message(string)\r\n return\r\n except (discord.HTTPException, OSError, aiohttp.ClientResponseError) as e:\r\n print(\"Suffered\", type(e), \"error in botcommand.say().\")\r\n print(\"info: \", string, self.channel.name, self.player.id)\r\n await asyncio.sleep(x ** x)\r\n self.end()\r\n raise CommandEndedError\r\n else:\r\n raise CommandEndedError", "def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat", "def 
ScrollMessage(text, color, repeat):\n text_area.text = text\n text_area.color = color\n\n # Start the message just off the side of the glasses\n x = display.width\n text_area.x = x\n\n # Determine the width of the message to scroll\n width = text_area.bounding_box[2]\n\n for _ in range(repeat):\n while x != -width:\n x = x - 1\n text_area.x = x\n\n # Update the switch and if it has been pressed abort scrolling this message\n switch.update()\n if not switch.value:\n return\n\n time.sleep(0.025) # adjust to change scrolling speed\n x = display.width", "def repeat(self):\n return self._get('repeat')", "def printmessage(text, amount=1):\n\n # Repeat for value of amount\n for _ in range(amount):\n # Print the text\n print(text)", "def repeat(s, exclaim):\n\n result = s*3\n\n if exclaim:\n result = result + '!!!'\n\n return result", "def send_messages(messages):\n while messages:\n msg = messages.pop()\n sent_messages.append(msg)", "def _workout_messages(self, msgs_bunch):\n if msgs_bunch != []:\n while True:\n r = requests.post(self.url, headers = self.headers, data = json.dumps(msgs_bunch))\n # request success condition below - to end the handler\n if r.status_code == 200:\n break\n print('http_handler: failed to retranslate messages, try again in ' + str(self.timeout) + ' sec')\n time.sleep(self.timeout)\n # next bunch of messages will not be read until this function ends\n # current bunch of messags will be deleted in next request if delete_flag = True is set", "def next_message(self):\n while self.queue.consuming:\n yield self.queue.channel._consume_message()", "async def send_wrapped_message(channel, message):\n for part in wrap(message, 2000):\n await channel.send(part)", "def repeated(self, *args, **kwargs):\n return self.rep.RepeatBorders(self._trg, *args, **kwargs)", "def run(self):\n run1=0\n while (run1==0):\n Publisher().sendMessage(\"updatetext\", \"\")\n time.sleep(3)", "def twist(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(0, 50)\n time.sleep(.75)\n r.stop()\n time.sleep(.1)\n r.go(0, -50)\n time.sleep(.75)\n r.stop()\n time.sleep(.1)", "async def bother(self, ctx, user: discord.Member):\n for i in range(5):\n msg = await ctx.bot.send_message(ctx.message.channel, user.mention)\n await ctx.bot.delete_message(msg)", "def alert_pet(self, reps=3):\n for x in range(0,reps):\n time.sleep(1)\n GPIO.output(self.alert_pin, 0)\n time.sleep(1)\n GPIO.output(self.alert_pin, 1)\n return", "def repeat(self, count):\n x = HSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def repeatfunc(func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def repeatfunc(func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def repeat(self, count):\n x = _OSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def repeatfunc(cls, func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def sender_iter(self):\n while 1:\n yield self.send_next()", "def repeat_count(instance, args):\r\n count = instance.repeat_count(args)\r\n return count", "async def bother(self, ctx, user: discord.Member):\n for i in range(5):\n msg = await ctx.send(user.mention)\n await msg.delete()", "def retry(times):\n return repeat_with_success_at_least(times, 1)", "def MULTIPLAYER_LOOP():\n pass", "def repeat(s,exclaim):\n result = s+s+s\n if exclaim:\n result = result 
+ '!!!'\n return result", "def repeat(self, state, device=None, **kwargs):\n if state not in [\"track\", \"context\", \"off\"]:\n logger.warning(\"Invalid state\")\n return\n\n self._put(\n API.REPEAT.value, state=state, device_id=device, check_202=True, **kwargs\n )", "async def repeat(self, ctx: commands.Context, mode: t.Optional[str]) -> None:\n player = self.get_player(ctx.guild)\n\n # Show a helpful embed if an invalid mode was passed.\n if mode not in [\"none\", \"one\", \"all\"]:\n embed = discord.Embed(colour=Colours.regular, timestamp=Embeds.now())\n embed.description = \"Valid repeat modes are: `none`, `one` and `all`.\"\n embed.set_footer(\n text=f\"The current repeat mode is {player.queue.repeating.name}.\",\n icon_url=Icons.info,\n )\n return await ctx.send(embed=embed)\n\n # Otherwise, just set the mode and display a success message.\n if mode == \"none\":\n player.queue.repeating = RepeatMode.none\n elif mode == \"one\":\n player.queue.repeating = RepeatMode.one\n elif mode == \"all\":\n player.queue.repeating = RepeatMode.all\n\n embed = Embeds.status(success=True, desc=f\"Changed repeat mode to `{mode}`.\")\n await ctx.send(embed=embed)", "async def pm(self, string, *, update=False):\r\n said = False\r\n while not said:\r\n if not self.ended:\r\n for x in range(4):\r\n try:\r\n try:\r\n await bot.send_message(self.author, string)\r\n except discord.Forbidden:\r\n await bot.send_message(self.channel, string)\r\n said = True\r\n if update and self.player:\r\n self.player.updateMessage(string)\r\n return\r\n except (discord.HTTPException, OSError, aiohttp.ClientResponseError) as e:\r\n print(\"Suffered\", type(e), \"error in botcommand.pm().\")\r\n print(\"info: \", string, self.player.id)\r\n await asyncio.sleep(x ** x)\r\n self.end()\r\n raise CommandEndedError\r\n else:\r\n raise CommandEndedError", "def repeat(a, repeats, axis=None):\n return afnumpy.asarray(a).repeat(repeats, axis=axis)", "def multiple_messages(self, messages):\n for message in messages:\n cmd = '{}serverMessage \"{}\"'.format(self.console, Commands.aquote(message))\n self.write_command(cmd)", "def simple_send():\n i = None\n while True:\n i = yield i", "def MultiMessage(self, *args, **kwargs):\n pass", "def comunication_loop(client_socket):\n again = True\n while again:\n # Send a message\n message = input(\"Enter message: \")\n if message == 'Bye':\n again = False\n byte_message = message.encode('utf-8')\n client_socket.send(byte_message)\n # Wait for the reply\n byte_reply = client_socket.recv(1024)\n reply = byte_reply.decode('utf-8')\n print(f\"Got reply {reply}\")\n client_socket.close()", "def multiple_whispers(self, nickname, messages):\n for message in messages:\n cmd = '{}serverWhisper \"{}\" \"{}\"'.format(self.console, Commands.aquote(nickname), Commands.aquote(message))\n self.write_command(cmd)", "def _process_repeaters(self, s: str) -> str:\r\n while self._color_and_repeater_regexp.search(s):\r\n s = self._insert_reset_char(s)\r\n s = self._match_and_remove_repeater(s)\r\n\r\n return s", "def tick():\n global n, message\n n += 1\n message = format(n)", "async def esay(self, string):\r\n said = False\r\n while not said:\r\n if not self.ended:\r\n for x in range(5):\r\n try:\r\n msg = await bot.send_message(self.channel, string)\r\n self.end()\r\n return\r\n except (discord.HTTPException, OSError, aiohttp.ClientResponseError) as e:\r\n print(\"Suffered\", type(e), \"error in botcommand.esay().\")\r\n print(\"info: \", string, self.channel.name, self.player.id)\r\n await asyncio.sleep(x ** 
x)\r\n self.end()\r\n else:\r\n raise CommandEndedError", "def testValidate_Repeated(self):\n class SimpleMessage(messages.Message):\n repeated = messages.IntegerField(1, repeated=True)\n\n simple_message = SimpleMessage()\n\n # Check valid values.\n for valid_value in [], [10], [10, 20], (), (10,), (10, 20):\n simple_message.repeated = valid_value\n simple_message.check_initialized()\n\n # Check cleared.\n simple_message.repeated = []\n simple_message.check_initialized()\n\n # Check invalid values.\n for invalid_value in 10, ['10', '20'], [None], (None,):\n self.assertRaises(\n messages.ValidationError,\n setattr, simple_message, 'repeated', invalid_value)", "def Repeat(dataset, count=None):\n return dataset.repeat(count=count)", "def repeat(x, repeats, axis=None):\r\n return RepeatOp(axis=axis)(x, repeats)", "def send_messages(messages, sent_messages):\n while messages:\n current_message = messages.pop()\n print(f\"Sending message: {current_message}\")\n sent_messages.append(current_message)", "def emulate_repeat(self, value, timeval):\n repeat_event = self.create_event_object(\n \"Repeat\",\n 2,\n value,\n timeval)\n return repeat_event", "def while_repeat(sentence_string_list,input_word,answer_list):\r\n\tchance = 5\r\n\tRepeat = \"Repeat\"\r\n\t\r\n\twhile Repeat == \"Repeat\":\r\n\t\tprint \" \".join(sentence_string_list)+\"\\n\"\r\n\t\tuser_ans = raw_input(\"Your answer of \" + input_word + \" is : \")\r\n\r\n\t\tif correct_or_not(user_ans,answer_list):\r\n\r\n\t\t\tuser_ans_list.append(user_ans)\r\n\t\t\treplace_all(sentence_string_list,input_word, user_ans)\r\n\t\t\tprint \"/////Corrent!/////\\n\"\r\n\t\t\tRepeat = \"Stop\"\r\n\r\n\t\telse:\r\n\t\t\tchance -= 1\r\n\t\t\tprint \"/////Worng! You've got \" + str(chance) + \"chances left!/////\\n\"\r\n\t\t\tRepeat = \"Repeat\"\r\n\r\n\t\tif chance == 0:\r\n\t\t\treturn \"unvalid\"\r\n\t\t\r\n\treturn \"valid\"", "def chat_jumble(self, msg, *args):\n if self.jumble:\n msg.body = ''.join(random.sample(self.jumble, len(self.jumble)))\n else:\n word_site = \"https://svnweb.freebsd.org/csrg/share/dict/words?view=co&content-type=text/plain\"\n r = req.get(word_site)\n self.jumble = random.choice(r.text.splitlines()).lower()\n msg.body = ''.join(random.sample(self.jumble, len(self.jumble)))\n self.sendmsg(msg)", "def IRC_send_called_every_three_seconds(self):\n\n if (self.ircMessageBuffer):\n try:\n # print(\"Buffered\")\n stringToSend = str(self.ircMessageBuffer.popleft())\n print(\"string to send : \" + stringToSend)\n if self.ircSocket:\n self.ircSocket.send((stringToSend).encode('utf8'))\n except Exception as e:\n logging.error(\"IRC send error:\")\n logging.error(\"In IRCSendCalledEveryThreeSeconds\")\n logging.error(str(e))\n logging.exception(\"Exception : \")", "async def async_send_command(self, command: Iterable[str], **kwargs: Any) -> None:\n num_repeats = kwargs[ATTR_NUM_REPEATS]\n\n for _ in range(num_repeats):\n for single_command in command:\n await self.coordinator.roku.remote(single_command)\n\n await self.coordinator.async_request_refresh()", "def run(self):\n while self._msg_queue:\n actor, msg = self._msg_queue.popleft()\n try:\n actor.send(msg)\n except StopIteration:\n pass", "def repeat(obj, times=None):\n if times is None:\n return Iter(itertools.repeat(obj))\n return Iter(itertools.repeat(obj, times))", "def repeater(seconds):\n return lambda function: TwistedRepeater(function, seconds)", "def repeatfunc(func, n, *args):\n return starmap(func, repeat(args, n))", "def send(self, message):\n if not hasattr(message, 
'__iter__'):\n self.socket.send(message, constants.NOBLOCK)\n else:\n for m in message[:-1]:\n self.socket.send(m, constants.NOBLOCK | constants.SNDMORE)\n self.socket.send(message[-1], constants.NOBLOCK)\n\n if self.read_scheduled is None:\n self.read_scheduled = reactor.callLater(0, self.doRead)", "def repeat(iterable, count=None):\n if count is None:\n while True:\n for sample in iterable:\n yield sample\n else:\n for i in range(count):\n for sample in iterable:\n yield sample", "def repeat_value(value: Any = None, repeat_count: int = None) -> ObservableBase:\n from ..operators.observable.repeat import repeat_value\n return repeat_value(value, repeat_count)", "def sendMessage(self, message):\n for component in CraftChatMessage.fromString(message):\n self.block.sendMessage(component)", "def forever():\n\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing)\n return animate", "def MakeRepeat1(self,content):\n return self.register(Repeat1(content,reg=self))", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def repeating_char_tally(user_name, msg_logs, scorecard_map):\n for row in msg_logs:\n msg = ujson.loads(row[0])\n content = msg['Text']\n\n # emojis are written with multiple characters but we want to treat them as one unit\n # this line replaces emojis with a special character for easy counting\n content = re.sub(r'\\[[a-zA-Z]+\\]', '@', content)\n\n max_char, cnt = get_max_repeating_char(content)\n if cnt < 3 or max_char in [' ', '.', '。', '-', '_', '+', '=', ',', '`', '*', '|', '\\\\']:\n continue\n\n if is_my_outgoing_msg(msg):\n scorecard_map[user_name].my_pval += 0.1 * (cnt - 2)\n else: # this is an incoming message from my friend\n scorecard_map[user_name].their_pval += 0.1 * (cnt - 2)", "def repeat(phrase, num):\n #JUST DISCOVERED DOCTEST!!!\n #ALL THIS TIME i'VE BEEN MANUALLY CUTTING AND PASTING THE DOCTESTS TO MANUALLY\n #TEST THEM WHEN I COULD HAVE JUST BEEN RUNNING THEM!\n #GAAAAAHHHH!!!\n if isinstance(num, (int, float)) and num >= 0:\n return phrase * num\n else:\n return None", "async def joke(message):\n return random.choice(jokes)", "def repeat(self, repeat):\n if repeat in RepeatTypes.values():\n self._set('repeat', repeat)\n else:\n raise ValidationError(\"%s is not an allowed repeat type (%s).\" % (repeat, ', '.join(RepeatTypes.values())))", "def _keep_getting_new_messages(self):\n while True:\n new_messages = self.get_new_messages()\n for message in new_messages:\n self.handle(message)\n time.sleep(self.refresh_delay)", "def kitt(r, num_repeats=2):\n for i in range(num_repeats):\n r.setLEDs(0, 255, 0, 0)\n time.sleep(.25)\n r.setLEDs(0, 0, 1, 0)\n time.sleep(.25)\n r.setLEDs(0, 0, 0, 1)\n time.sleep(.25)\n r.setLEDs(0, 0, 1, 0)\n time.sleep(.25)\n r.setLEDs(12, 255, 0, 0)\n time.sleep(.25)\n r.setLEDs(0, 0, 1, 0)\n time.sleep(.25)\n r.setLEDs(0, 0, 0, 1)\n time.sleep(.25)\n r.setLEDs(0, 0, 1, 0)\n time.sleep(.25)\n r.setLEDs(255, 255, 0, 0)\n time.sleep(.25)\n r.setLEDs(0, 0, 1, 0)\n time.sleep(.25)\n r.setLEDs(0, 0, 0, 1)\n time.sleep(.25)\n r.setLEDs(0, 0, 1, 0)\n time.sleep(.25)\n r.setLEDs(0, 255, 0, 0)", "def test_subsequent_new_messages(self):\n # Emulate inbox check\n self.alice_inbox.update_last_checked()\n\n messages = []\n for i in range(5):\n new_message = Message.objects.create(level=constants.INFO, text=\"Message {0}\".format(i + 1),\n author=self.bob, user_generated=True)\n new_message.sent_to_users.add(self.alice)\n messages.append(new_message)\n self.assertEqual(i + 1, self.alice_inbox.new_count)\n\n 
self.assertEqual(5, self.alice_inbox.new_count)\n messages.reverse()\n self.assertSequenceEqual(messages, self.alice_inbox.new)\n self.alice_inbox.update_last_checked()\n self.assertEqual(0, self.alice_inbox.new_count)\n self.assertSequenceEqual([], self.alice_inbox.new)", "def forever(shard):\n def repeat(*args, **kwargs):\n while True:\n for delay in shard(*args, **kwargs):\n yield delay\n return repeat", "def repeat_nd(x, reps):\n return RepeatND(reps)(x)", "def run(self):\n most_recent = self.__most_recent\n while True:\n emails = self.__get_emails()\n\n if most_recent != emails[0]:\n print(f'{self.__source} New messsage recieved')\n\n # Dispatch event for new email\n self.__email_event()\n\n # Reset most recent\n most_recent = self.__get_emails()[0]\n\n else:\n time.sleep(0.3)", "def doContinue(self, message, razzy):\n return", "def multiply_string(message, n):\r\n return message*n", "def repeat(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"repeat\")", "def pass_multi_request(message, num_words=1):\n try:\n tries = int(num_words)\n except ValueError:\n message.reply(Strings['NONSENSE'])\n return\n if (tries > 10):\n message.reply(Strings['TOO_MANY_PASSWORDS'])\n return\n if (tries < 1):\n message.reply(Strings['NONSENSE'])\n return\n for x in range(tries):\n message.reply(\"```\" + generate_password() + \"```\")" ]
[ "0.75642896", "0.75241643", "0.74671906", "0.6782859", "0.6725025", "0.66473454", "0.6640616", "0.6501877", "0.6463185", "0.6406237", "0.62558305", "0.6149078", "0.61431366", "0.61045724", "0.60727173", "0.60414445", "0.6028678", "0.59844077", "0.59493715", "0.59287435", "0.57907003", "0.5782056", "0.5741135", "0.5721196", "0.56817126", "0.5671292", "0.56708336", "0.5663998", "0.5582047", "0.5578785", "0.5541155", "0.553998", "0.5523861", "0.55223894", "0.54999846", "0.5470145", "0.5467135", "0.54557747", "0.544217", "0.5432938", "0.541108", "0.5400931", "0.5392364", "0.5389491", "0.537541", "0.537541", "0.5361557", "0.53554475", "0.5354367", "0.5352066", "0.53496647", "0.5348531", "0.53432053", "0.5340562", "0.5338527", "0.53309774", "0.53290707", "0.5323805", "0.5323495", "0.5312559", "0.5309107", "0.53063405", "0.53018034", "0.5287368", "0.52859485", "0.52780145", "0.52759635", "0.5272437", "0.52654386", "0.52625304", "0.52589595", "0.5257265", "0.5242863", "0.523643", "0.5235992", "0.52335423", "0.52182615", "0.5212192", "0.5204904", "0.52012146", "0.5201134", "0.51672524", "0.5166822", "0.5158946", "0.5154864", "0.5150165", "0.51475275", "0.51409173", "0.51358235", "0.5135724", "0.5123904", "0.5119655", "0.5113025", "0.51123774", "0.5109492", "0.5083975", "0.5080563", "0.50787324", "0.50775635", "0.50768775" ]
0.7628505
0
create user for testing
def create_user(username,password): return User.objects.create_user(username=username,password=password)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def create_test_user():\n return User.objects.create(username='test_username', password='test_password')", "def test_create_user(self):\n \n new_user = {\"username\": \"beny1976\", \"vocab_count\": 0, \"name\": \"beny rood\", \"sex\": \"male\", \"dob\": \"18/10/1979\"}\n msg = app.create_user(predefined_user=new_user)\n self.assertTrue(msg != \"\")", "def setUp(self):\n account_models.User.objects.create_user(email='[email protected]', password='WhoAmI', username='aov1')", "def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='[email protected]', )\n u.set_password('test_password')\n u.save()\n self.user = u\n return u", "def create_user(email, password, f_name, l_name):\n pass", "def test_createUser_single(self):\n #TODO: this and other tests", "def sample_user(email='[email protected]', password='open@123'):\n return get_user_model().objects.create_user(email, password)", "def setUp(self):\n self.new_user = User.objects.create_user(first_name='John', last_name='Doe', username='john_doe', email='[email protected]', bio='I am new here.', password='test_password', website='example.com', social_media={\n 'facebook':'Facebook link',\n 'Dribble': 'Dribble link',\n })", "def sample_user(email, password, is_doctor, is_hospital_admin):\n return MyUser.objects.create_user(email, is_hospital_admin, is_doctor, password)", "def setup_test_user(self):\n self.setup_test_tenant()\n self.test_user = rand_name('test_user_')\n self.test_password = rand_name('pass_')\n self.test_email = self.test_user + '@testmail.tm'\n resp, self.user = self.client.create_user(self.test_user,\n self.test_password,\n self.tenant['id'],\n self.test_email)\n self.users.append(self.user)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def test_create_user(self):\n user = User(\"Gideon Bamuleseyo\", \"[email protected]\", \"secret\")\n self.assertEqual(user.name, \"Gideon Bamuleseyo\")\n self.assertEqual(user.email, \"[email protected]\")\n self.assertEqual(user.password, \"secret\")", "def sample_user(email: str = \"[email protected]\", password: str = \"testpass\"):\n return get_user_model().objects.create_user(email, password)", "def setUp(self):\n self. 
user = User.objects.create_user(username='fredbob',\n first_name='Fred',\n last_name='Bob',\n email='[email protected]',\n password='foobar')", "def sample_user(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def test_create_defined_user(self):\r\n self._auto_auth(\r\n username='robot', password='test',\r\n email='[email protected]', full_name=\"Robot Name\"\r\n )\r\n\r\n # Check that the user has the correct info\r\n user = User.objects.get(username='robot')\r\n self.assertEqual(user.username, 'robot')\r\n self.assertTrue(user.check_password('test'))\r\n self.assertEqual(user.email, '[email protected]')\r\n\r\n # Check that the user has a profile\r\n user_profile = UserProfile.objects.get(user=user)\r\n self.assertEqual(user_profile.name, \"Robot Name\")\r\n\r\n # By default, the user should not be global staff\r\n self.assertFalse(user.is_staff)", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def sample_user(username='arturbartecki', password='testpassword'):\n return get_user_model().objects.create_user(username, password)", "def sample_user(email=user_v['email'], password=user_v['password']):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='password'):\n return get_user_model().objects.create_user(email, password)", "def setUp(self):\n self.user = User.objects.create_user(username='Marry', email='[email protected]', password='secret')\n self.user.first_name = 'Marry'\n self.user.last_name = 'Tomson'\n self.user.save()", "def create_test_user(self):\n user = User.objects.create_user(\n username='[email protected]', password='password')\n user.groups.add(self.group)\n user.user_permissions.add(p('wagtailadmin.access_admin'))\n user.save()\n return user", "def sample_user(email='[email protected]', password='testpass'):\n\n return get_user_model().objects.create_user(email, password)", "def test_create_user(self):\n first_name = \"b\"\n last_name = \"b\"\n username = \"b\"\n email = \"b\"\n password = \"b\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, username, email, password)\n self.assertTrue(result)\n\n user = User.objects.get(username=username)\n self.assertEqual(first_name, user.first_name)\n self.assertEqual(last_name, user.last_name)\n self.assertEqual(username, user.username)\n self.assertEqual(email, user.email)\n self.assertEqual(password, user.testdata.password)\n self.assertEqual(username, user.testdata.username)\n self.assertEqual(email, user.testdata.email)\n self.assertNotEqual(user.authtests, None)", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n return user", "def setUp(self):\n self.credentials = {\"username\": \"BobRobert\", \"password\": \"fglZfYmr%?,\"}\n User.objects.create_user(**self.credentials)", "def sample_user_third(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name3\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n 
name=name)", "def test_create_user(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n \"password_repeat\": \"supersecret\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n\n user = User.query.filter_by(id=6).first()\n self.assertTrue(user)\n self.assertEqual(user.firstname, \"John\")\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n\n return user", "def sample_user(email=\"[email protected]\", password=\"password123\"):\n\n return get_user_model().objects.create_user(email, password)", "def setUp(self):\n self.new_user = User('JosphatOtieno','jose@otis45')", "def setUp(self):\n self.user1 = User.objects.create_user(username='jack', email='[email protected]', password='secret')\n self.user1.first_name = \"Jack\"\n self.user1.last_name = \"Smith\"\n self.user1.save()", "def create_new_user():\n return get_user_model().objects.create_user(\n email='[email protected]',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def getTestUser():\n allUsers = User.objects.all()\n if len(allUsers) > 0 :\n return allUsers[0]\n else :\n return User.objects.create_user(username='profiletester',\n email='[email protected]',\n password='superduperpassword2000')", "def setUp(self):\n self.new_user = User(username='burens', password='12345')", "def setUp(self):\n self.client = Client()\n self.user = User.objects.create_user('testuser', '[email protected]', 'q2w3E$R%')", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def _create_test_user(\n user_name,\n password='password',\n org=None,\n is_org_admin=False\n):\n test_user = OcUser().setup_user(\n username=user_name,\n email=user_name + '@email.cc',\n first_name=user_name + '_first_name',\n last_name=user_name + '_last_name'\n )\n\n if org:\n # mapping user to org\n oui = OrgUserInfo(test_user.id)\n oui.setup_orguser(org)\n\n # making a user an org admin\n if is_org_admin:\n oui.make_org_admin(org.id)\n\n test_user.set_password(password)\n test_user.save()\n return test_user", "def users_create():", "def test_create_with_username(self):\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n self.assertEquals(user.getUserName(), '[email protected]')\n\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=False)\n\n user = api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n self.assertEquals(user.getUserName(), 'chuck')", "def test_able_to_create_a_user():\n response = 
api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])", "def setUp(self):\n self.new_user = User('Valentine', 'Robai', '0712345678', '[email protected]', 'vrobai',\n 'password')", "def sample_user_fourth(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name4\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def setUp(self):\n self.client = Client()\n #creamos un usuario en la db\n self.user = User.objects.create_user('super', '[email protected]', 'super')", "def create_user(first_name,last_name,email,password):\n\n\tnew_user = User(first_name,last_name,email,password)\n\treturn new_user", "def test_create_user(self):\n email = '[email protected]'\n password = 'testPass'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertEqual(user.role, Role.PLAYER)\n self.assertTrue(user.check_password(password))\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_staff)", "def setUp(self):\n self.new_user = User(\"Juma\",\"12345\")", "def create_testuser(app, created_models, verbosity, **kwargs):\n if not settings.DEBUG:\n return\n try:\n auth_models.User.objects.get(username='test')\n except auth_models.User.DoesNotExist:\n print '*' * 80\n print 'Creating test user -- login: test, password: test'\n print '*' * 80\n assert auth_models.User.objects.create_superuser('test', '[email protected]', 'test')\n else:\n print 'Test user already exists'", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def create_new_user(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n\n return user_created", "def setUp(self):\n\n self.user = self.client.users.create({})", "def user():\n\n user = User.objects.create(name='Janek', surname='Kowalski',\n internal_id='PUHgjdJ', is_administrator=True,\n is_payment_creator=True, is_payment_approver=False,\n can_delete_payment=True)\n return user", "def add_testuser(self):\n user = UserFactory.create()\n user.username = 'testuser'\n user.set_password('testuser')\n user.save()\n return user.profile", "def test_add_user(self):\n pass", "def sample_user_second(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name2\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def test_create_user(self):\n self.login()\n res = self.submit()\n\n assert res.status_code == 200", "def test_create_simple_user(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'password231',\n 'name': 'vasia'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n u = get_user_model().objects.get(**res.data)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertTrue(u.check_password(self.payload['password']))\n self.assertEqual(u.email, self.payload['email'])", "def test_create(self):\n userValue = {'name': 'User Test 1',\n 'login': 'usertest1',\n 'user_profile_id': self.user_profile2.id,\n }\n Users = self.env['res.users']\n user_test = Users.create(userValue)\n newUser = self.env['res.users'].browse(user_test.id)\n self.assertEqual(userValue['name'], newUser['name'])", "def new_user():\n pass", "def 
test_create_user(self):\n User.objects.create_user(username='abc', password='abcpass', email='[email protected]')\n user_obj = User.objects.get(username='abc')\n self.assertTrue(user_obj.email, \"[email protected]\")\n self.assertEqual(str(user_obj), \"abc\")", "def create_users(self):\n from django.contrib.auth.models import User\n user = User.objects.create_user('red', '', 'red')\n user = User.objects.create_user('green', '', 'green')\n user = User.objects.create_user('blue', '', 'blue')", "def test_create_user(self):\n self.assertIsInstance(\n User.objects.create_user(username=\"username\", email=\"[email protected]\", password=\"password\"), User)", "def setUp(self):\n self.new_user = User(\"Hamisi\",\"python\")", "def create_user(self):\n return UserFactory.create()", "def setUp(self):\n\n self.user_1 = User.objects.create_user(\n first_name=\"John\",\n last_name=\"Kenedy\",\n username=\"johnny\",\n password=\"Phrase908\",\n email=\"[email protected]\",\n )\n self.user_2 = User.objects.create_user(\n first_name=\"Kent\",\n last_name=\"Philip\",\n username=\"kenty\",\n password=\"Phrase908\",\n email=\"[email protected]\",\n )", "def test_good_user_creation(self):\n data = json.dumps({\n \"username\" : \"mark\", \"email\" : \"[email protected]\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 201)", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def test_user_creation(self):\n username = 'Smith'\n password = 'password'\n email = '[email protected]'\n new_user = User.objects.create_user(username)\n new_user.set_password(password)\n new_user.email = email\n new_user.save()\n new_profile = Profile()\n new_profile.user = new_user\n new_profile.save()\n self.assertEqual(username, new_user.username)\n self.assertEqual(email, new_user.email)\n self.assertTrue(authenticate(username=new_user.username, password=password))", "async def create_fake_user(db: AsyncpgStorage, data=None) -> UserInfoDict:\n data = data or {}\n data.setdefault(\"password\", \"secret\")\n data.setdefault(\"status\", UserStatus.ACTIVE.name)\n data.setdefault(\"role\", UserRole.USER.name)\n params = random_user(**data)\n\n user = await db.create_user(params)\n user[\"raw_password\"] = data[\"password\"]\n return user", "def test_admin_create_user(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Summer Love has been registered')\n self.assertEqual(resp.status_code, 201)", "def test_create_user(self):\n user = User(email=\"[email protected]\", password=\"testpassword\")\n\n self.assertEqual(user.email, \"[email protected]\")\n self.assertNotEqual(user.password, \"testpassword\")\n self.assertFalse(user.confirmed)\n self.assertIsNone(user.confirmed_at)\n self.assertIsNotNone(user.created_at)\n self.assertIsNotNone(user.confirmation_token)", "def sample_user_fifth(email=\"[email protected]\",\n password=\"password123\",\n 
name=\"some name5\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def _create_user(self, username, password, domain_id, project_id):\n request = {\n \"user\": {\n \"name\": username,\n \"password\": password,\n \"domain_id\": domain_id,\n \"default_project_id\": project_id,\n \"description\": \"description\",\n \"email\": \"[email protected]\",\n \"enabled\": True,\n }\n }\n response = self.client.post(USER_PATH, data=json.dumps(request),\n headers=HEADERS)\n if response.status_code == 409:\n return\n elif response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create test user.\")", "def setUp(self):\n a, b, c = (\n User.objects.create_user(guy, email=\"%[email protected]\" % guy, password=guy)\n for guy in \"abc\"\n )\n a.is_superuser = True\n a.save()", "def test_createuser():\n url = baseUrl + userurl\n payload = user_payload\n logging.info(\"Create a user: %s\" % payload)\n r = requests.post(url, data=json.dumps(payload), headers=header)\n assert r.status_code == 201\n resp = r.text\n assert resp == 'Success'", "def setUp(self):\n\n self.new_user = User(\"Danlon\", \"Situma\", \"Dasi202\", \"passcode\")", "def create_user(self, conn, name, password, group):\n user = conn.user.allocate(name, password, \"\", [group])\n return user", "def setUp(self):\n self.new_user = User(username=\"Hey\")\n self.new_user.save()", "def new_user(testapp):\n SessionFactory = testapp.app.registry[\"dbsession_factory\"]\n with transaction.manager:\n dbsession = get_tm_session(SessionFactory, transaction.manager)\n new_user = User(username=\"test\", password=pwd_context.hash(\"test\"))\n dbsession.add(new_user)", "def test_create_user_endpoint_creates_user(caplog):\n caplog.set_level('INFO')\n\n _request_create_user(SEED_USER_DATA)\n created_user = Advisor.objects.get(email=SEED_USER_DATA['email'])\n\n user_data_keys = SEED_USER_DATA.keys() - set(['token'])\n for key in user_data_keys:\n assert str(getattr(created_user, key)) == SEED_USER_DATA[key]\n\n user_info = [\n 'Creating a user: {',\n f' \"dit_team_id\": \"{SEED_USER_DATA[\"dit_team_id\"]}\",',\n f' \"email\": \"{SEED_USER_DATA[\"email\"]}\",',\n f' \"first_name\": \"{SEED_USER_DATA[\"first_name\"]}\",',\n f' \"last_name\": \"{SEED_USER_DATA[\"last_name\"]}\",',\n f' \"sso_email_user_id\": \"{SEED_USER_DATA[\"sso_email_user_id\"]}\"',\n '}',\n ]\n user_token = f'Created a token `{SEED_USER_DATA[\"token\"]}` for user {created_user.id}.'\n assert caplog.messages == [\n '\\n'.join(user_info),\n user_token,\n ]", "def test_create_user_object():\n from .scripts.initializedb import create_user_object\n user_object = create_user_object(\"test\", \"test\", \"test\")\n assert isinstance(user_object, User)", "def add_testuser(self):\n user = UserFactory.create()\n user.username = 'testuser'\n user.set_password('testuser')\n user.is_active = True\n user.save()\n return user.profile", "def create_user(user_name: str):\n user = User()\n user.username = user_name\n user.save()\n return user", "def test_create_user(self) -> None:\n\n u1 = self.register_user(\"u1\", \"pass\")\n\n u1stats = self._get_current_stats(\"user\", u1)\n\n assert u1stats is not None\n\n # not in any rooms by default\n self.assertEqual(u1stats[\"joined_rooms\"], 0)", "def test_create(km_user_factory):\n models.Profile.objects.create(\n is_private=True, km_user=km_user_factory(), name=\"My Profile\"\n )", "def test_user():\n user_data = {\n \"name\": \"Brad\",\n \"username\": \"brad345\",\n \"email\": 
\"[email protected]\",\n \"password\": \"facebook\",\n \"location\": {\n \"city\": \"Philadelphia\",\n \"state\": \"Pennsylvania\",\n \"country\": \"United States\"\n }\n }\n return UserFactory.create_user(user_data)", "def create_user(fname, lname, email, password, phone_number):\n user = User(fname = fname, lname = lname , email = email ,password = password, phone_number = phone_number)\n #setting password hash\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n return user", "def test_create_user_same_username(self):\n first_name = \"a\"\n last_name = \"a\"\n username = \"a\"\n email = \"a\"\n password = \"a\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, username, email, password)\n self.assertFalse(result)", "def test_create_new_user(self):\n\n\t\tdata = {'username': u'Test_User',\n\t\t\t\t\t'password': u'test',\n\t\t\t\t\t'work': u'88 7th Avenue, New York, NY, United States',\n\t\t\t\t\t'home': u'152 Lexington Avenue, New York, NY, United States',\n\t\t\t\t\t'homeLngLat': u'-73.98199699999998 40.743772',\n\t\t\t\t\t'workLngLat': u'-74.0014936 40.7396046'}\n\n\t\t# Add Test_User to the database\n\t\tserver.create_new_user(data)\n\n\t\tnew_user = db.session.query(User).filter(User.username=='Test_User').one()\n\n\t\t# new_user would return none if it did not exist in the db\n\t\tself.assertTrue(new_user, 'Test_User was not sucessfully added to db.')\n\t\tself.assertNotEqual(new_user.password, 'password', 'Password likely not hashed before stored in db.')", "def setUpTestUsers(self) -> None:\n self.password = \"thisisasecret\"\n self.other = get_user_model().objects.create_user(\"other\", password=self.password)\n self.user = get_user_model().objects.create_user(\"user\", password=self.password)\n self.admin = get_user_model().objects.create_superuser(\"admin\", password=self.password)\n self.anonymous = AnonymousUser()", "def setUp(self):\n user = UserFactory(username='mike', email='[email protected]')\n user.set_password('password')\n user.save()" ]
[ "0.87459725", "0.87459725", "0.87459725", "0.8526213", "0.8520784", "0.82702565", "0.8198614", "0.8178401", "0.81581074", "0.81339604", "0.8086235", "0.80827844", "0.80777", "0.8074274", "0.80639285", "0.80639285", "0.80639285", "0.8062536", "0.8042246", "0.8039984", "0.8032341", "0.80252033", "0.80070364", "0.80064476", "0.79885095", "0.79758465", "0.79626256", "0.7961329", "0.79460025", "0.79256684", "0.7923155", "0.79144216", "0.788768", "0.7881218", "0.78766596", "0.78754437", "0.7863284", "0.786293", "0.7855407", "0.78457016", "0.78320205", "0.7814733", "0.7777078", "0.77752197", "0.77731586", "0.7769764", "0.77624834", "0.774824", "0.77447623", "0.7740116", "0.7734626", "0.77324635", "0.77111745", "0.77090085", "0.7703185", "0.7697099", "0.769462", "0.768834", "0.76803714", "0.7678778", "0.7671049", "0.76489913", "0.76488936", "0.7647386", "0.76426786", "0.76419795", "0.7635664", "0.763458", "0.762409", "0.76240736", "0.7623925", "0.76237833", "0.76202047", "0.7616832", "0.7614553", "0.76086324", "0.75982744", "0.7594729", "0.7594328", "0.7585079", "0.7583056", "0.7576574", "0.75741214", "0.75717825", "0.75621825", "0.75543183", "0.7552951", "0.75528955", "0.7550141", "0.75497776", "0.7538264", "0.7535235", "0.7517215", "0.75167376", "0.7511668", "0.7505923", "0.7495094", "0.7489654", "0.74865186", "0.74762636" ]
0.75613874
85
create data that uses the Question model
def create_question(user, title='title', text='text'):
    return Question.objects.create(created_by=user, title=title, text=text)
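Below is a minimal usage sketch for the factory above, assuming a standard Django test setup: the Question model's created_by/title/text fields come from the snippet itself, while the test class, username, and assertions are illustrative assumptions, not part of the dataset.

from django.contrib.auth import get_user_model
from django.test import TestCase

class CreateQuestionHelperTest(TestCase):
    def test_factory_persists_question(self):
        # Hypothetical user; create_user here is Django's stock manager method.
        user = get_user_model().objects.create_user(username='asker', password='secret')
        question = create_question(user, title='Tabs or spaces?', text='Discuss.')
        # The factory should save the row and attach the creating user.
        self.assertEqual(question.created_by, user)
        self.assertEqual(question.title, 'Tabs or spaces?')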
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setData(self):\n #offset = datetime.timedelta(prefs.getNoOfDaysBeforeQuestionSchedule())\n date_formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n def _q_data_item(q):\n item = {}\n item[\"qid\"]= \"q_%s\" % q.question_id\n if q.question_number:\n item[\"subject\"] = u\"Q %s %s\" % (q.question_number, q.short_name)\n else:\n item[\"subject\"] = q.short_name\n item[\"title\"] = q.short_name\n item[\"result_item_class\"] = \"workflow-state-%s\" % q.status\n item[\"url\"] = url.set_url_context(\"questions/obj-%s\" % q.question_id)\n item[\"status\"] = misc.get_wf_state(q)\n item[\"status_date\"] = date_formatter.format(q.status_date)\n item[\"owner\"] = \"%s %s\" %(q.owner.first_name, q.owner.last_name)\n item[\"type\"] = _(q.type)\n item[\"to\"] = q.ministry.short_name\n return item\n self._data = [ _q_data_item(question) for question in self.query.all() ]", "def create(self, validated_data):\n question = Question.objects.create(**validated_data)\n question.save()\n if 'tag' in self.context['request'].data:\n tag = Tag.objects.get(id=self.context['request'].data['tag'])\n question_tag = QuestionTag.objects.create(question=question,\n tag=tag)\n question_tag.save()\n return question", "def data_for_question(self, question_type):\n\t\treturn {}", "def create_questionnaire_with(self, questionnaire_data):\n questionnaire_code = fetch_(QUESTIONNAIRE_CODE, from_(questionnaire_data))\n gen_ramdom = fetch_(GEN_RANDOM, from_(questionnaire_data))\n if gen_ramdom:\n questionnaire_code = questionnaire_code + generateId()\n self.driver.find_text_box(QUESTIONNAIRE_CODE_TB).enter_text(questionnaire_code)\n self.create_default_question(questionnaire_data[DEFAULT_QUESTION], DEFAULT_QUESTION_LINK)\n for question in fetch_(QUESTIONS, from_(questionnaire_data)):\n self.driver.find(ADD_A_QUESTION_LINK).click()\n self.fill_question_and_code_tb(question)\n self.SELECT_FUNC[fetch_(TYPE, from_(question))](question)\n return self", "def create_question(self):\n\n locations = [\"meetup_id\", \"user_id\", \"title\", \"body\"]\n\n try:\n\n user = self.sql.get_username_by_id(\n int(self.question_details[\"user\"]))\n\n meetup = self.sql.fetch_details_by_criteria(\n \"meetup_id\", self.question_details[\"meetup\"], \"meetups\")\n\n existing = self.sql.fetch_details_if_text_exists(\n \"title\", self.question_details[\"title\"], \"questions\")\n\n title = self.question_details[\"title\"]\n\n body = self.question_details[\"body\"]\n\n except KeyError as keyerror:\n return self.makeresp(\"{} is a required field\".format(keyerror), 400)\n\n isempty = DataValidators(\n self.question_details).check_values_not_empty()\n\n if isinstance(isempty, str):\n return self.makeresp(isempty, 400)\n\n if not user:\n return self.makeresp(\"User not found\", 404)\n\n if not meetup:\n return self.makeresp(\"Meetup not found\", 404)\n\n if not self.check_is_error(existing):\n\n if [meet_id[1] for meet_id in existing if self.question_details[\"meetup\"] in meet_id]:\n\n return self.makeresp(\"This Question already exists\", 409)\n\n question = {\n \"meetup\": self.question_details[\"meetup\"],\n \"createdBy\": self.question_details[\"user\"],\n \"title\": title,\n \"body\": body\n }\n\n question_id = SqlHelper(question).save_to_database(\n locations, \"questions\")\n\n return self.makeresp(\n {\n \"id\": question_id,\n \"user\": question[\"createdBy\"],\n \"meetup\": question[\"meetup\"],\n \"title\": question[\"title\"],\n \"body\": question[\"body\"]\n }, 201)", "def create_question():\n if request.content_type != 
\"application/json\":\n abort(415)\n question_text = request.json['question']\n answer = request.json['answer']\n difficulty = request.json['difficulty']\n category = request.json['category']\n\n question_object = Question(question_text, answer, category, difficulty)\n db.session.add(question_object)\n db.session.commit()\n return jsonify({\n \"success\": True\n }), 201", "def gen_questions(self, number_of_questions):", "def initialize_new_questionnaire(questionnaire, option_type, uuid):\r\n q = {}\r\n if (type(questionnaire) == dict):\r\n for key, val in questionnaire.items():\r\n if key != 'index':\r\n\r\n q[key] = [val] if type(val) != list else val\r\n questionnaire = pd.DataFrame(q)\r\n\r\n\r\n if \"_questionnaire\" not in option_type:\r\n option_type = option_type + \"_questionnaire\"\r\n\r\n option_type = option_type.lower()\r\n if 'option_type' not in questionnaire:\r\n questionnaire['option_type'] = [option_type]\r\n questionnaire['uuid'] = [uuid]\r\n questionnaire['timestamp'] = [datetime.datetime.utcnow()]\r\n print(\"this is questionaire: \", questionnaire)\r\n\r\n questionnaire=questionnaire.set_index('uuid')\r\n print(\"this is questionaire: \", questionnaire)\r\n questionnaire.to_sql(option_type, con=Database.DATABASE.engine, if_exists=\"append\", index=True)", "def __init__(self, data={}):\n\n self.config = db_config(BaseConfig.DATABASE_URI)\n self.table = 'questions'\n self.title = data.get('title')\n self.body = data.get('body')\n self.q = data.get('q')\n self.question_id = data.get('id')\n self.user_id = data.get('user_id')\n self.now = str(datetime.now())\n self.logged_in_user_id = Auth.get_logged_in_user(request)[0]['data']['user_id']", "def __init__(self, createdby, meetup, title, body, votes,createdOn):\n self.question_id = len(Question.question_list) + 1\n self.createdon = datetime.now()\n self.createdby = createdby\n self.meetup = meetup\n self.title = title\n self.body = body\n self.votes = votes", "def create_question():\n body = request.get_json()\n\n question_text = body.get('question', None)\n answer = body.get('answer', None)\n category = body.get('category', 1)\n difficulty = body.get('difficulty', 1)\n\n try:\n question = Question(question=question_text,\n answer=answer,\n category=category,\n difficulty=difficulty)\n question.insert()\n\n selection = Question.query.order_by(Question.id).all()\n current_questions = paginate_questions(request, selection)\n\n return jsonify({\n 'success': True,\n 'created': question.id,\n 'questions': current_questions,\n 'total_questions': len(selection)\n })\n\n except Exception:\n abort(422)", "def __init__(self, name):\n self.name = name\n self.questions = []", "def convert_question(self, q):\n\n item = {}\n item['id'] = q['id']\n item['title'] = q['title']\n item['body'] = q['text']\n item['author_id'] = q['author']['id']\n item['author'] = q['author']['username']\n item['url'] = q['url']\n item['score'] = q['score']\n item['score_label'] = self.convert_count(q['score'])\n item['answer_count'] = q['answer_count']\n item['answer_count_label'] = self.convert_count(q['answer_count'])\n item['view_count'] = q['view_count']\n item['view_count_label'] = self.convert_count(q['view_count'])\n item['added_at'] = q['added_at']\n item['added_at_label'] = timeago.format(datetime.fromtimestamp(int(q['added_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['last_activity'] = q['last_activity_at']\n item['last_activity_label'] = timeago.format(datetime.fromtimestamp(int(q['last_activity_at']), TIMEZONE), datetime.now(TIMEZONE))\n 
item['has_more_comments'] = False\n item['has_more_answers'] = False\n item['has_accepted_answer'] = q['has_accepted_answer']\n item['closed'] = q['closed']\n\n item['tags'] = []\n for tag in q['tags']:\n item['tags'].append({'name': tag})\n\n return item", "def _create_examples(self, lines, kb_data, set_type):\n examples = []\n for idx, line in enumerate(lines):\n item = json.loads(line.strip())\n question_id = \"%s-%s\" % (set_type, idx)\n \n context_a_list = kb_data[idx]['answerA']\n context_b_list = kb_data[idx]['answerB']\n context_c_list = kb_data[idx]['answerC']\n\n context_a = \"\"\n for l in context_a_list[:1]:\n context_a += l.replace(\"\\n\",\". \")\n context_a = context_a[:-1]\n\n context_b = \"\"\n for l in context_b_list[:1]:\n context_b += l.replace(\"\\n\",\". \")\n context_b = context_b[:-1]\n\n context_c = \"\"\n for l in context_c_list[:1]:\n context_c += l.replace(\"\\n\",\". \")\n context_c = context_c[:-1]\n \n \n question = item[\"context\"] + item[\"question\"]\n endings = [item[\"answerA\"],item[\"answerB\"],item[\"answerC\"] ]\n label = item[\"correct\"]\n #race_id = \"%s-%s\" % (set_type, data_raw[\"race_id\"])\n #article = data_raw[\"article\"]\n #for i in range(len(data_raw[\"answers\"])):\n #truth = str(ord(data_raw[\"answers\"][i]) - ord(\"A\"))\n #question = data_raw[\"questions\"][i]\n #options = data_raw[\"options\"][i]\n\n examples.append(\n InputExample(\n example_id=question_id,\n question=question,\n contexts=[context_a,context_b,context_c],\n endings=[endings[0], endings[1], endings[2]],#, options[3]\n label=label,\n )\n )\n return examples", "def test_create_questions(self):\n res = self.client().post('/questions',\n json={\n \"question\": \"What is chemical \\\n composition of water\",\n \"answer\": \"H2O\",\n \"category\": 1,\n \"difficulty\": 2\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created'])", "def questions_collection(request):\n if request.method == \"POST\":\n data = json.loads(request.body)\n task = Task.objects.get(id=data.get(\"taskId\", \"\"))\n commenter = User.objects.get(username=data.get(\"commenter\", \"\"))\n content = data.get(\"content\", \"\")\n\n question = Question(\n task=task,\n commenter=commenter,\n content=content\n )\n question.save()\n return JsonResponse({\"message\": \"Question created successfully\"}, status=201)", "def __init__(self):\r\n\t\twith open(\"eqs.json\") as qData:\r\n\t\t\tself.questions = json.load(qData)\r\n\t\twith open(\"eqsave.json\") as uData:\r\n\t\t\tself.records = json.load(uData)\r\n\t\tself.types = {\"1\": \"Reformer\", \"2\": \"Helper\", \"3\": \"Achiever\", \"4\": \"Individualist\", \"5\": \"Investigator\", \"6\": \"Loyalist\", \"7\": \"Enthusiast\", \"8\": \"Challenger\", \"9\": \"Peacemaker\"}", "def __init__(self, exam_name):\n\n self.name = exam_name\n self.questions = []", "def create_question(question_text, days, choices=('choice 1',)):\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n for choice in choices:\n question.choice_set.create(choice_text=choice)\n return question", "def create(self, request):\n if not hasattr(request, \"data\"):\n request.data = request.POST\n attrs = self.flatten_dict(request.data)\n if not attrs.get('include_answer_page', None):\n if 'answer_page_title' in attrs:\n del attrs['answer_page_title']\n if 'answer_page_link' in attrs:\n del attrs['answer_page_link']\n kn = 
Knowledge(question = attrs['question'], \n search_keywords = attrs.get('search_keywords', ''),\n answer_summary = attrs.get('answer_summary', ''),\n answer_page_title = attrs.get('answer_page_title', ''),\n answer_page_link = attrs.get('answer_page_link', ''),\n tags = attrs.get('tags', ''),\n user=request.user)\n kn.save()\n return kn", "def __init__(self, question):\n self.question = question\n self.responses = []", "def example_data():\n\n # In case this is run more than once, empty out existing data\n User.query.delete()\n Answer.query.delete()\n Question.query.delete()\n\n # Add sample users, answers and questions\n cat = User(user_name=\"Cat\", email=\"[email protected]\", password=\"abc\")\n dog = User(user_name=\"Dog\", email=\"[email protected]\", password=\"abc\")\n horse = User(user_name=\"Horse\", email=\"[email protected]\", password=\"abc\")\n\n db.session.add_all([cat, dog, horse])\n db.session.commit()\n\n question_1 = Question(question_id=\"q1\", title=\"Should we save the planet?\", description=\" \", user_id=3)\n question_2 = Question(question_id=\"q2\", title=\"Is recycling pointless?\", description=\" \", user_id=3)\n question_3 = Question(question_id=\"q3\", title=\"Mustard or Ketchup?\", description=\" \", user_id=1)\n\n db.session.add_all([question_1, question_2, question_3])\n db.session.commit()\n\n answer_1 = Answer(question_id=\"q1\", user_id=1, body=\"Yes, I agree.\")\n answer_2 = Answer(question_id=\"q2\", user_id=2, body=\"No, I disagree.\")\n answer_3 = Answer(question_id=\"q3\", user_id=3, body=\"Hrm, I'm indifferent.\")\n\n db.session.add_all([answer_1, answer_2, answer_3])\n db.session.commit()", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data ={}\n data[\"qid\"]= (\"m_\" + str(result.motion_id))\n data[\"subject\"] = u\"M \" + str(result.motion_number) + u\" \" + result.short_name\n data[\"title\"] = result.short_name\n if result.approval_date:\n data[\"result_item_class\"] = (\"workflow-state-\" + \n result.status + \"sc-after-\" + \n datetime.date.strftime(result.approval_date, \"%Y-%m-%d\"))\n else:\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"motions/obj-\" + str(result.motion_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n data[\"to\"] = \"\"\n data_list.append(data)\n self._data = data_list", "def mutate(self, info, question_text):\n question = Question(\n question_text=question_text,\n pub_date=now()\n )\n question.save()\n ok = True\n return CreateQuestion(\n question=question,\n ok=ok\n )", "def create_answer(question, user):\n return Answer.objects.create(question=question,answered_by=user)", "def __init__(self, question, answer):\n\n self.question = question\n self.answer = answer\n\n self.q_and_a = {\n 'Question:': self.question,\n 'Correct Answer:': self.answer,\n }", "def test_create_question(self):\n res = self.client().post('/api/questions', json=self.new_question)\n res_body = json.loads(res.data)\n\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res_body['success'])\n self.assertTrue(res_body['created'])\n \n new_question = Question.query.filter(Question.id == res_body['created']).one_or_none()\n self.assertTrue(new_question)", "def add_question():\n data = 
request.get_json()\n question = data['question']\n answer = data['answer']\n difficulty = data['difficulty']\n category = data['category']\n for key, value in data.items():\n if not value:\n return jsonify({'success': False, 'error': 400,\n 'message': f'{key} field is missing a value'\n }), 400\n new_question = Question(question, answer, category, difficulty)\n new_question.insert()\n return jsonify({'success': True, 'message': 'Question was created',\n 'question': new_question.format()}), 201", "def get_questions():\n fields_dt = ['name', 'category', 'key', 'text']\n questions = frappe.db.get_list('Big Five Factor Model',\n fields=fields_dt)\n\n # Ordenamiendo random: se aplica sobre el objeto original\n suffle_data = random.shuffle(questions)\n\n return questions", "def ask_question():\n title_question = request.form.get(\"title\")\n question = request.form.get(\"question\")\n\n date_string = datetime.today().strftime('%Y-%m-%d')\n \n ask = Question(user_id = session[\"user_id\"],question_created=date_string, title_question = title_question, question = question)\n\n db.session.add(ask)\n db.session.commit()\n\n return \"question added\"", "def _create_mc_question(self, description):\n\n mc_dict = {\n 'description': description,\n 'type': models.QuestionDTO.MULTIPLE_CHOICE,\n 'choices': [\n {\n 'text': 'correct answer',\n 'score': 1.0\n },\n {\n 'text': 'incorrect answer',\n 'score': 0.0\n }],\n 'version': '1.5'\n }\n question = models.QuestionDTO(None, mc_dict)\n qid = models.QuestionDAO.save(question)\n return models.QuestionDAO.load(qid)", "def post(self, request):\n\n data = request.data\n\n try:\n Question = Questions(**data)\n Question.save()\n LOGGER.info(\"Question created successfully\")\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": str(error)})\n return Response({\"status\": \"SUCCESS\", \"message\": \"Record saved successfully\"})", "def __init__(self):\n\n self.question_list = self.read_quiz_json()", "def create_question(question_text, days):\n\ttime = timezone.now()+dt.timedelta(days=days)\n\treturn Question.objects.create(question_text=question_text, pub_date=time)", "def create_question(question_text, pub_date, end_date):\n time = timezone.now() + datetime.timedelta(days=pub_date)\n time2 = timezone.now() + datetime.timedelta(days=end_date)\n return Question.objects.create(question_text=question_text, pub_date=time, end_date=time2)", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data = {}\n data[\"qid\"] = (\"i-\" + str(result.parliamentary_item_id))\n if type(result)==domain.AgendaItem:\n g = u\" \" + result.group.type + u\" \" + result.group.short_name\n else:\n g = u\"\" # !+ g?\n data[\"subject\"] = result.short_name\n data[\"title\"] = result.short_name\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"%ss/obj-%i\" % (\n result.type, result.parliamentary_item_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n if type(result)==domain.Question:\n data[\"to\"] = result.ministry.short_name\n else:\n data[\"to\"]= u\"\"\n # remember original domain object\n data[\"id\"] = result.parliamentary_item_id\n data[\"_obj\"] = result\n # append 
processed result item\n data_list.append(data)\n self._data = data_list", "def fetchQuestions (self):\n # Create query and get data\n query = \"SELECT * from \" + self.dbTable + \" where main_ID = '\" + str(self.ID) + \"'\";\n data = self.sqlConnection.executeSelectQuery(query);\n \n # Convert the data into Question objects\n self.convertQuestions(data)", "def test_create_new_question(self):\n response = self.client().post('/questions', json=self.new_question)\n body = json.loads(response.data)\n\n question = Question.query.filter_by(id=body['created']).one_or_none()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['success'], True)\n self.assertIsNotNone(question)", "def __init__(self, raw_question_list):\n\n self.question_list = []\n self.total_penetrance = 0.0\n self.question_count = 0\n\n for raw_q in raw_question_list:\n q = Question(raw_q)\n self.question_list.append(q)\n self.total_penetrance += q.penetrance\n self.question_count += 1\n\n return None", "def test_create_new_question(self):\n\n # get number of questions before post\n questions_before = Question.query.all()\n\n # create new question and load response data\n response = self.client().post('/questions', json=self.new_question)\n data = json.loads(response.data)\n\n # get number of questions after post\n questions_after = Question.query.all()\n\n # see if the question has been created\n question = Question.query.filter_by(id=data['created']).one_or_none()\n\n # check status code and success message\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data['success'], True)\n\n # check if one more question after post\n self.assertTrue(len(questions_after) - len(questions_before) == 1)\n\n # check that question is not None\n self.assertIsNotNone(question)", "def _create_examples(self, lines, set_type):\n examples = []\n for idx, line in enumerate(lines):\n item = json.loads(line.strip())\n question_id = \"%s-%s\" % (set_type, idx)\n context = item[\"context\"]\n question = item[\"question\"]\n endings = [item[\"answerA\"],item[\"answerB\"],item[\"answerC\"] ]\n label = item[\"correct\"]\n #race_id = \"%s-%s\" % (set_type, data_raw[\"race_id\"])\n #article = data_raw[\"article\"]\n #for i in range(len(data_raw[\"answers\"])):\n #truth = str(ord(data_raw[\"answers\"][i]) - ord(\"A\"))\n #question = data_raw[\"questions\"][i]\n #options = data_raw[\"options\"][i]\n\n examples.append(\n InputExample(\n example_id=question_id,\n question=question,\n contexts=[context,context,context],\n endings=[endings[0], endings[1], endings[2]],#, options[3]\n label=label,\n )\n )\n return examples", "def _create_response_model(self, data):\n pass", "def create_quiz():\n try:\n\n quiz_category_id = request.json.get(\"quiz_category_id\")\n previous_question_ids = request.json.get(\"previous_question_ids\")\n questions = Question.query.filter(\n ~Question.id.in_(previous_question_ids)\n )\n\n if quiz_category_id != 0:\n questions = questions.filter(\n Question.category_id == quiz_category_id\n )\n\n questions = questions.all()\n\n if len(questions) > 0:\n question = random.choice(questions).format()\n else:\n question = None\n\n response = jsonify({\"success\": True, \"question\": question})\n\n except AttributeError:\n abort(400)\n\n return response", "def __init__(self, data):\n self.user_id = data['user_id']\n self.condition_id = data['condition_id']\n self.condition = data['condition']\n self.condition_details = data['condition_details']\n self.user_answer = data['user_answer']", "def load_question(data, 
mm, request, ignore_similar=False):\n try:\n subject = Subject.objects.get(name=data['subject'])\n topic = Topic.objects.get(name=data['topic'], subject=subject)\n\n question = Question(text=data['question'], topic=topic)\n question.full_clean()\n answers = []\n for ans in data['answers']:\n answer = Answer()\n answer.text = ans['text']\n answer.is_correct = ans['is_correct']\n answers.append(answer)\n\n if question.similar_exists() and not ignore_similar:\n raise SimilarQuestionError(_(\"A similar question exists\"),\n data)\n else:\n question.save()\n for answer in answers:\n answer.question = question\n answer.full_clean()\n answer.save()\n mm.added.append(question)\n\n except Subject.DoesNotExist:\n mm.no_subject.append((data['subject'], data['question']))\n\n except Topic.DoesNotExist:\n mm.no_topic.append((data['topic'], data['question']))\n\n except ValidationError as err:\n mm.validation_error.append((err, data['question']))\n\n except SimilarQuestionError as err:\n request.session['duplicates'].append(err.data)", "def createQuestion(question_text, days):\n time = timezone.now() + datetime.timedelta(days = days)\n return Question.objects.create(question_text= question_text, pub_date = time)", "def return_questions_data():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n \n cursor.execute(\"select * from questions\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Answer']\n questions = {}\n for q,a in data:\n table.add_row([q,a])\n questions[q] = a\n conn.close()\n\n return table, questions", "def get_questions():\n count = 1\n for i in range(6):\n r = requests.get('https://api.stackexchange.com/2.2/questions?filter=withbody&site=eosio&pagesize=100&page={}'.format(count))\n data = json.loads(r.text)\n for item in data['items']:\n own = item['owner']['user_id']\n dsp = item['owner']['display_name']\n try:\n owner = User.objects.get(username=own, se_display_name=dsp)\n except Exception:\n owner = None\n tags = item['tags']\n ts = []\n if owner:\n for tag in tags:\n t, created = Tag.objects.get_or_create(name=tag)\n ts.append(t)\n q = Question.objects.create(owner=owner, se_question_id=item['question_id'], title=item['title'], body=item[\n 'body'], se_link=item['link'], is_answered=item['is_answered'], score=item['score'])\n for t in ts:\n q.tags.add(t)\n q.save()\n count += 1\n print(count)", "def create_question(question_text, days, create_choice=True):\n\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n if create_choice:\n question.choice_set.create(choice_text=\"Choice 1\", votes=0)\n return question", "def create_test_data(self):\n fake = Faker(['en_US', 'ja_JP', 'el_GR', 'de_DE'])\n\n self.actor_request = {\n 'name': fake.name(),\n 'age': random.randint(22, 88),\n 'gender': random.choice(['M', 'F'])\n }\n\n self.movie_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n 'releaseDate': str(fake.date_between())\n }\n\n self.actor_update_request = {\n 'name': fake.name(),\n }\n\n self.movie_update_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n }\n\n for _ in range(30):\n actor_name = fake.name()\n actor_age = random.randint(22, 88)\n actor_gender = random.choice(['M', 'F'])\n\n movie_title = fake.color_name() + ' ' + fake.street_suffix()\n movie_release_date = str(fake.date_between())\n\n actor = Actor(actor_name, actor_age, actor_gender)\n 
actor.insert()\n\n movie = Movie(movie_title, movie_release_date)\n movie.insert()\n\n for _ in range(20):\n actors = Actor.query.all()\n movies = Movie.query.all()\n\n actor_to_update = random.choice(actors)\n movie_to_update = random.choice(movies)\n actor_to_update.movies.append(movie_to_update)", "def generate_question(self, num_question = 10):\n\t\t\n\t\tquestions = []\n\t\tfor q in range(num_question):\n\t\t\tfor r in range(2):\n\t\t\t\tquestion = np.zeros(self.question_dim, dtype = np.float32)\n\t\t\t\tcolor = np.random.randint(len(COLOR))\n\t\t\t\tquestion[color] = 1.0\n\t\t\t\tquestion[6 + r] = 1.0\n\t\t\t\tquestion_label = np.random.randint(3)\n\t\t\t\tquestion[8 + question_label] = 1.0\n\t\t\t\tquestions.append(question)\n\t\treturn questions", "def get_or_create(cls, question, student, result, answer, correct=None):\n qa = QuestionAnswer.objects.filter(question=question, student=student,\n result=result).first()\n if qa:\n qa.answer = answer,\n qa.correct = correct\n else:\n ans_data = {\n 'question': question,\n 'student': student,\n 'result': result,\n 'answer': answer,\n 'correct': correct,\n }\n qa = QuestionAnswer(**ans_data)\n qa.save()\n return qa", "def _create_question_from_dict(self, d):\n question_type_str = d[Question.TYPE]\n d_copy = d.copy()\n\n # Todo: figure out a global setting for whether select all\n # that apply questions have an automatic none option.\n if question_type_str.startswith(u\"select all that apply\"):\n self._add_none_option_to_select_all_that_apply(d_copy)\n\n # hack job right here to get this to work\n if question_type_str.endswith(u\" or specify other\"):\n question_type_str = question_type_str[:len(question_type_str)-len(u\" or specify other\")]\n d_copy[Question.TYPE] = question_type_str\n self._add_other_option_to_multiple_choice_question(d_copy)\n return [self._create_question_from_dict(d_copy),\n self._create_specify_other_question_from_dict(d_copy)]\n question_class = self._get_question_class(question_type_str)\n # todo: clean up this spaghetti code\n d_copy[u\"question_type_dictionary\"] = self._question_type_dictionary\n if question_class:\n return question_class(**d_copy)\n return []", "def test_questions_answers_add_model(self):\n content = Content(header = \"Test_Header\", content = \"Test_Content\")\n question = Questions(question_text = \"Test_Question?\", content = content)\n answer = Answers(answer_text = \"Answer_Test\", correct = 0, question = question)\n db.session.add(content)\n db.session.add(question)\n db.session.add(answer)\n db.session.commit()\n self.assertEqual(Questions.query.filter_by(question_text = \"Test_Question?\").count(), 1)\n self.assertEqual(Answers.query.filter_by(answer_text = \"Answer_Test\", correct = 0, question = question).count(), 1)", "def addQuestion(self):\n self.questions.append(Question(self))", "def _createPoints(self):\n self.doc2quest = self._docMapping()\n\n self.unigram, self.bigram = invertedIndex(self.documents)\n self.points = [dataPoint(key, self) for key in self.questions.keys()]", "def create_question(question_text, days):\n time = timezone.now() + datetime.timedelta(days=days)\n return Question.objects.create(question_text=question_text, pub_date=time)", "def create_question(question_text, days):\n time = timezone.now() + datetime.timedelta(days=days)\n return Question.objects.create(question_text=question_text, pub_date=time)", "def create_question(question_text, days):\n time = timezone.now() + timezone.timedelta(days=days)\n return 
Question.objects.create(question_text=question_text, pub_date=time)", "def create_type_question_ft(self):\n for i in range(0, len(self.cleaned_questions.splitlines())):\n self.type_question_ft += '__label__'+self.types_q.splitlines()[i] + ' ' + self.cleaned_questions.splitlines()[i] + '\\n'", "def __init__(self, question, correct_answer):\n\n self.question = question\n self.correct_answer = correct_answer", "def question_new_validate():", "def create_question():\n try:\n\n search_term = request.json.get(\"search_term\")\n\n if search_term is not None:\n\n questions = (\n Question.query.filter(\n Question.question.ilike(f\"%{search_term}%\")\n )\n .order_by(Question.id)\n .all()\n )\n page = request.args.get(\"page\", 1, type=int)\n current_questions = paginate_questions(questions, page)\n\n categories = Category.query.order_by(Category.id).all()\n categories = {\n category.id: category.name for category in categories\n }\n\n response = jsonify(\n {\n \"success\": True,\n \"questions\": current_questions,\n \"total_questions\": len(questions),\n \"current_category_id\": None,\n \"categories\": categories,\n }\n )\n\n else:\n\n question = Question(\n question=request.json.get(\"question\"),\n answer=request.json.get(\"answer\"),\n category_id=request.json.get(\"category_id\"),\n rating=request.json.get(\"rating\"),\n difficulty=request.json.get(\"difficulty\"),\n )\n\n question.insert()\n\n response = jsonify(\n {\"success\": True, \"created_question_id\": question.id}\n )\n\n except AttributeError:\n abort(400)\n\n return response", "def __init__(self, question_list):\n self.question_list = question_list\n self.question_number = 0\n self.score = 0", "def process_question(qu):\n\n ## global ranking\n rank_info = {}\n rank_info_k = [\"viewcount\",\"score\",\"favoritecount\"]\n for k in rank_info_k:\n rank_info[k] = int(qu[k])\n qu.pop(k,None)\n\n rank_info[\"creationdate\"] = qu[\"creationdate\"]\n\n if qu[\"acceptedanswer\"]:\n qu[\"acceptedanswer\"] = list(qu[\"acceptedanswer\"])\n else:\n qu[\"acceptedanswer\"] = []\n\n qu.pop('comments',None) # discard comments, maybe add back later\n qu[\"rank_info\"] = rank_info\n\n return qu", "def create_question(meetup_id):\n\n data = request.get_json()\n\n if not data:\n return jsonify({\"Message\": 'Cannot send empty data'}),409\n else:\n title = request.get_json()['title']\n body = request.get_json()['body']\n \n \n val_input = {\"title\":title,\"body\":body}\n\n for key,value in val_input.items():\n if not value.strip():\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"{} cannot be empty\".format(key)\n })), 400\n\n question_object.add_question(meetup_id,title,body)\n\n return jsonify({\"status\": 201,\n \"data\":[{\"title\": title,\n \"user_id\":len(question_object.questions)+1,\n \"meetup\": meetup_id,\n \"body\": body}]}), 201", "def __init__(self, examdb, number_of_questions, intended_learning_outcome_used, course_code,\n course_version, exam_date, allow_same_tags=False, existing_questions=None):\n try:\n assert (isinstance(number_of_questions, int))\n self.numQuest = number_of_questions\n self.ILOUsed = list(intended_learning_outcome_used)\n\n assert (isinstance(course_code, str))\n self.course_code = course_code\n\n assert (isinstance(course_version, float))\n self.course_version = course_version\n\n assert (isinstance(exam_date, date))\n self.exam_date = exam_date\n\n assert (isinstance(allow_same_tags, bool))\n self.allow_same_tags = allow_same_tags\n\n except AssertionError as err:\n print(\"Generate Questions By Goal 
init: \" + str(err))\n return\n\n self.ExamDB = examdb\n self._exam_id = {\n 'exam_id': '',\n 'question_ids': [],\n 'declaration_id': [],\n 'bibliography_id': []\n }\n\n self._objects = {'Declarations': [],\n 'Questions': [],\n }\n self._days = 365 # Number of days that a question is \"quarantined\".\n\n if existing_questions:\n for _qid in existing_questions:\n self._exam_id['question_ids'].append(_qid)\n self._add_question_to_exam(_qid)\n self.numQuest -= len(existing_questions)\n\n if self.numQuest > 0: # If there are more questions to add, run generator algorithm\n self._gen_questions_by_goals()", "def create_models( self ):", "def post_question(self):\n self.post_meetup()\n return self.client.post(\"api/v2/questions\", headers={\"Authorization\": \"{}\".format(self.token())}, data=json.dumps(self.question), content_type='application/json')", "def fill_question(self, response, question_answer):\n question_answer['source_url'] = response.url\n\n question_answer['question_title'] = response.xpath('//*[@id=\"question-header\"]/h1/a/text()').extract_first()\n question_answer['question_body'] = BeautifulSoup(\n response.xpath(self.gt.css_to_xpath('.postcell .post-text')).extract_first()).text\n question_answer['question_tags'] = list(set(\n response.xpath('//*[contains(concat(\" \", normalize-space(@class), \" \"), \" post-tag \")]/text()').extract()))\n # would like to specify the hierarchy of the css tags\n question_answer['question_upvotes'] = int(response.xpath(\n '//*[contains(concat(\" \", normalize-space(@class), \" \"), \" vote-count-post \")]/text()').extract_first())\n question_answer['question_view_count'] = int(\n response.xpath(self.gt.css_to_xpath('#qinfo .label-key') + '/b/text()').extract()[1].split(' ')[0])\n\n author_name = response.xpath(\n self.gt.css_to_xpath('.owner .user-details') + '/a/text()').extract_first()\n question_answer['question_author'] = {'author_id': '{}_{}'.format(self.allowed_domains[0], author_name),\n 'author_name': author_name}\n\n se_date_format = '%b %d \\'%y at %H:%M' # if date not current year\n se_date_format_curr_year = '%b %d at %H:%M' # if date current year\n try:\n try:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format))\n except ValueError:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format_curr_year))\n except (ValueError, TypeError):\n pass\n # Look for duplicates\n duplicate_url = response.xpath(self.gt.css_to_xpath('.question-originals-of-duplicate')+'/ul/li/a/@href').extract_first()\n if duplicate_url:\n print('duplicate question')\n self.duplicate_count += 1\n print('duplicate question count: {}'.format(self.duplicate_count))\n duplicate_url = \"https://superuser.com\" + duplicate_url\n print(duplicate_url)\n self.logger.info('duplicate url: {}'.format(duplicate_url))\n question_answer['question_original_url'] = duplicate_url\n self.duplicate_url = duplicate_url\n\n return question_answer", "def setUp(self):\n\t\tself.app = create_app()\n\t\tself.client = self.app.test_client\n\t\tself.database_name = \"trivia_test\"\n\t\tself.database_path = \"postgres:///{}\".format(self.database_name)\n\t\tself.q_id = 2\n\t\tsetup_db(self.app, self.database_path)\n\n\t\tself.question = {\n\t\t\t\"question\": \"Türkiye'nin başkenti hangi 
ilimizdir?\",\n\t\t\t\"answer\": \"Ankara\",\n\t\t\t\"difficulty\": 1,\n\t\t\t\"category\": 3\n\t\t}\n\n\t\tself.wrong_question = {\n\t\t\t\"question\": \"Türkiye'nin başkenti hangi ilimizdir?\",\n\t\t\t\"answer\": \"Ankara\",\n\t\t\t\"difficulty\": 1\n\t\t}\n\n\t\t# binds the app to the current context\n\t\twith self.app.app_context():\n\t\t\tself.db = SQLAlchemy()\n\t\t\tself.db.init_app(self.app)\n\t\t\t# create all tables\n\t\t\tself.db.create_all()", "def init_data(cls):\n data=[{'forum':'General Discussion','group':0,'order':0,'note':''},\n {'forum':'Frequently Asked Questions','group':0,'order':1,'note':''},\n {'forum':'Rules and Policies','group':0,'order':2,'note':''},\n {'forum':'News and Announcements','group':1,'order':10,'note':''},\n {'forum':'Feedback and Suggestions','group':1,'order':11,'note':'Suggest ideas of improvement and new features'},\n {'forum':'Bug Reports','group':1,'order':12,'note':'Report problems of the web services'},\n {'forum':'Book Reviews','group':2,'order':20,'note':''},\n {'forum':'Artists Corner','group':2,'order':21,'note':'Discuss topics about art and artists'},\n {'forum':'Writers Corner','group':2,'order':22,'note':'Discuss topics about stories and writers'}\n ]\n for d in data:\n f = SuiForum(forum=d['forum'],note=d['note'],group=d['group'],order=d['order'])\n f.put()", "def create(self):", "def post(self, request, *args, **kwargs):\n # user = request.user\n quizTakerId = kwargs[\"pk\"]\n quizTaker = QuizTakers.objects.filter(id=quizTakerId).first()\n data = request.data[\"questions\"]\n # question = Question.objects.first()\n\n if len(data) == 0:\n data = [{'question': 0 }]\n\n serializer = ResponseSerializer(data=data, many=True)\n if serializer.is_valid():\n serializer.save(quiztaker=quizTaker)\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(serializer.errors, status=status.HTTP_404_NOT_FOUND)", "def create(self, data):\n raise NotImplementedError", "def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = mtrx.create_distance_matrix(mtrx.create_data()) \r\n data['demands'] = clean.demands\r\n # Each location has a demand corresponding to the quantity—for example, \r\n # weight or volume—of the item to be picked up.\r\n data['vehicle_capacities'] = capacity\r\n # Each vehicle has a capacity: the maximum quantity that the vehicle can hold. 
\r\n # As a vehicle travels along its route, the total quantity of the items it is carrying \r\n # can never exceed its capacity.\r\n data['num_vehicles'] = number\r\n data['depot'] = 0\r\n return data", "def get(self):\n user = getAuthData()\n question_list = list_questions()\n # user_question_list = list_questions_by_username(user['username'])\n # nonuser_question_list = list_questions_by_username(user['username'], invert=True)\n\n tasks = get_tasks().values()\n\n # filter out the SUCCESS/FAILURE tasks\n tasks = [t for t in tasks if not (t['state'] == 'SUCCESS' or t['state'] == 'FAILURE' or t['state'] == 'REVOKED')]\n\n # get question hashes\n question_tasks = {q.id:[] for q in question_list}\n for t in tasks:\n if not t['args']:\n continue\n match = re.match(r\"[\\[(]'(.*)',?[)\\]]\", t['args'])\n if not match:\n continue\n question_id = match.group(1)\n question_tasks[question_id].append(t)\n\n # split into answer and update tasks\n for t in tasks:\n t['type'] = 'answering' if t['name'] == 'manager.tasks.answer_question' else \\\n 'refreshing KG' if t['name'] == 'manager.tasks.update_kg' else \\\n 'something?'\n\n def augment_info(question):\n answerset_timestamps = [a.timestamp for a in question.answersets]\n if answerset_timestamps:\n latest_idx = answerset_timestamps.index(max(answerset_timestamps))\n latest_answerset_id = question.answersets[latest_idx].id\n latest_answerset_timestamp = question.answersets[latest_idx].timestamp\n else:\n latest_answerset_id = None\n latest_answerset_timestamp = None\n q = question.toJSON()\n q['user_email'] = question.user.email\n q.pop('user_id')\n q.pop('machine_question')\n return {'latest_answerset_id': latest_answerset_id,\n 'latest_answerset_timestamp': latest_answerset_timestamp.isoformat() if latest_answerset_timestamp else None,\n 'tasks': [t['type'] for t in question_tasks[question.id]],\n **q}\n\n return [augment_info(q) for q in question_list], 200", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgresql://{}:{}@{}/{}\".format('postgres','9048','localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n self.new_question={\n \n \"question\":\"Testing\",\n \"answer\":\"yes\",\n \"difficulty\":3,\n \"category\":3\n }\n \n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n \n\n # Create Test Object\n \n self.new_Question = {\n 'question':'What is the tallest building',\n 'answer':'burjdubai',\n 'category':'4',\n 'difficulty':'2'\n }", "def generate_questions(self):\n\n # Define questions from all positive statements\n self.statements = get_all_statements(autogenerate_softmax=True,\n flatten=True)\n self.questions = [s.get_question_string() for s in self.statements\n if s.positivity == 'is' and not s.target == 'nothing']\n\n # Map questioner's statements to only positive statements\n self.statements = [s for s in self.statements\n if s.positivity == 'is' and not s.target == 
'nothing']\n\n # Pre-Generate likelihoods for all questions/statements\n self.likelihoods = np.empty(len(self.questions),\n dtype=[('question', np.object),\n ('probability', np.object),\n ('time_last_answered', np.float),\n ])\n for i, question in enumerate(self.questions):\n self.likelihoods[i]['question'] = question\n lh = self.statements[i].get_likelihood(discretized=True)\n self.likelihoods[i]['probability'] = lh\n self.likelihoods[i]['time_last_answered'] = -1\n logging.info('Generated {} questions.'.format(len(self.questions)))", "def collection(self):\n questions = []\n choice_list = []\n answers = []\n\n if self.form=='The correct German word':\n for i in range(self.num_ques):\n question, options, answer = self.generate_eng2ger()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n else:\n for i in range(self.num_ques):\n question, options, answer = self.generate_ger2eng()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n\n return questions, choice_list, answers", "async def generate_question(self) -> None:\n topic = choice(list(self._topics.keys()))\n arrayList = await self.session.get(\"https://restcountries.eu/rest/v2\")\n arrayList = await arrayList.json() # get request to the country API\n countries = []\n \n for _ in range(4):\n country = choice(arrayList)\n del arrayList[arrayList.index(country)]\n countries.append(country)\n del arrayList\n \n country = choice(countries)\n del countries[countries.index(country)]\n self.question = f\"What is the {self._topics[topic]} of {country['name']}?\"\n self.correct_order = randint(0, 3)\n self.choices = [i[topic] for i in countries]\n self.choices.insert(self.correct_order, country[topic])\n del countries, topic", "def create(self, validated_data):", "def add_questions(self, questions):\n for question in questions:\n self.questions.append(question)", "def prepare_data(self):", "def _make_vocab_files(self):\n self.logger.info('making question vocab...' + self.opt.QUESTION_VOCAB_SPACE)\n qdic, _ = self.load_data(self.opt.QUESTION_VOCAB_SPACE)\n question_vocab = VQADataProvider.make_question_vocab(qdic, self.max_length)\n self.logger.info('making answer vocab...' 
+ self.opt.ANSWER_VOCAB_SPACE)\n qdic, adic = self.load_data(self.opt.ANSWER_VOCAB_SPACE)\n answer_vocab = VQADataProvider.make_answer_vocab(adic, qdic, self.opt.MAX_ANSWER_VOCAB_SIZE, self.use_ocr)\n return question_vocab, answer_vocab", "def create_features(self, answer):\n # Get the teacher's stuff\n a_stopwords = sf.remove_stopwords(self.teacher_answer)\n a_stemmed = sf.stem_sentence(a_stopwords)\n a_stemmed_ordered = sf.order_sentence(a_stemmed)\n teacher_answers = [\n a_stemmed,\n a_stemmed_ordered,\n ]\n \n # Change sentence into multiple versions\n log = dict()\n log['student_answer'] = answer\n log['teacher_answer'] = self.teacher_answer\n log['q_answer'] = answer\n log['q_stopwords'] = sf.remove_stopwords(answer)\n log['q_stemmed'] = sf.stem_sentence(answer)\n log['q_stem_ordered'] = sf.order_sentence(log['q_stemmed'])\n \n # Might need to save scaling until jsut before modeling\n log['wordcount'] = sf.word_count(answer)\n log['wordcount'] = sf.scale_column(self.word_scaler, log['wordcount'])\n\n\n# Stem sim\n log['stem_g_similarity'] = sf.generic_similarity(log['q_stemmed'], a_stemmed)\n log['stem_j_similarity'] = sf.jaccard_similarity(log['q_stemmed'], a_stemmed)\n log['stem_c_similarity'] = sf.cosine_similarity(log['q_stemmed'], a_stemmed)\n # Ordered\n log['stem_ordered_g_similarity'] = sf.generic_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_j_similarity'] = sf.jaccard_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_c_similarity'] = sf.cosine_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n\n\n \n # Appending New Answer\n self.new_answers = self.new_answers.append(log, ignore_index = True)\n \n # Entity Extraction\n types_of_sentences = [\n 'q_stemmed',\n 'q_stem_ordered',\n ]\n \n for sent_type, teach_ans in zip(types_of_sentences, teacher_answers):\n \n self.new_answers = sf.unigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.bigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.trigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)", "def to_object(cls, query_dict: Dict):\n question = Question()\n question.id = query_dict.get(\"id\")\n question.created_date = query_dict.get(\"created_date\")\n question.created_by = query_dict.get(\"created_by\")\n question.meet_up = query_dict.get(\"meetup\")\n question.title = query_dict.get(\"title\")\n question.body = query_dict.get(\"body\")\n question.votes = query_dict.get(\"votes\")\n question.upvotes = query_dict.get(\"upvotes\")\n question.downvotes = query_dict.get(\"downvotes\")\n question.comments = len(\n Comment.query_by_field(\"question\", question.id))\n return question", "def create_question(self, input_title, input_details, user_id):\n try:\n query = (u\"INSERT INTO tbl_questions (question_title, \"\n \"question_details, posted_by) VALUES (%s,%s,%s) \"\n \";\")\n inputs = input_title, input_details, user_id\n return run_query(query, inputs)\n except psycopg2.Error as e:\n print(e)", "def create_populated_question(answer_states: List[bool], question_text: str = None):\n question = create_question(question_text)\n\n for state in answer_states:\n create_answer(question, state)\n\n return question", "def populate_poll(user=\"\",total=10):\n user_list = None\n #create random user only when user argument empty\n if user == \"\":\n create_random_user(20)\n user_list = User.objects.all()\n \n for i in range(total):\n Question.objects.create(\n 
created_by=random.choice(user_list) if user_list is not None else user,\n title=create_random_string(seed_random(10)),\n text=create_random_string(seed_random(300)),\n slug=create_random_string(seed_random(100)) )", "def get_questions(self):\n self.post_question()\n return self.client.get(\"api/v2/questions\", headers={\"Authorization\": \"{}\".format(self.token())}, data=json.dumps(self.question), content_type='application/json')", "def create():", "def create():", "def _add_question_to_exam(self, _question_id):\n # Add question to current exams question dataset.\n assert (isinstance(_question_id, str))\n question = Question()\n question.load_from_database(*self.ExamDB.get_questions_by_id(_question_id))\n\n self._objects[\"Questions\"].append(question)\n self._exam_id['question_ids'].append(_question_id)\n\n return", "def setUpTestData(cls):\n cls.board = Board.objects.create(name = DICT.get('board_name') )\n\n cls.task = Task.objects.create(head = DICT.get('task_head'),\n description = DICT.get('task_description'),\n board = cls.board )", "def makedata():\n # train\n print('Clean Train Dataset and separate questions')\n df = pd.read_csv(TRAIN_DATASET).replace(np.nan, ' ')\n t = df.shape[0] * 2\n print t\n df['question1'] = cleanText(df['question1'])\n df['question2'] = cleanText(df['question2'])\n\n df.to_csv(os.path.join(rootpathdata_cleaned, 'train.csx'), index=False)\n overallquestions = df['question1'].tolist() + df['question2'].tolist()\n tpm = pd.DataFrame()\n tpm['question'] = overallquestions\n tpm.to_csv(os.path.join(rootpathdata_cleaned, 'train_allquestions.csx'), index=False)\n # test\n\n print('Clean Test Dataset and separate questions')\n df = pd.read_csv(TEST_DATASET).fillna(' ')\n t1 = df.shape[0] * 2\n df['question1'] = cleanText(df['question1'])\n df['question2'] = cleanText(df['question2'])\n df.to_csv(os.path.join(rootpathdata_cleaned, 'test.csx'), index=False)\n\n overallquestions += df['question1'].tolist() + df['question2'].tolist()\n tpm = pd.DataFrame()\n tpm['question'] = overallquestions\n tpm.to_csv(os.path.join(rootpathdata_cleaned, 'test_allquestions.csx'), index=False)\n print len(overallquestions), t1 + t", "def create_freeform(cls, name, question, default_response, contacts, user): \n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_TEXT)\n poll.contacts = contacts \n return poll", "def test_create_new_question_fails(self):\n \n init_question = Question.query.all()\n\n response = self.client().post('/questions', json={})\n body = json.loads(response.data)\n\n final_question = Question.query.all()\n\n\n self.assertEqual(response.status_code, 422)\n self.assertEqual(body['success'], False)\n self.assertTrue(len(init_question) == len(final_question))", "def _create_guess_datasets(self):\n raise NotImplementedError('Please override the _create_guess_datasets '\n 'specific to your model')" ]
[ "0.70364606", "0.68326944", "0.6766363", "0.65983987", "0.6595654", "0.65301675", "0.6528276", "0.6509688", "0.6469362", "0.63934886", "0.63322735", "0.6294712", "0.6284928", "0.6249892", "0.62365043", "0.62052953", "0.6195677", "0.61907166", "0.6157784", "0.61300015", "0.6125107", "0.61186624", "0.6109792", "0.6096726", "0.6090358", "0.6087753", "0.60822594", "0.6074919", "0.6053695", "0.60488707", "0.6013473", "0.6011501", "0.5968226", "0.5951871", "0.59486884", "0.5947095", "0.59368294", "0.592305", "0.5913577", "0.5911227", "0.5887569", "0.5886375", "0.5874578", "0.5861561", "0.585946", "0.58360994", "0.58322793", "0.5818936", "0.5793764", "0.57877254", "0.5781029", "0.5768085", "0.57671535", "0.57601476", "0.57548183", "0.5748189", "0.5735629", "0.5735629", "0.57297856", "0.5727684", "0.5719845", "0.57079583", "0.57030046", "0.57022375", "0.5685844", "0.5678052", "0.56766915", "0.5675404", "0.5674696", "0.56698996", "0.5658887", "0.5651999", "0.5651936", "0.56469595", "0.56444603", "0.5641363", "0.5636521", "0.56301177", "0.5623709", "0.5607883", "0.5602301", "0.55997026", "0.559944", "0.559431", "0.5592654", "0.5588454", "0.5585706", "0.5580877", "0.5578136", "0.55736715", "0.55685276", "0.5566289", "0.5560449", "0.5560449", "0.55575037", "0.5556673", "0.55485064", "0.5545189", "0.5538681", "0.55383503" ]
0.63760406
10
create data that uses the Choice model
def create_choices(question_model, text="text", total_votes=0):
    return Choice.objects.create(question=question_model, text=text, total_votes=total_votes)
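As a sketch of how the two factories compose, assuming Choice hangs off Question via the question foreign key shown above; the create_poll helper, its defaults, and the choice labels are hypothetical, not part of the dataset.

def create_poll(user, choice_texts=('Yes', 'No')):
    # Build one Question, then one Choice per label via the factories above.
    question = create_question(user, title='poll')
    choices = [create_choices(question, text=label) for label in choice_texts]
    return question, choices

Calling create_poll(user) then yields one saved question plus two zero-vote choices, ready for vote-count tests.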
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, choice):\r\n self.choice = choice", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def create_choice(question, choice_text, votes=0):\n return question.choice_set.create(choice_text=choice_text, votes=votes)", "def create_question(question_text, days, choices=('choice 1',)):\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n for choice in choices:\n question.choice_set.create(choice_text=choice)\n return question", "def _create_response_model(self, data):\n pass", "def __init__(self, choices, *args, **kwargs):\n super(RangePollChoiceForm, self).__init__(*args, **kwargs)\n nominees = [(i, '%d' % i) for i in range(0, choices.count()+1)]\n for choice in choices:\n self.fields['range_poll__%s' % str(choice.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=nominees,\n label=choice.nominee.get_full_name()))", "def MakeChoice(self,content):\n return self.register(Choice(content,reg=self))", "def get_choicesdata(self):\n selected_value = self.get_cleaned_value()\n choicesdata = []\n for value, label in self.get_choices_cached():\n is_selected = value == selected_value\n url = self.build_set_values_url(values=[value])\n choicesdata.append({\n 'url': url,\n 'label': label,\n 'is_selected': is_selected,\n 'dom_id': '{}_{}'.format(self.get_inputfield_dom_id(), value)\n })\n return choicesdata", "def get_context_data(self, **kwargs):\n question_id = int(self.kwargs['question_id'])\n question = self._get_question(question_id)\n kwargs.setdefault('question', question)\n choices = question.choice_set.all()\n kwargs.setdefault('choices', choices)\n return super().get_context_data(**kwargs)", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def add_choice(self, name, value):\r\n self.choices += [{\"name\": name, \"value\": value}]", "def configure_list_of_choices_type_question(self, question_data):\n self.driver.find_radio_button(LIST_OF_CHOICE_RB).click()\n index = 1\n for choice in fetch_(CHOICE, from_(question_data)):\n if index > 1:\n self.driver.find(ADD_CHOICE_LINK).click()\n self.driver.find_text_box(by_xpath(CHOICE_XPATH_LOCATOR + \"[\" + str(index) + \"]\" + CHOICE_TB_XPATH_LOCATOR)).enter_text(choice)\n index += 1\n choice_type = fetch_(ALLOWED_CHOICE, from_(question_data))\n if ONLY_ONE_ANSWER == choice_type:\n 
self.driver.find_radio_button(ONLY_ONE_ANSWER_RB).click()\n elif MULTIPLE_ANSWERS == choice_type:\n self.driver.find_radio_button(MULTIPLE_ANSWER_RB).click()\n return self", "def create_dummy_form(title,text,fill_choice=[],choice_length=[]):\n # fill it with blank for dummy choices\n count=0\n choices=[]\n while count < 8:\n choices.append(None)\n count+=1\n \n # fill choices based on value on fill_choice\n for i in fill_choice:\n try :\n length = choice_length[i]\n except IndexError :\n length = 10\n choices[i] = create_random_string(length)\n\n dummy_form=CreatePollQuestion(\n {\"question_title\":title,\n \"question_text\" :text,\n \"choice_1\":choices[0],\n \"choice_2\":choices[1],\n \"choice_3\":choices[2],\n \"choice_4\":choices[3],\n \"choice_5\":choices[4],\n \"choice_6\":choices[5],\n \"choice_7\":choices[6],\n \"choice_8\":choices[7],\n })\n\n return dummy_form", "def test_distribution_choices_added_successfully(self):\n valid_choices = [\"cpu\", \"memory\"]\n for good_input in valid_choices:\n self.ocp_data[\"distribution\"] = good_input\n self.assertEqual(self.ocp_data[\"distribution\"], good_input)\n with tenant_context(self.tenant):\n instance = None\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n instance = serializer.save()\n self.assertIsNotNone(instance)\n self.assertIsNotNone(instance.uuid)", "def data(self) -> dict:\n _data = super().data\n _choices = 'choices'\n if _choices in _data.keys():\n raise TypeError('Implementation Error: Key \\'{}\\' already defined in parent class'.format(_choices))\n _data[_choices] = sorted(list(self._choices))\n return _data", "def genre_choices(request):\n choices = GENRES\n diction = {}\n li = []\n for data in choices:\n li.append(data[0])\n diction['GENRE_CHOICES'] = li\n return JsonResponse(data=diction, status=status.HTTP_200_OK)#, safe=False)", "def build_choice_element(node_type, contents, tail_text, value):\r\n # When xml is being parsed numtolerance_input and decoy_input tags map to textinput type\r\n # in order to provide the template with correct rendering information.\r\n if node_type in ('numtolerance_input', 'decoy_input'):\r\n node_type = 'textinput'\r\n choice = {'type': node_type, 'contents': contents, 'tail_text': tail_text, 'value': value}\r\n return choice", "def instrument_choices(request):\n #choices = INSTRUMENT_CLASSES\n choices = [x.name for x in Instrument.objects.all()]\n diction = {}\n li = []\n for data in choices:\n li.append(data)\n diction['INSTRUMENT_CHOICES'] = li\n return JsonResponse(data=diction, status=status.HTTP_200_OK)", "def setUp(self):\n current_date = date.today()\n name = 'name'\n possible_meals = [Meal(date=current_date, name=name)]\n self.possible_meals_choices = [(possible_meal.id, possible_meal.name)\n for possible_meal in possible_meals]", "def _create_question_from_dict(self, d):\n question_type_str = d[Question.TYPE]\n d_copy = d.copy()\n\n # Todo: figure out a global setting for whether select all\n # that apply questions have an automatic none option.\n if question_type_str.startswith(u\"select all that apply\"):\n self._add_none_option_to_select_all_that_apply(d_copy)\n\n # hack job right here to get this to work\n if question_type_str.endswith(u\" or specify other\"):\n question_type_str = question_type_str[:len(question_type_str)-len(u\" or specify other\")]\n d_copy[Question.TYPE] = question_type_str\n self._add_other_option_to_multiple_choice_question(d_copy)\n return 
[self._create_question_from_dict(d_copy),\n self._create_specify_other_question_from_dict(d_copy)]\n question_class = self._get_question_class(question_type_str)\n # todo: clean up this spaghetti code\n d_copy[u\"question_type_dictionary\"] = self._question_type_dictionary\n if question_class:\n return question_class(**d_copy)\n return []", "def _create_choice_element(self, **kwargs):\r\n text = kwargs.get('text', '')\r\n correct = kwargs.get('correctness', \"true\")\r\n inputs = kwargs.get('inputs', [])\r\n choice_element = etree.Element(\"choice\")\r\n choice_element.set(\"correct\", correct)\r\n choice_element.text = text\r\n for inp in inputs:\r\n # Add all of the inputs as children of this choice\r\n choice_element.append(inp)\r\n\r\n return choice_element", "def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)", "def create_models( self ):", "def add_model_case(wiz, choices):\n page = wiz.add_page(u\"Model definition\")\n lay = page.use(qt.QVBoxLayout())\n lay.addWidget(qt.QLabel(u\"What kind of model do you want to work on?\"))\n wfield = page.register(\"model\", choices[0][0])\n lay.addWidget(ModelSelection(wfield, choices))", "def build_model():", "def __init__(self, name, list_countries,list_sectors,EORA=False,list_fd_cats=[]):\n self.name = name\n self.m = ConcreteModel()\n self.countries = list_countries\n self.total_countries = len(list_countries)\n self.sectors = list_sectors\n self.fd_cat = list_fd_cats\n if EORA is True:\n self.EORA = True\n else:\n self.EORA = False", "def test_question_with_choices(self):\n create_question(question_text='Question with choices', days=0)\n response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['latest_questions_list'], ['<Question: Question with choices>'])", "def _initChoiceTable(self):\n\n t = self.tableWidget_choice_list # shorthand notation\n\n ### Header popluation & properties\n '''\n for (i, col_name) in enumerate(self.data.col_name_list):\n # Order the column labels as in the order of the definition\n # of the dictionary for the element property names and the\n # column names\n t.horizontalHeaderItem(i).setText(col_name)\n '''\n # or\n t.setHorizontalHeaderLabels(self.data.col_name_list)\n\n t.horizontalHeader().setMovable(True)", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def validate(self, data):\n choices = data['answer']\n question = Question.objects.get(id=data['question'])\n if question.qtype != 'select-multiple' and len(choices) > 1:\n raise serializers.ValidationError('This is a question with single choice')\n queryset = Choice.objects.filter(question_id=data['question'])\n for choice in choices:\n get_object_or_404(queryset, id=choice)\n return data", "def __init__(self, name, list_countries,list_sectors,list_products,EORA=False):\n self.name = name\n self.m = ConcreteModel()\n self.countries = list_countries\n self.total_countries = len(list_countries)\n self.sectors = list_sectors\n self.products = list_products\n \n if EORA is True:\n self.EORA = True\n else:\n self.EORA = False", "def 
create(self, validated_data):", "def process_formdata(self, valuelist):\n self.data = None\n if valuelist:\n self._original_value = valuelist[0]\n\n if valuelist[0] == u'':\n return\n\n try:\n self.data = self.query_factory(valuelist[0]).one()\n except NoResultFound:\n raise ValidationError('Not a valid choice.')", "def create_model(self):\n pass", "def create_model(self):\n pass", "def assign_choice_names(self):\r\n\r\n for index, choice in enumerate(self.xml.xpath('//*[@id=$id]//choice',\r\n id=self.xml.get('id'))):\r\n choice.set(\"name\", \"choice_\" + str(index))", "def setup(self):\r\n self.text_input_values = {}\r\n if self.tag == 'radiotextgroup':\r\n self.html_input_type = \"radio\"\r\n elif self.tag == 'checkboxtextgroup':\r\n self.html_input_type = \"checkbox\"\r\n else:\r\n raise Exception(\"ChoiceGroup: unexpected tag {0}\".format(self.tag))\r\n\r\n if self.value == '':\r\n # Make `value` an empty dictionary, if it currently has an empty\r\n # value. This is necessary because the template expects a\r\n # dictionary.\r\n self.value = {}\r\n self.choices = self.extract_choices(self.xml)", "def _pacbio_choice_option_from_dict(d):\n choices = d['choices']\n default_value = d['default']\n # this will immediately raise\n option_type_id = TaskOptionTypes.from_choice_str(d['optionTypeId'])\n\n opt_id = d['id']\n name = d['name']\n desc = to_utf8(d['description'])\n\n klass_map = {TaskOptionTypes.CHOICE_STR: PacBioStringChoiceOption,\n TaskOptionTypes.CHOICE_FLOAT: PacBioFloatChoiceOption,\n TaskOptionTypes.CHOICE_INT: PacBioIntChoiceOption}\n\n k = klass_map[option_type_id]\n\n # Sanitize Unicode hack\n if k is PacBioStringChoiceOption:\n default_value = to_ascii(default_value)\n choices = [to_ascii(i) for i in choices]\n\n opt = k(opt_id, name, default_value, desc, choices)\n\n return opt", "def __init__(self, *args, **kwargs):\n super(HiddenModelObjectInputForm, self).__init__(*args, **kwargs)\n self.fields['model'].choices = get_registered_models(\n ignore=IGNORED_MODELS\n )", "def to_representation(self, obj):\n return self._choices[obj]", "def type_for(data):\n switcher = {\n # Startup\n \"FileHeader\": models.FileHeader,\n \"ClearSavedGame\": models.ClearSavedGame,\n \"NewCommander\": models.NewCommander,\n \"LoadGame\": models.LoadGame,\n \"Progress\": models.Progress,\n \"Rank\": models.Rank,\n # Travel\n \"Docked\": models.Docked,\n \"DockingCancelled\": models.DockingCancelled,\n \"DockingDenied\": models.DockingDenied,\n \"DockingGranted\": models.DockingGranted,\n \"DockingRequested\": models.DockingRequested,\n \"DockingTimeout\": models.DockingTimeout,\n \"FSDJump\": models.FSDJump,\n \"Liftoff\": models.Liftoff,\n \"Location\": models.Location,\n \"SupercruiseEntry\": models.SupercruiseEntry,\n \"SupercruiseExit\": models.SupercruiseExit,\n \"Touchdown\": models.Touchdown,\n \"Undocked\": models.Undocked,\n # Combat\n \"Bounty\": models.Bounty,\n \"CapShipBond\": models.CapShipBond,\n \"Died\": models.Died,\n \"EscapeInterdiction\": models.EscapeInterdiction,\n \"FactionKillBond\": models.FactionKillBond,\n \"HeatDamage\": models.HeatDamage,\n \"HeatWarning\": models.HeatWarning,\n \"HullDamage\": models.HullDamage,\n \"Interdicted\": models.Interdicted,\n \"Interdiction\": models.Interdiction,\n \"PVPKill\": models.PVPKill,\n \"ShieldState\": models.ShieldState,\n # Exploration\n \"Scan\": models.Scan,\n \"MaterialCollected\": models.MaterialCollected,\n \"MaterialDiscarded\": models.MaterialDiscarded,\n \"MaterialDiscovered\": models.MaterialDiscovered,\n 
\"BuyExplorationData\": models.BuyExplorationData,\n \"SellExplorationData\": models.SellExplorationData,\n \"Screenshot\": models.Screenshot,\n # Trade\n \"BuyTradeData\": models.BuyTradeData,\n \"CollectCargo\": models.CollectCargo,\n \"EjectCargo\": models.EjectCargo,\n \"MarketBuy\": models.MarketBuy,\n \"MarketSell\": models.MarketSell,\n \"MiningRefined\": models.MiningRefined,\n # Station Services\n \"BuyAmmo\": models.BuyAmmo,\n \"BuyDrones\": models.BuyDrones,\n \"CommunityGoalDiscard\": models.CommunityGoalDiscard,\n \"CommunityGoalJoin\": models.CommunityGoalJoin,\n \"CommunityGoalReward\": models.CommunityGoalReward,\n \"CrewAssign\": models.CrewAssign,\n \"CrewFire\": models.CrewFire,\n \"CrewHire\": models.CrewHire,\n \"EngineerApply\": models.EngineerApply,\n \"EngineerCraft\": models.EngineerCraft,\n \"EngineerProgress\": models.EngineerProgress,\n \"FetchRemoteModule\": models.FetchRemoteModule,\n \"MassModuleStore\": models.MassModuleStore,\n \"MissionAbandoned\": models.MissionAbandoned,\n \"MissionAccepted\": models.MissionAccepted,\n \"MissionCompleted\": models.MissionCompleted,\n \"MissionFailed\": models.MissionFailed,\n \"ModuleBuy\": models.ModuleBuy,\n \"ModuleRetrieve\": models.ModuleRetrieve,\n \"ModuleSell\": models.ModuleSell,\n \"ModuleSellRemote\": models.ModuleSellRemote,\n \"ModuleStore\": models.ModuleStore,\n \"ModuleSwap\": models.ModuleSwap,\n \"PayFines\": models.PayFines,\n \"PayLegacyFines\": models.PayLegacyFines,\n \"RedeemVoucher\": models.RedeemVoucher,\n \"RefuelAll\": models.RefuelAll,\n \"RefuelPartial\": models.RefuelPartial,\n \"Repair\": models.Repair,\n \"RepairAll\": models.RepairAll,\n \"RestockVehicle\": models.RestockVehicle,\n \"ScientificResearch\": models.ScientificResearch,\n \"SellDrones\": models.SellDrones,\n \"ShipyardBuy\": models.ShipyardBuy,\n \"ShipyardNew\": models.ShipyardNew,\n \"ShipyardSell\": models.ShipyardSell,\n \"ShipyardTransfer\": models.ShipyardTransfer,\n \"ShipyardSwap\": models.ShipyardSwap,\n # Powerplay\n \"PowerplayCollect\": models.PowerplayCollect,\n \"PowerplayDefect\": models.PowerplayDefect,\n \"PowerplayDeliver\": models.PowerplayDeliver,\n \"PowerplayFastTrack\": models.PowerplayFastTrack,\n \"PowerplayJoin\": models.PowerplayJoin,\n \"PowerplayLeave\": models.PowerplayLeave,\n \"PowerplaySalary\": models.PowerplaySalary,\n \"PowerplayVote\": models.PowerplayVote,\n \"PowerplayVoucher\": models.PowerplayVoucher,\n # Other Events\n \"ApproachSettlement\": models.ApproachSettlement,\n \"CockpitBreached\": models.CockpitBreached,\n \"CommitCrime\": models.CommitCrime,\n \"Continued\": models.Continued,\n \"DatalinkScan\": models.DatalinkScan,\n \"DatalinkVoucher\": models.DatalinkVoucher,\n \"DataScanned\": models.DataScanned,\n \"DockFighter\": models.DockFighter,\n \"DockSRV\": models.DockSRV,\n \"FuelScoop\": models.FuelScoop,\n \"JetConeBoost\": models.JetConeBoost,\n \"JetConeDamage\": models.JetConeDamage,\n \"LaunchFighter\": models.LaunchFighter,\n \"LaunchSRV\": models.LaunchSRV,\n \"Promotion\": models.Promotion,\n \"RebootRepair\": models.RebootRepair,\n \"ReceiveText\": models.ReceiveText,\n \"Resurrect\": models.Resurrect,\n \"SelfDestruct\": models.SelfDestruct,\n \"SendText\": models.SendText,\n \"Synthesis\": models.Synthesis,\n \"USSDrop\": models.USSDrop,\n \"VehicleSwitch\": models.VehicleSwitch,\n \"WingAdd\": models.WingAdd,\n \"WingJoin\": models.WingJoin,\n \"WingLeave\": models.WingLeave,\n }\n return switcher.get(data[\"event\"], models.BaseModel)", "def 
generate_eng2ger(self):\n question = []\n data_len = len(self.df)+1\n n = random.randint(0, data_len)\n lst = []\n options = []\n for i in range(3):\n no = random.randint(0, data_len)\n lst.append(no)\n lst.append(n)\n lst = random.sample(lst, len(lst))\n ### Creating the question\n question.append(f'Select a german word for \"{self.df.iloc[n, 1]}\":')\n ### Creating options/choices\n for l in lst:\n options.append(f'{self.df.iloc[l, 0]}')\n ### Allocating the answer\n answer = self.df.iloc[n, 0]\n\n return question, options, answer", "def get_choice(cls, polls):\n\n cl = cls()\n items = []\n for poll in polls:\n items.append((poll.id, poll.question))\n\n setattr(cl.poll, 'items', items)\n return cl", "def create_question(question_text, days, create_choice=True):\n\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n if create_choice:\n question.choice_set.create(choice_text=\"Choice 1\", votes=0)\n return question", "def initDefaultChoices(self):\n return []", "def test_question_with_choices(self):\n question = create_question(question_text='Question with choices', days=0)\n response = self.client.get(reverse('polls:details', args=(question.id, )))\n self.assertContains(response, question.question_text)", "def test_question_with_choices(self):\n question = create_question(question_text='Question with choices', days=0)\n response = self.client.get(reverse('polls:results', args=(question.id, )))\n self.assertContains(response, question.question_text)", "def collection(self):\n questions = []\n choice_list = []\n answers = []\n\n if self.form=='The correct German word':\n for i in range(self.num_ques):\n question, options, answer = self.generate_eng2ger()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n else:\n for i in range(self.num_ques):\n question, options, answer = self.generate_ger2eng()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n\n return questions, choice_list, answers", "def _getChoices(self, acronym):\n # get matches from acronymDB\n matches = []\n if(acronym in self.acronymDB):\n matches += self.acronymDB[acronym]\n if(acronym[-1] == \"s\" and acronym[:-1] in self.acronymDB):\n matches += self.acronymDB[acronym]\n\n # create training data\n X_train, y_train = [], []\n for definition, articleID, ignored_var in matches:\n text = self.articleDB[articleID]\n X_train.append(\n ExpansionChoice(article_id=articleID, article_text=text))\n y_train.append(definition)\n\n # create y labels to group similar acronyms\n y_labels, labelToExpansion = self._processChoices(y_train)\n\n return X_train, y_labels, labelToExpansion", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def create(self):\n\n if self.data.get('hydrogeology', None):\n self.form = self._make_form(\n self.well.hydrogeology_parameter if self.well.hydrogeology_parameter else HydrogeologyParameter()\n , HydrogeologyParameterForm, self.data['hydrogeology'])\n\n if self.data['hydrogeology'].get('pumping_test'):\n self.pumping_test_form = self._make_form(\n self.form.instance.pumping_test if self.form.instance.pumping_test else PumpingTest(),\n PumpingTestForm, self.data['hydrogeology']['pumping_test']\n )", "def choice(self, label, choices, initial=0, optional=False, initial_on=False, handler=None, pack=True, **kwargs):\n handler = self._changed_handler(handler)\n ch = wx.Choice(self, choices=choices)\n 
ch.Bind(wx.EVT_CHOICE, handler)\n if optional:\n cb = wx.CheckBox(self, label=label)\n cb.SetValue(initial_on)\n cb.Bind(wx.EVT_CHECKBOX, self._changed)\n ch.checkbox = cb\n if pack:\n self.pack(\"\", cb, ch, enable=initial_on, **kwargs)\n elif pack:\n self.pack(label, ch, **kwargs)\n ch.SetSelection(initial)\n return ch", "def build_model(self):\n pass", "def build_model(self):\n pass", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def generate_ger2eng(self):\n question = []\n data_len = len(self.df)+1\n n = random.randint(0, data_len)\n lst = []\n options = []\n for i in range(3):\n no = random.randint(0, data_len)\n lst.append(no)\n lst.append(n)\n lst = random.sample(lst, len(lst))\n ### Creating the question\n question.append(f'Ein Englisches Wort für \"{self.df.iloc[n, 0]}\" auswählen:')\n ### Creating options/choices\n for l in lst:\n options.append(f'{self.df.iloc[l, 1]}')\n ### Allocating the answer\n answer = self.df.iloc[n, 1]\n\n return question, options, answer", "def __init__(self, data):\n self.user_id = data['user_id']\n self.condition_id = data['condition_id']\n self.condition = data['condition']\n self.condition_details = data['condition_details']\n self.user_answer = data['user_answer']", "def assign_choice_names(self):\r\n\r\n for index, choice in enumerate(\r\n self.xml.xpath('//*[@id=$id]//choice', id=self.xml.get('id'))\r\n ):\r\n # Set the name attribute for <choices>\r\n # \"bc\" is appended at the end to indicate that this is a\r\n # binary choice as opposed to a numtolerance_input, this convention\r\n # is used when grading the problem\r\n choice.set(\r\n \"name\",\r\n self.answer_id + \"_choiceinput_\" + str(index) + \"bc\"\r\n )\r\n # Set Name attributes for <numtolerance_input> elements\r\n # Look for all <numtolerance_inputs> inside this choice.\r\n numtolerance_inputs = choice.findall('numtolerance_input')\r\n # Look for all <decoy_input> inside this choice\r\n decoys = choice.findall('decoy_input')\r\n # <decoy_input> would only be used in choices which do not contain\r\n # <numtolerance_input>\r\n inputs = numtolerance_inputs if numtolerance_inputs else decoys\r\n # Give each input inside of the choice a name combining\r\n # The ordinality of the choice, and the ordinality of the input\r\n # within that choice e.g. 
1_2_1_choiceinput_0_numtolerance_input_1\r\n for ind, child in enumerate(inputs):\r\n child.set(\r\n \"name\",\r\n self.answer_id + \"_choiceinput_\" + str(index) +\r\n \"_numtolerance_input_\" + str(ind)\r\n )", "def application_command_autocomplete_choice_builder(name, value):\n return {\n 'name': name,\n 'value': value,\n }", "def add_model_page(wiz, choices):\n page = wiz.add_page(u\"Model definition\")\n lay = page.use(qt.QVBoxLayout())\n lay.addWidget(qt.QLabel(u\"What kind of model do you want to work on?\"))\n wfield = page.register(\"model\", choices[0][0])\n lay.addWidget(ModelSelection(wfield, choices))", "def __init__(self,decisiondict):\n self.decisiondict = decisiondict", "def __init__(self):\r\n\t\twith open(\"eqs.json\") as qData:\r\n\t\t\tself.questions = json.load(qData)\r\n\t\twith open(\"eqsave.json\") as uData:\r\n\t\t\tself.records = json.load(uData)\r\n\t\tself.types = {\"1\": \"Reformer\", \"2\": \"Helper\", \"3\": \"Achiever\", \"4\": \"Individualist\", \"5\": \"Investigator\", \"6\": \"Loyalist\", \"7\": \"Enthusiast\", \"8\": \"Challenger\", \"9\": \"Peacemaker\"}", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def init(tipo: int, factor:float):\n if tipo == 1:\n tipo = \"CHAINING\"\n else:\n tipo = \"PROBING\"\n # catalog es utilizado para interactuar con el modelo\n catalogo = model.NewCatalog(tipo,factor)\n return catalogo", "def __init__(self, radio_poll, *args, **kwargs):\n super(RadioPollChoiceForm, self).__init__(*args, **kwargs)\n choices = (((None, '----'),) +\n tuple(radio_poll.answers.values_list('id', 'answer')))\n self.fields['radio_poll__%s' % str(radio_poll.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=choices,\n label=radio_poll.question))", "def __init__(self, data):\n self.data = data\n self.model_func = DecisionTree._deserialize_decision_tree_from_json(data[\"model\"])", "def _make_problem(self, choices, in_type='radiotextgroup', script=''):\r\n return self.build_problem(\r\n choices=choices,\r\n type=in_type,\r\n script=script\r\n )", "def create_meal():", "def pacbio_option_from_dict(d):\n # This should probably be pushed into pbcommand/pb_io/* for consistency\n # Extensions are supported by adding a dispatch method by looking for\n # required key(s) in the dict.\n if \"choices\" in d and d.get('choices') is not None:\n # the None check is for the TCs that are non-choice based models, but\n # were written with \"choices\" key\n return _pacbio_choice_option_from_dict(d)\n else:\n return _pacbio_option_from_dict(d)", "def __init__(self, type, name):\n self.id = len(OFFICES)\n self.type = type\n self.name = name", "def create(self) -> dict:\n\n questions = [\n Text(name=\"name\", message=\"Enter category name\"),\n ]\n\n return prompt(questions)", "def __init__(self, *args, **kwargs):\n user = kwargs.pop('user')\n super(ChooseAppointmentForm, self).__init__(*args, **kwargs)\n if(user.first_name=='patient'):\n self.appointments = user.patient_appointment.all()\n appointment_partner = 'doctor' # patient is partnered with a doctor and vice versa\n else:\n self.appointments = user.doctor_appointment.all()\n appointment_partner = 'patient'\n choices = []\n\n for i, appointment in enumerate(self.appointments):\n partner_first_name = 
appointment.associated_patient.patient_user_profile.first_name if (appointment_partner=='patient') else appointment.associated_doctor.doctor_user_profile.first_name\n partner_last_name = appointment.associated_patient.patient_user_profile.last_name if (appointment_partner=='patient') else appointment.associated_doctor.doctor_user_profile.last_name\n choices.append((appointment, 'Appointment: {}, on {}, at {} with {} {}'\n .format(appointment.title, appointment.date, appointment.time, partner_first_name, partner_last_name)))\n\n self.fields['appointments'] = forms.ChoiceField(label=\"\", choices=choices, widget=forms.RadioSelect)", "def __init__(self):\n self.name = \"Osyczka\"\n objectives = [ob_os_1, ob_os_2]\n constraints = [con_os_1, con_os_2, con_os_3, con_os_4, con_os_5, con_os_6]\n decisions = [Decision(0, 10), Decision(0, 10), Decision(1, 5), Decision(0, 6), Decision(1, 5), Decision(0, 10)]\n Model.__init__(self, objectives, constraints, decisions)", "def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = distance_matrix.tolist()\r\n data['time_matrix'] = time_matrix.tolist()\r\n data['time_windows'] = time_windows.tolist()\r\n data['pickups_deliveries'] = pickup_deliveries.tolist()\r\n data['demands'] = demand\r\n data['num_vehicles'] = 20\r\n data['vehicle_capacities'] = [20 * i / i for i in range(1, num_vehicles+1)]\r\n data['depot'] = (2 * length) - 1\r\n return data", "def create(self, validated_data):\n breed_data = validated_data.pop('breed').capitalize()\n breed_id, _ = Breed.objects.get_or_create(title=breed_data)\n # validated_data['breed'] = breed_id\n cat = Cat.objects.create(breed=breed_id, **validated_data)\n return cat", "def choice(choices=[], message=\"Pick something.\", title=None):\n return dialog(\n \"choice\",\n choices=choices,\n message=message,\n title=title,\n )", "def to_representation(self, value):\n try:\n return self._choices[value]\n except KeyError:\n raise Exception('Value: {0} not valid!'.format(value))", "def test_framework_selections_post(self):\n pass", "def _createModelFromData(self, data):\n model = QStandardItemModel()\n model.setRowCount(len(data))\n model.setColumnCount(1)#model.setColumnCount(len(data[0]))\n # set model role names\n start_role = Qt.UserRole + 1\n role_names = {}\n for key, _ in data[0].items():\n column_index = list(data[0].keys()).index(key)\n role = start_role + column_index\n role_names[role] = key.encode()\n model.setItemRoleNames(role_names)\n # set model data\n for row_index, row_dict in enumerate(data):\n for key, value in row_dict.items():\n column_index = list(row_dict.keys()).index(key)\n index = model.index(row_index, 0)\n role = start_role + column_index\n model.setData(index, value, role)\n return model", "def __str__(self):\n return self.choice_text", "async def choice_parser(client, event):\n selected_values = event.values\n if (selected_values is None):\n return None\n \n match = CHOICE_RP.fullmatch(selected_values[0])\n if match is None:\n return None\n \n entity_kind, guild_id, entity_id, entity_name, animated = match.groups()\n \n entity_id = int(entity_id)\n \n # At the case of old choices, `guild_id` can be matched as `None`.\n if guild_id is None:\n guild_id = 0\n else:\n guild_id = int(guild_id)\n \n if entity_kind in ('e', 'r'):\n if entity_id:\n entity = Emoji._create_partial(entity_id, entity_name, ANIMATED_RESOLUTION.get(animated, False))\n else:\n try:\n entity = UNICODE_TO_EMOJI.get(entity_name, None)\n except KeyError:\n return None\n \n if entity_kind == 'e':\n choice_type = 
ChoiceTypeEmoji\n else:\n choice_type = ChoiceTypeReaction\n \n elif entity_kind == 's':\n entity = await get_sticker(client, entity_id)\n if entity is None:\n return None\n \n choice_type = ChoiceTypeSticker\n \n elif entity_kind == 'o':\n entity = await get_soundboard_sound(client, guild_id, entity_id)\n if entity is None:\n return None\n \n choice_type = ChoiceTypeSoundboardSound\n \n else:\n return None\n \n return Choice(entity, choice_type)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n group_choices = Group.objects.all().values_list(\"pk\", \"name\").order_by(\"name\")\n selected_group_ids = []\n if \"instance\" in kwargs:\n preset = kwargs[\"instance\"]\n selected_group_ids = [\n group.pk for group in preset.groups_with_permission(Permission.VIEW)\n ]\n self.fields[\"groups\"].choices = group_choices\n self.fields[\"groups\"].initial = selected_group_ids", "def _create(self, data):\n model = self.model\n data = self._check_odoo_attribute(data)\n binding = model.create(data)\n self._create_attribute_option(binding, data)\n _logger.debug('%s %d created from magento %s',\n self.model._name, binding.id, self.magento_id)\n return binding", "def create(self, **kwargs):\n return OptionValue.objects.create(name=kwargs['name'], option=kwargs['option'])", "def addChoice(self, choice_name, values, is_prefix=False):\n \n\n if choice_name not in self.validIds:\n # check if a prefix of this choice_name is a validId.\n # This is super not robust to catching errors but we need it for multiple\n # RV choices in a reusable class. See Diameter\n is_prefix_valid = is_prefix and any(choice_name.startswith(valId) for valId in self.validIds)\n if not is_prefix_valid:\n raise ValueError(f'Choice name {choice_name} not in validIds of reusable decision. Valid ids=[{self.validIds}]')\n\n super(ReusableDecision, self).addChoice(choice_name, values)", "def test_value_from_datadict(self):\n #\n # I know that this tests Django code. 
Humor me pls.\n #\n w = SelectMultipleField()\n name = 'test'\n data = {\n name: [self.choices[0][0], self.choices[2][0]]\n }\n #\n # dict miss returns None\n #\n obj = w.value_from_datadict({}, None, name)\n self.assertIs(obj, None)\n #\n # Plain dict returns obj in value, usually a list\n #\n obj = w.value_from_datadict(data, None, name)\n self.assertIsInstance(obj, list)\n self.assertIn(self.choices[0][0], obj)\n self.assertNotIn(self.choices[1][0], obj)\n self.assertIn(self.choices[2][0], obj)\n #\n # MultiValueDict are generated from WSGIRequest\n #\n data_obj = MultiValueDict(data)\n obj = w.value_from_datadict(data_obj, None, name)\n self.assertIsInstance(obj, list)\n self.assertIn(self.choices[0][0], obj)\n self.assertNotIn(self.choices[1][0], obj)\n self.assertIn(self.choices[2][0], obj)\n #\n # MergeDict are generated from QueryDict which are subclasses of\n # MultiValueDict\n #\n # data_obj = MergeDict(MultiValueDict(data))\n # obj = w.value_from_datadict(data_obj, None, name)\n # self.assertIsInstance(obj, list)\n # self.assertIn(self.choices[0][0], obj)\n # self.assertNotIn(self.choices[1][0], obj)\n # self.assertIn(self.choices[2][0], obj)", "def create(self):", "def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = mtrx.create_distance_matrix(mtrx.create_data()) \r\n data['demands'] = clean.demands\r\n # Each location has a demand corresponding to the quantity—for example, \r\n # weight or volume—of the item to be picked up.\r\n data['vehicle_capacities'] = capacity\r\n # Each vehicle has a capacity: the maximum quantity that the vehicle can hold. \r\n # As a vehicle travels along its route, the total quantity of the items it is carrying \r\n # can never exceed its capacity.\r\n data['num_vehicles'] = number\r\n data['depot'] = 0\r\n return data", "def create(self, validated_data):\n new_spec = Specification(key = validated_data.get('key'),\n value = validated_data.get('value'),\n category = validated_data.get('category'),\n car = validated_data.get('car'),)\n new_spec.save()\n\n return new_spec", "def create_questionnaire_with(self, questionnaire_data):\n questionnaire_code = fetch_(QUESTIONNAIRE_CODE, from_(questionnaire_data))\n gen_ramdom = fetch_(GEN_RANDOM, from_(questionnaire_data))\n if gen_ramdom:\n questionnaire_code = questionnaire_code + generateId()\n self.driver.find_text_box(QUESTIONNAIRE_CODE_TB).enter_text(questionnaire_code)\n self.create_default_question(questionnaire_data[DEFAULT_QUESTION], DEFAULT_QUESTION_LINK)\n for question in fetch_(QUESTIONS, from_(questionnaire_data)):\n self.driver.find(ADD_A_QUESTION_LINK).click()\n self.fill_question_and_code_tb(question)\n self.SELECT_FUNC[fetch_(TYPE, from_(question))](question)\n return self", "def get_choice(choice):\r\n return {\r\n '0': ('Extraversion', 0.07),\r\n '1': ('Neuroticism', 0.27),\r\n '2': ('Agreeableness', 0.11),\r\n '3': ('Conscientiousness', 0.09),\r\n '4': ('Openness', 0.45)\r\n }.get(choice, (None, None))", "def __init__(self):\n self.name = \"Kursawe\"\n objectives = [o_ku_1, o_ku_2]\n decisions = [Decision(-5, 5), Decision(-5, 5), Decision(-5, 5)]\n Model.__init__(self, objectives, None, decisions)", "def create(self, data):\n raise NotImplementedError", "def field_choices_used_to_translated_value():\r\n LANGUAGES = (\r\n ('en', 'English'),\r\n ('ru', 'Russian'),\r\n )\r\n\r\n from django.db import models\r\n\r\n class Article(models.Model):\r\n name = models.CharField(max_length=200)\r\n language = models.CharField(max_length=200, choices=LANGUAGES)\r\n\r\n def 
__unicode__(self):\r\n return self.name\r\n\r\n class ArticleTable(tables.Table):\r\n class Meta:\r\n model = Article\r\n\r\n table = ArticleTable([Article(name='English article', language='en'),\r\n Article(name='Russian article', language='ru')])\r\n\r\n assert 'English' == table.rows[0]['language']\r\n assert 'Russian' == table.rows[1]['language']", "def __init__(self):\n self.name = '{0} {1}'.format(choice(stars), choice(self.__class__.planets))\n self.casteOrder = (list(self.__class__.castes))\n shuffle(self.casteOrder)\n self.tech = choice(self.__class__.techTiers)\n self.genesis = choice(self.__class__.genesisReasons)\n self.description = ''\n self.attributes = '{0} ~ ruled by {1} ~ founded to {2}'.format(self.tech, self.casteOrder[0], self.genesis)", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)" ]
[ "0.6384629", "0.637918", "0.6361359", "0.6212173", "0.6101701", "0.60345143", "0.5951211", "0.5886236", "0.5857764", "0.5848246", "0.58469194", "0.58059937", "0.5702655", "0.56995493", "0.56930524", "0.5691047", "0.568869", "0.5666549", "0.56657976", "0.5663346", "0.5648866", "0.56329304", "0.5604295", "0.5604295", "0.5574496", "0.55660594", "0.55320215", "0.55271775", "0.5475609", "0.54406965", "0.54319674", "0.5418087", "0.53970057", "0.5382912", "0.53531533", "0.5341318", "0.5333025", "0.5333025", "0.5330705", "0.5327833", "0.53131205", "0.53123504", "0.53118896", "0.53091127", "0.5295494", "0.52937543", "0.5287869", "0.5285785", "0.5279218", "0.5266568", "0.5263614", "0.52576804", "0.5250993", "0.5250993", "0.52365905", "0.52349025", "0.5230033", "0.5230033", "0.52271533", "0.5224623", "0.5223154", "0.5219615", "0.52150106", "0.5213974", "0.5211762", "0.52090335", "0.51979727", "0.5195666", "0.51847816", "0.5179375", "0.5178239", "0.5172866", "0.51645786", "0.5160875", "0.51512045", "0.5150401", "0.5148485", "0.51467997", "0.51452285", "0.5144932", "0.5137024", "0.5136234", "0.5120206", "0.51111335", "0.5106677", "0.51024365", "0.50939524", "0.5088608", "0.5086575", "0.5084311", "0.5082114", "0.50820416", "0.5075345", "0.50731045", "0.5067061", "0.50575036", "0.50563186", "0.5055969", "0.505457", "0.50486404" ]
0.6831114
0
create data that uses the Answer model
def create_answer(question, user): return Answer.objects.create(question=question, answered_by=user)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_features(self, answer):\n # Get the teacher's stuff\n a_stopwords = sf.remove_stopwords(self.teacher_answer)\n a_stemmed = sf.stem_sentence(a_stopwords)\n a_stemmed_ordered = sf.order_sentence(a_stemmed)\n teacher_answers = [\n a_stemmed,\n a_stemmed_ordered,\n ]\n \n # Change sentence into multiple versions\n log = dict()\n log['student_answer'] = answer\n log['teacher_answer'] = self.teacher_answer\n log['q_answer'] = answer\n log['q_stopwords'] = sf.remove_stopwords(answer)\n log['q_stemmed'] = sf.stem_sentence(answer)\n log['q_stem_ordered'] = sf.order_sentence(log['q_stemmed'])\n \n # Might need to save scaling until jsut before modeling\n log['wordcount'] = sf.word_count(answer)\n log['wordcount'] = sf.scale_column(self.word_scaler, log['wordcount'])\n\n\n# Stem sim\n log['stem_g_similarity'] = sf.generic_similarity(log['q_stemmed'], a_stemmed)\n log['stem_j_similarity'] = sf.jaccard_similarity(log['q_stemmed'], a_stemmed)\n log['stem_c_similarity'] = sf.cosine_similarity(log['q_stemmed'], a_stemmed)\n # Ordered\n log['stem_ordered_g_similarity'] = sf.generic_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_j_similarity'] = sf.jaccard_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_c_similarity'] = sf.cosine_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n\n\n \n # Appending New Answer\n self.new_answers = self.new_answers.append(log, ignore_index = True)\n \n # Entity Extraction\n types_of_sentences = [\n 'q_stemmed',\n 'q_stem_ordered',\n ]\n \n for sent_type, teach_ans in zip(types_of_sentences, teacher_answers):\n \n self.new_answers = sf.unigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.bigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.trigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)", "def __init__(self, question, answer):\n\n self.question = question\n self.answer = answer\n\n self.q_and_a = {\n 'Question:': self.question,\n 'Correct Answer:': self.answer,\n }", "def _create_response_model(self, data):\n pass", "def _setData(self):\n #offset = datetime.timedelta(prefs.getNoOfDaysBeforeQuestionSchedule())\n date_formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n def _q_data_item(q):\n item = {}\n item[\"qid\"]= \"q_%s\" % q.question_id\n if q.question_number:\n item[\"subject\"] = u\"Q %s %s\" % (q.question_number, q.short_name)\n else:\n item[\"subject\"] = q.short_name\n item[\"title\"] = q.short_name\n item[\"result_item_class\"] = \"workflow-state-%s\" % q.status\n item[\"url\"] = url.set_url_context(\"questions/obj-%s\" % q.question_id)\n item[\"status\"] = misc.get_wf_state(q)\n item[\"status_date\"] = date_formatter.format(q.status_date)\n item[\"owner\"] = \"%s %s\" %(q.owner.first_name, q.owner.last_name)\n item[\"type\"] = _(q.type)\n item[\"to\"] = q.ministry.short_name\n return item\n self._data = [ _q_data_item(question) for question in self.query.all() ]", "def __init__(self, data):\n self.user_id = data['user_id']\n self.condition_id = data['condition_id']\n self.condition = data['condition']\n self.condition_details = data['condition_details']\n self.user_answer = data['user_answer']", "def __init__(self):\n self.answers = []", "def data_for_question(self, question_type):\n\t\treturn {}", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in 
results:\n data ={}\n data[\"qid\"]= (\"m_\" + str(result.motion_id))\n data[\"subject\"] = u\"M \" + str(result.motion_number) + u\" \" + result.short_name\n data[\"title\"] = result.short_name\n if result.approval_date:\n data[\"result_item_class\"] = (\"workflow-state-\" + \n result.status + \"sc-after-\" + \n datetime.date.strftime(result.approval_date, \"%Y-%m-%d\"))\n else:\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"motions/obj-\" + str(result.motion_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n data[\"to\"] = \"\"\n data_list.append(data)\n self._data = data_list", "def __init__(self, createdby, meetup, title, body, votes,createdOn):\n self.question_id = len(Question.question_list) + 1\n self.createdon = datetime.now()\n self.createdby = createdby\n self.meetup = meetup\n self.title = title\n self.body = body\n self.votes = votes", "def get_answers(self):\r\n pass", "def _create_examples(self, lines, kb_data, set_type):\n examples = []\n for idx, line in enumerate(lines):\n item = json.loads(line.strip())\n question_id = \"%s-%s\" % (set_type, idx)\n \n context_a_list = kb_data[idx]['answerA']\n context_b_list = kb_data[idx]['answerB']\n context_c_list = kb_data[idx]['answerC']\n\n context_a = \"\"\n for l in context_a_list[:1]:\n context_a += l.replace(\"\\n\",\". \")\n context_a = context_a[:-1]\n\n context_b = \"\"\n for l in context_b_list[:1]:\n context_b += l.replace(\"\\n\",\". \")\n context_b = context_b[:-1]\n\n context_c = \"\"\n for l in context_c_list[:1]:\n context_c += l.replace(\"\\n\",\". 
\")\n context_c = context_c[:-1]\n \n \n question = item[\"context\"] + item[\"question\"]\n endings = [item[\"answerA\"],item[\"answerB\"],item[\"answerC\"] ]\n label = item[\"correct\"]\n #race_id = \"%s-%s\" % (set_type, data_raw[\"race_id\"])\n #article = data_raw[\"article\"]\n #for i in range(len(data_raw[\"answers\"])):\n #truth = str(ord(data_raw[\"answers\"][i]) - ord(\"A\"))\n #question = data_raw[\"questions\"][i]\n #options = data_raw[\"options\"][i]\n\n examples.append(\n InputExample(\n example_id=question_id,\n question=question,\n contexts=[context_a,context_b,context_c],\n endings=[endings[0], endings[1], endings[2]],#, options[3]\n label=label,\n )\n )\n return examples", "def test_initial_answer(self):\n survey = SurveyFactory.create()\n\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': survey.name,\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n\n 'question_text': 'ou812?',\n 'variation_id': '1',\n 'score': None,\n 'max_score': None,\n 'flow_began_ts': 0,\n 'flow_offered_ts': 0,\n 'flow_voted_ts': 0,\n 'flow_engaged_ts': 0,\n 'platform': '',\n 'channel': '',\n 'version': '',\n 'locale': '',\n 'country': '',\n 'build_id': '',\n 'partner_id': '',\n 'profile_age': None,\n 'profile_usage': {},\n 'addons': {},\n 'extra': {},\n 'is_test': False\n }\n\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 201\n\n ans = Answer.objects.latest('id')\n\n for field in data.keys():\n # survey_id is a special case since it's a foreign key.\n if field == 'survey_id':\n # This looks goofy because it's not the normal way to\n # do things, but the \"survey_id\" attribute is a\n # Survey rather than the pk for a Survey.\n assert ans.survey_id.name == data[field]\n continue\n\n assert getattr(ans, field) == data[field]", "def create_questionnaire_with(self, questionnaire_data):\n questionnaire_code = fetch_(QUESTIONNAIRE_CODE, from_(questionnaire_data))\n gen_ramdom = fetch_(GEN_RANDOM, from_(questionnaire_data))\n if gen_ramdom:\n questionnaire_code = questionnaire_code + generateId()\n self.driver.find_text_box(QUESTIONNAIRE_CODE_TB).enter_text(questionnaire_code)\n self.create_default_question(questionnaire_data[DEFAULT_QUESTION], DEFAULT_QUESTION_LINK)\n for question in fetch_(QUESTIONS, from_(questionnaire_data)):\n self.driver.find(ADD_A_QUESTION_LINK).click()\n self.fill_question_and_code_tb(question)\n self.SELECT_FUNC[fetch_(TYPE, from_(question))](question)\n return self", "def __init__(self, answers, ranking: Ranking):\n self.answers = answers\n self.ranking = ranking", "def __init__(self):\r\n\t\twith open(\"eqs.json\") as qData:\r\n\t\t\tself.questions = json.load(qData)\r\n\t\twith open(\"eqsave.json\") as uData:\r\n\t\t\tself.records = json.load(uData)\r\n\t\tself.types = {\"1\": \"Reformer\", \"2\": \"Helper\", \"3\": \"Achiever\", \"4\": \"Individualist\", \"5\": \"Investigator\", \"6\": \"Loyalist\", \"7\": \"Enthusiast\", \"8\": \"Challenger\", \"9\": \"Peacemaker\"}", "def generate_answer(self, question):\n\n # Recognize intent of the question using `intent_recognizer`.\n # Don't forget to prepare question and calculate features for the question.\n \n prepared_question = text_prepare(question)\n features = self.tfidf_vectorizer.transform([prepared_question])\n intent = self.intent_recognizer.Main(question)\n #intent='gcs'\n # Chit-chat part: \n if intent == 'dialogue':\n \"\"\"\n # Pass 
question to chitchat_bot to generate a response.\n reply=self.college.Main(question)\n if reply !=\"Please refer GCS facebook page or ask you mentor for more info :)\":\n return reply\n else: \n \"\"\"\n reply=self.college.Main(question)\n if reply!=\"Please refer GCS facebook page or ask you mentor for more info :)\":\n return reply\n else:\n reply=self.programming.Main(question)\n if reply!=\"Please refer kammand prompt discord or ask you mentor for more info :)\":\n return reply\n else:\n response = str(self.chatbot.get_response(prepared_question))\n temp=np.random.choice(2,p=[0.5,0.5])\n times=np.random.choice([1,2,3,4],p=[0.5,0.3,0.1,0.1])\n if temp==0:\n print(\"EMOJI!!!!!\")\n response= response + times*(label_to_emoji(emojifyer.predict_emoji(model,response,word_to_index)).strip())\n return response\n elif intent==\"mandi\":\n reply=self.college.Main(question)\n return reply\n # Goal-oriented part:\n elif intent==\"stackoverflow\":\n tag = self.tag_classifier.predict(features)[0]\n reply = self.thread_ranker.get_best_thread(prepared_question, tag)\n return reply", "def __init__(self, question):\n self.question = question\n self.responses = []", "def store(self) -> None:\n con, c = db.connect()\n if not db.exists('SELECT * FROM answers WHERE id = ?', self.id, con=con):\n c.execute('INSERT INTO answers VALUES (?, ?, ?, ?, ?, ?, ?)', (self.id, self.answer, \n self.likes, self.created, self.tell, self.user.id, self.parent_id,))\n c.execute('UPDATE answers SET answer=?, likes=?, created=?, tell=?, user=? '+\\\n 'WHERE id = ?', (self.answer, self.likes, self.created, self.tell, \n self.user.id, self.id,))\n db.close(con)", "def create(text, is_correct, question_id):\n answer = Answer(question_id=question_id, text=text, is_correct=is_correct)\n try:\n answer.save()\n return answer\n except IntegrityError:\n return None", "def fill_question(self, response, question_answer):\n question_answer['source_url'] = response.url\n\n question_answer['question_title'] = response.xpath('//*[@id=\"question-header\"]/h1/a/text()').extract_first()\n question_answer['question_body'] = BeautifulSoup(\n response.xpath(self.gt.css_to_xpath('.postcell .post-text')).extract_first()).text\n question_answer['question_tags'] = list(set(\n response.xpath('//*[contains(concat(\" \", normalize-space(@class), \" \"), \" post-tag \")]/text()').extract()))\n # would like to specify the hierarchy of the css tags\n question_answer['question_upvotes'] = int(response.xpath(\n '//*[contains(concat(\" \", normalize-space(@class), \" \"), \" vote-count-post \")]/text()').extract_first())\n question_answer['question_view_count'] = int(\n response.xpath(self.gt.css_to_xpath('#qinfo .label-key') + '/b/text()').extract()[1].split(' ')[0])\n\n author_name = response.xpath(\n self.gt.css_to_xpath('.owner .user-details') + '/a/text()').extract_first()\n question_answer['question_author'] = {'author_id': '{}_{}'.format(self.allowed_domains[0], author_name),\n 'author_name': author_name}\n\n se_date_format = '%b %d \\'%y at %H:%M' # if date not current year\n se_date_format_curr_year = '%b %d at %H:%M' # if date current year\n try:\n try:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format))\n except ValueError:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + 
'/text()').extract_first(),\n se_date_format_curr_year))\n except (ValueError, TypeError):\n pass\n # Look for duplicates\n duplicate_url = response.xpath(self.gt.css_to_xpath('.question-originals-of-duplicate')+'/ul/li/a/@href').extract_first()\n if duplicate_url:\n print('duplicate question')\n self.duplicate_count += 1\n print('duplicate question count: {}'.format(self.duplicate_count))\n duplicate_url = \"https://superuser.com\" + duplicate_url\n print(duplicate_url)\n self.logger.info('duplicate url: {}'.format(duplicate_url))\n question_answer['question_original_url'] = duplicate_url\n self.duplicate_url = duplicate_url\n\n return question_answer", "def create(self, validated_data):\n \n if(Saved_answers.objects.filter(username=validated_data.get('username'),level=validated_data.get('level')).exists()):\n objects=Saved_answers.objects.filter(username=validated_data.get('username'),level=validated_data.get('level')).update(seconds=validated_data.get('seconds'),answer1=validated_data.get('answer1'),answer2=validated_data.get('answer2'),answer3=validated_data.get('answer3'),answer4=validated_data.get('answer4'),answer5=validated_data.get('answer5'),answer6=validated_data.get('answer6'),answer7=validated_data.get('answer7'),answer8=validated_data.get('answer8'),answer9=validated_data.get('answer9'),answer10=validated_data.get('answer10'),answer11=validated_data.get('answer11'),answer12=validated_data.get('answer12'),answer13=validated_data.get('answer13'),answer14=validated_data.get('answer14'),answer15=validated_data.get('answer15'),answer16=validated_data.get('answer16'),answer17=validated_data.get('answer17'),answer18=validated_data.get('answer18'),answer19=validated_data.get('answer19'),answer20=validated_data.get('answer20'))\n else:\n objects=Saved_answers.objects.create(seconds=validated_data.get('seconds'),username=validated_data.get('username'),level=validated_data.get('level'),answer1=validated_data.get('answer1'),answer2=validated_data.get('answer2'),answer3=validated_data.get('answer3'),answer4=validated_data.get('answer4'),answer5=validated_data.get('answer5'),answer6=validated_data.get('answer6'),answer7=validated_data.get('answer7'),answer8=validated_data.get('answer8'),answer9=validated_data.get('answer9'),answer10=validated_data.get('answer10'),answer11=validated_data.get('answer11'),answer12=validated_data.get('answer12'),answer13=validated_data.get('answer13'),answer14=validated_data.get('answer14'),answer15=validated_data.get('answer15'),answer16=validated_data.get('answer16'),answer17=validated_data.get('answer17'),answer18=validated_data.get('answer18'),answer19=validated_data.get('answer19'),answer20=validated_data.get('answer20'))\n # print >> sys.stderr, objects\n return objects", "def initialize_new_questionnaire(questionnaire, option_type, uuid):\r\n q = {}\r\n if (type(questionnaire) == dict):\r\n for key, val in questionnaire.items():\r\n if key != 'index':\r\n\r\n q[key] = [val] if type(val) != list else val\r\n questionnaire = pd.DataFrame(q)\r\n\r\n\r\n if \"_questionnaire\" not in option_type:\r\n option_type = option_type + \"_questionnaire\"\r\n\r\n option_type = option_type.lower()\r\n if 'option_type' not in questionnaire:\r\n questionnaire['option_type'] = [option_type]\r\n questionnaire['uuid'] = [uuid]\r\n questionnaire['timestamp'] = [datetime.datetime.utcnow()]\r\n print(\"this is questionaire: \", questionnaire)\r\n\r\n questionnaire=questionnaire.set_index('uuid')\r\n print(\"this is questionaire: \", questionnaire)\r\n 
questionnaire.to_sql(option_type, con=Database.DATABASE.engine, if_exists=\"append\", index=True)", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data = {}\n data[\"qid\"] = (\"i-\" + str(result.parliamentary_item_id))\n if type(result)==domain.AgendaItem:\n g = u\" \" + result.group.type + u\" \" + result.group.short_name\n else:\n g = u\"\" # !+ g?\n data[\"subject\"] = result.short_name\n data[\"title\"] = result.short_name\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"%ss/obj-%i\" % (\n result.type, result.parliamentary_item_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n if type(result)==domain.Question:\n data[\"to\"] = result.ministry.short_name\n else:\n data[\"to\"]= u\"\"\n # remember original domain object\n data[\"id\"] = result.parliamentary_item_id\n data[\"_obj\"] = result\n # append processed result item\n data_list.append(data)\n self._data = data_list", "def get_or_create(cls, question, student, result, answer, correct=None):\n qa = QuestionAnswer.objects.filter(question=question, student=student,\n result=result).first()\n if qa:\n qa.answer = answer,\n qa.correct = correct\n else:\n ans_data = {\n 'question': question,\n 'student': student,\n 'result': result,\n 'answer': answer,\n 'correct': correct,\n }\n qa = QuestionAnswer(**ans_data)\n qa.save()\n return qa", "def create(self, request):\n if not hasattr(request, \"data\"):\n request.data = request.POST\n attrs = self.flatten_dict(request.data)\n if not attrs.get('include_answer_page', None):\n if 'answer_page_title' in attrs:\n del attrs['answer_page_title']\n if 'answer_page_link' in attrs:\n del attrs['answer_page_link']\n kn = Knowledge(question = attrs['question'], \n search_keywords = attrs.get('search_keywords', ''),\n answer_summary = attrs.get('answer_summary', ''),\n answer_page_title = attrs.get('answer_page_title', ''),\n answer_page_link = attrs.get('answer_page_link', ''),\n tags = attrs.get('tags', ''),\n user=request.user)\n kn.save()\n return kn", "def gen_questions(self, number_of_questions):", "def __init__(self, data={}):\n\n self.config = db_config(BaseConfig.DATABASE_URI)\n self.table = 'questions'\n self.title = data.get('title')\n self.body = data.get('body')\n self.q = data.get('q')\n self.question_id = data.get('id')\n self.user_id = data.get('user_id')\n self.now = str(datetime.now())\n self.logged_in_user_id = Auth.get_logged_in_user(request)[0]['data']['user_id']", "def add_answer(self):\n # Create users\n user = self.create_user(self.user)\n other_user = self.create_user(self.other_user)\n\n # Create question\n question = self.create_question(self.question, other_user.id)\n\n # Add answers\n answer = self.create_answer(self.answer, question.id, user.id)\n\n user_token = self.get_user_token(user)\n other_user_token = self.get_user_token(other_user)\n\n return user_token, other_user_token, question.id, answer.id", "def post(self,request,format=None):\n id_ = request.data.get('questionID')\n selected_answer = request.data.get('answer')\n ques_obj = get_object_or_404(SingleWordQuiz,pk=id_)\n answer_obj,created = SingleWordQuizAnswer.objects.get_or_create(user=request.user.info,quiz_ques=ques_obj)\n answer_obj.selected_answer = 
selected_answer\n answer_obj.save()\n serializer = SingleWordQuizAnswerSerializer(answer_obj)\n return Response(data=serializer.data,status=status.HTTP_201_CREATED)", "def post(self,request,format=None):\n id_ = request.data.get('questionID')\n selected_answer = request.data.get('answer')\n ques_obj = get_object_or_404(MultipleQuiz,pk=id_)\n answer_obj,created = MultipleQuizAnswer.objects.get_or_create(user=request.user.info,quiz_ques=ques_obj)\n answer_obj.selected_answer = selected_answer\n answer_obj.save()\n serializer = MultipleQuizAnswerSerializer(answer_obj)\n return Response(data=serializer.data,status=status.HTTP_201_CREATED)", "def get_submission_metadata(self, answers, correct_map):\r\n\r\n input_metadata = {}\r\n for input_id, internal_answer in answers.iteritems():\r\n answer_input = self.lcp.inputs.get(input_id)\r\n\r\n if answer_input is None:\r\n log.warning('Input id %s is not mapped to an input type.', input_id)\r\n\r\n answer_response = None\r\n for response, responder in self.lcp.responders.iteritems():\r\n for other_input_id in self.lcp.responder_answers[response]:\r\n if other_input_id == input_id:\r\n answer_response = responder\r\n\r\n if answer_response is None:\r\n log.warning('Answer responder could not be found for input_id %s.', input_id)\r\n\r\n user_visible_answer = internal_answer\r\n if hasattr(answer_input, 'get_user_visible_answer'):\r\n user_visible_answer = answer_input.get_user_visible_answer(internal_answer)\r\n\r\n # If this problem has rerandomize enabled, then it will generate N variants of the\r\n # question, one per unique seed value. In this case we would like to know which\r\n # variant was selected. Ideally it would be nice to have the exact question that\r\n # was presented to the user, with values interpolated etc, but that can be done\r\n # later if necessary.\r\n variant = ''\r\n if self.rerandomize != 'never':\r\n variant = self.seed\r\n\r\n is_correct = correct_map.is_correct(input_id)\r\n if is_correct is None:\r\n is_correct = ''\r\n\r\n input_metadata[input_id] = {\r\n 'question': getattr(answer_input, 'loaded_attributes', {}).get('label', ''),\r\n 'answer': user_visible_answer,\r\n 'response_type': getattr(getattr(answer_response, 'xml', None), 'tag', ''),\r\n 'input_type': getattr(answer_input, 'tag', ''),\r\n 'correct': is_correct,\r\n 'variant': variant,\r\n }\r\n\r\n return input_metadata", "def create_question(self):\n\n locations = [\"meetup_id\", \"user_id\", \"title\", \"body\"]\n\n try:\n\n user = self.sql.get_username_by_id(\n int(self.question_details[\"user\"]))\n\n meetup = self.sql.fetch_details_by_criteria(\n \"meetup_id\", self.question_details[\"meetup\"], \"meetups\")\n\n existing = self.sql.fetch_details_if_text_exists(\n \"title\", self.question_details[\"title\"], \"questions\")\n\n title = self.question_details[\"title\"]\n\n body = self.question_details[\"body\"]\n\n except KeyError as keyerror:\n return self.makeresp(\"{} is a required field\".format(keyerror), 400)\n\n isempty = DataValidators(\n self.question_details).check_values_not_empty()\n\n if isinstance(isempty, str):\n return self.makeresp(isempty, 400)\n\n if not user:\n return self.makeresp(\"User not found\", 404)\n\n if not meetup:\n return self.makeresp(\"Meetup not found\", 404)\n\n if not self.check_is_error(existing):\n\n if [meet_id[1] for meet_id in existing if self.question_details[\"meetup\"] in meet_id]:\n\n return self.makeresp(\"This Question already exists\", 409)\n\n question = {\n \"meetup\": self.question_details[\"meetup\"],\n 
\"createdBy\": self.question_details[\"user\"],\n \"title\": title,\n \"body\": body\n }\n\n question_id = SqlHelper(question).save_to_database(\n locations, \"questions\")\n\n return self.makeresp(\n {\n \"id\": question_id,\n \"user\": question[\"createdBy\"],\n \"meetup\": question[\"meetup\"],\n \"title\": question[\"title\"],\n \"body\": question[\"body\"]\n }, 201)", "def __init__(self, question, correct_answer):\n\n self.question = question\n self.correct_answer = correct_answer", "def convert_question(self, q):\n\n item = {}\n item['id'] = q['id']\n item['title'] = q['title']\n item['body'] = q['text']\n item['author_id'] = q['author']['id']\n item['author'] = q['author']['username']\n item['url'] = q['url']\n item['score'] = q['score']\n item['score_label'] = self.convert_count(q['score'])\n item['answer_count'] = q['answer_count']\n item['answer_count_label'] = self.convert_count(q['answer_count'])\n item['view_count'] = q['view_count']\n item['view_count_label'] = self.convert_count(q['view_count'])\n item['added_at'] = q['added_at']\n item['added_at_label'] = timeago.format(datetime.fromtimestamp(int(q['added_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['last_activity'] = q['last_activity_at']\n item['last_activity_label'] = timeago.format(datetime.fromtimestamp(int(q['last_activity_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['has_more_comments'] = False\n item['has_more_answers'] = False\n item['has_accepted_answer'] = q['has_accepted_answer']\n item['closed'] = q['closed']\n\n item['tags'] = []\n for tag in q['tags']:\n item['tags'].append({'name': tag})\n\n return item", "def test_questions_answers_add_model(self):\n content = Content(header = \"Test_Header\", content = \"Test_Content\")\n question = Questions(question_text = \"Test_Question?\", content = content)\n answer = Answers(answer_text = \"Answer_Test\", correct = 0, question = question)\n db.session.add(content)\n db.session.add(question)\n db.session.add(answer)\n db.session.commit()\n self.assertEqual(Questions.query.filter_by(question_text = \"Test_Question?\").count(), 1)\n self.assertEqual(Answers.query.filter_by(answer_text = \"Answer_Test\", correct = 0, question = question).count(), 1)", "def as_dict(self):\n\n data = {}\n data['text'] = self.question\n data['tier'] = self._get_points(int(self.game_round), int(self.tier))\n try:\n data['source'] = self.source\n except AttributeError:\n data['source'] = False\n print self.question\n print self.answers\n data['answers'] = [\n {'text': answer[False]} if answer.has_key(False) \\\n else {'text': answer[True], 'right': True} \\\n for answer in self.answers\n ]\n if hasattr(self, 'media'):\n def gen_questions():\n q_data = {}\n for f in self.media['question']:\n q_data[self.__type_by_extension(\n os.path.sep.join(os.path.join([self.media_path, f]))\n )] = os.sep.join([self.web_root, f])\n return q_data\n def gen_explanation():\n \"\"\"Sorry, hacky. 
Quick fix required only 1st element is taken\"\"\"\n f = self.media['explanation'][0]\n k = self.__type_by_extension(os.path.sep.join(\n os.path.join([self.media_path, f])))\n v = [os.sep.join([self.web_root, expl]) \\\n for expl in self.media['explanation']]\n if v:\n v = v[0]\n else:\n v = \"\"\n return {'explanation': {k: v}}\n #): os.sep.join([self.web_root, f])\n\n #[os.sep.join([self.web_root, expl]) \\\n # for expl in self.media['explanation']]}\n def k_not_found():\n raise KeyError(\"Media keyword not found\")\n\n for k in self.media.keys():\n m_data = dict(\n question = gen_questions,\n explanation= gen_explanation,\n k_not_found = \"lambda x: pass\",\n ).get(k, 'k_not_found')()\n for key, value in m_data.items():\n data[key] = value\n return data", "def create_test_data(self):\n fake = Faker(['en_US', 'ja_JP', 'el_GR', 'de_DE'])\n\n self.actor_request = {\n 'name': fake.name(),\n 'age': random.randint(22, 88),\n 'gender': random.choice(['M', 'F'])\n }\n\n self.movie_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n 'releaseDate': str(fake.date_between())\n }\n\n self.actor_update_request = {\n 'name': fake.name(),\n }\n\n self.movie_update_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n }\n\n for _ in range(30):\n actor_name = fake.name()\n actor_age = random.randint(22, 88)\n actor_gender = random.choice(['M', 'F'])\n\n movie_title = fake.color_name() + ' ' + fake.street_suffix()\n movie_release_date = str(fake.date_between())\n\n actor = Actor(actor_name, actor_age, actor_gender)\n actor.insert()\n\n movie = Movie(movie_title, movie_release_date)\n movie.insert()\n\n for _ in range(20):\n actors = Actor.query.all()\n movies = Movie.query.all()\n\n actor_to_update = random.choice(actors)\n movie_to_update = random.choice(movies)\n actor_to_update.movies.append(movie_to_update)", "def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = distance_matrix.tolist()\r\n data['time_matrix'] = time_matrix.tolist()\r\n data['time_windows'] = time_windows.tolist()\r\n data['pickups_deliveries'] = pickup_deliveries.tolist()\r\n data['demands'] = demand\r\n data['num_vehicles'] = 20\r\n data['vehicle_capacities'] = [20 * i / i for i in range(1, num_vehicles+1)]\r\n data['depot'] = (2 * length) - 1\r\n return data", "def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = mtrx.create_distance_matrix(mtrx.create_data()) \r\n data['demands'] = clean.demands\r\n # Each location has a demand corresponding to the quantity—for example, \r\n # weight or volume—of the item to be picked up.\r\n data['vehicle_capacities'] = capacity\r\n # Each vehicle has a capacity: the maximum quantity that the vehicle can hold. 
\r\n # As a vehicle travels along its route, the total quantity of the items it is carrying \r\n # can never exceed its capacity.\r\n data['num_vehicles'] = number\r\n data['depot'] = 0\r\n return data", "def get_answers():\n count = 1\n for i in range(200): # TODO : Fetch number of all items first\n r = requests.get('http://api.stackexchange.com/2.2/answers?site=eosio&filter=!b1MMEb*6iF.PM5&pagesize=100&page={}'.format(count))\n data = json.loads(r.text)\n for item in data['items']:\n own = item['owner']['user_id']\n dsp = item['owner']['display_name']\n qn_id = item['question_id']\n try:\n owner = User.objects.get(username=own, se_display_name=dsp)\n question = Question.objects.get(se_question_id=qn_id)\n except Exception:\n owner = None\n question = None\n if owner and question:\n Answer.objects.create(owner=owner, question=question, body=item['body'],\n se_question_id=qn_id, is_accepted=item['is_accepted'],\n se_answer_id=item['answer_id'], score=item['score'])\n\n count += 1\n print(count)", "def __init__(self, data):\n\n data = data['results']\n textList = {}\n ansList = {}\n diffList = {}\n for i in range(len(data)):\n key = str(i)\n if \"&\" in data[i]['question']:\n pass\n else:\n textList[key] = data[i]['question']\n ansList[key] = data[i]['correct_answer'].lower()\n diffList[key] = data[i]['difficulty']\n\n self.textList = textList\n self.ansList = ansList\n self.diffList = diffList\n self.data = data\n return", "def _create_examples(self, lines, set_type):\n examples = []\n for idx, line in enumerate(lines):\n item = json.loads(line.strip())\n question_id = \"%s-%s\" % (set_type, idx)\n context = item[\"context\"]\n question = item[\"question\"]\n endings = [item[\"answerA\"],item[\"answerB\"],item[\"answerC\"] ]\n label = item[\"correct\"]\n #race_id = \"%s-%s\" % (set_type, data_raw[\"race_id\"])\n #article = data_raw[\"article\"]\n #for i in range(len(data_raw[\"answers\"])):\n #truth = str(ord(data_raw[\"answers\"][i]) - ord(\"A\"))\n #question = data_raw[\"questions\"][i]\n #options = data_raw[\"options\"][i]\n\n examples.append(\n InputExample(\n example_id=question_id,\n question=question,\n contexts=[context,context,context],\n endings=[endings[0], endings[1], endings[2]],#, options[3]\n label=label,\n )\n )\n return examples", "def __init__(self, guid, text_a, text_b=None, label=None, logits=None, meta: Optional[Dict] = None, idx=-1,\n num_choices=1,answer_idx=[]):\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n self.logits = logits\n self.idx = idx\n self.num_choices = num_choices\n self.meta = meta if meta else {}\n self.answer_idx = answer_idx", "def computed_answer_setup(self, name):\r\n\r\n script = self.COMPUTED_ANSWER_SCRIPT\r\n\r\n computed_xml = CustomResponseXMLFactory().build_xml(answer=script)\r\n\r\n ItemFactory.create(\r\n parent_location=self.section.location,\r\n category='problem',\r\n boilerplate='customgrader.yaml',\r\n data=computed_xml,\r\n display_name=name\r\n )\r\n\r\n # define the correct and incorrect responses to this problem\r\n self.correct_responses[name] = self.COMPUTED_ANSWER_CORRECT\r\n self.incorrect_responses[name] = self.COMPUTED_ANSWER_INCORRECT\r\n\r\n # re-fetch the course from the database so the object is up to date\r\n self.refresh_course()", "def normalize_data(typeform_response):\n survey = json.loads(typeform_response.survey)\n response = json.loads(typeform_response.response)\n answers = {}\n response_set = response.get('answers') or []\n for answer in response_set:\n field_id = 
answer.get('field').get('id')\n value_key = answer.get('type')\n value = json.dumps(answer.get(value_key))\n\n field = find_field(field_id, survey)\n field_title = field.get('title') if field else '??'\n\n answers[field_id] = {\n 'field_title': field_title,\n 'answer': value,\n }\n\n if typeform_response.study_group:\n answers['study_group_id'] = {\n 'field_title': 'Learning circle ID',\n 'answer': typeform_response.study_group.id,\n }\n answers['study_group_name'] = {\n 'field_title': 'Learning circle name',\n 'answer': typeform_response.study_group.name\n }\n answers['course'] = {\n 'field_title': 'Course',\n 'answer': typeform_response.study_group.course.title,\n }\n answers['facilitator'] = {\n 'field_title': 'Facilitator',\n 'answer': typeform_response.study_group.created_by.email,\n }\n if typeform_response.study_group.team:\n answers['team'] = {\n 'field_title': 'Team',\n 'answer': typeform_response.study_group.team.name\n }\n\n return answers", "def example_data():\n\n # In case this is run more than once, empty out existing data\n User.query.delete()\n Answer.query.delete()\n Question.query.delete()\n\n # Add sample users, answers and questions\n cat = User(user_name=\"Cat\", email=\"[email protected]\", password=\"abc\")\n dog = User(user_name=\"Dog\", email=\"[email protected]\", password=\"abc\")\n horse = User(user_name=\"Horse\", email=\"[email protected]\", password=\"abc\")\n\n db.session.add_all([cat, dog, horse])\n db.session.commit()\n\n question_1 = Question(question_id=\"q1\", title=\"Should we save the planet?\", description=\" \", user_id=3)\n question_2 = Question(question_id=\"q2\", title=\"Is recycling pointless?\", description=\" \", user_id=3)\n question_3 = Question(question_id=\"q3\", title=\"Mustard or Ketchup?\", description=\" \", user_id=1)\n\n db.session.add_all([question_1, question_2, question_3])\n db.session.commit()\n\n answer_1 = Answer(question_id=\"q1\", user_id=1, body=\"Yes, I agree.\")\n answer_2 = Answer(question_id=\"q2\", user_id=2, body=\"No, I disagree.\")\n answer_3 = Answer(question_id=\"q3\", user_id=3, body=\"Hrm, I'm indifferent.\")\n\n db.session.add_all([answer_1, answer_2, answer_3])\n db.session.commit()", "def create(self, validated_data):\n question = Question.objects.create(**validated_data)\n question.save()\n if 'tag' in self.context['request'].data:\n tag = Tag.objects.get(id=self.context['request'].data['tag'])\n question_tag = QuestionTag.objects.create(question=question,\n tag=tag)\n question_tag.save()\n return question", "def populate(self, response):\n answers = self.filter(response=response)\n if response.survey:\n questions = Question.objects.filter(survey=response.survey).exclude(\n pk__in=answers.values('question'))\n answers = list(answers)\n for question in questions:\n answers += [Answer(question=question)]\n return answers", "def create(self):", "def create_question(question_text, days, choices=('choice 1',)):\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n for choice in choices:\n question.choice_set.create(choice_text=choice)\n return question", "def create(self, data):\n raise NotImplementedError", "def create():", "def create():", "def _createPoints(self):\n self.doc2quest = self._docMapping()\n\n self.unigram, self.bigram = invertedIndex(self.documents)\n self.points = [dataPoint(key, self) for key in self.questions.keys()]", "def create(cls, interview, question, answer):\n value = int(answer)\n prefix = 
'flow-test'\n if interview.interview_channel:\n prefix = interview.interview_channel.user_identifier\n # zero fill to 1billion\n text_value = '%s-%s' % (prefix, cls.prep_value(value))\n return super(\n AutoResponse,\n cls).create(\n interview,\n question,\n answer,\n as_text=value,\n as_value=text_value)", "def create_question():\n if request.content_type != \"application/json\":\n abort(415)\n question_text = request.json['question']\n answer = request.json['answer']\n difficulty = request.json['difficulty']\n category = request.json['category']\n\n question_object = Question(question_text, answer, category, difficulty)\n db.session.add(question_object)\n db.session.commit()\n return jsonify({\n \"success\": True\n }), 201", "def create_data_model():\n data = {}\n data['distance_matrix'] = transit_c\n data['post'] = pospt_c\n data['fixed_cost'] = fc*1000\n data['demands'] = total_demand\n data['vehicle_capacities'] = capacity_list_function(routes,S)\n data['time_capacities'] = time_list_function(routes,Tmax)\n data['num_vehicles'] = routes+1\n data['depot'] = 0\n return data", "def __init__(self):\n\n self.question_list = self.read_quiz_json()", "def post(self, request, *args, **kwargs):\n # user = request.user\n quizTakerId = kwargs[\"pk\"]\n quizTaker = QuizTakers.objects.filter(id=quizTakerId).first()\n data = request.data[\"questions\"]\n # question = Question.objects.first()\n\n if len(data) == 0:\n data = [{'question': 0 }]\n\n serializer = ResponseSerializer(data=data, many=True)\n if serializer.is_valid():\n serializer.save(quiztaker=quizTaker)\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(serializer.errors, status=status.HTTP_404_NOT_FOUND)", "def get_data_to_create_object(self):\n return {}", "def __init__(self, accepted_answer_id=int, answer_count=int, creation_date=str, is_answered=bool,\n link=str, question_id=int, score=int, title=str, view_count=int, user=StackExchangeUser):\n self.__accepted_answer_id = accepted_answer_id\n self.__answer_count = answer_count\n self.__creation_date = creation_date\n self.__is_answered = is_answered\n self.__link = link\n self.__question_id = question_id\n self.__score = score\n self.__title = title\n self.__view_count = view_count\n self.__user = user", "def _create_guess_datasets(self):\n raise NotImplementedError('Please override the _create_guess_datasets '\n 'specific to your model')", "def set_answers(self, answers):\n self.logger.info(\"Add answer : %s\" % answers)\n try:\n page_index = 0\n for page in self._answer_payload['pages']:\n if page['id'] == self._current_page['id']:\n break\n page_index += 1\n\n if page_index == len(self._answer_payload['pages']): # page not found\n self._answer_payload['pages'].append({\n \"id\": self._current_page['id'],\n \"questions\": []\n })\n\n question_index = 0\n for question in self._answer_payload['pages'][page_index]['questions']:\n if question['id'] == self._current_question['id']:\n break\n question_index += 1\n\n if question_index == len(self._answer_payload['pages'][page_index]['questions']): # question not found\n self._answer_payload['pages'][page_index]['questions'].append({\n \"id\": self._current_question['id']\n })\n\n _answers = []\n for answer in answers:\n _answers.append(answer)\n\n self._answer_payload['pages'][page_index]['questions'][question_index]['answers'] = _answers\n\n if self._current_question['variable_id'] is not None:\n self._answer_payload['pages'][page_index]['questions'][question_index]['variable_id'] = 
str(self._current_question['variable_id'])\n\n except Exception as e:\n self.logger.error(\"Error on add answer : %s\" % e)", "def __init__(self, name):\n self.name = name\n self.questions = []", "def get_answers(self):\r\n anshtml = '<span class=\"openended-answer\"><pre><code>{0}</code></pre></span>'.format(self.answer)\r\n return {self.answer_id: anshtml}", "def __init__(self, question=u\"\", tier=0, answers=[], game_round=0,\n media=(\"\", \"\", \"\"), media_path=\"data\", web_root=\"data\"):\n self.question = question\n self.answers = answers\n self.tier = tier\n self.game_round = game_round\n self.media = media\n self.media_path = media_path\n self.web_root = web_root", "def answer_question(self, ques, ans):\n res = Response(Answer(ans, len(self.response_list)), ques, self)\n self.response_list.append(res)\n return res", "def setup(self, *args):\n\n responses = [\n ('Yes.', 'eq'),\n ('No.', 'eq'),\n ('Nope.', 'eq'),\n ('Maybe.', 'eq'),\n ('Possibly.', 'eq'),\n ('It could be.', 'eq'),\n (\"No. No, I don't think so.\", 'eq/2'),\n ('Without a doubt.', 'eq/2'),\n ('I think... Yes.', 'eq/2'),\n ('Heck yes!', 'eq/2'),\n ('Maybe. Possibly. It could be.', 'eq/2'),\n ('Ask again later.', 'eq/3'),\n (\"I don't know.\", 'eq/3'),\n (\"I'm sorry, I was thinking of bananas\", 'eq/100'),\n ]\n\n responses += [(x, 'eq/10') for x in obliques]\n self.advices = [(x, 1) for x in obliques]\n total_prob = 0\n real_resp = []\n evens = []\n for resp, prob in responses:\n if isinstance(prob, str):\n if prob.startswith('eq'):\n sp = prob.split('/')\n if len(sp) == 1:\n evens.append((resp, 1))\n else:\n div = int(sp[1])\n evens.append((resp, 1.0 / div))\n\n else:\n real_resp.append((resp, prob))\n total_prob += prob\n\n # Share is the probability of a \"eq\" probability. Share/2 would be the\n # probability of a \"eq/2\" probability.\n share = (1 - total_prob) / sum(div for _, div in evens)\n for resp, divisor in evens:\n real_resp.append((resp, share * divisor))\n\n self.responses = real_resp\n self.is_question = re.compile('.*\\?(\\?|!)*$')", "def create_question(user,title='title',text='text'):\n return Question.objects.create(created_by=user, title=title, text=text)", "def __init__(self, assignment):\n self.assignment = assignment\n self.question_to_attr = get_question_name_to_answer_attribute_table(\n self.__class__\n )\n\n for question_name, attr_name in self.question_to_attr.items():\n answer = get_answer_to_question(self.assignment, question_name)\n setattr(self, attr_name, answer)", "def __init__(self, raw_question_list):\n\n self.question_list = []\n self.total_penetrance = 0.0\n self.question_count = 0\n\n for raw_q in raw_question_list:\n q = Question(raw_q)\n self.question_list.append(q)\n self.total_penetrance += q.penetrance\n self.question_count += 1\n\n return None", "def generate_answer(self, question):\n\n # Recognize intent of the question using `intent_recognizer`.\n # Don't forget to prepare question and calculate features for the question.\n \n prepared_question = text_prepare(question)\n features = self.tfidf_vectorizer.transform([prepared_question])\n intent = self.intent_recognizer.predict(features)\n print(\"intent:\", intent)\n \n # Chit-chat part: \n if intent == 'dialogue':\n # Pass question to chitchat_bot to generate a response. 
\n response = self.chatbot.get_response(prepared_question)\n return response\n \n # Goal-oriented part:\n else: \n # Pass features to tag_classifier to get predictions.\n tag = self.tag_classifier.predict( features)[0]\n \n # Pass prepared_question to thread_ranker to get predictions.\n thread_id = self.thread_ranker.get_best_thread(prepared_question, tag)\n \n return self.ANSWER_TEMPLATE % (tag, thread_id)", "def __init__(self, exam_name):\n\n self.name = exam_name\n self.questions = []", "def make_dict(\n nn,\n q_id,\n polarity,\n context_cond,\n cat,\n subcat,\n answer_info,\n bias_targets,\n version,\n notes,\n context,\n question,\n ans_list,\n ans_place,\n):\n this_dict = {\n \"example_id\": nn,\n \"question_index\": q_id,\n \"question_polarity\": polarity,\n \"context_condition\": context_cond,\n \"category\": cat,\n \"answer_info\": answer_info,\n \"additional_metadata\": {\n \"subcategory\": subcat,\n \"stereotyped_groups\": bias_targets,\n \"version\": version,\n \"source\": notes,\n },\n \"context\": context.strip(),\n \"question\": question.strip(),\n \"ans0\": ans_list[0],\n \"ans1\": ans_list[1],\n \"ans2\": ans_list[2],\n \"label\": ans_place,\n }\n return this_dict", "def create_data():\n a_season = create_season()\n create_teams(a_season) # we know they are called Team A and Team B\n create_play_positions() # we need these in place to test the score sheet creation\n create_game_order() # these too ...\n (week_one, week_two) = create_weeks(a_season, 2)\n match_one = Match(\n week=week_one,\n season=a_season,\n away_team=AwayTeam.objects.get(name='Team A'),\n home_team=HomeTeam.objects.get(name='Team B'),\n )\n match_one.save()\n return match_one.id", "def create_question(question_text, days, create_choice=True):\n\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n if create_choice:\n question.choice_set.create(choice_text=\"Choice 1\", votes=0)\n return question", "def create(hints=None,\r\n previous_answers=None,\r\n user_submissions=None,\r\n user_voted=None,\r\n moderate=None,\r\n mod_queue=None):\r\n # Should have a single child, but it doesn't matter what that child is\r\n field_data = {'data': CHModuleFactory.sample_problem_xml, 'children': [None]}\r\n\r\n if hints is not None:\r\n field_data['hints'] = hints\r\n else:\r\n field_data['hints'] = {\r\n '24.0': {'0': ['Best hint', 40],\r\n '3': ['Another hint', 30],\r\n '4': ['A third hint', 20],\r\n '6': ['A less popular hint', 3]},\r\n '25.0': {'1': ['Really popular hint', 100]}\r\n }\r\n\r\n if mod_queue is not None:\r\n field_data['mod_queue'] = mod_queue\r\n else:\r\n field_data['mod_queue'] = {\r\n '24.0': {'2': ['A non-approved hint']},\r\n '26.0': {'5': ['Another non-approved hint']}\r\n }\r\n\r\n if previous_answers is not None:\r\n field_data['previous_answers'] = previous_answers\r\n else:\r\n field_data['previous_answers'] = [\r\n ['24.0', [0, 3, 4]],\r\n ['29.0', []]\r\n ]\r\n\r\n if user_submissions is not None:\r\n field_data['user_submissions'] = user_submissions\r\n else:\r\n field_data['user_submissions'] = ['24.0', '29.0']\r\n\r\n if user_voted is not None:\r\n field_data['user_voted'] = user_voted\r\n\r\n if moderate is not None:\r\n field_data['moderate'] = moderate\r\n\r\n descriptor = Mock(weight='1')\r\n # Make the descriptor have a capa problem child.\r\n capa_descriptor = MagicMock()\r\n capa_descriptor.name = 'capa'\r\n capa_descriptor.displayable_items.return_value = [capa_descriptor]\r\n 
descriptor.get_children.return_value = [capa_descriptor]\r\n\r\n # Make a fake capa module.\r\n capa_module = MagicMock()\r\n capa_module.lcp = MagicMock()\r\n responder = MagicMock()\r\n\r\n def validate_answer(answer):\r\n \"\"\" A mock answer validator - simulates a numerical response\"\"\"\r\n try:\r\n float(answer)\r\n return True\r\n except ValueError:\r\n return False\r\n responder.validate_answer = validate_answer\r\n\r\n def compare_answer(ans1, ans2):\r\n \"\"\" A fake answer comparer \"\"\"\r\n return ans1 == ans2\r\n responder.compare_answer = compare_answer\r\n\r\n capa_module.lcp.responders = {'responder0': responder}\r\n capa_module.displayable_items.return_value = [capa_module]\r\n\r\n system = get_test_system()\r\n # Make the system have a marginally-functional get_module\r\n\r\n def fake_get_module(descriptor):\r\n \"\"\"\r\n A fake module-maker.\r\n \"\"\"\r\n return capa_module\r\n system.get_module = fake_get_module\r\n module = CrowdsourceHinterModule(descriptor, system, DictFieldData(field_data), Mock())\r\n\r\n return module", "def add_user_answer(self, question, guess, correct):\n user_answer = UserAnswer()\n user_answer.user = self.user\n user_answer.quiz = self.quiz\n user_answer.question = question\n user_answer.answer = guess\n user_answer.correct = correct\n user_answer.save()", "def post_answer(request):\n if request.method == 'GET':\n response_data = list(range(10))\n # geodata = response.json()\n return Response(\n data=response_data\n )\n # snippets = Snippet.objects.all()\n # serializer = SnippetSerializer(snippets, many=True)\n # return Response(serializer.data)\n\n elif request.method == 'POST':\n data = request.data\n print(type(data))\n userResponse.append(data)\n if int(data['questionCode']) in userQuestions:\n user_question = userQuestions[int(data['questionCode'])]\n print(user_question)\n\n # get response and movie list\n updatedMovieList = imdb.get_imdb_movies()\n robotMessage = assistant.ask_assistant(user_question)\n responseData = {\"nextQuestionString\": robotMessage,\"nextQuestionCode\": int(data['questionCode'])+1,\"updatedMovieList\" : updatedMovieList}\n return Response(\n data=responseData\n )", "def create_evaluation_template(client, survey_name):\n\n loremipsum = \"Lorem ipsum dolor sit amet, consecteteur adipiscing elit donec proin nulla vivamus. Augue donec a erat ve sagittis nisi rhoncus curabitur mauris. Nulla ipsum tortor sagittis adipiscing primis interdum suspendisse lobortis etiam risus nullam. Donec massa quam dis at nibh dolor netus quis. Purus etiam. Dolor neque nunc netus eget nulla faucibus vestibulum aenean class senectus. Porta dolor. Donec morbi. Felis lorem tempus luctus malesuada laoreet curae justo rhoncus ante facilisi parturient malesuada elit laoreet amet. Fusce augue nisi ligula praesent condimentum nascetur fringilla in id lectus per nunc. Lacus metus nisl orci odio maecenas adipiscing. 
Velit nulla a tempor class placerat ac condimentum nisi taciti at eros.\"\n\n loremipsum_A = \"A: \\n\" + loremipsum\n loremipsum_B = \"B: \\n\" + loremipsum\n\n # Create a new survey\n survey_id = client.create_survey(survey_name)\n # Create 2 more pages in the survey\n for i in range(0, 2):\n client.create_new_page(survey_id, str(i), loremipsum) # title and description\n\n # Get the page ids\n page_ids = client.get_pages_ids(survey_id) # There will be 3\n\n answers = [\"A\", \"B\"]\n question_title = \"Which of the following abstract is more relevant to the one above?\"\n for i, ID in enumerate(page_ids):\n client.update_title_description_of_page(survey_id, ID, \"Abstract\" + str(i), loremipsum)\n client.add_single_choice(survey_id, ID, question_title, answers)\n client.add_paragraph(survey_id, ID, loremipsum_A)\n client.add_paragraph(survey_id, ID, loremipsum_B)\n\n return survey_id", "def new_answer(request):\n if request.method == \"POST\":\n author = request.POST.get(\"author\")\n content = request.POST.get(\"content\")\n date = datetime.datetime.now()\n answer_tuple = {\n \"author\": author,\n \"content\": content,\n \"votes\": 0,\n \"topic_index\": request.matchdict[\"url\"],\n \"answer_date\": date.strftime(\"%d/%m/%Y\"),\n }\n request.db[\"answer\"].insert(answer_tuple)\n return HTTPFound(location=\"/topic/\" + request.matchdict[\"url\"])\n return HTTPFound(location=\"/\")", "def create(self):\n\n pass", "def create_models( self ):", "def generate_answer(obj):\n # Initialize variables\n answer = None\n answer_table = None\n answer_q = None\n answer_r = None\n answer_a = None\n answer_b = None\n answer_d = None\n\n # Get object values\n op = obj.get('operation')\n m = obj.get('mod')\n f = obj.get('f')\n g = obj.get('g')\n h = obj.get('h')\n a = obj.get('a')\n b = obj.get('b')\n deg = obj.get('deg')\n additional_data = obj.get('additional_data')\n poly_mod = obj.get('operation_values')\n\n # Operation switch\n if op == \"display-poly\":\n answer = f # Luke\n elif op == \"add-poly\":\n answer = add_poly(f, g, m) # Luke\n elif op == \"subtract-poly\":\n answer = subtract_poly(f, g, m) # Luke\n elif op == \"multiply-poly\":\n answer = mult(f, g, m) # Janneke\n elif op == \"long-div-poly\":\n answer_q, answer_r = long_div_poly(f, g, m) # Pol\n elif op == \"euclid-poly\":\n answer_a, answer_b, answer_d = euclid_extended_poly(f, g, m) # Pol\n elif op == \"equals-poly-mod\":\n answer = equals_poly_mod(f, g, h, m) # Janneke\n elif op == \"irreducible\":\n answer = is_irreducible(f, m) # Edwin\n elif op == \"find-irred\":\n answer = find_irred(deg, m) # Edwin\n elif op == \"mod-poly\":\n if additional_data == 'add-table': # Edwin\n answer_table = add_table_field(m, poly_mod)\n elif additional_data == 'mult-table': # Edwin\n answer_table = multiply_table_field(m, poly_mod)\n elif additional_data == 'display-field': # Luke\n answer = display_field(a, m, poly_mod)\n elif additional_data == 'add-field': # Janneke\n answer = add_field(poly_mod, m, a, b)\n elif additional_data == 'subtract-field': # Janneke\n answer = subtract_field(poly_mod, m, a, b)\n elif additional_data == 'multiply-field': # Janneke\n answer = multiply_field(poly_mod, m, a, b)\n elif additional_data == 'inverse-field': # Luke\n answer = inverse_field(a, m, poly_mod)\n elif additional_data == 'division-field': # Pol\n answer = division_field(a, b, m, poly_mod)\n elif additional_data == 'equals-field': # Luke\n answer = equals_field(a, b, m, poly_mod)\n elif additional_data == 'primitive': # Pol# Different Answer\n answer = 
is_primitive(a, m, poly_mod)\n elif additional_data == 'find-prim': # Pol\n answer = find_primitive(m, poly_mod)\n else:\n answer = 'Operation not Supported.'\n else:\n answer = 'Operation not Supported.'\n\n # Parse result to a valid polynomial\n if answer:\n obj['answer'] = display_poly(answer, m)\n if answer_table:\n obj['answer'] = display_table(answer_table, m)\n if answer_q:\n obj['answer-q'] = display_poly(answer_q, m)\n if answer_r:\n obj['answer-r'] = display_poly(answer_r, m)\n if answer_a:\n obj['answer-a'] = display_poly(answer_a, m)\n if answer_b:\n obj['answer-b'] = display_poly(answer_b, m)\n if answer_d:\n obj['answer-d'] = display_poly(answer_d, m)\n\n return obj", "def fetchQuestions (self):\n # Create query and get data\n query = \"SELECT * from \" + self.dbTable + \" where main_ID = '\" + str(self.ID) + \"'\";\n data = self.sqlConnection.executeSelectQuery(query);\n \n # Convert the data into Question objects\n self.convertQuestions(data)", "def create_populated_question(answer_states: List[bool], question_text: str = None):\n question = create_question(question_text)\n\n for state in answer_states:\n create_answer(question, state)\n\n return question", "def _forecast_template(self,ifp):\n \n output = [{'answer_id':a['id'],'value':a['probability']} for a in ifp['answers']]\n return output", "def create_data_model():\n data = {}\n\n data['addresses'] = ['-0.068372,109.362745']\n data['demands'] = [0]\n data['depot'] = 0 \n data['vehicle_capacities'] = []\n\n orders = Order.query.all()\n for u in orders:\n data['addresses'].append(u.latlon)\n data['demands'].append(int(u.load))\n\n vehicles = Vehicle.query.all()\n for u in vehicles:\n data['vehicle_capacities'].append(int(u.capacity))\n\n data['num_vehicles'] = len(vehicles)\n\n data['distance_matrix'] = create_distance_matrix(data)\n\n print(len(data['addresses']))\n print(data['demands'])\n\n return data", "def create_model(self):\n pass", "def create_model(self):\n pass", "def prepare_data(self):", "def return_questions_data():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n \n cursor.execute(\"select * from questions\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Answer']\n questions = {}\n for q,a in data:\n table.add_row([q,a])\n questions[q] = a\n conn.close()\n\n return table, questions", "def get(self):\n user = getAuthData()\n question_list = list_questions()\n # user_question_list = list_questions_by_username(user['username'])\n # nonuser_question_list = list_questions_by_username(user['username'], invert=True)\n\n tasks = get_tasks().values()\n\n # filter out the SUCCESS/FAILURE tasks\n tasks = [t for t in tasks if not (t['state'] == 'SUCCESS' or t['state'] == 'FAILURE' or t['state'] == 'REVOKED')]\n\n # get question hashes\n question_tasks = {q.id:[] for q in question_list}\n for t in tasks:\n if not t['args']:\n continue\n match = re.match(r\"[\\[(]'(.*)',?[)\\]]\", t['args'])\n if not match:\n continue\n question_id = match.group(1)\n question_tasks[question_id].append(t)\n\n # split into answer and update tasks\n for t in tasks:\n t['type'] = 'answering' if t['name'] == 'manager.tasks.answer_question' else \\\n 'refreshing KG' if t['name'] == 'manager.tasks.update_kg' else \\\n 'something?'\n\n def augment_info(question):\n answerset_timestamps = [a.timestamp for a in question.answersets]\n if answerset_timestamps:\n latest_idx = answerset_timestamps.index(max(answerset_timestamps))\n 
latest_answerset_id = question.answersets[latest_idx].id\n latest_answerset_timestamp = question.answersets[latest_idx].timestamp\n else:\n latest_answerset_id = None\n latest_answerset_timestamp = None\n q = question.toJSON()\n q['user_email'] = question.user.email\n q.pop('user_id')\n q.pop('machine_question')\n return {'latest_answerset_id': latest_answerset_id,\n 'latest_answerset_timestamp': latest_answerset_timestamp.isoformat() if latest_answerset_timestamp else None,\n 'tasks': [t['type'] for t in question_tasks[question.id]],\n **q}\n\n return [augment_info(q) for q in question_list], 200", "def collection(self):\n questions = []\n choice_list = []\n answers = []\n\n if self.form=='The correct German word':\n for i in range(self.num_ques):\n question, options, answer = self.generate_eng2ger()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n else:\n for i in range(self.num_ques):\n question, options, answer = self.generate_ger2eng()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n\n return questions, choice_list, answers", "def process(data_item, article_id):\n questions = []\n answers = []\n paragraph = [article_id, data_item['context']]\n\n for item in data_item['qas']:\n question = [item[\"id\"], item[\"question\"], item['is_impossible']]\n questions.append(question)\n if item['is_impossible']:\n continue\n answer_options = item[\"answers\"]\n answer_set = set()\n for option in answer_options:\n answer_tuple = (option['text'], option['answer_start'])\n answer_set.add(answer_tuple)\n for index, answer_tuple in enumerate(answer_set):\n answer = [\"{}_{}\".format(item[\"id\"], index+1), item[\"id\"], answer_tuple[0], answer_tuple[1]]\n answers.append(answer)\n return paragraph, questions, answers", "def __init__(self, question_list):\n self.question_list = question_list\n self.question_number = 0\n self.score = 0", "def get_data(self):\n self.data = dict()\n # list to save all the attributes we are going to create\n self.attr = []\n # list to save all the groups available in the incomming input\n self.groups.extend(self.values.keys())\n # Grouping\n self.parse_data()", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass" ]
[ "0.64814955", "0.63967526", "0.63827956", "0.6260587", "0.6246229", "0.62410545", "0.6156967", "0.61112285", "0.61091954", "0.61057824", "0.6091933", "0.6012156", "0.6009843", "0.59592485", "0.59487975", "0.5926686", "0.59090936", "0.59057355", "0.5905192", "0.58983946", "0.58766586", "0.58738375", "0.58670944", "0.58562773", "0.5813829", "0.58111894", "0.5784844", "0.5784303", "0.57695705", "0.5737954", "0.57376444", "0.5728867", "0.5709234", "0.5700956", "0.56901145", "0.5684299", "0.56836015", "0.5680687", "0.567658", "0.56625754", "0.5659605", "0.5655674", "0.5647192", "0.56442875", "0.5640988", "0.56355315", "0.5634834", "0.5622103", "0.5610706", "0.55700284", "0.556624", "0.554963", "0.554963", "0.55317116", "0.5522533", "0.5513756", "0.55105144", "0.5505884", "0.5505494", "0.5498732", "0.54900455", "0.54877263", "0.5467479", "0.54627925", "0.5452037", "0.5451389", "0.5432414", "0.5432151", "0.5430184", "0.54278183", "0.54219073", "0.5417124", "0.5392819", "0.5389245", "0.53877467", "0.53870666", "0.53848094", "0.5381078", "0.53804445", "0.5369766", "0.5362304", "0.5361099", "0.53592277", "0.53549814", "0.5354685", "0.5353518", "0.53464377", "0.53447694", "0.5341145", "0.5341145", "0.53401273", "0.5338316", "0.53380364", "0.53287756", "0.53227246", "0.5318545", "0.5316564", "0.5304985", "0.5304985", "0.5304985" ]
0.6539529
0
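One negative passage in the record above ends with a pandas to_sql append call. For context, a minimal runnable sketch of that pattern; the SQLite engine and the table name below are illustrative stand-ins, not the dataset's actual Database.DATABASE.engine:

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite:///:memory:")  # stand-in for Database.DATABASE.engine
option_type = "questionnaire_options"  # hypothetical table name

questionnaire = pd.DataFrame({"question": ["Q1", "Q2"], "answer": ["A1", "A2"]})
# if_exists="append" adds rows, creating the table on first use;
# index=True also writes the DataFrame index as a column.
questionnaire.to_sql(option_type, con=engine, if_exists="append", index=True)
print(pd.read_sql_table(option_type, engine))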
same as create_user but using user manager
def create_user_using_manager(username,password):
    manager = UserManager()
    return manager.create_user(username=username, password=password)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def create_user(email, password, f_name, l_name):\n pass", "def _create(cls, model_class, *args, **kwargs):\n manager = cls._get_manager(model_class)\n # The default would use ``manager.create(*args, **kwargs)``\n return manager.create_user(*args, **kwargs)", "def sample_user(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='[email protected]', )\n u.set_password('test_password')\n u.save()\n self.user = u\n return u", "def create_new_user():\n return get_user_model().objects.create_user(\n email='[email protected]',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n return user", "def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n\n return user", "def sample_user_third(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name3\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def sample_user(email='[email protected]', password='open@123'):\n return get_user_model().objects.create_user(email, password)", "def new_user(cls, user):\r\n pass", "def sample_user(email, password, is_doctor, is_hospital_admin):\n return MyUser.objects.create_user(email, is_hospital_admin, is_doctor, password)", "def create_user(username,password):\n return User.objects.create_user(username=username,password=password)", "def users_create():", "def sample_user_second(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name2\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def create_new_user(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n\n return user_created", "def new_user(cls, user):\n pass", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def sample_user(email: str = \"[email protected]\", password: str = \"testpass\"):\n return get_user_model().objects.create_user(email, password)", "def 
create_user(email='[email protected]', password='testpass123'):\n return get_user_model().objects.create_user(email=email, password=password)", "def create_user(self):\n if not self.is_valid():\n return None\n # generate a username \n ids = User.objects.values_list('id', flat=True).order_by('-id')[:1]\n if len(ids) > 0:\n # ids[0] will be the maximum value (due to order_by: '-id')\n idnum = ids[0] + 1\n else:\n idnum = 1\n # create User object \n username = \"user%s\" % idnum\n # NOTE: store email in lower case\n email = self.clean_email().lower()\n password = self.clean_password2()\n user = User(username=username, email=email, password='tmp')\n user.save()\n # set the real password\n user.set_password(password)\n # make user inactive (until user has confirmed account)\n user.is_active = False\n # update\n user.save()\n return user", "def sample_user_fourth(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name4\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def sample_user(email='[email protected]', password='password'):\n return get_user_model().objects.create_user(email, password)", "def new_user():\n pass", "def create_user(UserName=None, MessageAction=None, FirstName=None, LastName=None, AuthenticationType=None):\n pass", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def create_user(change):\n return change()", "def sample_user(email='[email protected]', password='testpass'):\n\n return get_user_model().objects.create_user(email, password)", "def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)", "def user():\n\n user = User.objects.create(name='Janek', surname='Kowalski',\n internal_id='PUHgjdJ', is_administrator=True,\n is_payment_creator=True, is_payment_approver=False,\n can_delete_payment=True)\n return user", "def create_user(self, username, email, password=None, is_staff =False, is_superuser = False):\n print(\"create_user:\", \"creating .....\")\n if not email:\n raise ValueError('Users must have an email address')\n\n # get prefix \n try:\n db = select_write_db(model_name=self.model._meta.model_name)\n prefix = db.get_prefix \n except:\n uuid3 = uuid.uuid3(uuid.NAMESPACE_DNS,settings.USER_INIT_DATABASE)\n prefix = str(uuid3)[:8]\n \n # create uuidn\n uuidn = prefix + \"-\" + str(uuid.uuid4())[9:] \n\n user = self.model(\n username = username, \n email = self.normalize_email(email),\n nid = str(uuidn),\n )\n\n user.set_password(password)\n user.staff = is_staff\n user.admin = is_superuser\n\n if settings.SHARDING_USER_MODEL:\n user.save(using=str(db.get_name))\n db.count = db.count + 1\n db.save()\n else:\n user.save(using=self._db)\n return user", "def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", 
"def add_user(first_name,last_name,email,password,typeOfUser):\n user=User.objects.create(first_name=first_name,last_name=last_name,email=email,password=password,role=typeOfUser)\n return user", "def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def post(self):\r\n return create_user(request)", "def sample_user(email=\"[email protected]\", password=\"password123\"):\n\n return get_user_model().objects.create_user(email, password)", "def create_form_user(self, **kwargs):\n user = User.objects.create_user(\n **kwargs\n )\n return user", "def create_user(self, **kwargs):\n kwargs = self._prepare_create_user_args(**kwargs)\n user = self.user_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(user)", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def create_user(first_name,last_name,email,password):\n\n\tnew_user = User(first_name,last_name,email,password)\n\treturn new_user", "def sample_user_fifth(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name5\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def create_user(self, *args, **kwargs):\n user = User.objects.create_user(*args, **kwargs)\n return get_profile(user)", "def create_user(self, username, password):\n return self.User.objects.create_user(username, password=password)", "def create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user", "def create_user(self):\n return UserFactory.create()", "def _create_user(self, username, password, is_staff, **extra_fields):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n user = self.model(username=username,\n is_staff=is_staff, is_active=True,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, conn, name, password, group):\n user = conn.user.allocate(name, password, \"\", [group])\n return user", "def create_a_user(self, username='fry', email='[email protected]', password='Qwerty!234'):\n user = User.objects.create_user(username, email, password)\n user.save()\n return user", "def sample_user(username='arturbartecki', password='testpassword'):\n return get_user_model().objects.create_user(username, password)", "def create_users(self):\n from django.contrib.auth.models import User\n user = User.objects.create_user('red', '', 'red')\n user = User.objects.create_user('green', '', 'green')\n user = User.objects.create_user('blue', '', 'blue')", "def create_user(fname, lname, email, password, phone_number):\n user = User(fname = fname, lname = lname , email = email ,password = password, phone_number = phone_number)\n #setting password hash\n user.set_password(password)\n db.session.add(user)\n 
db.session.commit()\n\n return user", "def user(self):\n return self.create_user", "def create_test_user():\n return User.objects.create(username='test_username', password='test_password')", "def _create_user(self, username, email, password, phone, **extra_fields):\n\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, phone=phone, **extra_fields) # using email_id instead of email\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(user_name: str):\n user = User()\n user.username = user_name\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n #username = self.model.normalize_username(username)\n user = self.model( email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(username, email, password):\n return User.objects.create_user(username=username, email=email, password=password)", "def create(self, validated_data):\n # user = super().create(validated_data)\n # user.set_password(validated_data['password'])\n # user.save()\n\n user = User.objects.create_user(**validated_data)\n return user", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def create(self, validated_data):\n return get_user_model().objects.create_user(**validated_data)", "def _create_user(self, username, email, password, is_staff, is_superuser, first_name, last_name):\n now = timezone.now()\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email,username=username,\n first_name=first_name, last_name=last_name,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser,\n date_joined=now)\n user.uuid = generate_uuid()\n user.uniqueid = user.uuid[:4]\n user.set_password(password)\n user.save(using=self._db)\n return user", "def user(name, password, **kwargs):\n if not user_exists(name, **kwargs):\n create_user(name, password, **kwargs)", "def _create_user(self, username, email, persona_id, nombre_completo, password, is_staff, is_superuser,\n **kwargs):\n now = timezone.now()\n if not email:\n raise ValueError(_('El email debe ser proporcionado'))\n email = self.normalize_email(email)\n user = self.model(\n username=username,\n persona_id=persona_id,\n nombre_completo=nombre_completo,\n email=email,\n is_staff=is_staff,\n is_active=True,\n is_superuser=is_superuser,\n last_login=now,\n fecha_registro=now,\n **kwargs\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(user_name, password, tenant_name, auth_admin_url, admin_token):\n keystone = get_client(auth_admin_url, admin_token)\n tenants = keystone.tenants.list()\n my_tenant = [x for x in tenants if x.name==tenant_name][0]\n my_user = keystone.users.create(name=user_name, password=password, tenant_id=my_tenant.id)\n print my_user\n return my_user.to_dict()", "def 
create(self,validated_data):\n\n user = models.User.object.create_user(\n email = validated_data['email'],\n full_name = validated_data['full_name'],\n phone = validated_data['phone'],\n password = validated_data['password']\n )\n\n #user.set_password(validated_data['password'])\n user.save()\n return user", "def create(self, validated_data: dict):\n return User.objects.create_user(**validated_data)", "def _create_user(self, email, password, is_staff, is_superuser, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n print email, '*****'\n email = self.normalize_email(email)\n print email\n print '*****'\n user = self.model(email=email, is_staff=is_staff, is_active=True,\n is_superuser=is_superuser, **extra_fields)\n print user\n user.set_password(password)\n user.save(using=self._db)\n return user", "def sample_user(email=user_v['email'], password=user_v['password']):\n return get_user_model().objects.create_user(email, password)", "def create_user(self, **kwargs):\n\n user = self.user_model(**self._prepare_create_user_args(**kwargs))\n return self.put(user)", "def create(self, validated_data): # The create function is the function that's called when we create a new object. I t basically specifies all the available functions that you can override in the different serializers that are available. We're going to override the create function here. We're going to call the create user function in our model because by default it only calls the create function and we want to use our create user model manager function that we created in our models to create the user so we know that the password that it stores will be encrypted. Otherwise the password that it sets will just be the clear text password that we pass in and then the authentication won't work because it's expecting an encrypted salt key.\n return get_user_model().objects.create_user(**validated_data) # we're going to use this star syntax here to unwind this validated data into the parameters of the create user function.", "def _create_user(self, telephone,username, password, email=None, **kwargs):\n if not username:\n raise ValueError('请输入用户名')\n if not telephone:\n raise ValueError('请输入手机号')\n if not password:\n raise ValueError('请输入密码')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(telephone=telephone, username=username, email=email, **kwargs)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, username, password,\n is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n username = self.normalize_email(username)\n user = self.model(username=username,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser, last_login=now,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def new_user(testapp):\n SessionFactory = testapp.app.registry[\"dbsession_factory\"]\n with transaction.manager:\n dbsession = get_tm_session(SessionFactory, transaction.manager)\n new_user = User(username=\"test\", password=pwd_context.hash(\"test\"))\n dbsession.add(new_user)", "def create_user(self, email, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, password, **extra_fields)", "def create_user(self, phone_number, type, password, is_staff):\n return 
self.__create_user(phone_number, type, password, is_staff, False, False)", "def _create_user(self, password, is_active, is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not self.model.USERNAME_FIELD:\n raise ValueError('User model must have set USERNAME_FIELD')\n identifier = extra_fields.get(self.model.USERNAME_FIELD)\n if not identifier:\n raise ValueError((\"User's %s must be set\", self.model.USERNAME_FIELD))\n user = self.model(is_active=is_active, is_staff=is_staff, is_superuser=is_superuser,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(email, password, home_zipcode):\n\n user = User(email=email, password=password, home_zipcode=home_zipcode)\n db.session.add(user)\n db.session.commit()\n return user", "def _create_user(self, email, username, password,\n is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n # if not username:\n # raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n user = self.model(username=username, email=email,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_register_user(self, data, user_type):\n data.pop('password_confirm')\n data['user_type'] = user_type\n user = User.objects.create_user(**data)\n return user", "def create(self, request, *args, **kwargs):\n user = request.user\n if user.is_authenticated and not user.has_perm(\"users.add_user\"):\n self.permission_denied(request, message=_(\"You cannot create users.\"))\n return super().create(request, *args, **kwargs)", "def create_user(self, email, mobile_number, password, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, mobile_number, password , **extra_fields)", "def _create_user(self, email_or_phone, password, is_staff, is_superuser, **extra_fields):\n if not email_or_phone:\n raise ValueError('The given email_or_phone must be set')\n\n if \"@\" in email_or_phone:\n username, email, phone = (email_or_phone, email_or_phone, \"\")\n else:\n username, email, phone = (email_or_phone, \"\", email_or_phone)\n\n now = timezone.now()\n extra_fields.setdefault('is_staff', True)\n is_active = extra_fields.pop(\"is_active\", True)\n user = self.model(username=username, email=email,\n mobile=phone,\n is_staff=is_staff,\n is_active=is_active,\n is_superuser=is_superuser,\n\n date_joined=now,\n **extra_fields\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, username, email, password,\n is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n user = self.model(username=username, email=email,\n is_staff=is_staff,\n is_superuser=is_superuser,\n **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(email, password, fname, lname):\n\n user = User(email=email, password=password, fname=fname, lname=lname)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def default_user(self):\n self.user = self.create_user(create_token=True)\n return", "def create_user_by_id(cls, m_id):", "def create_user(\n *,\n user_in: schemas.UserCreate,\n) -> schemas.User:\n next_user_id = users[-1].id + 1 # type: ignore\n user = 
schemas.User(\n id=next_user_id,\n email=user_in.email,\n is_active=user_in.is_active,\n is_superuser=user_in.is_superuser,\n full_name=user_in.full_name,\n )\n users.append(user)\n return user" ]
[ "0.8181188", "0.7908192", "0.78201514", "0.77046186", "0.7671698", "0.7660212", "0.7622274", "0.7616442", "0.759572", "0.7572086", "0.7542684", "0.75390494", "0.7530234", "0.7527077", "0.75035036", "0.7481546", "0.74714476", "0.74663395", "0.74663395", "0.74663395", "0.7426541", "0.7417308", "0.74145496", "0.73876965", "0.7386723", "0.73780423", "0.7376609", "0.7373984", "0.73720276", "0.7370141", "0.7358448", "0.73365027", "0.7330745", "0.73281324", "0.73273313", "0.732427", "0.7323699", "0.73170096", "0.7309761", "0.73086154", "0.7289274", "0.72874445", "0.7275811", "0.72746354", "0.7274415", "0.7274415", "0.7274415", "0.726572", "0.7264546", "0.7248186", "0.7231491", "0.7219747", "0.7214329", "0.7205911", "0.72029364", "0.7196162", "0.7189268", "0.71887857", "0.71745145", "0.716309", "0.71616805", "0.71584076", "0.7154208", "0.71466", "0.7140361", "0.71330315", "0.7130486", "0.7130486", "0.7130486", "0.7130486", "0.7130486", "0.7130486", "0.7130486", "0.71295327", "0.71267074", "0.711792", "0.7105725", "0.7105501", "0.70971245", "0.70940983", "0.7091866", "0.70810723", "0.70802766", "0.7079728", "0.70781654", "0.7075943", "0.7075773", "0.70696706", "0.7067572", "0.7064098", "0.70625985", "0.7062538", "0.70573837", "0.70540476", "0.7053172", "0.7052295", "0.7049061", "0.7033308", "0.703172", "0.703031" ]
0.8066032
1
create random string using string.printable
def create_random_string(total_character):
    feed = string.printable
    words = ""
    i = 0
    while i < total_character:
        words += feed[random.randrange(0, len(feed) - 1)]
        i += 1
    return words
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rand_string():\n out = ''\n for _ in range(24):\n out += choice(ascii_letters)\n return out", "def generate_random_string():\n return \"\".join(random.choice(string.ascii_lowercase + string.digits) for _ in range(16)) # nosec", "def generateRandomString():\n return ''.join(b64encode(urandom(32)).decode('utf-8'))", "def gen_secret() -> str:\n r = random.randrange(0, 255) # INSECURE, just for demo\n r = hex(r)[2:]\n if len(r) == 1:\n return f'0{r}'\n return r", "def generate_rnd_msg() -> str:\n\n char_num = random.randint(8,20)\n i = 0\n s = \"\"\n for n in range(char_num):\n if i == char_num:\n break\n rnd_char = random.randint(0, len(string.ascii_lowercase) - 1)\n s += string.ascii_lowercase[rnd_char]\n i += 1\n\n return s", "def generateRandomePlainText():\n randomPlainTextArray = [random.choice('0123456789abcdef')\n for n in range(24)]\n randomPlainText = \"\".join(randomPlainTextArray)\n return randomPlainText", "def pwgen(length=16, ichars=string.ascii_letters+string.digits):\n return ''.join(random.choice(ichars) for i in range(length))", "def random_password():\n pass_len = secrets.choice(range(32, 49))\n return ''.join(secrets.choice(string.printable)\n for _ in range(pass_len))", "def random_string(length=8):\n return \"\".join([random.choice(string.letters + string.digits) for x in range(length)])", "def genRandString(dl = 10):\n ret = ''\n for i in range(dl) :\n ret += random.choice(string.ascii_letters + string.digits)\n return ret", "def random_string() -> str:\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(8))", "def string(self, string_length=10):\n letters = string.ascii_letters\n return ''.join(random.choice(letters) for i in range(string_length))", "def generate(length):\n alpha = map(chr, range(97, 123))\n alpha.append(' ')\n result = \"\"\n for x in range(length):\n result += alpha[random.randrange(0,27)]\n return result", "def random_string(self, length):\n return \"\".join(\n SystemRandom().choice(string.ascii_letters) for _ in range(length)\n )", "def random_string():\n return \"\".join(random.choice(string.ascii_letters) for i in range(6))", "def _generate_random_string(length: int):\n\tall_chars = string.ascii_letters + string.digits\n\treturn ''.join(random.choices(all_chars, k=length))", "def create_random_string(chars=string.ascii_letters + string.digits, length=16):\n return \"\".join([random.choice(chars) for _ in range(int(length))])", "def get_random_string(length):\n return \"{0:0{1}x}\".format(random.getrandbits(length * 4), length)", "def generate_random_string(length=6):\n n = int(length / 2 + 1)\n x = binascii.hexlify(os.urandom(n))\n s = x[:length]\n return s.decode(\"utf-8\")", "def random_str(length=8, letters=string.ascii_letters + string.digits):\r\n return \"\".join(random.choice(letters) for x in range(length))", "def gen_rand_str(n):\n return \"\".join(random.choice(string.ascii_letters) for _ in range(n))", "def __generate_random_string():\n return uuid4().hex[:6].upper()", "def random_string(length=10):\n\n\tletters = string.ascii_lowercase\n\n\treturn ''.join(random.choice(letters) for i in xrange(length))", "def random_string(length):\n return ''.join(SystemRandom().choice(ascii_letters + digits)\n for i in range(length))", "def random_string(length=25):\n return ''.join(random.choice(string.ascii_letters) for i in range(25))", "def rndstr(length):\n return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))", "def generate_random_string(length):\r\n return 
''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))", "def generate_random_string(length: int) -> str:\n charset = string.ascii_letters + string.digits\n return \"\".join(random.choice(charset) for _ in range(length))", "def random_string() -> str:\n\n k = random.randint(5, 10)\n return ''.join(random.choices(string.ascii_letters + string.digits, k=k))", "def _rand_str(size=16):\n return \"\".join(random.SystemRandom().choices(string.ascii_lowercase, k=size))", "def unique_str():\n return hex(random.randint(0, 256 * 256 * 256 * 256 - 1))[2:]", "def gen_hex_str(octets=64):\n # Generate random hex string\n return binascii.b2a_hex(os.urandom(octets)).decode()", "def random_string(length=16):\n secret_chars = string.ascii_letters + string.digits\n return ''.join([random.SystemRandom().choice(string.ascii_letters)] +\n [random.SystemRandom().choice(secret_chars)\n for _ in range(length - 1)])", "def random_string(length=12):\n\n return ''.join(\n [random.choice(string.ascii_letters) for _ in range(length)])", "def random_string(length=None):\r\n chars = string.ascii_uppercase + string.digits\r\n str_length = length if length is not None else random_int()\r\n return unicode(u''.join(random.choice(chars) for x in range(str_length)))", "def random_string(length):\n # this conservatively gets 8*length bits and then returns 6*length of\n # them. Grabbing (6/8)*length bits could lose some entropy off the ends.\n return urlsafe_b64encode(os.urandom(length))[:length]", "def random_string(length=8, chars=string.ascii_letters + string.digits):\n return ''.join([chars[random.randint(0, len(chars) - 1)] for i in range(length)])", "def generate_random_string(length = 30):\n\n m_available_chars = ascii_letters + digits\n\n return \"\".join(choice(m_available_chars) for _ in xrange(length))", "def make_random(length, is_binary=False):\n limit = 255 if is_binary else 126\n return \"\".join([chr(choice(range(32, limit))) for _ in range(length)]) + \"\\n\"", "def generate_string(str_length):\n letters = string.ascii_uppercase + ' '\n return ''.join(random.choice(letters) for i in range(str_length))", "def get_random_string(self, length):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(length))", "def make_random_string(\n self,\n length: int = 8\n ) -> str:\n return ''.join(choice(self.lowercase + self.uppercase + self.digits) for _ in range(length))", "def get_random_string(length: int) -> str:\n return \"\".join(random.choices(string.ascii_letters + string.digits, k=length))", "def _random_password(self):\n return ''.join([\n random.choice(string.ascii_letters + string.digits)\n for _ in range(12)\n ])", "def random_string():\n\n return ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))", "def get_random_str(len):\n return base64.urlsafe_b64encode(os.urandom(len))[0:len]", "def random_string():\n\n k = random.randint(5, 10)\n return ''.join(random.choices(string.ascii_letters + string.digits, k=k))", "def create_secret_code():\n characters = string.ascii_uppercase + string.digits\n size = 6\n return ''.join(random.choice(characters) for _ in range(size))", "def get_random_string(self, stringLength=10):\n import random\n letters = string.ascii_letters + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))", "def __randomStringGen(self, len):\n binString = ''\n for _ in range(len):\n binString = binString + str(random.randrange(100)%2)\n\n return binString", "def 
random_nonb64_string(length):\n return ''.join(\n random.choices('!@#$%^&*(){}[]', k=length)\n )", "def generateToken():\n length = random.randint(8, 32)\n rdmtoken = ''.join(random.choice(string.printable) for i in range(length))\n return f'{rdmtoken}'", "def generate_random_string(symbols, length):\n sym_list = symbols.split()\n str_list = random.sample(sym_list, length)\n gen_string = ''.join(str_list)\n return gen_string", "def gen_code():\n return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)])", "def passwordGen() :\n\treturn __randomString(12)", "def _hex_string(k):\n sample = random.choices(Record.HEX_DIGITS, k=k)\n return \"\".join(sample)", "def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):\n return ''.join(random.choice(allowed_chars) for __ in range(length))", "def random_hexstring(self, string_len = 10240):\n alphabet = string.digits + 'abcdef'\n hex_string = ''.join(random.choice(alphabet) for _ in range(string_len))\n\n return hex_string", "def generate_random_string(N):\n return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))", "def randstr(chars=string.ascii_lowercase + string.digits, len=16) -> str:\n return ''.join(random.choices(chars, k=len))", "def get_random_str(length=16):\n if length is None or not isinstance(length, int) or length > 1000 or length <= 0:\n length = 16\n\n alph = list(\"1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\")\n res_str = \"\"\n for idx in range(length):\n res_str += random.choice(alph)\n return res_str", "def random_string(length=4):\n return \"{0}\".format(\n \"\".join(random.choice(string.ascii_uppercase) for _ in range(length))\n )", "def stringGen(size, chars=string.ascii_uppercase + string.digits):\n\treturn ''.join(random.choice(chars) for _ in range(size))", "def random_string(length, characters=string.ascii_letters + string.digits):\n return \"\".join(random.choice(characters) for i in range(length))", "def GenRandom(length = 10, chars=string.letters+string.digits):\n return ''.join([random.choice(chars) for i in range(length)])", "def _random_string(self, size, chars=None):\n return ''.join(random.choice(chars or _DEFAULT_CHARS) for _ in range(size))", "def randstring(length=1):\n charstouse = string.ascii_letters + string.digits + string.punctuation\n newpass = ''\n for _ in range(length):\n newpass += str(charstouse[random.randint(0, len(charstouse) - 1)])\n return newpass", "def randomString():\n randInt = random.randint(0, 10)\n randStr = ''.join(random.choice(\n string.ascii_letters) for _ in range(randInt))\n return randStr", "def random_string(self, stringLength=10):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))", "def generate_random_string(stringLength=4):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))", "def random_string(strlen=10):\n return \"\".join([random.choice(string.ascii_letters) for _ in range(strlen)])", "def randompassword():\n characters = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n return ''.join(random.choice(characters) for x in range(size))", "def _randomString(self, stringLength=10):\r\n letters = string.ascii_lowercase\r\n return ''.join(random.choice(letters) for i in range(stringLength))", "def random_string(len = 5):\n return ''.join(random.choice(string.digits) for i in range(len))", 
"def random_string(i):\r\n\r\n return ''.join(_random.choice(string.ascii_letters) for x in\r\n xrange(i))", "def generate_random_text(length):\r\n text = []\r\n for num in range(length):\r\n text.append(alphabet[random.randint(0, 25)])\r\n return(''.join(text))", "def create_random_string(length):\n return ''.join(random.choice(\"ACDEFGHJKMNPQRTWXYZ\")\n for _ in range(length)\n )", "def gen_random_char_string(n, base_s=\"\"):\n if n == 0:\n return base_s\n \n c = random.choice(string.ascii_letters)\n return gen_random_char_string(n-1, base_s + c)", "def randomStringwithDigitsAndSymbols(stringLength: int = 10) -> str:\n password_characters = string.ascii_letters + string.digits + string.punctuation\n return \"\".join(random.choice(password_characters) for i in range(stringLength))", "def get_random_string(length: int) -> str:\n return ''.join([random.choice(string.ascii_letters + string.digits)\n for _ in range(length)])", "def random_str(length):\n\n def _random_chr():\n if random.randint(0, 1):\n return chr(random.choice(range(65, 91)))\n else:\n return chr(random.choice(range(97, 122)))\n return \"\".join([_random_chr() for _ in range(length)])", "def random_str(length):\n digits = ''.join([str(num) for num in list(range(10))])\n res = ''.join(random.choice(ascii_lowercase + digits)\n for i in range(length))\n return res", "def generate_password(length=20):\r\n # type: (int) -> str\r\n return ('%0'+str(length)+'x') % random.randrange(16 ** length)", "def generate_password():\n chars = string.ascii_letters + string.digits\n key = random.sample(chars, 10)\n keys = \"\".join(key)\n return keys", "def randomstr(ctx, nbytes=\"\"):\n # deprecated function\n logger.info(\"DeprecationWarning: randomstr is deprecated. Use random:str instead\")\n random(ctx, \"str\", nbytes)", "def rand_str(length: int) -> str:\n alphabet = \"0123456789abcdef\"\n return \"\".join(random.choices(alphabet, k=length))", "def get_random_string(length=5):\n return ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits)\n for _ in range(length))", "def cli(bytes):\n return logging.info(binascii.b2a_hex(os.urandom(bytes)))", "def generate_passwd(length=6):\n ret = ''\n if length < 6 :\n length = 6\n elif length > 10 :\n length = 10\n for x in xrange(length) :\n if x == 3 :\n ret += '-'\n ret += chr(random.randrange(ord('a'),ord('z'),1))\n return ret", "def randomStringDigits(stringLength=1000):\n lettersAndDigits = string.ascii_letters + string.digits\n return ''.join(random.choice(lettersAndDigits)+'\\r\\n' for i in range(stringLength))", "def random_mac():\n return '\"02:%02x:%02x:%02x:%02x:%02x\"' % (random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255))", "def _generate_id() -> str:\n return \"\".join(sample(\"abcdefghjkmopqrstuvqxyz\", 16))", "def rng_string(alphabet, length):\n bitList = []\n for _ in range(0, length):\n bitList.append(str(randint(0, alphabet)))\n return ''.join(bitList)", "def generate_random_password(self):\r\n self.symbols = self.__set_symbol_dict() # set new symbol subset dict\r\n self.i = randrange(len(self.symbols)) # set new dict key pointer\r\n return \"\".join(self.__get_random_symbol() for _ in range(self.pw_len))", "def gensalt():\n return hexlify(os.urandom(24)).decode()", "def generate_random_alphanumeric(length):\n return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) \\\n for _ in range(length))", "def get_random_alphanumeric_string():\n return 
get_random_string(char_choice=string.ascii_letters + string.digits)", "def gen_passphrase(self):\n return ''.join(\n random.sample(map(str, range(0,10)) +\n map(chr, range(ord('a'), ord('z') + 1)) +\n map(chr, range(ord('A'), ord('Z') + 1)), self.passphraselen))", "def rand_string(self, string_length=5, text='abcdefghi'):\n return ''.join((random.choice(your_letters) for i in range(string_length)))", "def random_str(l=10):\n password_characters = string.ascii_letters + string.digits\n return ''.join(random.choice(password_characters) for i in range(l))" ]
[ "0.76754934", "0.74476475", "0.7420788", "0.7411069", "0.7401505", "0.72970307", "0.72862035", "0.7236748", "0.7232668", "0.71930367", "0.717935", "0.71791947", "0.71789944", "0.71474946", "0.7141961", "0.7137273", "0.712956", "0.71288073", "0.712568", "0.71228766", "0.71025354", "0.705463", "0.7023413", "0.70198417", "0.7008503", "0.70017344", "0.6990112", "0.69892794", "0.6968604", "0.6918318", "0.6904933", "0.6897666", "0.68972945", "0.6895146", "0.6894246", "0.6891407", "0.68906593", "0.6883781", "0.68818486", "0.68679893", "0.6857896", "0.68518066", "0.68429244", "0.6838538", "0.68377686", "0.68363816", "0.6835756", "0.6834886", "0.68296975", "0.6828952", "0.68256474", "0.6820292", "0.68122625", "0.68117243", "0.68039054", "0.67966145", "0.6784012", "0.6783072", "0.67825013", "0.6778772", "0.67787015", "0.67743146", "0.67736185", "0.67653376", "0.67637384", "0.6752617", "0.67523384", "0.6750852", "0.67502356", "0.67491627", "0.67464453", "0.6739177", "0.6736306", "0.6715354", "0.67147493", "0.6709791", "0.670953", "0.67074555", "0.67067635", "0.6705002", "0.67049855", "0.6704559", "0.6702911", "0.67004395", "0.6693506", "0.66924775", "0.6687693", "0.66819507", "0.66770136", "0.6676314", "0.6663405", "0.6660975", "0.6652441", "0.66460353", "0.663556", "0.66230756", "0.66149944", "0.66134983", "0.6612733", "0.66091573" ]
0.74168414
3
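
A note on the positive document of the row above: it draws one character of string.printable per loop iteration. The following is a self-contained sketch of the same approach, runnable as-is; variable names follow the row, and the len(feed) - 1 upper bound is kept faithful to the data even though it means the final printable character can never be drawn.

    import random
    import string

    def create_random_string(total_character):
        # Build the result one character at a time from string.printable.
        feed = string.printable
        words = ""
        i = 0
        while i < total_character:
            # randrange(0, len(feed) - 1) excludes the final index, so the
            # last character of string.printable is never selected.
            words += feed[random.randrange(0, len(feed) - 1)]
            i += 1
        return words

    print(create_random_string(16))  # 16 characters drawn from string.printable

The idiomatic equivalent for the same query would be ''.join(random.choices(string.printable, k=n)), which several of the negatives above approximate with other alphabets.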
give random value between 0 ~ max
def seed_random(max_integer): return random.randrange(0,max_integer);
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_number(maxValue):\r\n return random.randint(1, maxValue)", "def random_int(max=1000):\r\n return randint(0, max)", "def randInt(max):\n return int(max * random.random())", "def mt_rand(min = 0, max = sys.maxint):\n return random.randint(min, max)", "def get_random_value():\n return randint(0, 255) / 256.0", "def rand_uni_val() -> float:\n return random.uniform(0, 1)", "def get_random_value():\n return randint(0, 255) / 256.0", "def _get_random_value(self):\r\n return random.randint(1, 10)", "def random_number(max_number):\n return random.randint(1, max_number)", "def get_next_random(value, max_value, min_value, max_delta):\n # Determine if sensor delta should be added or substracted.\n if value == max_value:\n add = False\n elif value == min_value:\n add = True\n else:\n add = random.random() > 0.5\n\n # Calculate a new delta.\n delta = random.randint(0, max_delta)\n\n # Apply the delta.\n if add:\n value += delta\n else:\n value -= delta\n if value > max_value:\n value = max_value\n elif value < min_value:\n value = min_value\n\n return value", "def _gen_random_number() -> float:\n return uniform(0, 1000)", "def randomNumber(maxNumber):\n return random.randint(1, maxNumber)", "def random():\r\n return R.NextDouble()", "def mt_rand (low = 0, high = sys.maxint):\n return random.randint (low, high)", "def getRandom(self) -> int:", "def getRandom(self) -> int:", "def random_num(range_start,range_end):\r\n return random.randint(range_start,range_end)", "def random_temp():\n temp_min = 154\n temp_max = 500\n temp_interval = 1\n # `range`s are exclusive [min, max)\n return random.randrange(temp_min, temp_max + 1, temp_interval)", "def float(self, max_=None):\n max_ = self.max_float if max_ is None else max_\n return max_ * (self.rng.random() - 0.5)", "def rand_int(min=0, max=100):\n\n num = random.random() * (max - min) + min\n return round(num)", "def random():\n return constant(1)", "def random(self):\r\n return random.randint(1, 4)", "def get_random_real_number():\n return random.uniform(-MAX_GENERATED_NUMBER_RANGE, MAX_GENERATED_NUMBER_RANGE)", "def uniform(self, key, min_value=0., max_value=1.):\n return min_value + self._random(key) * (max_value - min_value)", "def random() -> float:\n ...", "def get_random_integer():\n return random.randint(-MAX_GENERATED_NUMBER_RANGE, MAX_GENERATED_NUMBER_RANGE)", "def rand_val(max):\n order = math.ceil(math.log10(max)) #Determine the num of digits in size\n index = math.floor(random.SystemRandom().random() * (10 ** order))\n\n # Yea, this is quite inefficient\n while (index >= max):\n index = math.floor(random.SystemRandom().random() * (10 ** order))\n\n return index", "def randrange(n, vmin, vmax):\n return (vmax - vmin) * np.random.rand(n) + vmin", "def quasi_rand(values, feature, parent):\r\n seed = values[0]\r\n base = values[1]\r\n min = values[2]\r\n max = values[3]\r\n \r\n return math.floor(halton(seed, base) * (max - min + 1) + min)", "def create_random_index(self, max:int):\n return random.randint(0, max - 1)", "def get_random_int_op(minimum: int, maximum: int) -> int:\n import random\n result = random.randint(minimum, maximum)\n print(result)\n return result", "def randomize_value(self) -> None:", "def generate_guess_value(min_guess_range=1, max_guess_range=10):\n\treturn random.randrange(min_guess_range, max_guess_range), min_guess_range, max_guess_range", "def get_random(self):\n self.random_range = list(np.array(self.friendly_range) * self.conversion)\n return np.random.uniform(self.random_range[0], self.random_range[1], 1)[0]", 
"def rs():\n return random.choice([-1,1])", "def rs():\n return random.choice([-1,1])", "def random_float():\n return (random() - 0.5) * 2", "def random_int(self, min_int = 1, max_int = 10240):\n return random.randint(min_int, max_int)", "def rangeSample(val, minLim, maxLim):\n\tif val < minLim or val > maxLim:\n\t\tval = randint(minLim, maxLim)\n\treturn val", "def random_range():\n rnd = int(random.randrange(1,8))\n print \"Random number generated: %s\" %(rnd)\n return rnd", "def safe_rand(self):\n rand_n = np.random.rand()\n if rand_n == float(1):\n rand_n -= 1e-10\n return rand_n", "def generate_random(limit_lo, limit_hi):\n\n return RAND.randint(limit_lo, limit_hi)", "def _get_random_number(self,min_max_array):\n\n # check length of array and caluclate random number ############################################################\n if len(min_max_array) > 1:\n return random.uniform(min_max_array[0],min_max_array[1])\n else:\n return min_max_array[0]\n ##################################################### end of check length of array and caluclate random number #", "def _randomVelocity(self):\n\t\treturn random.choice([-1, 1]) * random.randint(10, 50)", "def getRandSpeed(self) -> int:\n num = int(random.uniform(-4,4))\n while(-1<=num and num<=1):\n num = int(random.uniform(-4,4))\n return num", "def getRandom(self):\n return self.nums[randint(0, len(self.nums)-1)]", "def random_number():\n return random.randint(0, 9999)", "def random_randint(lower_bound, upper_bound):\r\n return random_randrange(lower_bound, upper_bound+1)", "def rand_val(val_range):\r\n if isinstance(val_range, (list, tuple, np.ndarray)):\r\n return np.random.uniform(val_range[0], val_range[-1])\r\n # Assume val_range is a number\r\n return val_range", "def random_between_values(self, first_value, last_value):\n return random.randint(first_value, last_value)", "def rand(lo=0, hi=1):\n global Seed\n Seed = (16807 * Seed) % 2147483647\n return lo + (hi - lo) * Seed / 2147483647", "def getRandom(self) -> int:\n size = len(self.value_set)\n if size > 0:\n from random import randint\n x = randint(1, size)\n return self.values[x - 1]", "def generate() -> int:\n return randint(0, 1000000000)", "def base_pick():\n\n rnd = generate_random(2, 15)\n return rnd", "def random_float(low: float, high: float):\n seed = time.time()\n random.seed(seed)\n return random.uniform(low, high)", "def uniform(lower, upper):\n\n return lower + random.random() * (upper - lower)", "def _rand_float(self, low, high):\n\n return self.np_random.uniform(low, high)", "def GetRandomNumberFromLimits():\n smaller = int(input(\"Enter the smaller number: \"))\n larger = int(input(\"Enter a larger number: \"))\n return random.randint(smaller, larger)", "def unforeseen():\r\n return random.gauss(300., 100.)", "def randomRGBValue(self):\n return random.randrange(0, 256)", "def decrease():\n decrease_amount = random.uniform(MIN_DECREASE, MAX_DECREASE)\n return decrease_amount", "def rand_gen(below, baseline):\n\treturn secrets.randbelow(below)/ baseline", "def random(self):\n result = self.db.zrangebyscore(REDIS_KEY, MAX_SCORE, MAX_SCORE)\n if len(result):\n return choice(result)\n else:\n result = self.db.zrevrange(REDIS_KEY, 0, 100)\n if len(result):\n return choice(result)\n else:\n raise PoolEmptyError", "def getRandom(self) -> int:\n count = len(self.arr)\n return self.arr[randint(0, count-1)]", "def _uniform(val_range):\r\n return np.random.uniform(val_range[0], val_range[1])", "def randj():#could be combined with randik\n 
return(int(round(random.random()*1)))", "def preturbScalarAbs(value, vrange):\n\tdelta = - vrange + 2.0 * vrange * random.random() \n\treturn value + delta", "def rand(self, lo, hi):\n lo, hi = lo or 0, hi or 1\n\n self.seed = (16807 * self.seed) % 2147483647\n return lo + (hi - lo) * self.seed / 2147483647", "def rand(self, lo, hi):\n lo, hi = lo or 0, hi or 1\n\n self.seed = (16807 * self.seed) % 2147483647\n return lo + (hi - lo) * self.seed / 2147483647", "def getRandom(self) -> int:\n return choice(self.array)", "def getRandom(self) -> int:\n # 此处可以优化\n datas = list(self.data.keys())\n pos = self.rand.randint(0, len(datas) - 1)\n val = datas[pos]\n return val", "def getRandom(self) -> int:\n return choice(self.arr)", "def getRandom(self) -> int:\n return choice(self.arr)", "def getRandom(self) -> int:\n return choice(self.arr)", "def getRandom(self) -> int:\n return self.nums[random.randint(0, len(self.nums) - 1)]", "def random_from_bound(bound):\n if (isinstance(bound, tuple)):\n val = np.random.uniform(low = bound[0], high = bound[1])\n else:\n val = 0.0\n return val", "def rand(self):\n return self.State.rand()", "def UniformRV(low, high):\n return RV(ss.randint(low, high))", "def generate_random(n):\n lower_bound = 10**(n-1)\n upper_bound = (10**n)-1\n return randint(lower_bound, upper_bound)", "def RandomCoordinate(): \r\n return ReturnRounded(np.random.uniform(-10,10))", "def individual(min_val, max_val):\n value_list = [i for i in range(min_val, max_val+1)] #generate a list of 1 to 10\n random.shuffle(value_list) #shuffle the list\n return value_list", "def my_random(a):\r\n import random\r\n r = random.randint(0, 100)\r\n return a + r", "def vary(num, sigma):\n\tvalue = random.gauss(num, sigma)\n\treturn min(255, max(0, value))", "def rng() -> int:", "def computer_random():\r\n ci = random.sample(range(1,43),5)\r\n return ci", "def getRandom(self) -> int:\n\n return random.choice(self.nodes).val", "def non_linearRandomInt(to, const = 1/2):\n return int(np.floor(non_linearRandomFloat(const)*to))", "def _rand_int(self, low, high):\n\n return self.np_random.randint(low, high)", "def Rnd(value=1):\n global _last_rnd_number\n if value == 0:\n return _last_rnd_number\n elif value < 0:\n random.seed(value)\n r = random.random()\n _last_rnd_number = r\n return r", "def rint(lo, hi):\n return round(0.5 + rand(lo, hi))", "def i_rand_a():\n return i_random() % 95 + 32", "def roll(self):\n return randint(1,6)", "def rand(self) -> ZqValue:\n\n return self(randbelow(int(self.q)))", "def cvv(i):\r\n\r\n return '{}'.format(_random.randint(111, (999 if i == 3 else 9999)))", "def random_negative(value, random_negative_prob):\n return -value if np.random.rand() < random_negative_prob else value", "def generate_value(loc, data):\n return np.random.randint(100, size=1)", "def getRandom(self) -> int:\n return random.choice(self.l)", "def select_arm(self):\n\n # Exploitation\n if random.uniform(0, 1) > self.epsilon:\n return np.argmax(self.values)\n\n # Exploration\n else:\n return random.randrange(len(self.values))", "def newRandomState(x):\n\treturn x + random.uniform(-1, 1)", "def getRandom(self) -> int:\n return random.choice(self.array)" ]
[ "0.78364855", "0.7738007", "0.7737629", "0.7560306", "0.7451346", "0.7409387", "0.7381858", "0.73814666", "0.7321527", "0.7308225", "0.7287057", "0.72776306", "0.7269391", "0.7227846", "0.7195339", "0.7195339", "0.714679", "0.7145737", "0.71094", "0.70787364", "0.7073735", "0.7066604", "0.70632565", "0.70470583", "0.7027331", "0.7016506", "0.7011008", "0.69584864", "0.6956608", "0.6950647", "0.69500434", "0.6936954", "0.6929518", "0.6898271", "0.68927205", "0.68927205", "0.68867034", "0.68605065", "0.68208444", "0.6799588", "0.67935705", "0.678548", "0.6764067", "0.6758524", "0.6730021", "0.6716366", "0.67133904", "0.67081285", "0.66967213", "0.6686541", "0.66570634", "0.6649311", "0.664055", "0.6638923", "0.662308", "0.66112375", "0.6607239", "0.66055316", "0.65903336", "0.6581137", "0.6571235", "0.6564049", "0.6540017", "0.65383565", "0.6534221", "0.6526733", "0.65242016", "0.65102583", "0.65102583", "0.64807576", "0.6480099", "0.647711", "0.647711", "0.647711", "0.6474273", "0.6467302", "0.6463427", "0.6461941", "0.6458137", "0.6456218", "0.64495456", "0.64493763", "0.6449055", "0.6447844", "0.6444176", "0.6442708", "0.6439789", "0.64386886", "0.6434631", "0.64296144", "0.6425964", "0.6411845", "0.64104825", "0.6403191", "0.64004666", "0.6397503", "0.63909924", "0.6388775", "0.6388698", "0.6386734" ]
0.7360128
8
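
For this row it is worth pinning down the randrange semantics the positive document relies on: random.randrange(0, n) draws uniformly from [0, n-1], so max_integer itself is never returned. A minimal check, assuming nothing beyond the standard library:

    import random

    def seed_random(max_integer):
        # randrange(0, n) returns an int in [0, n-1]; n is excluded.
        return random.randrange(0, max_integer)

    counts = {v: 0 for v in range(5)}
    for _ in range(10_000):
        counts[seed_random(5)] += 1
    print(counts)  # counts stay roughly even across 0..4; 5 is never drawn

If the query's "0 ~ max" is meant to include max, random.randint(0, max_integer) is the inclusive variant.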
populate question object with random string and user
def populate_poll(user="", total=10):
    user_list = None
    # create random user only when user argument empty
    if user == "":
        create_random_user(20)
        user_list = User.objects.all()
    for i in range(total):
        Question.objects.create(
            created_by=random.choice(user_list) if user_list is not None else user,
            title=create_random_string(seed_random(10)),
            text=create_random_string(seed_random(300)),
            slug=create_random_string(seed_random(100)),
        )
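
The positive documents of the last three rows compose: populate_poll consumes create_random_string and seed_random from the two rows above. Below is a hedged, Django-free analogue of the same population pattern, using an in-memory list in place of the ORM; everything in it is illustrative, since the original depends on real User and Question models and a create_random_user helper defined elsewhere in the mined repository. Note that seed_random(10) can return 0, so empty titles and slugs are possible in the original as well.

    import random
    import string

    def create_random_string(n):
        # Idiomatic stand-in for the helper from the earlier row.
        return "".join(random.choices(string.printable, k=n))

    questions = []

    def populate_poll(users, total=10):
        # Same shape as the row's document: random author, random-length
        # printable strings for the text fields.
        for _ in range(total):
            questions.append({
                "created_by": random.choice(users),
                "title": create_random_string(random.randrange(10)),
                "text": create_random_string(random.randrange(300)),
                "slug": create_random_string(random.randrange(100)),
            })

    populate_poll(["alice", "bob"], total=3)
    print(len(questions), questions[0]["created_by"])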
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, question):\n self.question = question\n self.responses = []", "def __init__(self, question, answer):\n\n self.question = question\n self.answer = answer\n\n self.q_and_a = {\n 'Question:': self.question,\n 'Correct Answer:': self.answer,\n }", "def notAFan_questions(user):\n questions = {\n 1: \"GBB: How old are you? \",\n 2: \"GBB: What do you like to do in your free time? \",\n 3: \"GBB: What is your ethnicity? \",\n 4: \"GBB: What did you eat for breakfast? \",\n 5: \"GBB: Are you an early bird or a night owl? \",\n 6: \"GBB: Do you like football? \"\n }\n\n while True:\n num = random.randint(1, 6)\n answered = user['personal questions asked']\n if num not in answered:\n user['personal questions asked'].append(num)\n return questions[num]\n if len(answered) == len(questions.keys()):\n return \"GBB: Looks like I know you so well that I don't even need to ask you a question! Type anything to proceed.\"", "async def generate_question(self) -> None:\n topic = choice(list(self._topics.keys()))\n arrayList = await self.session.get(\"https://restcountries.eu/rest/v2\")\n arrayList = await arrayList.json() # get request to the country API\n countries = []\n \n for _ in range(4):\n country = choice(arrayList)\n del arrayList[arrayList.index(country)]\n countries.append(country)\n del arrayList\n \n country = choice(countries)\n del countries[countries.index(country)]\n self.question = f\"What is the {self._topics[topic]} of {country['name']}?\"\n self.correct_order = randint(0, 3)\n self.choices = [i[topic] for i in countries]\n self.choices.insert(self.correct_order, country[topic])\n del countries, topic", "def personal_questions(user):\n questions = {\n 1: 'GBB: How long have you been a fan of the Packers?',\n 2: 'GBB: Why are you a fan of the Packers?',\n 3: \"GBB: What team do you hate the most?\",\n 4: \"GBB: Who's your favorite player on the Packers?\",\n 5: \"GBB: Who's your least favorite player on the Packers?\",\n 6: \"GBB: Do you come from a family of Packer fans, or are you a lone ranger?\"\n }\n\n while True:\n num = random.randint(1, 6)\n answered = user['personal questions asked']\n if num not in answered:\n user['personal questions asked'].append(num)\n return questions[num]\n if len(answered) == len(questions.keys()):\n return \"GBB: Look's like I know you so well that I don't even need to ask you a question!\"", "def __init__(self, question, correct_answer):\n\n self.question = question\n self.correct_answer = correct_answer", "def populate_game_questions():\n indices = random.sample(range(0, len(quizquestion.questions_all)), 5) # If user doesn't specify, choose 5 random questions\n return quizquestion.QuizQuestion.get_game_questions(indices)", "def get_question(self, user_state):\n\n if not user_state.current_session:\n\n # Create the new session\n new_session = user_state.create_session()\n # Create the new block for the session\n new_block = Block.objects.create(session=new_session)\n\n # Fill up the new block with random qandas\n while not new_block.is_full:\n QandA.objects.create(question=self.get_random(user_state), block=new_block)\n\n # Add the new block\n user_state.add_block(new_block)\n\n return user_state.current_session.current_block.get_question()", "def create_answer(question, user):\n return Answer.objects.create(question=question,answered_by=user)", "def create_question(self, user_id=0):\n resp = self.create_user()\n if user_id == 0:\n user_id = resp[0]\n\n params = {\n \"user_id\":user_id,\n \"text\":\"What is the fastest programming 
language and why do you think so?\",\n \"description\":\"I am looking for the fastest programming language in terms\\\n of memory management for a very high performance project.\"\n }\n headers = {\n \"Authorization\":\"Bearer {}\".format(resp[1]),\n \"Content-Type\":\"application/json\"\n }\n path = \"/api/v2/questions\"\n question = self.client.post(path=path,\n data=json.dumps(params),\n headers=headers)\n question_id = question.json['question_id']\n return int(question_id), question", "def create_question(user,title='title',text='text'):\n return Question.objects.create(created_by=user, title=title, text=text)", "def fill_question(self, response, question_answer):\n question_answer['source_url'] = response.url\n\n question_answer['question_title'] = response.xpath('//*[@id=\"question-header\"]/h1/a/text()').extract_first()\n question_answer['question_body'] = BeautifulSoup(\n response.xpath(self.gt.css_to_xpath('.postcell .post-text')).extract_first()).text\n question_answer['question_tags'] = list(set(\n response.xpath('//*[contains(concat(\" \", normalize-space(@class), \" \"), \" post-tag \")]/text()').extract()))\n # would like to specify the hierarchy of the css tags\n question_answer['question_upvotes'] = int(response.xpath(\n '//*[contains(concat(\" \", normalize-space(@class), \" \"), \" vote-count-post \")]/text()').extract_first())\n question_answer['question_view_count'] = int(\n response.xpath(self.gt.css_to_xpath('#qinfo .label-key') + '/b/text()').extract()[1].split(' ')[0])\n\n author_name = response.xpath(\n self.gt.css_to_xpath('.owner .user-details') + '/a/text()').extract_first()\n question_answer['question_author'] = {'author_id': '{}_{}'.format(self.allowed_domains[0], author_name),\n 'author_name': author_name}\n\n se_date_format = '%b %d \\'%y at %H:%M' # if date not current year\n se_date_format_curr_year = '%b %d at %H:%M' # if date current year\n try:\n try:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format))\n except ValueError:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format_curr_year))\n except (ValueError, TypeError):\n pass\n # Look for duplicates\n duplicate_url = response.xpath(self.gt.css_to_xpath('.question-originals-of-duplicate')+'/ul/li/a/@href').extract_first()\n if duplicate_url:\n print('duplicate question')\n self.duplicate_count += 1\n print('duplicate question count: {}'.format(self.duplicate_count))\n duplicate_url = \"https://superuser.com\" + duplicate_url\n print(duplicate_url)\n self.logger.info('duplicate url: {}'.format(duplicate_url))\n question_answer['question_original_url'] = duplicate_url\n self.duplicate_url = duplicate_url\n\n return question_answer", "async def question(self, channel_id, user_infos, user_id, team_id):\n\n q = random.choice(self.questions) # Random question selection from list\n answers = q[\"badAnswers\"] + [q[\"goodAnswer\"]] # Save all possible answers\n goodAnswer = q[\"goodAnswer\"] # Save the good answer\n random.shuffle(answers) # Shuffle everything\n\n choices = {} # Dict of choices\n\n for i in range(len(answers)): # For every possible answer\n choices[str(i + 1)] = answers[i]; # Fill the choices dict with normal people understandable indexes\n\n message = \"{} \\n\".format(q[\"question\"]) # Start 
the string question message\n\n for key in sorted(choices):\n message += (\"Reponse {} : {} \\n\").format(key, choices[key]) # Add choices to question message\n\n id = 0\n for i in range(len(choices)):\n if choices[str(i + 1)] == goodAnswer: # Retrieve the good answer id (lol). Should probably do differently...\n id = i + 1\n\n self.currentAskedQuestions[user_id] = str(id) # Put the entry in the dict with good answer id\n return await self.sendText(message, channel_id,user_infos, team_id)", "def genQuestion(line):\r\n if type(line) is str: # If the passed variable is of type string.\r\n line = TextBlob(line) # Create object of type textblob.blob.TextBlob\r\n \r\n bucket = {} # Create an empty dictionary\r\n \r\n subject_list = []\r\n question_subject=\"\"\r\n answer_subject=\"\"\r\n for i,j in enumerate(line.tags): # line.tags are the parts-of-speach in English \r\n question_subject += j[0] + \" \"\r\n if (j[1] == \"NNP\" or j[1] == \"NNS\"): \r\n subject_list.append(j[0])\r\n if j[1] not in bucket:\r\n bucket[j[1]] = i # Add all tags to the dictionary or bucket variable\r\n \r\n if len(subject_list):\r\n random_subject_val = random.randint(0, len(subject_list)-1)\r\n question_subject = question_subject.replace(str(subject_list[random_subject_val]), \"______\")\r\n answer_subject = str(subject_list[random_subject_val])\r\n \r\n return question_subject, answer_subject", "def choose_question():\r\n random_index_question = randint(1, question.num_question + 1)\r\n random_question = question.question[random_index_question]\r\n correct_answer = question.answer[random_index_question]\r\n return random_question, correct_answer", "def get_random_question(self):\n available_qs = self.available_qs\n if available_qs.exists():\n return random.choice(available_qs)", "def __init__(self, name):\n self.name = name\n self.questions = []", "def gen_questions(self, number_of_questions):", "def generate_answer(self, question):\n\n # Recognize intent of the question using `intent_recognizer`.\n # Don't forget to prepare question and calculate features for the question.\n \n prepared_question = text_prepare(question)\n features = self.tfidf_vectorizer.transform([prepared_question])\n intent = self.intent_recognizer.Main(question)\n #intent='gcs'\n # Chit-chat part: \n if intent == 'dialogue':\n \"\"\"\n # Pass question to chitchat_bot to generate a response.\n reply=self.college.Main(question)\n if reply !=\"Please refer GCS facebook page or ask you mentor for more info :)\":\n return reply\n else: \n \"\"\"\n reply=self.college.Main(question)\n if reply!=\"Please refer GCS facebook page or ask you mentor for more info :)\":\n return reply\n else:\n reply=self.programming.Main(question)\n if reply!=\"Please refer kammand prompt discord or ask you mentor for more info :)\":\n return reply\n else:\n response = str(self.chatbot.get_response(prepared_question))\n temp=np.random.choice(2,p=[0.5,0.5])\n times=np.random.choice([1,2,3,4],p=[0.5,0.3,0.1,0.1])\n if temp==0:\n print(\"EMOJI!!!!!\")\n response= response + times*(label_to_emoji(emojifyer.predict_emoji(model,response,word_to_index)).strip())\n return response\n elif intent==\"mandi\":\n reply=self.college.Main(question)\n return reply\n # Goal-oriented part:\n elif intent==\"stackoverflow\":\n tag = self.tag_classifier.predict(features)[0]\n reply = self.thread_ranker.get_best_thread(prepared_question, tag)\n return reply", "def add_user_answer(self, question, guess, correct):\n user_answer = UserAnswer()\n user_answer.user = self.user\n user_answer.quiz = 
self.quiz\n user_answer.question = question\n user_answer.answer = guess\n user_answer.correct = correct\n user_answer.save()", "def __init__(self, createdby, meetup, title, body, votes,createdOn):\n self.question_id = len(Question.question_list) + 1\n self.createdon = datetime.now()\n self.createdby = createdby\n self.meetup = meetup\n self.title = title\n self.body = body\n self.votes = votes", "def not_given_bot(question_intent, question, answer):\n col_q_not_given.insert_one(\n {\n\n 'Question': question,\n 'Question Intent': question_intent,\n 'Answer': answer\n }\n )", "def convert_question(self, q):\n\n item = {}\n item['id'] = q['id']\n item['title'] = q['title']\n item['body'] = q['text']\n item['author_id'] = q['author']['id']\n item['author'] = q['author']['username']\n item['url'] = q['url']\n item['score'] = q['score']\n item['score_label'] = self.convert_count(q['score'])\n item['answer_count'] = q['answer_count']\n item['answer_count_label'] = self.convert_count(q['answer_count'])\n item['view_count'] = q['view_count']\n item['view_count_label'] = self.convert_count(q['view_count'])\n item['added_at'] = q['added_at']\n item['added_at_label'] = timeago.format(datetime.fromtimestamp(int(q['added_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['last_activity'] = q['last_activity_at']\n item['last_activity_label'] = timeago.format(datetime.fromtimestamp(int(q['last_activity_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['has_more_comments'] = False\n item['has_more_answers'] = False\n item['has_accepted_answer'] = q['has_accepted_answer']\n item['closed'] = q['closed']\n\n item['tags'] = []\n for tag in q['tags']:\n item['tags'].append({'name': tag})\n\n return item", "def __init__(self, exam_name):\n\n self.name = exam_name\n self.questions = []", "def __init__(self, data={}):\n\n self.config = db_config(BaseConfig.DATABASE_URI)\n self.table = 'questions'\n self.title = data.get('title')\n self.body = data.get('body')\n self.q = data.get('q')\n self.question_id = data.get('id')\n self.user_id = data.get('user_id')\n self.now = str(datetime.now())\n self.logged_in_user_id = Auth.get_logged_in_user(request)[0]['data']['user_id']", "def ask_question():\n title_question = request.form.get(\"title\")\n question = request.form.get(\"question\")\n\n date_string = datetime.today().strftime('%Y-%m-%d')\n \n ask = Question(user_id = session[\"user_id\"],question_created=date_string, title_question = title_question, question = question)\n\n db.session.add(ask)\n db.session.commit()\n\n return \"question added\"", "def __init__(self, question, correct_answer):\n self.question = question\n self.correct_answer = correct_answer.lower()", "def create_question(self):\n\n locations = [\"meetup_id\", \"user_id\", \"title\", \"body\"]\n\n try:\n\n user = self.sql.get_username_by_id(\n int(self.question_details[\"user\"]))\n\n meetup = self.sql.fetch_details_by_criteria(\n \"meetup_id\", self.question_details[\"meetup\"], \"meetups\")\n\n existing = self.sql.fetch_details_if_text_exists(\n \"title\", self.question_details[\"title\"], \"questions\")\n\n title = self.question_details[\"title\"]\n\n body = self.question_details[\"body\"]\n\n except KeyError as keyerror:\n return self.makeresp(\"{} is a required field\".format(keyerror), 400)\n\n isempty = DataValidators(\n self.question_details).check_values_not_empty()\n\n if isinstance(isempty, str):\n return self.makeresp(isempty, 400)\n\n if not user:\n return self.makeresp(\"User not found\", 404)\n\n if not meetup:\n return 
self.makeresp(\"Meetup not found\", 404)\n\n if not self.check_is_error(existing):\n\n if [meet_id[1] for meet_id in existing if self.question_details[\"meetup\"] in meet_id]:\n\n return self.makeresp(\"This Question already exists\", 409)\n\n question = {\n \"meetup\": self.question_details[\"meetup\"],\n \"createdBy\": self.question_details[\"user\"],\n \"title\": title,\n \"body\": body\n }\n\n question_id = SqlHelper(question).save_to_database(\n locations, \"questions\")\n\n return self.makeresp(\n {\n \"id\": question_id,\n \"user\": question[\"createdBy\"],\n \"meetup\": question[\"meetup\"],\n \"title\": question[\"title\"],\n \"body\": question[\"body\"]\n }, 201)", "def question(update, context):\n bot = context.bot\n user = update.message.from_user\n inc_msg = str.lower(update.message.text)\n\n # answer why questions with a reasons from database\n if 'waarom' in inc_msg:\n\n # return a random reason from file\n with open(REASONS) as file:\n lines = file.readlines()\n msg = random.choice(lines)\n\n # answer other questions with\n else:\n # TODO: introduce random silence\n rng = random.random()\n\n if rng < 0.9 and not 'rob' not in inc_msg:\n return\n options = [\n f\"Vraag het maar niet aan mij, ik ben niet alwetend.\",\n (\"https://lmgtfy.com/?q=\" + inc_msg.replace(\" \", \"+\") + \"&pp=1&s=g&t=w\"),\n f\"Ja he dat weet ik toch ook niet, google dat maar ff {user.first_name}...\"\n ]\n\n msg = random.choice(options)\n time.sleep(HUMAN_DELAY * len(msg))\n\n bot.send_message(chat_id=update.message.chat_id, text=msg,\n reply_to_message_id=update.message.message_id,\n parse_mode=ParseMode.MARKDOWN)", "def create_freeform(cls, name, question, default_response, contacts, user): \n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_TEXT)\n poll.contacts = contacts \n return poll", "def mutate(self, info, question_text):\n question = Question(\n question_text=question_text,\n pub_date=now()\n )\n question.save()\n ok = True\n return CreateQuestion(\n question=question,\n ok=ok\n )", "def create_undefined_question(query_data: dict, property_name: str):\n \n correct_subject = query_data[0][\"personLabel\"][\"value\"]\n del query_data[0][\"personLabel\"]\n\n correct_object = query_data[0][\"valueLabel\"][\"value\"]\n del query_data[0][\"valueLabel\"]\n\n alternatives = []\n for element in query_data[0]:\n alternatives.append(query_data[0][element][\"value\"])\n \n question_text = f\"{correct_subject} {property_name} _______?\"\n\n print(question_text)\n print(\"correct:\", correct_object)\n print(\"alternatives:\", alternatives)", "async def ask(self, ctx: commands.Context, *, question: str):\n # Check for cooldown\n await self.check_cooldown(ctx)\n\n # Create question context and contact API\n context = contexts.create_question_context(self.bot.config.data_path, question, self.bot.user.display_name)\n async with ctx.typing():\n result = await utils.create_completion_result_from_context(self.bot.loop, context)\n await ctx.send(result)", "def get_questions():\n fields_dt = ['name', 'category', 'key', 'text']\n questions = frappe.db.get_list('Big Five Factor Model',\n fields=fields_dt)\n\n # Ordenamiendo random: se aplica sobre el objeto original\n suffle_data = random.shuffle(questions)\n\n return questions", "def sample_user(self):", "def get_or_create(cls, question, student, result, answer, correct=None):\n qa = QuestionAnswer.objects.filter(question=question, student=student,\n result=result).first()\n if 
qa:\n qa.answer = answer,\n qa.correct = correct\n else:\n ans_data = {\n 'question': question,\n 'student': student,\n 'result': result,\n 'answer': answer,\n 'correct': correct,\n }\n qa = QuestionAnswer(**ans_data)\n qa.save()\n return qa", "def populate_questions(scores):\n \n print(\"populate_questions, scores: \", str(scores))\n\n try:\n return random.sample(range(len(quiz.list_fragen)), TOTAL_ROUNDS*len(scores))\n except ValueError:\n print(\"List of questions is too short.\")", "def add_question(self, prompt, correct_answer):\n\n self.prompt = prompt\n self.correct_answer = correct_answer\n self.new_question = super(AbstractExam, self).__init__(question=self.prompt, answer=self.correct_answer) \n\n # adds the new question to the list of exam questions\n self.exam_questions.append(self.q_and_a)", "def test_get_answers_by_user(self):\n user = self.create_user()\n user_id = user[0] # answer author user id\n question_id = int(self.create_question()[0])\n auth_token = user[1]\n posted_answers = [\n {\n \"text\":\"\".join(choice(\n string.ascii_letters) for x in range (randint(16,20)))\n },\n {\n \"text\":\"\".join(choice(\n string.ascii_letters) for x in range (randint(16,20)))\n },\n {\n \"text\":\"\".join(choice(\n string.ascii_letters) for x in range (randint(16,20)))\n }]\n for i, elem, in enumerate(posted_answers):\n self.post_data(question_id, auth_token=auth_token, data=elem)\n path = \"/api/v2/answers/users/{}\".format(user_id)\n headers = {\"Authorization\":\"Bearer {}\".format(auth_token),\n \"Content-Type\":\"application/json\"}\n answers = self.client.get(path, headers=headers)\n self.assertEqual(answers.status_code, 200)\n self.assertEqual(len(answers.json[\"answers\"]), len(posted_answers))", "def setUp(self):\n self.user1 = EngineTestCase.create_random_user()\n self.user2 = EngineTestCase.create_random_user()\n self.user3 = EngineTestCase.create_random_user()\n self.user4 = EngineTestCase.create_random_user()\n\n self.category1 = Category.objects.create(name='Category #1')\n\n self.question_1 = EngineTestCase.create_random_question(self.category1)\n self.question_2 = EngineTestCase.create_random_question(self.category1)\n self.question_3 = EngineTestCase.create_random_question(self.category1)\n self.question_4 = EngineTestCase.create_random_question(self.category1)\n\n self.game = Game.objects.create()\n\n # Add question to game.\n self.game.add_question(self.question_1)\n self.game.add_question(self.question_2)", "def process_question(qu):\n\n ## global ranking\n rank_info = {}\n rank_info_k = [\"viewcount\",\"score\",\"favoritecount\"]\n for k in rank_info_k:\n rank_info[k] = int(qu[k])\n qu.pop(k,None)\n\n rank_info[\"creationdate\"] = qu[\"creationdate\"]\n\n if qu[\"acceptedanswer\"]:\n qu[\"acceptedanswer\"] = list(qu[\"acceptedanswer\"])\n else:\n qu[\"acceptedanswer\"] = []\n\n qu.pop('comments',None) # discard comments, maybe add back later\n qu[\"rank_info\"] = rank_info\n\n return qu", "def get_question(self):\n question = self.raw_question\n if question is not None:\n return {\n \"question\": self.raw_question\n }", "def generate_answer(self, question, mode, user_id):\n def is_unicode(text):\n return len(text) == len(text.encode())\n\n if mode == 'mix':\n # 1. Intent recognition:\n prepared_question = text_prepare(question)\n features = self.tfidf_vectorizer.transform([prepared_question])\n intent = self.intent_recognizer.predict(features)[0]\n\n # 2. 
Dialogue-oriented part:\n if intent == 'dialogue':\n if(is_unicode(question)):\n return self.generate_dialogue(question, user_id, False)\n else:\n return self.generate_dialogue(question, user_id, True)\n else:\n return self.generate_goal(prepared_question, features)\n\n elif mode == 'en':\n if(is_unicode(question)):\n return self.generate_dialogue(question, user_id, False)\n else:\n time.sleep(1)\n return 'Hmm, you are sending some weird characters to me...'\n\n elif mode == \"cn\":\n if(is_unicode(question)):\n time.sleep(1)\n return '我不吃中文以外的東西拉!'\n else:\n return self.generate_dialogue(question, user_id, True)\n \n elif mode == 'stof':\n prepared_question = text_prepare(question)\n features = self.tfidf_vectorizer.transform([prepared_question])\n return self.generate_goal(prepared_question, features)", "def create_populated_question(answer_states: List[bool], question_text: str = None):\n question = create_question(question_text)\n\n for state in answer_states:\n create_answer(question, state)\n\n return question", "def _create_random_user(self,startname=\"\",site=None):\n \n username = startname + \"\".join([choice('AEOUY')+\n choice('QWRTPSDFGHHKLMNB')\n for x in range(3)])\n \n data = {'username':username,\n 'email':username+\"@test.com\"}\n \n return self._create_user(data,site)", "def get_my_questions(user_id):\n questions = select_query(\n \"SELECT q_id,question FROM question WHERE question.user_id = (%s) ORDER BY create_time DESC \", user_id)\n\n answers = select_query(\n \"SELECT answer.q_id, answer.answer, answer.a_id,answer.is_answer FROM answer Left JOIN question on answer.q_id=question.q_id WHERE question.user_id =(%s)\", user_id)\n my_questions = {q[0]: copy.deepcopy(\n Question(q[1], q_id=q[0], user_id=user_id)) for q in questions}\n\n for a in answers:\n my_questions[a[0]]['answers'].append((a[1], a[2], a[3]))\n return my_questions.values()", "def __init__(self, question_items):\n self.text = question_items[QUESTION_ITEM]\n self.penetrance = question_items[PENETRANCE_ITEM]\n self.difficulty = question_items[DIFFICULTY_ITEM]\n\n return None", "def create_dummy_form(title,text,fill_choice=[],choice_length=[]):\n # fill it with blank for dummy choices\n count=0\n choices=[]\n while count < 8:\n choices.append(None)\n count+=1\n \n # fill choices based on value on fill_choice\n for i in fill_choice:\n try :\n length = choice_length[i]\n except IndexError :\n length = 10\n choices[i] = create_random_string(length)\n\n dummy_form=CreatePollQuestion(\n {\"question_title\":title,\n \"question_text\" :text,\n \"choice_1\":choices[0],\n \"choice_2\":choices[1],\n \"choice_3\":choices[2],\n \"choice_4\":choices[3],\n \"choice_5\":choices[4],\n \"choice_6\":choices[5],\n \"choice_7\":choices[6],\n \"choice_8\":choices[7],\n })\n\n return dummy_form", "def create_question(request):\n\n if request.user.is_authenticated:\n if request.method == \"POST\":\n question_form = UserQuestionForm(request.POST)\n\n if question_form.is_valid():\n question = question_form.save(commit=False)\n question.client = request.user\n question.name = request.user.username\n question.email = request.user.email\n question.save()\n\n messages.success(\n request, \"Thank you for your message, I will get back to you shortly\")\n\n return redirect('profile')\n\n else:\n messages.warning(\n request, \"Sorry your message could not be posted, please try again\")\n\n else:\n question_form = UserQuestionForm()\n\n else:\n if request.method == \"POST\":\n question_form = QuestionForm(request.POST)\n\n if 
question_form.is_valid():\n question = question_form.save(commit=False)\n question.client = None\n question.save()\n\n messages.success(\n request, \"Thank you for your message, I will get back to you shortly\")\n\n return redirect('index')\n\n else:\n messages.warning(\n request, \"Sorry your message could not be posted, please try again\")\n\n else:\n question_form = QuestionForm()\n\n return render(request, 'question.html', {\"question_form\": question_form})", "def question_generator(self):\n self.status_conv = 'yes_no_question_asked'\n questions = config.questions\n if not self.voyage.get('voyageurs') and 'voyageur_add' not in self.infos_needed:\n self.infos_needed.append('voyageur_add')\n if self.infos_needed:\n if self.is_hotel_needed() and 'hotel' not in self.infos_needed and 'hotel' not in self.voyage:\n self.infos_needed.insert(1, 'hotel')\n self.hotel_asked = True\n key = self.infos_needed[0]\n self.info_asked = key\n return questions[key]\n else :\n self.status_conv = 'confirmation_asked'\n return self.conv_recap()", "def create_questionnaire_with(self, questionnaire_data):\n questionnaire_code = fetch_(QUESTIONNAIRE_CODE, from_(questionnaire_data))\n gen_ramdom = fetch_(GEN_RANDOM, from_(questionnaire_data))\n if gen_ramdom:\n questionnaire_code = questionnaire_code + generateId()\n self.driver.find_text_box(QUESTIONNAIRE_CODE_TB).enter_text(questionnaire_code)\n self.create_default_question(questionnaire_data[DEFAULT_QUESTION], DEFAULT_QUESTION_LINK)\n for question in fetch_(QUESTIONS, from_(questionnaire_data)):\n self.driver.find(ADD_A_QUESTION_LINK).click()\n self.fill_question_and_code_tb(question)\n self.SELECT_FUNC[fetch_(TYPE, from_(question))](question)\n return self", "def __init__(self):\n self.answers = []", "def get_question():\n flag = True\n q = None\n while (flag):\n q = random.choice(questionList)\n flag = q['flag']\n index = questionList.index(q)\n questionList[index]['flag'] = True\n return q", "async def app_questions(self, ctx: commands.Context):\n app_questions = await self.config.guild(ctx.guild).app_questions.get_raw()\n question_1 = app_questions[\"name\"]\n question_2 = app_questions[\"timezone\"]\n question_3 = app_questions[\"age\"]\n question_4 = app_questions[\"days\"]\n question_5 = app_questions[\"hours\"]\n question_6 = app_questions[\"experience\"]\n question_7 = app_questions[\"reasonforinterest\"]\n question_8 = app_questions[\"question8\"]\n question_9 = app_questions[\"question9\"]\n question_10 = app_questions[\"question10\"]\n question_11 = app_questions[\"question11\"]\n question_12 = app_questions[\"question12\"]\n question_13 = app_questions[\"finalcomments\"]\n\n await ctx.send(\n \"There are 13 questions in this application feature, with a few preloaded already for you.\\nHere is the current configuration:\"\n )\n e = discord.Embed(colour=await ctx.embed_colour())\n e.add_field(\n name=\"Question 1\", value=f\"{question_1}\" if question_1 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 2\", value=f\"{question_2}\" if question_2 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 3\", value=f\"{question_3}\" if question_3 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 4\", value=f\"{question_4}\" if question_4 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 5\", value=f\"{question_5}\" if question_5 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 6\", value=f\"{question_6}\" if question_6 else \"Not Set\", inline=False\n )\n 
e.add_field(\n name=\"Question 7\", value=f\"{question_7}\" if question_7 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 8\", value=f\"{question_8}\" if question_8 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 9\", value=f\"{question_9}\" if question_9 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 10\", value=f\"{question_10}\" if question_10 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 11\", value=f\"{question_11}\" if question_11 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 12\", value=f\"{question_12}\" if question_12 else \"Not Set\", inline=False\n )\n e.add_field(\n name=\"Question 13\", value=f\"{question_13}\" if question_13 else \"Not Set\", inline=False\n )\n await ctx.send(embed=e)", "def ask_question(self, question_text, question=None):\n if question is not None:\n q = question.to_dict()\n else:\n q = WatsonQuestion(question_text).to_dict()\n r = requests.post(self.url + '/question', json={'question': q}, headers={\n 'Accept': 'application/json',\n 'X-SyncTimeout': 30\n }, auth=(self.username, self.password))\n try:\n response_json = r.json()\n except ValueError:\n raise Exception('Failed to parse response JSON')\n return WatsonAnswer(response_json)", "def premade():\n global all_questions\n\n choice(all_questions)\n temp = all_questions.pop(0) # Treat scrambled list like a queue\n question,answer = temp\n print(\"\\n\\n{}\".format(question))\n guess = get_user_input(str)\n\n if guess == \"T\": guess = True\n if guess == \"F\": guess = False\n\n if guess == answer:\n print(\"Correct the answer is: {}\".format(answer))\n else:\n print(\"Incorrect the answer is: {}, This question will be asked again.\".format(answer))\n all_questions.append(temp) # Add it back", "def __init__(self, obj, author, quest):\n self.author = author\n self.quest = quest\n self._init = self.initialize(obj)\n self.qtext = self.quest_text(self._init)\n self.rtext = self.response_text(self._init)\n\n self.quest_length = len(self.qtext)\n self.response_lenght = len(self.rtext)", "def create_question(self, input_title, input_details, user_id):\n try:\n query = (u\"INSERT INTO tbl_questions (question_title, \"\n \"question_details, posted_by) VALUES (%s,%s,%s) \"\n \";\")\n inputs = input_title, input_details, user_id\n return run_query(query, inputs)\n except psycopg2.Error as e:\n print(e)", "def set_qs():\n\n print \"Hi there! We're going to give you a fun grammar quiz.\"\n\n user_name = raw_input(\"To start, please enter your name: \")\n\n print \"Thanks, {}!\".format(user_name)\n\n user_num = int(raw_input(\"How many questions would you like us to generate for you? 
Enter a number: \"))\n\n num_qs = validate_num(user_num)\n\n print \"Ok, we'll make you a quiz with {} questions!\".format(num_qs)\n\n return num_qs", "def generate_question(self, num_question = 10):\n\t\t\n\t\tquestions = []\n\t\tfor q in range(num_question):\n\t\t\tfor r in range(2):\n\t\t\t\tquestion = np.zeros(self.question_dim, dtype = np.float32)\n\t\t\t\tcolor = np.random.randint(len(COLOR))\n\t\t\t\tquestion[color] = 1.0\n\t\t\t\tquestion[6 + r] = 1.0\n\t\t\t\tquestion_label = np.random.randint(3)\n\t\t\t\tquestion[8 + question_label] = 1.0\n\t\t\t\tquestions.append(question)\n\t\treturn questions", "def answer_question(self, ques, ans):\n res = Response(Answer(ans, len(self.response_list)), ques, self)\n self.response_list.append(res)\n return res", "def next_question(self):\n self.user_answers = []\n self.curent_question = choice(self.to_ask)", "def send_question(data):\n chat_id = data['chat_id']\n context = data['context']\n\n # Update question and answer\n choices = list(context.bot_data['quiz'][context.chat_data['topic']].keys())\n question = random.choice(choices)\n solution = context.bot_data['quiz'][context.chat_data['topic']][question]\n\n # Add helper blanks to question\n question += \"\\n\"\n for char in solution:\n if char != \" \":\n question += \"_ \"\n else:\n question += \" \"\n\n context.chat_data['question'] = question\n context.chat_data['solution'] = solution\n context.chat_data['display_solution'] = solution\n\n # Send question\n context.bot.send_message(chat_id, question)", "def shuffle_question(self):\n r = random.SystemRandom()\n r.shuffle(self.question_list)", "def create(self, validated_data):\n question = Question.objects.create(**validated_data)\n question.save()\n if 'tag' in self.context['request'].data:\n tag = Tag.objects.get(id=self.context['request'].data['tag'])\n question_tag = QuestionTag.objects.create(question=question,\n tag=tag)\n question_tag.save()\n return question", "def test_rescoring_randomized_problem(self):\r\n # First define the custom response problem:\r\n problem_url_name = 'H1P1'\r\n self.define_randomized_custom_response_problem(problem_url_name)\r\n location = InstructorTaskModuleTestCase.problem_location(problem_url_name)\r\n descriptor = self.module_store.get_item(location)\r\n # run with more than one user\r\n userlist = ['u1', 'u2', 'u3', 'u4']\r\n for username in userlist:\r\n # first render the problem, so that a seed will be created for this user\r\n self.render_problem(username, problem_url_name)\r\n # submit a bogus answer, in order to get the problem to tell us its real answer\r\n dummy_answer = \"1000\"\r\n self.submit_student_answer(username, problem_url_name, [dummy_answer, dummy_answer])\r\n # we should have gotten the problem wrong, since we're way out of range:\r\n self.check_state(username, descriptor, 0, 1, 1)\r\n # dig the correct answer out of the problem's message\r\n module = self.get_student_module(username, descriptor)\r\n state = json.loads(module.state)\r\n correct_map = state['correct_map']\r\n log.info(\"Correct Map: %s\", correct_map)\r\n # only one response, so pull it out:\r\n answer = correct_map.values()[0]['msg']\r\n self.submit_student_answer(username, problem_url_name, [answer, answer])\r\n # we should now get the problem right, with a second attempt:\r\n self.check_state(username, descriptor, 1, 1, 2)\r\n\r\n # redefine the problem (as stored in Mongo) so that the definition of correct changes\r\n self.define_randomized_custom_response_problem(problem_url_name, redefine=True)\r\n # confirm 
that simply rendering the problem again does not result in a change\r\n # in the grade (or the attempts):\r\n self.render_problem('u1', problem_url_name)\r\n self.check_state('u1', descriptor, 1, 1, 2)\r\n\r\n # rescore the problem for only one student -- only that student's grade should change\r\n # (and none of the attempts):\r\n self.submit_rescore_one_student_answer('instructor', problem_url_name, User.objects.get(username='u1'))\r\n for username in userlist:\r\n self.check_state(username, descriptor, 0 if username == 'u1' else 1, 1, 2)\r\n\r\n # rescore the problem for all students\r\n self.submit_rescore_all_student_answers('instructor', problem_url_name)\r\n\r\n # all grades should change to being wrong (with no change in attempts)\r\n for username in userlist:\r\n self.check_state(username, descriptor, 0, 1, 2)", "def create_registration(cls, name, question, default_response, contacts, user):\n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_REGISTRATION)\n poll.contacts = contacts \n return poll", "def user_question():\n return input('What would you like? (espresso/latte/cappuccino): ')", "def generate_answer(self, question):\n\n # Recognize intent of the question using `intent_recognizer`.\n # Don't forget to prepare question and calculate features for the question.\n \n prepared_question = text_prepare(question)\n features = self.tfidf_vectorizer.transform([prepared_question])\n intent = self.intent_recognizer.predict(features)\n print(\"intent:\", intent)\n \n # Chit-chat part: \n if intent == 'dialogue':\n # Pass question to chitchat_bot to generate a response. \n response = self.chatbot.get_response(prepared_question)\n return response\n \n # Goal-oriented part:\n else: \n # Pass features to tag_classifier to get predictions.\n tag = self.tag_classifier.predict( features)[0]\n \n # Pass prepared_question to thread_ranker to get predictions.\n thread_id = self.thread_ranker.get_best_thread(prepared_question, tag)\n \n return self.ANSWER_TEMPLATE % (tag, thread_id)", "async def question_setup(self, ctx):\n config = self.bot.db['questions'].setdefault(str(ctx.guild.id), {})\n if str(ctx.channel.id) in config:\n msg = await hf.safe_send(ctx, \"This will reset the questions database for this channel. \"\n \"Do you wish to continue? Type `y` to continue.\")\n try:\n await self.bot.wait_for('message', timeout=15.0, check=lambda m: m.content == 'y' and\n m.author == ctx.author)\n except asyncio.TimeoutError:\n await msg.edit(content=\"Canceled...\", delete_after=10.0)\n return\n msg_1 = await hf.safe_send(ctx,\n f\"Questions channel set as {ctx.channel.mention}. In the way I just linked this \"\n f\"channel, please give me a link to the log channel you wish to use for this channel.\")\n try:\n msg_2 = await self.bot.wait_for('message', timeout=20.0, check=lambda m: m.author == ctx.author)\n except asyncio.TimeoutError:\n await msg_1.edit(content=\"Canceled...\", delete_after=10.0)\n return\n\n try:\n log_channel_id = int(msg_2.content.split('<#')[1][:-1])\n log_channel = self.bot.get_channel(log_channel_id)\n if not log_channel:\n raise NameError\n except (IndexError, NameError):\n await hf.safe_send(ctx, f\"Invalid channel specified. Please start over and specify a link to a channel \"\n f\"(should highlight blue)\")\n return\n config[str(ctx.channel.id)] = {'questions': {},\n 'log_channel': log_channel_id}\n await hf.safe_send(ctx,\n f\"Set the log channel as {log_channel.mention}. Setup complete. 
Try starting your first \"\n f\"question with `;question <title>` in this channel.\")", "def __init__(self, question=u\"\", tier=0, answers=[], game_round=0,\n media=(\"\", \"\", \"\"), media_path=\"data\", web_root=\"data\"):\n self.question = question\n self.answers = answers\n self.tier = tier\n self.game_round = game_round\n self.media = media\n self.media_path = media_path\n self.web_root = web_root", "async def set_questions(self, ctx: commands.Context):\n\n def check(m):\n return m.author == ctx.author and m.channel == ctx.channel\n\n await ctx.send(\n \"Let's set up those questions we've not pre-filled:\\nYou will be setting questions 8-12. You can view the preloaded questions by passing `{}appq`. To begin, reply with `admin abuse` *spelled exact*\".format(\n ctx.prefix\n )\n )\n try:\n confirmation = await ctx.bot.wait_for(\"message\", check=check, timeout=20)\n if confirmation.content.lower() != \"admin abuse\":\n return await ctx.send(\"Alright, let's do these later then\")\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took to long to respond, gotta be smarter than the users you're hiring for sure.\"\n )\n\n app_questions = await self.config.guild(ctx.guild).app_questions.get_raw()\n question_8 = app_questions[\"question8\"]\n question_9 = app_questions[\"question9\"]\n question_10 = app_questions[\"question10\"]\n question_11 = app_questions[\"question11\"]\n question_12 = app_questions[\"question12\"]\n await ctx.send(\n \"Alright, let's start with question 8: You have 5min to decide and respond with question you'd like, or respond with cancel to do this later\"\n )\n\n if question_8 is not None:\n await ctx.send(\n f\"Looks like question 8 is currently `{question_8}`:\\n Do you want to change this? Type `no` to skip or the question you wish to change to if you want to change.\"\n )\n try:\n submit_8 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_8.content.lower() != \"no\":\n if len(submit_8.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question8.set(\n submit_8.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n\n if question_8 is None:\n try:\n submit_8 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_8.content.lower() != \"cancel\":\n if len(submit_8.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question8.set(\n submit_8.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 9: Please respond with your next app question\")\n\n if question_9 is not None:\n await ctx.send(\n f\"Looks like question 9 is currently `{question_9}`:\\n Do you want to change this? Type `no` to skip or the question you wish to change to if you want to change.\"\n )\n try:\n submit_9 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_9.content.lower() != \"no\":\n if len(submit_9.content) > 750:\n return await ctx.send(\n \"Talkitive are we? 
Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question9.set(\n submit_9.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 10: Please respond with your next app question\")\n\n if question_9 is None:\n try:\n submit_9 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_9.content.lower() != \"cancel\":\n if len(submit_9.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question9.set(\n submit_9.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 10: Please respond with your next app question\")\n\n if question_10 is not None:\n await ctx.send(\n f\"Looks like question 10 is currently `{question_10}`:\\n Do you want to change this? Type `no` to skip or the question you wish to change to if you want to change.\"\n )\n try:\n submit_10 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_10.content.lower() != \"no\":\n if len(submit_10.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question10.set(\n submit_10.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 11: Please respond with your next app question\")\n\n if question_10 is None:\n try:\n submit_10 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_10.content.lower() != \"cancel\":\n if len(submit_10.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question10.set(\n submit_10.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 11: Please respond with your next app question\")\n\n if question_11 is not None:\n await ctx.send(\n f\"Looks like question 11 is currently `{question_11}`:\\n Do you want to change this? Type `no` to skip or the question you wish to change to if you want to change.\"\n )\n try:\n submit_11 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_11.content.lower() != \"no\":\n if len(submit_11.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question11.set(\n submit_11.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 12: Please respond with your next app question\")\n\n if question_11 is None:\n try:\n submit_11 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_11.content.lower() != \"cancel\":\n if len(submit_11.content) > 750:\n return await ctx.send(\n \"Talkitive are we? 
Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question11.set(\n submit_11.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n await ctx.send(\"Moving to question 12: Please respond with your next app question\")\n\n if question_12 is not None:\n await ctx.send(\n f\"Looks like question 12 is currently `{question_12}`:\\n Do you want to change this? Type `no` to skip or the question you wish to change to if you want to change.\"\n )\n try:\n submit_12 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_12.content.lower() != \"no\":\n if len(submit_12.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question12.set(\n submit_12.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n\n if question_12 is None:\n try:\n submit_12 = await ctx.bot.wait_for(\"message\", check=check, timeout=300)\n if submit_12.content.lower() != \"cancel\":\n if len(submit_12.content) > 750:\n return await ctx.send(\n \"Talkitive are we? Too many characters to fit in final embed, shorten the question some\"\n )\n await self.config.guild(ctx.guild).app_questions.question12.set(\n submit_12.content\n )\n except asyncio.TimeoutError:\n return await ctx.send(\n \"Took too long bud. Let's be coherent for this and try again.\"\n )\n\n await ctx.send(\n \"That's all the questions and your apps are set *maybe, if you answered, anyway*. Check this with `{}appq`\".format(\n ctx.prefix\n )\n )", "def scrape_question(page_text, ans):\n\n sq = BeautifulSoup(page_text, 'html.parser')\n question = Question(sq, ans)\n\n return question.__dict__", "def __init__(self):\n\n self.question_list = self.read_quiz_json()", "def quick_quiz(character_set):", "def create_quiz():\n try:\n\n quiz_category_id = request.json.get(\"quiz_category_id\")\n previous_question_ids = request.json.get(\"previous_question_ids\")\n questions = Question.query.filter(\n ~Question.id.in_(previous_question_ids)\n )\n\n if quiz_category_id != 0:\n questions = questions.filter(\n Question.category_id == quiz_category_id\n )\n\n questions = questions.all()\n\n if len(questions) > 0:\n question = random.choice(questions).format()\n else:\n question = None\n\n response = jsonify({\"success\": True, \"question\": question})\n\n except AttributeError:\n abort(400)\n\n return response", "def initialize_new_questionnaire(questionnaire, option_type, uuid):\r\n q = {}\r\n if (type(questionnaire) == dict):\r\n for key, val in questionnaire.items():\r\n if key != 'index':\r\n\r\n q[key] = [val] if type(val) != list else val\r\n questionnaire = pd.DataFrame(q)\r\n\r\n\r\n if \"_questionnaire\" not in option_type:\r\n option_type = option_type + \"_questionnaire\"\r\n\r\n option_type = option_type.lower()\r\n if 'option_type' not in questionnaire:\r\n questionnaire['option_type'] = [option_type]\r\n questionnaire['uuid'] = [uuid]\r\n questionnaire['timestamp'] = [datetime.datetime.utcnow()]\r\n print(\"this is questionaire: \", questionnaire)\r\n\r\n questionnaire=questionnaire.set_index('uuid')\r\n print(\"this is questionaire: \", questionnaire)\r\n questionnaire.to_sql(option_type, con=Database.DATABASE.engine, if_exists=\"append\", index=True)", "def 
create_question(question_text, days, choices=('choice 1',)):\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n for choice in choices:\n question.choice_set.create(choice_text=choice)\n return question", "def reset_questions(questions):\n random.shuffle(questions)", "def test_asked_by(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('jsocol', 2),\n ('pcraciunoiu', 2),\n )\n\n # Set up all the question data---creats users, creates the\n # questions, shove it all in the index, then query it and see\n # what happens.\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n ques = QuestionFactory(title=u'audio', creator=u)\n ques.tags.add(u'desktop')\n ans = AnswerFactory(question=ques)\n AnswerVoteFactory(answer=ans, helpful=True)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 2, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'asked_by': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])", "def _prepare(self):\n self.code = random.randint(1000,9999)\n self.user_guess.append(\"----\")\n self.user_guess.append(\"----\")\n self.applied_guess.append(\"****\")\n self.applied_guess.append(\"****\")", "def get_data_from_user(questions, answers_types, id_storage, id_, is_alpha):\n user_data = []\n\n for i in range(len(questions)):\n user_input = None\n\n while type(user_input) != answers_types[i]:\n user_input = ui.get_inputs([questions[i]], '')[0]\n user_input = get_correct_data_types(user_input, answers_types[i], is_alpha[i])\n\n # Other differences while asking for data here\n\n user_data.append(user_input)\n\n user_data = [str(record) for record in user_data]\n\n return user_data", "def __init__(self, question_list):\n self.question_list = question_list\n self.question_number = 0\n self.score = 0", "def addQuestion(self, question):\n if type(question) != dict:\n raise TypeError(\"Question should be a dict.\")\n\n keys = question.keys()\n # Check to make sure entries for all necessary keys are present.\n necessary = [\"question\", \"correct\"]\n for n in necessary:\n if n not in keys:\n raise ValueError(\"No question in keys.\")\n self.questions.append(question)", "def create(text, is_correct, question_id):\n answer = Answer(question_id=question_id, text=text, is_correct=is_correct)\n try:\n answer.save()\n return answer\n except IntegrityError:\n return None", "def administer(self):\n \n # create a dictionary that will count True and False answers\n score = {True: 0, False: 0}\n\n # iterate through each question in the list of questions\n # keep track of user's score. 
The question and answer are stored as\n # a list, so convert back into Question class first to use\n # ask_and_evaluate\n\n # for test questions in order:\n\n # for i in range(len(self.questions)):\n # question = Question(self.questions[i][0], self.questions[i][1])\n # score_question = question.ask_and_evaluate()\n # score[score_question] = score.get(score_question, 0) + 1\n\n\n # for random order test questions:\n list_of_questions = self.questions\n\n from random import choice\n \n for i in range(len(list_of_questions)):\n # choose a question randomly:\n question_choice = choice(list_of_questions)\n # delete that from the list of questions so it's not chosen again\n list_of_questions.remove(question_choice)\n # create a Question object from the question and answer\n question = Question(question_choice[0], question_choice[1])\n # ask and evaluate the question\n score_question = question.ask_and_evaluate()\n # record the score\n score[score_question] = score.get(score_question, 0) + 1\n\n\n # print the total number of correct and incorrect responses\n print \"Total correct: {}. Total incorrect: {}\".format(score[True], \n score[False])\n\n # return the number of incorrect and correct responses as a dictionary\n return score", "def convert_practitioner_fhir_to_meta(pract_res, user):\n data = {}\n data['user'] = user\n data['npi']= pract_res['identifier'][0]['value']\n data['fhir_id']= pract_res['id']\n\n\n return data", "def display_possible_answers(question):\n answers = question['incorrect'] + [question['correct']]\n random.shuffle(answers)\n answer_dict = {}\n for i, answer in enumerate(answers):\n answer_dict[str(i + 1)] = answer\n print(f\"{i + 1}: {answer}\\n\")\n return answer_dict", "async def wouldyourather(message: discord.Message, opt: options=None):\n # If there are no options, the bot will ask the questions (if there are any to choose from)\n if opt is None:\n assert message.channel.id not in sessions, \"**A would you rather session is already in progress.**\"\n sessions.add(message.channel.id)\n\n assert db.data[\"questions\"], \"**There are ZERO questions saved. 
Ask me one!**\"\n\n question = random.choice(db.data[\"questions\"])\n choices = question[\"choices\"]\n await client.say(message, \"Would you rather **{}** or **{}**?\".format(*choices))\n\n timeout = db.data[\"timeout\"]\n replied = []\n\n # Wait for replies from anyone in the channel\n while True:\n reply = await client.wait_for_message(timeout=timeout, channel=message.channel,\n check=lambda m: m.author not in replied)\n # Break on timeout\n if reply is None:\n break\n\n # Check if the choice is vlaid\n choice = get_choice(choices, reply.content)\n if choice is None:\n continue\n\n # Register that this author has replied\n replied.append(reply.author)\n\n # Update the answers in the DB\n # We don't care about multiples, just the amount (yes it will probably be biased)\n question[\"answers\"][choice] += 1\n\n name = reply.author.display_name\n response = random.choice(db.data[\"responses\"]).format(name=name, NAME=name.upper(), choice=choices[choice])\n await client.say(message, response)\n\n # Say the total tallies\n await client.say(message, \"A total of {0} would **{2}**, while {1} would **{3}**!\".format(\n *question[\"answers\"], *choices))\n db.save()\n sessions.remove(message.channel.id)\n\n # Otherwise, the member asked a question to the bot\n else:\n db.data[\"questions\"].append(dict(\n choices=list(opt),\n answers=[0, 0]\n ))\n db.save()\n\n answer = random.choice(opt)\n await client.say(message, \"**I would {}**!\".format(answer))", "def generate_quest(self):\n\n if odds(3):\n\n quest_items = add_dicts_together(items[\"master\"], items[self.p.square.square_type])\n quest_item = random.choice(list(quest_items.keys()))\n\n i = Item(quest_item, 0, **quest_items[quest_item])\n self.inventory.append(i)\n\n quantity = {'super rare': '1',\n 'rare': '2',\n 'uncommon': '3',\n 'common': '6',\n 'super common': '15'}\n q = quantity[i.rarity]\n\n self.quest = i, int(q), f\"{self.p.name}, if you bring \" \\\n f\"me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, \" \\\n f\"I will teach you a valuable skill.\"\n return\n elif odds(5):\n mobs = []\n for biome, building in buildings.items():\n for b, attributes in building.items():\n if attributes.get('mobs'):\n for k in attributes['mobs'].keys():\n mobs.append(k)\n for biome, mob in wild_mobs.items():\n for k in mob.keys():\n mobs.append(k)\n target = f\"{mobs[random.randint(0, len(mobs)-1)]} named {names[random.randint(0, len(names)-1)]}\"\n print(f\"Well, we'll keep this off the record, but I can arrange for some money to find its way \"\n f\"into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...\")\n self.p.hit_list.append(target)\n return False\n\n else:\n return None", "def get(self):\n user = getAuthData()\n question_list = list_questions()\n # user_question_list = list_questions_by_username(user['username'])\n # nonuser_question_list = list_questions_by_username(user['username'], invert=True)\n\n tasks = get_tasks().values()\n\n # filter out the SUCCESS/FAILURE tasks\n tasks = [t for t in tasks if not (t['state'] == 'SUCCESS' or t['state'] == 'FAILURE' or t['state'] == 'REVOKED')]\n\n # get question hashes\n question_tasks = {q.id:[] for q in question_list}\n for t in tasks:\n if not t['args']:\n continue\n match = re.match(r\"[\\[(]'(.*)',?[)\\]]\", t['args'])\n if not match:\n continue\n question_id = match.group(1)\n question_tasks[question_id].append(t)\n\n # split into answer and update tasks\n for t in tasks:\n t['type'] = 'answering' if t['name'] == 
'manager.tasks.answer_question' else \\\n 'refreshing KG' if t['name'] == 'manager.tasks.update_kg' else \\\n 'something?'\n\n def augment_info(question):\n answerset_timestamps = [a.timestamp for a in question.answersets]\n if answerset_timestamps:\n latest_idx = answerset_timestamps.index(max(answerset_timestamps))\n latest_answerset_id = question.answersets[latest_idx].id\n latest_answerset_timestamp = question.answersets[latest_idx].timestamp\n else:\n latest_answerset_id = None\n latest_answerset_timestamp = None\n q = question.toJSON()\n q['user_email'] = question.user.email\n q.pop('user_id')\n q.pop('machine_question')\n return {'latest_answerset_id': latest_answerset_id,\n 'latest_answerset_timestamp': latest_answerset_timestamp.isoformat() if latest_answerset_timestamp else None,\n 'tasks': [t['type'] for t in question_tasks[question.id]],\n **q}\n\n return [augment_info(q) for q in question_list], 200", "def buildQuestion():\n #example.com\n QNAME = b\"\\x07\\x65\\x78\\x61\\x6d\\x70\\x6c\\x65\\x03\\x63\\x6f\\x6d\\x00\"\n\n \"\"\"\n A two octet code which specifies the type of the query.\n The values for this field include all codes valid for a\n TYPE field, together with some more general codes which\n can match more than one type of RR.\n \"\"\" \n QTYPE = b\"\\x00\\x01\"\n\n \"\"\"\n A two octet code that specifies the class of the query.\n For example, the QCLASS field is IN for the Internet.\n \"\"\"\n QCLASS = b\"\\x00\\x01\"\n\n dnsBody = QNAME + QTYPE + QCLASS\n #print(dnsBody)\n return dnsBody", "def ask_msg(self, context):\n msg = self._get_base_message(self.ASK_QUESTION)\n self._add_thread(msg)\n self._add_relationship(msg, self.for_relationship)\n msg['text'] = self.question\n msg['detail'] = self.descr\n msg['valid_responses'] = self.valid_responses or []\n msg['signature_required'] = self.signature_required\n return msg", "def add_random_id(self, user_id, random_id, survey_url):\n if user_id not in self.user_id_to_random_ids:\n self.user_id_to_random_ids[user_id] = []\n self.user_id_to_survey_urls[user_id] = []\n self.user_id_to_random_ids[user_id].append(random_id)\n self.user_id_to_survey_urls[user_id].append(survey_url)", "def data_for_question(self, question_type):\n\t\treturn {}", "def get_question():\n last_line = '\\n'\n question = \"\"\n\n print(\"Type in your question here:\")\n while last_line != \"\":\n current_line = input()\n if current_line == \"\":\n break\n question += current_line + \"\\n\"\n\n variables = {}\n\n # Special syntax variables\n rand = random.randint\n\n array_of_lines = question.split(\"\\n\")\n\n question_is_present = False\n\n for line in array_of_lines:\n if \"QUESTION\" in line:\n question_is_present = True\n break\n\n if question_is_present:\n variable_lines = []\n secret_lines = []\n question_lines = []\n\n for index in range(len(array_of_lines)):\n if array_of_lines[index] == \"SECRET\":\n secret_index = index\n if array_of_lines[index] == \"QUESTION\":\n question_index = index\n break\n\n # Adds variables to variable_lines\n for index in range(secret_index):\n variable_lines.append(array_of_lines[index])\n\n # Adds answer to secret_lines\n for index in range(secret_index + 1, question_index):\n secret_lines.append(array_of_lines[index])\n\n # Adds question lines to question_lines\n for index in range(question_index + 1, len(array_of_lines)):\n question_lines.append(array_of_lines[index])\n\n for line in variable_lines:\n variable, value = line.split(\"=\")\n variables[\"$\" + variable] = str(eval(value)).replace(\"_\", \" 
\")\n\n for index in range(len(secret_lines)):\n secret_lines[index] = science_utils.multiple_replace(line, variables)\n variable, value = secret_lines[index].split(\"=\")\n variables[\"answer\"] = str(eval(value)).replace(\"_\", \" \")\n\n for line in question_lines:\n question_lines[question_lines.index(line)] = science_utils.multiple_replace(line, variables)\n\n return question_lines, variables[\"answer\"]\n\n # This is the backup method, for when there is no QUESTION line found\n else:\n # Calculate values\n for word in array_of_lines:\n if \"=\" in word:\n variable, value = word.split(\"=\")\n variables[\"$\" + variable] = str(eval(value)).replace(\"_\", \" \")\n\n for words in array_of_lines:\n array_of_lines[array_of_lines.index(words)] = science_utils.multiple_replace(words, variables)\n\n to_return = []\n for line in array_of_lines:\n if \"=\" not in line:\n to_return.append(line)", "def create_choice(question, choice_text, votes=0):\n return question.choice_set.create(choice_text=choice_text, votes=votes)", "def create_question():\n if request.content_type != \"application/json\":\n abort(415)\n question_text = request.json['question']\n answer = request.json['answer']\n difficulty = request.json['difficulty']\n category = request.json['category']\n\n question_object = Question(question_text, answer, category, difficulty)\n db.session.add(question_object)\n db.session.commit()\n return jsonify({\n \"success\": True\n }), 201", "def test_create_new_question(self):\n response = self.client().post('/questions', json=self.new_question)\n body = json.loads(response.data)\n\n question = Question.query.filter_by(id=body['created']).one_or_none()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['success'], True)\n self.assertIsNotNone(question)", "def post_question(payload):\n new_question = deepcopy(payload)\n new_question[\"answers\"] = {}\n new_question.update(\n {\"id\": generate_a_new_primary_id(), \n \"postedAt\": str(datetime.now()), \n \"randomId\": str(uuid())}\n )\n for answer in payload[\"answers\"]:\n new_question[\"answers\"].update({answer: 0})\n \n print(f\"the to be posted question is: {new_question}\")\n client.put_item(Item=new_question)" ]
[ "0.6517821", "0.6408259", "0.6362986", "0.6347021", "0.6313082", "0.61639714", "0.6121847", "0.6114391", "0.6093224", "0.6044101", "0.6040264", "0.5993357", "0.59512204", "0.5943956", "0.5889812", "0.58555055", "0.5852014", "0.58435476", "0.58026284", "0.57996535", "0.5797478", "0.57950056", "0.57722247", "0.5760061", "0.57479465", "0.5746581", "0.5733781", "0.5723847", "0.57101524", "0.5651535", "0.5598139", "0.5562515", "0.55471843", "0.5543439", "0.5522998", "0.55161107", "0.5503315", "0.5492159", "0.5490168", "0.5473506", "0.5473032", "0.546193", "0.54469305", "0.54453146", "0.5436557", "0.54269105", "0.5399184", "0.53886575", "0.5388178", "0.53825355", "0.53805417", "0.5375255", "0.5372629", "0.5367978", "0.5357881", "0.5348438", "0.53412455", "0.5338901", "0.53307414", "0.5321771", "0.531557", "0.5310455", "0.5309288", "0.5300261", "0.5297142", "0.52951324", "0.5291962", "0.52897656", "0.5275692", "0.5271609", "0.5270891", "0.5269381", "0.52647066", "0.5264191", "0.52616096", "0.52570105", "0.5253911", "0.52512944", "0.5247392", "0.52452654", "0.52403486", "0.52389264", "0.5236066", "0.52250105", "0.5221453", "0.52182364", "0.5216819", "0.5212495", "0.52083963", "0.52065253", "0.51940095", "0.5180088", "0.51773363", "0.51727355", "0.5172491", "0.51713496", "0.51683736", "0.5167967", "0.5159762", "0.5155574" ]
0.69050956
0
create CreatePollQuestion dummy form
def create_dummy_form(title, text, fill_choice=[], choice_length=[]):
    # start with eight blank (None) dummy choices
    choices = [None] * 8

    # fill the choices at the indices listed in fill_choice with random
    # strings; fall back to length 10 when choice_length has no entry
    # for that index
    for i in fill_choice:
        try:
            length = choice_length[i]
        except IndexError:
            length = 10
        choices[i] = create_random_string(length)

    dummy_form = CreatePollQuestion(
        {"question_title": title,
         "question_text": text,
         "choice_1": choices[0],
         "choice_2": choices[1],
         "choice_3": choices[2],
         "choice_4": choices[3],
         "choice_5": choices[4],
         "choice_6": choices[5],
         "choice_7": choices[6],
         "choice_8": choices[7],
         })

    return dummy_form
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_freeform(cls, name, question, default_response, contacts, user): \n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_TEXT)\n poll.contacts = contacts \n return poll", "def __init__(self, radio_poll, *args, **kwargs):\n super(RadioPollChoiceForm, self).__init__(*args, **kwargs)\n choices = (((None, '----'),) +\n tuple(radio_poll.answers.values_list('id', 'answer')))\n self.fields['radio_poll__%s' % str(radio_poll.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=choices,\n label=radio_poll.question))", "def createForm(request):\n if request.method == 'POST':\n form = QuestionFormForm(request.POST)\n if form.is_valid():\n #return the uuid so the organization can use that link in the post to connect to the questionform\n formID = form.save().UUID\n #send them the url for the form\n messages.success(request, 'You have made your question form accessible at: ' + request.build_absolute_uri('/post/') + f'apply/{formID}')\n context = {'form': form}\n return render(request, 'scholarship.html', context=context)\n form = QuestionFormForm()\n context = {'form': form}\n return render(request, 'scholarship.html', context=context)", "def test_create_new_form(self):\n\n survey = self._create_test_survey()\n assert survey is not None\n\n new_survey = SurveyForm.get(self.test_survey_name)\n assert new_survey is not None\n assert new_survey.form == self.test_form", "def create_poll(question, days):\n\treturn Poll.objects.create(\n\t\tquestion=question, \n\t\tpub_date=timezone.now() + datetime.timedelta(days=days)\n\t\t)", "def test_question_without_choices(self):\n set_up_user(self)\n self.assertFalse(self.user.is_superuser)\n\n question_no_choices = create_question_without_choices(question_text=\"Question wihout Choices.\", days=-1)\n url = reverse('polls:detail', args=(question_no_choices.id,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)", "def test_question_with_choices(self):\n create_question(question_text='Question with choices', days=0)\n response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['latest_questions_list'], ['<Question: Question with choices>'])", "def test_question_with_choices(self):\n question = create_question(question_text='Question with choices', days=0)\n response = self.client.get(reverse('polls:details', args=(question.id, )))\n self.assertContains(response, question.question_text)", "def test_create_single_poll_submission(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. 
This needs consideration.\r\n pass", "def get_form(self):\n return QuestionForm()", "def _create_test_survey(self):\n return SurveyForm.create(self.test_survey_name, self.test_form)", "def make_form(self):", "def create_question(question_text, days, choices=('choice 1',)):\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n for choice in choices:\n question.choice_set.create(choice_text=choice)\n return question", "def create_choices(question_model, text=\"text\", total_votes = 0):\n return Choice.objects.create(question=question_model, \n text=text, \n total_votes=total_votes)", "def create_question(question_text, days, create_choice=True):\n\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n if create_choice:\n question.choice_set.create(choice_text=\"Choice 1\", votes=0)\n return question", "def test_create_new_question(self):\n response = self.client().post('/questions', json=self.new_question)\n body = json.loads(response.data)\n\n question = Question.query.filter_by(id=body['created']).one_or_none()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['success'], True)\n self.assertIsNotNone(question)", "def new_from_post():\n # If you make a post request with a question_id we will assume you want a new question editor\n # we will prepopulate the question new page with data from that question (if it is a valid question id)\n question_id = request.form['question_id'] if request.form['question_id'] else ''\n\n return render_template('questionNew.html', question_id=question_id)", "def test_create_questions(self):\n res = self.client().post('/questions',\n json={\n \"question\": \"What is chemical \\\n composition of water\",\n \"answer\": \"H2O\",\n \"category\": 1,\n \"difficulty\": 2\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created'])", "def testQuestionField(self):\n sdq1 = getattr(self.s1, 'sdq1')\n self.app.REQUEST.form['showYMD'] = False\n self.app.REQUEST.form['showHM'] = False\n app = self.app\n dummy_controller_state = ControllerState(\n id='base_edit',\n context=sdq1,\n button='submit',\n status='success',\n errors={},\n next_action=None,)\n controller = self.portal.portal_form_controller\n controller_state = controller.validate(dummy_controller_state, app.REQUEST, ['validate_base',])\n errors = controller_state.getErrors()\n errors = sdq1.post_validate(self.app.REQUEST, errors)\n assert errors != {}, \"Validation error not raised\"\n assert errors.has_key('showYMD')\n assert errors.has_key('showHM')", "def test_question_with_choices(self):\n question = create_question(question_text='Question with choices', days=0)\n response = self.client.get(reverse('polls:results', args=(question.id, )))\n self.assertContains(response, question.question_text)", "def question_new_validate():", "def test_question_without_choices_for_admin(self):\n set_up_super_user(self)\n self.assertTrue(self.user.is_superuser)\n\n question_no_choices = create_question_without_choices(question_text=\"Question wihout Choices.\", days=-1)\n url = reverse('polls:detail', args=(question_no_choices.id,))\n response = self.client.get(url)\n self.assertContains(response, question_no_choices.question_text)", "def test_create_question(self):\n res = self.client().post('/api/questions', json=self.new_question)\n res_body = 
json.loads(res.data)\n\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res_body['success'])\n self.assertTrue(res_body['created'])\n \n new_question = Question.query.filter(Question.id == res_body['created']).one_or_none()\n self.assertTrue(new_question)", "def __init__(self,\n quiz_size_slug=Quiz.DEFAULT_QUIZ_SIZE_SLUG,\n *args, **kwargs):\n super(QuizForm, self).__init__(*args, **kwargs)\n quiz_json = QuizJson()\n question_count = Quiz.get_question_count_for_slug(quiz_size_slug)\n self.question_count = question_count\n\n for question_no in range(0, question_count):\n question_no_str = str(question_no)\n question_no_2_chars = question_no_str.zfill(2)\n question_key = 'question_' + question_no_2_chars\n form_question_no_str = str(question_no + 1)\n question_text = quiz_json.get_question_text(question_no)\n label = form_question_no_str + '. ' + question_text\n radio_widget = forms.RadioSelect(attrs={'class': 'quiz_answer'})\n choices = quiz_json.get_choices(question_no)\n self.fields[question_key] = forms.ChoiceField(\n widget=radio_widget, label=label, choices=choices\n )", "def create_registration(cls, name, question, default_response, contacts, user):\n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_REGISTRATION)\n poll.contacts = contacts \n return poll", "def test_make_form():", "def create(self):\n\n if self.data.get('hydrogeology', None):\n self.form = self._make_form(\n self.well.hydrogeology_parameter if self.well.hydrogeology_parameter else HydrogeologyParameter()\n , HydrogeologyParameterForm, self.data['hydrogeology'])\n\n if self.data['hydrogeology'].get('pumping_test'):\n self.pumping_test_form = self._make_form(\n self.form.instance.pumping_test if self.form.instance.pumping_test else PumpingTest(),\n PumpingTestForm, self.data['hydrogeology']['pumping_test']\n )", "def test_create_new_question(self):\n\n # get number of questions before post\n questions_before = Question.query.all()\n\n # create new question and load response data\n response = self.client().post('/questions', json=self.new_question)\n data = json.loads(response.data)\n\n # get number of questions after post\n questions_after = Question.query.all()\n\n # see if the question has been created\n question = Question.query.filter_by(id=data['created']).one_or_none()\n\n # check status code and success message\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data['success'], True)\n\n # check if one more question after post\n self.assertTrue(len(questions_after) - len(questions_before) == 1)\n\n # check that question is not None\n self.assertIsNotNone(question)", "def test_meeting_poll_create(self):\n pass", "def create_question(user,title='title',text='text'):\n return Question.objects.create(created_by=user, title=title, text=text)", "def ask_question():\n title_question = request.form.get(\"title\")\n question = request.form.get(\"question\")\n\n date_string = datetime.today().strftime('%Y-%m-%d')\n \n ask = Question(user_id = session[\"user_id\"],question_created=date_string, title_question = title_question, question = question)\n\n db.session.add(ask)\n db.session.commit()\n\n return \"question added\"", "def test_vote_view_with_question_which_has_no_choices(self):\n past_question_without_choices = create_question(question_text=\"Test \\\n question without choices\", days=-30, create_choice=False)\n response = self.client.get(reverse('polls:vote', \n args=(past_question_without_choices.id,)))\n 
self.assertEqual(response.status_code, 404)", "def createqn(quizID):\n if not current_user.check_educator():\n return render_template('errors/error403.html'), 403\n quiz = validate_quiz_link(current_user, quizID)\n form = QuestionForm()\n delQuizForm = DeleteForm(prefix='quiz')\n delQnForm = DeleteForm(prefix='qn')\n if form.validate_on_submit():\n #Commit inputs to database\n options = (form.op1.data, form.op2.data, form.op3.data, form.op4.data)\n question = add_question(current_user, form.qn.data, options, form.corrOp.data, form.topic.data)\n if form.img.data:\n question.image_file = update_qn_image(form.img.data)\n add_question_quiz(quiz, question)\n flash('Question added')\n if form.complete.data:\n return redirect(url_for('quiz.createquizsuccess', quizID=quizID))\n return redirect(url_for('quiz.createqn', quizID=quizID))\n\n return render_template('quiz/createqn.html', title=' | Create Quiz', form=form, quiz=quiz,delQuizForm=delQuizForm, delQnForm=delQnForm)", "def create_question():\n if request.content_type != \"application/json\":\n abort(415)\n question_text = request.json['question']\n answer = request.json['answer']\n difficulty = request.json['difficulty']\n category = request.json['category']\n\n question_object = Question(question_text, answer, category, difficulty)\n db.session.add(question_object)\n db.session.commit()\n return jsonify({\n \"success\": True\n }), 201", "def populate_poll(user=\"\",total=10):\n user_list = None\n #create random user only when user argument empty\n if user == \"\":\n create_random_user(20)\n user_list = User.objects.all()\n \n for i in range(total):\n Question.objects.create(\n created_by=random.choice(user_list) if user_list is not None else user,\n title=create_random_string(seed_random(10)),\n text=create_random_string(seed_random(300)),\n slug=create_random_string(seed_random(100)) )", "def create_default_question(self, question_data, question_link):\n self.driver.find(question_link).click()\n self.fill_question_and_code_tb(question_data)\n return self", "def test_create_new_question(self):\n\n res = self.client().post('/questions', json=self.new_question)\n # print(\"Response: \", res.data)\n\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)", "def post_question(self):\n self.post_meetup()\n return self.client.post(\"api/v2/questions\", headers={\"Authorization\": \"{}\".format(self.token())}, data=json.dumps(self.question), content_type='application/json')", "def test_question_with_out_choices(self):\n question = create_question(question_text='Question with out choices', days=0, choices=[])\n response = self.client.get(reverse('polls:details', args=(question.id, )))\n self.assertEqual(response.status_code, 404)", "def create_choice(question, choice_text, votes=0):\n return question.choice_set.create(choice_text=choice_text, votes=votes)", "def create_appointment():\n\n msg = render_template('date')\n return question(msg)", "def create_question(request):\n\n if request.user.is_authenticated:\n if request.method == \"POST\":\n question_form = UserQuestionForm(request.POST)\n\n if question_form.is_valid():\n question = question_form.save(commit=False)\n question.client = request.user\n question.name = request.user.username\n question.email = request.user.email\n question.save()\n\n messages.success(\n request, \"Thank you for your message, I will get back to you shortly\")\n\n return redirect('profile')\n\n else:\n messages.warning(\n request, \"Sorry your message could not 
be posted, please try again\")\n\n else:\n question_form = UserQuestionForm()\n\n else:\n if request.method == \"POST\":\n question_form = QuestionForm(request.POST)\n\n if question_form.is_valid():\n question = question_form.save(commit=False)\n question.client = None\n question.save()\n\n messages.success(\n request, \"Thank you for your message, I will get back to you shortly\")\n\n return redirect('index')\n\n else:\n messages.warning(\n request, \"Sorry your message could not be posted, please try again\")\n\n else:\n question_form = QuestionForm()\n\n return render(request, 'question.html', {\"question_form\": question_form})", "def _create_mc_question(self, description):\n\n mc_dict = {\n 'description': description,\n 'type': models.QuestionDTO.MULTIPLE_CHOICE,\n 'choices': [\n {\n 'text': 'correct answer',\n 'score': 1.0\n },\n {\n 'text': 'incorrect answer',\n 'score': 0.0\n }],\n 'version': '1.5'\n }\n question = models.QuestionDTO(None, mc_dict)\n qid = models.QuestionDAO.save(question)\n return models.QuestionDAO.load(qid)", "def create(self, validated_data):\n question = Question.objects.create(**validated_data)\n question.save()\n if 'tag' in self.context['request'].data:\n tag = Tag.objects.get(id=self.context['request'].data['tag'])\n question_tag = QuestionTag.objects.create(question=question,\n tag=tag)\n question_tag.save()\n return question", "def test_build_forms_from_questionnaire(self):\n self.view._build_forms_from_questionnaire()\n self.assertIsInstance(self.view.form_list, types.DictType)\n self.assertIs(self.view.form_list.get(str(len(self.view.form_list) - 1)), TempLanguageForm)", "def test_future_question(self):\n set_up_user(self)\n self.assertFalse(self.user.is_superuser)\n\n future_question = create_question(question_text='Future question.', days=5)\n url = reverse('polls:detail', args=(future_question.id,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)", "def createQuiz(repo_url):\n CourseSet.quizcontent()\n ExtractChoices()\n # pass", "def QuestionAskForm(user, *args, **kwargs):\n logger.debug(\">>>>> QuestionAskForm \")\n #logger.debug(\"lot_form: {0}\".format(f))\n if user.is_anonymous():\n if not settings.ALLOW_ANONYMOUS:\n return None\n else:\n selected_fields = ['name', 'email', 'company', 'body', 'categories', 'phone_number']\n else:\n selected_fields = ['user', 'company', 'body', 'categories', 'phone_number']\n\n if settings.ALERTS:\n selected_fields += ['alert']\n\n class _QuestionAskForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(_QuestionAskForm, self).__init__(*args, **kwargs)\n\n for key in self.fields:\n if not key in OPTIONAL_FIELDS:\n self.fields[key].required = True\n\n # hide the internal status for non-staff\n qf = self.fields.get('status', None)\n if qf and not user.is_staff:\n choices = list(qf.choices)\n choices.remove(('internal', _('Internal')))\n qf.choices = choices\n\n # a bit of a hack...\n # hide a field, and use clean to force\n # a specific value of ours\n for key in ['user']:\n qf = self.fields.get(key, None)\n if qf:\n qf.widget = qf.hidden_widget()\n qf.required = False\n\n # honey pot!\n #phone_number = forms.CharField(label=_('Phone number'), required=False)\n if user.is_anonymous():\n captcha = ReCaptchaField(attrs={'theme': 'clean', 'lang': 'ru'})\n #categories = forms.MultipleChoiceField(choices=CAT_CHOICES, required=True)\n #categories = forms.ChoiceField(choices=Category.objects.values_list('id','title'), required=True)\n #categories = 
forms.MultipleHiddenInput(choices=Category.objects.all())\n #categories = forms.HiddenInput(initial=2)\n\n # attachment = forms.FileField(required=False,\n # label=_('Attach File'),\n # help_text=_('You can attach a file such as a document or screenshot to this ticket.'),\n # )\n #\n def clean_user(self):\n return user\n # def save(self, user):\n # files = []\n # if self.cleaned_data['attachment']:\n # import mimetypes\n # file = self.cleaned_data['attachment']\n # filename = file.name.replace(' ', '_')\n # a = Attachment(\n # followup=f,\n # filename=filename,\n # mime_type=mimetypes.guess_type(filename)[0] or 'application/octet-stream',\n # size=file.size,\n # )\n # a.file.save(file.name, file, save=False)\n # a.save()\n #\n # if file.size < getattr(settings, 'MAX_EMAIL_ATTACHMENT_SIZE', 512000):\n # # Only files smaller than 512kb (or as defined in\n # # settings.MAX_EMAIL_ATTACHMENT_SIZE) are sent via email.\n # files.append(a.file.path)\n #\n\n class Meta:\n model = Question\n fields = selected_fields\n\n return _QuestionAskForm(*args, **kwargs)", "def test_create_form_with_no_fields(self):\n with pytest.raises(ValidationError):\n SurveyForm.create('badform', '<p>no input fields here</p>')\n\n with pytest.raises(ValidationError):\n SurveyForm.create('badform', '<input id=\"input_without_name\" />')", "def test_future_question(self):\n question = createQuestion(\"Future Question\", 30)\n response = self.client.get(reverse(\"polls:index\"))\n self.assertContains(response, \"No polls are available.\")\n self.assertQuerysetEqual(response.context['latest_question_list'], [])", "def test_create_invalid_questions(self):\r\n print(\"Create survey with invalid questions\")\r\n s_name = \"\"\r\n c_id = 1\r\n questions = []\r\n\r\n prev_noSurveys = len(Survey.query.all())\r\n self.assertEqual(self.system.create_survey(s_name, c_id, questions), 0)\r\n curr_noSurveys = len(Survey.query.all())\r\n self.assertEqual(prev_noSurveys, curr_noSurveys)", "def create_question():\n body = request.get_json()\n\n question_text = body.get('question', None)\n answer = body.get('answer', None)\n category = body.get('category', 1)\n difficulty = body.get('difficulty', 1)\n\n try:\n question = Question(question=question_text,\n answer=answer,\n category=category,\n difficulty=difficulty)\n question.insert()\n\n selection = Question.query.order_by(Question.id).all()\n current_questions = paginate_questions(request, selection)\n\n return jsonify({\n 'success': True,\n 'created': question.id,\n 'questions': current_questions,\n 'total_questions': len(selection)\n })\n\n except Exception:\n abort(422)", "def create_questionnaire_with(self, questionnaire_data):\n questionnaire_code = fetch_(QUESTIONNAIRE_CODE, from_(questionnaire_data))\n gen_ramdom = fetch_(GEN_RANDOM, from_(questionnaire_data))\n if gen_ramdom:\n questionnaire_code = questionnaire_code + generateId()\n self.driver.find_text_box(QUESTIONNAIRE_CODE_TB).enter_text(questionnaire_code)\n self.create_default_question(questionnaire_data[DEFAULT_QUESTION], DEFAULT_QUESTION_LINK)\n for question in fetch_(QUESTIONS, from_(questionnaire_data)):\n self.driver.find(ADD_A_QUESTION_LINK).click()\n self.fill_question_and_code_tb(question)\n self.SELECT_FUNC[fetch_(TYPE, from_(question))](question)\n return self", "def create(self, body):\n\t\tif self.has_permission('RightTPI') is False:\n\t\t\tself.no_access()\n\n\t\tid_survey = uuid.uuid4()\n\t\tid_language_content = MultiLang.set(body['name'], True)\n\n\t\twith Database() as db:\n\t\t\tdb.insert(Table(id_survey, 
id_language_content, body['survey_type']))\n\t\t\tdb.commit()\n\n\t\treturn {\n\t\t\t'id_survey': id_survey,\n\t\t\t'message': 'survey successfully created'\n\t\t}", "def __init__(self, name):\n self.name = name\n self.questions = []", "def test_question_with_out_choices(self):\n create_question(question_text='Question with out choices', days=0, choices=[])\n response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['latest_questions_list'], [])", "def questions_collection(request):\n if request.method == \"POST\":\n data = json.loads(request.body)\n task = Task.objects.get(id=data.get(\"taskId\", \"\"))\n commenter = User.objects.get(username=data.get(\"commenter\", \"\"))\n content = data.get(\"content\", \"\")\n\n question = Question(\n task=task,\n commenter=commenter,\n content=content\n )\n question.save()\n return JsonResponse({\"message\": \"Question created successfully\"}, status=201)", "def test_create_question(self):\n meetups_url = '/api/v1/meetups'\n questions_url = '/api/v1/questions'\n # Post meetup2\n response = self.client.post(meetups_url, data=json.dumps(self.meetup2),\n content_type=\"application/json\")\n # Test meetup2 was posted successfully\n self.assertEqual(response.status_code, 201)\n # Post a question1 for meetup2\n questions_response = self.client.post(questions_url,\n data=json.dumps(self.question1),\n content_type=\"application/json\")\n # Test question1 was posted successfully\n self.assertEqual(questions_response.status_code, 201)\n self.assertIn(\"Python Data Science\", str(questions_response.data))", "def createquiz():\n if not current_user.check_educator():\n return render_template('errors/error403.html'), 403\n classForm = NameForm(prefix='class')\n quizForm = NameForm(prefix='quiz')\n image_file = get_image_file(current_user)\n if quizForm.validate_on_submit():\n quiz = add_quiz(current_user, quizForm.title.data)\n if quiz is None:\n flash('You have already created a Quiz with this name. Please choose a different name.', 'warning')\n return redirect(url_for('main.dashboard'))\n return redirect(url_for('quiz.createqn', quizID=quiz.id))\n return render_template('dashboard.html', image_file=image_file, classForm=classForm, quizForm=quizForm)", "def create_numeric(cls, name, question, default_response, contacts, user):\n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_NUMERIC)\n poll.contacts = contacts\n return poll", "def create_evaluation_template(client, survey_name):\n\n loremipsum = \"Lorem ipsum dolor sit amet, consecteteur adipiscing elit donec proin nulla vivamus. Augue donec a erat ve sagittis nisi rhoncus curabitur mauris. Nulla ipsum tortor sagittis adipiscing primis interdum suspendisse lobortis etiam risus nullam. Donec massa quam dis at nibh dolor netus quis. Purus etiam. Dolor neque nunc netus eget nulla faucibus vestibulum aenean class senectus. Porta dolor. Donec morbi. Felis lorem tempus luctus malesuada laoreet curae justo rhoncus ante facilisi parturient malesuada elit laoreet amet. Fusce augue nisi ligula praesent condimentum nascetur fringilla in id lectus per nunc. Lacus metus nisl orci odio maecenas adipiscing. 
Velit nulla a tempor class placerat ac condimentum nisi taciti at eros.\"\n\n loremipsum_A = \"A: \\n\" + loremipsum\n loremipsum_B = \"B: \\n\" + loremipsum\n\n # Create a new survey\n survey_id = client.create_survey(survey_name)\n # Create 2 more pages in the survey\n for i in range(0, 2):\n client.create_new_page(survey_id, str(i), loremipsum) # title and description\n\n # Get the page ids\n page_ids = client.get_pages_ids(survey_id) # There will be 3\n\n answers = [\"A\", \"B\"]\n question_title = \"Which of the following abstract is more relevant to the one above?\"\n for i, ID in enumerate(page_ids):\n client.update_title_description_of_page(survey_id, ID, \"Abstract\" + str(i), loremipsum)\n client.add_single_choice(survey_id, ID, question_title, answers)\n client.add_paragraph(survey_id, ID, loremipsum_A)\n client.add_paragraph(survey_id, ID, loremipsum_B)\n\n return survey_id", "def test_future_questions(self):\n q = create_question(question_text=\"Future question\", days=30)\n res = self.client.get(reverse(\"polls:detail\", args=(q.id,)))\n self.assertEqual(res.status_code, 404)\n # self.assertQuerysetEqual(res.context[\"question\"], [])", "def test_take_transcription_survey(self):\n survey_name = 'test take a transcription survey'\n ids_in_hand = Message.objects.values_list('id', flat=True)\n form = NewTranscriptionSurveyForm({\n 'name': survey_name,\n 'messages': ','.join(map(str, ids_in_hand)),\n 'num_transcriptions_per_taker': 2,\n })\n form.save()\n\n self.browser.get(self.live_server_url)\n self.browser.find_element_by_id('id_transcriptions_list').click()", "def create_answer(question, user):\n return Answer.objects.create(question=question,answered_by=user)", "def test_form_create(self):\n create = {\n 'title': 'Last Post (Final)',\n 'content': '### Goodbye!',\n 'is_published': False,\n }\n\n form = self.form_cls(create)\n print(form.errors)\n\n form.save()\n\n actual = models.Entry.objects.get(slug='last-post-final')\n self.assertEquals(actual.title, create['title'])\n self.assertEquals(actual.content.raw, create['content'])\n self.assertIsNone(actual.published_timestamp)", "def test_vote_view_with_qeustion_which_has_choices(self):\n past_question_with_choices = create_question(question_text=\"Test \\\n question with one choice\", days=-30, create_choice=True)\n response = self.client.get(reverse('polls:vote', \\\n args=(past_question_with_choices.id,)))\n\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, past_question_with_choices.question_text,\\\n status_code=200)", "def create_quiz():\n try:\n\n quiz_category_id = request.json.get(\"quiz_category_id\")\n previous_question_ids = request.json.get(\"previous_question_ids\")\n questions = Question.query.filter(\n ~Question.id.in_(previous_question_ids)\n )\n\n if quiz_category_id != 0:\n questions = questions.filter(\n Question.category_id == quiz_category_id\n )\n\n questions = questions.all()\n\n if len(questions) > 0:\n question = random.choice(questions).format()\n else:\n question = None\n\n response = jsonify({\"success\": True, \"question\": question})\n\n except AttributeError:\n abort(400)\n\n return response", "def test_future_question_for_admin(self):\n set_up_super_user(self)\n self.assertTrue(self.user.is_superuser)\n\n future_question = create_question(question_text='Future question.', days=5)\n url = reverse('polls:detail', args=(future_question.id,))\n response = self.client.get(url)\n self.assertContains(response, future_question.question_text)", "def gen_questions(self, 
number_of_questions):", "def create_question(self):\n\n locations = [\"meetup_id\", \"user_id\", \"title\", \"body\"]\n\n try:\n\n user = self.sql.get_username_by_id(\n int(self.question_details[\"user\"]))\n\n meetup = self.sql.fetch_details_by_criteria(\n \"meetup_id\", self.question_details[\"meetup\"], \"meetups\")\n\n existing = self.sql.fetch_details_if_text_exists(\n \"title\", self.question_details[\"title\"], \"questions\")\n\n title = self.question_details[\"title\"]\n\n body = self.question_details[\"body\"]\n\n except KeyError as keyerror:\n return self.makeresp(\"{} is a required field\".format(keyerror), 400)\n\n isempty = DataValidators(\n self.question_details).check_values_not_empty()\n\n if isinstance(isempty, str):\n return self.makeresp(isempty, 400)\n\n if not user:\n return self.makeresp(\"User not found\", 404)\n\n if not meetup:\n return self.makeresp(\"Meetup not found\", 404)\n\n if not self.check_is_error(existing):\n\n if [meet_id[1] for meet_id in existing if self.question_details[\"meetup\"] in meet_id]:\n\n return self.makeresp(\"This Question already exists\", 409)\n\n question = {\n \"meetup\": self.question_details[\"meetup\"],\n \"createdBy\": self.question_details[\"user\"],\n \"title\": title,\n \"body\": body\n }\n\n question_id = SqlHelper(question).save_to_database(\n locations, \"questions\")\n\n return self.makeresp(\n {\n \"id\": question_id,\n \"user\": question[\"createdBy\"],\n \"meetup\": question[\"meetup\"],\n \"title\": question[\"title\"],\n \"body\": question[\"body\"]\n }, 201)", "def survey_new(request):\n if request.user.is_authenticated:\n if not request.user.groups.filter(name='Survey Creators').exists():\n raise Http404(\"Page not found\")\n else:\n raise Http404(\"Page not found\")\n\n my_surveys = Survey.objects.filter(author=request.user).order_by('title')\n\n if request.method == \"POST\":\n form = SurveyForm(request.POST)\n if form.is_valid():\n survey = form.save(commit=False)\n survey.author = request.user\n survey.save()\n messages.add_message(request, messages.INFO, \"Created new survey \" + survey.title,)\n return redirect('skip_logic:survey_detail', survey_slug=survey.slug)\n else:\n new_slug = ''.join(random.choice(string.ascii_uppercase +\n string.ascii_lowercase +\n string.digits) for _ in range(8))\n form = SurveyForm(initial={'slug': new_slug,\n 'title': \"My New Survey\"})\n\n return render(request, 'skip_logic/survey_edit.html', {'form': form, 'my_surveys': my_surveys})", "def test_new_thread(self):\n self.helper('create_user', 'testuser', 'password')\n self.login('testuser', 'password', 'auth_login','id_login')\n self.url(settings.LOGIN_REDIRECT_URL)\n self.go200('forum_new_thread', [forum.slug])\n self.showforms()\n self.formclear('id_newthread')\n self.fv('id_newthread', 'id_title', 'Some cool new thread')\n self.fv('id_newthread', 'id_body', 'Hello world!')\n self.submit200()\n self.go200('forum_thread_list', [forum.slug])\n self.find('Some cool new thread')\n self.go200('forum_view_thread', [2])\n self.find('Hello world!')", "def test_question_submission_successfully(self):\n with self.client:\n \n response = self.add_question(\"1\",\"hello\",\"hello world\",\"java\",\"kenneth\")\n self.assertEqual(response.status_code, 201)", "def test_future_question(self):\n future_question = createQuestion(\"Future Question\", 5)\n url = reverse(\"polls:detail\", args=(future_question.id,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)", "def create_question(question_text, days):\n\ttime = 
timezone.now()+dt.timedelta(days=days)\n\treturn Question.objects.create(question_text=question_text, pub_date=time)", "def test_question_with_out_choices(self):\n question = create_question(question_text='Question with out choices', days=0, choices=[])\n response = self.client.get(reverse('polls:results', args=(question.id, )))\n self.assertEqual(response.status_code, 404)", "def __init__(self, choices, *args, **kwargs):\n super(RangePollChoiceForm, self).__init__(*args, **kwargs)\n nominees = [(i, '%d' % i) for i in range(0, choices.count()+1)]\n for choice in choices:\n self.fields['range_poll__%s' % str(choice.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=nominees,\n label=choice.nominee.get_full_name()))", "def test_initial_answer(self):\n survey = SurveyFactory.create()\n\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': survey.name,\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n\n 'question_text': 'ou812?',\n 'variation_id': '1',\n 'score': None,\n 'max_score': None,\n 'flow_began_ts': 0,\n 'flow_offered_ts': 0,\n 'flow_voted_ts': 0,\n 'flow_engaged_ts': 0,\n 'platform': '',\n 'channel': '',\n 'version': '',\n 'locale': '',\n 'country': '',\n 'build_id': '',\n 'partner_id': '',\n 'profile_age': None,\n 'profile_usage': {},\n 'addons': {},\n 'extra': {},\n 'is_test': False\n }\n\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 201\n\n ans = Answer.objects.latest('id')\n\n for field in data.keys():\n # survey_id is a special case since it's a foreign key.\n if field == 'survey_id':\n # This looks goofy because it's not the normal way to\n # do things, but the \"survey_id\" attribute is a\n # Survey rather than the pk for a Survey.\n assert ans.survey_id.name == data[field]\n continue\n\n assert getattr(ans, field) == data[field]", "def test_future_question(self):\n create_question(question_text=\"Future Question\", days=30)\n response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['latest_questions_list'], [])", "def test_future_question(self):\n future_question = create_question(question_text='Future question', days=30)\n response = self.client.get(reverse('polls:details', args=(future_question.id,)))\n self.assertEqual(response.status_code, 404)", "def test_post_valid_data_question(self):\n\n response = self.post_question(self.valid_question)\n self.assertEqual(response.status_code, 201)", "def __init__(self, createdby, meetup, title, body, votes,createdOn):\n self.question_id = len(Question.question_list) + 1\n self.createdon = datetime.now()\n self.createdby = createdby\n self.meetup = meetup\n self.title = title\n self.body = body\n self.votes = votes", "def test_can_view_question_and_result(self):\n self.client.login(username='Marry', password='secret')\n self.assertTrue(self.user.is_authenticated)\n question = create_question(question_text='Future question.', days=0)\n url = reverse('polls:detail', args=(question.id,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def test_future_questions(self):\n create_question(question_text=\"Future question\", days=30)\n res = self.client.get(reverse(\"polls:index\"))\n self.assertEqual(res.status_code, 200)\n self.assertContains(res, \"No Polls available\")\n 
self.assertQuerysetEqual(res.context[\"latest_questions\"], [])", "def test_add_new_question_success(self):\n res = self.client().post('/api/questions', json={\n \"question\": \"This is a question\",\n \"answer\": \"This is the answer\",\n \"category\": 1,\n \"difficulty\": 2\n })\n self.assertEqual(res.status_code, 200)\n data = json.loads(res.data)\n self.assertTrue(data[\"success\"])", "def test_posting_question(self):\n self.is_authenticated()\n response = self.post_question()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def get_choice(cls, polls):\n\n cl = cls()\n items = []\n for poll in polls:\n items.append((poll.id, poll.question))\n\n setattr(cl.poll, 'items', items)\n return cl", "def test_create_quiz(self):\n\n self.browser.get(self.live_server_url)\n\n # the user begins by logging in\n self.login()\n\n # the user uses the link in the home page to create a quiz\n create_button = self.browser.find_element_by_id(\"create-quiz\")\n create_button.click()\n\n # the user has to login first\n username_input = self.browser.find_element_by_id(\"id_username\")\n username_input.send_keys(\"A\")\n password_input = self.browser.find_element_by_id(\"id_password\")\n password_input.send_keys(\"_MyKindOfPassword123_\")\n button = self.browser.find_element_by_id(\"login_button\")\n button.click()\n\n assert self.browser.title == \"Créer un quiz\"\n\n # Since it's the first time the user is creating a quiz, he visits the tutorial\n tutorial = self.browser.find_element_by_id(\"tutorial\")\n tutorial.click()\n\n assert self.browser.title == \"Tutorial\"\n\n # get back to create a quiz\n back_to_create = self.browser.find_element_by_id(\"create-quiz\")\n back_to_create.click()\n\n # Basic elements of the quiz :\n quiz_title = self.browser.find_element_by_id(\"id_quiz-title\")\n quiz_title.send_keys(\"Le théorème de Pythagore\")\n quiz_description = self.browser.find_element_by_id(\"id_quiz-description\")\n quiz_description.send_keys(\"Ce qui a pour but but de vérifier les connaissances \\\n des participants sur le théorème de Pythagore.\")\n quiz_category = Select(self.browser.find_element_by_id(\"id_quiz-category\"))\n quiz_category.select_by_visible_text('Sciences')\n self.browser.implicitly_wait(1)\n quiz_subcategory = Select(self.browser.find_element_by_id(\"id_quiz-sub_category\"))\n quiz_subcategory.select_by_visible_text(\"Mathématiques\")\n\n # add a first MC question :\n add_mc = self.browser.find_element_by_id(\"add-mc\")\n add_mc.click()\n self.browser.implicitly_wait(1)\n mc1_content = self.browser.find_element_by_id(\"forms\")\n #mc1_content.send_keys(\"Le théorème de Pythagore concerne\")", "def test_incomplete_form(self):\n page = self.get_assert_200(self.url, user=self.voting_user1.username)\n form = page.forms[\"student-vote-form\"]\n self.fill_form(form, fill_complete=False)\n response = form.submit()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"vote for all rating questions\", response)\n\n form = page.forms[\"student-vote-form\"]\n self.assertEqual(form[question_id(self.course.general_contribution, self.general_questionnaire, self.general_text_question)].value, \"some text\")\n self.assertEqual(form[question_id(self.course.general_contribution, self.general_questionnaire, self.general_likert_question)].value, \"1\")\n self.assertEqual(form[question_id(self.course.general_contribution, self.general_questionnaire, self.general_grade_question)].value, \"3\")\n\n self.assertEqual(form[question_id(self.contribution1, 
self.contributor_questionnaire, self.contributor_text_question)].value, \"some other text\")\n self.assertEqual(form[question_id(self.contribution1, self.contributor_questionnaire, self.contributor_likert_question)].value, \"4\")\n\n self.assertEqual(form[question_id(self.contribution2, self.contributor_questionnaire, self.contributor_text_question)].value, \"some more text\")", "def QuestionForm(user, *args, **kwargs):\n\n if user.is_anonymous():\n if not settings.ALLOW_ANONYMOUS:\n return None\n else:\n selected_fields = ['name', 'email', 'title', 'body', 'phone_number']\n else:\n selected_fields = ['user', 'title', 'body', 'status', 'phone_number']\n\n if settings.ALERTS:\n selected_fields += ['alert']\n\n class _QuestionForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(_QuestionForm, self).__init__(*args, **kwargs)\n\n for key in self.fields:\n if not key in OPTIONAL_FIELDS:\n self.fields[key].required = True\n\n # hide the internal status for non-staff\n qf = self.fields.get('status', None)\n if qf and not user.is_staff:\n choices = list(qf.choices)\n choices.remove(('internal', _('Internal')))\n qf.choices = choices\n\n # a bit of a hack...\n # hide a field, and use clean to force\n # a specific value of ours\n for key in ['user']:\n qf = self.fields.get(key, None)\n if qf:\n qf.widget = qf.hidden_widget()\n qf.required = False\n\n # honey pot!\n #phone_number = forms.CharField(label=_('Phone number'), required=False)\n # if user.is_anonymous():\n # captcha = ReCaptchaField(attrs={'theme': 'clean', 'lang': 'ru'})\n\n def clean_user(self):\n return user\n\n class Meta:\n model = Question\n fields = selected_fields\n\n return _QuestionForm(*args, **kwargs)", "def question(num):\n if request.method == 'GET':\n questionGroup = Question.query.filter(Question.num == num).from_self().all()\n answers = [answer.value for answer in questionGroup if '_text' in answer.variable]\n responses = [response.value for response in questionGroup if '_response' in response.variable]\n tooltips = [response.value for response in questionGroup if '_hover' in response.variable]\n questionType = questionGroup[0].question_type\n if questionType == 'Multiple Choice':\n return render_template('/demo/question-multiplechoice.html',\n question=questionGroup[0].question,\n choices=answers,\n responses=responses,\n tooltips=tooltips,\n # variables=variables,\n explanation=questionGroup[0].info,\n template=\"question-multiplechoice\")\n return render_template('/demo/question-freeform.html',\n question=questionGroup[0].question,\n choices=answers,\n responses=responses,\n tooltips=tooltips,\n # variables=variables,\n explanation=questionGroup[0].info,\n template=\"question-freeform\")", "def create_populated_question(answer_states: List[bool], question_text: str = None):\n question = create_question(question_text)\n\n for state in answer_states:\n create_answer(question, state)\n\n return question", "def __init__(self, question):\n self.question = question\n self.responses = []", "def submit_question():\n body = request.get_json()\n\n question = body.get('question', None)\n answer = body.get('answer', None)\n difficulty = body.get('difficulty', None)\n category = body.get('category', None)\n\n try:\n\n new_question = Question(\n question=question,\n answer=answer,\n difficulty=difficulty,\n category=category\n )\n\n new_question.insert()\n\n return jsonify({\n 'success': True,\n 'created': new_question.id\n })\n\n except:\n abort(422)", "def mutate(self, info, question_text):\n question = Question(\n 
question_text=question_text,\n pub_date=now()\n )\n question.save()\n ok = True\n return CreateQuestion(\n question=question,\n ok=ok\n )", "def test_make_form_field():", "def question_form(number, next_question, form_class, title, alternative=None, options=None):\n alternative = alternative or (lambda form: False)\n answer = 's_{}_a'.format(number)\n form = form_class(**session.get(answer, {}))\n if hasattr(form, '_categories'):\n form._categories = OrderedDict([\n (str(k), v) for k, v in form._categories().items()\n ])\n if options is not None:\n form.options.choices = options\n if alternative(form) or form.validate_on_submit():\n session[answer] = copy(form.data)\n del session[answer]['submit']\n del session[answer]['csrf_token']\n save_answer(number)\n # ToDo: save\n return goto(next_question)\n return question(form, title)", "def createQuestion(question_text, days):\n time = timezone.now() + datetime.timedelta(days = days)\n return Question.objects.create(question_text= question_text, pub_date = time)", "def create_poll(self, poll_data_raw):\n now = datetime.datetime.utcnow()\n close = datetime.datetime.strptime(poll_data_raw['close'],\n '%Y-%m-%dT%H:%M:%S')\n ongoing = close - now > datetime.timedelta(minutes = 0)\n init_key = helpers.generateKeyString(poll_data_raw['initiator'],\n 'init_')\n part_list = poll_data_raw['participants']\n part_list = list(set(part_list)) # remove duplicates\n part_map = {}\n for email in part_list:\n part_key = helpers.generateKeyString(email, 'part_')\n part_map[part_key] = email\n poll_data_processed = {\n 'name': poll_data_raw['name'],\n 'choices': filter(bool, poll_data_raw['choices']),\n 'ongoing': ongoing,\n 'close': poll_data_raw['close'],\n 'type': poll_data_raw['type'],\n 'initiator': init_key,\n 'participants': part_map\n }\n poll_key = helpers.generateKeyString(poll_data_raw['name'],\n 'poll_')\n self.client.set(poll_key, dumps(poll_data_processed))\n\n # Create initiator's record\n init_data = {\n 'email': poll_data_raw['initiator'],\n 'poll': poll_key\n }\n self.set_initiator(init_key, init_data)\n\n # Create participants' records\n for part_key in part_map:\n part_data_raw = {\n 'email': part_map[part_key],\n 'poll': poll_key,\n 'voted': False,\n 'choice': None\n }\n self.set_participant(part_key, part_data_raw)\n\n return poll_key", "def create_yesno(cls, name, question, default_response, contacts, user):\n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response,\n user=user,\n type=Poll.TYPE_TEXT)\n poll.contacts = contacts\n poll.categories.create(name='yes')\n poll.categories.get(name='yes').rules.create(\n regex=(STARTSWITH_PATTERN_TEMPLATE % '|'.join(YES_WORDS)),\n rule_type=Rule.TYPE_REGEX,\n rule_string=(STARTSWITH_PATTERN_TEMPLATE % '|'.join(YES_WORDS)))\n poll.categories.create(name='no')\n poll.categories.get(name='no').rules.create(\n regex=(STARTSWITH_PATTERN_TEMPLATE % '|'.join(NO_WORDS)),\n rule_type=Rule.TYPE_REGEX,\n rule_string=(STARTSWITH_PATTERN_TEMPLATE % '|'.join(NO_WORDS)))\n poll.categories.create(name='unknown', default=True)\n return poll" ]
[ "0.68965745", "0.664663", "0.66033286", "0.6529158", "0.64919966", "0.64177126", "0.6361779", "0.6354872", "0.6351356", "0.6324996", "0.62474936", "0.62169385", "0.6213089", "0.62068975", "0.6191489", "0.6160336", "0.61465067", "0.6138712", "0.6129136", "0.61282086", "0.6116563", "0.61049587", "0.6067563", "0.6061492", "0.60264516", "0.59967256", "0.59941244", "0.5980024", "0.5972787", "0.59696853", "0.59515667", "0.5934669", "0.5880389", "0.58692896", "0.58686644", "0.5863954", "0.584826", "0.5817834", "0.5798075", "0.5776345", "0.5769919", "0.576478", "0.57527465", "0.5730393", "0.5726189", "0.57204324", "0.57124984", "0.57063526", "0.56941646", "0.56866324", "0.5686244", "0.56795377", "0.56761837", "0.5666347", "0.5659734", "0.5651766", "0.5643818", "0.56335825", "0.5630645", "0.5626521", "0.5617714", "0.5610939", "0.5597454", "0.55879676", "0.5584396", "0.558378", "0.5574025", "0.5569183", "0.5554331", "0.5549239", "0.55491275", "0.55368793", "0.5532466", "0.553069", "0.5525163", "0.55167943", "0.5505702", "0.5503808", "0.5502032", "0.54986906", "0.5472009", "0.54527193", "0.5450531", "0.54503375", "0.5449271", "0.5444494", "0.54407144", "0.5426207", "0.54152095", "0.5409287", "0.5397183", "0.5396676", "0.5396144", "0.5393141", "0.5386427", "0.5384917", "0.5381841", "0.53730494", "0.53680146", "0.5366354" ]
0.7856849
0