| query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4 to 10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
Returns a tuple of the form (max_hit, accuracy) for the given levels, after factoring in the weapons available and the selected attack style. Assumes the enemy has level 1 defence and a 0 defence bonus. | def get_max_hit_and_accuracy(
        levels, attack_style, attack_bonus, strength_bonus):
    weapon_attack, weapon_strength = get_weapon_stats(levels.attack)
    attack_bonus += weapon_attack
    strength_bonus += weapon_strength

    if attack_style == Attack_Style.ATTACK:
        effective_attack = osrs.effective_level(levels.attack, 1, 3, 1)
        effective_strength = osrs.effective_level(levels.strength, 1, 0, 1)
    elif attack_style == Attack_Style.STRENGTH:
        effective_attack = osrs.effective_level(levels.attack, 1, 0, 1)
        effective_strength = osrs.effective_level(levels.strength, 1, 3, 1)

    enemy_effective_defence = osrs.effective_level(1, 1, 0, 1)

    max_hit = osrs.max_hit(effective_strength, strength_bonus)
    accuracy = osrs.accuracy(effective_attack, attack_bonus,
        enemy_effective_defence, 0)

    return (max_hit, accuracy) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_weapon_stats(attack_level):\n if attack_level >= 60:\n # Dragon scimitar\n return (67, 66)\n elif attack_level >= 40:\n # Rune scimitar\n return (45, 44)\n elif attack_level >= 30:\n # Adamant scimitar\n return (29, 28)\n elif attack_level >= 20:\n # Mithril scimitar\n return (21, 20)\n elif attack_level >= 10:\n # Black scimitar\n return (19, 14)\n elif attack_level >= 5:\n # Steel scimitar\n return (15, 14)\n else:\n # Iron scimitar\n return (10, 9)",
"def difficulty_for_level(level):\n return 0 if level==\"easy\" else (1 if level==\"medium\" else 2)",
"def update_hp_for_higher_level(chosen_class,level):\n #Checks to see if your character is level 4,8,12,etc.\n def upgradedAbilityAt4(level):\n if level % 4 == 0:\n upgraded_ability = raw_input(\"Level \"+str(level)+\"!\\n Which two abilities would you like to upgrade? (Adds +1 to ability)\\n Please input two from str/dex/con/int/wis/cha with a space in between.\\n (ex: cha dex) \").split(' ')\n print\n #To write:\n #if either ability pushes ability score over 20, redo input\n\n \n for i in upgraded_ability:\n self.stealthUpdate(i,1)\n #class specific HP calculations\n if chosen_class == 'barbarian': \n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,12) + self.con + self.classMods[6]\n elif chosen_class == 'cleric':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'druid':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'fighter':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'monk':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'paladin':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'ranger':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'rogue':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]\n elif chosen_class == 'wizard':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]",
"def weaponValue(self, level):\n if level == 1:\n bonus = 2\n elif level == 2:\n bonus = 4\n elif level == 3:\n bonus = 6\n elif level == 4:\n bonus = 8\n else:\n bonus = 0\n\n return bonus",
"def set_game_level(user_level_input):\n if user_level_input == \"easy\":\n return sample_1, answer_sample_1\n elif user_level_input == \"medium\":\n return sample_2, answer_sample_2\n elif user_level_input == \"medium\":\n return sample_3, answer_sample_3\n else:\n return \"That level does not exist\"",
"def level_time_average(start_levels, attack_style, attack_bonus, strength_bonus):\n ticks_per_attack = 4 # Scimitar attack speed\n max_hit, accuracy = get_max_hit_and_accuracy(\n start_levels, attack_style, attack_bonus, strength_bonus)\n \n if attack_style == Attack_Style.ATTACK:\n start_exp = osrs.experience[start_levels.attack]\n end_exp = osrs.experience[start_levels.attack+1]\n elif attack_style == Attack_Style.STRENGTH:\n start_exp = osrs.experience[start_levels.strength]\n end_exp = osrs.experience[start_levels.strength+1]\n \n experience = end_exp - start_exp\n avg_hit = accuracy * max_hit / 2\n exp_per_hit = avg_hit * osrs.BASE_EXP_PER_DAMAGE\n ticks = experience / exp_per_hit * ticks_per_attack\n return ticks",
"def get_max_hit_increases(\n start_strength_level, end_strength_level,\n strength_bonus, stance_adder):\n greatest_max_hit = 0\n max_hit_increases = []\n cur_strength_level = start_strength_level\n while cur_strength_level < end_strength_level:\n effective_strength = osrs.effective_level(\n cur_strength_level, 1, stance_adder, 1)\n max_hit = osrs.max_hit(effective_strength, strength_bonus)\n\n if max_hit > greatest_max_hit:\n greatest_max_hit = max_hit\n max_hit_increases.append((cur_strength_level, max_hit))\n\n cur_strength_level += 1",
"def find_ability(abilities: list, character_class: str, attack_type: str) -> Dict:\n # Find the ability to use\n ability_to_use = {\"effects\": [], \"enhancements\": []}\n for ability in abilities:\n if (ability[\"class\"] == character_class) and (ability[\"type\"] == attack_type):\n ability_to_use = ability\n break\n\n return ability_to_use",
"def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)",
"def _determine_level(levels, points):\n import operator\n level = None\n sorted_levels = sorted(levels.iteritems(), key=operator.itemgetter(1))\n for el in sorted_levels:\n if points <= el[1]:\n level = el[0]\n break\n\n max_level = max(levels.iterkeys(), key=lambda threshold: levels[threshold])\n if points >= levels[max_level]:\n level = max_level\n return level",
"def getAbilityScores(self):\n mods = [(self.str -10)/2,\n (self.dex-10)/2,\n (self.con-10)/2,\n (self.int-10)/2,\n (self.wis-10)/2,\n (self.cha-10)/2]\n print \"STR: {0} ({1}) \\nDEX: {2} ({3})\\nCON: {4} ({5})\".format(self.str,\n mods[0],\n self.dex,\n mods[1],\n self.con,\n mods[2])\n print \"INT: {0} ({1})\\nWIS: {2} ({3})\\nCHA: {4} ({5})\".format(self.int,\n mods[3],\n self.wis,\n mods[4],\n self.cha,\n mods[5])",
"def attack_bonus_on_level(self, level):\n raise NotImplementedError",
"def calculate_score(friendly_tiles, enemy_tiles):\n friendly_permanent_tiles = [tile for tile in friendly_tiles if tile.is_permanently_owned()]\n enemy_permanent_tiles = [tile for tile in enemy_tiles if tile.is_permanently_owned()]\n num_friendly_permanent_tiles = len(friendly_permanent_tiles)\n num_enemy_permanent_tiles = len(enemy_permanent_tiles)\n num_friendly_non_permanent_tiles = len(friendly_tiles) - num_friendly_permanent_tiles\n num_enemy_non_permanent_tiles = len(enemy_tiles) - num_enemy_permanent_tiles\n return (num_friendly_non_permanent_tiles + 2 * num_friendly_permanent_tiles, num_enemy_non_permanent_tiles + 2 * num_enemy_permanent_tiles)",
"def attack(health_meter):\n hit_list = 4 * ['player'] + 6 * ['enemy']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"ATTACK! \", end='')\n show_health(health_meter)",
"def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between\n # half of max_damage and max_damage\n print(\"max damage of \" + self.name + \" is \")\n print(str(self.attack_strength))\n min_damage = self.attack_strength // 2\n weapon_attack_value = random.randint(min_damage, self.attack_strength)\n return weapon_attack_value",
"def Value(self,enemies):\n if self.type == \"Goblin\":\n if \"Bard\" in enemies.inteam and not \"Fighter\" in enemies.inteam:\n return 2\n else:\n return 1\n\n if self.type == \"Ork\":\n if \"Archer\" in enemies.inteam or \"Fighter\" in enemies.inteam:\n return 3\n else:\n return 2\n if self.type == \"Skeleton\":\n if \"Mage\" in enemies.inteam or \"Archer\" in enemies.inteam:\n return 5\n else:\n return 3\n \n if self.type == \"Troll\":\n if \"Fighter\" in enemies.inteam and not \"Mage\" in enemies.inteam:\n return 7\n else:\n return 4",
"def get_leveling_args(cards, card_attrs):\n if (len(card_attrs['evolve']) < len(card_attrs['level']) and\n len(cards) > 15):\n cards_to_consume = set()\n candidates = set(card_attrs['level'].keys())\n cards_by_xp = list(set(swizzle(cards, 'xp01')) & candidates)\n cards_by_rarity = list(set(swizzle(cards, 'type')) & candidates)\n cards_by_xp, cards_by_rarity, top_third = remove_rarest_third(\n cards_by_xp, cards_by_rarity)\n\n if cards_by_xp and top_third:\n # Number of cards to consume into our destination card will be between\n # min and max values (defined in config).\n num_to_consume = randint(\n cfg['level']['min_cards'],\n min(cfg['level']['max_cards'], len(top_third)))\n\n # Get the bottom n number of cards by xp to consume into a rare card\n lesser = min(num_to_consume, len(cards_by_xp))\n for i in range(lesser): # pylint: disable=unused-variable\n cur_card = cards_by_xp.pop(0)\n if cur_card in cards_by_rarity:\n cards_by_rarity.remove(cur_card)\n if cur_card not in cards_to_consume:\n cards_to_consume.add(cur_card)\n\n logger.debug(\"Cards to consume:\")\n logger.debug(cards_to_consume)\n\n # Choose one of the more rare cards as the target to level.\n # TODO: prefer rare cards with more xp pylint: disable=fixme\n dest_id = choice(top_third)\n\n return (dest_id, cards_to_consume)\n\n return False",
"def max_diaphragmatic_level(levels):\n return [max(x) for x in levels]",
"def get_highest_accuracy(self) -> Tuple[float, Dict[str, Any]]:\n highest_val = float('-inf')\n data = None\n for instance in self.stats:\n if self.stats[instance][\"Accuracy\"] > highest_val:\n highest_val = self.stats[instance][\"Accuracy\"]\n data = self.stats[instance]\n return highest_val, data",
"def level_time_simulate(start_levels, attack_style, attack_bonus, strength_bonus):\n ticks_per_attack = 4 # Scimitar attack speed\n enemy_health = 60 # Sand crab health\n\n max_hit, accuracy = get_max_hit_and_accuracy(\n start_levels, attack_style, attack_bonus, strength_bonus)\n \n if attack_style == Attack_Style.ATTACK:\n start_exp = osrs.experience[start_levels.attack]\n end_exp = osrs.experience[start_levels.attack+1]\n elif attack_style == Attack_Style.STRENGTH:\n start_exp = osrs.experience[start_levels.strength]\n end_exp = osrs.experience[start_levels.strength+1]\n \n experience = end_exp - start_exp\n avg_ticks = combat_simulator.ticks_until_exp(max_hit, accuracy,\n ticks_per_attack, enemy_health, experience,\n osrs.BASE_EXP_PER_DAMAGE, ITERATIONS)\n return avg_ticks",
"def attack(health_meter):\n hit_list = 4 * ['igrac'] + 6 * ['neprijatelj']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"NAPAD! \", end='')\n show_health(health_meter)",
"def most_powerful_weapon(self):\n # sets inital damge to 0\n max_damage = 0\n # sets the best weapon to nothing\n best_weapon = None\n # Loop for each item in inventory\n for item in self.inventory:\n # Code adapted from Make Your own Python Text Based Adventure\n # tries to see if the item damage is greator than the current max\n # damage and then replaces the best weapon in inventory\n try:\n if item.damage > max_damage:\n best_weapon = item\n max_damage = item.damage\n except AttributeError:\n pass\n # sends the best weapon to function\n return best_weapon",
"def calculate_hit(self, armor_list, inventory):\n armor_power = 0\n for armor in armor_list:\n armor_power += inventory[armor]['power']\n max_strength = max(1, (self.level * 5) - armor_power)\n min_strength = 0\n return random.randint(min_strength, max_strength)",
"def abilityScores(self):\n mods = [(self.str -10)/2,\n (self.dex-10)/2,\n (self.con-10)/2,\n (self.int-10)/2,\n (self.wis-10)/2,\n (self.cha-10)/2]\n return \"STR: {0} ({1}) \\nDEX: {2} ({3})\\nCON: {4} ({5})\".format(self.str,\n mods[0],\n self.dex,\n mods[1],\n self.con,\n mods[2])+\"\\n\" \\\n \"INT: {0} ({1})\\nWIS: {2} ({3})\\nCHA: {4} ({5})\".format(self.int,\n mods[3],\n self.wis,\n mods[4],\n self.cha,\n mods[5])",
"def _get_max_hits(build: Build, decimals:int) -> str:\n stats = ['Physical', 'Fire', 'Cold', 'Lightning', 'Chaos']\n emojis = [':drop_of_blood:', ':fire:', ':snowflake:', ':zap:', ':skull:']\n lines = []\n\n show = False\n for i, stat in enumerate(stats):\n max_hit_key = stat + 'MaximumHitTaken'\n max_hit_val = shorten_number_string(build.get_player_stat(max_hit_key, 0, 0), decimals)\n res_key = stat + 'DamageReduction' if stat == 'Physical' else stat + 'Resist'\n res_val = build.get_player_stat(res_key)\n if res_val:\n lines.append(f\"{emojis[i]} {max_hit_val} ({res_val:.0f}%)\")\n show = True\n\n output = '\\n'.join(lines)\n output += \"\\n\"\n return output if show else \"\"",
"def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(allies) > 0:\n self.self_id = allies[0].unit_type\n self_weapon_range = weapon_ranges[self.self_id]\n self_radius = unit_sizes[self.self_id] / float(2)\n self_unit_type = unit_type[self.self_id]\n self_speed = unit_speed[self.self_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, self.self_id,\n self.enemy_id]",
"def calc_tohit(attr, level):\n return level + calc_attr_mod(attr)",
"def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n if len(allies) > 0:\n self_weapon_range = weapon_ranges[allies[0].unit_type]\n self_radius = unit_sizes[allies[0].unit_type] / float(2)\n self_unit_type = unit_type[allies[0].unit_type]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n if len(enemies) > 0:\n enemy_weapon_range = weapon_ranges[enemies[0].unit_type]\n enemy_radius = unit_sizes[enemies[0].unit_type] / float(2)\n enemy_unit_type = unit_type[enemies[0].unit_type]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type]",
"def level_and_fall_freq(\n complete_lines: float,\n base_speed: float=8.0,\n speed_limit: float=0.1,\n) -> tuple:\n # get the level that the player is on\n level = int(complete_lines / 10) + 1\n # get the frequency with which to move pieces down\n fall_freq = base_speed / level\n # reset the fall_frequency if it's below the speed limit\n if fall_freq < speed_limit:\n fall_freq = speed_limit\n\n return level, fall_freq",
"def retrieve_handcrafted_inputs(self, obs):\n self.detect_self_unit_types(obs)\n\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n selected_allies = [unit for unit in allies if unit.unit_type == self.current_group_id]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n hitpoints = 0\n for unit in selected_allies:\n hitpoints += unit.health\n\n if self.current_group_id in unit_health.keys():\n init_hp = 0\n init_hp = unit_health[self.current_group_id] * self.init_unit_counts[self.current_group_id]\n else:\n init_hp = self.initial_self_hit_points\n current_hp = hitpoints / init_hp\n\n weapon_cooldown = 0\n for ally in selected_allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(selected_allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(selected_allies) > 0:\n self_weapon_range = weapon_ranges[self.current_group_id]\n self_radius = unit_sizes[self.current_group_id] / float(2)\n self_unit_type = unit_type[self.current_group_id]\n self_speed = unit_speed[self.current_group_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n # TODO can be inaccurate if using melee units\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in selected_allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n if in_enemy_range:\n break\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs, for_subgroup=True)\n\n if self.previous_commands[self.current_group_id] == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_commands[self.current_group_id] == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs,\n for_subgroup=True)\n\n distance_to_enemy = self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_avg_location_of_self_subgroup(obs))\n distance_to_enemy = distance_to_enemy / float((32 ** 2 + 20 ** 2) ** 0.5)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, distance_to_enemy]"
] | [
"0.6369547",
"0.6126176",
"0.5764121",
"0.5721204",
"0.56999665",
"0.56620437",
"0.5656497",
"0.5552577",
"0.5546967",
"0.554252",
"0.5487898",
"0.54512966",
"0.5447283",
"0.5400377",
"0.53890437",
"0.5385318",
"0.5371194",
"0.53344166",
"0.532357",
"0.5270881",
"0.5253192",
"0.5244573",
"0.52291995",
"0.5176173",
"0.51564693",
"0.5151702",
"0.51353246",
"0.5125361",
"0.5108481",
"0.5092772"
] | 0.8360239 | 0 |
Returns a tuple of the form (attack_bonus, strength_bonus) for the best scimitar (weapon) at a given attack level. Scimitars are almost always the most efficient weapon. | def get_weapon_stats(attack_level):
    if attack_level >= 60:
        # Dragon scimitar
        return (67, 66)
    elif attack_level >= 40:
        # Rune scimitar
        return (45, 44)
    elif attack_level >= 30:
        # Adamant scimitar
        return (29, 28)
    elif attack_level >= 20:
        # Mithril scimitar
        return (21, 20)
    elif attack_level >= 10:
        # Black scimitar
        return (19, 14)
    elif attack_level >= 5:
        # Steel scimitar
        return (15, 14)
    else:
        # Iron scimitar
        return (10, 9) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def most_powerful_weapon(self):\n # sets inital damge to 0\n max_damage = 0\n # sets the best weapon to nothing\n best_weapon = None\n # Loop for each item in inventory\n for item in self.inventory:\n # Code adapted from Make Your own Python Text Based Adventure\n # tries to see if the item damage is greator than the current max\n # damage and then replaces the best weapon in inventory\n try:\n if item.damage > max_damage:\n best_weapon = item\n max_damage = item.damage\n except AttributeError:\n pass\n # sends the best weapon to function\n return best_weapon",
"def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)",
"def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between\n # half of max_damage and max_damage\n print(\"max damage of \" + self.name + \" is \")\n print(str(self.attack_strength))\n min_damage = self.attack_strength // 2\n weapon_attack_value = random.randint(min_damage, self.attack_strength)\n return weapon_attack_value",
"def attack(self):\n # TODO: Use integer division to find half of the max_damage value\n # then return a random integer between half of max_damage and max_damage\n \n weapon_attack_value = random.randint(self.max_damage//2, self.max_damage)\n return weapon_attack_value",
"def SADamageFunction(\n skill: AdventurerSkill | None,\n adventurer: \"Adventurer\",\n enemy: \"Enemy\",\n memboost: dict[str, int | float],\n combo: int,\n saRng: float,\n) -> int:\n if skill is None:\n return 0\n\n # lowercase everything\n target = skill.target.lower()\n tempBoostName = skill.tempBoost.lower()\n powerCoefficientName = skill.powerCoefficient.lower()\n powerCoefficient = 1.0\n\n if tempBoostName == \"none\":\n tempBoost = 1.0\n elif \"normal\" in tempBoostName:\n tempBoost = 1.4\n else:\n tempBoost = 1.7\n\n if skill.target == \"foe\":\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.5\n case \"mid\" | \"medium\":\n powerCoefficient = 1.7\n case \"high\":\n powerCoefficient = 1.9\n case \"super\":\n powerCoefficient = 2.1\n case \"ultra\":\n powerCoefficient = 4.0\n else:\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.1\n case \"mid\" | \"medium\":\n powerCoefficient = 1.15\n case \"high\":\n powerCoefficient = 1.2\n case \"super\":\n powerCoefficient = 1.4\n case \"ultra\":\n powerCoefficient = 3.6\n\n if \"physical\" in skill.type:\n stat_key = \"strength\"\n resist_key = \"physical\"\n else:\n stat_key = \"magic\"\n resist_key = \"magic\"\n\n tempPower = adventurer.stats[stat_key]\n tempPowerBoostAdv = adventurer.statsBoostAdv[stat_key]\n tempPowerBoostAst = adventurer.statsBoostAst[stat_key]\n tempMemBoost = memboost[stat_key]\n\n tempTypeResistDownBase = enemy.typeResistDownBase[resist_key]\n tempTypeResistDownAdv = enemy.typeResistDownAdv[resist_key]\n tempTypeResistDownAst = enemy.typeResistDownAst[resist_key]\n # check enemy buffs p/m resist\n tempTypeResistBuff = enemy.get_buff_mod(f\"{resist_key}_resist\")\n\n # get strength/magic debuff\n powerDebuff = adventurer.get_boostCheckAdv(False, stat_key)\n tempPowerBoostDebuff = 0.0\n if powerDebuff is not None:\n tempPowerBoostDebuff = abs(powerDebuff.modifier)\n else:\n tempPowerBoostDebuff = 0\n\n if len(skill.index_to) != 0:\n tempPower = 0\n tempPowerBoostAdv = 0.0\n tempPowerBoostAst = 0.0\n tempMemBoost = 0\n powerCoefficient = powerCoefficient * 1.96\n for index_to_attributes in skill.index_to:\n tempPower += adventurer.stats[index_to_attributes]\n tempPowerBoostAdv += adventurer.statsBoostAdv[index_to_attributes]\n tempPowerBoostAst += adventurer.statsBoostAst[index_to_attributes]\n tempMemBoost += memboost[index_to_attributes]\n tempElementBoostDebuff = 0.0\n if skill.element != \"\" and skill.noType != 1:\n # elementResistDownBase\n tempElementResistDownBase = enemy.elementResistDownBase[skill.element]\n # elementResistDownAdv\n tempElementResistDownAdv = enemy.elementResistDownAdv[skill.element]\n # elementResistDownAst\n tempElementResistDownAst = enemy.elementResistDownAst[skill.element]\n # elementDamageBoostAdv[location]\n\n tempElementDamageBoostAdv = adventurer.elementDamageBoostAdv[skill.element]\n if memboost.get(f\"{skill.element}_attack\") is not None:\n tempElementDamageBoostAdv += memboost[f\"{skill.element}_attack\"]\n # elemental damage boost from weapon\n if adventurer.stats.get(skill.element) is not None:\n tempElementDamageBoostAdv += cast(float, adventurer.stats[skill.element])\n # elementDamageBoostAst[location]\n tempElementDamageBoostAst = adventurer.elementDamageBoostAst[skill.element]\n # element debuff\n tempEleDebuff = adventurer.get_boostCheckAdv(False, f\"{skill.element}_attack\")\n if tempEleDebuff is not None:\n tempElementBoostDebuff = abs(tempEleDebuff.modifier)\n else:\n tempElementResistDownBase = 0.0\n 
tempElementResistDownAdv = 0.0\n tempElementResistDownAst = 0.0\n tempElementDamageBoostAdv = 0.0\n tempElementDamageBoostAst = 0.0\n\n if target == \"foe\":\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"st\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"st\"]\n # foes\n else:\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"aoe\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"aoe\"]\n\n temp_enemy_end = enemy.stats\n\n tempDamage = (\n (\n max(\n 2\n * tempPower\n * tempBoost\n * (\n 1\n + tempPowerBoostAdv\n + tempPowerBoostAst\n + tempMemBoost\n - tempPowerBoostDebuff\n )\n - temp_enemy_end[\"endurance\"],\n 0,\n )\n )\n * (\n 1\n - tempElementResistDownBase\n - tempElementResistDownAdv\n - tempElementResistDownAst\n - tempTypeResistDownBase\n - tempTypeResistDownAdv\n - tempTypeResistDownAst\n - tempTypeResistBuff\n )\n * (\n 1\n + tempElementDamageBoostAdv\n + tempElementDamageBoostAst\n - tempElementBoostDebuff\n )\n * (1 + adventurer.critPenBoost + 0.06)\n * (1 - temptargetResistDownAdv - temptargetResistDownAst)\n * powerCoefficient\n * 1.5\n * (skill.extraBoost)\n * (0.8 + combo * 0.2)\n * saRng\n )\n return int(tempDamage)",
"def weaponValue(self, level):\n if level == 1:\n bonus = 2\n elif level == 2:\n bonus = 4\n elif level == 3:\n bonus = 6\n elif level == 4:\n bonus = 8\n else:\n bonus = 0\n\n return bonus",
"def get_max_hit_and_accuracy(\n levels, attack_style, attack_bonus, strength_bonus):\n weapon_attack, weapon_strength = get_weapon_stats(levels.attack)\n attack_bonus += weapon_attack\n strength_bonus += weapon_strength\n\n if attack_style == Attack_Style.ATTACK:\n effective_attack = osrs.effective_level(levels.attack, 1, 3, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 0, 1)\n elif attack_style == Attack_Style.STRENGTH:\n effective_attack = osrs.effective_level(levels.attack, 1, 0, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 3, 1)\n\n enemy_effective_defence = osrs.effective_level(1, 1, 0, 1)\n\n max_hit = osrs.max_hit(effective_strength, strength_bonus)\n accuracy = osrs.accuracy(effective_attack, attack_bonus,\n enemy_effective_defence, 0)\n\n return (max_hit, accuracy)",
"def Attack_Weapon(self, bonus=0):\n bonus = str(bonus);\n if (bonus == \"0\"):\n return \"\".join((\"[[1d20+\", self.Attribute_Power(\"attack\"), \"]] vs \", self.Attribute_Power(\"def\")));\n else:\n return \"\".join((\"[[1d20+\", self.Attribute_Power(\"attack\"), \"+\", bonus, \"]] vs \", self.Attribute_Power(\"def\")));",
"def weapon_strength(weapon):\n weapon_strength_int = WEAPON_STRENGTHS[weapon]\n #print weapon_strength_int\n return weapon_strength_int",
"def Attack_Skill(self, bonus=0):\n bonus = str(bonus);\n if (bonus == \"0\"):\n return \"\".join((\"[[1d20+\", Attribute(\"halflevel\"), \"[level/2]+\", self.Attribute_Power(\"mod\"), \"+\", self.Attribute_Power(\"attack-misc\"), \"]] vs \", self.Attribute_Power(\"def\")));\n else:\n return \"\".join((\"[[1d20+\", Attribute(\"halflevel\"), \"[level/2]+\", self.Attribute_Power(\"mod\"), \"+\", self.Attribute_Power(\"attack-misc\"), \"+\", bonus, \"]] vs \", self.Attribute_Power(\"def\")));",
"def attack(self):\n\n lowest_attack = int(self.attack_strength)// 2\n attack_strength = random.randint(lowest_attack, int(self.attack_strength))\n return attack_strength",
"def find_desired_outcome(\n base_player_stats: Tuple[int, int, int],\n base_boss_stats: Tuple[int, int, int],\n weapons,\n armors,\n rings,\n character: str = \"player\",\n key=min,\n) -> int:\n\n outcomes = []\n for item_combination in generate_item_combinations(weapons, armors, rings):\n player = Character(\"player\", *base_player_stats)\n boss = Character(\"boss\", *base_boss_stats)\n total_cost: int = 0\n for item in item_combination:\n total_cost += item.cost\n player.armor += item.armor\n player.damage += item.damage\n\n winner = fight_battle(player, boss)\n if winner.name == character:\n outcomes.append(total_cost)\n\n return key(outcomes)",
"def bless_advanced(unit):\n return {DAMAGE: unit.maximum_damage + 1}",
"def attack(self):\n if random.random() < self.chance_critical:\n return self.strength * 2\n return self.strength",
"def analysis(self, game_info):\n available_cards_indices = []\n for card_index in range(len(game_info['cards'])):\n card = game_info['cards'][card_index]\n cost_color, cost_value = card.get_cost()\n if cost_color == 0:\n available_cards_indices.append(card_index)\n continue\n resource_name = getResourceName(cost_color)\n if game_info[resource_name] >= cost_value:\n available_cards_indices.append(card_index)\n\n self_coeff = -1\n enemy_coeff = 1\n if len(available_cards_indices) > 0:\n index = 0\n optimal_priority = 0\n for i in xrange(len(available_cards_indices)):\n priority = 0\n actions = game_info['cards'][available_cards_indices[i]].get_actions()\n for action in actions['player']:\n if action[0] == 0:\n priority += self_coeff * action[1]\n for action in actions['opponent']:\n if action[0] == 0:\n priority -= enemy_coeff * action[1]\n if i == 0:\n optimal_priority = priority\n elif priority > optimal_priority:\n index, optimal_priority = i, priority\n return game_info['cards'][available_cards_indices[index]], TO_PRESS\n else:\n index = 0\n for i in xrange(1, 5):\n if game_info['cards'][i].cost_value > game_info['cards'][index].cost_value:\n index = i\n return game_info['cards'][index], TO_DROP",
"def calculate_hit(self, armor_list, inventory):\n armor_power = 0\n for armor in armor_list:\n armor_power += inventory[armor]['power']\n max_strength = max(1, (self.level * 5) - armor_power)\n min_strength = 0\n return random.randint(min_strength, max_strength)",
"def get_weapon(self):\n\n return self.suggestion_set[1]",
"def get_best_action(self, strategy, player):\n actions = self.game.get_actions(player)\n action = None\n if not actions:\n action = (player, None)\n elif strategy == \"q\":\n action = actions[np.argmax([self.weights @ extractor(self.game, a) for a in actions])]\n elif strategy == \"random\":\n action = actions[random.randint(0, len(actions) - 1)]\n feature = extractor(self.game.copy(), action)\n return feature, action",
"def bless_basic(unit):\n return {DAMAGE: unit.maximum_damage}",
"def calculate_overall_rating(player_dict):\r\n if player_dict[\"position\"].upper() == \"QB\":\r\n throw_power = int(max(min(int(player_dict[\"throw_power\"]), 99), 70))\r\n throw_accuracy = int(max(min(math.ceil(\r\n ((2 * (\r\n int(player_dict[\"throw_accuracy_short\"]) + \r\n int(player_dict[\"throw_accuracy_mid\"]) + \r\n int(player_dict[\"throw_accuracy_deep\"]) + \r\n int(player_dict[\"throw_on_the_run\"]) + \r\n int(player_dict[\"playaction\"])\r\n )) - (2 * min(\r\n int(player_dict[\"throw_accuracy_short\"]), \r\n int(player_dict[\"throw_accuracy_mid\"]), \r\n int(player_dict[\"throw_accuracy_deep\"]), \r\n int(player_dict[\"throw_on_the_run\"]), \r\n int(player_dict[\"playaction\"])\r\n ))\r\n ) / 8\r\n ), 99), 60))\r\n break_tackles = int(max(min(\r\n math.ceil(((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 7), \r\n 90), 20))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 98), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((throw_power - 50.0) / 10.0) * 4.9\r\n overall_rating += ((throw_accuracy - 50.0) / 10.0) * 5.8\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.0\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"HB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 70), 25))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 50))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 50))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 0.33\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 2.0\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.6\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 75), 40))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 45))\r\n break_tackles = 
int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 55))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 95), 60))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.0\r\n overall_rating += ((run_block - 50.0) / 10.0) * 7.2\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 1.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.0\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 39), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"WR\":\r\n break_tackles = int(max(min(\r\n math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2), \r\n 80), 35))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 75))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 35))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 65))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.3\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 4.75\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"TE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 95), 20))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 80), 35))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n 
), 85), 35))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.65\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.65\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.65\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.25\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.25\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.4\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.2\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.2\r\n overall_rating += ((run_block - 50.0) / 10.0) * 5.4\r\n overall_rating = int(max(min((round(overall_rating) + 35), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LT\" or player_dict[\"position\"].upper() == \"RT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 0.8\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 4.75\r\n overall_rating += ((run_block - 50.0) / 10.0) * 3.75\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if (player_dict[\"position\"].upper() == \"LG\" or player_dict[\"position\"].upper() == \"RG\" or \r\n player_dict[\"position\"].upper() == \"C\"):\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.7\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.25\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.25\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 3.25\r\n overall_rating += ((run_block - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LE\" or player_dict[\"position\"].upper() == 
\"RE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 45))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.75\r\n overall_rating += ((awareness - 50.0) / 10.0) * 1.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.75\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 3.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"DT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 5.5\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.55\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LOLB\" or player_dict[\"position\"].upper() == \"ROLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 70))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 90), 20))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.6\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.4\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.3\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"MLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 65))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.4\r\n overall_rating += ((awareness - 
50.0) / 10.0) * 5.2\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.65\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.75\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"CB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 40))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 40))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 85), 30))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.85\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.55\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.35\r\n overall_rating += ((catching - 50.0) / 10.0) * 3\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.55\r\n overall_rating += ((tackle - 50.0) / 10.0) * 1.55\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.5\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.5\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.0\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.5\r\n overall_rating += ((tackle - 50.0) / 10.0) * 2.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"SS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.2\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.7\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.7\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n 
overall_rating += ((catching - 50.0) / 10.0) * 3.2\r\n overall_rating += ((jumping - 50.0) / 10.0) * 0.9\r\n overall_rating += ((tackle - 50.0) / 10.0) * 3.2\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"K\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 35))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-177 + (0.218 * awareness) + (1.28 * kick_power) + (1.47 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"P\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 40))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-183 + (0.218 * awareness) + (1.5 * kick_power) + (1.33 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating",
"def getWeightsAttack(self, gameState, action):\r\n return {'minDistToFood': -1,'getFood': 100}",
"def getDamage(self, player, is_random=True):\n \n if \"restrained\" in self.debuffs:\n return 0, 0\n \n mitigation, num_cats = player.getCatBonus(player.defending_kittens,\n \"defending\")\n raw_dmg = random.randint(self._damage[0], self._damage[1])\n \n true_dmg = raw_dmg - mitigation\n if true_dmg < 0:\n true_dmg = 0\n \n return true_dmg, num_cats",
"def getDamage(self):\n \n weapon_dmg = self.weapon.getDamage()\n cat_bonus, att_cats = self.getCatBonus(self.attacking_kittens,\n \"attacking\")\n true_dmg = weapon_dmg + cat_bonus + self.getBonusDamageFromInsanity()\n return true_dmg, att_cats",
"def find_best(self):\n best_st = 0\n best_bt = 0\n best_perf = -1.1\n for bt in self.btl:\n for st in self.stl:\n if self.total[bt, st, \"perf\"] > best_perf:\n best_perf = self.total[bt, st, \"perf\"]\n best_st = st\n best_bt = bt\n return (best_perf, self.total[best_bt, best_st, \"count\"], best_bt, best_st)",
"def attack(self):\n return random.randint(self.max_damage//2, self.max_damage)",
"def get_max_hit_increases(\n start_strength_level, end_strength_level,\n strength_bonus, stance_adder):\n greatest_max_hit = 0\n max_hit_increases = []\n cur_strength_level = start_strength_level\n while cur_strength_level < end_strength_level:\n effective_strength = osrs.effective_level(\n cur_strength_level, 1, stance_adder, 1)\n max_hit = osrs.max_hit(effective_strength, strength_bonus)\n\n if max_hit > greatest_max_hit:\n greatest_max_hit = max_hit\n max_hit_increases.append((cur_strength_level, max_hit))\n\n cur_strength_level += 1",
"def interpretSkillAdventurerAttack(\n skillEffectsWithName: tuple[str, list], adventurer: \"Adventurer\", enemy: \"Enemy\"\n) -> AdventurerSkill | None:\n # for index_to maybe list {\"modifier\": \"End. & Mag.\", \"target\": \"skill\", \"attribute\": \"indexed_to\",\"speed\": \"None\" }\n\n # test if skill effects empty\n if skillEffectsWithName:\n _, skillEffects = skillEffectsWithName\n else:\n skillEffects = []\n\n damage_skills = [\n x\n for x in skillEffects\n if x.attribute.lower().strip() == \"damage\"\n or (\n (x.element is not None and x.element != \"\")\n and (x.type == \"physical_attack\" or x.type == \"magic_attack\")\n )\n ]\n if len(damage_skills) > 0:\n damage_skill = damage_skills[0]\n # do the damage first if attribute == element and modifier== high/medium etc, type = attack\n index_to_effects = [\n x for x in skillEffects if x.attribute.lower().strip() == \"indexed_to\"\n ]\n index_to_modifier = set()\n # modifier is the index_to target\n for index_to_effect in index_to_effects:\n # \"attribute\" index_to\n index_to_modifier.add(index_to_effect.modifier)\n \"\"\"\n For temp boosts\n {\n \"modifier\": \"normal2_str\",\n \"target\": \"skill\",\n \"attribute\": \"temp_boost\",\n }\n \"\"\"\n temp_boost_effects = [\n x for x in skillEffects if x.attribute.lower().strip() == \"temp_boost\"\n ]\n if len(temp_boost_effects) > 0:\n temp_boost_mod = temp_boost_effects[0].modifier\n else:\n temp_boost_mod = \"none\"\n\n # loop through the variables to check if attribute exists\n extra_boosts_effects = [\n x for x in skillEffects if \"per_each\" in x.attribute.lower().strip()\n ]\n extra_boosts_value = 1.0\n # for example str/mag debuff\n if len(extra_boosts_effects) > 0:\n for extra_boosts in extra_boosts_effects:\n temp_extra_boosts = interpretExtraBoostWrapper(\n extra_boosts, adventurer, enemy\n )\n extra_boosts_value = extra_boosts_value + temp_extra_boosts\n # SELECT ase.AdventurerSkillEffectsid, ase.AdventurerSkillid, ase.duration, e.name AS element, m.value AS modifier, ty.name AS type, ta.name AS target, a.name AS attribute, s.name AS speed, ad.stars, ad.title, ad.alias, ad.limited, c.name\n ret = AdventurerSkill(\n damage_skill.target,\n temp_boost_mod,\n damage_skill.modifier,\n extra_boosts_value,\n 0,\n damage_skill.type,\n damage_skill.element,\n index_to_modifier,\n )\n return ret\n else:\n return None",
"def __calcSuitTarget(self, attackIndex):\n attack = self.battle.suitAttacks[attackIndex]\n suitId = attack[SUIT_ID_COL]\n if self.SuitAttackers.has_key(suitId) and \\\n random.randint(0, 99) < 75:\n # first calculate the total damage done to this suit by all\n # recorded attackers, this is so we can create a frequency\n # list of damage percentages that we can randomly pick from\n totalDamage = 0\n for currToon in self.SuitAttackers[suitId].keys():\n totalDamage += self.SuitAttackers[suitId][currToon]\n\n # create a list of damage percentages and pick one of the\n # weighted values, this tells us which toon attacker that\n # the suit should attack\n dmgs = []\n for currToon in self.SuitAttackers[suitId].keys():\n dmgs.append((self.SuitAttackers[suitId][currToon] /\n totalDamage) * 100)\n dmgIdx = SuitBattleGlobals.pickFromFreqList(dmgs)\n if (dmgIdx == None):\n toonId = self.__pickRandomToon(suitId) \n else:\n toonId = self.SuitAttackers[suitId].keys()[dmgIdx]\n if (toonId == -1 or toonId not in self.battle.activeToons):\n return -1\n self.notify.debug(\"Suit attacking back at toon \" + str(toonId))\n return self.battle.activeToons.index(toonId)\n else:\n #return random.randint(0, len(self.battle.activeToons) - 1)\n # make sure we only randomly choose from the active toons\n # that are still alive at this point in the round\n return self.__pickRandomToon(suitId)",
"def difficulty_for_level(level):\n return 0 if level==\"easy\" else (1 if level==\"medium\" else 2)",
"def optimal_battle(hard=False):\n winners = []\n\n def _run(spells, best_cost):\n for spell in all_spells.keys():\n spells.append(spell)\n\n # Calc cost to skip useless battles\n cost = sum([all_spells[s] for s in spells])\n if cost < best_cost:\n\n # Battle with this spells list\n hero = Player('Hero', hit=50, mana=500)\n boss = Player('Boss', hit=71, damage=10)\n outcome = battle(hero, boss, spells, hard=hard)\n # print outcome\n if outcome == 'win':\n if cost < best_cost:\n # Save this spells list\n print cost, ', '.join(map(lambda x : x.func_name, spells))\n winners.append(list(spells))\n best_cost = cost\n elif outcome == 'moar':\n # Add more spells\n best_cost = min(best_cost, _run(spells, best_cost))\n else:\n # Don't progress further\n pass\n\n spells.pop()\n\n return best_cost\n\n\n return _run([], float('inf'))"
] | [
"0.6914016",
"0.66133755",
"0.647129",
"0.6280319",
"0.6277309",
"0.62479544",
"0.6210469",
"0.6196124",
"0.6190553",
"0.61680675",
"0.60728323",
"0.60306805",
"0.6020015",
"0.59235066",
"0.5873241",
"0.5860358",
"0.5845829",
"0.5792037",
"0.57798666",
"0.5775046",
"0.5746795",
"0.5742029",
"0.57380325",
"0.5729014",
"0.5720506",
"0.5672314",
"0.56190413",
"0.5595903",
"0.5594676",
"0.55684036"
] | 0.733018 | 0 |
Returns list of tuples of the form (level, max_hit) for levels between start_strength_level and end_strength_level that increase max_hit. Assumes start_strength_level < end_strength_level and no multipliers | def get_max_hit_increases(
start_strength_level, end_strength_level,
strength_bonus, stance_adder):
greatest_max_hit = 0
max_hit_increases = []
cur_strength_level = start_strength_level
while cur_strength_level < end_strength_level:
effective_strength = osrs.effective_level(
cur_strength_level, 1, stance_adder, 1)
max_hit = osrs.max_hit(effective_strength, strength_bonus)
if max_hit > greatest_max_hit:
greatest_max_hit = max_hit
max_hit_increases.append((cur_strength_level, max_hit))
        cur_strength_level += 1

    return max_hit_increases | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_max_hit_and_accuracy(\n levels, attack_style, attack_bonus, strength_bonus):\n weapon_attack, weapon_strength = get_weapon_stats(levels.attack)\n attack_bonus += weapon_attack\n strength_bonus += weapon_strength\n\n if attack_style == Attack_Style.ATTACK:\n effective_attack = osrs.effective_level(levels.attack, 1, 3, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 0, 1)\n elif attack_style == Attack_Style.STRENGTH:\n effective_attack = osrs.effective_level(levels.attack, 1, 0, 1)\n effective_strength = osrs.effective_level(levels.strength, 1, 3, 1)\n\n enemy_effective_defence = osrs.effective_level(1, 1, 0, 1)\n\n max_hit = osrs.max_hit(effective_strength, strength_bonus)\n accuracy = osrs.accuracy(effective_attack, attack_bonus,\n enemy_effective_defence, 0)\n\n return (max_hit, accuracy)",
"def calculate_hit(self):\n weapon = self.game_data['player inventory']['equipped weapon']\n weapon_power = self.game_data['player inventory'][weapon]['power']\n max_strength = weapon_power\n min_strength = max_strength - 7\n return random.randint(min_strength, max_strength)",
"def _hit_range_get(self):\n return (self.hit_start, self.hit_end)",
"def calculate_hit(self, armor_list, inventory):\n armor_power = 0\n for armor in armor_list:\n armor_power += inventory[armor]['power']\n max_strength = max(1, (self.level * 5) - armor_power)\n min_strength = 0\n return random.randint(min_strength, max_strength)",
"def checkRange(currentNumRange: tuple, currentLevel: int):\n\n\tlowerNumber, higherNumber = currentNumRange[0], currentNumRange[1]\n\tmid = (higherNumber + lowerNumber) // 2\n\tans = getAnswer(f\"Does your number is greater than {mid}?\", mid)\n\n\tif ans:\n\t\tlowerNumber = mid\n\telse:\n\t\thigherNumber = mid\n\n\n\treturn (lowerNumber, higherNumber)",
"def calc_tohit(attr, level):\n return level + calc_attr_mod(attr)",
"def count_property_range_hits(prop, node_dict, hits):\n\tres = []\n\t# sets tuple position to use in dict value\n\tswitcher = {\n \"length\": (0,(0,4000,8000,12000,16000,20000)),\n \"steps\": (1,(0,2,4,8,16,32)),\n \"cov\": (2,(1,10,100,1000,10000,100000)),\n \"cv\": (3, (0,0.05,0.10,0.15,0.20,0.25))\n }\n\tif prop not in switcher:\n\t\treturn res\n\ttup_pos = switcher[prop][0]\n\tnode_cnt = 0\n\tpos_cnt = 0\n\tfor ind in range(len(switcher[prop][1])-1):\n\t\tmin_val = switcher[prop][1][ind]\n\t\tmax_val = switcher[prop][1][ind+1]\n\t\tfor node in node_dict.keys():\n\t\t\tval = node_dict[node][tup_pos]\n\t\t\tif ind < len(switcher[prop][1])-2:\n\t\t\t\trange_test_val = (min_val <= val < max_val)\n\t\t\telse:\n\t\t\t\trange_test_val = (min_val <= val <= max_val)\n\t\t\t# print \"range bool is\", range_test_val\n\t\t\tif range_test_val:\n\t\t\t\tnode_cnt += 1\n\t\t\t\tif node in hits: pos_cnt += 1\n\t\tif node_cnt > 0:\n\t\t\tres.append( (pos_cnt, node_cnt, round(float(pos_cnt)/node_cnt,2)))\n\t\telse:\n\t\t\tres.append((0,0,0))\n\t\tnode_cnt = 0\n\t\tpos_cnt = 0\n\treturn res",
"def get_max_gains(self):\n return tuple([lib.is_SetHWGainFactor(self.hcam,0x800c+i,100)/100 for i in range(4)])",
"def _determine_level(levels, points):\n import operator\n level = None\n sorted_levels = sorted(levels.iteritems(), key=operator.itemgetter(1))\n for el in sorted_levels:\n if points <= el[1]:\n level = el[0]\n break\n\n max_level = max(levels.iterkeys(), key=lambda threshold: levels[threshold])\n if points >= levels[max_level]:\n level = max_level\n return level",
"def bounds_slope(regressions):\n max_up_slope = 0\n min_down_slope = 0\n for regression in regressions.itervalues():\n min_slope = regression.find_min_slope()\n max_up_slope = max(max_up_slope, min_slope)\n min_down_slope = min(min_down_slope, min_slope)\n \n return (max_up_slope, min_down_slope)",
"def query_range(tree, start_y, start_x, end_y, end_x):\n res = 0\n start_y -= 1\n\n while end_y > start_y:\n res += bit.query_range(tree[end_y], start_x, end_x)\n end_y -= (end_y & -end_y)\n\n while start_y > end_y:\n res -= bit.query_range(tree[start_y], start_x, end_x)\n start_y -= (start_y & -start_y)\n\n return res",
"def __calculateSupportResistenceLevels(self):\n\n for i in range(2, self.df.shape[0] - 2):\n if self.__isSupport(self.df, i):\n l = self.df['low'][i]\n if self.__isFarFromLevel(l):\n self.levels.append((i, l))\n elif self.__isResistance(self.df, i):\n l = self.df['high'][i]\n if self.__isFarFromLevel(l):\n self.levels.append((i, l))\n return self.levels",
"def get_combined_energy(start, end, max_level, group=ms2sp(80)):\n # Dictionaries to store the energy intervals for each lead\n dicts = {}\n for lead in sig_buf.get_available_leads():\n dicts[lead] = {}\n for i in range(max_level + 1):\n dicts[lead][i] = []\n # Energy intervals detection and combination\n idx = start\n while idx < end:\n wfs = {}\n for lead in dicts:\n wfs[lead] = get_deflection_observations(\n start + idx, start + idx + TWINDOW, lead=lead, max_level=max_level, group=group\n )\n for i in range(max_level + 1):\n if dicts[lead][i] and wfs[lead][i]:\n if wfs[lead][i][0].earlystart - dicts[lead][i][-1].lateend <= group:\n dicts[lead][i][-1].end.value = wfs[lead][i][0].start.value\n wfs[lead][i].pop(0)\n dicts[lead][i].extend(wfs[lead][i])\n idx += TWINDOW\n # Remove overlapping intervals\n combine_energy_intervals(dicts.values())\n # Now we flatten the dictionaries, putting all the intervals in a sequence\n # sorted by the earlystart value.\n return SortedList(\n w for w in it.chain.from_iterable(it.chain.from_iterable(dic.values() for dic in dicts.values()))\n )",
"def getLevels():",
"def max_diaphragmatic_level(levels):\n return [max(x) for x in levels]",
"def get_gain_range(self, *args):\n return _uhd_swig.usrp_source_get_gain_range(self, *args)",
"def get_hit_points(min, max):\n return random.randint(min, max)",
"def calc_level(xp, dominion):\n if xp < 3:\n xp_potential = 1\n if xp >= 3 and xp < 6:\n xp_potential = 2\n if xp >= 6 and xp < 12:\n xp_potential = 3\n if xp >= 12 and xp < 24:\n xp_potential = 4\n if xp >= 24 and xp < 48:\n xp_potential = 5\n if xp >= 48 and xp < 72:\n xp_potential = 6\n if xp >= 72 and xp < 96:\n xp_potential = 7\n if xp >= 96 and xp < 130:\n xp_potential = 8\n if xp >= 130 and xp < 170:\n xp_potential = 9\n if xp >= 170:\n xp_potential = 10\n if dominion < 2:\n dom_potential = 1\n if dominion >= 2 and dominion < 4:\n dom_potential = 2\n if dominion >= 4 and dominion < 10:\n dom_potential = 3\n if dominion >= 10 and dominion < 22:\n dom_potential = 4\n if dominion >= 22 and dominion < 38:\n dom_potential = 5\n if dominion >= 38 and dominion < 57:\n dom_potential = 6\n if dominion >= 57 and dominion < 76:\n dom_potential = 7\n if dominion >= 76 and dominion < 95:\n dom_potential = 8\n if dominion >= 95 and dominion < 124:\n dom_potential = 9\n if dominion >= 124:\n dom_potential = 10\n return min(xp_potential, dom_potential)",
"def get_gain_range(self, *args):\n return _uhd_swig.usrp_sink_get_gain_range(self, *args)",
"def assign_level(self, minibatch_reference_proboxes):\n with tf.name_scope('assign_levels'):\n ymin, xmin, ymax, xmax = tf.unstack(minibatch_reference_proboxes, axis=2)\n\n w = tf.maximum(xmax - xmin, 0.) # avoid w is negative\n h = tf.maximum(ymax - ymin, 0.) # avoid h is negative\n\n levels = tf.round(4. + tf.log(tf.sqrt(w*h + 1e-8)/224.0) / tf.log(2.)) # 4 + log_2(***)\n\n levels = tf.maximum(levels, tf.ones_like(levels) * (np.float32(self.min_level))) # level minimum is 2\n levels = tf.minimum(levels, tf.ones_like(levels) * (np.float32(self.max_level))) # level maximum is 5\n\n return tf.cast(levels, tf.int32)",
"def get_weapon_stats(attack_level):\n if attack_level >= 60:\n # Dragon scimitar\n return (67, 66)\n elif attack_level >= 40:\n # Rune scimitar\n return (45, 44)\n elif attack_level >= 30:\n # Adamant scimitar\n return (29, 28)\n elif attack_level >= 20:\n # Mithril scimitar\n return (21, 20)\n elif attack_level >= 10:\n # Black scimitar\n return (19, 14)\n elif attack_level >= 5:\n # Steel scimitar\n return (15, 14)\n else:\n # Iron scimitar\n return (10, 9)",
"def max_gain(self):\n if self.val1:\n val1_gain_tuple, val0_gain_tuple = self.val1.max_gain(), self.val0.max_gain()\n if val1_gain_tuple.gain > val0_gain_tuple.gain:\n return val1_gain_tuple\n else:\n return val0_gain_tuple\n elif self.attributes:\n filtered_data = filter_data(self.data,self.ancestors)\n max_attribute, max_gain = max([(attribute,\n self.heuristic(self,attribute)) for attribute in self.attributes],\n key = lambda x: x[1])\n return gain_tuple(self, max_attribute, max_gain)\n return gain_tuple(None, '', 0)",
"def lvl_algo(next_level):\n total_xp_needed = (next_level * next_level)\n return total_xp_needed",
"def on_max_hit_points(self):\n pass",
"def extract_levels(enemy_behavior: List[Any]):\n levels = set()\n levels.add(1)\n for b in enemy_behavior:\n if type(b) == ESBranchLevel:\n levels.add(b.branch_value)\n elif hasattr(b, 'level'):\n levels.add(b.level)\n return levels",
"def get_strength_text(currentstrength):\n for i in range(0, 5): \n strengthrange = (79, 59, 39, 19, 0)\n if currentstrength in range(strengthrange[i], strengthrange[i] + 20):\n strength = STRENGTH_TEXT[i]\n if currentstrength > 99:\n strength = STRENGTH_TEXT[0]\n\n return strength",
"def compute_pair_bounds(self, edges, pair):\n lower_bounds =[]\n upper_bounds = []\n for arc in edges:\n l_e = self.arc_info[arc][\"lower_bound\"]\n u_e = self.arc_info[arc][\"upper_bound\"]\n f_mij = self.compute_f_mij(arc, pair)\n lower_bounds.append(l_e - f_mij)\n upper_bounds.append(u_e - f_mij)\n lb = max(lower_bounds + [0])\n # in case no edges in here, make max of 5,000\n if len(upper_bounds) == 0:\n i = pair[0]\n j = pair[1]\n print(\"Path i ({}): {}\".format(i, self.paths[i]))\n print(\"Path j ({}): {}\".format(j, self.paths[j]))\n ub = min(upper_bounds + [5000])\n #print(\"lower bounds: {}\".format(lower_bounds))\n #print(\"upper bounds: {}\".format(upper_bounds))\n return(lb, ub)",
"def getSupportResistanceLevels(self):\n return self.levels",
"def attack_bonus_on_level(self, level):\n raise NotImplementedError",
"def get_bounds(group: Group, player: int) -> Tuple[Position, Position]:\n tiles = []\n for couple in group:\n tiles.append(couple[0])\n if couple[2] != -1:\n tiles.append(couple[1])\n\n maximum = max(tiles, key=lambda t: t[player])\n minimum = min(tiles, key=lambda t: t[player])\n\n return minimum, maximum"
] | [
"0.58623534",
"0.56103396",
"0.5589479",
"0.556326",
"0.5557862",
"0.5534794",
"0.5484521",
"0.54517794",
"0.5345242",
"0.53201896",
"0.5267493",
"0.522973",
"0.5207859",
"0.5173894",
"0.5169486",
"0.51680756",
"0.5166746",
"0.5157776",
"0.51527137",
"0.51492393",
"0.51178026",
"0.5090489",
"0.50744903",
"0.5052406",
"0.5035216",
"0.4999881",
"0.49979913",
"0.4989706",
"0.49859378",
"0.49853504"
] | 0.79078573 | 0 |
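The row above pairs a level/max-hit docstring with the get_max_hit_increases implementation as its document. As a hedged illustration only, the sketch below mirrors that loop against stand-in helpers; the formula shapes, the strength bonus of 66 and the stance adder of 3 are assumptions made for this example and are not taken from the dataset.

import math

def effective_level(level, multiplier, stance_adder, extra_multiplier):
    # Assumed shape of the osrs.effective_level helper called by the document
    # code above: scale the base level, then add the stance bonus and +8.
    return math.floor(level * multiplier) + stance_adder + 8

def max_hit(effective_strength, strength_bonus):
    # Assumed OSRS-style max-hit formula.
    return math.floor(0.5 + effective_strength * (strength_bonus + 64) / 640)

if __name__ == "__main__":
    strength_bonus = 66   # example weapon strength bonus (assumption)
    stance_adder = 3      # aggressive stance (assumption)
    greatest_max_hit = 0
    for level in range(1, 99):
        hit = max_hit(effective_level(level, 1, stance_adder, 1), strength_bonus)
        if hit > greatest_max_hit:
            greatest_max_hit = hit
            print(f"strength {level}: max hit rises to {hit}")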
Generates steric beads required for checking for steric clashes between motifs. Each residue has three beads, modeled after the typical three-bead models used in coarse-grained modeling. The three beads are: Phosphate (P, OP1, OP2), Sugar (O5',C5',C4',O4',C3',O3',C1',C2',O2') and Base (all remaining atoms). | def get_beads(self):
phos_atoms,sugar_atoms,base_atoms = [],[],[]
for i,a in enumerate(self.atoms):
if a is None:
continue
if i < 3:
phos_atoms.append(a)
elif i < 12:
sugar_atoms.append(a)
else:
base_atoms.append(a)
beads = []
types = [residue.BeadType.PHOS, residue.BeadType.SUGAR, residue.BeadType.BASE]
for i,alist in enumerate([phos_atoms,sugar_atoms,base_atoms]):
if len(alist) > 0:
beads.append(residue.Bead(util.center(alist), types[i]))
return beads | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_bespoke_bond_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_smiles(\"CC\")\n\n bond_smirks = gen._get_bespoke_bond_smirks(molecule=mol)\n # there should be 2 unique bond smirks\n assert len(bond_smirks) == 2\n all_bonds = []\n for smirk in bond_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n all_bonds.extend(atoms)\n assert set(atoms) == smirk.atoms\n # make sure all bonds are covered\n for bond in mol.bonds:\n assert (bond.atom1_index, bond.atom2_index) in all_bonds",
"def test_get_all_bespoke_smirks():\n gen = SmirksGenerator()\n gen.target_smirks = [SmirksType.Vdw, SmirksType.Bonds, SmirksType.Angles, SmirksType.ProperTorsions]\n\n mol = Molecule.from_smiles(\"CO\")\n\n all_bespoke_smirks = gen._get_all_bespoke_smirks(molecule=mol, forcefield_editor=ForceFieldEditor(\"openff_unconstrained-1.3.0.offxml\"))\n # this is a list of all bespoke smirks with real initial values\n all_matches = []\n for smirk in all_bespoke_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n assert compare_matches(atoms, smirk.atoms) is True\n all_matches.extend(atoms)\n\n assert all_covered(all_matches, mol) is True",
"def test_bespoke_torsion_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_file(get_data(\"OCCO.sdf\"))\n\n torsion_smirks = gen._get_bespoke_torsion_smirks(molecule=mol)\n # there should be 5 unique torsions\n assert len(torsion_smirks) == 5\n\n all_torsions = []\n for smirk in torsion_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n all_torsions.extend(atoms)\n assert compare_matches(atoms, smirk.atoms) is True\n\n for torsion in mol.propers:\n dihedral = tuple([atom.molecule_atom_index for atom in torsion])\n assert dihedral in all_torsions or tuple(reversed(dihedral)) in all_torsions",
"def guess_potentialisation(self, sysargs):\n\n print(\"Guessing potentialisation...\")\n print(\"Copying reference basis...\")\n shutil.copyfile(self.reference_guess_basis_path, os.path.join(os.getcwd(), 'basis'))\n\n sp2_replacement_list = []\n sp2_deletion_list = []\n sp2_carbon_list = []\n sp3_replacement_list = []\n sp3_deletion_list = []\n sp3_carbon_list =[]\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n\n # Sort through carbons to decide what needs potentialising. Find atoms bonded to each carbon\n for atom in carbon_atoms:\n distanced_atoms = self.order_atoms_by_distance_from(atom['#'])\n nearest_4_distances = [self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) for distanced_atom in\n distanced_atoms[1:5]]\n bonded_distances = [less_than_distance for less_than_distance in nearest_4_distances if\n less_than_distance < self.bond_deciding_distance]\n\n # if 3 bonded atoms, may be sp2, check if they're hydrogens\n if len(bonded_distances) == 3:\n hydrogens_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_atoms[1:5] if\n distanced_atom['el'] == 'h' and self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) < self.bond_deciding_distance]\n sp2_deletion_list.extend([hydrogen['#'] for hydrogen in hydrogens_bonded_to_this_atom])\n sp2_replacement_list.append(str(atom['#']))\n sp2_carbon_list.append(atom)\n\n # if 4 bonded atoms, may be sp3, check if they're hydrogens\n elif len(bonded_distances) == 4:\n hydrogens_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_atoms[1:5] if\n distanced_atom['el'] == 'h' and self.measure_atom_atom_dist(atom['#'], distanced_atom['#']) < self.bond_deciding_distance]\n if len(hydrogens_bonded_to_this_atom) == 3:\n sp3_replacement_list.extend([str(hydrogen['#']) for hydrogen in hydrogens_bonded_to_this_atom])\n sp3_deletion_list.extend([hydrogen['#'] for hydrogen in hydrogens_bonded_to_this_atom])\n sp3_carbon_list.append(atom)\n\n log_file = open('pseudification.log', 'w+')\n log_file.writelines(\n 'sp2 carbon indices: %s \\nsp3 carbon indices: %s \\n' % (\n ','.join(str(carbon['#']) for carbon in sp2_carbon_list),\n ','.join(str(carbon['#']) for carbon in sp3_carbon_list)\n ))\n\n sp2_coord_command = 'mn sp2 %s' % (','.join(sp2_replacement_list))\n print(\"sp2 command: %s\" % sp2_coord_command)\n sp3_coord_command = 'mn sp3 %s' % (','.join(sp3_replacement_list))\n print(\"sp3 command: %s\" % sp3_coord_command)\n\n if 'nosp3' not in sysargs:\n self.pseudopotentialise_ethane_like_molecule(sp3_coord_command.split(), execute_deletion=False)\n self.pseudopotentialise_molecule(sp2_coord_command.split(), execute_deletion=False)\n\n self.delete_specified_atoms(sp2_deletion_list + sp3_deletion_list)\n\n print(\"Identifying 2-electron sp2 carbons...\")\n # Now need to work out where the 2e sp2 carbons are\n self.coord_list = []\n self.read_coords()\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n sp2_pseudocarbon_list = []\n\n for atom in carbon_atoms:\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n if len(carbon_pseudos) == 6:\n sp2_pseudocarbon_list.append(atom)\n print(\"Re-discovered %s sp2 carbons.\" % str(len(sp2_pseudocarbon_list)))\n\n # Now check for ncore=4 sp2 pseudocarbons\n pseudopotential_hashes_to_delete = []\n for atom in sp2_pseudocarbon_list:\n distanced_carbon_list = self.order_atoms_by_distance_from(atom['#'], element='c')\n 
carbons_bonded_to_this_atom = [distanced_atom for distanced_atom in distanced_carbon_list[1:5] if\n self.measure_atom_atom_dist(atom['#'],\n distanced_atom[\n '#']) < self.bond_deciding_distance]\n print(\"Carbons bonded to atom %s: %s\" % (str(atom['#']),\n str([carbon['#'] for carbon in carbons_bonded_to_this_atom])))\n\n for carbon_bonded_to_this_atom in carbons_bonded_to_this_atom:\n if carbon_bonded_to_this_atom not in sp2_pseudocarbon_list:\n def distance_from(list_atom):\n return self.measure_atom_atom_dist(carbon_bonded_to_this_atom['#'], list_atom['#'])\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n # find pseudos closest to the other carbon\n pseudos_distanced_from_sp2_2e = sorted(carbon_pseudos, key=distance_from)\n pseudopotential_hashes_to_delete.append(pseudos_distanced_from_sp2_2e[0]['#'])\n pseudopotential_hashes_to_delete.append(pseudos_distanced_from_sp2_2e[1]['#'])\n\n self.delete_specified_atoms(pseudopotential_hashes_to_delete)\n\n # Read final coordinates\n self.coord_list = []\n self.read_coords()\n carbon_atoms = [atom for atom in self.coord_list if atom[\"el\"] == 'c']\n sp2_pseudocarbon_list = []\n sp2_2e_pseudocarbon_list = []\n sp2_2e_pseudohydrogen_list = []\n sp3_pseudocarbon_list = []\n\n for atom in carbon_atoms:\n carbon_pseudos = self.identify_pseudocarbon_potentials(atom['#'])\n\n # if 3 atoms within pseudo-distance this is an sp3 pseudo-carbon\n if len(carbon_pseudos) == 3:\n sp3_pseudocarbon_list.append(atom)\n\n # if 4 atoms within pseudo-distance this is an sp2 2e pseudo-carbon\n elif len(carbon_pseudos) == 4:\n sp2_2e_pseudocarbon_list.append(atom)\n sp2_2e_pseudohydrogen_list.extend(carbon_pseudos)\n\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n elif len(carbon_pseudos) == 6:\n sp2_pseudocarbon_list.append(atom)\n\n\n log_file.writelines(\n 'sp2 pseudocarbon indices: %s \\nsp3 pseudocarbon indices: %s\\nsp2 2e pseudocarbon indices: %s\\nsp2 2e pseudohydrogen indices: %s\\n' % (\n ','.join(str(carbon['#']) for carbon in sp2_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp3_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp2_2e_pseudocarbon_list),\n ','.join(str(carbon['#']) for carbon in sp2_2e_pseudohydrogen_list)\n ))\n\n # Need to supply potentials to atoms\n define_cmds_path = 'define_add_pseudos'\n with open(os.path.join(define_cmds_path), 'w') as var_file:\n var_file.writelines(define_cmds % (\n # sp2 potentials\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_pseudocarbon_list], 'ecp', self.sp2_carbon_ecp),\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'ecp', self.sp2_hydrogen_ecp),\n # sp3 potentials\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp3_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp3_pseudocarbon_list], 'ecp', self.sp3_carbon_ecp),\n self.supply_ecps_bases_to_define(self.sp3_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define(self.sp3_pseudo_element, 'ecp', self.sp3_hydrogen_ecp),\n # sp2 2e potentials\n self.supply_ecps_bases_to_define(self.sp2_pseudo_element, 'b', 'none'),\n self.supply_ecps_bases_to_define([hydrogen['#'] for hydrogen in sp2_2e_pseudohydrogen_list], 'ecp', self.sp2_2e_hydrogen_ecp),\n 
self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_2e_pseudocarbon_list], 'b', self.pseudo_carbon_basis),\n self.supply_ecps_bases_to_define([carbon['#'] for carbon in sp2_2e_pseudocarbon_list], 'ecp', self.sp2_2e_carbon_ecp),\n ))\n\n self.run_define('define_add_pseudos')",
"def test_bespoke_target_torsion_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_file(get_data(\"OCCO.sdf\"))\n\n torsion_smirks = gen._get_bespoke_torsion_smirks(molecule=mol, central_bonds=[(1, 2)])\n # there should be 3 unique smirks for this molecule\n # H-C-C-H, H-C-C-O, O-C-C-O\n assert len(torsion_smirks) == 3\n for smirk in torsion_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n assert compare_matches(atoms, smirk.atoms) is True",
"def generate(self, analysis):\n\n #analysis = ['p','a','n','i','c','+past form']\n # Let's define our first FST\n\n f1 = FST('morphology-generate')\n \n f1.add_state('1')\n f1.add_state('2')\n f1.add_state('3')\n f1.add_state('4')\n f1.add_state('5') \n f1.add_state('6') #non-c state\n f1.add_state('7') #c state\n f1.add_state('8') #add k\n f1.add_state('9') #+present \n f1.add_state('10') #+past\n \n f1.initial_state = '1'\n #f1.set_final('8')\n f1.set_final('9')\n f1.set_final('10')\n \n #state 1 to 2, and 2 to 3. we don't care about vowel or consonant here\n for letter in list(string.ascii_letters):\n f1.add_arc('1', '2', letter, letter)\n f1.add_arc('2', '3', letter, letter)\n \n #3 to 5 input/output consonants\n vowels = ['a','e','i','o','u','A','E','I','O','U']\n consonants = [c for c in list(string.ascii_letters) if c not in vowels]\n non_c_con = [c for c in consonants if c not in ['c', 'C']]\n for letter in consonants:\n f1.add_arc('3', '5', letter, letter)\n f1.add_arc('5', '5', letter, letter)\n \n #the third and fourth input should be a vowel\n for letter in vowels:\n f1.add_arc('3', '4', letter, letter)\n f1.add_arc('4', '4', letter, letter)\n \n #if the fourth input is a non c consonant, go to 5\n for letter in non_c_con:\n f1.add_arc('4', '5', letter, letter)\n \n #if the input at state 5 is a vowel, go back to 4 \n for letter in vowels:\n f1.add_arc('5', '4', letter, letter)\n \n #if the second last letter is a c, go to 7\n f1.add_arc('4', '7', 'c', 'c')\n \n #add k after 7\n f1.add_arc('7', '8', '', 'k')\n #output nothing from 5 to 8\n f1.add_arc('5', '8', '', '')\n \n f1.add_arc('8','9','+present participle form','ing')\n f1.add_arc('8','10','+past form','ed')\n \n output = f1.transduce(analysis)[0]\n return ''.join(output)",
"def test_bespoke_angle_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_smiles(\"CC\")\n\n angle_smirks = gen._get_bespoke_angle_smirks(molecule=mol)\n # there should be 2 unique smirks\n assert len(angle_smirks) == 2\n all_angles = []\n for smirk in angle_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n all_angles.extend(atoms)\n assert set(atoms) == smirk.atoms\n # make sure all angles are covered\n for angle in mol.angles:\n assert tuple([atom.molecule_atom_index for atom in angle]) in all_angles",
"def generate_SBB_representation (nffg, add_sg_hops=False,\n log=logging.getLogger(\"SBB\")):\n if nffg is None:\n log.error(\"Missing global resource info! Skip OneBisBis generation!\")\n return None\n # Create Single BiSBiS NFFG\n log.debug(\"Generate trivial SingleBiSBiS NFFG based on %s:\" % nffg)\n log.debug(\"START SBB generation...\")\n sbb = NFFG(id=\"SingleBiSBiS\", name=\"Single-BiSBiS-View\")\n # Create the single BiSBiS infra\n sbb_infra = sbb.add_infra(id=\"SingleBiSBiS\",\n name=\"SingleBiSBiS\",\n domain=NFFG.DEFAULT_DOMAIN,\n infra_type=NFFG.TYPE_INFRA_BISBIS)\n # Compute and add resources\n # Sum of available CPU\n try:\n sbb_infra.resources.cpu = sum(\n # If iterator is empty, sum got None --> TypeError thrown by sum\n (n.resources.cpu for n in nffg.infras if\n n.resources.cpu is not None) or None)\n except TypeError:\n sbb_infra.resources.cpu = None\n # Sum of available memory\n try:\n sbb_infra.resources.mem = sum(\n # If iterator is empty, sum got None --> TypeError thrown by sum\n (n.resources.mem for n in nffg.infras if\n n.resources.mem is not None) or None)\n except TypeError:\n sbb_infra.resources.mem = None\n # Sum of available storage\n try:\n sbb_infra.resources.storage = sum(\n # If iterator is empty, sum got None --> TypeError thrown by sum\n (n.resources.storage for n in nffg.infras if\n n.resources.storage is not None) or None)\n except TypeError:\n sbb_infra.resources.storage = None\n # Minimal available delay value of infras and links in DoV\n try:\n # Get the minimum delay in Dov to avoid false negative mapping result\n sbb_infra.resources.delay = min(itertools.chain(\n # If the chained iterators is empty --> ValueError thrown by sum\n (n.resources.delay for n in nffg.infras if\n n.resources.delay is not None),\n (l.delay for l in nffg.links if l.delay is not None)))\n except ValueError:\n sbb_infra.resources.delay = None\n # Maximum available bandwidth value of infras and links in DoV\n try:\n max_bw = max(itertools.chain(\n (n.resources.bandwidth for n in nffg.infras if\n n.resources.bandwidth is not None),\n (l.bandwidth for l in nffg.links if l.bandwidth is not None)))\n # Number of infras and links in DoV\n sum_infra_link = sum(1 for _ in itertools.chain(nffg.infras, nffg.links))\n # Overestimate switching capacity to avoid false positive mapping result\n sbb_infra.resources.bandwidth = max_bw * sum_infra_link\n except ValueError:\n sbb_infra.resources.bandwidth = None\n log.debug(\"Computed SingleBiBBiS resources: %s\" % sbb_infra.resources)\n # Add supported types\n s_types = set()\n for infra in nffg.infras:\n s_types = s_types.union(infra.supported)\n sbb_infra.add_supported_type(s_types)\n log.debug(\"Added supported types: %s\" % s_types)\n log.debug(\"Added Infra BiSBiS: %s\" % sbb_infra)\n log.log(5, \"SBB:\\n%s\" % sbb_infra.dump())\n # Add existing NFs\n for nf in nffg.nfs:\n c_nf = sbb.add_nf(nf=nf.copy())\n log.debug(\"Added NF: %s\" % c_nf)\n log.log(5, \"NF:\\n%s\" % nf.dump())\n # Discover and add NF connections\n for u, v, l in nffg.real_out_edges_iter(nf.id):\n if l.type != NFFG.TYPE_LINK_DYNAMIC:\n continue\n # Explicitly add links for both direction\n link1, link2 = sbb.add_undirected_link(port1=c_nf.ports[l.src.id],\n port2=sbb_infra.add_port(\n id=l.dst.id),\n p1p2id=l.id,\n p2p1id=\"%s-back\" % l.id,\n dynamic=True,\n delay=l.delay,\n bandwidth=l.bandwidth)\n log.debug(\"Added connection: %s\" % link1)\n log.debug(\"Added connection: %s\" % link2)\n # Use SAP id --> SBB port id cache for delay matrix calculation\n 
delay_matrix_cache = {}\n # Add existing SAPs and their connections to the SingleBiSBiS infra\n for sap in nffg.saps:\n c_sap = sbb.add_sap(sap_obj=sap.copy())\n log.debug(\"Added SAP: %s\" % c_sap)\n log.log(5, \"SAP:\\n%s\" % c_sap.dump())\n # Discover and add SAP connections\n for u, v, l in nffg.real_out_edges_iter(sap.id):\n if len(sap.ports) > 1:\n log.warning(\"SAP contains multiple port!\")\n sbb_infra_port = sbb_infra.add_port(id=str(c_sap.id),\n sap=sap.ports.container[0].sap)\n # Explicitly add links for both direction\n link1, link2 = sbb.add_undirected_link(port1=c_sap.ports[l.src.id],\n port2=sbb_infra_port,\n p1p2id=l.id,\n p2p1id=\"%s-back\" % l.id,\n delay=l.delay,\n bandwidth=l.bandwidth)\n log.debug(\"Added connection: %s\" % link1)\n log.debug(\"Added connection: %s\" % link2)\n delay_matrix_cache[c_sap.id] = sbb_infra_port.id\n # Shortest paths in format of dict in dict keyed with node ids\n # e.g. SAP2 --> EE1 --> 4.9\n latency_paths = NFFGToolBox.shortestPathsInLatency(G=nffg.network)\n log.log(5, \"Calculated latency paths for delay matrix:\\n%s\"\n % pprint.pformat(latency_paths))\n log.log(5, \"Collected SAP ports for delay matrix:\\n%s\"\n % pprint.pformat(delay_matrix_cache))\n dm_elements = itertools.permutations(delay_matrix_cache.keys(), 2)\n for src, dst in dm_elements:\n if src not in latency_paths:\n log.warning(\"Missing node: %s for latency paths: %s!\"\n % (src, (src, dst)))\n continue\n if dst not in latency_paths[src]:\n log.warning(\"Missing node: %s for latency paths: %s!\"\n % (src, (src, dst)))\n else:\n sbb_infra.delay_matrix.add_delay(src=src,\n dst=dst,\n delay=latency_paths[src][dst])\n log.debug(\"Added delay matrix element [%s --> %s]: %s\"\n % (src, dst, latency_paths[src][dst]))\n # Recreate flowrules based on NBalazs functions\n sg_hop_info = NFFGToolBox.get_all_sghop_info(nffg=nffg)\n log.debug(\"Detected SG hop info:\\n%s\" % pprint.pformat(sg_hop_info))\n log.debug(\"Recreate flowrules...\")\n for sg_id, value in sg_hop_info.iteritems():\n sg_src_node = value[0].node.id\n sg_src_port = value[0].id\n sg_dst_node = value[1].node.id\n sg_dst_port = value[1].id\n flowclass = value[2]\n fr_bw = value[3]\n fr_delay = value[4]\n fr_hop = sg_id\n sbb_src_port = [l.dst for u, v, l in\n sbb.network.out_edges_iter(sg_src_node, data=True) if\n l.src.id == sg_src_port and l.src.node.id == sg_src_node]\n if len(sbb_src_port) < 1:\n log.warning(\"No opposite Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node\" % (\n sg_src_node, sg_src_port, fr_hop))\n continue\n if len(sbb_src_port) > 1:\n log.warning(\"Too much Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node: %s\" % (\n sg_src_node, sg_src_port, fr_hop, sbb_src_port))\n continue\n sbb_src_port = sbb_src_port.pop()\n sbb_dst_port = [l.dst for u, v, l in\n sbb.network.out_edges_iter(sg_dst_node, data=True) if\n l.src.id == sg_dst_port and l.src.node.id == sg_dst_node]\n if len(sbb_dst_port) < 1:\n log.warning(\"No opposite Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node\" % (\n sg_dst_node, sg_dst_port, fr_hop))\n continue\n if len(sbb_dst_port) > 1:\n log.warning(\"Too much Port(node: %s, id: %s) was found for SG hop: \"\n \"%s in new SingleBiSBiS node: %s\" % (\n sg_dst_node, sg_dst_port, fr_hop, sbb_dst_port))\n continue\n sbb_dst_port = sbb_dst_port.pop()\n if flowclass:\n fr_match = \"in_port=%s;flowclass=%s\" % (sbb_src_port.id, flowclass)\n else:\n fr_match = \"in_port=%s\" % sbb_src_port.id\n 
fr_action = \"output=%s\" % sbb_dst_port.id\n if value[0].node.type == NFFG.TYPE_SAP and \\\n value[1].node.type == NFFG.TYPE_NF and \\\n value[0].sap is not None:\n # Update action for flowrule connecting inter-domain SAP to NF\n fr_action += \";UNTAG\"\n fr = sbb_src_port.add_flowrule(id=fr_hop,\n match=fr_match,\n action=fr_action,\n bandwidth=fr_bw,\n delay=fr_delay, )\n log.debug(\"Added flowrule: %s\" % fr)\n if add_sg_hops:\n log.debug(\"Recreate SG hops...\")\n for sg_id, value in sg_hop_info.iteritems():\n sg_src_port = value[0]\n sg_dst_port = value[1]\n hop_fc = value[2]\n hop_bw = value[3]\n hop_delay = value[4]\n sg = sbb.add_sglink(id=sg_id,\n src_port=sg_src_port,\n dst_port=sg_dst_port,\n flowclass=hop_fc,\n delay=hop_delay,\n bandwidth=hop_bw)\n log.debug(\"Added SG hop: %s\" % sg)\n else:\n log.debug(\"Skip SG hop recreation for the SingleBiSBiS!\")\n NFFGToolBox.rewrite_interdomain_tags([(sbb.id, sbb)])\n log.debug(\"END SBB generation...\")\n # Return with Single BiSBiS infra\n return sbb",
"def test_noise_model_basis_gates(self):\n basis_gates = ['u1', 'u2', 'u3', 'cx']\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n self.assertEqual(model.basis_gates, target)\n\n # Check adding readout errors doesn't add to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_readout_error([[0.9, 0.1], [0, 1]], False)\n self.assertEqual(model.basis_gates, target)\n model.add_readout_error([[0.9, 0.1], [0, 1]], [2], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a reset instruction error isn't added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_quantum_error(reset_error(0.2), ['reset'], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a non-standard gate isn't added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates)\n model.add_all_qubit_quantum_error(reset_error(0.2), ['label'], False)\n self.assertEqual(model.basis_gates, target)\n\n # Check a standard gate is added to basis gates\n model = NoiseModel(basis_gates)\n target = sorted(basis_gates + ['h'])\n model.add_all_qubit_quantum_error(reset_error(0.2), ['h'], False)\n self.assertEqual(model.basis_gates, target)",
"def builder(plates, start, name, assay, isolate, layout, exp_date, mic):\n plateno = 1\n rid = start # record ID\n readno = 1\n segno = 1\n for plate in plates:\n seg = plateno * 8\n startseg = seg - 8\n segment = layout[startseg:seg]\n plate_mic = mic[startseg:seg]\n with open(plate, 'r') as infile:\n # 3 reads per plate\n front = 'INSERT INTO `mic` VALUES ('\n sep = ','\n row = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\n row_num = 0\n for line in infile:\n this_row = row[row_num]\n pep = segment[row_num].split(' ')[0]\n this_mic = plate_mic[row_num]\n # note that blood is hard-coded to NA right now\n buff = [str(rid), str(assay), str(isolate), '1', str(pep), name, 'assayed', 'experiment',\n str(readno), exp_date, this_row]\n rec = line.strip().split(' ')\n buff.extend(rec)\n buff.extend([this_mic, 'NA'])\n buff_form = buff[:5] + [\"'\" + x + \"'\" for x in buff[5:]] + ['NULL', 'NULL);']\n outbuff = front + ','.join(buff_form)\n outbuff = re.sub(\"experiment','4',\",\"experiment','AVERAGE',\",outbuff)\n\n # increment counters\n rid += 1\n if row_num == 7:\n row_num = 0\n if readno == 4: # assumes 3 reads and an average\n plateno += 1\n readno = 1\n else:\n readno += 1\n else:\n row_num += 1\n\n yield outbuff",
"def test_generate_smirks(bespoke_smirks):\n gen = SmirksGenerator()\n gen.target_smirks = [SmirksType.Vdw, ]\n gen.generate_bespoke_terms = bespoke_smirks\n\n mol = Molecule.from_smiles(\"CC\")\n smirks_list = gen.generate_smirks(molecule=mol)\n\n # we only request one parameter type\n types = set([smirk.type for smirk in smirks_list])\n assert len(types) == 1",
"def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False):\n if base is None:\n base = []\n if gens is None:\n gens = self.generators[:]\n degree = self.degree\n id_af = list(range(degree))\n # handle the trivial group\n if len(gens) == 1 and gens[0].is_Identity:\n if slp_dict:\n return base, gens, {gens[0]: [gens[0]]}\n return base, gens\n # prevent side effects\n _base, _gens = base[:], gens[:]\n # remove the identity as a generator\n _gens = [x for x in _gens if not x.is_Identity]\n # make sure no generator fixes all base points\n for gen in _gens:\n if all(x == gen._array_form[x] for x in _base):\n for new in id_af:\n if gen._array_form[new] != new:\n break\n else:\n assert None # can this ever happen?\n _base.append(new)\n # distribute generators according to basic stabilizers\n strong_gens_distr = _distribute_gens_by_base(_base, _gens)\n strong_gens_slp = []\n # initialize the basic stabilizers, basic orbits and basic transversals\n orbs = {}\n transversals = {}\n slps = {}\n base_len = len(_base)\n for i in range(base_len):\n transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i],\n _base[i], pairs=True, af=True, slp=True)\n transversals[i] = dict(transversals[i])\n orbs[i] = list(transversals[i].keys())\n # main loop: amend the stabilizer chain until we have generators\n # for all stabilizers\n i = base_len - 1\n while i >= 0:\n # this flag is used to continue with the main loop from inside\n # a nested loop\n continue_i = False\n # test the generators for being a strong generating set\n db = {}\n for beta, u_beta in list(transversals[i].items()):\n for j, gen in enumerate(strong_gens_distr[i]):\n gb = gen._array_form[beta]\n u1 = transversals[i][gb]\n g1 = _af_rmul(gen._array_form, u_beta)\n slp = [(i, g) for g in slps[i][beta]]\n slp = [(i, j)] + slp\n if g1 != u1:\n # test if the schreier generator is in the i+1-th\n # would-be basic stabilizer\n y = True\n try:\n u1_inv = db[gb]\n except KeyError:\n u1_inv = db[gb] = _af_invert(u1)\n schreier_gen = _af_rmul(u1_inv, g1)\n u1_inv_slp = slps[i][gb][:]\n u1_inv_slp.reverse()\n u1_inv_slp = [(i, (g,)) for g in u1_inv_slp]\n slp = u1_inv_slp + slp\n h, j, slp = _strip_af(schreier_gen, _base, orbs, transversals, i, slp=slp, slps=slps)\n if j <= base_len:\n # new strong generator h at level j\n y = False\n elif h:\n # h fixes all base points\n y = False\n moved = 0\n while h[moved] == moved:\n moved += 1\n _base.append(moved)\n base_len += 1\n strong_gens_distr.append([])\n if y is False:\n # if a new strong generator is found, update the\n # data structures and start over\n h = _af_new(h)\n strong_gens_slp.append((h, slp))\n for l in range(i + 1, j):\n strong_gens_distr[l].append(h)\n transversals[l], slps[l] =\\\n _orbit_transversal(degree, strong_gens_distr[l],\n _base[l], pairs=True, af=True, slp=True)\n transversals[l] = dict(transversals[l])\n orbs[l] = list(transversals[l].keys())\n i = j - 1\n # continue main loop using the flag\n continue_i = True\n if continue_i is True:\n break\n if continue_i is True:\n break\n if continue_i is True:\n continue\n i -= 1\n\n strong_gens = _gens[:]\n\n if slp_dict:\n # create the list of the strong generators strong_gens and\n # rewrite the indices of strong_gens_slp in terms of the\n # elements of strong_gens\n for k, slp in strong_gens_slp:\n strong_gens.append(k)\n for i in range(len(slp)):\n s = slp[i]\n if isinstance(s[1], tuple):\n slp[i] = strong_gens_distr[s[0]][s[1][0]]**-1\n else:\n slp[i] = strong_gens_distr[s[0]][s[1]]\n strong_gens_slp = 
dict(strong_gens_slp)\n # add the original generators\n for g in _gens:\n strong_gens_slp[g] = [g]\n return (_base, strong_gens, strong_gens_slp)\n\n strong_gens.extend([k for k, _ in strong_gens_slp])\n return _base, strong_gens",
"def create_GO(init_file, no_COOH, no_epoxy, no_OH, filename1):\n global atoms\n global bond_list\n bond_list = bond_list_1\n atoms = read_in_graphene(init_file)\n global anywhere_map\n anywhere_map = get_map_anywhere(atoms)\n global edge_map\n edge_map = get_map_edge(atoms)\n \n list_residue_numbers = [x.residue_number for x in atoms]\n added_functional_groups = max(list_residue_numbers)\n \n must_add = no_COOH + no_epoxy + no_OH\n while (must_add > 0):\n print(\"Left to add: \", \"cooh: \", no_COOH, \"epoxy: \", no_epoxy, \"hydroxyl: \", no_OH)\n chosen = random.choice(pick_to_add(no_COOH, no_epoxy, no_OH))\n if (chosen == \"carboxyl\"):\n attempt = 0\n while (attempt < 50):\n old_length = len(atoms)\n new_atoms = add_carboxyl(random_pick_spot(\"carboxyl\", edge_map, anywhere_map), atoms, added_functional_groups, top_or_down())\n if (old_length != len(new_atoms)):\n atoms = new_atoms\n added_functional_groups += 1\n must_add -= 1\n no_COOH -= 1\n attempt = 1888\n else:\n attempt += 1\n if (attempt == 50):\n must_add = -1\n elif (chosen == \"epoxy\"): \n attempt = 0\n while (attempt < 50):\n old_length = len(atoms)\n new_atoms = add_epoxy(random_pick_spot(\"epoxy\", edge_map, anywhere_map), atoms, added_functional_groups, top_or_down())\n if (old_length != len(new_atoms)):\n atoms = new_atoms\n added_functional_groups += 1\n must_add -= 1\n no_epoxy -= 1\n attempt = 1888\n else:\n attempt += 1\n if (attempt == 50):\n must_add = -1\n elif (chosen == \"hydroxyl\"):\n attempt = 0\n while (attempt < 50):\n old_length = len(atoms)\n new_atoms = add_hydroxyl(random_pick_spot(\"hydroxyl\", edge_map, anywhere_map), atoms, added_functional_groups, top_or_down())\n if (old_length != len(new_atoms)):\n atoms = new_atoms\n added_functional_groups += 1\n must_add -= 1\n no_OH -=1\n attempt = 1888 \n else:\n attempt += 1\n if (attempt == 50):\n must_add = -1\n atno = 1\n new_list = []\n for atom in atoms:\n if (atom.atom_name == \"CX\"):\n New_CX = Atom(atno, \"CX\", \"GGG\", atno, atom.x, atom.y, atom.z)\n new_list.append(New_CX)\n atno += 1 \n \n for atom in atoms:\n if (atom.atom_name == \"C4\"):\n check = False\n for atom_CY in atoms:\n if ((atom_CY.atom_name == \"CY\") and (atom_CY.residue_name == \"C1A\") and (atom_CY.residue_number == atom.residue_number)):\n for atom_OJ in atoms:\n if ((atom_OJ.atom_name == \"OJ\") and (atom_OJ.residue_name == \"C1A\") and (atom_OJ.residue_number == atom.residue_number)):\n for atom_OK in atoms:\n if ((atom_OK.atom_name == \"OK\") and (atom_OK.residue_name == \"C1A\") and (atom_OK.residue_number == atom.residue_number)):\n for atom_HK in atoms:\n if ((atom_HK.atom_name == \"HK\") and (atom_HK.residue_name == \"C1A\") and (atom_HK.residue_number == atom.residue_number)):\n New_CY = Atom(atno + 0, \"CY\", \"C1A\", atom.residue_number, atom_CY.x, atom_CY.y, atom_CY.z )\n New_C4 = Atom(atno + 1, \"C4\", \"C1A\", atom.residue_number, atom.x, atom.y, atom.z)\n New_OJ = Atom(atno + 2, \"OJ\", \"C1A\", atom.residue_number, atom_OJ.x, atom_OJ.y, atom_OJ.z)\n New_OK = Atom(atno + 3, \"OK\", \"C1A\", atom.residue_number, atom_OK.x, atom_OK.y, atom_OK.z)\n New_HK = Atom(atno + 4, \"HK\", \"C1A\", atom.residue_number, atom_HK.x, atom_HK.y, atom_HK.z)\n atno += 5\n new_list.append(New_CY); new_list.append(New_C4); new_list.append(New_OJ); new_list.append(New_OK); new_list.append(New_HK);\n check = True\n break\n if (check == True):\n break\n if (check == True):\n break\n if (check == True):\n break \n \n elif (atom.atom_name == \"OE\"): \n check = False\n for atom_CY in 
atoms:\n if ((atom_CY.atom_name == \"CY\") and (atom_CY.residue_name == \"E1A\") and (atom_CY.residue_number == atom.residue_number)):\n for atom_CY2 in atoms: \n if ((atom_CY2.atom_name == \"CZ\") and (atom_CY2.residue_name == \"E1A\") and (atom_CY2.residue_number == atom.residue_number) and (atom_CY2 != atom_CY)):\n New_CY = Atom( atno + 0, \"CY\", \"E1A\", atom.residue_number, atom_CY.x, atom_CY.y, atom_CY.z)\n New_CY2 = Atom(atno + 1, \"CZ\", \"E1A\", atom.residue_number, atom_CY2.x, atom_CY2.y, atom_CY2.z)\n New_OE = Atom( atno + 2, \"OE\", \"E1A\", atom.residue_number, atom.x, atom.y, atom.z)\n atno += 3\n new_list.append(New_CY); new_list.append(New_CY2); new_list.append(New_OE);\n check = True\n break\n if (check == True):\n break\n elif (atom.atom_name == \"OL\"):\n check = False\n for atom_CY in atoms:\n if ((atom_CY.atom_name == \"CY\") and (atom_CY.residue_name == \"H1A\") and (atom_CY.residue_number == atom.residue_number)):\n for atom_HK in atoms:\n if ((atom_HK.atom_name == \"HK\") and (atom_HK.residue_name == \"H1A\") and (atom_HK.residue_number == atom.residue_number)):\n New_CY = Atom(atno + 0, \"CY\", \"H1A\", atom.residue_number, atom_CY.x, atom_CY.y, atom_CY.z)\n New_OL = Atom(atno + 1, \"OL\", \"H1A\", atom.residue_number, atom.x, atom.y, atom.z)\n New_HK = Atom(atno + 2, \"HK\", \"H1A\", atom.residue_number, atom_HK.x, atom_HK.y, atom_HK.z)\n atno += 3\n new_list.append(New_CY); new_list.append(New_OL); new_list.append(New_HK);\n check = True\n break\n if (check == True):\n break\n \n atoms = new_list.copy()\n writepdb(atoms, filename1)\n sum_c1a = 0; sum_e1a = 0; sum_h1a = 0; sum_ggg = 0\n for atom in atoms:\n if (atom.residue_name == \"C1A\"):\n sum_c1a += 1\n elif (atom.residue_name == \"E1A\"):\n sum_e1a += 1\n elif (atom.residue_name == \"H1A\"):\n sum_h1a += 1\n elif (atom.residue_name == \"GGG\"):\n sum_ggg += 1\n print(\"Placed:\")\n print(\"carboxyl: \", sum_c1a/5)\n print(\"epoxy: \", sum_e1a/3)\n print(\"hydroxyl: \", sum_h1a/3)\n print(\"graphene atoms (CX - GGG) left: \", sum_ggg)\n return 'done.'",
"def generate_bnd(cli_file, geo_file, slf_file, bnd_file, varnames, varunits):\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ cli+slf new mesh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if not path.exists(cli_file):\n raise TelemacException(\\\n '... the provided cli_file does not seem to exist:'\n ' {}\\n\\n'.format(cli_file))\n if not path.exists(geo_file):\n raise TelemacException(\\\n '... the provided geo_file does not seem to exist: '\n '{}\\n\\n'.format(geo_file))\n\n if len(varnames) != len(varunits):\n raise TelemacException(\\\n 'Not the same number of variables and units\\nvarnames: {}\\nvarunits: {}'\n '{}\\n\\n'.format(varnames, varunits))\n\n\n # Read the new CLI file to get boundary node numbers\n print(' +> getting hold of the Conlim file and of its liquid boundaries')\n cli = Conlim(cli_file)\n # Keeping only open boundary nodes\n bor = np.extract(cli.bor['lih'] != 2, cli.bor['n'])\n\n # Find corresponding (x,y) in corresponding new mesh\n print(' +> getting hold of the GEO file and of its bathymetry')\n geo = Selafin(geo_file)\n xys = np.vstack((geo.meshx[bor-1], geo.meshy[bor-1])).T\n _ = geo.get_variables_at(0,\\\n subset_variables_slf(\"BOTTOM: \", geo.varnames)[0])[0]\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ slf existing res ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if not path.exists(slf_file):\n raise TelemacException(\\\n '... the provided slf_file does not seem to exist: '\n '{}\\n\\n'.format(slf_file))\n slf = Selafin(slf_file)\n slf.set_kd_tree()\n slf.set_mpl_tri()\n\n print(' +> support extraction')\n # Extract triangles and weigths in 2D\n support2d = []\n ibar = 0\n pbar = ProgressBar(maxval=len(xys)).start()\n for xyi in xys:\n support2d.append(xys_locate_mesh(xyi, slf.ikle2, slf.meshx, slf.meshy,\n slf.tree, slf.neighbours))\n ibar += 1\n pbar.update(ibar)\n pbar.finish()\n # Extract support in 3D\n support3d = list(zip(support2d, len(xys)*[range(slf.nplan)]))\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ writes BND header ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n bnd = Selafin('')\n bnd.fole = {}\n bnd.fole.update({'hook':open(bnd_file, 'wb')})\n bnd.fole.update({'name':bnd_file})\n bnd.fole.update({'endian':\">\"}) # big endian\n bnd.fole.update({'float':('f', 4)}) # single precision\n\n # Meta data and variable names\n bnd.title = ''\n bnd.nbv1 = len(varnames)\n # /!\\ ELEVATION has to be the first variable\n # (for possible vertical re-interpolation within TELEMAC)\n\n bnd.varnames = []\n bnd.varunits = []\n for var, unit in zip(varnames, varunits):\n new_var = var + (16-len(var))*\" \"\n new_unit = unit + (16-len(unit))*\" \"\n bnd.varnames.append(new_var)\n bnd.varunits.append(new_unit)\n\n bnd.nvar = bnd.nbv1\n bnd.varindex = range(bnd.nvar)\n\n # Sizes and mesh connectivity\n bnd.nplan = slf.nplan\n # Number of nodes per boundary element (ndp2 in 2D and ndp3 in 3D)\n bnd.ndp2 = 2\n bnd.ndp3 = 4\n bnd.npoin2 = len(bor)\n bnd.npoin3 = bnd.npoin2*slf.nplan\n bnd.iparam = [0, 0, 0, 0, 0, 0, bnd.nplan, 0, 0, 1]\n bnd.ipob2 = bor # /!\\ note that ipobo keeps the original numbering\n print(' +> masking and setting connectivity')\n # Set the array that only includes elements of geo.ikle2\n # with at least two nodes in bor\n array_1d = np.in1d(geo.ikle2, np.sort(bor-1))\n mask = geo.ikle2[np.where(np.sum(array_1d.reshape(geo.nelem2, geo.ndp2),\n axis=1) == 2)]\n # this ikle2 keeps the original numbering\n ikle2 = 
np.ravel(mask)[np.in1d(mask, np.sort(bor-1))].reshape(len(mask), 2)\n # ~~> re-numbering ikle2 as a local connectivity matrix\n knolg, _ = np.unique(np.ravel(ikle2), return_index=True)\n knogl = dict(zip(knolg, range(len(knolg))))\n bnd.ikle2 = - np.ones_like(ikle2, dtype=np.int)\n for k in range(len(ikle2)):\n # /!\\ bnd.ikle2 has a local numbering, fit to the boundary elements\n bnd.ikle2[k] = [knogl[ikle2[k][0]], knogl[ikle2[k][1]]]\n # Last few numbers\n bnd.nelem2 = len(bnd.ikle2)\n if slf.nplan > 1:\n bnd.nelem3 = bnd.nelem2*(slf.nplan-1)\n else:\n bnd.nelem3 = bnd.nelem2\n bnd.ndp3 = bnd.ndp2\n # 3D structures\n if slf.nplan > 1:\n bnd.ipob3 = np.ravel(np.add(np.repeat(bnd.ipob2, slf.nplan)\\\n .reshape((bnd.npoin2, slf.nplan)),\n bnd.npoin2*np.arange(slf.nplan)).T)\n bnd.ikle3 = \\\n np.repeat(bnd.npoin2*np.arange(slf.nplan-1),\n bnd.nelem2*bnd.ndp3)\\\n .reshape((bnd.nelem2*(slf.nplan-1), bnd.ndp3)) + \\\n np.tile(np.add(np.tile(bnd.ikle2, 2),\n np.repeat(bnd.npoin2*np.arange(2), bnd.ndp2)),\n (slf.nplan-1, 1))\n else:\n bnd.ipob3 = bnd.ipob2\n bnd.ikle3 = bnd.ikle2\n # Mesh coordinates\n bnd.meshx = geo.meshx[bor-1]\n bnd.meshy = geo.meshy[bor-1]\n\n print(' +> writing header')\n # Write header\n bnd.append_header_slf()\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ writes BND core ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n print(' +> setting variables')\n # TIME and DATE extraction\n bnd.datetime = slf.datetime\n bnd.tags['times'] = slf.tags['times']\n # VARIABLE extraction\n list_var = varnames[0]+\": \"\n for var in varnames[1:]:\n list_var += \";\"+var+\": \"\n\n vrs = subset_variables_slf(list_var, slf.varnames)\n\n # Read / Write data, one time step at a time to support large files\n print(' +> reading / writing variables')\n pbar = ProgressBar(maxval=len(slf.tags['times'])).start()\n zeros = np.zeros((bnd.npoin3, 1), dtype=np.float)\n for itime in range(len(slf.tags['times'])):\n data = get_value_history_slf(slf.file, slf.tags, [itime], support3d,\n slf.nvar, slf.npoin3, slf.nplan, vrs)\n data = np.reshape(np.transpose(np.reshape(np.ravel(data),\n (bnd.nvar, bnd.npoin2,\n bnd.nplan)),\n (0, 2, 1)),\n (bnd.nvar, bnd.npoin3))\n bnd.append_core_time_slf(itime)\n bnd.append_core_vars_slf(data)\n pbar.update(itime)\n pbar.finish()\n\n # Close bnd_file\n bnd.fole['hook'].close()",
"def twostr_func(wavelength, F_s, solarzenithangle,albedo_dif, \n\t\t\talbedo_dir, temp_ground, w_0, g, tau_n, temp_c):\n\t\n\t########################\n\t###Import useful libraries\n\t########################\n\timport numpy as np\n\timport pdb\n\timport scipy.linalg\n\n\n\n\n\t########################\n\t###Define model parameters\n\t########################\n\t#Properties of the ground\n\temissivity_ground=1.-albedo_dif #emissivity of ground. 1=perfect BB emitter.\n\n\t#Optical depth structure\n\tNlayer=len(tau_n) #number of layers in the atmospheric model.\n\t\n\ttau_c=np.zeros(Nlayer+1)# tau_c[n] is the cumulative optical depth at the upper edge of layer n. So tau_c[0]=0, and tau_c[N] is the maximum possible.\n\tfor n in range(0, Nlayer):\n\t\ttau_c[n+1]=tau_c[n]+tau_n[n] \n\n\t#In the Toon formalism, j=0 corresponds to space, and j=N+1 corresponds to the planet surface.\n\t#These points in wavelength space define the edges of the bins in tau space. \n\t#Other terminology:\n\t#\ttau_c=cumulative optical depth of layers *above* layer n. \n\t#\ttau_n=total optical depth of the layer n\n\t#\ttau=total optical depth at any point within a layer n, hence satisfying 0<tau<tau_n\n\n\tmu_0=np.cos(solarzenithangle) #\"incident direction of solar beam\"\n\n\n\t########################\n\t###Determine the two-stream approximation coefficients.\n\t########################\n\t#Eddington and quadrature are good at solar wavelengths (i.e., not thermal blackbody dominated). delta scalings of Joseph et al (1976) recommended to replace w_0, g, tau in this case. However, when dominated by internal isotropic sources like the Planck function, hemispheric mean approximation is preferable. When w_0=0, quadrature case has problems. This happens esp at thermal wavelengths. Again this favors using hemispheric mean at these wavelengths\n\t\n\t#We use quadrature because 1) we are at solar wavelengths for this UV work and 2) that's what twostr.f does (which is our comparison case)\n\tgamma_1= np.sqrt(3.)*(2.-w_0*(1.+g))/2. #consistent with Toon et al; consistent with Pierrehumbert gamma_1\n\tgamma_2=np.sqrt(3.)*w_0*(1.-g)/2. #consistent with Toon et al; consistent with Pierrehumbert gamma_2\n\tgamma_3=(1.-np.sqrt(3.)*g*mu_0)/2. #consistent with Toon et al; equal to the Pierrehumbert gamma_plus/w_0\n\tgamma_4=1.-gamma_3 #consistent with Toon et al; equal to the Pierrehumbert gamma_minus/w_0\n\tmu_1=1./np.sqrt(3.)+np.zeros(np.shape(gamma_1))#In Toon paper (eqn 18), this is given by: (1.-w_0)/(gamma_1-gamma_2). For the quadrature approximation, it is 1./np.sqrt(3.). Given its use, it seems to relate most closely to gamma_B from Pierrehumbert (see eqs 5.27, 5.30)\n\n\t##Eddington\n\t#gamma_1= (7.-w_0*(4.+3.*g))/4.\n\t#gamma_2=-1.*(1.-w_0*(4.-3.*g))/4.\n\t#gamma_3=(2.-3.*g*mu_0)/4.\n\t#gamma_4=1.-gamma_3 #consistent with Toon et al; equal to the Pierrehumbert gamma_minus/w_0\n\t#mu_1=1./2.+np.zeros(np.shape(gamma_1))#In Toon paper (eqn 18), this is given by: (1.-w_0)/(gamma_1-gamma_2). For the quadrature approximation, it is 1./np.sqrt(3.). Given its use, it seems to relate most closely to gamma_B from Pierrehumbert (see eqs 5.27, 5.30)\n\n\talambda=np.sqrt(np.abs(gamma_1*gamma_1-gamma_2*gamma_2)) #this is the lower-case lambda, from eqn 21 of Toon et al\n\t\t\t\t\t\t\t\t #The absolute value was added based on the code Toon just sent us. This corresponds to his AK(L,J) parameter. 
But it should not matter since gamma_1>gamma_2 for w_0<1.\n\tclambda=(gamma_1-alambda)/(gamma_2) #this is the upper-case lambda, from eqn 22 of Toon et al\n\n\tEMLT=np.exp(-alambda*tau_n) #this appears to be a prefactor used to facilitate computation of eqn 44 of Toon et al\n\te1=1.+clambda*EMLT\n\te2=1.-clambda*EMLT\n\te3=clambda+EMLT\n\te4=clambda-EMLT\n\n\t########################\n\t###Set up calculation\n\t########################\n\t\"\"\"\n\tThe fundamental equation we are solving is of form:\n\tA_{l}*Y_{l-1}+B_{l}*Y_{l}+D{l+1}=E_{l} (equation 39 of Toon et al)\n\tHere, A_l, B_l, D_l, E_l are quantities we determine, and the Y_l is what we solve for.\n\tHence, we can summarize that we are solving a matrix equation that takes form:\n\tPY=E\n\twhere Y[l]=Y_l\n\t E[l]=E_l\n\t P[l, l-1]=A_l [row, column]\n\t P[l, l]=B_l\n\t P[l, l+1]=D_l\n\t P[i,j]=0 else\n\tToon et al use 1-indexing. Hence n runs from 1 to N, l runs from 1 to 2N, where N is the number of layers, and they have:\n\tY_l=Y_{1n} for l=1,3,5,...2n-1...2N-1\n\tY_l=Y_{2n} for l=2,4,6,...2n...2N\n\n\tHowever, we use Python, which has 0-indexing. Hence *we* choose that n runs from 0 to N-1, l runs from 0 to 2N-1, and:\n\tY_l=Y_{1n} for l=0,2,4...2n...2N-2\n\tY_l=Y_{2n} for l=1,3,5...2n+1...2N-1\n\n\tThe Y_{1n} and Y_{2n} are related to F^+_n and F^-_n via equations 31 and 32 of Toon et al.\n\tThis parametrization has been done to remove exponentials with positive operands (ie ones that could grow large and lead to numerical instabilities) from the matrix.\n\n\tNote: The mapping of this PQ=R to the F+ and F- is unclear because of 1) this parametrization in terms of Y_l (done to eliminate numerical instabilities) and 2)further linear combinations done to convert a pentagiagonal matrix to an even simpler tridiagonal matrix. Hence intuitive checks are hard.\n\t\"\"\"\n\n\t########################\n\t###Set up surface flux\n\t########################\n\tS_sfc=albedo_dir*mu_0*np.exp(-tau_c[-1]/mu_0)*np.pi*F_s+emissivity_ground*np.pi*Planck(temp_ground, wavelength)\n\t#Surface emission. Formed by adding blackbody emission from the ground to the reflected energy from the direct beam. The direct beam's reflected energy is assumed to be purely diffuse. This corresponds to equations 37 and 38 of Toon et al. Note that this does NOT match equation 5.31 of Pierrehumbert because it does not include the reflected diffuse radiation. So, this implicitly assumes the diffuse albedo to be 0. \n\n\t########################\n\t###Set up C-values\n\t########################\n\t#In the reshuffled set of parameters used in this formalism, these seem analagous to the forcing term in Pierrehumbert. All the added radiation is contained in here.\n\n\tdef C_plus(n, tau): #implementation of superposition of eqns 23 and 27 from Toon et al\n\t\tsolarrad_denominator=alambda[n]**2.-1./mu_0**2.\n\t\tsolarrad_prefactor=w_0[n]*F_s*np.pi\n\t\tsolarrad_exponential=np.exp(-1.*(tau_c[n]+tau)/mu_0)\n\t\tsolarrad_factor=((gamma_1[n]-1./mu_0)*gamma_3[n]+gamma_4[n]*gamma_2[n])\n\t\tsolarrad=solarrad_prefactor*solarrad_factor*solarrad_exponential/solarrad_denominator #units of flux: erg/s/cm2/nm\n\t\t\n\t\tblackbody_prefactor=2*np.pi*mu_1[n]\n\t\tB0n=Planck(temp_c[n], wavelength)\n\t\tB1n=(Planck(temp_c[n+1], wavelength)-B0n)/tau_n[n] #this is effectively a slope\n\t\tblackbody_factor=B0n+B1n*(tau+1./(gamma_1[n]+gamma_2[n]))\n\t\tblackbody=blackbody_prefactor*blackbody_factor #start with units of the Planck function, which are: erg/s/cm2/nm/sr. 
But multiplying by 2pi sr restores the units of flux. So can safely add them. \n\t\t\n\t\tresult=solarrad+blackbody\n\t\treturn result\n\n\tdef C_minus(n, tau): #implementation of superposition of eqns 24 and 27 from Toon et al\n\t\tsolarrad_denominator=alambda[n]**2.-1./mu_0**2.\n\t\tsolarrad_prefactor=w_0[n]*F_s*np.pi\n\t\tsolarrad_exponential=np.exp(-1.*(tau_c[n]+tau)/mu_0)\n\t\tsolarrad_factor=((gamma_1[n]+1./mu_0)*gamma_4[n]+gamma_3[n]*gamma_2[n])\n\t\tsolarrad=solarrad_prefactor*solarrad_factor*solarrad_exponential/solarrad_denominator #units of flux: erg/s/cm2/nm\n\t\t\n\t\tblackbody_prefactor=2*np.pi*mu_1[n]\n\t\tB0n=Planck(temp_c[n], wavelength)\n\t\tB1n=(Planck(temp_c[n+1], wavelength)-B0n)/tau_n[n] #this is effectively a slope\n\t\tblackbody_factor=B0n+B1n*(tau-1./(gamma_1[n]+gamma_2[n]))\n\t\tblackbody=blackbody_prefactor*blackbody_factor #start with units of the Planck function, which are: erg/s/cm2/nm/sr. But multiplying by 2pi sr restores the units of flux. So can safely add them. \n\t\t\n\t\tresult=solarrad+blackbody\n\t\treturn result\n\n\t########################\n\t###Calculate matrix coefficients\n\t#########################\n\t#initialize the A, B, D, and E.\n\tA=np.zeros(Nlayer*2)\n\tB=np.zeros(np.shape(A))\n\tD=np.zeros(np.shape(A))\n\tE=np.zeros(np.shape(A))\n\n\n\t#For l=0 (n=0) we have the boundary condition that the downward diffuse flux at the top of the first layer is equal to any incident diffuse downward flux. We set this to be zero.\n\tA[0]=0.\n\tB[0]=e1[0]\n\tD[0]=-1.*e2[0]\n\tE[0]=0.-1*C_minus(0,0) #This is really F_minus[0,0], i.e. we are assuming there is no downward diffuse flux from the top of the atmosphere.\n\n\t#for l=2N-1 (n=N-1), we have the boundary condition that the upward flux at the surface is the sume of the reflected downward diffuse flux and energy from any other sources (e.g. reflected direct beam, BB emission of the ground)/np.sqrt(3.)\n\tA[2*Nlayer-1]=e1[Nlayer-1]-albedo_dif*e3[Nlayer-1]\n\tB[2*Nlayer-1]=e2[Nlayer-1]-albedo_dif*e4[Nlayer-1]\n\tD[2*Nlayer-1]=0.\n\tE[2*Nlayer-1]=S_sfc-C_plus(Nlayer-1, tau_n[Nlayer-1])+albedo_dif*C_minus(Nlayer-1, tau_n[Nlayer-1])\n\n\t#There is a problem in the Toon paper. As written, the l=2n depends on e_n+1, running over the array edge. twostr.f resolves this by adopting a different mapping: their definition reduces to defining l=2(n+1) and running n from 0 to N-1. In this case, l=2 (The third value in the list of ls) depends on n=0 and n=1. This eliminates the overflow problem. We have implemented this below.\n\t\n\t##For n=1,2,3...N-1, l=2,4,6,...2N-2:\n\tfor n in range(0, Nlayer-1):\n\t\tl=2*(n+1)\n\t\tA[l]=e2[n]*e3[n]-e4[n]*e1[n]\n\t\tB[l]=e1[n]*e1[n+1]-e3[n]*e3[n+1]\n\t\tD[l]=e3[n]*e4[n+1]-e1[n]*e2[n+1]\n\t\t\n\t\tE[l]=e3[n]*(C_plus(n+1, 0.)-C_plus(n, tau_n[n]))+e1[n]*(C_minus(n,tau_n[n])-C_minus(n+1,0.))\n\n\n\t#For n=0...N-2, l=1,3...2N-3:\n\tfor n in range(0, Nlayer-1):\n\t\tl=2*n+1\n\t\tA[l]=e2[n+1]*e1[n]-e3[n]*e4[n+1]\n\t\tB[l]=e2[n]*e2[n+1]-e4[n]*e4[n+1]\n\t\tD[l]=e1[n+1]*e4[n+1]-e2[n+1]*e3[n+1]\n\t\t\n\t\tE[l]=e2[n+1]*(C_plus(n+1, 0.)-C_plus(n, tau_n[n]))-e4[n+1]*(C_minus(n+1, 0)-C_minus(n, tau_n[n])) #twostr.f has a -1*e_{4,n+1}. We have applied the same even though this is NOT what is written in the Toon et al paper. We have done this because Toon told us (6/26/2015) that there are some sign errors in the coefficients, and we currently trust the validated CLIMA code over the paper we know has errors in it. EDIT: Looking at the code Toon shared with us, he does the same. 
\n\n\n\t########################\n\t###Assemble matrix equation components\n\t#########################\n\tP=np.zeros([Nlayer*2,Nlayer*2])\n\n\t#l=0: no \"A\" coefficient b/c l-1 has no meaning\n\tP[0,0]=B[0]\n\tP[0,1]=D[0]\n\n\t#l=2N-1: no \"D\" coefficient b/c l+1 has no meaning\n\tP[2*Nlayer-1,2*Nlayer-1-1]=A[2*Nlayer-1]\n\tP[2*Nlayer-1,2*Nlayer-1]=B[2*Nlayer-1]\n\n\tfor l in range(1, Nlayer*2-1): #This populates the matrix P in PY=E. \n\t\tP[l, l-1]=A[l]\n\t\tP[l,l]=B[l]\n\t\tP[l,l+1]=D[l]\n\n\t########################\n\t###Invert matrix\n\t#########################\n\t#Y=np.linalg.solve(P, E) #this is the Y_l\n\t\n\t#try using a specialized solver\n\tab=np.zeros([3,2*Nlayer])\n\tab[0,:]=np.append(0.0, np.diag(P, k=1))\n\tab[1,:]=np.diag(P, k=0)\n\tab[2,:]=np.append(np.diag(P, k=-1),0.0)\n\t#pdb.set_trace()\n\tY=scipy.linalg.solve_banded((1,1), ab, E) #this is the Y_l\n\n\n\t########################\n\t###Convert from Y_l to Y_1n, Y_2n\n\t#########################\n\t#The Y_1n as defined in Toon et al correspond to l=1,3, 5...2N-1. Adjusting for the zero-indexing of Python as we have done, they instead correspond to l=0,2,...2N-2\n\t#The Y_2n as defined in Toon et al correspond to l=2,4,6...2N. Adjusting for Python zero-indexing as we have done, they instead correspond to l=1,3,5...2N-1.\n\t#For detail, see eq. 40.\n\tY_1=np.zeros(Nlayer)\n\tY_2=np.zeros(Nlayer)\n\tfor n in range(0, Nlayer):\n\t\tY_1[n]=Y[2*n]\n\t\tY_2[n]=Y[2*n+1] \n\t\t#last number called is Nlayer-1=N-1, so is consistent.\n\t\n\t########################\n\t###Convert from Y_1n, Y_2n to F_plus, F_minus\n\t#########################\n\tdef F_plus(n,tau): #defined from Eqn 31 of Toon et al.\n\t\tterm1=Y_1[n]*(np.exp(-alambda[n]*(tau_n[n]-tau))+clambda[n]*np.exp(-alambda[n]*tau))\n\t\tterm2=Y_2[n]*(np.exp(-alambda[n]*(tau_n[n]-tau))-clambda[n]*np.exp(-alambda[n]*tau))\n\t\tterm3=C_plus(n,tau)\n\t\t\n\t\tresult=term1+term2+term3\n\t\treturn result\n\n\tdef F_minus(n, tau): #defined from Eqn 32 of Toon et al.\n\t\tterm1=Y_1[n]*(clambda[n]*np.exp(-alambda[n]*(tau_n[n]-tau))+np.exp(-alambda[n]*tau))\n\t\tterm2=Y_2[n]*(clambda[n]*np.exp(-alambda[n]*(tau_n[n]-tau))-np.exp(-alambda[n]*tau))\n\t\tterm3=C_minus(n,tau)\n\t\t\n\t\tresult=term1+term2+term3\n\t\treturn result\n\t\n\t########################\n\t###Evaluate F_plus, F_minus at boundary edges\n\t#########################\n\tF_plus_tau0=np.zeros(np.shape(tau_n))\n\tF_plus_taumax=np.zeros(np.shape(tau_n))\n\tF_minus_tau0=np.zeros(np.shape(tau_n))\n\tF_minus_taumax=np.zeros(np.shape(tau_n))\n\n\tfor n in range(0, Nlayer):\n\t\tF_plus_tau0[n]=F_plus(n, 0.)\n\t\tF_plus_taumax[n]=F_plus(n, tau_n[n])\n\t\tF_minus_tau0[n]=F_minus(n, 0.)\n\t\tF_minus_taumax[n]=F_minus(n, tau_n[n])\n\n\n\t########################\n\t###Convert from Y_1n, Y_2n to F_net, mean intensity.\n\t#########################\n\t#test if diffuse flux dominates over direct flux. If direct flux dominant, instead set mu_1=mu_0\n\t\n\t#if F_minus_taumax[-1]<mu_0*np.pi*F_s*np.exp(-tau_c[-1]/mu_0):\n\t\t#mu_1=np.zeros(np.shape(mu_1))+mu_0\n\t#mu_1=np.zeros(np.shape(mu_1))+mu_0\n\t\n\tF_net=np.zeros(np.shape(tau_n)) #defined from Eqn 48 of Toon et al. 
This quantity is the net flux at the BASE of layer n.\n\tfor n in range(0, Nlayer):\n\t\tdirect=mu_0*np.pi*F_s*np.exp(-(tau_c[n]+tau_n[n])/mu_0) #eqn 50 of Toon et al\n\n\t\tterm1=Y_1[n]*(e1[n]-e3[n])\n\t\tterm2=Y_2[n]*(e2[n]-e4[n])\n\t\tterm3=C_plus(n, tau_n[n])-C_minus(n, tau_n[n])\n\t\t\n\t\tF_net[n]=term1+term2+term3 -direct\n\n\tAMEAN=np.zeros(np.shape(tau_n)) #defined from Eqn 49 of Toon et al. This is the equivalent of the quantity AMEAN in the twostr.f code. It is equal to 4*np.pi*J_n, where J_n is the mean intensity at the base of layer n. Hence this quantity AMEAN should be equal to the total intensity received by a point at the base of layer n. \n\tfor n in range(0, Nlayer):\n\t\tdirect=mu_0*np.pi*F_s*np.exp(-(tau_c[n]+tau_n[n])/mu_0) #eqn 50 of Toon et al\n\t\n\t\tterm1=Y_1[n]*(e1[n]+e3[n])\n\t\tterm2=Y_2[n]*(e2[n]+e4[n])\n\t\tterm3=C_plus(n, tau_n[n])+C_minus(n, tau_n[n])\n\t\t\n\t\t#AMEAN[n]=(1./mu_1[n])*(term1+term2+term3)+direct/mu_0\t\n\t\tAMEAN[n]=(1./mu_1[n])*(F_plus_taumax[n]+F_minus_taumax[n])+direct/mu_0\t\n\t\n\t########################\n\t###Compute \"surface intensity\"\n\t#########################\t\n\t#\"Surface intensity\" refers to the total intensity that would be intercepted by a particle at the surface of the planet. Whereas the total intensity is equal to (F_plus[-1]+F_minus[-1])/mu_1+direct[-1]/mu_0, the surface intensity is instead equal to (F_minus[-1])/mu_1+direct[-1]/mu_0, i.e. the downwelling diffuse intensity (since the bottom intensity is cut out due to there being a planet there) plus the direct intensity\n\t\n\tsurface_intensity=(F_minus_taumax[-1]/mu_1[-1])+(np.pi*F_s)*np.exp(-(tau_c[-1])/mu_0)\n\t\n\t########################\n\t###Return Result\n\t#########################\n\t#F_minus_tau0\n\t#np.max(np.abs((F_minus_taumax[:-1]-F_minus_tau0[1:]))/F_minus_tau0[1:])\n\t#np.max(np.abs((F_plus_taumax[:-1]-F_plus_tau0[1:]))/F_plus_tau0[1:])\n\t\n\treturn (F_plus_tau0, F_plus_taumax, F_minus_tau0, F_minus_taumax, F_net, AMEAN, surface_intensity)",
"def test_f1_circuit_maker(self):\n fho = tfho.test_file_handle_object()\n W = 5\n G = 20\n fg = .9\n X = 10\n fx = .85\n gate_maker = g.TYPE_TO_GATE_GEN[g.TEST_TYPES.RANDOM]\n # family 1 files:\n t_circuit_file_name = \"circuit_file_trimming\"\n t_circuit_file = fho.get_file_object(t_circuit_file_name, 'w')\n t_input_file_name = \"input_file_trimming\"\n t_input_file = fho.get_file_object(t_input_file_name, 'w')\n t_output_file_name = \"output_file_trimming\"\n t_output_file = fho.get_file_object(t_output_file_name, 'w')\n nt_circuit_file_name = \"circuit_file_no_trimming\"\n nt_circuit_file = fho.get_file_object(nt_circuit_file_name, 'w')\n nt_input_file_name = \"input_file_no_trimming\"\n nt_input_file = fho.get_file_object(nt_input_file_name, 'w')\n nt_output_file_name = \"output_file_no_trimming\"\n nt_output_file = fho.get_file_object(nt_output_file_name, 'w')\n level_type_array = [g.LEVEL_TYPES.RANDOM]\n F = 1\n # make a family 1 circuit with trimming:\n sr.seed(self.rand_seed)\n t_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n t_circuit_file,\n t_input_file,\n t_output_file,\n X, fx, gate_maker,\n level_type_array, True)\n t_gen.generate()\n # make a family 1 circuit without trimming, with the same randomness:\n sr.seed(self.rand_seed)\n nt_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n nt_circuit_file,\n nt_input_file,\n nt_output_file,\n X, fx, gate_maker,\n level_type_array, False)\n nt_gen.generate()\n # obtain strings representing the contents of all the resulting files:\n t_circuit_string = fho.get_file(t_circuit_file_name).getvalue()\n t_input_string = fho.get_file(t_input_file_name).getvalue()\n t_output_string = fho.get_file(t_output_file_name).getvalue()\n nt_circuit_string = fho.get_file(nt_circuit_file_name).getvalue()\n nt_input_string = fho.get_file(nt_input_file_name).getvalue()\n nt_output_string = fho.get_file(nt_output_file_name).getvalue()\n # make sure that the inputs and outputs produced by the trimming and\n # no trimming algorithms are the same:\n self.assertEqual(t_input_string, nt_input_string)\n self.assertEqual(t_output_string, nt_output_string)\n # make sure that the input begins and ends with a bracket:\n self.assertEqual(\"[\", t_input_string[0])\n self.assertEqual(\"]\", t_input_string[-1])\n # make sure that each input element is a bit:\n for bit in t_input_string[1:-1]:\n self.assertTrue((bit == '0') or (bit == '1'))\n # make sure that the output is a bit:\n self.assertTrue((t_output_string == '0') or (t_output_string == '1'))\n # make sure that the two circuit headers are the same, and that they\n # contain the correct values:\n t_circuit_header = t_circuit_string.split(\"\\n\")[0]\n nt_circuit_header = nt_circuit_string.split(\"\\n\")[0]\n self.assertEqual(t_circuit_header, nt_circuit_header)\n (W_string, G_string, F_string) = t_circuit_header.split(\",\")\n W_value = int(W_string.split(\"=\")[-1])\n G_value = int(G_string.split(\"=\")[-1])\n F_value = int(F_string.split(\"=\")[-1])\n self.assertEqual(W, W_value)\n self.assertEqual(G, G_value)\n self.assertEqual(F, F_value)\n # note that we cannot test that the circuits themselves are the same,\n # because the trimming algorithm produces a circuit with gates listed\n # in a different order.",
"def SSt_theo_old(D, k):\n\ta1b = k[\"A1B1\"]\n\tba1 = k[\"B1A1\"]\n\tca1 = k[\"C1A1\"]\n\tcb = k[\"B1C1\"]\n\tnum = a1b*ba1*ca1*ca1 + ba1*ba1*ca1*ca1 + 3*a1b*ba1*ca1*cb + 2*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 2*ba1*ca1*ca1*cb + 2*a1b*ba1*cb*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*a1b*ca1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t3*ba1*ca1*ca1*ca1 + 2*a1b*ba1*ba1*cb + ba1*ba1*ba1*cb + 2*a1b*ba1*ca1*cb + \\\n\t\t\t3*ba1*ba1*ca1*cb + 4*a1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + \\\n\t\t\t2*a1b*ba1*cb*cb + 2*ba1*ba1*cb*cb + 2*a1b*ca1*cb*cb + 4*ba1*ca1*cb*cb + \\\n\t\t\t2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 4*ba1*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t2*ca1*ca1*ca1*ca1 + ba1*ba1*ba1*cb + 3*a1b*ba1*ca1*cb + 3*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\t(ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 3*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + \\\n\t\t\t2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\tba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\tden = a1b*(ba1*ba1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb + ba1*ba1*cb*cb + \n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + 4*ca1*ca1*ca1*cb + \n\t\t\t2*ba1*ba1*cb*cb + 4*ba1*ca1*cb*cb + 2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\ta1b*(2*ba1*ba1*ca1*ca1 + 4*ca1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + \n\t\t\t4*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\ta1b*ba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\ttau = num/den\n\t##\n\treturn tau*np.log(20)",
"def XsamsRadTranBroadening(G):\n s=''\n if countReturnables('RadTransBroadeningNatural'):\n # attempt at making a loop to support multiple natural broadening effects\n if hasattr(G('RadTransBroadeningNatural'), \"Broadenings\"):\n for Broadening in makeiter(G('RadTransBroadeningNatural').Broadenings):\n GB = lambda name: GetValue(name, Broadening=Broadening)\n s += makeBroadeningType(GB, name='Natural')\n else:\n s += makeBroadeningType(G, name='Natural')\n if countReturnables('RadTransBroadeningInstrument'):\n s += makeBroadeningType(G, name='Instrument')\n if countReturnables('RadTransBroadeningDoppler'):\n s += makeBroadeningType(G, name='Doppler')\n if countReturnables('RadTransBroadeningPressure'):\n s += makeBroadeningType(G, name='Pressure')\n return s",
"def _define_biophysics(self):\n\t\tfor node in self.node:\n\t\t\tnode.nseg=1\n\t\t\tnode.diam=self._nodeD\n\t\t\tnode.L=self._nodeLength\n\t\t\tnode.Ra=self._rhoa/10000\n\t\t\tnode.cm=2\n\t\t\tnode.insert('axnode')\n\t\t\tnode.insert('extracellular')\n\t\t\tnode.xraxial[0]=self._Rpn0\n\t\t\tnode.xg[0]=1e10\n\t\t\tnode.xc[0]=0\n\n\t\tfor mysa in self.mysa:\n\t\t\tmysa.nseg=1\n\t\t\tmysa.diam=self._fiberD\n\t\t\tmysa.L=self._paraLength1\n\t\t\tmysa.Ra=self._rhoa*(1/(self._paraD1/self._fiberD)**2)/10000\n\t\t\tmysa.cm=2*self._paraD1/self._fiberD\n\t\t\tmysa.insert('pas')\n\t\t\tmysa.g_pas=0.001*self._paraD1/self._fiberD\t\t\n\t\t\tmysa.e_pas=-80\n\t\t\tmysa.insert('extracellular')\n\t\t\tmysa.xraxial[0]=self._Rpn1\n\t\t\tmysa.xg[0]=self._mygm/(self._nl*2)\n\t\t\tmysa.xc[0]=self._mycm/(self._nl*2)\n\n\t\tfor flut in self.flut:\n\t\t\tflut.nseg=1\n\t\t\tflut.diam=self._fiberD\n\t\t\tflut.L=self._paraLength2\n\t\t\tflut.Ra=self._rhoa*(1/(self._paraD2/self._fiberD)**2)/10000\n\t\t\tflut.cm=2*self._paraD2/self._fiberD\n\t\t\tflut.insert('pas')\n\t\t\tflut.g_pas=0.0001*self._paraD2/self._fiberD\t\t\n\t\t\tflut.e_pas=-80\n\t\t\tflut.insert('extracellular')\n\t\t\tflut.xraxial[0]=self._Rpn2\n\t\t\tflut.xg[0]=self._mygm/(self._nl*2)\n\t\t\tflut.xc[0]=self._mycm/(self._nl*2)\n\t\t\n\t\tfor stin in self.stin:\n\t\t\tstin.nseg=1\n\t\t\tstin.diam=self._fiberD\n\t\t\tstin.L=self._interLength\n\t\t\tstin.Ra=self._rhoa*(1/(self._axonD/self._fiberD)**2)/10000\n\t\t\tstin.cm=2*self._axonD/self._fiberD\n\t\t\tstin.insert('pas')\n\t\t\tstin.g_pas=0.0001*self._axonD/self._fiberD\n\t\t\tstin.e_pas=-80\n\t\t\tstin.insert('extracellular')\n\t\t\tstin.xraxial[0]=self._Rpx\n\t\t\tstin.xg[0]=self._mygm/(self._nl*2)\n\t\t\tstin.xc[0]=self._mycm/(self._nl*2)",
"def stim_conditions(angles, onebeep_nb, twobeep_nb, onebeep_tc, twobeep_tc):\n##### make single auditory stim################################################\n\n #conditions_1A = [-30_1A, 0_1A, 30_1A, -30_2A, 0_2A, 30_2A]\n spatials = ('-30', '0', '30')\n beep_combos_1a = ('onebeep_nb', 'twobeep_nb', 'onebeep_tc', 'twobeep_tc')\n\n##### make competing auditory stim#############################################\n\n #conditions_2A = []\n spatials = ('-30x0', '0x30', '-30x30')\n beep_combos_2a = ('onebeep_nbxonebeep_tc', 'twobeep_nbxonebeep_tc',\n 'onebeep_nbxtc2', 'twobeep_nbxtwobeep_tc')\n\n all_spatials = [s.split('x') for s in spatials]\n for s in all_spatials[1:]:\n all_spatials[0] += s\n all_spatials = all_spatials[0]\n all_spatials = list(np.unique([float(s) for s in all_spatials]))\n\n all_combos = [ss.split('x') for ss in beep_combos_2a]\n for ss in all_combos[1:]:\n all_combos[0] += ss\n all_combos = all_combos[0]\n all_combos = list(np.unique([float(ss) for ss in all_combos]))\n\n##### convolve with HRTF at appropriate angles ################################\n\n move_sig = np.concatenate([convolve_hrtf(stim, fs, ang)\n for ang in range(-30, 30)], axis=1)\n return move_sig",
"def test_bespoke_atom_smirks():\n gen = SmirksGenerator()\n mol = Molecule.from_smiles(\"C\")\n\n atom_smirks = gen._get_bespoke_atom_smirks(molecule=mol)\n # there should only be 2 unique smirks\n assert len(atom_smirks) == 2\n # make sure the correct atoms are hit\n all_atoms = []\n for smirk in atom_smirks:\n atoms = condense_matches(mol.chemical_environment_matches(smirk.smirks))\n all_atoms.extend(atoms)\n assert set(atoms) == smirk.atoms\n # make sure all atoms have a bespoke smirks\n for i in range(mol.n_atoms):\n assert (i, ) in all_atoms",
"def create_duck1010(self):\n assignments = ['oblig{num}:pub({pub}):ln(Obligatorisk oppgave {num})'.format(num=num, pub=num*40) for num in xrange(1, 4)]\n periods = ['springcur:begins(-2):ends(6):ln(Spring Current)',\n 'springold:begins(-14):ends(6):ln(Spring Old)']\n self.testhelper.add(nodes=\"duckburgh:admin(duckburghadmin).ifi:admin(ifiadmin)\",\n subjects=[\"duck1010:ln(DUCK1010 - Objektorientert programmering)\"],\n periods=periods,\n assignments=assignments)\n self.testhelper.duck1010.admins.add(self.testhelper.thor)\n\n for year in range(2000, 2011): # Add some extra old semesters just to make it easier to test layouts with many semesters\n logging.info('Creating duck1010 spring%s', year)\n self.testhelper.duck1010.periods.create(\n short_name='spring{0}'.format(year),\n long_name='Spring {0}'.format(year),\n start_time=datetime(year, 8, 1),\n end_time=datetime(year, 12, 30)\n )\n\n anotherTryVerdict = {'grade': 'Not approved', 'points': 0, 'is_passing_grade': False}\n failedVerdict = {'grade': 'Not approved', 'points': 0, 'is_passing_grade': False}\n okVerdict = {'grade': 'Approved', 'points': 1, 'is_passing_grade': True}\n goodVerdict = {'grade': 'Approved', 'points': 1, 'is_passing_grade': True}\n\n assignmentnames = [name.split(':')[0] for name in assignments]\n periodnames = self._onlyNames(periods)\n for periodname in periodnames:\n periodpath = 'duckburgh.ifi;duck1010.' + periodname\n logging.info('Creating %s', periodpath)\n period = self.testhelper.get_object_from_path(periodpath)\n self._set_first_deadlines(period)\n self._addRelatedStudents(period)\n self._addRelatedExaminers(period)\n self._addBadGroups(periodpath, assignmentnames, anotherTryVerdict, failedVerdict)\n self._addMediumGroups(periodpath, assignmentnames, anotherTryVerdict, okVerdict)\n self._addGoodGroups(periodpath, assignmentnames, goodVerdict)",
"def build_antibodies(experiment, canonicals, ln):\n # Store all of the antibodies in this list\n antibodies = []\n # Get the list of CDRs to be considered\n cdrs = list(canonicals.keys())\n cdrs.sort()\n # Get the optimal set of canonical structures for this library\n solution = experiment[\"Scores\"][ln-1][1]\n # Go through the antibody chains being designed\n chains = experiment[\"Optcdr Chains\"]\n chains.sort()\n # Find the reference framework molecule\n if experiment[\"Optcdr Frameworks\"] != {}:\n file = experiment[\"Optcdr Framework Reference\"].split(\"/\")[-1]\n path = experiment[\"Optcdr Framework Reference\"].replace(file, '')\n reference = MOLECULES.MoleculeFile(file, path) \n for chain in chains:\n # Store the molecules in a list\n molecules = []\n # Go through the CDR numbers\n for i in range(1, 4):\n # Extract the CDR name\n cdr = chain[0].upper() + str(i) \n # Get the index for the CDR in the solution dictionary\n index = cdrs.index(cdr) + 1\n # Append the canonical structure molecule to the list of molecules\n molecules.append(canonicals[cdr][solution[index]])\n # If a framework has been specified, add it\n name = chain.lower()\n if name in experiment[\"Optcdr Frameworks\"]:\n # Extract the Molecule class object\n molecule = experiment[\"Optcdr Frameworks\"][name][0][2]\n # Properly orient the framework\n orient_framework(molecule, reference[chain[0].upper()])\n # Obtain the list of framework regions\n frameworks = include_framework(molecule)\n # Go through the molecules and convert them to text\n texts = []\n for mol in molecules:\n # Skip the first and last residues of each CDR since they are\n # attach points\n text = \"\"\n for rn in range(len(mol)):\n if rn not in [0, len(mol) - 1]:\n text += str(mol[rn])\n # Append this CDR to the list of CDRs \n texts.append(text)\n # Make sure there are 4 framework regions and 3 CDRs\n if len(frameworks) != 4 or len(texts) != 3:\n text = \"The framework molecule does not include all of the \"\n text += \"necessary regions\"\n raise OptcdrError(text)\n # Concatenate the text into a single string\n atoms = ''\n # Add FR1, CDR1, FR2, CDR2, FR3, CDR3 in that order \n for i in range(0, 3):\n atoms += frameworks[i] + texts[i] \n # Add FR4 \n atoms += frameworks[3]\n # Overwrite the molecules list with this single molecule\n molecules =[MOLECULES.Molecule(atoms, 1, 1, chain[0].upper(), True,\\\n experiment[\"Force Field\"], experiment[\"File Format\"])]\n # Add the list of molecules to the list of antibodies\n antibodies.extend(molecules)\n # Generate the expected number of Design Molecules\n dms = 0\n for chain in chains:\n if chain in experiment[\"Optcdr Frameworks\"]:\n dms += 1\n else:\n dms += 3\n # Create a formatted list\n formatted_antibodies = []\n # If a single molecule is present for each chain, use the framework name\n if len(antibodies) == len(chains):\n for antibody in antibodies:\n formatted_antibodies.append([None, antibody.name, antibody])\n # If there are 3 CDRs for each chain, generate a unique name for each CDR\n elif len(antibodies) == dms: \n # Create a formatted list\n formatted_antibodies = []\n # Create a string of possible names\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n # Use a counter to go through the alphabet\n an = 0\n # Go through the antibodies\n for antibody in antibodies:\n # Use a while loop to find an appropriate name for the molecule\n goOn = True\n while goOn:\n # Increment the counter\n an += 1\n # If the name has not been used, store it\n if alphabet[an] not in experiment[0]:\n goOn = False\n # 
Store the formatted molecule list\n antibody.name = alphabet[an]\n formatted_antibodies.append([None, alphabet[an], antibody])\n # Otherwise, something is wrong so raise an error\n else:\n text = \"There is an unexpected number of antibodies generated for \"\n text += \"library \" + str(ln)\n raise OptcdrError(text)\n # Update the experiment to include the antibody details\n experiment[\"Molecules\"].extend(formatted_antibodies)\n experiment.make_DesignGroups()\n existing = experiment['Summary']\n experiment.finish_creation()\n experiment['Summary'] += existing\n # Update the OptCDR Scores\n SHARING.load_scores(experiment, experiment[\"Folder\"] + \\\n \"results/initial/\")\n # Output these molecules to the 'Current' folder\n SHARING.output_Current(experiment, './Current/')\n # Refine the initial antibody structure\n initialize_antibodies(experiment, canonicals)",
"def isBGS_sga(gflux=None, rflux=None, zflux=None, w1flux=None, refcat=None, rfibertotflux=None,\n rfiberflux=None, maskbits=None, south=True, targtype=None):\n\n _check_BGS_targtype(targtype)\n\n bgs = np.zeros_like(rflux, dtype='?')\n\n # the SGA galaxies.\n LX = np.zeros_like(rflux, dtype='?')\n # ADM Could check on \"L2\" for DR8, need to check on \"LX\" post-DR8.\n if refcat is not None:\n rc1d = np.atleast_1d(refcat)\n if isinstance(rc1d[0], str):\n LX = [(rc[0] == \"L\") if len(rc) > 0 else False for rc in rc1d]\n else:\n LX = [(rc.decode()[0] == \"L\") if len(rc) > 0 else False for rc in rc1d]\n if np.ndim(refcat) == 0:\n LX = np.array(LX[0], dtype=bool)\n else:\n LX = np.array(LX, dtype=bool)\n\n # Make sure to include all the SGA galaxies.\n bgs |= LX\n # ADM geometric masking cuts from the Legacy Surveys.\n # Remove SGA in BRIGHT and CLUSTER.\n bgs &= imaging_mask(maskbits, bgsmask=True)\n\n g = 22.5 - 2.5*np.log10(gflux.clip(1e-16))\n r = 22.5 - 2.5*np.log10(rflux.clip(1e-16))\n z = 22.5 - 2.5*np.log10(zflux.clip(1e-16))\n w1 = 22.5 - 2.5*np.log10(w1flux.clip(1e-16))\n rfib = 22.5 - 2.5*np.log10(rfiberflux.clip(1e-16))\n\n # BASS r-mag offset with DECaLS.\n offset = 0.04\n\n # D. Schlegel - ChangHoon H. color selection to get a high redshift\n # success rate.\n if south:\n schlegel_color = (z - w1) - 3/2.5 * (g - r) + 1.2\n rfibcol = (rfib < 20.75) | ((rfib < 21.5) & (schlegel_color > 0.))\n else:\n schlegel_color = (z - w1) - 3/2.5 * (g - (r-offset)) + 1.2\n rfibcol = (rfib < 20.75+offset) | ((rfib < 21.5+offset) & (schlegel_color > 0.))\n\n if targtype == 'bright':\n if south:\n bgs &= rflux > 10**((22.5-19.5)/2.5)\n bgs &= rflux <= 10**((22.5-12.0)/2.5)\n bgs &= rfibertotflux <= 10**((22.5-15.0)/2.5)\n else:\n bgs &= rflux > 10**((22.5-(19.5+offset))/2.5)\n bgs &= rflux <= 10**((22.5-12.0)/2.5)\n bgs &= rfibertotflux <= 10**((22.5-15.0)/2.5)\n elif targtype == 'faint':\n if south:\n bgs &= rflux > 10**((22.5-20.3)/2.5)\n bgs &= rflux <= 10**((22.5-19.5)/2.5)\n bgs &= (rfibcol)\n else:\n bgs &= rflux > 10**((22.5-(20.3))/2.5)\n bgs &= rflux <= 10**((22.5-(19.5+offset))/2.5)\n bgs &= (rfibcol)\n\n return bgs",
"def make_seq(scaffold, o_dict):\n scaff_name = scaffold[0]\n sequence = []\n \n nice_scaff = \"contigs__\"\n \n scaff_string = str(scaffold)\n while scaffold:\n \n if len(scaffold) == 1:\n #This should never happen!\n paf(\"\\nWARNING: odd number of elements in scaffold!\")\n paf(\"scaffold is: \" + scaff_string)\n nice_scaff += \"WARNING:_odd_number_of_elements_in_scaffold!\"\n sequence.description = scaff_name\n return sequence, nice_scaff\n\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n \n if end1[0:4] != \"five\" and end1[0:5] != \"three\":\n if end2 in repeat_contigs and end2[0:10] == \"threeprime\":\n #Only attach a repeat if connected by fiveprime end,\n # to avoid creating duplicate copies\n ''' this condition has been removed!\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n #threeprime ends of repeats are not attached\n if end2[0:4] != \"five\" and end2[0:5] != \"three\": end2 = other_end(end1)\n '''\n \n if \"dummy\" in end2:\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n\n if end2[0:4] != \"five\" and end2[0:5] != \"three\":\n #This should never happen! \n paf(\"\\nWARNING: scaffold not included in assembly!\")\n paf(\"scaffold is: \" + scaff_string)\n paf(\"end1 is: \" + str(end1))\n paf(\"end2 is: \" + str(end2)+ \"\\n\")\n nice_scaff += \"scaffold.not.included.in.assembly!\" + str(end1) + \".\" + str(end2)\n sequence.description = scaff_name\n return sequence, nice_scaff\n else:\n sequence, nice_scaff = initiate_seq(end2, nice_scaff)\n elif (end2 != \"link_circular\") and (\"dummy\" not in end1):\n sequence, nice_scaff = extend_seq(sequence, end0, end1, o_dict, nice_scaff)\n end0 = end2\n \n sequence.description = scaff_name\n \n return sequence, nice_scaff",
"def SecondaryComplex_to_Bid():\n Parameter('RIP3_0' , 2.0e4) # molecules per cell\n Parameter('BidK_0' , 5.0e3) # molecules per cell\n \n alias_model_components()\n Initial(RIP3(bRHIM = None, state = 'unmod'), RIP3_0) # RIP3\n Initial(BidK(bf = None), BidK_0) \n # ==============================================================\n # Assembly of Complex II, Riptosome and Necrosome\n # --------------------------------------------------------------\n # FADD + TRADD[active] <-> FADD:TRADD[active]\n # FADD + RIP1 <-> FADD:RIP1\n # TRADD + RIP1 <-> TRADD:RIP1\n\n # CD95_to_secondary complex contains the rules for recruitment of proC8 to FADD.\n # (RIP1 or TRADD):FADD + proC8 <-> (RIP1 or TRADD):FADD:proC8\n # (RIP1 or TRADD):FADD:proC8 + proC8 <-> (RIP1 or TRADD):FADD:proC8:proC8\n # (RIP1 or TRADD):FADD:proC8 + flip_L <-> (RIP1 or TRADD):FADD:proC8:flip_L\n # (RIP1 or TRADD):FADD:proC8 + flip_S <-> (RIP1 or TRADD):proC8:flip_S\n \n # RIP1%ProC8%ProC8(in a complex) >> RIP1[trunc] + C8 + (remains of the complex)\n # RIP1%ProC8%cFlip[L](in a complex) >> RIP1[trunc] + remains of the complex)\n # RIP1%cFlip[S](in a complex) + RIP3 >> RIP1:RIP3(in a complex, i.e. necrosome)\n\n # RIP1 + C8 <-> RIP1:C8 >> RIP1[trunc] + C8\n # RIP3 + C8 <-> RIP3:C8 >> RIP3[trunc] + C8\n # Bid + C8 <-> Bid:C8 >> Bid[trunc] + C8\n \n # -------------Assembling Complex II-----------------\n Parameter('Ka_RIP1_FADD', 1e-7) # Biochemica et Biophysica Acta 1834(2013) 292-300\n Parameter('Kd_RIP1_FADD', 1e-8) # Biochemica et Biophysica Acta 1834(2013) 292-300\n alias_model_components()\n \n bind(FADD(bDD = None, bDED1 = None, bDED2 = None), 'bDD', TRADD(bDD1=None, state = 'active'), 'bDD1', [1e-6, 1e-3])\n bind(FADD(bDD = None), 'bDD', RIP1(bDD=None, bRHIM = None, state = 'unmod'), 'bDD', [Ka_RIP1_FADD, Kd_RIP1_FADD])\n bind(TRADD(bDD2 = None, state = 'active'),'bDD2', RIP1(bDD = None, bRHIM = None, state = 'unmod'), 'bDD', [1e-6, 1e-3])\n # For simplicity, I am neglecting the binary intereaction that occurs between proC8 and RIP1.\n # Binding of proC8 and c-flip to FADD is accomplished in CD95_to_Secondary complex. 
\n\n #--------------RIP1 Truncation reactions-------------\n #---Truncation by C8---------------------------------\n RIP_CIIA_proC8 = RIP1(bDD=ANY, bRHIM = None, state = 'unmod')% TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n RIP_CIIB_proC8 = RIP1(bDD=ANY, bRHIM = None, state = 'unmod')% FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED=ANY)%proC8(bDED=ANY)\n CIIA = TRADD(bDD2 = None, bDD1 = ANY, state = 'active') % FADD(bDD=ANY, bDED1=None, bDED2=None)\n \n Rule('RIP1_truncation_CIIA', RIP_CIIA_proC8 >> CIIA + C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k11',1e-1))\n Rule('RIP1_truncation_CIIB', RIP_CIIB_proC8 >> FADD(bDD=None, bDED1=None, bDED2=None)+ C8(bf = None, state = 'A') + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k12', 1e-1))\n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP1(bDD=None), 'bRHIM', 'state', 'unmod', 'trunc', [1e-6, 1e-3, 1e-1])\n\n #---Truncation by proC8:cFlip_L---------------------\n Riptosome_FADD = RIP1(bDD=1, bRHIM = None, state = 'unmod')%FADD(bDD=1, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n Riptosome_TRADD = RIP1(bDD=1, bRHIM = None, state = 'unmod')%TRADD(bDD1=ANY, bDD2=1)%FADD(bDD=ANY, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY)\n\n Rule('RIP1_truncation_FADD', Riptosome_FADD >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k13', 1e-1))\n Rule('RIP1_truncation_TRADD', Riptosome_TRADD >> FADD(bDD=None, bDED1=ANY, bDED2=ANY)%proC8(bDED = ANY)%flip_L(bDED = ANY) + RIP1(bDD=None, bRHIM = None, state = 'trunc'), Parameter('k14', 1e-1))\n \n # -------------RIP3 Binding Interactions----------------\n Ripto1_Flip_S = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='unmod') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Ripto2_Flip_S = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=None, state='unmod') % flip_S(bDED=ANY) % proC8(bDED=ANY)\n Necrosome1 = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=6, state='unmod') % TRADD(bDD1=ANY, bDD2=ANY, state='active') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 6, state = 'unmod')\n Necrosome2 = FADD(bDD=ANY, bDED1=ANY, bDED2=ANY) % RIP1(bDD=ANY, bRHIM=5, state='unmod') % flip_S(bDED=ANY) % proC8(bDED=ANY) % RIP3(bRHIM= 5, state = 'unmod')\n\n Rule('RIP3_binding1', Ripto1_Flip_S + RIP3(bRHIM= None, state = 'unmod') <> Necrosome1, Parameter('k15', 1e-6), Parameter('k16', 1e-3))\n Rule('RIP3_binding2', Ripto2_Flip_S + RIP3(bRHIM= None, state = 'unmod') <> Necrosome2, Parameter('k17', 1e-6), Parameter('k18', 1e-3))\n \n #RIP3 Truncation\n catalyze_state(C8(bf = None, state = 'A'), 'bf', RIP3(), 'bRHIM', 'state', 'unmod', 'trunc', [1e-6, 1e-3, 1e-1])\n\n #-------------Bid Interactions--------------------------\n # Bid Phosphorylation and Truncation\n catalyze_state(BidK(), 'bf', Bid(), 'bf', 'state', 'U', 'po4', [1e-6, 1e-3, 1e-1])\n catalyze_state(C8(bf = None, state = 'A'), 'bf', Bid(), 'bf', 'state', 'U', 'T', [1.04e-5, 0.005, 0.1])\n\n # Bid-PO4 competing with RIP1 for binding to Complex II\n bind(TRADD(bDD2 = None, state = 'active'),'bDD2', Bid(bf = None, state = 'po4'), 'bf', [1e-6, 1e-3])\n # Bid-PO4 sequestering RIP1\n bind(RIP1(bDD = None, bRHIM = None, state = 'unmod'), 'bRHIM', Bid(bf = None, state = 'po4'), 'bf', [1e-6, 1e-3])",
"def make_bispectra(self, stokes='postbisp', maxturns=0):\n\n bisp = lambda d: d[:,:,0] * d[:,:,1] * n.conj(d[:,:,2]) # bispectrum for data referenced by triple (data[:,triples])\n\n # set up triples and arrays for bispectrum considering flagged baselines (only having zeros).\n triples = self.make_triples()\n meanbl = self.data.mean(axis=2).mean(axis=0) # find bls with no zeros in either pol to ignore in triples\n self.triples = triples[n.all(meanbl[triples][:,0] != 0j, axis=1) & n.all(meanbl[triples][:,1] != 0j, axis=1) & n.all(meanbl[triples][:,2] != 0j, axis=1) == True] # only take triples if both pols are good. may be smaller than set for an individual pol\n\n # need to select path based on how polarization is handled. assumes only dual-pol data.\n print 'Bispectrum made for stokes =', stokes\n if ( (stokes == 'postbisp') | (stokes == 'prebisp') | (stokes == 'noavg') ): # case of combining two stokes\n bispectra = n.zeros((len(self.dmarr), len(self.data), len(self.triples)), dtype='complex')\n elif isinstance(stokes, types.IntType): # case of using single pol\n if stokes >= self.npol:\n raise IndexError, 'Stokes parameter larger than number of pols in data.'\n bispectra = n.zeros((len(self.dmarr), len(self.data), len(self.triples)), dtype='complex')\n elif stokes == 'noavg':\n bispectra = n.zeros((len(self.dmarr), len(self.data), len(self.triples), self.npol), dtype='complex')\n\n # iterate over dm trials\n for dmbin in xrange(len(self.dmarr)):\n\n if maxturns == 0:\n dddata = self.dedisperse(dmbin).mean(axis=2) # average over channels\n elif maxturns > 0:\n dddata = self.spectralInterpolate(self.dedisperse(dmbin), axis=2, maxturns=maxturns) # interpolate over channels using fft\n\n if stokes == 'prebisp':\n dddata = dddata.mean(axis=2)\n bispectra[dmbin] = bisp(dddata[:, self.triples])\n elif stokes == 'postbisp':\n bispectra[dmbin] = bisp(dddata[:, self.triples]).mean(axis=2)\n elif stokes == 'noavg':\n bispectra[dmbin] = bisp(dddata[:, self.triples])\n elif isinstance(stokes, types.IntType): # case of using single pol\n bispectra[dmbin] = bisp(dddata[:, self.triples, stokes])\n\n print 'dedispersed for ', self.dmarr[dmbin]\n self.bispectra = n.ma.masked_array(bispectra, bispectra == 0j)",
"def create_B_words(path_to_pairs,\n path_to_librispeech_text,\n path_to_phonemes,\n path_save,\n freq_sim,\n len_sim,\n edit_sim):\n for i in range(len(path_to_pairs)):\n \n pairs = []\n dic_cl_eq = {} # Classe d'equivalence pour le sens des mots\n \n with open(path_to_pairs[i]) as f:\n for line in f:\n line = line.replace('\\n', '').split(' ')\n pairs.append(line)\n if line[0] in dic_cl_eq:\n dic_cl_eq[line[0]].add(line[1])\n else:\n dic_cl_eq[line[0]] = {line[1]}\n if line[1] in dic_cl_eq:\n dic_cl_eq[line[1]].add(line[0])\n else:\n dic_cl_eq[line[1]] = {line[0]}\n \n dic_cl_eq_prev = {}\n while dic_cl_eq_prev != dic_cl_eq:\n dic_cl_eq_prev = copy.deepcopy(dic_cl_eq)\n for word in dic_cl_eq:\n for syn in dic_cl_eq[word]:\n dic_cl_eq[word] = set.union(dic_cl_eq[word], dic_cl_eq[syn])\n \n with open(path_to_librispeech_text) as f:\n text_librispeech = f.read()\n text_librispeech_split = text_librispeech.replace('\\n', ' ').split(' ')\n freq_libri = {}\n for word in text_librispeech_split:\n if word in dic_cl_eq:\n if word in freq_libri:\n freq_libri[word] += 1\n else:\n freq_libri[word] = 1\n \n phonemes = []\n with open(path_to_phonemes[i]) as f:\n for line in f:\n line = line.replace('\\n', '').split(' ')\n phonemes.append(line)\n \n dic_word_phonemes = {}\n for j in range(len(pairs)):\n dic_word_phonemes[pairs[j][0]] = phonemes[j][0]\n dic_word_phonemes[pairs[j][1]] = phonemes[j][1]\n \n file = open(path_save[i], 'w+')\n file.truncate(0)\n \n for j in range(len(pairs)):\n A, X = pairs[j]\n B_0 = []\n for word in dic_cl_eq:\n if word not in dic_cl_eq[A]:\n if np.abs(np.log(freq_libri[word])/np.log(freq_sim) \\\n - np.log(freq_libri[A])/np.log(freq_sim)) <= 1:\n if (len(word) > (1-len_sim)*len(A)) and \\\n (len(word) < (1+len_sim)*len(A)):\n p_A = dic_word_phonemes[A]\n p_X = dic_word_phonemes[X]\n p_word = dic_word_phonemes[word]\n if np.abs(dist(p_A, p_X) - dist(p_X, p_word)) < edit_sim:\n B_0.append(word)\n line_0 = ' '.join([A, X] + B_0)\n \n X, A = pairs[j]\n B_1 = []\n for word in dic_cl_eq:\n if word not in dic_cl_eq[A]:\n if np.abs(np.log(freq_libri[word])/np.log(freq_sim) \\\n - np.log(freq_libri[A])/np.log(freq_sim)) <= 1:\n if (len(word) > np.around((1-len_sim)*len(A), decimals=2)) and \\\n (len(word) < np.around((1+len_sim)*len(A), decimals=2)):\n p_A = dic_word_phonemes[A]\n p_X = dic_word_phonemes[X]\n p_word = dic_word_phonemes[word]\n if np.abs(dist(p_A, p_X) - dist(p_X, p_word)) < edit_sim:\n B_1.append(word)\n line_1 = ' '.join([A, X] + B_1)\n \n if max(len(B_0), len(B_1)) == 0:\n print(X, A)\n \n line = line_0 if len(line_0) > len(line_1) else line_1\n if j < len(pairs) - 1:\n line += '\\n'\n file.write(line)\n \n file.close()",
"def genChains(self):\n self.numMonomer = 0\n self.numBonds = 0\n self.numMols = 0\n self.numCations = 0\n self.numAnions = 0\n\n self.atomsCoords = []\n self.atomsType = []\n self.atomsCharge = []\n self.molId = []\n self.bondList = []\n \n for i in range(self.numPa + self.numPc):\n\n if i < self.numPc:\n # polycation chains, charge in LJ units of LAMMPS\n # electron charge would be 10.54 using bare LAMMPS LJ units\n # the dielectric constans of solvent is effectively taken as 111 when assign 1 to +e\n # just need to set dielectric as 0.72 in LAMMPS ot mimic water with dielectric constant 80\n self.beadCharge = 1\n self.beadType = 1 # atomic type for neutral beads in polycation chains\n self.chain = self.lenPc\n else:\n self.beadCharge = -1 # polyanion chains\n self.beadType = 3 # atomic type for neutral beads in polyanion chains\n self.chain = self.lenPa\n\n self.numMols += 1\n\n # generate the first bead of each chain randomly\n self.numMonomer += 1\n self.cxyz = np.random.rand(3) * self.box + self.lxyz\n\n self.atomsCoords.append(self.cxyz)\n #self.atomsType.append(self.beadType)\n\n # decide if the first bead is charged or not\n if self.chargeRepeat == 1:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsType.append(self.beadType)\n self.atomsCharge.append(0)\n\n self.molId.append(self.numMols)\n\n self.currpxyz = self.cxyz\n\n # follow random walk to generate the chain\n # generate the seconb bead of the chain\n self.theta, self.phi = np.random.rand(2) * np.array([np.pi, 2 * np.pi])\n self.ds = np.array([np.cos(self.theta), np.sin(self.theta) * np.cos(self.phi),\\\n np.sin(self.theta) * np.sin(self.phi)]) * self.segment\n\n self.cxyz = self.currpxyz + self.ds\n\n self.numMonomer += 1\n self.atomsCoords.append(self.cxyz)\n\n # decide if the second bead is charged or not\n if 2%self.chargeRepeat == 0:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsCharge.append(0)\n self.atomsType.append(self.beadType)\n\n self.molId.append(self.numMols)\n \n self.numBonds += 1\n self.bondList.append([self.numMonomer - 1, self.numMonomer])\n\n self.currpxyz = self.cxyz\n\n self.currtheta = self.theta\n self.currphi = self.phi\n\n self.dstot += self.ds\n\n # generating the rest beads of the chain\n\n for k in range(3, self.chain+1):\n # only accept atoms that are beyong certain distance\n # from the atom precding the current atom in the chain\n self.theta, self.phi = np.random.rand() * np.array([np.pi - self.stiffangle, \\\n 2 * np.pi])\n self.ds1 = np.array([np.cos(self.theta), np.sin(self.theta) * np.cos(self.phi),\\\n np.sin(self.theta) * np.sin(self.phi)]) * self.segment\n\n self.reverseXZrotation()\n self.cxyz = self.currpxyz + self.ds\n\n self.numMonomer += 1\n self.atomsCoords.append(self.cxyz)\n\n if k % self.chargeRepeat == 0:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsCharge.append(0)\n self.atomsType.append(self.beadType)\n\n self.molId.append(self.numMols)\n self.numBonds += 1\n self.bondList.append([self.numMonomer - 1, self.numMonomer])\n\n self.currpxyz = self.cxyz\n\n self.currtheta = np.arccos(self.ds[0]/self.segment)\n if self.ds[2] > 0:\n self.currphi = 
np.arccos(self.ds[1]/self.segment/np.sin(self.currtheta))\n else:\n self.currphi = 2*np.pi - np.arccos(self.ds[1]/self.segment/np.sin(self.currtheta))\n\n self.dstot += self.ds\n\n print \"%d beads are generated.\\n\" % self.numMonomer \n assert self.numMonomer == self.numPc * self.lenPc + self.numPa * self.lenPa, \\\n \"The number of monomers in chains is wrong!\\n\"\n assert self.numCations == int(np.floor(self.lenPc * self.chargeFraction)*self.numPc), \\\n \"The number of positively charged beads is wrong!\\n\"\n assert self.numAnions == int(np.floor(self.lenPa * self.chargeFraction)*self.numPa), \\\n \"The number of negatively charged beads is wrong!\\n\"",
"def build_basis(self):\n if self.debug:\n print('sps_basis: rebuilding basis')\n # Setup the internal component basis arrays\n inwave = self.ssp.wavelengths\n nbasis = len(np.atleast_1d(self.params['mass']))\n self.nbasis = nbasis\n # nbasis = ( len(np.atleast_1d(self.params['zmet'])) *\n # len(np.atleast_1d(self.params['tage'])) )\n self.basis_spec = np.zeros([nbasis, len(inwave)])\n self.basis_mass = np.zeros(nbasis)\n\n i = 0\n tesc = self.params['dust_tesc']\n dust1, dust2 = self.params['dust1'], self.params['dust2']\n for j, zmet in enumerate(self.params['zmet']):\n for k, tage in enumerate(self.params['tage']):\n # get the intrinsic spectrum at this metallicity and age\n if self.safe:\n # do it using compsp\n if self.ssp._zcontinuous > 0:\n self.ssp.params['logzsol'] = zmet\n else:\n self.ssp.params['zmet'] = zmet\n w, spec = self.ssp.get_spectrum(tage=tage, peraa=True)\n mass = self.ssp.stellar_mass\n else:\n # do it by hand. Faster but dangerous\n spec, mass, lbol = self.ssp.ztinterp(zmet, tage, peraa=True)\n self.basis_spec[i, :] = spec\n self.basis_mass[i] = mass\n i += 1\n self.basis_dirty = False"
] | [
"0.55873483",
"0.54603845",
"0.54429716",
"0.5408802",
"0.5403582",
"0.5389649",
"0.53191894",
"0.5310863",
"0.52691495",
"0.52479964",
"0.52202135",
"0.5188183",
"0.51712984",
"0.5130737",
"0.512443",
"0.5119801",
"0.5115056",
"0.511297",
"0.51073724",
"0.51057136",
"0.509822",
"0.50856316",
"0.5078628",
"0.5065311",
"0.50559086",
"0.50515634",
"0.50501007",
"0.5047573",
"0.50426716",
"0.50258875"
] | 0.5812885 | 0 |
messages1 and messages2 represent the encoded headlines from two news sources; corr represents the correlation between the two; currently returns the average correlation | def average_similarity(messages1, messages2):
if np.array_equal(messages2, messages1):
return 1
corr = np.corrcoef(messages1, messages2)
return np.average(corr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def corr(arr1, arr2):\n\n\n X = []\n Y = []\n for index in range(len(arr1)):\n if arr1[index] == None or arr2[index] == None:\n continue\n X.append(arr1[index])\n Y.append(arr2[index])\n\n\n r = np.corrcoef(X, Y)[0,1]\n f = 0.5*np.log((1+r)/(1-r))\n se = 1/np.sqrt(len(X)-3)\n ucl = f + 2*se\n lcl = f - 2*se\n\n lcl = (np.exp(2*lcl) - 1) / (np.exp(2*lcl) + 1)\n ucl = (np.exp(2*ucl) - 1) / (np.exp(2*ucl) + 1)\n\n return r,lcl,ucl",
"def _merge_correlation_helper(corr_mat1, mean1, std1, n1,\n corr_mat2, mean2, std2, n2):\n if corr_mat1 is None:\n return corr_mat2\n elif corr_mat2 is None:\n return corr_mat1\n elif len(mean1) == 0:\n return corr_mat2\n elif len(mean2) == 0:\n return corr_mat1\n\n std_mat1 = np.outer(std1, std1)\n std_mat2 = np.outer(std2, std2)\n mean_diff_vector = mean1 - mean2\n mean_diff_mat = np.outer(mean_diff_vector, mean_diff_vector)\n\n cov1 = corr_mat1 * std_mat1\n cov2 = corr_mat2 * std_mat2\n\n n = n1 + n2\n\n cov = cov1 * (n1 - 1) + cov2 * (n2 - 1) + mean_diff_mat * (n1 * n2) / n\n cov = cov / (n - 1)\n\n delta = mean2 - mean1\n M2_1 = (n1 - 1) * (std1 ** 2)\n M2_2 = (n2 - 1) * (std2 ** 2)\n M2 = M2_1 + M2_2 + delta ** 2 * n1 * n2 / n\n std = np.sqrt(M2 / (n - 1))\n\n std_mat = np.outer(std, std)\n corr_mat = cov / std_mat\n\n return corr_mat",
"def getcorrelation(movieid1,movieid2):\n\n #the initialized integer, cosine_sum, has an initialized value of -100\n #such that in the case where correlation shouldn't be updated, the value\n #remains unchanged\n cosine_sum = NEGATIVE\n #variable r_a,i and r_b,i in the formula\n r_mv1 = 0\n r_mv2 = 0\n #numerator\n nume_sum = 0\n #two parts in the denominator (before taking square root)\n deno_mv1_sum = 0\n deno_mv2_sum = 0\n denominator = 0\n #variable that keeps track of count of common users\n currentCommon = 0\n\n #firstly check if the count of user passes the threshold for each movie\n if(len(dictMovie.get(movieid1))<threshold or\n len(dictMovie.get(movieid2))<threshold):\n #if either does not, returns a negative correlation (to be invalid)\n return cosine_sum\n #if both pass threshold, get the intersection (of users) of two movies\n else:\n intersect=dictMovie.get(movieid1).intersection(dictMovie.get(movieid2))\n #if the number of common users is smaller than threshold, return\n if (len(intersect) < threshold):\n return cosine_sum\n #otherwise, start counting correlation\n else:\n #get the average rating of two movies\n mv1_bar = float(dictMovieRate.get(movieid1))\n mv2_bar = float(dictMovieRate.get(movieid2))\n #iterate through common users and use formula\n for commonuser in intersect:\n #increment common user count\n currentCommon += 1\n r_mv1 = int(dictUser.get(commonuser).get(movieid1))\n r_mv2 = int(dictUser.get(commonuser).get(movieid2))\n nume_sum += ( (r_mv1)-mv1_bar )*( (r_mv2)-mv2_bar )\n deno_mv1_sum += ( (r_mv1)-mv1_bar )**2\n deno_mv2_sum += ( (r_mv2)-mv2_bar )**2\n #when done with denominator separate calculation, combine\n denominator = math.sqrt(deno_mv1_sum * deno_mv2_sum)\n #handle the case where denominator=0 (invalid)\n if denominator == 0:\n return cosine_sum\n #otherwise, successful. return valid values and pass in\n #common count to global variable for program to catch\n else:\n cosine_sum = nume_sum / denominator\n global currentCommonCount\n currentCommonCount = currentCommon\n return cosine_sum",
"def _compute_correlation(ts1, ts2, comparison_mode, correlation_type,\r\n tail_type, num_permutations, confidence_level,\r\n perform_detailed_comparisons=False,\r\n expected_sample_id=None):\r\n # Convert our notion of tail type into the format expected by PyCogent's\r\n # correlation_test().\r\n if tail_type == 'two-sided':\r\n tail_type = None\r\n\r\n if comparison_mode != 'paired' and comparison_mode != 'expected':\r\n raise ValueError(\"Invalid comparison mode '%s'. Must be one of %r.\" %\r\n (comparison_mode, comparison_modes))\r\n\r\n # Make sure that the second taxa summary has only one sample if we weren't\r\n # provided an expected sample ID to compare against.\r\n if (comparison_mode == 'expected' and expected_sample_id is None and\r\n len(ts2[0]) != 1):\r\n raise ValueError(\"The second taxa summary file must contain a single \"\r\n \"sample (column) to compare all samples in the first taxa \"\r\n \"summary file against when the comparison mode is 'expected' \"\r\n \"and an expected sample ID is not provided. You provided a \"\r\n \"file with %d samples.\"\r\n % len(ts2[0]))\r\n\r\n if comparison_mode == 'paired':\r\n # Make sure the number of samples match between the two files (the IDs\r\n # do not have to match because of the sample ID map).\r\n if len(ts1[0]) != len(ts2[0]):\r\n raise ValueError(\"The two taxa summaries are incompatible because \"\r\n \"they do not have the same number of sample IDs. \"\r\n \"The taxa summaries must be made compatible \"\r\n \"before attempting to perform \"\r\n \"pairwise-comparisons between samples.\")\r\n\r\n # Make sure the taxa information is the same (i.e. the summaries have been\r\n # sorted and filled).\r\n if ts1[1] != ts2[1]:\r\n raise ValueError(\"The taxa do not match exactly between the two taxa \"\r\n \"summary files. The taxa must be sorted and filled \"\r\n \"before attempting to compare them.\")\r\n\r\n # Find the index of the expected sample ID.\r\n if comparison_mode == 'expected':\r\n if expected_sample_id:\r\n try:\r\n expected_idx = ts2[0].index(expected_sample_id)\r\n except ValueError:\r\n raise ValueError(\"The expected sample ID '%s' is not in the \"\r\n \"taxa summary file.\" % expected_sample_id)\r\n else:\r\n # We know the 'expected' taxa summary has a single sample in it, so\r\n # this is the only possible index.\r\n expected_idx = 0\r\n\r\n # Compute the overall correlation between each sample and the expected\r\n # sample, or each of the paired samples, and optionally the correlation\r\n # between each pair of samples individually.\r\n corr_vec = None\r\n if perform_detailed_comparisons:\r\n corr_vec = []\r\n num_comparisons = len(ts1[0])\r\n\r\n all_ts1_data = []\r\n all_ts2_data = []\r\n for samp_idx, samp_id in enumerate(ts1[0]):\r\n if comparison_mode == 'paired':\r\n paired_idx = samp_idx\r\n elif comparison_mode == 'expected':\r\n paired_idx = expected_idx\r\n else:\r\n # Redundant check, but here for safety in case the one above is\r\n # changed or removed.\r\n raise ValueError(\"Invalid comparison mode '%s'. 
Must be one of \"\r\n \"%r.\" % (comparison_mode, comparison_modes))\r\n\r\n # Grab the columns of data for the current sample and its pair.\r\n ts1_data = ts1[2].T[samp_idx]\r\n ts2_data = ts2[2].T[paired_idx]\r\n all_ts1_data.extend(ts1_data)\r\n all_ts2_data.extend(ts2_data)\r\n\r\n if perform_detailed_comparisons:\r\n # Compare the current sample and its pair.\r\n corr_coeff, param_p_val, unused, nonparam_p_val, conf_interval = \\\r\n correlation_test(ts1_data, ts2_data,\r\n method=correlation_type,\r\n tails=tail_type,\r\n permutations=num_permutations,\r\n confidence_level=confidence_level)\r\n\r\n # Compute the Bonferroni-corrected p-values.\r\n param_p_val_corr = min(param_p_val * num_comparisons, 1)\r\n nonparam_p_val_corr = None if nonparam_p_val is None else \\\r\n min(nonparam_p_val * num_comparisons, 1)\r\n\r\n corr_vec.append((samp_id, ts2[0][paired_idx], corr_coeff,\r\n param_p_val, param_p_val_corr, nonparam_p_val,\r\n nonparam_p_val_corr, conf_interval))\r\n\r\n # Compare all paired samples at once.\r\n results = correlation_test(all_ts1_data, all_ts2_data,\r\n method=correlation_type, tails=tail_type,\r\n permutations=num_permutations,\r\n confidence_level=confidence_level)\r\n # We don't need to return all of the permuted correlation coefficients.\r\n overall_corr = (results[0], results[1], results[3], results[4])\r\n return overall_corr, corr_vec",
"def _merge_correlation(self, other):\n corr_mat1 = self.correlation_matrix\n corr_mat2 = other.correlation_matrix\n n1 = self.total_samples - self.row_is_null_count\n n2 = other.total_samples - other.row_is_null_count\n if n1 == 0:\n return corr_mat2\n if n2 == 0:\n return corr_mat1\n\n if corr_mat1 is None or corr_mat2 is None:\n return None\n\n # get column indices without nan\n col_ids1 = np.where(~np.isnan(corr_mat1).all(axis=0))[0]\n col_ids2 = np.where(~np.isnan(corr_mat2).all(axis=0))[0]\n\n if len(col_ids1) != len(col_ids2) or len(col_ids1) <= 1:\n return None\n if (col_ids1 != col_ids2).any():\n return None\n\n mean1 = np.array(\n [self._profile[idx].profile['statistics']['mean']\n for idx in range(len(self._profile)) if idx in col_ids1])\n std1 = np.array(\n [self._profile[idx].profile['statistics']['stddev']\n for idx in range(len(self._profile)) if idx in col_ids1])\n\n mean2 = np.array(\n [other._profile[idx].profile['statistics']['mean']\n for idx in range(len(self._profile)) if idx in col_ids2])\n std2 = np.array(\n [other._profile[idx].profile['statistics']['stddev']\n for idx in range(len(self._profile)) if idx in col_ids2])\n return self._merge_correlation_helper(corr_mat1, mean1, std1, n1,\n corr_mat2, mean2, std2, n2)",
"def determine_correlation(var1,var2):\n v1 = np.array(var1)\n v2 = np.array(var2)\n mat = np.c_[(v1,v2)]# np.vstack((v1,v2)) #\n corr = np.corrcoef(mat.T)\n return corr[0][1]",
"def image_correlation(image1, image2):\n im1=im_to_coord(image1)\n im2=im_to_coord(image2)\n z1=im1[:,2]\n z2=im2[:,2]\n mu_z1 = z1.mean()\n mu_z2 = z2.mean()\n n = z1.shape[0]\n s_z1 = z1.std(0, ddof=n - 1)\n s_z2 = z2.std(0, ddof=n - 1)\n cov = np.dot(z1,\n z2.T) - n * np.dot(mu_z1,\n mu_z2)\n return cov / np.dot(s_z1, s_z2)",
"def coupling_coef_corrs(coupling_coefs1, coupling_coefs2, correlation='pearson'):\n n_neurons = coupling_coefs1.shape[0]\n correlations = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n ccs1 = coupling_coefs1[neuron]\n ccs2 = coupling_coefs2[neuron]\n\n if np.array_equal(ccs1, ccs2):\n correlations[neuron] = 1.\n elif np.all(ccs1 == 0) or np.all(ccs2 == 0):\n correlations[neuron] = 0\n else:\n if correlation == 'pearson':\n correlations[neuron] = np.corrcoef(ccs1, ccs2)[0, 1]\n elif correlation == 'spearman':\n correlations[neuron] = spearmanr(ccs1, ccs2).correlation\n elif correlation == 'cosine':\n correlations[neuron] = cosine_similarity(ccs1, ccs2)\n\n return correlations",
"def _get_correlation(self, user1_id, user2_id):\n shared_ratings = self.get_shared_ratings(user1_id, user2_id)\n\n # Substract means for both users\n shared_ratings['rating_x'] -= self.get_mean_user_rating(user1_id)\n shared_ratings['rating_y'] -= self.get_mean_user_rating(user2_id)\n\n # Compute correlation as inverse of disparity\n disparity = (shared_ratings['rating_x'] - shared_ratings['rating_y']).abs().mean()\n return 1.0/disparity",
"def test__same_text_correlation(self):\n \n _log.info('-'*80)\n \n # arrange \n text1 = \"love is rain as long story short\"\n text2 = text1\n\n dump_file = getInputFile(\"swiki_knowledge_output.xml\")\n parsed_file = getOutputFile(\"swiki_knowledge_output.parsed.xml\")\n #wdb_file = getOutputFile(\"swiki_knowledge_output.wdb\")\n\n articles = ['Rain', 'Love', 'Tree'] \n \n # act\n wn.make_dump(dump_file, articles, compress=False)\n wn.parse_dump(dump_file, parsed_file)\n db_wrapper = wn.build_database_wrapper(parsed_file, StopWordsStemmer([]))\n \n #self.addCleanup(os.remove, self.tmp_dump_file)\n \n comparer = SemanticComparer(db_wrapper)\n correlation = comparer.compare(text1, text2)\n _log.info(test_utils.get_texts_correlation_message(text1, text2, correlation))\n self.assertAlmostEqual(correlation, 1.0, msg=\"for same text correlation should be 1\")",
"def corr_score(file1,file2,delta,bin=1.,dur=100.,ncell=500):\r\n\td1 = numpy.loadtxt(file1)\r\n\td2 = numpy.loadtxt(file2)\r\n\tx = numpy.zeros(int(ncell*dur/bin))\r\n\ty = numpy.zeros(int(ncell*dur/bin))\r\n\tfor j in range(ncell):\r\n\t\tif d1.size == 2:\r\n\t\t\ts1 = numpy.array(d1[0]*(d1[1]==j))\r\n\t\telse:\r\n\t\t\ts1 = d1[d1[:,1]==j,0]\r\n\t\tif d2.size == 2:\r\n\t\t\ts2 = numpy.array(d2[0]*(d2[1]==j))\r\n\t\telse:\r\n\t\t\ts2 = d2[d2[:,1]==j,0]\r\n\t\tkern = numpy.append(numpy.arange(delta/bin),numpy.arange(delta/bin,-1,-1))\r\n\t\tts1,dump = pylab.histogram(s1,numpy.arange(0.,dur+bin,bin))\r\n\t\tts2,dump = pylab.histogram(s2,numpy.arange(0.,dur+bin,bin))\r\n\t\tx[j*dur/bin:(j+1)*dur/bin] = numpy.convolve(ts1,kern,'same')\r\n\t\ty[j*dur/bin:(j+1)*dur/bin] = numpy.convolve(ts2,kern,'same')\r\n x = x - pylab.mean(x)\r\n y = y - pylab.mean(y)\r\n cor = sum(x*y)/(len(x)*pylab.std(x)*pylab.std(y))\r\n return cor",
"def corr(a, b):\n ma = np.mean(a)\n mb = np.mean(b)\n\n a_ = a - ma\n b_ = b - mb\n\n norma = np.sqrt(np.sum(a_ ** 2, axis=0))\n normb = np.sqrt(np.sum(b_ ** 2, axis=0))\n\n norma[norma < TOLERANCE] = 1.0\n normb[normb < TOLERANCE] = 1.0\n\n a_ *= 1.0 / norma\n b_ *= 1.0 / normb\n\n ip = np.dot(a_.T, b_)\n\n if ip.shape == (1, 1):\n return ip[0, 0]\n else:\n return ip",
"def combine_for_correlation(df1=get_us_ridership(), df2=get_sales_data()):\n df1.index.astype(int)\n df2.index.astype(int)\n temp = pd.concat([df1, df2], axis=1)\n return temp.dropna()",
"def calculate_distance(seq1,seq2):\r\n mmcounter = 0 #mismatchcount\r\n seqlen = 0 #sequence length\r\n \r\n #cout the sequence length and mismatches\r\n for i in range(len(seq1)):\r\n if seq1[i]!='-' and seq2[i]!='-':\r\n seqlen += 1\r\n if seq1[i] != seq2[i]:\r\n mmcounter += 1\r\n #compute p\r\n p = (mmcounter/seqlen)\r\n #adjust p \r\n if p >= 0.75:\r\n pcorr = float(30)\r\n else:\r\n pcorr = (-3/4)*np.log(1-((4/3)*p))\r\n \r\n return(pcorr)",
"def find_correspondences(pts1, pts2, desc1, desc2, match_score_type='ratio'):\n N = pts1.shape[0]\n X = np.sum(desc1**2, axis=1, keepdims=True)\n Y = np.sum(desc2**2, axis=1, keepdims=True).T\n XY = np.dot(desc1,desc2.T)\n L = X + Y - 2*XY\n\n D = (np.maximum(L, 0))\n scores = np.min(D, axis = 1)\n indices = np.argmin(D,axis = 1)\n corr = []\n for j,index in enumerate(indices):\n corr.append(np.hstack([pts1[j],pts2[index]]))\n if match_score_type=='ratio': \n p = np.sort(D, axis = 1)\n scores = p[:,0]/p[:,1]\n return np.array(corr), indices, scores",
"def xcorr2_qwik(img0, img1):\n # 2009-12-17 10:13 IJC: Created. Based on idea by J. Johnson.\n from numpy import zeros, max, min, sum\n\n im00 = img0.sum(0)\n im01 = img0.sum(1)\n im10 = img1.sum(0)\n im11 = img1.sum(1)\n n0 = len(im00)\n n1 = len(im01)\n noffsets0 = 2*n0-1\n noffsets1 = 2*n1-1\n corr0 = zeros(noffsets0,float)\n corr1 = zeros(noffsets1,float)\n\n for ii in range(noffsets0):\n firstind0 = max((ii-n0+1,0))\n lastind0 = min((n0, ii+1))\n firstind1 = max((n0-ii-1,0))\n lastind1 = min((2*n0-ii-1,n0))\n corr0[ii] = sum(im00[firstind0:lastind0]*im10[firstind1:lastind1])\n\n for jj in range(noffsets1):\n firstind0 = max((jj-n0+1,0))\n lastind0 = min((n0, jj+1))\n firstind1 = max((n0-jj-1,0))\n lastind1 = min((2*n0-jj-1,n0))\n corr1[jj] = sum(im01[firstind0:lastind0]*im11[firstind1:lastind1])\n\n ret = find([corr0==corr0.max()])-n0+1, find([corr1==corr1.max()])-n0+1\n return ret",
"def compare_emails(first, second):\n match = 0\n ignored = ['Subject', 'From', 'X-Authentication-Warning', 'Received']\n # Compare subject\n if first.subject == second.subject:\n match += SUBJECT_PRIORITY\n elif not_empty(first.subject, second.subject):\n match += compare_dicts(compute_word_frequencies_from_text(first.subject),\n compute_word_frequencies_from_text(second.subject)) * SUBJECT_PRIORITY / 2\n # they are not equal, only some words occurrences\n\n # Compare from\n if first.From == second.From:\n match += FROM_PRIORITY\n\n # compare X authentication warning\n if first.x_authentication_warning == second.x_authentication_warning:\n match += WARNING_PRIORITY\n\n # compare receive history chain\n length = max(len(first.received), len(second.received))\n receive_match = set(first.received).intersection(second.received)\n if length > 0:\n match += (len(receive_match) / length) * RECEIVED_PRIORITY\n\n MatchedHeaders = 0\n # compare secondary headers\n for header in first.AllHeaders:\n if header[0] not in ignored:\n if header in second.AllHeaders:\n MatchedHeaders += 1\n\n match += SECONDARY_PRIORITY * MatchedHeaders / max(len(first.AllHeaders), len(second.AllHeaders))\n # compare payloads\n match += PAYLOAD_PRIORITY * compare_payloads(first.payloads, second.payloads)\n return match",
"def correlate(eye1, eye2):\n\n assert len(eye1) == len(eye2), \"Eyes must come in pairs\"\n\n # Start off this way; since A and B have no particular meaning, this could\n # just as well be reversed.\n eyeA = [eye1[0]]\n eyeB = [eye2[0]]\n\n # Skip frame 0, since it has already been assigned\n for i in range(1, len(eye1)):\n da1 = distance(eyeA[-1], eye1[i])\n da2 = distance(eyeA[-1], eye2[i])\n db1 = distance(eyeB[-1], eye1[i])\n db2 = distance(eyeB[-1], eye2[i])\n if (da1 < db1) and (db2 < da2):\n eyeA.append(eye1[i])\n eyeB.append(eye2[i])\n elif (da2 < db2) and (db1 < da1):\n eyeA.append(eye2[i])\n eyeB.append(eye1[i])\n else:\n raise RuntimeError('Ambiguous eye assignment')\n return (eyeA, eyeB)",
"def calculate_correlation(data):\n pass",
"def correlate(spectrum1, spectrum2, range=None, unit=None, errorweight=False):\n\n if range is not None:\n spectrum1 = spectrum1.slice(*range, unit=unit)\n spectrum2 = spectrum2.slice(*range, unit=unit)\n\n if not (spectrum1.xarr.shape == spectrum2.xarr.shape) or not all(spectrum1.xarr == spectrum2.xarr):\n spectrum2 = interpolation.interp(spectrum2, spectrum1)\n\n data1 = spectrum1.data\n data2 = spectrum2.data\n\n xcorr = np.correlate(data1, data2, mode='same')\n\n # very simple propagation of error\n # each element is multiplied, multiplicative error is given such that (sigma_xy/xy)**2 = (sigma_x/x)**2 + (sigma_y/y)**2\n # error = (np.correlate( (spectrum1.error/spectrum1.data)**2 , np.ones(xcorr.shape), mode='same') +\n # np.correlate( (spectrum2.error/spectrum2.data)**2 , np.ones(xcorr.shape), mode='same'))**0.5 * xcorr\n # That approach sucks - what if data == 0?\n #\n # this might be more correct: http://arxiv.org/pdf/1006.4069v1.pdf eqn 4\n # but it doesn't quite fit my naive expectations so:\n error = (np.correlate((spectrum1.error)**2, np.ones(xcorr.shape), mode='same') +\n np.correlate((spectrum2.error)**2, np.ones(xcorr.shape), mode='same'))**0.5\n\n xarr = spectrum1.xarr\n x_range = xarr.max()-xarr.min()\n xmin = -x_range/2.\n xmax = x_range/2.\n offset_values = np.linspace(xmin, xmax, len(xarr))\n\n offset_xarr = units_module.SpectroscopicAxis(offset_values, unit=xarr.unit)\n\n header = headers.intersection(spectrum1.header, spectrum2.header)\n header['CRPIX1'] = 1\n try:\n header['CRVAL1'] = xmin\n except ValueError:\n try:\n header['CRVAL1'] = xmin.tolist()\n except NotImplementedError:\n header['CRVAL1'] = xmin.value\n try:\n header['CDELT1'] = offset_xarr.cdelt()\n except ValueError:\n header['CDELT1'] = offset_xarr.cdelt().value\n\n return classes.XCorrSpectrum(xarr=offset_xarr, data=xcorr, header=header, error=error)",
"def correlation(self, other):\n dates=self.get_dates(other.get_dates())\n #print(len(self.get_values(dates)))\n #print(len(other.get_values(dates)))\n #print(self.get_values(dates))\n r,p=stats.pearsonr(self.get_values(dates), other.get_values(dates))\n return r",
"def corr_coef_chan0(\n a: Union[np.ndarray, torch.Tensor], b: Union[np.ndarray, torch.Tensor]\n) -> float:\n if a is None or b is None:\n return None\n a = a[0:1,]\n b = b[0:1,]\n return corr_coef(a, b)",
"def main_correlate(tel1, date1, tel2, date2, nchan, tstart, tend, dedisperse,\n do_foldspec, ntbin, ngate,\n do_waterfall, ntw_min,\n save_xcorr, verbose=0):\n comm = MPI.COMM_WORLD\n if comm.size > 1 and save_xcorr:\n if comm.rank == 0:\n\t print(\"Warning, h5py mpio is sometimes slow. Consider disabling save_xcorr\")\n\t# save_xcorr = False\n # observing parameters\n t0 = Time(tstart, scale='utc')\n t1 = Time(tend, scale='utc')\n\n Obs = obsdata()\n obskey1 = Obs[tel1].nearest_observation(date1)\n obskey2 = Obs[tel2].nearest_observation(date2)\n psr1 = Obs[tel1][obskey1]['src']\n psr2 = Obs[tel2][obskey2]['src']\n files1 = Obs[tel1].file_list(obskey1)\n files2 = Obs[tel2].file_list(obskey2)\n\n assert psr1 == psr2\n if comm.rank == 0:\n print(\"forming visibilities from (telescope, observation_key) = \\n\"\n \"\\t ({0}, {1}) and ({2}, {3}), source {4}\".format(tel1, obskey1, tel2, obskey2, psr1))\n dm = Obs['psrs'][psr1]['dm']\n with LOFARdata_Pcombined(*files1, comm=comm) as fh1,\\\n GMRTdata(*files2, comm=comm) as fh2:\n phasepol1 = Obs['lofar'][obskey1].get_phasepol(fh1.time0, rphase=None)\n phasepol2 = Obs['gmrt'][obskey2].get_phasepol(fh2.time0, rphase=None)\n nt = min(fh1.ntimebins(t0, t1), fh2.ntimebins(t0, t1))\n # out = (foldspec, icount, waterfall)\n out = correlate.correlate(fh1, fh2, dm=dm, nchan=nchan, ngate=ngate,\n ntbin=ntbin, nt=nt, ntw=ntw_min,\n t0=t0, t1=t1, dedisperse=dedisperse,\n phasepol=(phasepol1, phasepol2),\n do_waterfall=do_waterfall,\n do_foldspec=do_foldspec,\n save_xcorr=save_xcorr,\n comm=comm)\n myfoldspec = out[0]\n myicount = out[1]\n mywaterfall = out[2]\n\n savepref = \"{0}{1}_{2}chan{3}ntbin\".format(tel1[0], tel2[0], nchan, ntbin)\n dt = t1 - t0\n if do_waterfall:\n waterfall = np.zeros_like(mywaterfall)\n comm.Reduce(mywaterfall, waterfall, op=MPI.SUM, root=0)\n if comm.rank == 0:\n # waterfall = normalize_counts(waterfall)\n np.save(\"{0}waterfall_{1}+{2:08}sec.npy\"\n .format(savepref, t0, dt.sec), waterfall)\n\n if do_foldspec:\n foldspec = np.zeros_like(myfoldspec)\n icount = np.zeros_like(myicount)\n comm.Reduce(myfoldspec, foldspec, op=MPI.SUM, root=0)\n comm.Reduce(myicount, icount, op=MPI.SUM, root=0)\n if comm.rank == 0:\n fname = (\"{0}foldspec_{1}+{2:08}sec.npy\")\n iname = (\"{0}icount_{1}+{2:08}sec.npy\")\n np.save(fname.format(savepref, t0, dt.sec), foldspec)\n np.save(iname.format(savepref, t0, dt.sec), icount)\n\n # get normalized flux in each bin (where any were added)\n f2 = normalize_counts(foldspec, icount)\n foldspec1 = f2.sum(axis=2)\n fluxes = foldspec1.sum(axis=0)\n foldspec3 = f2.sum(axis=0)\n\n with open('{0}flux_{1}+{2:08}sec.dat'\n .format(savepref, t0, dt.sec), 'w') as f:\n for i, flux in enumerate(fluxes):\n f.write('{0:12d} {1:12.9g}\\n'.format(i + 1, flux))\n\n plots = True\n if plots and comm.rank == 0:\n if do_waterfall:\n w = waterfall.copy()\n try:\n pmap('{0}waterfall_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec), w, 1, verbose=True)\n except:\n pass\n if do_foldspec:\n pmap('{0}folded_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec), foldspec1, 0, verbose)\n # TODO: Note, I (aaron) don't think this works for LOFAR data\n # since nchan=20, but we concatenate several subband files\n # together, so f2.nchan = N_concat * nchan\n # It should work for my \"new\" LOFAR_Pconcate file class\n pmap('{0}foldedbin_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec),\n f2.transpose(0, 2, 1).reshape(nchan, -1), 1, verbose)\n pmap('{0}folded3_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec), foldspec3, 0, verbose)",
"def find_similarity(message1, message2):\n total = 0\n for i in range(len(message1)):\n max = 0\n for j in range(len(message2)):\n message1_encoded = embed([message1[i]])\n message2_encoded = embed([message2[j]])\n sim = average_similarity(message1_encoded, message2_encoded)\n if sim > max:\n max = sim\n total += max\n return total/len(message1)",
"def cross_correlation(values1, values2, lags=100):\n lags, corr, line, x = pl.xcorr( values1, values2, maxlags=lags, usevlines=False, marker=None)\n return lags, corr",
"def coupling_coef_corrs(fits_path, dataset1, dataset2):\n fits = h5py.File(fits_path, 'r')\n coefs1 = np.median(fits[dataset1]['coupling_coefs'][:], axis=0)\n coefs2 = np.median(fits[dataset2]['coupling_coefs'][:], axis=0)\n\n n_neurons = coefs1.shape[0]\n corrs = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n corrs[neuron] = np.corrcoef(coefs1[neuron], coefs2[neuron])[0, 1]\n\n return corrs",
"def corr_1d(tensor_a: torch.Tensor, tensor_b: torch.Tensor):\n assert tensor_a.dim() == 2 and tensor_b.dim() == 2, \\\n \"corr_1d :: tensor_a and tensor_b must be 2D\"\n assert tensor_a.size(0) == tensor_b.size(0) and \\\n tensor_a.dim(1) == tensor_b.dim(1), \\\n \"corr_1d :: tensor_a and tensor_b must have same shape\"\n\n num = tensor_a.mul(tensor_b).mean(1) - tensor_a.mean(1)*tensor_b.mean(1)\n den = ((tensor_a.pow(2).mean(1) - tensor_a.mean(1).pow(2)).pow(0.5) *\n (tensor_b.pow(2).mean(1) - tensor_b.mean(1).pow(2)).pow(0.5))\n return num / den.add(1e-8)",
"def mimo_sync(self,re1,im1,re2,im2):\n wnd = np.int_(4*(self._GI + self._FFT))\n Nprep = np.int_(self._FFT/2)\n mavg = np.int_(self._FFT/4) # moving average period for power and corr12\n mavg3 = 2*self._FFT # average period for corr3\n if np.size(re1)!=np.size(im1) or np.size(re2)!=np.size(im2) or np.size(re1)!=np.size(re2):\n raise Exception(\"Vectors re1, im1, re2, im2 do not have the same length!!!\")\n if np.size(re1) < (wnd-mavg+mavg3+self._FFT/2):\n raise Exception(\"Vectors re1, im1, re2, im2 not long enough ({}) to run synchronization (required length={})!!!\".format(np.size(re1),wnd-mavg+mavg3+self._FFT/2))\n iqcpx = np.empty(re1.shape, dtype=complex)\n iqcpx.real = (re1+re2)/2\n iqcpx.imag = (im1+im2)/2\n iqdata = np.concatenate((np.zeros(Nprep,),iqcpx))\n power = np.zeros((wnd,1))\n corr12 = np.zeros((wnd,1), dtype=complex)\n corr3 = np.zeros((wnd,1), dtype=complex)\n # perform the autocorrelation on the STF symbols\n for n in range(0, wnd-mavg):\n power[n] = np.real(np.dot(iqdata[n:n+mavg].transpose(),\n iqdata[n:n+mavg].conjugate())/mavg)\n corr12[n+mavg] = np.sum(iqdata[n+self._FFT/4:n+self._FFT/4+mavg] *\n np.conj(iqdata[n+self._FFT/2:n+self._FFT/2+mavg]) -\n iqdata[n:n+mavg] *\n np.conj(iqdata[n+self._FFT/4:n+self._FFT/4+mavg]))\n corr3[n+mavg] = np.dot(np.transpose(iqdata[n+self._FFT/4:n+self._FFT/4+mavg3]),\n np.conj(iqdata[n+self._FFT/2:n+self._FFT/2+mavg3]))\n # get first index where power rises above threshold\n idx1 = np.flatnonzero((power>0.75*np.sum(power)/np.size(power)))[0]\n idx2 = np.argmax(np.abs(corr12[idx1:idx1+self._FFT/2]))\n idx = idx1+idx2-Nprep\n c3i = idx1+idx2-Nprep-1+mavg\n # get the phase at the start index and calculate the frequency offset\n fo_meas = -np.angle(np.mean(corr3[c3i:c3i+mavg]))/(np.pi/2*self._FFT)*self._FS\n return idx, fo_meas",
"def compute_ncc_impl(image1, image2):\n height, width, _ = image1.shape\n ncc = np.zeros((height, width))\n for i in range(height):\n for j in range(width):\n ncc[i, j] = np.correlate(image1[i, j], image2[i, j])[0]\n return ncc",
"def delay_between(h1, h2):\n h1 = np.atleast_2d(h1)\n h2 = np.atleast_2d(h2)\n assert h1.shape[-1] == h2.shape[-1], \"h1 and h2 must have same number of samples\"\n\n L = h1.shape[-1]\n\n delay = np.zeros((h1.shape[0], h2.shape[0]), dtype=int)\n for i in range(h1.shape[0]):\n for j in range(h2.shape[0]):\n xcorrmax = np.argmax(np.correlate(h2[j], h1[i], mode=\"full\"))\n delay[i, j] = xcorrmax - L + 1\n\n return delay.squeeze()"
] | [
"0.6506768",
"0.6241487",
"0.61946344",
"0.6189344",
"0.6137197",
"0.60439324",
"0.589026",
"0.5836525",
"0.58354324",
"0.58140975",
"0.5799354",
"0.575254",
"0.5746967",
"0.5743244",
"0.57305694",
"0.5727684",
"0.5720525",
"0.56870764",
"0.5681944",
"0.5680134",
"0.56749845",
"0.5632209",
"0.563108",
"0.5617163",
"0.56152314",
"0.5592436",
"0.55767226",
"0.55509746",
"0.554289",
"0.55399644"
] | 0.6749743 | 0 |
represents messages as vectors which are used to calculate similarity | def find_similarity(message1, message2):
total = 0
for i in range(len(message1)):
max = 0
for j in range(len(message2)):
message1_encoded = embed([message1[i]])
message2_encoded = embed([message2[j]])
sim = average_similarity(message1_encoded, message2_encoded)
if sim > max:
max = sim
total += max
return total/len(message1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wrapMsg(self,vec):\n return vec.todense()",
"def plot_similarity(self) -> None:\n if isinstance(self.model, FastTextWrapper):\n self.valid_data[\"vector\"] = self.valid_data[\"text\"].apply(\n lambda x: self.model.inference(word_tokenize(x), sentence_level=True))\n else:\n self.valid_data[\"vector\"] = self.valid_data[\"text\"].apply(\n lambda x: self.model.inference(word_tokenize(x))[0])\n messages = list(self.valid_data[\"label\"])\n vectors = list(self.valid_data[\"vector\"])\n similarity_matrix(messages=messages, vectors=vectors, name=self.folder, save_path=self.base_path)",
"def semantic_vector(self,words, joint_words, info_content_norm):\n\t sent_set = set(words)\n\t semvec = np.zeros(len(joint_words))\n\t i = 0\n\t for joint_word in joint_words:\n\t if joint_word in sent_set:\n\t # if word in union exists in the sentence, s(i) = 1 (unnormalized)\n\t semvec[i] = 1.0\n\t if info_content_norm:\n\t semvec[i] = semvec[i] * math.pow(self.info_content(joint_word), 2)\n\t else:\n\t # find the most similar word in the joint set and set the sim value\n\t sim_word, max_sim = self.most_similar_word(joint_word, sent_set)\n\t semvec[i] = self.PHI if max_sim > self.PHI else 0.0\n\t if info_content_norm:\n\t semvec[i] = semvec[i] * self.info_content(joint_word) * self.info_content(sim_word)\n\t i = i + 1\n\t return semvec",
"def get_message_metrics(\n messages, hidden_sender, hidden_receiver, meta_data, img_features\n ):\n messages = messages.cpu().numpy()\n\n rsa_sr, rsa_si, rsa_ri, rsa_sm, topological_similarity, pseudo_tre = representation_similarity_analysis(\n img_features, meta_data, messages, hidden_sender, hidden_receiver, tre=True\n )\n\n # rsa = representation_similarity_analysis(messages, meta_data)\n l_entropy = language_entropy(messages)\n\n return (\n rsa_sr,\n rsa_si,\n rsa_ri,\n rsa_sm,\n topological_similarity,\n pseudo_tre,\n l_entropy,\n )",
"def tweetToVect(tweet, dicoGlove): \n #return model.infer_vector(tweet) \n \n gArray, wSize = w.wordsToGlove(tweet.split(), dicoGlove) \n meanMatrixOverview = w.meanWords(gArray, wSize)\n \n return meanMatrixOverview",
"def question_to_vec(question, embeddings, dim):\r\n\r\n words = question.split()\r\n\r\n counter = 0\r\n res = np.zeros(dim)\r\n for word in words:\r\n if word in embeddings:\r\n res += np.array(embeddings[word])\r\n counter += 1\r\n if counter!=0:\r\n return res/counter # mean of all word embeddings\r\n else:\r\n return res # vector of zeros\r",
"def seq2Vec(sequences):\r\n global dict_words_n_vectors\r\n for sent in sequences:\r\n for i in range(len(sent)):\r\n if sent[i] in dict_words_n_vectors:\r\n sent[i] = dict_words_n_vectors[sent[i]]\r\n else:\r\n sent[i] = np.zeros(300)\r\n return np.array(sequences, dtype=\"float32\")",
"def get_word_vector():\n\n patten = r\"[0-9\\s+\\.\\!\\/_,$%^*()?;;:-【】+\\\"\\']+|[+——!,;:。?、~@#¥%……&*()]+\"\n s1 = input(\"句子1:\").strip()\n s2 = input(\"句子2:\").strip()\n s1 = re.sub(patten, \" \", s1)\n s2 = re.sub(patten, \" \", s2)\n cut1 = jieba.cut(s1)\n cut2 = jieba.cut(s2)\n\n list_word1 = (' '.join(cut1)).split()\n list_word2 = (' '.join(cut2)).split()\n print(list_word1)\n print(list_word2)\n\n key_word = list(set(list_word1 + list_word2)) # 取并集\n print(key_word)\n\n word_vector1 = np.zeros(len(key_word)) # 给定形状和类型的用0填充的矩阵存储向量\n word_vector2 = np.zeros(len(key_word))\n\n for i in range(len(key_word)): # 依次确定向量的每个位置的值\n for j in range(len(list_word1)): # 遍历key_word中每个词在句子中的出现次数\n if key_word[i] == list_word1[j]:\n word_vector1[i] += 1\n for k in range(len(list_word2)):\n if key_word[i] == list_word2[k]:\n word_vector2[i] += 1\n\n print(word_vector1) # 输出向量\n print(word_vector2)\n return word_vector1, word_vector2",
"def text_to_vecs(self):\n # convert word strings into word vectors\n sent_vec = []\n for w in self.sentence:\n if w in self.word_vectors.getVocab():\n sent_vec.append( self.word_vectors.getWordVectors()[w] )\n else:\n sent_vec.append( self.word_vectors.getOOVWordVector() )\n \n assert(len(self.sentence) == len(sent_vec)) \n self.sent_vec = sent_vec",
"def wordSimilarityRatio(sent_1,sent_2):",
"def to_vector(texto,model,idf):\n tokens = normalizer(texto).split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in tokens: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec",
"def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))",
"def sentences2vec(self, sentences, unseen=None):\r\n keys = self.keys\r\n # print(sentences)\r\n if unseen:\r\n unseen_vec = self.model.wv.word_vec(unseen)\r\n\r\n # if unseen:\r\n # vec.append([self.model.wv.word_vec(y) if y in set(sentences) & keys\r\n # else unseen_vec for y in sentences])\r\n # else:\r\n # vec.append([self.model.wv.word_vec(y) for y in sentences\r\n # if y in set(sentences) & keys])\r\n vec = np.array([0 for _ in range(300)])\r\n for y in sentences:\r\n if len(vec) == 0:\r\n vec = np.array(self.model.wv.word_vec(y))\r\n elif y in self.keys:\r\n vec = vec + np.array(self.model.wv.word_vec(y))\r\n # print(len(vec))\r\n return vec",
"def question_to_vec(question, embeddings):\n\n dim = embeddings['dog'].size\n result = np.zeros((dim,))\n\n words = question.split(' ')\n\n count = 0\n for word in words:\n if word not in embeddings or not len(embeddings[word]):\n continue\n result += embeddings[word][:dim]\n count += 1\n\n return result / max(count, 1)",
"def doc2vec(self, text: str) -> np.array:\n # tfidf_matrix = self.tfidf.transform([text])\n # vectors = []\n # for token in self.tokenize(text):\n # if token in self.word2vec and token in self.feature_names:\n # tfidf_score = tfidf_matrix[0, self.feature_names.index(token)]\n # vectors.append(self.word2vec[token] * tfidf_score)\n vectors = [self.word2vec[token] for token in self.tokenize(text) if token in self.word2vec]\n if not vectors:\n return np.zeros(300)\n return np.mean(vectors, axis=0)",
"def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))",
"def solve(vectors):\n\tv = np.array(vectors)\n\tstop = 9\n\talignment_error1 = max(v[:,1])-min(v[:,1])\n\tupdate_positions(v)\n\talignment_error2 = max(v[:,1])-min(v[:,1])\n\trate_of_change = alignment_error1-alignment_error2\n\n\tupdate_positions(v, (alignment_error2-stop)/rate_of_change)\n\n\tmsg_start_y = min(v[:,1])\n\tmsg_start_X = min(v[:,0])\n\tmsg_width = max(v[:,0]) - msg_start_X + 1\n\tmsg_height = max(v[:,1]) - msg_start_y + 1 \n\t\n\tans = \"\"\n\n\tgrid = np.zeros(shape = (msg_width+1, msg_height+1))\n\tfor vector in v:\n\t\tgrid[vector[0]-msg_start_X, vector[1]-msg_start_y] = 1\n\n\n\tfor y in range(msg_height):\n\t\tans +=''.join(\"#\" if 1==grid[x,y] else \".\" for x in range(msg_width))\n\t\tans += \"\\n\"\n\n\treturn ans, (alignment_error1-stop)/rate_of_change",
"def _WordSimAveVec(self,df,a):\r\n #Obtain the course description for the course provided and convert the string into a list of individual words.\r\n Description = df['description'][a].split()\r\n #Create a placeholder zero vector of the same size as the vector embedding.\r\n Vector = np.zeros(self.WordVecModel.layer1_size)\r\n wordCount = 0\r\n #Iterate over each word in the description.\r\n for word in Description:\r\n #If the word is in the trained vocabulary, obtain the word vector. \r\n #Continue to add the word vectors to the placeholder vector to get the running sum.\r\n if word in self.WordVecModel.wv.vocab:\r\n vector = self.WordVecModel.wv.get_vector(word)\r\n Vector +=vector\r\n #Keep track of how many word vectors (which were included in the vocabulary) were added.\r\n wordCount +=1\r\n #Calculate the mean by dividing the sum by the number of vectors.\r\n return Vector/wordCount",
"def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec",
"def word2vec(self, words):\n with torch.no_grad():\n words = torch.LongTensor(self.doc2token(words))\n result = self.model.embedding(words).numpy()\n return result",
"def _words_to_vec(self, sentence):\n return torch.FloatTensor([self._use_embeddings(word) for word in sentence])",
"def build_matrix(self):\n \n for p1 in self._properties: \n p1 = p1.get_vectorized_data()\n \n for p2 in self._properties:\n p2 = p2.get_vectorized_data()\n v1, v2 = self.prepare_vectors(p1, p2)\n self._similarity_matrix.append(cosine_similarity([v1],[v2]))",
"def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec",
"def similarity(self, token1, token2):\n vec1 = self.get_vector(token1)\n vec2 = self.get_vector(token2)\n assert vec1 is not None and vec2 is not None, \"Cannot compute similarity between None type vectors.\"\n if not self.normalize:\n # if model not loaded as normalized embeddings \n vec1 = vec1 / np.linalg.norm(vec1)\n vec2 = vec2 / np.linalg.norm(vec2)\n return np.dot(vec1, vec2)",
"def embed(self, smi_or_mol):\n if not isinstance(smi_or_mol, Chem.Mol):\n mol = Chem.MolFromSmiles(smi_or_mol)\n else:\n mol = smi_or_mol\n wv = self.model.wv\n sentence = self.substructure(mol)\n vec = np.zeros(self.model.vector_size)\n for fp in sentence:\n if fp in wv.vocab:\n vec += wv[fp]\n return vec",
"def compute_similarity(self, vis_feats, language_feats, gram):\n queries_dim = language_feats.dim()\n M = language_feats.size(1) if queries_dim==3 else language_feats.size(0)\n N = vis_feats.size(0)\n d = self.embed_size\n\n # If too many queries, split computation to avoid out-of-memory\n max_num_queries = 100\n if M <= max_num_queries:\n vis_feats = vis_feats.unsqueeze(1).expand(N, M, d)\n scores_gram = torch.mul(vis_feats, language_feats)\n scores_gram = scores_gram.sum(2)\n scores = scores_gram.view(-1, M)\n #scores = torch.matmul(vis_feats, lang_feats.squeeze().transpose(0,1)) #other version\n\n else:\n scores_gram = [] \n vis_feats = vis_feats.unsqueeze(1).expand(N, M, d)\n num_splits = M//max_num_queries if (M%max_num_queries)==0 else M//max_num_queries+1\n for j in range(num_splits): \n start_query = j*max_num_queries \n end_query = start_query + max_num_queries if start_query + max_num_queries <= M else M\n scores_gram_split = torch.mul(vis_feats[:,start_query:end_query,:], language_feats[:,start_query:end_query,:])\n scores_gram_split = scores_gram_split.sum(2)\n scores_gram.append(scores_gram_split)\n scores = torch.cat([scores_gram_split for scores_gram_split in scores_gram],1)\n\n return scores",
"def average_similarity(messages1, messages2):\n if np.array_equal(messages2, messages1):\n return 1\n corr = np.corrcoef(messages1, messages2)\n return np.average(corr)",
"def compute_similarities_from_vec(self, dataset, a):\n self.model.fit(dataset.X, a)\n return self.model.coef_",
"def vectorize_tweet(tweet):\n tweet_vector = np.zeros(100)\n for word in tokenize(tweet.text):\n if word in word2vec.wv.vocab:\n tweet_vector = tweet_vector + word2vec[word]\n\n components = pca.transform(tweet_vector)\n x = components[0, 0]\n y = components[0, 1]\n\n return str(x), str(y)",
"def w2v_aggregation_letters(X, length_vector=100):\n global w2v_model_3gram\n if w2v_model_3gram == None:\n w2v_model_3gram = gensim.models.KeyedVectors.load_word2vec_format(os.path.join(os.environ['NOBULL_PATH'], 'w2v_char.vec'))\n X_raw = []\n for x in X:\n x_letter = cleanText_letters(x)\n X_raw.append(x_letter)\n\n\n num_row = len(X_raw)\n\n max_matrix = np.zeros(shape=(num_row, length_vector))\n\n average_matrix = np.zeros(shape=(num_row, length_vector))\n\n for row in range(num_row):\n \n temp_text = X_raw[row] \n temp_vector = temp_text.split()\n \n unique_vector = list(set(temp_vector))\n num_index = len(unique_vector)\n \n temp_matrix = np.zeros(shape=(num_index, length_vector))\n \n j = 0\n for word in unique_vector:\n \n temp_matrix[j] = get_vector(word, w2v_model_3gram, 100)\n j += 1\n\n max_matrix[row] = np.maximum.reduce(temp_matrix)\n average_matrix[row] = np.mean(temp_matrix, axis=0)\n \n result = np.concatenate((average_matrix, max_matrix), axis=1)\n result = sparse.csr_matrix(result)\n \n header = []\n \n for i in range(length_vector):\n temp_string = \"neww2v_average_\" + str(i) + \"-th\"\n header.append(temp_string)\n \n for i in range(length_vector):\n temp_string = \"neww2v_maximum_\" + str(i) + \"-th\"\n header.append(temp_string)\n\n return result, header"
] | [
"0.6673192",
"0.6187192",
"0.6166319",
"0.6151653",
"0.60369694",
"0.5990431",
"0.59603184",
"0.593852",
"0.59355754",
"0.5926192",
"0.5897822",
"0.58214325",
"0.577277",
"0.5761639",
"0.57491434",
"0.57217455",
"0.570953",
"0.5705797",
"0.56846756",
"0.56698257",
"0.5666215",
"0.56567603",
"0.5648202",
"0.56411135",
"0.5607599",
"0.55533266",
"0.55509007",
"0.5522629",
"0.55164415",
"0.5516118"
] | 0.6307364 | 1 |
An iterator that will in turn yield all drawable curves in the form of (kind, name, ds, style) tuples (where kind is one of 'algorithm', 'oracle', 'unifpf', 'strategy'). | def _pds_plot_iterator(pds, dim, funcId):
i = 0
for (algname, ds) in pds.algds_dimfunc((dim, funcId)):
yield ('algorithm', algname, ds, _style_algorithm(algname, i))
i += 1
yield ('oracle', 'oracle', pds.oracle((dim, funcId)), _style_oracle())
yield ('unifpf', 'eUNIF', pds.unifpf().dictByDimFunc()[dim][funcId][0], _style_unifpf())
i = 0
for (stratname, ds) in pds.stratds_dimfunc((dim, funcId)):
yield ('strategy', stratname, ds, _style_strategy(stratname, i))
i += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iter_svgs(self):\n for name in self.parent.layers:\n yield name, self.parent.layers[name]\n for elem in self.parent.elements:\n if isinstance(elem, SVG):\n yield None, elem",
"def efficiency_curves(self):\n for key in self._efficiency_curves:\n yield key, self._data[key]",
"def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)",
"def style_lines(self):\n self.parent.finalize()\n for name, svg in self.iter_svgs(): # recurse here\n for line in svg._meta.style_lines():\n yield line\n if isinstance(self.parent.style, str):\n yield self.parent.style\n else:\n for cls in self.parent.style:\n yield \"%s {\" % str(cls)\n for key, value in self.parent.style[cls].items():\n yield \" %s: %s;\" % (key, value)\n yield \"}\"",
"def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]",
"def lines(self):\n for pair in pairs(self.points):\n yield Line(pair, shape=self)",
"def _get_iterator(self, dataset_type, eval_mode, **kwargs):",
"def __iter__(self):\n if self.use_dic:\n for data in sorted(self.dic):\n self.data = data\n for activity in sorted(self.dic[data]):\n self.activity = activity\n for imsize in sorted(self.dic[data][activity]):\n self.imsize = imsize\n self.allimgs, self.alllabels = [], []\n for img in sorted(self.dic[data][activity][imsize]):\n self.img = img\n self.labels = self.dic[data][activity][imsize][img]\n if self.imlist:\n self.allimgs.append(self.img)\n self.alllabels.append(self.labels)\n else:\n yield data, activity, imsize, img, self.labels\n self.i += 1\n if self.imlist:\n yield data, activity, imsize, self.allimgs, self.alllabels\n self.i += 1\n else:\n for data in sorted(self.dic):\n self.img = data\n self.labels = self.dic[data]\n yield self.img, self.labels\n self.i += 1",
"def __iter__(self):\n for key in self.sprite_order:\n if key not in self.sprite_groups:\n # abstract type\n continue\n for s in self.sprite_groups[key]:\n yield s",
"def parse_and_construct_graphic_layer(ds):\r\n graphic_layers = list()\r\n for item in ds.SegmentSequence:\r\n layer = {\r\n \"GraphicLayer\": str(item.SegmentDescription).upper(),\r\n \"GraphicLayerOrder\": item.SegmentNumber,\r\n \"GraphicLayerRecommendedDisplayCIELabValue\": [49512, 38656, 52736]\r\n }\r\n graphic_layers.append(layer)\r\n return graphic_layers",
"def iterdescriptors(self):",
"def symbols(self):\n # get the names(identifiers) of all curves in the graph:\n curvenames = self.g.element_show()\n # foreach curve, add a diamond symbol, filled with the\n # color of the curve ('defcolor') and with a size of 2:\n for curvename in curvenames:\n self.g.element_configure(curvename, symbol='diamond',\n outlinewidth=2, fill='defcolor')",
"def __iter__(self):\n for feature in itertools.izip(self.shapes, self.records):\n yield feature",
"def iter_implementations(self, opt):\n opt_desc = self.opt_dict[opt]\n for imp in self.imp_dict[opt_desc['imptype']]:\n yield imp",
"def draw_stratas(enum_func):\n maximum_strata_index = 7\n graph = ot.Graph(\"\", \"$\\\\alpha_1$\", \"$\\\\alpha_2$\", True)\n if enum_func.__class__.__name__ == \"LinearEnumerateFunction\":\n graph.setTitle(\"Linear enumeration rule\")\n elif enum_func.__class__.__name__ == \"HyperbolicAnisotropicEnumerateFunction\":\n graph.setTitle(f\"q={enum_func.getQ()}\")\n offset = 0\n for strata_index in range(maximum_strata_index):\n strata_cardinal = enum_func.getStrataCardinal(strata_index)\n sample_in_layer = [enum_func(idx + offset) for idx in range(strata_cardinal)]\n offset += strata_cardinal\n cloud = ot.Cloud(sample_in_layer)\n cloud.setLegend(str(strata_index))\n cloud.setPointStyle(\"circle\")\n graph.add(cloud)\n palette = ot.DrawableImplementation.BuildDefaultPalette(maximum_strata_index)\n graph.setColors(palette)\n graph.setIntegerXTick(True)\n graph.setIntegerYTick(True)\n graph.setLegendPosition(\"topright\")\n return graph",
"def iter_segments(dataset: Dataset) -> Iterator: # noqa: E501\n if not hasattr(dataset, 'PixelData'):\n raise AttributeError(\n 'Data set does not contain a Pixel Data attribute.'\n )\n segment_description_lut = {\n int(item.SegmentNumber): item\n for item in dataset.SegmentSequence\n }\n segment_number_per_frame = np.array([\n int(item.SegmentIdentificationSequence[0].ReferencedSegmentNumber)\n for item in dataset.PerFrameFunctionalGroupsSequence\n ])\n pixel_array = dataset.pixel_array\n if pixel_array.ndim == 2:\n pixel_array = pixel_array[np.newaxis, ...]\n for i in np.unique(segment_number_per_frame):\n indices = np.where(segment_number_per_frame == i)[0]\n yield (\n pixel_array[indices, ...],\n tuple([\n dataset.PerFrameFunctionalGroupsSequence[index]\n for index in indices\n ]),\n segment_description_lut[i],\n )",
"def drawable_iterable(drawable, unpack_stacks = False, reverse_stacks = False):\n # Check if we are using a THStack\n if is_stack(drawable) and unpack_stacks:\n # Extract histograms from the stack\n result = list(drawable.GetHists())\n\n # Reverse if necessary\n if reverse_stacks:\n result.reverse()\n\n return result\n elif is_histo(drawable) or is_graph(drawable) or is_line(drawable):\n return (drawable,)\n\n # Already an iterable\n return drawable",
"def untyped_curves(self):\n defined = set(self._data.keys())\n untyped = defined.difference(self._pump_curves, self._efficiency_curves, \n self._headloss_curves, self._volume_curves)\n for key in untyped:\n yield key, self._data[key]",
"def pixelIter():\n\t\t\tx,y,i = 0,0,0\n\t\t\tfor i,c in enumerate(space):\n\t\t\t\tx = i % w\n\t\t\t\ty = i / w\n\t\t\t\tisSolid = (c=='#')\n\t\t\t\tyield x,y,i,isSolid",
"def __get_curves_lips(self, uol, uil, lol, lil):\n uol_curve = self.__draw_curve(uol)\n uil_curve = self.__draw_curve(uil)\n lol_curve = self.__draw_curve(lol)\n lil_curve = self.__draw_curve(lil)\n return uol_curve, uil_curve, lol_curve, lil_curve",
"def get_available_figures(self):\n return sorted((method[5:], func) \\\n for method, func in self.__class__.__dict__.iteritems() \\\n if method.startswith(\"plot_\") and callable(func))",
"def volume_curves(self):\n for key in self._volume_curves:\n yield key, self._data[key]",
"def parse_graphs(self, graph_iterator):\n #filter_cache = make_graph_filter_cache() \n for graph in graph_iterator: \n raw_chart = self.parse(graph)\n # The raw chart contains parser operations, need to decode the parse forest from this \n res = td_chart_to_cky_chart(raw_chart)\n yield res",
"def _pathology_iterator(graph):\n for u, v in _iter_pairs(graph):\n if graph.node[u][FUNCTION] == PATHOLOGY:\n yield u\n if graph.node[v][FUNCTION] == PATHOLOGY:\n yield v",
"def iterator(self):\n return _osgAnimation.VertexList_iterator(self)",
"def iter(self, iters, executor=None):\n deps_by_kind = self.dependencies_by_kind()\n\n if len(deps_by_kind) > 1:\n # Sync the iterators that provide time info for each data kind\n # (first in deps_by_kind lists) by endtime\n iters.update(strax.sync_iters(\n partial(strax.same_stop, func=strax.endtime),\n {d[0]: iters[d[0]]\n for d in deps_by_kind.values()}))\n\n # Convert to iterators over merged data for each kind\n new_iters = dict()\n for kind, deps in deps_by_kind.items():\n if len(deps) > 1:\n synced_iters = strax.sync_iters(\n strax.same_length,\n {d: iters[d] for d in deps})\n new_iters[kind] = strax.merge_iters(synced_iters.values())\n else:\n new_iters[kind] = iters[deps[0]]\n iters = new_iters\n\n if self.rechunk_input:\n iters = self.rechunk_input(iters)\n\n pending = []\n for chunk_i in itertools.count():\n try:\n if not self.check_next_ready_or_done(chunk_i):\n # TODO: avoid duplication\n # but also remain picklable...\n self.close(wait_for=tuple(pending))\n return\n compute_kwargs = {k: next(iters[k])\n for k in deps_by_kind}\n except StopIteration:\n self.close(wait_for=tuple(pending))\n return\n except Exception:\n self.close(wait_for=tuple(pending))\n raise\n\n if self.parallel and executor is not None:\n new_f = executor.submit(self.do_compute,\n chunk_i=chunk_i,\n **compute_kwargs)\n pending = [f for f in pending + [new_f]\n if not f.done()]\n yield new_f\n else:\n yield self.do_compute(chunk_i=chunk_i, **compute_kwargs)",
"def _plot_curves(self, curves_dict):\n for name, curve in curves_dict.items():\n fig = plt.figure()\n ax = plt.gca()\n\n plot_type = name.split('_')[-1]\n ax.set_title(plot_type)\n if plot_type == 'PRC':\n precision, recall, _ = curve\n ax.step(recall, precision, color='b', alpha=0.2, where='post')\n ax.fill_between(recall, precision, step='post', alpha=0.2, color='b')\n ax.set_xlabel('Recall')\n ax.set_ylabel('Precision')\n elif plot_type == 'ROC':\n false_positive_rate, true_positive_rate, _ = curve\n ax.plot(false_positive_rate, true_positive_rate, color='b')\n ax.plot([0, 1], [0, 1], 'r--')\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n else:\n ax.plot(curve[0], curve[1], color='b')\n\n ax.set_ylim([0.0, 1.05])\n ax.set_xlim([0.0, 1.0])\n\n fig.canvas.draw()\n\n curve_img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n curve_img = curve_img.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n self.summary_writer.add_image(name.replace('_', '/'), curve_img, global_step=self.global_step)",
"def _iter_distributions(self) -> Iterator[\"BaseDistribution\"]:\n raise NotImplementedError()",
"def __iter__(self):\n return self.cli.essids.essids().__iter__()",
"def get_layer_names_gen(self):\n for lyr in self.get_layer_names_as_array():\n yield lyr"
] | [
"0.625494",
"0.62062955",
"0.61237484",
"0.5620857",
"0.5553316",
"0.54564095",
"0.5426019",
"0.5304748",
"0.5274277",
"0.5261457",
"0.525136",
"0.5238986",
"0.521688",
"0.5185847",
"0.5158221",
"0.5131699",
"0.51237047",
"0.5082937",
"0.5075545",
"0.5069475",
"0.5063264",
"0.505663",
"0.5034377",
"0.5006482",
"0.5004874",
"0.49992245",
"0.49929115",
"0.49888667",
"0.49870312",
"0.49743384"
] | 0.6866871 | 0 |
Show a legend. obj can be an Axes or Figure (in that case, also pass handles and labels arguments). | def legend(obj, ncol=3, **kwargs):
# Font size handling here is a bit weird. We specify fontsize=6
# in legend constructor since that affects spacing. However, we
# need to manually override with 'small' later, because the original
# specification did not take effect on whole-figure legends (and for
# actual text, 6 is a wee bit small). We get a specific cramped
# appearance and correct behavior for whole-figure legends this way.
l = obj.legend(ncol=ncol, fancybox=True, markerscale=0.66, fontsize=6, **kwargs)
plt.setp(l.get_texts(), fontsize='small') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def legend (self, **kwargs):\n axes = self.twin_axes or self.axes\n self.mpl_legend = axes.legend (self.mpl_lines, self.labels, **kwargs)",
"def legend_extras(\n self, handles=None, labels=None, *, loc=None,\n frame=None, frameon=None, ncol=None, ncols=None,\n center=None, order='C', label=None, title=None,\n fontsize=None, fontweight=None, fontcolor=None,\n **kwargs\n):\n # Parse input args\n # TODO: Legend entries for colormap or scatterplot objects! Idea is we\n # pass a scatter plot or contourf or whatever, and legend is generated by\n # drawing patch rectangles or markers using data values and their\n # corresponding cmap colors! For scatterplots just test get_facecolor()\n # to see if it contains more than one color.\n # TODO: It is *also* often desirable to label a colormap object with\n # one data value. Maybe add a legend option for the *number of samples*\n # or the *sample points* when drawing legends for colormap objects.\n # Look into \"legend handlers\", might just want to add own handlers by\n # passing handler_map to legend() and get_legend_handles_labels().\n if order not in ('F', 'C'):\n raise ValueError(\n f'Invalid order {order!r}. Choose from '\n '\"C\" (row-major, default) and \"F\" (column-major).'\n )\n ncol = _not_none(ncols=ncols, ncol=ncol)\n title = _not_none(label=label, title=title)\n frameon = _not_none(frame=frame, frameon=frameon, default=rc['legend.frameon'])\n if handles is not None and not np.iterable(handles): # e.g. a mappable object\n handles = [handles]\n if labels is not None and (not np.iterable(labels) or isinstance(labels, str)):\n labels = [labels]\n if title is not None:\n kwargs['title'] = title\n if frameon is not None:\n kwargs['frameon'] = frameon\n fontsize = kwargs.get('fontsize', None) or rc['legend.fontsize']\n if fontsize is None:\n pass\n elif fontsize in mfonts.font_scalings:\n kwargs['fontsize'] = rc._scale_font(fontsize)\n else:\n kwargs['fontsize'] = units(fontsize, 'pt')\n\n # Handle and text properties that are applied after-the-fact\n # NOTE: Set solid_capstyle to 'butt' so line does not extend past error bounds\n # shading in legend entry. This change is not noticable in other situations.\n kw_text = {}\n for key, value in (('color', fontcolor), ('weight', fontweight)):\n if value is not None:\n kw_text[key] = value\n kw_handle = _pop_props(kwargs, 'lines')\n kw_handle['solid_capstyle'] = 'butt'\n\n # Get axes for legend handle detection\n # TODO: Update this when no longer use \"filled panels\" for outer legends\n axs = [self]\n if self._panel_hidden:\n if self._panel_parent: # axes panel\n axs = list(self._panel_parent._iter_axes(hidden=False, children=True))\n else:\n axs = list(self.figure._iter_axes(hidden=False, children=True))\n\n # Handle list of lists (centered row legends)\n # NOTE: Avoid very common plot() error where users draw individual lines\n # with plot() and add singleton tuples to a list of handles. If matplotlib\n # gets a list like this but gets no 'labels' argument, it raises error.\n list_of_lists = False\n if handles is not None:\n handles = [h[0] if isinstance(h, tuple) and len(h) == 1 else h for h in handles]\n list_of_lists = any(isinstance(h, (list, np.ndarray)) for h in handles)\n if list_of_lists:\n if any(not np.iterable(_) for _ in handles):\n raise ValueError(f'Invalid handles={handles!r}.')\n if not labels:\n labels = [None] * len(handles)\n elif not all(np.iterable(_) and not isinstance(_, str) for _ in labels):\n # e.g. 
handles=[obj1, [obj2, obj3]] requires labels=[lab1, [lab2, lab3]]\n raise ValueError(f'Invalid labels={labels!r} for handles={handles!r}.')\n\n # Parse handles and legends with native matplotlib parser\n if not list_of_lists:\n if isinstance(handles, np.ndarray):\n handles = handles.tolist()\n if isinstance(labels, np.ndarray):\n labels = labels.tolist()\n handles, labels, *_ = mlegend._parse_legend_args(\n axs, handles=handles, labels=labels,\n )\n pairs = list(zip(handles, labels))\n else:\n pairs = []\n for ihandles, ilabels in zip(handles, labels):\n if isinstance(ihandles, np.ndarray):\n ihandles = ihandles.tolist()\n if isinstance(ilabels, np.ndarray):\n ilabels = ilabels.tolist()\n ihandles, ilabels, *_ = mlegend._parse_legend_args(\n axs, handles=ihandles, labels=ilabels,\n )\n pairs.append(list(zip(ihandles, ilabels)))\n\n # Manage pairs in context of 'center' option\n center = _not_none(center, list_of_lists)\n if not center and list_of_lists: # standardize format based on input\n list_of_lists = False # no longer is list of lists\n pairs = [pair for ipairs in pairs for pair in ipairs]\n elif center and not list_of_lists:\n list_of_lists = True\n ncol = _not_none(ncol, 3)\n pairs = [pairs[i * ncol:(i + 1) * ncol] for i in range(len(pairs))]\n ncol = None\n if list_of_lists: # remove empty lists, pops up in some examples\n pairs = [ipairs for ipairs in pairs if ipairs]\n\n # Bail if no pairs\n if not pairs:\n return mlegend.Legend(self, [], [], loc=loc, ncol=ncol, **kwargs)\n # Multiple-legend pseudo-legend\n elif center:\n objs = _multiple_legend(self, pairs, loc=loc, ncol=ncol, order=order, **kwargs)\n # Individual legend\n else:\n objs = [_single_legend(self, pairs, loc=loc, ncol=ncol, order=order, **kwargs)]\n\n # Add legends manually so matplotlib does not remove old ones\n for obj in objs:\n if isinstance(obj, mpatches.FancyBboxPatch):\n continue\n if hasattr(self, 'legend_') and self.legend_ is None:\n self.legend_ = obj # set *first* legend accessible with get_legend()\n else:\n self.add_artist(obj)\n\n # Apply legend box properties\n outline = rc.fill({\n 'linewidth': 'axes.linewidth',\n 'edgecolor': 'axes.edgecolor',\n 'facecolor': 'axes.facecolor',\n 'alpha': 'legend.framealpha',\n })\n for key in (*outline,):\n if key != 'linewidth':\n if kwargs.get(key, None):\n outline.pop(key, None)\n for obj in objs:\n if isinstance(obj, mpatches.FancyBboxPatch):\n obj.update(outline) # the multiple-legend bounding box\n else:\n obj.legendPatch.update(outline) # no-op if frame is off\n\n # Apply *overrides* to legend elements\n # WARNING: legendHandles only contains the *first* artist per legend because\n # HandlerBase.legend_artist() called in Legend._init_legend_box() only\n # returns the first artist. Instead we try to iterate through offset boxes.\n # TODO: Remove this feature? Idea was this lets users create *categorical*\n # legends in clunky way, e.g. entries denoting *colors* and entries denoting\n # *markers*. But would be better to add capacity for categorical labels in a\n # *single* legend like seaborn rather than multiple legends.\n for obj in objs:\n try:\n children = obj._legend_handle_box._children\n except AttributeError: # older versions maybe?\n children = []\n for obj in _iter_legend_children(children):\n # Account for mixed legends, e.g. 
line on top of error bounds shading\n if isinstance(obj, mtext.Text):\n obj.update(kw_text)\n else:\n for key, value in kw_handle.items():\n getattr(obj, 'set_' + key, lambda value: None)(value)\n\n # Append attributes and return, and set clip property!!! This is critical\n # for tight bounding box calcs!\n for obj in objs:\n obj.set_clip_on(False)\n if isinstance(objs[0], mpatches.FancyBboxPatch):\n objs = objs[1:]\n return objs[0] if len(objs) == 1 else tuple(objs)",
"def plot2d(self, obj, options=\"\", label=None, labelfmt=None, **kwargs):\n self._pad.cd()\n self._pad.Update() # Updating the pad prevents spontaneous seg faults...\n\n # Apply formatting (if any) before calling `Draw()`\n root_helpers.set_graphics_attributes(obj, **kwargs)\n\n # Draw the object, depending on its type\n if isinstance(obj, root.TH2):\n if isinstance(self._frame, root.TH1F):\n if not self._is_empty:\n warnings.warn(\"plot2d: overwriting non-empty axes\")\n\n self._frame.Delete()\n self._frame = obj\n\n elif \"SAME\" not in options.upper():\n self._frame = obj\n\n obj.Draw(options)\n\n else:\n try:\n warnings.warn(\n \"plot2d: attempting to plot an object that is not a TH2.\\n\"\n \"This may result in unexpected behaviour.\"\n )\n obj.Draw(options)\n\n except AttributeError:\n raise TypeError(\"Attempting to plot an object with no Draw() method\")\n\n # Add object to list of legend entries if label was provided\n if label is not None:\n self._legend_entries.append((obj, label, labelfmt))\n\n self._is_empty = False # Record that the axes are no longer empty",
"def decorate(**options):\n ax = plt.gca()\n ax.set(**options)\n\n handles, labels = ax.get_legend_handles_labels()\n if handles:\n ax.legend(handles, labels)\n\n plt.tight_layout()",
"def _patch_legend(obj, draw_options, legend_type):\n legend = \"\"\n if _is_in_legend(obj):\n # Unfortunately, patch legend entries need \\addlegendimage in Pgfplots.\n do = \", \".join([legend_type] + draw_options) if draw_options else \"\"\n legend += \"\\\\addlegendimage{{{}}}\\n\\\\addlegendentry{{{}}}\\n\\n\".format(\n do, obj.get_label()\n )\n\n return legend",
"def plot_legend(ax):\n\tlines = 4 * [None]\n\tcolors = [\"black\", \"deepskyblue\", \"lime\", \"crimson\"]\n\tlabels = [r\"Constant $y_\\text{Sr}^\\text{CC}$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC} \\propto 1 - e^{-kZ}$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC} \\propto Z$\",\n\t\tr\"$y_\\text{Sr}^\\text{CC}$ = 0\"]\n\tfor i in range(4):\n\t\tlines[i] = ax.plot([1, 2], [1, 2], c = visuals.colors()[\"white\"],\n\t\t\tlabel = labels[i])[0]\n\tleg = ax.legend(loc = visuals.mpl_loc()[\"upper left\"], ncol = 1,\n\t\tbbox_to_anchor = (0.0, 0.99), frameon = False, handlelength = 0)\n\tfor i in range(4):\n\t\tlines[i].remove()\n\t\tleg.get_texts()[i].set_color(colors[i])",
"def legend(colors, labels, shapes='box', loc='best', layout='vertical', reverse_vertical=True, ax=None):\n if ax is None:\n ax = plt.gca()\n\n handles = get_handles(shapes, colors, labels)\n if not all(len(handles) == l for l in [len(colors), len(labels)]):\n warnings.warn('Lengths of one or more of colors, labels, and shapes did not match.', UserWarning)\n\n if layout == 'horizontal' or layout == 'h':\n ncol = len(labels)\n else:\n ncol = 1\n if reverse_vertical: #Reverse so that it goes from bottom to top\n handles = handles[-1::-1]\n\n return ax.legend(handles=handles, loc=loc, ncol=ncol, frameon=False)",
"def legend(self):\n if self.nplots == 1:\n lax = self.ax\n loff = 0.2\n else:\n lax = self.ax1\n loff = 0.4\n box = lax.get_position()\n\n lax.figure.subplots_adjust(bottom=loff) # make space on bottom for legend\n lax.legend(self.plots, self.labels, loc='upper center', bbox_to_anchor=(0.5, -loff), fancybox=True, shadow=True, ncol=3, prop={'size': 8})",
"def legend(colors, labels, **kwds):\n proxies = [pylab.Rectangle((0, 0), 1, 1, fc=color) for color in colors]\n nl = min(len(proxies), len(labels))\n pylab.legend(proxies[:nl], labels[:nl], **kwds)",
"def add_legend(\n self,\n labels=None,\n bcolor=(0.5, 0.5, 0.5),\n border=False,\n size=(0.2, 0.2),\n name=None,\n loc='upper right',\n face='triangle',\n ):\n if self.legend is not None:\n self.remove_legend()\n self._legend = _vtk.vtkLegendBoxActor()\n\n if labels is None:\n # use existing labels\n if not self._labels:\n raise ValueError(\n 'No labels input.\\n\\n'\n 'Add labels to individual items when adding them to'\n 'the plotting object with the \"label=\" parameter. '\n 'or enter them as the \"labels\" parameter.'\n )\n\n self._legend.SetNumberOfEntries(len(self._labels))\n for i, (vtk_object, text, color) in enumerate(self._labels.values()):\n if face is None:\n # dummy vtk object\n vtk_object = pyvista.PolyData([0.0, 0.0, 0.0])\n\n self._legend.SetEntry(i, vtk_object, text, color.float_rgb)\n\n else:\n self._legend.SetNumberOfEntries(len(labels))\n\n legend_face = make_legend_face(face)\n for i, (text, color) in enumerate(labels):\n self._legend.SetEntry(i, legend_face, text, Color(color).float_rgb)\n\n if loc is not None:\n if loc not in ACTOR_LOC_MAP:\n allowed = '\\n'.join([f'\\t * \"{item}\"' for item in ACTOR_LOC_MAP])\n raise ValueError(f'Invalid loc \"{loc}\". Expected one of the following:\\n{allowed}')\n x, y, size = map_loc_to_pos(loc, size, border=0.05)\n self._legend.SetPosition(x, y)\n self._legend.SetPosition2(size[0], size[1])\n\n if bcolor is None:\n self._legend.SetUseBackground(False)\n else:\n self._legend.SetUseBackground(True)\n self._legend.SetBackgroundColor(Color(bcolor).float_rgb)\n\n self._legend.SetBorder(border)\n\n self.add_actor(self._legend, reset_camera=False, name=name, pickable=False)\n return self._legend",
"def show_legend(self, show_legend):\n\n self.container['show_legend'] = show_legend",
"def plot(self, obj, options=\"\", expand=True, label=None, labelfmt=None, **kwargs):\n self._pad.cd()\n self._pad.Update() # Updating the pad prevents spontaneous seg faults...\n\n # Apply formatting (if any) before calling `Draw()`\n root_helpers.set_graphics_attributes(obj, **kwargs)\n\n # Get current axis limits\n old_left, old_right = self.get_xlim()\n old_bottom, old_top = self.get_ylim()\n\n # Draw the object, depending on its type\n if isinstance(obj, root.TH1):\n # Histogram\n obj.Draw(\"HIST SAME \" + options)\n\n # Get new axis limits (to expand if needed)\n left, right = obj.GetXaxis().GetXmin(), obj.GetXaxis().GetXmax()\n bottom, top = root_helpers.hist_min(obj), root_helpers.hist_max(obj)\n\n elif isinstance(obj, root.THStack):\n # Stacked Histogram\n obj.Draw(\"SAME HIST\" + options)\n\n # Get new axis limits (to expand if needed)\n top_hist = obj.GetStack().Last()\n bottom_hist = obj.GetStack().First()\n left, right = top_hist.GetXaxis().GetXmin(), top_hist.GetXaxis().GetXmax()\n bottom, top = root_helpers.hist_min(bottom_hist), root_helpers.hist_max(top_hist)\n\n elif isinstance(obj, root.TGraph):\n # Graph\n obj.Draw(options)\n\n # Get new axis limits (to expand if needed)\n left, right = root_helpers.graph_xmin(obj), root_helpers.graph_xmax(obj)\n bottom, top = root_helpers.graph_ymin(obj), root_helpers.graph_ymax(obj)\n\n elif isinstance(obj, root.TMultiGraph):\n # Multigraph\n obj.Draw(options)\n\n # Get new axis limits (to expand if needed)\n left, right = root_helpers.multigraph_xmin(obj), root_helpers.multigraph_xmax(obj)\n bottom, top = root_helpers.multigraph_ymin(obj), root_helpers.multigraph_ymax(obj)\n\n elif isinstance(obj, root.TLine):\n # Line\n obj.Draw(options)\n\n # Get new axis limits (to expand if needed)\n left, right = obj.GetX1(), obj.GetX2()\n bottom, top = obj.GetY1(), obj.GetY2()\n\n else:\n try:\n obj.Draw(\"SAME \" + options)\n\n # Do not expand axis if we don't know what we're plotting\n left, right = old_left, old_right\n bottom, top = old_bottom, old_top\n\n except AttributeError:\n raise TypeError(\"Attempting to plot an object with no Draw() method\")\n\n # Add object to list of legend entries if label was provided\n if label is not None:\n self._legend_entries.append((obj, label, labelfmt))\n\n # Adjust axis limits\n if expand:\n if self._is_empty:\n # Axes are empty: expand or shrink to the object being drawn\n if left == 0 and right == 0:\n new_left = -0.01\n new_right = 0.01\n elif left == right:\n new_left = left * 0.99\n new_right = right * 1.01\n else:\n new_left = left\n new_right = right\n\n if bottom == 0 and top == 0:\n new_bottom = -0.01\n new_top = 0.01\n elif bottom == top:\n new_bottom = bottom * 0.99\n new_top = top * 1.01\n else:\n new_bottom = bottom\n new_top = top\n\n else:\n # Axes are not empty: expand or leave unaltered\n new_left = left if left < old_left else old_left\n new_right = right if right > old_right else old_right\n new_bottom = bottom if bottom < old_bottom else old_bottom\n new_top = top if top > old_top else old_top\n\n self.set_xlim(new_left, new_right)\n self.set_ylim(new_bottom, new_top)\n\n self._pad.RedrawAxis() # Redraw so axes appear above colour-filled areas\n\n self._is_empty = False # Record that the axes are no longer empty",
"def set_legend(ax):\n l = ax.legend()\n plt.setp(l.get_texts(), fontsize=8)",
"def legend(self, legend):\n\n self.container['legend'] = legend",
"def add_legend(self,\n ax: Optional[Union[Axes, str, int]] = None,\n loc=None,\n labels: Optional[Sequence[str]] = None,\n **kwargs):\n target: Union[Figure, Axes]\n if ax is None:\n # automatic: figure legend or the (unique) axes\n if self.n_plots >= 2:\n target = self.fig\n else:\n target = self.axes_active[0]\n else:\n if isinstance(ax, (int, str)): # see __getitem__\n target = self[ax] # type: ignore\n else:\n target = ax\n\n # TODO: Customize how to sort legend items.\n legend_handles, legend_labels = zip(\n *[(h, l) for (l, h) in sorted(self._collect_legend().items())])\n if labels is not None:\n if len(labels) != len(legend_labels):\n raise ValueError(\n f\"labels {labels} should have length {len(legend_labels)} \"\n f\"but was given {len(labels)}\")\n legend_labels = list(labels)\n legend = target.legend(legend_handles, legend_labels, loc=loc, **kwargs)\n\n if isinstance(target, Axes) and not target.lines:\n target.axis('off')\n\n return legend",
"def _LegendAndSave(Fig,SaveName,loc=\"upper right\",frameon=True,close=False,\n tight=True,use_legend=True,handlelength=1,**kwargs):\n if use_legend and legend_is_useable():\n legend(loc=loc,frameon=frameon,handlelength=handlelength)\n savefig(Fig,SaveName,close=close,tight=tight,**kwargs)",
"def add_legend(ax, sf=16, loc='upper right'):\n ax.autoscale(False)\n #CONUS\n #leg_s = np.array([0.1, 0.5, 1.0, 5.0, 10.0])\n #HMA\n leg_s = np.array([0.1, 1.0, 10.0, 100.0])\n leg_x = np.full(leg_s.size, -999999999)\n leg_y = np.full(leg_s.size, -999999999)\n #leg_sc = ax.scatter(leg_x, leg_y, c='0.8', s=leg_s)\n #ax.legend(leg_sc, ['%0.1f km^2' % s for s in leg_s], scatterpoints=1, loc='upper right')\n for i, s in enumerate(leg_s):\n lbl = r'$%0.1f\\/km^2$' % s\n ax.scatter(leg_x[i], leg_y[i], s=s*sf, c='gray', label=lbl)\n legend = ax.legend(title='Glacier Area', scatterpoints=1, loc=loc, prop={'size':7})\n legend.get_title().set_fontsize('8')\n return legend",
"def _draw_legend(self, labels, title=None):\n\n if len(self.pos) < 1:\n print 'Legend can not be plotted for Gleckler, as no data available!'\n return\n\n pmax = max(self.pos.values())\n\n # generate separate figure for legend\n f = plt.figure()\n ax = f.add_subplot(111, frameon=True, aspect='equal', axisbg='grey')\n f.subplots_adjust(bottom=0.25, top=0.75, left=0.25, right=0.75)\n\n for k in labels.keys():\n if k == 1:\n pos = 'top'\n elif k == 2:\n pos = 'bottom'\n elif k == 3:\n pos = 'left'\n elif k == 4:\n pos = 'right'\n else:\n raise ValueError('Can not draw Gleckler legend! Invalid position value! %s' % str(k))\n\n oldval = self.show_value\n self.show_value = False\n self.__plot_triangle(ax, np.random.random(), pos=pos)\n self.show_value = oldval\n ax.set_xticks([])\n ax.set_yticks([])\n\n fontsize = 16\n linewidth = 3\n\n for k in labels.keys():\n if k == 1: # top\n ax.annotate(labels[k], xy=(0.5, 0.9), xycoords='axes fraction', xytext=(0., 1.2), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 2:\n ax.annotate(labels[k], xy=(0.5, 0.1), xycoords='axes fraction', xytext=(0., -0.3), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 3:\n ax.annotate(labels[k], xy=(0.1, 0.5), xycoords='axes fraction', xytext=(-0.6, 0.2), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n elif k == 4:\n ax.annotate(labels[k], xy=(0.9, 0.5), xycoords='axes fraction', xytext=(1.1, 0.8), textcoords='axes fraction', arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"angle3,angleA=0,angleB=-90\", linewidth=linewidth), horizontalalignment='left', size=fontsize)\n\n if title is not None:\n f.suptitle(title, size=fontsize)\n\n return f",
"def legend(self, legend):\n\n self._legend = legend",
"def add_to_legend(axes, text, **kwargs):\n text = mpatches.Patch(color='none', label=text)\n handles, labels = axes.get_legend_handles_labels()\n if 'handles' in kwargs:\n handles.append(kwargs.pop('handles'))\n handles.append(text)\n axes.legend(\n handles=handles,\n prop=kwargs.pop('prop', {'family': 'monospace'}),\n **kwargs\n )",
"def finish (self, legend=None):\n if legend is True:\n kwargs = {}\n else:\n kwargs = legend\n if legend:\n self.legend (**kwargs)",
"def set_legend(self, **lgdkwargs):\n\n if 'loc' not in lgdkwargs.keys(): \n lgdkwargs['loc'] = 'upper right'\n \n if 'scatterpoints' not in lgdkwargs.keys(): \n lgdkwargs['scatterpoints'] = 1 \n\n self.sub.legend(**lgdkwargs) \n \n return None",
"def draw_legend(\n data: pd.Series[Any], da: DrawingArea, lyr: Layer\n ) -> DrawingArea:\n msg = \"The geom should implement this method.\"\n raise NotImplementedError(msg)",
"def fl_draw_object_label(ptr_flobject):\n _fl_draw_object_label = library.cfuncproto(\n library.load_so_libforms(), \"fl_draw_object_label\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT)], \\\n \"\"\"void fl_draw_object_label(FL_OBJECT * ob)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n _fl_draw_object_label(ptr_flobject)",
"def modify_legend_handles(ax, **kwargs):\r\n hndls, labls = ax.get_legend_handles_labels()\r\n _hndls = []\r\n for h in hndls:\r\n _h = copy(h)\r\n _h.update(kwargs)\r\n _hndls.append(_h)\r\n return _hndls, labls",
"def add_legend(self, mode='image', label=None, color='none', alpha=1,\n size=15, family='sans-serif', properties=None, **kwargs):\n if properties is None:\n properties = {}\n properties = {'size': size, 'family': family, **properties}\n\n # get legend that already exists\n legend = self.ax.get_legend()\n old_handles = getattr(legend, 'legendHandles', [])\n handler_map = getattr(legend, '_custom_handler_map', {})\n\n # make new handles\n new_handles = []\n labels = to_list(label)\n colors = [color] * len(labels) if isinstance(color, str) else color\n alphas = [alpha] * len(labels) if isinstance(alpha, Number) else alpha\n\n for label_item, label_color, label_alpha in zip(labels, colors, alphas):\n if label_item is None:\n continue\n\n if isinstance(label_item, str):\n if mode in ('image', 'histogram'):\n if is_color_like(label_color):\n handle = Patch(color=label_color, alpha=label_alpha, label=label_item)\n else:\n handle = PatchCollection(patches=[], cmap=label_color, label=label_item)\n handler_map[PatchCollection] = ColorMappingHandler()\n elif mode in ('curve', 'loss'):\n handle = Line2D(xdata=[0], ydata=[0], color=label_color, alpha=label_alpha, label=label_item)\n new_handles.append(handle)\n elif not label_item.get_label().startswith('_'):\n new_handles.append(label_item)\n\n if len(new_handles) > 0:\n # extend existing handles and labels with new ones\n handles = old_handles + new_handles\n legend = self.ax.legend(prop=properties, handles=handles, handler_map=handler_map, **kwargs)\n\n return legend",
"def legend(self, loc, options=\"\", **kwargs):\n self._pad.cd()\n\n if self._legend is not None and isinstance(self._legend, root.TLegend):\n warnings.warn(\"These axes already have a legend, will overwrite\", stacklevel=2)\n self._legend.Delete()\n\n self._legend = root.TLegend(*loc)\n\n # Default formatting options: use transparent background\n # Do this here since this option is not available in the `TStyle` class\n self._legend.SetFillColorAlpha(0, 0)\n\n # Set graphics attributes\n root_helpers.set_graphics_attributes(self._legend, **kwargs)\n\n # Columns\n if \"ncol\" in kwargs:\n self._legend.SetNColumns(kwargs[\"ncol\"])\n\n # Legend border size\n if \"bordersize\" in kwargs:\n self._legend.SetBorderSize(kwargs[\"bordersize\"])\n\n for obj, label, option in self._legend_entries:\n if option is not None:\n self._legend.AddEntry(obj, label, option)\n else:\n self._legend.AddEntry(obj, label)\n\n self._legend.Draw(options)\n\n return self._legend",
"def draw_legend(self, *drawables):\n # Check if we already have a legend\n if hasattr(self, '_legend'):\n raise RuntimeError('legend already exists on this plot')\n\n # Switch to the context of the main plot\n self._plot.cd()\n\n # Create the legend\n if self._atlas_label_drawn:\n self._legend = TLegend(self.PLOT_LEGEND_LEFT,\n (self.PLOT_LEGEND_BOTTOM_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_LEGEND_BOTTOM),\n self.PLOT_LEGEND_RIGHT,\n (self.PLOT_LEGEND_TOP_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_LEGEND_TOP))\n else:\n # WJF may need customisation with ratio\n self._legend = TLegend(0.15, 0.7, 0.5, 0.88)\n\n SetOwnership(self._legend, False)\n\n # Style it\n self._legend.SetTextSize((\n self.PLOT_LEGEND_TEXT_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_LEGEND_TEXT_SIZE\n ))\n self._legend.SetBorderSize(0)\n self._legend.SetFillStyle(0) # transparent\n self._legend.SetNColumns(self.PLOT_LEGEND_N_COLUMNS)\n\n # Create a chained list of all drawables. We decompose THStack\n # objects in reverse order, i.e. top-to-bottom.\n drawables = \\\n list(chain(*(drawable_iterable(h, True, True)\n for h\n in drawables)))\n\n # Add anything to this list that we created internally\n drawables.extend(self._legend_extras)\n\n # Because ROOT draws legend entries from left-to-right across rows and\n # not top-to-bottom along columns, we need to do a bit of a pivot on\n # the list so that the histograms appear in the vertical order of the\n # stack\n n_entries = len(drawables)\n n_col = self.PLOT_LEGEND_N_COLUMNS\n n_row = int(ceil(float(n_entries) / n_col))\n legend_order = []\n for r in xrange(0, n_row):\n for c in xrange(0, n_col):\n if (r * n_col + c) == n_entries:\n # Don't need an outer break, this would only happen on the\n # last row if n_row * n_col != n_entries\n break\n legend_order.append(drawables[r + c * n_row])\n\n # Add the drawables\n for drawable in legend_order:\n SetOwnership(drawable, False)\n title = drawable.GetTitle()\n # HACK: Convention: legend for drawables with a non-default\n # marker style (data) to be drawn as point, and with\n # empty fill (signal) to be drawn as line\n #print 'Adding plottable {0} to legend. Has MarkerStyle {1} and fill colour {2}'.format(drawable.GetName(), drawable.GetMarkerStyle(), drawable.GetFillColor())\n #self._legend.AddEntry(drawable, title, 'f')\n this_marker = drawable.GetMarkerStyle()\n if this_marker == 20:\n self._legend.AddEntry(drawable, title, 'p')\n #self._legend.AddEntry(drawable, title, 'l')\n elif drawable.GetTitle() == 'Total Background' or drawable.GetTitle() == 'Total background':\n self._legend.AddEntry(drawable, title, 'lf')\n elif drawable.GetFillColor() == 0:\n self._legend.AddEntry(drawable, title, 'l')\n elif this_marker == 21 or this_marker == 3 or this_marker == 22:\n self._legend.AddEntry(drawable, title, 'lp')\n else:\n self._legend.AddEntry(drawable, title, 'f')\n\n # Draw the legend\n self._legend.Draw()",
"def legend(self, marks, bounds=None, rect=None, corner=None, grid=None, gutter=50, style=None, label_style=None, id=None):\n gutter = _require_scalar(gutter)\n style = _combine_styles(_require_style(style))\n label_style = _combine_styles(_require_style(label_style))\n id = _require_optional_id(id)\n\n xmin, xmax, ymin, ymax = _region(0, self._width, 0, self._height, bounds=bounds, rect=rect, corner=corner, grid=grid, gutter=gutter)\n self._children.append(LegendMark(xmin, xmax, ymin, ymax, marks, style, label_style, id))\n return self._children[-1]",
"def add_plot_legend(fig, labright='M.', lableft='S.'):\n #............................................\n _leg = fig.add_axes([0.92, 0.865, 0.055, 0.085])\n _leg.fill((0, 0.5, 0.5, 0), (0, 0, 1, 1), fc=ENSOpolygons['W'])\n _leg.text(0.05, 0.5, 'EN', fontsize='smaller')\n _leg.fill((0.5, 1, 1, 0.5), (0, 0, 1, 1), fc=ENSOpolygons['C'])\n _leg.text(0.6, 0.5, 'LN', fontsize='smaller')\n _leg.set_xticks([])\n _leg.set_yticks([])\n #............................................\n _leg = fig.add_axes([0.92, 0.75, 0.055, 0.085])\n _leg.plot((0, 1,), (0, 1), ls='-', c='k', marker='')\n _leg.set_xticks([])\n _leg.set_yticks([])\n _leg.text(0.6, 0.15, labright, fontsize='smaller')\n _leg.text(0.1, 0.5, lableft, fontsize='smaller')"
] | [
"0.649047",
"0.61699635",
"0.6117448",
"0.6017658",
"0.59494585",
"0.59489495",
"0.59252846",
"0.5908187",
"0.58153063",
"0.58062863",
"0.5786078",
"0.5747794",
"0.5728206",
"0.5683173",
"0.56398714",
"0.56154585",
"0.5608667",
"0.5580261",
"0.5554319",
"0.5549609",
"0.55420744",
"0.5499324",
"0.5493276",
"0.54257244",
"0.5386864",
"0.5367721",
"0.53475153",
"0.5302455",
"0.529229",
"0.5288217"
] | 0.723461 | 0 |
Plot each algorithm/method's rank evolving as budget increases. groupby is the method of aggregating results of multiple instances: a callable, stringable object, GroupByMedian by default. Note that funcId may be an array of id numbers; in that case, an average rank over the listed functions is taken. | def rank_by_budget(ax, pds, dim=None, funcId=None, groupby=None):
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
try: # funcId is array?
# _pds_plot_iterator[] uses funcId only for things we don't care for
fakeFuncId = funcId[0]
manyranking = np.array([pds.ranking((dim, i), groupby) for i in funcId])
rankcount = np.shape(manyranking[0])[1] - 1
amanyranking = ra.alignArrayData(ra.VArrayMultiReader(manyranking))
budget = amanyranking[:,0]
rankings = np.hsplit(amanyranking[:,1:], len(funcId))
avgranking = np.average(rankings, axis=0)
ranking = np.vstack([budget, avgranking.T]).T
except TypeError: # funcId is scalar
fakeFuncId = funcId
ranking = pds.ranking((dim, funcId), groupby)
i = 0
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, fakeFuncId):
if kind != 'algorithm' and kind != 'strategy':
continue
#print name, ds
budgets = ranking[:,0]
ranks = ranking[:,1+i]
style['markevery'] = 64
ax.plot(budgets, ranks, label=name, **style)
i += 1
ax.set_xlabel('Budget')
ax.set_ylabel('Rank by '+str(groupby).title()+' Function Value')
ax.set_xscale('log', basex=pfsize)
ax.grid() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ranking(self, dimfun, groupby, ftarget=10**-8):\n nameds = list(itertools.chain(self.algds_dimfunc(dimfun), self.stratds_dimfunc(dimfun)))\n count = len(nameds)\n\n # Produce \"fv\" items, one per dataset, containing single function value\n # for each budget\n fvset = []\n for (name, ds) in nameds:\n budgets = ds.funvals[:,0]\n f1vals = np.maximum(groupby(ds.funvals[:, 1:], axis=1), ftarget)\n fv = np.transpose(np.vstack([budgets, f1vals]))\n fvset.append(fv)\n\n # Align the \"fv\" items by budget and merge them\n fva = ra.alignArrayData(ra.VArrayMultiReader(fvset))\n budgets = fva[:,0]\n\n # Assign function values and rank them\n # However, we want to resolve eventual ties by ranking first\n # converging function first. So we do a trick and rewrite ftarget\n # values in increasing convergence sort order.\n values = fva[:,1:].copy()\n firstconv = np.ones(count) * (np.size(budgets)+1) # runlength+1 is default\n for i in range(count): # XXX: drop the loop\n try:\n firstconv[i] = np.nonzero(values[:,i] == ftarget)[0][0]\n except IndexError:\n continue # no rewriting needed\n firstconvranks = ss.mstats.rankdata(firstconv)\n for i in range(count):\n r = firstconvranks[i]\n values[firstconv[i]:, i] = ftarget - (1-r/count)*ftarget\n\n ranks = ss.mstats.rankdata(values, axis=1)\n\n return np.transpose(np.vstack([budgets, ranks.T]))",
"def fval_by_budget(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n if baseline_ds:\n baseline_budgets = baseline_ds.funvals[:, 0]\n baseline_funvals = groupby(baseline_ds.funvals[:, 1:], axis=1)\n baseline_safefunvals = np.maximum(baseline_funvals, 10**-8) # eschew zeros\n # fvb is matrix with each row being [budget,funval]\n baseline_fvb = np.transpose(np.vstack([baseline_budgets, baseline_safefunvals]))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n budgets = ds.funvals[:, 0]\n funvals = groupby(ds.funvals[:, 1:], axis=1)\n\n # Throw away funvals after ftarget reached\n try:\n limit = np.nonzero(funvals < 10**-8)[0][0] + 1\n except IndexError:\n limit = np.size(budgets)+1\n budgets = budgets[:limit]\n funvals = funvals[:limit]\n\n fvb = np.transpose(np.vstack([budgets[:limit], funvals[:limit]]))\n\n if baseline_ds:\n # Relativize by baseline\n fvba = ra.alignArrayData(ra.VArrayMultiReader([fvb, baseline_fvb]))\n budgets = fvba[:, 0]\n funvals = fvba[:, 1] / fvba[:, 2]\n\n style['markevery'] = 16\n ax.loglog(budgets, funvals, label=name, basex=pfsize, **style)\n if baseline_ds:\n ax.set_yticks([1], minor=True)\n ax.set_xlabel('Budget')\n ax.set_ylabel(_fval_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')",
"def evals_by_target(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline_ds:\n baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs = groupby(ds.detEvals(targets), axis=1)\n if baseline_ds:\n fevs /= baseline_fevs\n style['markevery'] = 64\n ax.loglog(targets, fevs, label=name, basey=pfsize, **style)\n ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))\n if baseline_ds:\n ax.set_yticks([2, 3.5], minor=True)\n ax.set_xlabel('Function Value Targets')\n ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')",
"def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label=\"\", baseline2_ds=None, baseline2_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline1_ds:\n baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))\n if baseline2_ds:\n baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs1 = groupby(ds.detEvals(targets), axis=1)\n if baseline1_ds:\n fevs1 /= baseline1_fevs\n fevs2 = groupby(ds.detEvals(targets), axis=1)\n if baseline2_ds:\n fevs2 /= baseline2_fevs\n\n infsx = np.nonzero(fevs1 == inf)\n infs = infsx[0]\n if np.size(infs) > 0:\n #print infs\n fevs1 = fevs1[:infs[0]-1]\n fevs2 = fevs2[:infs[0]-1]\n\n #print name, fevs1, fevs2\n style['markevery'] = 64\n ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)\n ax.grid()\n ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. log(runlengths) + 1\n ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))\n ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby)))",
"def _pds_plot_iterator(pds, dim, funcId):\n i = 0\n for (algname, ds) in pds.algds_dimfunc((dim, funcId)):\n yield ('algorithm', algname, ds, _style_algorithm(algname, i))\n i += 1\n yield ('oracle', 'oracle', pds.oracle((dim, funcId)), _style_oracle())\n yield ('unifpf', 'eUNIF', pds.unifpf().dictByDimFunc()[dim][funcId][0], _style_unifpf())\n i = 0\n for (stratname, ds) in pds.stratds_dimfunc((dim, funcId)):\n yield ('strategy', stratname, ds, _style_strategy(stratname, i))\n i += 1",
"def summarize(group, fs=None, include_source=True):\n _line_break = '{0:-<120}\\n'.format('')\n tests = sorted(ComparisonBenchmark.groups[group], key=lambda t: getattr(t, 'time_average_seconds'))\n log = StringIO.StringIO()\n log.write('Call statement:\\n\\n')\n log.write('\\t' + tests[0].stmt)\n log.write('\\n\\n\\n')\n fmt = \"{0: <8} {1: <35} {2: <12} {3: <15} {4: <15} {5: <14}\\n\"\n log.write(fmt.format('Rank', 'Function Name', 'Time', '% of Slowest', 'timeit_repeat', 'timeit_number'))\n log.write(_line_break)\n log.write('\\n')\n\n for i, t in enumerate(tests):\n func_name = \"{}.{}\".format(t.classname, t.callable.__name__) if t.classname else t.callable.__name__\n if i == len(tests)-1:\n time_percent = 'Slowest'\n else:\n time_percent = \"{:.1f}\".format(t.time_average_seconds / tests[-1].time_average_seconds * 100)\n log.write(fmt.format(i+1,\n func_name,\n convert_time_units(t.time_average_seconds),\n time_percent,\n t.timeit_repeat,\n t.timeit_number))\n log.write(_line_break)\n\n if include_source:\n log.write('\\n\\n\\nSource Code:\\n')\n log.write(_line_break)\n for test in tests:\n log.write(test.log.getvalue())\n log.write(_line_break)\n\n if isinstance(fs, str):\n with open(fs, 'w') as f:\n f.write(log.getvalue())\n\n elif fs is None:\n print(log.getvalue())\n else:\n try:\n fs.write(log.getvalue())\n except AttributeError as e:\n print(e)",
"def group_apply_edges(self, group_by, func, edges=ALL, inplace=True):\n super(BaseGraphStore, self).group_apply_edges(group_by, func, edges, inplace=True)",
"def plot(self, ax=None, savefile=None, shells=None, color='b', title=None,\n xlabel=None, ylabel=None, withavg=False):\n import matplotlib.pyplot as plt\n if ax is None:\n plt.figure()\n axset=plt\n else:\n axset=ax\n\n cmax = float(max(self.counts))\n total = sum(self.counts)\n nalpha = 0.85 if cmax/total > 0.33 else 0.65\n maxy = 1.\n for di, df in enumerate(self.dfs):\n alpha=nalpha*self.counts[di]/cmax\n axset.plot(df.x, df.df, color=color, alpha=alpha)\n maxy_ = np.max(df.df)\n if maxy_ > maxy:\n maxy = maxy_\n\n if withavg and len(self) > 0:\n x = self.dfs[0].x\n axset.plot(x, self.average, 'r-')\n maxy_ = np.max(self.average)\n if maxy_ > maxy:# pragma: no cover\n maxy = maxy_\n\n if len(self) > 0:\n dtype = self.dfs[0].dtype\n unit = \"Ang.\" if dtype == \"R\" else \"Rad.\"\n tstr = \"Radial\" if dtype == \"R\" else \"Angular\"\n else:# pragma: no cover\n unit = \"unknown units\"\n tstr = \"\"\n \n if ax is None:\n if title is None:\n plt.title(\"{} Distribution Function of Collection\".format(tstr))\n else:\n plt.title(title)\n if xlabel is None:\n plt.xlabel(\"Distance ({})\".format(unit))\n else:\n plt.xlabel(xlabel)\n if ylabel is None:\n plt.ylabel(\"Accumulated Density\")\n else:\n plt.ylabel(ylabel)\n\n _plot_shells(axset, shells, maxy)\n \n if savefile is not None:\n plt.savefig(savefile)\n\n from gblearn.base import testmode\n if not testmode:# pragma: no cover\n plt.show()\n return axset",
"def plot_scoring(\n graphs: list,\n ref_partitions: object,\n graph_names: list,\n methods: list,\n scoring: Callable[\n [object, object], object\n ] = cdlib.evaluation.adjusted_mutual_information,\n nbRuns: int = 5,\n) -> object:\n forDF = []\n for i, g in enumerate(graphs):\n for m in methods:\n for r in range(nbRuns):\n partition = m(g)\n\n score = scoring(partition, ref_partitions[i]).score\n forDF.append([graph_names[i], score, partition.get_description()])\n df = pd.DataFrame(columns=[\"graph\", \"score\", \"method\"], data=forDF)\n ax = sns.lineplot(x=\"graph\", y=\"score\", hue=\"method\", data=df, legend=\"brief\")\n ax.legend(loc=\"best\")\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n plt.tight_layout()\n\n return ax",
"def parallel_group(\n G, group_by, ax=None, y_offset=-0.3, rotation=45, ha=\"right\", va=\"top\"\n):\n if ax is None:\n ax = plt.gca()\n nt = utils.node_table(G)\n # groups = nt.groupby(group_by).apply(lambda df: len(df)).sort_index()\n groups = sorted(nt[group_by].unique())\n\n for i, label in enumerate(groups):\n x = i * 4\n y = y_offset\n ax.annotate(label, xy=(x, y), ha=ha, va=va, rotation=rotation)\n ax.relim()",
"def editing_type_count_by_group_plot(lib_name,group_dict:Dict,dirstruct:DirectoryStructure,dict_colors):\n\n group_counts=dict()\n\n # get aggregate counts per group\n for group_name,group_nodes in group_dict.items():\n # get editing percent pileup and summary file names\n editing_percent_pileups=[dirstruct.pathName(lib_name,node,Stages.editing_type_count,EditTypeStage.edit_percent_pileup)\n for node in group_nodes ]\n summary_files=[dirstruct.pathName(lib_name,node,Stages.editing_type_count,EditTypeStage.file_summary)\n for node in group_nodes ]\n\n # calculatte aggregate distribution\n aggregate_counts,count_summary,pileup_length=editing_site_count_per_type(editing_percent_pileups,summary_files)\n # save it for plot\n group_counts[group_name]=aggregate_counts\n\n #output aggregate counts to file\n aggregate_summary_file=dirstruct.pathName(lib_name,group_name,Stages.editing_type_count,EditTypeStage.group_distribution_summary)\n count_summary.to_csv(aggregate_summary_file)\n #output counts per file to file\n group_summary_file=dirstruct.pathName(lib_name,group_name,Stages.editing_type_count,EditTypeStage.group_count_summary)\n count_summary.to_csv(group_summary_file)\n\n # generating the plot\n try:\n plt.figure()\n group_names=[name for name in group_dict.keys()]\n data=pd.concat(aggregate_counts for aggregate_counts in group_counts.values())\n\n data.index=group_names\n data=data.transpose()\n\n plt_res, axes = stacked_bar(data, show_values=True, value_format=\"{:.3f}\",\n y_label=\"Percent of sites\",size_plot=[18,20],use_dataframe=True,throw_zeros=True,dict_colors=dict_colors)\n\n #puts the ledgends outside of the plot\n plt_res.subplots_adjust(right=0.62)\n plt_res.legend(loc='center left',bbox_to_anchor=(1, 0.5),handles=axes[::-1])\n\n output_path = dirstruct.pathName(lib_name,None,Stages.editing_type_count,EditTypeStage.plot)\n plt_res.savefig(output_path)\n plt_res.show()\n except:\n logging.exception(\"edit plot failed\")",
"def plot_table(self):\r\n q = dict(sorted(decorator.arr.items(), key=lambda item: item[1]))\r\n print(\"PROGRAM | RANK | TIME ELAPSED\")\r\n count = 1\r\n for i in q:\r\n print(i[0], \"\\t\", count, \"\\t\", float(q[i]) * 1000, \"ms\")\r\n count += 1",
"def plot_group(self, group_name, domains, get_time_data, fs, get_freq_data=None, get_const_data=None):\n plots = []\n \n def many(f, n=4):\n return np.concatenate([f() for _ in range(n)])\n \n for domain in domains:\n \n if domain=='frequency':\n \n # HW accelerated FFT\n if get_freq_data != None:\n f_plot = sdr_plots.HWFreqPlot(\n [get_freq_data() for _ in range(4)],\n fs, animation_period=100, w=700)\n f_dt = dma_timer.DmaTimer(f_plot.add_frame, get_freq_data, 0.3)\n # SW FFT\n else:\n f_plot = sdr_plots.IQFreqPlot(\n [many(get_time_data) for _ in range(4)],\n fs, x_range=(-2000,2000), animation_period=100, w=700)\n f_dt = dma_timer.DmaTimer(f_plot.add_frame, lambda:many(get_time_data), 0.3)\n plots.append(dict(title='Frequency domain', plot=f_plot, control=f_dt))\n \n elif domain=='time' or domain=='time-binary':\n if domain=='time-binary':\n iq_plot = sdr_plots.IQTimePlot(many(get_time_data), fs, w=700, scaling=1, ylabel='Symbol value')\n iq_plot.set_line_mode(lines=True, markers=True, shape='hvh')\n iq_plot.get_widget().layout.yaxis.dtick=1\n else:\n iq_plot = sdr_plots.IQTimePlot(many(get_time_data), fs, w=700)\n iq_plot.set_line_mode(markers=False)\n iq_dt = dma_timer.DmaTimer(iq_plot.add_data, get_time_data, 0.05)\n plots.append(dict(title='Time domain', plot=iq_plot, control=iq_dt))\n \n elif domain=='constellation':\n c_plot = sdr_plots.IQConstellationPlot(many(get_const_data or get_time_data, n=10), h=550, fade=True)\n c_dt = dma_timer.DmaTimer(c_plot.add_data, get_const_data or get_time_data, 0.05)\n plots.append(dict(title='Constellation', plot=c_plot, control=c_dt,\n layout=ipw.Layout(width='550px', margin='auto')))\n \n self.timers.register_timers(group_name, list(map(lambda tab: tab['control'], plots)))\n return QpskOverlay.tab_plots(plots)",
"def evalRun(rerankRun, qrelsDict, metricFunc, debug=False):\n resArr = []\n\n for qid, scoreDict in rerankRun.items():\n relsSortedByScores = []\n\n val = 0\n\n if qid in qrelsDict:\n queryQrelDict = qrelsDict[qid]\n\n for did, score in getSorteScoresFromScoreDict(scoreDict):\n rel_score = 0\n if did in queryQrelDict:\n rel_score = queryQrelDict[did]\n\n relsSortedByScores.append(rel_score)\n\n val = metricFunc(relsSortedByScores, queryQrelDict) if queryQrelDict else 0\n\n if debug:\n print('%s %g' % (qid, val))\n\n resArr.append(val)\n\n res = np.mean(resArr)\n if debug:\n print('mean %g' % res)\n\n return res",
"def plot_results(outputs, x, e, t, a, folds, groups,\n quantiles, strat='quantile', adj='KM', plot=True):\n if plot:\n mpl.rcParams['hatch.linewidth'] = 2.0\n\n fig, big_axes = plt.subplots(\n figsize=(8 * (len(groups) + 2), 6 * len(quantiles)),\n nrows=len(quantiles),\n ncols=1)\n\n plt.subplots_adjust(hspace=0.4)\n\n i = 0\n for _, big_ax in enumerate(big_axes, start=1):\n big_ax.set_title(\n 'Receiver Operator Characteristic and Calibration at t=' +\n str(quantiles[i]) + '\\n',\n fontsize=16)\n big_ax.tick_params(\n labelcolor=(1., 1., 1., 0.0),\n top='off',\n bottom='off',\n left='off',\n right='off')\n i += 1\n \n eces = {}\n metrics = {}\n\n for quant in quantiles:\n eces[quant] = {}\n \n for i in range(len(quantiles)):\n\n scores = outputs[quantiles[i]]\n for j in range(len(groups) + 2):\n\n pt = (i * (len(groups) + 2) + j + 1)\n if plot:\n ax = fig.add_subplot(len(quantiles), len(groups) + 2, pt)\n else:\n ax = None\n \n if (j==1):\n eces[quantiles[i]]['all'] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n None,\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot) \n \n if (j>1):\n eces[quantiles[i]][groups[j - 2]] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups[j - 2],\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot)\n \n if (j==0):\n metrics[quantiles[i]] = plot_roc_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups,\n quantiles[i],\n plot=plot)\n\n for quant in quantiles:\n metrics[quant] = metrics[quant] + (eces[quant], )\n \n if plot: \n plt.show()\n return metrics",
"def makeaplot(events,\n sensitivities,\n hrf_estimates,\n roi_pair,\n fn=True):\n import matplotlib.pyplot as plt\n\n # take the mean and transpose the sensitivities\n sensitivities_stacked = mv.vstack(sensitivities)\n\n if bilateral:\n sensitivities_stacked.sa['bilat_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.bilat_ROIs)\n mean_sens = mv.mean_group_sample(['bilat_ROIs_str'])(sensitivities_stacked)\n else:\n sensitivities_stacked.sa['all_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.all_ROIs)\n mean_sens = mv.mean_group_sample(['all_ROIs_str'])(sensitivities_stacked)\n\n mean_sens_transposed = mean_sens.get_mapped(mv.TransposeMapper())\n\n # some parameters\n # get the conditions\n block_design = sorted(np.unique(events['trial_type']))\n reorder = [0, 6, 1, 7, 2, 8, 3, 9, 4, 10, 5, 11]\n block_design = [block_design[i] for i in reorder]\n # end indices to chunk timeseries into runs\n run_startidx = np.array([0, 157, 313, 469])\n run_endidx = np.array([156, 312, 468, 624])\n\n runs = np.unique(mean_sens_transposed.sa.chunks)\n\n for j in range(len(hrf_estimates.fa.bilat_ROIs_str)):\n comparison = hrf_estimates.fa.bilat_ROIs[j][0]\n if (roi_pair[0] in comparison) and (roi_pair[1] in comparison):\n roi_pair_idx = j\n roi_betas_ds = hrf_estimates[:, roi_pair_idx]\n roi_sens_ds = mean_sens_transposed[:, roi_pair_idx]\n\n for run in runs:\n fig, ax = plt.subplots(1, 1, figsize=[18, 10])\n colors = ['#7b241c', '#e74c3c', '#154360', '#3498db', '#145a32', '#27ae60',\n '#9a7d0a', '#f4d03f', '#5b2c6f', '#a569bd', '#616a6b', '#ccd1d1']\n plt.suptitle('Timecourse of sensitivities, {} versus {}, run {}'.format(roi_pair[0],\n roi_pair[1],\n run + 1),\n fontsize='large')\n plt.xlim([0, max(mean_sens_transposed.sa.time_coords)])\n plt.ylim([-5, 7])\n plt.xlabel('Time in sec')\n plt.legend(loc=1)\n plt.grid(True)\n # for each stimulus, plot a color band on top of the plot\n for stimulus in block_design:\n onsets = events[events['trial_type'] == stimulus]['onset'].values\n durations = events[events['trial_type'] == stimulus]['duration'].values\n stimulation_end = np.sum([onsets, durations], axis=0)\n r_height = 1\n color = colors[0]\n y = 6\n\n # get the beta corresponding to the stimulus to later use in label\n beta = roi_betas_ds.samples[hrf_estimates.sa.condition == stimulus.replace(\" \", \"\"), 0]\n\n for i in range(len(onsets)):\n r_width = durations[i]\n x = stimulation_end[i]\n rectangle = plt.Rectangle((x, y),\n r_width,\n r_height,\n fc=color,\n alpha=0.5,\n label='_'*i + stimulus.replace(\" \", \"\") + '(' + str('%.2f' % beta) + ')')\n plt.gca().add_patch(rectangle)\n plt.legend(loc=1)\n del colors[0]\n\n times = roi_sens_ds.sa.time_coords[run_startidx[run]:run_endidx[run]]\n\n ax.plot(times, roi_sens_ds.samples[run_startidx[run]:run_endidx[run]], '-', color='black', lw=1.0)\n glm_model = hrf_estimates.a.model.results_[0.0].predicted[run_startidx[run]:run_endidx[run], roi_pair_idx]\n ax.plot(times, glm_model, '-', color='#7b241c', lw=1.0)\n model_fit = hrf_estimates.a.model.results_[0.0].R2[roi_pair_idx]\n plt.title('R squared: %.2f' % model_fit)\n if fn:\n plt.savefig(results_dir + 'timecourse_localizer_glm_sens_{}_vs_{}_run-{}.svg'.format(roi_pair[0], roi_pair[1], run + 1))",
"def make_group_plot(args):\n directory = args.directory\n prefix = args.prefix\n buckets = args.buckets \n\n # Collect all the results and create placeholder for results.\n all_files = glob.glob(directory + \"/\" + prefix + \"*.csv\")\n df = pd.concat((pd.read_csv(f) for f in all_files), axis=1)\n df.columns = all_files\n results_raw = df.as_matrix()\n num_bins = int(np.ceil(results_raw.shape[0]/buckets))\n results_binned = np.zeros((results_raw.shape[1], num_bins))\n\n # Bin the results.\n for run in range(results_raw.shape[1]):\n for bin_idx in range(num_bins):\n results_binned[run, bin_idx] = (np.mean(results_raw[\n int(bin_idx*buckets):int(bin_idx*buckets+buckets), run]))\n\n # Build the plot.\n fig, ax = plt.subplots(figsize=(args.figSizeX, args.figSizeY))\n sns.tsplot(data = results_binned, ax=ax, ci=[68, 95], color=\"m\")\n\n # Save the plot.\n ax.set_title(prefix + ' -- Average Binned Return', fontsize=18)\n ax.set_xlabel('Bin', fontsize=18)\n ax.set_ylabel('Average Return', fontsize=18)\n plt.tick_params(axis='both', which='major', labelsize=18)\n ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n plt.savefig(os.path.join(directory, prefix+'_groupfig.png'), \n bbox_inches='tight')\n \n # Return binned results for group figure.\n return results_binned",
"def series_measure(function,group_filters,**options):\n\tresults=pd.Series()\n\tfor group_key, group_filter in group_filters.items():\n\t\tjoined_options={**options,**group_filter}\n\t\tif not callable(function):\n\t\t\tif \"func\" in joined_options.keys():\n\t\t\t\tfunc=joined_options.pop('func')\n\t\t\t\tresults[group_key]=func(**joined_options)\n\t\t\telse:\n\t\t\t\traise TypeError('function passed is not callable and no functions\\\n\t\t\t\t referenced in filters!')\n\t\telse:\n\t\t\tresults[group_key]=function(**joined_options)\n\treturn results",
"def plot_metric(df_metrics, name, batch_size=10, epochs=10):\n\n # One groupplot\n fig, axarr = plt.subplots(3, 4, sharey=True, sharex=True)\n plotname = 'apfd'\n subplot_labels = ['(a)', '(b)', '(c)']\n\n for column, nr in enumerate(sorted(df_metrics['negative_ratio'].unique())):\n for row, emb_size in enumerate(df_metrics['emb_size'].unique()):\n for agidx, (labeltext, task, linestyle) in enumerate(\n [('Classification', 'True', '-'), ('Regression', 'False', '-.')]):\n rel_df = df_metrics[\n (df_metrics['emb_size'] == str(emb_size)) & (df_metrics['negative_ratio'] == str(nr)) &\n (df_metrics['batch_size'] == str(batch_size)) & (df_metrics['epochs'] == str(epochs))]\n\n # rel_df[rel_df['agent'] == agent].plot(x='step', y='napfd', label=labeltext, ylim=[0, 1], linewidth=0.8,\n # style=linestyle, color=sns.color_palette()[agidx], ax=axarr[row,column])\n\n apfd = rel_df.loc[rel_df['classification'] == task, 'apfd']\n miu = np.round(np.mean(apfd), 2)\n sigma = np.round(np.std(apfd), 2)\n label = labeltext + '\\n $\\mu$ - ' + str(miu) + ' $\\sigma$ - ' + str(sigma)\n\n # sns.displot(data=rel_df, x=\"apfd\", hue='classification', kde=True, ax=axarr[row, column])\n\n sns.distplot(apfd, kde=True,\n bins=int(180 / 5), color=sns.color_palette()[agidx],\n hist_kws={'edgecolor': 'black'},\n kde_kws={'linewidth': 4, 'clip': (0.0, 1.0)}, label=label, ax=axarr[row, column])\n\n axarr[row, column].xaxis.grid(True, which='major')\n\n axarr[row, column].set_title('Emb_size - %s - Neg_Ratio - %s' % (emb_size, nr), fontsize=10)\n\n if row == 2:\n axarr[row, column].set_xlabel('APFD')\n if column == 0:\n axarr[row, column].set_ylabel('Density')\n\n axarr[row, column].legend(frameon=True, prop={'size': 6})\n\n # Tweak spacing to prevent clipping of ylabel\n fig.suptitle('APFD Parameter Tuning - %d Epochs and batch-size - %d' % (epochs, batch_size))\n fig.tight_layout()\n plt.savefig(name, bbox_inches='tight')\n plt.show()",
"def avgPlotter(graph, contribution_curves, mean_contribs, ax_degree, ax_avg, box_plot=False, median=True, log_scale=True, size_marker=5, network=\"\"):\n\n # Plot scatter\n contributions = [y[len(y) - 1] for _, y in contribution_curves]\n degree = [graph.degree(i) for i in range(graph.order())]\n existing_degrees = [d for d in sorted(set(degree))]\n min_degree = min(degree)\n max_degree = max(degree)\n ordered_contribs = [[] for i in range(len(existing_degrees))]\n for idx in range(len(degree)):\n ordered_contribs[existing_degrees.index(degree[idx])].append(contributions[idx])\n if box_plot:\n ax_degree.boxplot(ordered_contribs, positions=existing_degrees)\n elif median:\n median_contribs_degree = [np.median(ordered_contribs[i]) for i in range(len(existing_degrees))]\n error_bars = np.zeros((2, len(existing_degrees)))\n error_bars[0, :] = [median_contribs_degree[i] - np.percentile(ordered_contribs[i], 25) for i in range(len(existing_degrees))]\n error_bars[1, :] = [np.percentile(ordered_contribs[i], 75) - median_contribs_degree[i] for i in range(len(existing_degrees))]\n\n size_marker = [len(ordered_contribs[i]) * size_marker for i in range(len(existing_degrees))]\n ax_degree.scatter(existing_degrees, median_contribs_degree, s=size_marker)\n ax_degree.errorbar(existing_degrees, median_contribs_degree, error_bars,\n alpha=0.5, linestyle='--')\n else:\n mean_contribs_degree = [mean(ordered_contribs[i]) for i in range(len(existing_degrees))]\n std_mean_contribs_degree = []\n for i in range(len(existing_degrees)):\n if len(ordered_contribs[i]) > 1:\n std_mean_contribs_degree.append(stdev(ordered_contribs[i]) / np.sqrt(len(ordered_contribs[i])))\n else:\n std_mean_contribs_degree.append(0)\n\n size_marker = [len(ordered_contribs[i])*size_marker for i in range(len(existing_degrees))]\n ax_degree.scatter(existing_degrees, mean_contribs_degree, s=size_marker)\n ax_degree.errorbar(existing_degrees, mean_contribs_degree, std_mean_contribs_degree,\n alpha=0.5, linestyle='--')\n\n if log_scale:\n ax_degree.set_xscale('log')\n\n\n # Plot avg. contribution\n mean_color = (np.random.rand(), np.random.rand(), np.random.rand(), 0.3)\n if network == \"WS\":\n mean_color = \"green\"\n elif network == \"BA\":\n mean_color = \"orange\"\n elif network == \"FB\":\n mean_color = \"blue\"\n x = list(range(len(mean_contribs[0, :])))\n #ax_avg.plot(mean_contribs[0, :], color=mean_color)\n ax_avg.plot(mean_contribs[0, :], color=mean_color, )\n plt.fill_between(x, (mean_contribs[1, :]), (mean_contribs[2, :]), color=mean_color, alpha=0.3 ,edgecolor=None)\n plt.ylim(0, 100);",
"def hive_group(G, group_by, ax=None, offset=np.pi / 12):\n nt = utils.node_table(G)\n groups = sorted(nt[group_by].unique())\n\n if ax is None:\n ax = plt.gca()\n\n for grp in groups:\n theta = item_theta(groups, grp) + offset\n radius = 2 * (8 + len(nt[nt[group_by] == grp]) + 1)\n x, y = to_cartesian(radius, theta)\n ha, va = text_alignment(x, y)\n ax.annotate(grp, xy=(x, y), ha=ha, va=va)",
"def RunEstimate(update_func, num_points=31, median_flag=False):\n d = ReadHeights(nrows=None)\n labels = {1:'male', 2:'female'}\n\n suites = {}\n for key, xs in d.items():\n label = labels[key]\n print(label, len(xs))\n Summarize(xs)\n\n xs = thinkbayes2.Jitter(xs, 1.3)\n\n mus, sigmas = FindPriorRanges(xs, num_points, median_flag=median_flag)\n suite = Height(mus, sigmas, label)\n suites[label] = suite\n update_func(suite, xs)\n print('MAP', suite.MaximumLikelihood())\n\n suite1 = suites['male']\n suite2 = suites['female']\n\n mu1 = suite1.Marginal(0)\n sigma1 = suite1.Marginal(1)\n\n mu2 = suite2.Marginal(0)\n sigma2 = suite2.Marginal(1)\n\n diff = mu1 - mu2\n sigma = (sigma1 + sigma2) / 2\n\n pmf_d = diff / sigma\n\n thinkplot.Cdf(pmf_d.MakeCdf())\n thinkplot.Show(xlabel='# stddev between means',\n ylabel='PMF')",
"def aggregateFunction():\r\n global aggFunc\r\n aggFunc = []\r\n for objFunc in P_prime:\r\n aggFunc.append(objFunc[0]*FileSettings.settingsdict['weights'][0] +\r\n objFunc[1]*FileSettings.settingsdict['weights'][1] +\r\n objFunc[2]*FileSettings.settingsdict['weights'][2] +\r\n objFunc[3]*FileSettings.settingsdict['weights'][3])\r\n return aggFunc",
"def make_results_plot( df, k, reg ):\n\tuid = smalldf['user_id'].values\n\tbid = smalldf['business_id'].values\n\tactual = smalldf['stars'].values\n\tpredicted = np.zeros( len(actual) )\n\tcounter = 0\n\tfor biz_id, user_id in izip( bid, uid ):\n\t\tpredicted[counter] = rating( biz_id, user_id, k = k, reg = reg ) \n\t\tcounter = counter + 1\n\t# compare_results( actual, predicted )",
"def data_group():\n ...",
"def plot_slice_wise_measures(labels, preds, args):\n\n cal_roc = [[], []]\n cal_prrcf1 = [[], [], []] # save PR, RC, F1 respectively\n noncal_prrcf1 = [[], [], []]\n thres_all = []\n noncal_roc = [[], []]\n n_slices = len(labels)\n for thres in range(500, -1, -5):\n print(\"[Threshold # of pixels: {}]\".format(thres))\n thres_all.append(thres)\n cal_pgt, cal_pp, cal_tp, noncal_pgt, noncal_pp, noncal_tp = \\\n plaque_detection_rate(labels, preds, thres=thres)\n\n\n cal_prrcf1[0].append(float(cal_tp) / cal_pp if cal_pp != 0 else 0.0)\n cal_prrcf1[1].append(float(cal_tp) / cal_pgt)\n cal_prrcf1[2].append(2.0 * cal_tp / (cal_pgt + cal_pp))\n noncal_prrcf1[0].append(float(noncal_tp) / noncal_pp if noncal_pp != 0 else 0.0)\n noncal_prrcf1[1].append(float(noncal_tp) / noncal_pgt)\n noncal_prrcf1[2].append(2.0 * noncal_tp / (noncal_pgt + noncal_pp))\n\n cal_roc[0].append((cal_pp - cal_tp) / (n_slices - cal_pgt)) # false negative ratio\n cal_roc[1].append(cal_tp / cal_pgt) # true positive ratio\n noncal_roc[0].append((noncal_pp - noncal_tp) / (n_slices - noncal_pgt)) # false negative ratio\n noncal_roc[1].append(noncal_tp / noncal_pgt) # true positive ratio\n\n print('Cal: PR - {:.4f} RC - {:.4f} F1 - {:.4f} Noncal: PR - {:.4f} RC - {:.4f} F1 - {:.4f}'.format(\n cal_prrcf1[0][-1], cal_prrcf1[1][-1], cal_prrcf1[2][-1],\n noncal_prrcf1[0][-1], noncal_prrcf1[1][-1], noncal_prrcf1[2][-1]))\n print('Cal: fpr - {:.4f} tpr - {:.4f} Noncal: fpr - {:.4f} tpr - {:.4f}'.format(\n cal_roc[0][-1], cal_roc[1][-1], noncal_roc[0][-1], noncal_roc[1][-1]))\n\n # plot the roc curve and calculate AUC\n fig_names = ['calcified', 'non-calcified']\n for plq_metrics, fig_name in zip([cal_roc, noncal_roc], fig_names):\n plt.figure()\n lw = 2\n auc_metric = auc(plq_metrics[0], plq_metrics[1])\n print(\"{} : {}\".format(fig_name, auc_metric))\n plt.plot(plq_metrics[0], plq_metrics[1], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % auc_metric)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('slice-wise ROC curve of {} plaques'.format(fig_name))\n plt.legend(loc=\"lower right\")\n plt.savefig(\"./{}/{}_roc.png\".format(args.fig_dir, fig_name))\n\n for plq_metrics, fig_name in zip([cal_prrcf1, noncal_prrcf1], fig_names):\n plt.figure()\n lw = 2\n plt.plot(thres_all, plq_metrics[0], color='r', lw=lw, label='precision')\n plt.plot(thres_all, plq_metrics[1], color='g', lw=lw, label='recall')\n plt.plot(thres_all, plq_metrics[2], color='b', lw=lw, label='f1')\n\n plt.xlim([min(thres_all), max(thres_all)])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Threshold Number of Pixels')\n plt.title('{} measures under different thresholds'.format(fig_name))\n plt.legend(bbox_to_anchor=(1, 0.95), loc=\"upper right\")\n plt.savefig(\"./{}/{}_prrcf1.png\".format(args.fig_dir, fig_name))",
"def plot_model_ranking(self, var, show_text=False, obslabels=None):\n\n # search for model keys\n tmp = []\n for i in xrange(4):\n tmp = self._get_model_ranking(i + 1, var)\n if len(tmp) > 0:\n break # assumes that all datasets with observations have same models\n if len(tmp) == 0:\n print var\n print self.pos\n print self.data\n print('FATAL error: no model keys provided!')\n return None\n\n fig = plt.figure()\n gs = gridspec.GridSpec(1, 2, wspace=0.05, hspace=0.05, bottom=0.2, width_ratios=[3, 1])\n ax = fig.add_subplot(gs[0])\n\n # 1 vs. 2\n self.__draw_ranking_scatter(1, 2, var, color='red', marker='o', show_text=show_text, ax=ax, obslabels=obslabels)\n # 1 vs. 3\n self.__draw_ranking_scatter(1, 3, var, color='green', marker='*', ax=ax, show_text=show_text, obslabels=obslabels)\n # 1 vs. 4\n self.__draw_ranking_scatter(1, 4, var, color='blue', marker='^', ax=ax, show_text=show_text, obslabels=obslabels)\n # 2 vs. 3\n self.__draw_ranking_scatter(2, 3, var, color='grey', marker='x', ax=ax, show_text=show_text, obslabels=obslabels)\n # 2 vs 4\n self.__draw_ranking_scatter(2, 4, var, color='m', marker='+', ax=ax, show_text=show_text, obslabels=obslabels)\n # 3 vs 4\n self.__draw_ranking_scatter(3, 4, var, color='c', marker='h', ax=ax, show_text=show_text, obslabels=obslabels)\n\n if ax is not None:\n ax.legend(prop={'size': 8}, ncol=1, fancybox=True, loc='upper left')\n ax.set_xlabel('rank(observation X)')\n ax.set_ylabel('rank(observation Y)')\n ax.set_ylim(ymin=0, ymax=len(tmp) + 1)\n ax.set_xlim(xmin=0, xmax=len(tmp) + 1)\n ax.grid()\n ax.set_title('Comparison of model ranking: ' + var.upper())\n ax.plot(ax.get_xlim(), ax.get_xlim(), 'k--') # 1:1 line\n\n # legend\n ax2 = fig.add_subplot(gs[1])\n dy = 0.1\n yoff = dy\n for k in tmp:\n ax2.text(0.1, yoff, self._model2short_label(k) + ': ' + k)\n yoff += dy\n ax2.set_ylim(0., yoff)\n ax2.set_xticks([])\n ax2.set_yticks([])\n\n return fig",
"def plot_coupling_grid(baseline_group, fits_groups, metrics, fax=None):\n n_algorithms = len(fits_groups)\n n_metrics = len(metrics)\n\n if fax is None:\n fig, axes = plt.subplots(n_metrics, n_algorithms,\n figsize=(3 * n_algorithms, 3 * n_metrics))\n else:\n fig, axes = fax\n\n # iterate over metrics\n for row_idx, metric in enumerate(metrics):\n if metric == 'selection_ratio':\n baseline_coefs = baseline_group['coupling_coefs'][:]\n baseline_selection_ratio = \\\n calculate_selection_ratio(baseline_coefs).mean(axis=0)\n\n # iterate over algorithms\n for col_idx, algorithm in enumerate(fits_groups):\n if metric == 'selection_ratio':\n # calculate selection ratio for algorithm\n coefs = algorithm['coupling_coefs'][:]\n selection_ratio = calculate_selection_ratio(coefs).mean(axis=0)\n\n # plot direct comparison\n axes[row_idx, col_idx].scatter(\n baseline_selection_ratio,\n selection_ratio,\n alpha=0.5,\n color='k',\n edgecolor='w')\n else:\n axes[row_idx, col_idx].scatter(\n baseline_group[metric][:].mean(axis=0),\n algorithm[metric][:].mean(axis=0),\n alpha=0.5,\n color='k',\n edgecolor='w')\n\n return fig, axes",
"def det_plot(data, group_by, plot_title, save_figure_path=None):\n subgroups = data.groupby(group_by)\n li_subgroups = subgroups.groups\n\n fontsize = 12\n fig, ax = plt.subplots(figsize=(8, 8), constrained_layout=True)\n for subgroup in li_subgroups:\n # for each subgroup\n df_subgroup = subgroups.get_group(subgroup)\n labels, scores = (\n df_subgroup[\"label\"].values.astype(int),\n df_subgroup[\"score\"].values,\n )\n fpr, fnr, thresholds = calculate_det_curves(labels, scores)\n ax = draw_det_curve(\n fpr, fnr, ax=ax, label=subgroup, fontsize=fontsize, title=plot_title\n )\n\n ax.xaxis.set_major_formatter(mtick.FormatStrFormatter(\"%.e\"))\n plt.minorticks_off()\n ax.set_ylabel(\"FNR (%)\", fontsize=fontsize)\n ax.set_xlabel(\"FPR\", fontsize=fontsize)\n plt.legend(fontsize=fontsize)\n ax.set_xlim([1e-4, 1])\n ax.set_ylim([0, 30])\n\n ax.tick_params(axis=\"both\", labelsize=fontsize)\n\n # save figure\n if save_figure_path is not None:\n plt.savefig(save_figure_path)",
"def plot_groups(sb, **kw):\n\n #check kws\n B_flag = True\n if('B' in kw):\n B_flag = bool(kw['B'])\n E_flag = True\n if('E' in kw):\n E_flag = bool(kw['E'])\n ugroups = sb.unique_group_names\n if('groups' in kw):\n ugroups = set(kw['groups'])\n if('return_figs' in kw):\n if(kw['return_figs']):\n return_figs = True\n figs = {'E': {}, 'B': {}}\n else:\n return_figs = False\n else:\n if((not B_flag) or (not E_flag)):\n group_lim = 8\n else:\n group_lim = 4\n if(len(ugroups) <= group_lim):\n return_figs = True\n figs = {'E': {}, 'B': {}}\n else:\n return_figs = False\n\n flags = [B_flag, E_flag]\n fields = ['Bmax', 'Emax']\n ylabels = ['Maximum Magnetic Field (mG)', 'Maximum Electric Field (kV/m)']\n title_pre = ['Maximum Magnetic Field - ',\n 'Maximum Electric Field - ']\n keys = ['B', 'E']\n it = zip(flags, fields, ylabels, title_pre, keys)\n\n #iterate over groups with more than 1 CrossSection\n for xss in sb.groups:\n if(xss[0].group in ugroups):\n for (fl, fi, yl, ti, k) in it:\n if(fl):\n #get plotting objects\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n #init handles and labels lists for legend\n kw['H'], kw['L'] = [], []\n #plot the Bmax results for each xs in the group\n _plot_group_fields(ax, xss, fi, **kw)\n #plot wires\n max_field = max([xs.fields[fi].max() for xs in xss])\n _plot_group_wires(ax, xss, max_field, **kw)\n #draw ground surface if necessary\n if(len(xss) <= 2):\n _check_und_conds(xss, [ax], **kw)\n #plot ROW lines\n _plot_group_ROW_edges(ax, xss, **kw)\n #set axis text and legend\n ax.set_xlabel('Distance (ft)')\n ax.set_ylabel(yl)\n ax.set_title(textwrap.fill(ti + str(xss[0].group)))\n ax.legend(kw['H'], kw['L'], **_leg_kw)\n _format_line_axes_legends(ax)\n #save the figure if keyword 'save' == True, and append fig\n _save_fig('group_%s-%s' % (str(xss[0].group), fi), fig, **kw)\n #store the fig or close it\n if(return_figs):\n figs[k][xss[0].group] = fig\n else:\n plt.close(fig)\n\n if(return_figs):\n return(figs)"
] | [
"0.63259864",
"0.6046818",
"0.53915054",
"0.5336046",
"0.5121077",
"0.5051932",
"0.48781553",
"0.48333043",
"0.48324347",
"0.47948903",
"0.47788268",
"0.4766968",
"0.47533748",
"0.47356328",
"0.47074386",
"0.46614596",
"0.46601972",
"0.465941",
"0.46516296",
"0.46457088",
"0.46375453",
"0.46337804",
"0.4626232",
"0.46241716",
"0.4578506",
"0.45781216",
"0.4568808",
"0.45685348",
"0.45646355",
"0.45399988"
] | 0.7678806 | 0 |
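The rank_by_budget() record above computes, for every budget, a per-algorithm rank of the (median) function value and plots rank against budget on a log-scaled x axis. Below is a minimal, self-contained sketch of that idea only; the curves, algorithm names and budgets are invented stand-ins, and plain scipy.stats.rankdata replaces the pds.ranking()/GroupByMedian machinery the real function relies on, so treat it as an illustration rather than the dataset's own code.

# Illustration only: toy data standing in for the bbob/COCO-style "pds"
# object that rank_by_budget() actually consumes.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import rankdata

rng = np.random.default_rng(0)
budgets = np.logspace(0, 4, 50)           # evaluation budgets (log-spaced)
algs = ['alg-A', 'alg-B', 'alg-C']        # hypothetical algorithm names

# Median function value reached by each algorithm at each budget (invented curves).
fvals = np.vstack([budgets**-p * (1 + 0.1 * rng.random(budgets.size))
                   for p in (0.5, 0.7, 0.9)]).T   # shape: (n_budgets, n_algs)

# Per-budget rank of each algorithm; rank 1 = lowest (best) function value.
ranks = np.apply_along_axis(rankdata, 1, fvals)

fig, ax = plt.subplots()
for j, name in enumerate(algs):
    ax.plot(budgets, ranks[:, j], label=name)
ax.set_xscale('log')
ax.set_xlabel('Budget')
ax.set_ylabel('Rank by Median Function Value')
ax.grid()
ax.legend()
plt.show()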
Plot a rotated convergence plot. It is essentially like fval_by_budget(), but rotated by 90 degrees, showing how big a budget is required to reach every target. While this is a little less intuitive at first, it allows a better judgement of the performance impact of each strategy: with fval_by_budget(), a performance change is represented by a phase shift of the curve, while in evals_by_target() it simply translates the curve along the y axis. groupby is the method of aggregating results of multiple instances: a callable, stringable object, GroupByMedian by default. By default, the absolute evaluation count is shown, but values relative to some baseline dataset can be shown instead. | def evals_by_target(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
if baseline_ds:
baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs = groupby(ds.detEvals(targets), axis=1)
if baseline_ds:
fevs /= baseline_fevs
style['markevery'] = 64
ax.loglog(targets, fevs, label=name, basey=pfsize, **style)
ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))
if baseline_ds:
ax.set_yticks([2, 3.5], minor=True)
ax.set_xlabel('Function Value Targets')
ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))
ax.grid()
if baseline_ds:
ax.yaxis.grid(True, which = 'minor') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fval_by_budget(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n if baseline_ds:\n baseline_budgets = baseline_ds.funvals[:, 0]\n baseline_funvals = groupby(baseline_ds.funvals[:, 1:], axis=1)\n baseline_safefunvals = np.maximum(baseline_funvals, 10**-8) # eschew zeros\n # fvb is matrix with each row being [budget,funval]\n baseline_fvb = np.transpose(np.vstack([baseline_budgets, baseline_safefunvals]))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n budgets = ds.funvals[:, 0]\n funvals = groupby(ds.funvals[:, 1:], axis=1)\n\n # Throw away funvals after ftarget reached\n try:\n limit = np.nonzero(funvals < 10**-8)[0][0] + 1\n except IndexError:\n limit = np.size(budgets)+1\n budgets = budgets[:limit]\n funvals = funvals[:limit]\n\n fvb = np.transpose(np.vstack([budgets[:limit], funvals[:limit]]))\n\n if baseline_ds:\n # Relativize by baseline\n fvba = ra.alignArrayData(ra.VArrayMultiReader([fvb, baseline_fvb]))\n budgets = fvba[:, 0]\n funvals = fvba[:, 1] / fvba[:, 2]\n\n style['markevery'] = 16\n ax.loglog(budgets, funvals, label=name, basex=pfsize, **style)\n if baseline_ds:\n ax.set_yticks([1], minor=True)\n ax.set_xlabel('Budget')\n ax.set_ylabel(_fval_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')",
"def build_scatterplot(budget):\n frame = load_frames(budget=budget)\n X = frame[metrics]\n Y = frame['y']\n\n predicted = cross_val_predict(get_best_models(budget=budget, tool=settings.algo), X, Y, cv=20)\n\n fig, ax = plt.subplots()\n ax.scatter(Y, predicted, edgecolors=(0, 0, 0))\n ax.plot([Y.min(), Y.max()], [Y.min(), Y.max()], 'k--', lw=4)\n ax.set_xlabel('Measured')\n ax.set_ylabel('Predicted')\n plt.savefig('{}/cv-error-{}-{}.pdf'.format(settings.PLOTS, settings.algo, budget))",
"def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label=\"\", baseline2_ds=None, baseline2_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline1_ds:\n baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))\n if baseline2_ds:\n baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs1 = groupby(ds.detEvals(targets), axis=1)\n if baseline1_ds:\n fevs1 /= baseline1_fevs\n fevs2 = groupby(ds.detEvals(targets), axis=1)\n if baseline2_ds:\n fevs2 /= baseline2_fevs\n\n infsx = np.nonzero(fevs1 == inf)\n infs = infsx[0]\n if np.size(infs) > 0:\n #print infs\n fevs1 = fevs1[:infs[0]-1]\n fevs2 = fevs2[:infs[0]-1]\n\n #print name, fevs1, fevs2\n style['markevery'] = 64\n ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)\n ax.grid()\n ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. log(runlengths) + 1\n ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))\n ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby)))",
"def ecdf(data, group_by=None, targets=None, ax=None, **kwargs):\n text_color = plt.rcParams.get('ytick.color')\n linewidth = 2\n # Handle keyword arguments\n for k, v in kwargs.items():\n if k not in ['linewidth']:\n raise TypeError('ecdf got an unexpeted keyword argument: {}'.format(k))\n else:\n if k == 'linewidth':\n linewidth = v\n # Deal with input data\n if group_by is not None:\n if type(data) == pd.core.frame.DataFrame:\n print(\"Grouping DataFrame by {}\".format(group_by))\n print(\"Target Features:\", targets)\n if type(targets) == str:\n targets = [targets]\n else:\n try:\n it = iter(targets)\n except:\n targets = [targets]\n cols = targets + [group_by]\n data = data[cols]\n variables = data.columns[:-1]\n data = data.groupby(group_by)\n else:\n return(\"Error: only DataFrame input works with group_by functionality\")\n else: \n if type(data) == pd.core.series.Series:\n variables = [data.name]\n elif type(data) == pd.core.frame.DataFrame:\n if targets is None:\n variables = list(data.columns)\n else:\n if type(targets) == str:\n targets = [targets]\n else: \n try:\n it = iter(targets)\n except:\n targets = [targets]\n print(\"Target Features:\", targets)\n variables = targets\n elif type(data) == pd.core.groupby.generic.DataFrameGroupBy:\n variables = list(data.obj.columns)\n else:\n data = pd.Series(data, name='data')\n variables = [data.name]\n \n \n if type(data) == pd.core.groupby.generic.DataFrameGroupBy:\n for variable in variables:\n if not ax:\n fig, ax = plt.subplots(figsize=(12,8))\n max_x = 0\n for name, group in data:\n x = np.sort(group[variable])\n n = len(group)\n y = np.arange(1, n+1) / n\n ax.plot(x, y, marker='.', label=name, alpha=0.7, linewidth=linewidth)\n if max(x) > max_x:\n max_x = max(x)\n #max_x = 0\n ax.axhline(y=0.5, ls=':', color='gray')\n ax.axhline(y=0.05, ls=':', color='gray')\n ax.axhline(y=0.95, ls=':', color='gray')\n ax.annotate('0.5', xy=(max_x, 0.47))\n ax.annotate('0.95', xy=(max_x, 0.92))\n ax.annotate('0.05', xy=(max_x, 0.02))\n ax.legend()\n plt.title(\"ECDF for feature: {}\".format(variable), color=text_color)\n plt.show()\n \n else:\n n = len(data)\n y = np.arange(1, n+1) / n\n if not ax:\n fig, ax = plt.subplots(figsize=(12,8))\n max_x = 0\n for variable in variables:\n if type(data) == pd.core.series.Series:\n x = np.sort(data)\n string = variable\n else:\n x = np.sort(data[variable])\n string = 'Data'\n ax.plot(x, y, marker='.', label=variable)\n if max(x) > max_x:\n max_x = max(x)\n ax.axhline(y=0.5, ls=':', color='gray')\n ax.axhline(y=0.05, ls=':', color='gray')\n ax.axhline(y=0.95, ls=':', color='gray')\n ax.annotate('0.5', xy=(max_x, 0.47))\n ax.annotate('0.95', xy=(max_x, 0.92))\n ax.annotate('0.05', xy=(max_x, 0.02))\n plt.title(\"ECDF for {}\".format(string), color=text_color)\n plt.legend()\n plt.show()",
"def plot_results(outputs, x, e, t, a, folds, groups,\n quantiles, strat='quantile', adj='KM', plot=True):\n if plot:\n mpl.rcParams['hatch.linewidth'] = 2.0\n\n fig, big_axes = plt.subplots(\n figsize=(8 * (len(groups) + 2), 6 * len(quantiles)),\n nrows=len(quantiles),\n ncols=1)\n\n plt.subplots_adjust(hspace=0.4)\n\n i = 0\n for _, big_ax in enumerate(big_axes, start=1):\n big_ax.set_title(\n 'Receiver Operator Characteristic and Calibration at t=' +\n str(quantiles[i]) + '\\n',\n fontsize=16)\n big_ax.tick_params(\n labelcolor=(1., 1., 1., 0.0),\n top='off',\n bottom='off',\n left='off',\n right='off')\n i += 1\n \n eces = {}\n metrics = {}\n\n for quant in quantiles:\n eces[quant] = {}\n \n for i in range(len(quantiles)):\n\n scores = outputs[quantiles[i]]\n for j in range(len(groups) + 2):\n\n pt = (i * (len(groups) + 2) + j + 1)\n if plot:\n ax = fig.add_subplot(len(quantiles), len(groups) + 2, pt)\n else:\n ax = None\n \n if (j==1):\n eces[quantiles[i]]['all'] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n None,\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot) \n \n if (j>1):\n eces[quantiles[i]][groups[j - 2]] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups[j - 2],\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot)\n \n if (j==0):\n metrics[quantiles[i]] = plot_roc_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups,\n quantiles[i],\n plot=plot)\n\n for quant in quantiles:\n metrics[quant] = metrics[quant] + (eces[quant], )\n \n if plot: \n plt.show()\n return metrics",
"def plot_coupling_grid(baseline_group, fits_groups, metrics, fax=None):\n n_algorithms = len(fits_groups)\n n_metrics = len(metrics)\n\n if fax is None:\n fig, axes = plt.subplots(n_metrics, n_algorithms,\n figsize=(3 * n_algorithms, 3 * n_metrics))\n else:\n fig, axes = fax\n\n # iterate over metrics\n for row_idx, metric in enumerate(metrics):\n if metric == 'selection_ratio':\n baseline_coefs = baseline_group['coupling_coefs'][:]\n baseline_selection_ratio = \\\n calculate_selection_ratio(baseline_coefs).mean(axis=0)\n\n # iterate over algorithms\n for col_idx, algorithm in enumerate(fits_groups):\n if metric == 'selection_ratio':\n # calculate selection ratio for algorithm\n coefs = algorithm['coupling_coefs'][:]\n selection_ratio = calculate_selection_ratio(coefs).mean(axis=0)\n\n # plot direct comparison\n axes[row_idx, col_idx].scatter(\n baseline_selection_ratio,\n selection_ratio,\n alpha=0.5,\n color='k',\n edgecolor='w')\n else:\n axes[row_idx, col_idx].scatter(\n baseline_group[metric][:].mean(axis=0),\n algorithm[metric][:].mean(axis=0),\n alpha=0.5,\n color='k',\n edgecolor='w')\n\n return fig, axes",
"def plot_evaluation(values, info, measures = ['Dice','Jaccard', 'TPR', 'TNR', '1-GCE', 'VS', 'RI', 'ARI', 'MI', '1-VOI', 'ICC','1/(1+PBD)', 'KAP', 'AUC', '1/(1+HD)', '1/(1+AVD)', 'MHD' ], colourmap=None, outfile='polar_results.png'):\n _min = info['minimum']\n _max = info['maximum']\n if colourmap is None:\n colourmap = [[86./255.,180./255.,233./255.] for ii in range(values.shape[0])]\n else:\n # normalize colourmap values between 0 and 1\n colourmap = (colourmap-_min)/(_max-_min)\n # apply cividis, returns the RBG1 values for cividis, for dots\n colourmap = [[cm.cividis(ii)] for ii in colourmap] \n\n # elements of the circle\n N = len(measures)\n # evenly space measures around circle\n x_as = [n / float(N) * 2 * pi for n in range(N)] \n\n # Set color of axes\n plt.rc('axes', linewidth=0.5, edgecolor=\"#888888\")\n\n # Create polar plot\n fig = plt.figure(figsize = (11,9.5))\n gs = gridspec.GridSpec(1, 3, width_ratios=[17,2,1])\n ax = plt.subplot(gs[0], polar=True)\n \n # Set position of y-labels\n ax.set_rlabel_position(0)\n\n # Set color and linestyle of grid\n ax.xaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n ax.yaxis.grid(True, color=\"#888888\", linestyle='solid', linewidth=0.5)\n\n # Set yticks\n plt.yticks([0.2, 0.4, 0.6, 0.8, 1.0], [\"0.2\", \"0.4\", \"0.6\", \"0.8\", \"1.0\"], fontsize=15)\n pos=ax.get_rlabel_position()\n ax.set_rlabel_position(pos+0.4*360./float(len(measures)))\n\n # Plot data\n for ii in np.arange(values.shape[0]):\n xx = np.asarray(x_as) + np.random.randn(len(x_as))*np.diff(x_as)[0]/15.\n data_norm = None\n if info['logplot']:\n data_norm = matplotlib.colors.LogNorm(vmin=_min, vmax=_max)\n sc = ax.scatter(xx, values[ii,:], 23, color=colourmap[ii]*len(xx), norm=data_norm, zorder=3) \n\n # Fill area\n # close the circle\n median = list(np.median(values, axis=0))\n median += median[:1]\n upper = list(np.percentile(values, 75, axis=0))\n upper += upper[:1]\n lower = list(np.percentile(values, 25, axis=0))\n lower += lower[:1]\n x_as += x_as[:1]\n ax.plot(x_as, median, color=[86./255.,180./255.,233./255.], zorder=5)\n ax.fill_between(x_as, upper, lower, zorder=4, color=[86./255.,180./255.,233./255.], alpha=0.3)\n\n # Set number of radial axes and remove labels\n plt.xticks(x_as[:-1], [])\n\n # Set axes limits\n plt.ylim(0, 1)\n\n # Draw ytick labels to make sure they fit properly\n for i in range(N):\n angle_rad = i / float(N) * 2 * pi-0.05\n text_size = 21\n if i in {3,8}:\n ax.text(angle_rad, 1.15, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {0}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {1,5,7}:\n ax.text(angle_rad, 1.29, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {4}:\n ax.text(angle_rad, 1.32, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"top\")\n elif i in {10}:\n ax.text(angle_rad, 1.26, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {6}:\n ax.text(angle_rad, 1.25, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n elif i in {9}:\n ax.text(angle_rad, 1.18, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', 
verticalalignment=\"center\")\n else:\n ax.text(angle_rad, 1.22, measures[i]+\"\\n(m=%0.2f)\" %median[i], size=text_size, horizontalalignment='center', verticalalignment=\"center\")\n\n # colorbar location on figure\n cbaxes = plt.subplot(gs[2])\n\n # log scaling option\n norm = None\n if info['logplot']:\n norm = matplotlib.colors.LogNorm(vmin=_min,vmax=_max)\n\n img = plt.imshow(np.array([[_min,_max]]), aspect='auto', cmap=\"cividis\", norm=norm)\n img.set_visible(False)\n\n # initialize colorbar\n cbar = plt.colorbar(cax = cbaxes)\n\n # ticks and label\n c_values = cbar.get_ticks().tolist()\n \n ticklabels = [\"\" for ii in c_values]\n if _min < np.min(c_values):\n c_values = [_min] + c_values\n ticklabels = [\"%0.1f %s\" %(np.min(c_values), info['unit'])] + ticklabels\n else:\n ticklabels[0] = \"%0.1f %s\" %(np.min(c_values), info['unit'])\n\n if _max > np.max(c_values):\n c_values = c_values + [_max]\n ticklabels = ticklabels + [\"%0.1f %s\" %(np.max(c_values), info['unit'])]\n else:\n ticklabels[-1] = \"%0.1f %s\" %(np.max(c_values), info['unit'])\n \n cbar.set_ticks(c_values)\n cbar.set_ticklabels(ticklabels)\n cbaxes.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())\n cbar.ax.set_ylabel(info[\"label\"], labelpad=-20)\n \n # font sizes for colorbar\n cbar.ax.yaxis.label.set_size(19)\n cbar.ax.tick_params(labelsize=14)\n\n # Save and show polar plot \n plt.savefig(outfile)\n if info['display']:\n plt.show()\n plt.clf()\n plt.close('all')",
"def convergence():\n fig, axes = plt.subplots(nrows=2, figsize=figsize(aspect=1.2))\n\n # label names\n label1 = str(league.lambda1)\n label2_list = [str(lambda2) for lambda2 in league.lambda2_list]\n\n # point spread and point total subplots\n subplots = [\n (False, [-0.5, 0.5], league.spreads, 'probability spread > 0.5'),\n (True, [200.5], league.totals, 'probability total > 200.5'),\n ]\n\n for ax, (commutes, lines, values, ylabel) in zip(axes, subplots):\n\n # train margin-dependent Elo model\n melo = Melo(lines=lines, commutes=commutes, k=1e-4)\n melo.fit(league.times, league.labels1, league.labels2, values)\n\n line = lines[-1]\n\n for label2 in label2_list:\n\n # evaluation times and labels\n times = np.arange(league.times.size)[::1000]\n labels1 = times.size * [label1]\n labels2 = times.size * [label2]\n\n # observed win probability\n prob = melo.probability(times, labels1, labels2, lines=line)\n ax.plot(times, prob)\n\n # true (analytic) win probability\n if ax.is_first_row():\n prob = skellam.sf(line, int(label1), int(label2))\n ax.axhline(prob, color='k')\n else:\n prob = poisson.sf(line, int(label1) + int(label2))\n ax.axhline(prob, color='k')\n\n # axes labels\n if ax.is_last_row():\n ax.set_xlabel('Iterations')\n ax.set_ylabel(ylabel)\n\n set_tight(w_pad=.5)",
"def target_cov_plot(context):",
"def plot_convergence(\n optimizers: list = [\"COBYLA\", \"SLSQP\", \"L-BFGS-B\", \"NELDER-MEAD\"],\n g2N: float = 0.2,\n maxit: int = 10000,\n varform: list = [\"ry\"],\n depth: int = 3,\n nrep: int = 10,\n dataprefix: str = \"data/miniBMN\",\n datasuffix: str = \"h5\",\n figprefix: str = \"figures/miniBMN\",\n ht: float = 0.0,\n up: int = 1000,\n):\n # setup parameters\n params = dict()\n params[\"l\"] = str(g2N).replace(\".\", \"\")\n params[\"d\"] = depth\n params[\"v\"] = \"-\".join(varform)\n params[\"m\"] = maxit\n params[\"n\"] = nrep\n params[\"f\"] = dataprefix\n params[\"s\"] = datasuffix\n assert type(optimizers).__name__ == \"list\"\n # collect data\n result = collect_data(optimizers, params)\n # get best runs\n gs = dict()\n for r in optimizers:\n gs[r] = result.loc[r].groupby(\"rep\").apply(min).energy\n gsdf = pd.DataFrame.from_dict(gs, dtype=float)\n print(gsdf.describe().T[[\"min\", \"max\", \"mean\", \"std\"]])\n # Plot\n # select the best runs for each optimizer\n fig, ax = plt.subplots()\n for o in optimizers:\n result.loc[o, gsdf[o].idxmin()].plot(\n x=\"counts\", y=\"energy\", xlim=[0, up], label=o, ax=ax\n )\n ax.axhline(ht, c=\"k\", ls=\"--\", lw=\"2\", label=\"HT\")\n ax.set_xlabel(\"iterations\")\n ax.set_ylabel(\"VQE energy\")\n ax.legend(loc=\"upper right\")\n filename = f\"{figprefix}_l{params['l']}_convergence_{params['v']}_depth{params['d']}_nr{params['n']}_max{params['m']}_xlim{up}\"\n plt.savefig(f\"{filename}.pdf\")\n plt.savefig(f\"{filename}.png\")\n plt.savefig(f\"{filename}.svg\")\n plt.close()",
"def plot_cross_validation_metric(\n df_cv, metric, rolling_window=0.1, ax=None, figsize=(10, 6)\n):\n if ax is None:\n fig = plt.figure(facecolor='w', figsize=figsize)\n ax = fig.add_subplot(111)\n else:\n fig = ax.get_figure()\n # Get the metric at the level of individual predictions, and with the rolling window.\n df_none = performance_metrics(df_cv, metrics=[metric], rolling_window=0)\n df_h = performance_metrics(df_cv, metrics=[metric], rolling_window=rolling_window)\n\n # Some work because matplotlib does not handle timedelta\n # Target ~10 ticks.\n tick_w = max(df_none['horizon'].astype('timedelta64[ns]')) / 10.\n # Find the largest time resolution that has <1 unit per bin.\n dts = ['D', 'h', 'm', 's', 'ms', 'us', 'ns']\n dt_names = [\n 'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds',\n 'nanoseconds'\n ]\n dt_conversions = [\n 24 * 60 * 60 * 10 ** 9,\n 60 * 60 * 10 ** 9,\n 60 * 10 ** 9,\n 10 ** 9,\n 10 ** 6,\n 10 ** 3,\n 1.,\n ]\n for i, dt in enumerate(dts):\n if np.timedelta64(1, dt) < np.timedelta64(tick_w, 'ns'):\n break\n\n x_plt = df_none['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])\n x_plt_h = df_h['horizon'].astype('timedelta64[ns]').astype(np.int64) / float(dt_conversions[i])\n\n ax.plot(x_plt, df_none[metric], '.', alpha=0.5, c='gray')\n ax.plot(x_plt_h, df_h[metric], '-', c='b')\n ax.grid(True)\n\n ax.set_xlabel('Horizon ({})'.format(dt_names[i]))\n ax.set_ylabel(metric)\n return fig",
"def plot_associative_learning_progress(ax, df):\n\n num_objects_list = sorted(df.curr_num_objects.unique())\n legend_list = []\n for idx in num_objects_list:\n ax.plot(df[df.curr_num_objects == idx].groupby('objects_iter').rewards.mean())\n legend_list.append(f'ns={idx}')\n ax.set_xlabel('Stimulus iteration')\n ax.set_ylabel('P(correct)')\n ax.set_ylim([0.4, 1])\n ax.legend(legend_list)",
"def det_plot(data, group_by, plot_title, save_figure_path=None):\n subgroups = data.groupby(group_by)\n li_subgroups = subgroups.groups\n\n fontsize = 12\n fig, ax = plt.subplots(figsize=(8, 8), constrained_layout=True)\n for subgroup in li_subgroups:\n # for each subgroup\n df_subgroup = subgroups.get_group(subgroup)\n labels, scores = (\n df_subgroup[\"label\"].values.astype(int),\n df_subgroup[\"score\"].values,\n )\n fpr, fnr, thresholds = calculate_det_curves(labels, scores)\n ax = draw_det_curve(\n fpr, fnr, ax=ax, label=subgroup, fontsize=fontsize, title=plot_title\n )\n\n ax.xaxis.set_major_formatter(mtick.FormatStrFormatter(\"%.e\"))\n plt.minorticks_off()\n ax.set_ylabel(\"FNR (%)\", fontsize=fontsize)\n ax.set_xlabel(\"FPR\", fontsize=fontsize)\n plt.legend(fontsize=fontsize)\n ax.set_xlim([1e-4, 1])\n ax.set_ylim([0, 30])\n\n ax.tick_params(axis=\"both\", labelsize=fontsize)\n\n # save figure\n if save_figure_path is not None:\n plt.savefig(save_figure_path)",
"def plot_metric(df_metrics, name, batch_size=10, epochs=10):\n\n # One groupplot\n fig, axarr = plt.subplots(3, 4, sharey=True, sharex=True)\n plotname = 'apfd'\n subplot_labels = ['(a)', '(b)', '(c)']\n\n for column, nr in enumerate(sorted(df_metrics['negative_ratio'].unique())):\n for row, emb_size in enumerate(df_metrics['emb_size'].unique()):\n for agidx, (labeltext, task, linestyle) in enumerate(\n [('Classification', 'True', '-'), ('Regression', 'False', '-.')]):\n rel_df = df_metrics[\n (df_metrics['emb_size'] == str(emb_size)) & (df_metrics['negative_ratio'] == str(nr)) &\n (df_metrics['batch_size'] == str(batch_size)) & (df_metrics['epochs'] == str(epochs))]\n\n # rel_df[rel_df['agent'] == agent].plot(x='step', y='napfd', label=labeltext, ylim=[0, 1], linewidth=0.8,\n # style=linestyle, color=sns.color_palette()[agidx], ax=axarr[row,column])\n\n apfd = rel_df.loc[rel_df['classification'] == task, 'apfd']\n miu = np.round(np.mean(apfd), 2)\n sigma = np.round(np.std(apfd), 2)\n label = labeltext + '\\n $\\mu$ - ' + str(miu) + ' $\\sigma$ - ' + str(sigma)\n\n # sns.displot(data=rel_df, x=\"apfd\", hue='classification', kde=True, ax=axarr[row, column])\n\n sns.distplot(apfd, kde=True,\n bins=int(180 / 5), color=sns.color_palette()[agidx],\n hist_kws={'edgecolor': 'black'},\n kde_kws={'linewidth': 4, 'clip': (0.0, 1.0)}, label=label, ax=axarr[row, column])\n\n axarr[row, column].xaxis.grid(True, which='major')\n\n axarr[row, column].set_title('Emb_size - %s - Neg_Ratio - %s' % (emb_size, nr), fontsize=10)\n\n if row == 2:\n axarr[row, column].set_xlabel('APFD')\n if column == 0:\n axarr[row, column].set_ylabel('Density')\n\n axarr[row, column].legend(frameon=True, prop={'size': 6})\n\n # Tweak spacing to prevent clipping of ylabel\n fig.suptitle('APFD Parameter Tuning - %d Epochs and batch-size - %d' % (epochs, batch_size))\n fig.tight_layout()\n plt.savefig(name, bbox_inches='tight')\n plt.show()",
"def rank_by_budget(ax, pds, dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n try: # funcId is array?\n # _pds_plot_iterator[] uses funcId only for things we don't care for\n fakeFuncId = funcId[0]\n\n manyranking = np.array([pds.ranking((dim, i), groupby) for i in funcId])\n rankcount = np.shape(manyranking[0])[1] - 1\n amanyranking = ra.alignArrayData(ra.VArrayMultiReader(manyranking))\n budget = amanyranking[:,0]\n rankings = np.hsplit(amanyranking[:,1:], len(funcId))\n avgranking = np.average(rankings, axis=0)\n ranking = np.vstack([budget, avgranking.T]).T\n\n except TypeError: # funcId is scalar\n fakeFuncId = funcId\n ranking = pds.ranking((dim, funcId), groupby)\n\n i = 0\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, fakeFuncId):\n if kind != 'algorithm' and kind != 'strategy':\n continue\n #print name, ds\n budgets = ranking[:,0]\n ranks = ranking[:,1+i]\n\n style['markevery'] = 64\n ax.plot(budgets, ranks, label=name, **style)\n i += 1\n\n ax.set_xlabel('Budget')\n ax.set_ylabel('Rank by '+str(groupby).title()+' Function Value')\n ax.set_xscale('log', basex=pfsize)\n ax.grid()",
"def finalize_plot(self, artifact_name, attacker_x=None, attacker_y=None):\n # Plot the axis ticks.\n plt.ylim((self.min_y - 10.0, self.max_y + 10.0))\n plt.xlim((self.min_x - 10.0, self.max_x + 10.0))\n plt.xticks([self.min_x + 1000, 0.0, self.max_x], size=15)\n plt.yticks([self.min_y + 1000, 0.0, self.max_y], size=15)\n # Add and place the labels.\n ax = plt.gca()\n plt.ylabel(\"Crossrange (ft)\", size=15)\n plt.xlabel(\"Downrange (ft)\", size=15)\n plt.subplots_adjust(bottom=0.25, left=0.25)\n ax.yaxis.set_label_coords(-0.1, 0.5)\n # Place the plane.\n plane = plt.imread(\"plane.png\").transpose((1, 0, 2))\n width = (self.max_x - self.min_x) / 10\n height = (496.0 / 499.0) * width\n x_start = -(width / 2.0)\n y_start = -(height / 2.0)\n plt.imshow(plane, extent=[x_start, x_start + width,\n y_start, y_start + height], zorder=100)\n plane = np.flip(plane, 1)\n if attacker_x is None:\n attacker_x = self.max_x - (2 * width)\n if attacker_y is None:\n attacker_y = self.max_y - (2 * height)\n red_plane = self.color_plane_png(plane, [1.0, 0, 0], True)\n plt.imshow(red_plane, zorder=100,\n extent=[attacker_x, attacker_x + width,\n attacker_y, attacker_y + height])\n self.record_artifact(plt, artifact_name, \"matplotlib\")\n plt.clf()",
"def plot_calibration_curve(est, name, fig_index):\n # Calibrated with isotonic calibration\n isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')\n\n # Calibrated with sigmoid calibration\n sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')\n\n # Logistic regression with no calibration as baseline\n lr = LogisticRegression(C=1.)\n\n fig = plt.figure(fig_index, figsize=(10, 10))\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n for clf, name in [(lr, 'Logistic'),(est, name),(isotonic, name + ' + Isotonic'),(sigmoid, name + ' + Sigmoid')]:\n #Para cada modelo, entrenamos y predecimos \n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(X_test)[:, 1]\n else: # use decision function\n prob_pos = clf.decision_function(X_test)\n prob_pos = \\\n (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\\n\" % f1_score(y_test, y_pred))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Fraction of positives\")\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)')\n\n ax2.set_xlabel(\"Mean predicted value\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"upper center\", ncol=2)\n\n plt.tight_layout()",
"def plotDistributionWithLimitsRefine(lXs, llYs, lKClassif,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\",legend=\"\"):\n\n fig = plt.Figure(figsize=(40,20))\n fig.suptitle(title, fontsize=32)\n nbPlots = len(llYs)\n sqrt = int(math.ceil(math.sqrt(nbPlots)))\n ymax = 0.0\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ymax = max(max(val[0]),ymax)\n ymaxCurrent = max(max(val[2]),ymax)\n ymax = ymax*1.05\n xmax = 147\n gs = gridspec.GridSpec(1,2) \n ax = fig.add_subplot(gs[0])\n gsLimit = gridspec.GridSpecFromSubplotSpec(sqrt,sqrt, subplot_spec=gs[1])\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ax.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent = fig.add_subplot(gsLimit[i]) \n axCurrent.fill_between(lXs, val[1], val[2], alpha=0.35, edgecolor='black', facecolor=Graphics.lColors[i%25])\n axCurrent.set_title(\"Cluster K{}, (position: {})\".format(i,lKClassif[i]))\n axCurrent.fill_between(lXs, val[3], val[4], alpha=0.85, edgecolor='darkgray', facecolor='lightgray')\n axCurrent.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent.set_ylim(0,ymaxCurrent)\n axCurrent.set_xlim(1,xmax)\n axCurrent.text(10, ymaxCurrent*0.90, \"#nucleosomes: {}\".format(legend[i]), fontsize=12)\n axis_font = {'size':'28'}\n ax.set_ylim(0,ymax)\n ax.set_xlim(1,xmax)\n ax.legend([\"K{}\".format(x) for x in range(0,nbPlots)])\n ax.set_title(\"all nucleosomes\", **axis_font)\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)",
"def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)",
"def make_accuracy_plot(ax,\n groundtruth_boxes,\n hpu_boxes,\n cpu_boxes,\n hpu_strategy,\n label,\n N=10,\n num_graph_points=20,\n match_mode=\"ellipse\",\n):\n print \"Making plot for\", repr(label)\n print \"TODO: this should graph seconds per image\"\n mix_fractions = np.linspace(0, 1.0, num_graph_points)\n # Plot confidence intervals\n min_ci = []\n max_ci = []\n mean_accs = []\n stderr_accs = []\n for mix_fraction in mix_fractions:\n accuracies = [\n maximum_F_score(\n groundtruth_boxes,\n hpu_strategy(hpu_boxes, cpu_boxes, mix_fraction),\n match_mode=match_mode,\n )\n for _ in xrange(N)\n ]\n mean_accs.append(np.mean(accuracies))\n stderr_accs.append(np.std(accuracies, ddof=1) / np.sqrt(N))\n #print mix_fraction, np.mean(accuracies)\n ax.errorbar(mix_fractions, mean_accs, stderr_accs, label=label)\n ax.set_xlabel(\"Fraction of HPU-labeled images\")\n ax.set_ylabel(\"Maximum F-score\")",
"def plot_cdf_compare(self, output_fn_base=\"CDF_compare.png\"):\n self.logger.debug(\"Plot CDF to %s_[train|test].png\", output_fn_base)\n\n timeout = self.scenario.cutoff\n\n data = self.data\n\n def prepare_data(x_data):\n \"\"\" Helper function to keep things easy, generates y_data and\n manages x_data-timeouts \"\"\"\n x_data = sorted(x_data)\n y_data = np.array(range(len(x_data)))/(len(x_data)-1)\n for idx in range(len(x_data)):\n if (timeout != None) and (x_data[idx] >= timeout):\n x_data[idx] = timeout\n y_data[idx] = y_data[idx-1]\n return (x_data, y_data)\n\n # Generate y_data\n data = {config_name : {label : prepare_data(x_data) for label, x_data in\n data[config_name].items()}\n for config_name in data}\n\n output_fn = [output_fn_base + \"_\" + inst_set + '.png' for inst_set in\n ['train', 'test']]\n\n for inst_set, out in zip(['train', 'test'], output_fn):\n f = plt.figure(1, dpi=100, figsize=(10,10))\n ax1 = f.add_subplot(1,1,1)\n ax1.step(data['default'][inst_set][0],\n data['default'][inst_set][1], color='red',\n linestyle='-', label='default train')\n ax1.step(data['incumbent'][inst_set][0],\n data['incumbent'][inst_set][1], color='blue',\n linestyle='-', label='incumbent train')\n ax1.legend()\n ax1.grid(True)\n ax1.set_xscale('log')\n ax1.set_ylabel('probability of being solved')\n ax1.set_xlabel('time')\n # Plot 'timeout'\n if timeout:\n ax1.text(timeout,\n ax1.get_ylim()[0] - 0.1 * np.abs(ax1.get_ylim()[0]),\n \"timeout \", horizontalalignment='center',\n verticalalignment=\"top\", rotation=30)\n ax1.axvline(x=timeout, linestyle='--')\n\n f.tight_layout()\n f.savefig(out)\n plt.close(f)\n return output_fn",
"def parameter_forecast_plot(model_obj,time_index,start,end,num_samples = 100,cached_samples=None,col_labels = ['P','PET','Lag-1 Q','Lag-1 P','Seasonal','P$^2$','Constant']):\n \n f = plt.figure(figsize = (8,10))\n num_components = len(col_labels)\n gs = gridspec.GridSpec(8+2*num_components,6)\n ax0 = plt.subplot(gs[-8:-6,:])\n ax1 = plt.subplot(gs[-6::,:])\n col_labels = ['P','PET','Lag-1 Q','Lag-1 P','Seasonal','P$^2$','Constant']\n ffbs = model_obj # 120 is French Broad River at Blantyre, NC\n if cached_samples is None:\n samples = ffbs.backward_sample(num_samples=num_samples)\n else: \n samples = cached_samples\n for i in range(7):\n ax_new = plt.subplot(gs[2*i:2*i+2,:])\n\n upper = np.percentile(samples[start:end,i,:],75,axis = 1)\n mid = np.percentile(samples[start:end,i,:],50,axis = 1)\n lower = np.percentile(samples[start:end,i,:],25,axis = 1)\n\n ax_new.plot(time_index[start:end],mid,color='k')\n ax_new.fill_between(time_index[start:end],upper,lower,color='0.8')\n ax_new.tick_params(labelbottom=False,direction='in')\n ax_new.text(0.02, 0.82,col_labels[i],\n horizontalalignment='left',\n verticalalignment='center',transform=ax_new.transAxes)\n\n ax1.plot(time_index[start:end],ffbs.f[start:end],color='k',label='1-step forecast')\n ax1.plot(time_index[start:end],ffbs.Y[start:end],color='k',linestyle='',marker='+',\n markersize = 10,label='Observed streamflow')\n\n ax1.fill_between(time_index[start:end],\n np.squeeze(ffbs.f[start:end] + 2*ffbs.Q[start:end,0]),\n np.squeeze(ffbs.f[start:end] - 2*ffbs.Q[start:end,0]),color='0.8',\n label = 'Forecast $\\pm 2V_t$')\n ax1.tick_params(direction='in')\n ax1.legend(loc='upper right',ncol=1,frameon=True)\n #ax1.set_ylabel('Standardized streamflow')\n ax1.set_xlabel('Date',fontsize=16)\n ax1.get_yaxis().set_label_coords(-0.1,0.5)\n ax1.text(0.02, 0.92,'Standardized streamflow',\n horizontalalignment='left',\n verticalalignment='center',transform=ax1.transAxes,)\n ax0.plot(time_index[start:end],ffbs.s[start:end],color='k')\n ax0.text(0.02, 0.82,'$E[V_t]$',\n horizontalalignment='left',\n verticalalignment='center',transform=ax0.transAxes,)\n ax0.get_yaxis().set_label_coords(-0.1,0.5)\n return f,samples",
"def plot_evaluation(parameters_dict, log_df, settings, evaluation_set_kde, plotname):\n\n\n plots = []\n\n\n ### setup the colors for each component\n if int(settings['nr_components']) < 3:\n colors = ['rgb(228,26,28)', 'rgb(55,126,184)']\n elif int(settings['nr_components']) < 13:\n colors = np.array(cl.scales[str(settings['nr_components'])]['qual']['Paired'])\n else:\n colors = cl.interp(cl.scales['10']['qual']['Paired'], 20)\n\n\n ### set up ab list\n ab_list = evaluation_set_kde['contact'].keys()\n\n\n\n\n ####################### plotting of settings\n print_to_table = {}\n for key in sorted(settings.keys()):\n if key not in ['fold_id_dir','plot_name', 'fixed_parameters', 'threads_proteins', 'qijab_dir',\n 'debug_mode', 'parameter_file', 'settings_file', 'optimization_log_file', 'braw_dir', 'pdb_dir', 'paramdir',\n 'mask_sse', 'lambda_w_fix', 'lfactor', 'plotdir', 'psicov_dir', 'contact', 'hessian_pseudocount']:\n print_to_table[key] = settings[key]\n\n print(\"Generate settings table...\")\n table_settings_1 = plot_settings_table(print_to_table, 1)\n table_settings_2 = plot_settings_table(print_to_table, 2)\n table_settings_3 = plot_settings_table(print_to_table, 3)\n plots.append(table_settings_1)\n plots.append(table_settings_2)\n plots.append(table_settings_3)\n\n\n ####################### negLL and realted plots\n if 'step' in log_df.columns and 'pass' in log_df.columns:\n\n if 'negLL' in log_df.columns:\n plot_negll = plot_convergence_trace_plotly(log_df,\n name=['negLL', 'negLL_crossval'],\n plot_title='neg LL trace for training and cross-val set')\n plots.append(plot_negll)\n\n plot_expfit_negll = plot_exponentialFit_negLL(log_df, plot_title='exponential Fit neg LL')\n plots.append(plot_expfit_negll)\n\n if 'timestamp' in log_df.columns:\n plot_timestamps = plot_convergence_trace_plotly(log_df,\n name=['timestamp'],\n plot_title='time (s) per iteration')\n plots.append(plot_timestamps)\n\n\n if 'gradient_norm_weights' in log_df.columns:\n plot_grad_norm_weights = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_weights'],\n plot_title='norm of weight gradients')\n plots.append(plot_grad_norm_weights)\n\n if 'gradient_norm_means' in log_df.columns:\n plot_grad_norm_means = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_means'],\n plot_title='norm of mean gradients')\n plots.append(plot_grad_norm_means)\n\n if 'gradient_norm_prec' in log_df.columns:\n plot_grad_norm_prec = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_prec'],\n plot_title='norm of precMat gradients')\n plots.append(plot_grad_norm_prec)\n\n\n ####################### plotting of parameters\n print(\"Generate distribution of parameters...\")\n\n #weights\n weights_dict = {}\n for component in range(settings['nr_components']):\n weights_dict['component ' + str(component)] = {\n 'weights (contact)': parameters_dict[\"weight_contact_\" + str(component)][0],\n 'weights (bg)': parameters_dict[\"weight_bg_\" + str(component)][0]\n }\n plot_weights = plot_barplot(\n weights_dict,\n 'Distribution of weights',\n 'component weights',\n type='group',\n colors=colors\n #,plot_out=\"/home/vorberg/weights.html\"\n )\n\n #mu\n mu_df = pd.DataFrame.from_dict(dict((k, parameters_dict[k]) for k in sorted(parameters_dict.keys()) if 'mu' in k))\n plot_means = plot_boxplot(\n mu_df,\n 'Distribution of Means',\n \"values of mean parameters\",\n colors=colors\n #,plot_out=\"/home/vorberg/mus.html\"\n )\n\n #std deviation\n prec_df = pd.DataFrame.from_dict(dict((k, parameters_dict[k]) for k in 
sorted(parameters_dict.keys()) if 'prec' in k))\n try:\n std_dev = prec_df.apply(lambda p: np.sqrt(1.0/p))\n if settings['prec_wrt_L']:\n std_dev = prec_df.apply(lambda p: np.sqrt(1.0/(p*142))) #in case precision is specified depending on L=142\n except ZeroDivisionError as e:\n print(e)\n std_dev=prec_df\n\n std_dev.columns = [column_name.replace(\"prec\", \"std\") for column_name in std_dev.columns]\n plot_stddev = plot_boxplot(\n std_dev,\n 'Distribution of std deviations',\n \"values of std deviation parameters\",\n colors=colors\n #,plot_out=\"/home/vorberg/std.html\"\n )\n\n\n plots.append(plot_weights)\n plots.append(plot_means)\n plots.append(plot_stddev)\n\n ####################### Scatterplot mu vs std dev\n print(\"Generate scatter plot mu vs std...\")\n scatter_dict = {}\n for component in range(settings['nr_components']):\n scatter_dict['mu_'+str(component)] = [\n mu_df['mu_'+str(component)].tolist(),\n std_dev['std_'+str(component)].tolist(),\n AB.values()\n ]\n plot_mu_vs_stddev = plot_scatter(scatter_dict,\n 'Mean vs std deviation',\n 'mean',\n \"std deviation\",\n False,\n colors\n #,plot_out=\"/home/vorberg/mu_vs_std.html\"\n )\n\n plots.append(plot_mu_vs_stddev)\n\n\n ############################################## plotting of gradient norms\n print(\"Generate gradient norms plot...\")\n\n #gradients for mu\n mu_grad_dict = {}\n annotations_dict = {}\n for component in range(settings['nr_components']):\n key = 'mu_'+str(component)\n mu_grad_dict[key] = log_df[key].tolist()[-1]\n annotations_dict[key] = AB\n\n\n plot_gradient_mu_stats = jitter_plot(mu_grad_dict,\n 'Distribution of gradients for mean in last iteration',\n annotations_dict,\n colors,\n None)\n plots.append(plot_gradient_mu_stats)\n\n\n #gradients for precMat\n precMat_grad_dict = {}\n annotations_dict = {}\n for component in range(settings['nr_components']):\n key = 'prec_'+str(component)\n precMat_grad_dict['diagPrecMat_'+str(component)] = log_df[key].tolist()[-1]\n annotations_dict['diagPrecMat_'+str(component)] = AB\n\n\n plot_gradient_precMat_stats = jitter_plot(\n precMat_grad_dict,\n 'Distribution of gradients for precMat in last iteration',\n annotations_dict,\n colors,\n None\n )\n plots.append(plot_gradient_precMat_stats)\n\n ##################################### plotting of gradient trace of a specific ab pair for all components\n print(\"Generate gradient trace plot...\")\n\n gradient_df = log_df.filter(regex=(\"mu_[0-9]*\"))\n plot_gradient_mu_ab_trace = plot_gradient_ab_trace(gradient_df,\n ab_list,\n colors\n )\n plots.append(plot_gradient_mu_ab_trace)\n\n gradient_df = log_df.filter(regex=(\"prec_[0-9]*\"))\n plot_gradient_prec_ab_trace = plot_gradient_ab_trace(\n gradient_df,\n ab_list,\n colors\n )\n plots.append(plot_gradient_prec_ab_trace)\n\n\n ##################################### plotting of univariate mixtures\n if len(evaluation_set_kde['contact']) == 0 or len(evaluation_set_kde['bg']) == 0:\n print \"Evaluation set is empty. 
Cannot plot Mixture Visualization.\"\n else:\n print(\"Generate parameter visualization 1d plots...\")\n plots.append(plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, settings['prec_wrt_L']))\n # plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, settings['prec_wrt_L'], plot_out=\"/home/vorberg/1d_vis.html\")\n\n # ------------------------------------------------------------------------------\n ### define merged plot\n # ------------------------------------------------------------------------------\n cols = 3.0\n rows = int(np.ceil((len(plots)-1) / cols)) + 2\n subplot_titles = []\n\n # set up titles\n for plot in range(len(plots)-1):\n subplot_titles.append(plots[plot]['layout']['title'])\n if len(subplot_titles) < (cols * (rows-2)):\n for i in range(int((cols * (rows-2))) - len(subplot_titles) ):\n subplot_titles.append(\" \")\n subplot_titles.append(plots[-1]['layout']['title'])\n\n\n # plot all plots as subplots\n fig = tools.make_subplots(rows=rows,\n cols=3,\n specs = [ [{} for col in range(int(cols))] for row in range(rows-2)] + \\\n [[{'rowspan':2, 'colspan': 3}, None, None], [None, None, None]],\n subplot_titles=tuple(subplot_titles),\n print_grid=False)\n\n\n\n\n for i, plot in enumerate(plots[:-1]):\n col = i % int(cols)\n row = (i - col) / int(cols)\n\n #add traces to subplot\n for trace in plot['data']:\n trace['showlegend']=False\n fig.append_trace(trace, row + 1, col + 1)\n\n # adjust x and y axis for table plotting\n if 'annotations' in plot['layout'].keys():\n for cell in plot['layout']['annotations']:\n cell['yref'] = 'y' + str(i + 1)\n cell['xref'] = 'x' + str(i + 1)\n fig['layout']['annotations'] += plot['layout']['annotations']\n\n # adjust axis for all plots\n fig['layout']['xaxis' + str(i + 1)].update(plot['layout']['xaxis1'])\n fig['layout']['yaxis' + str(i + 1)].update(plot['layout']['yaxis1'])\n\n ## add mixture visualisation plot - spans 3 columns\n for trace in plots[-1]['data']:\n fig.append_trace(trace, int(rows)-1, 1)\n fig['layout']['xaxis' + str(int(cols * (rows-2) + 1))].update(plots[-1]['layout']['xaxis1'])\n fig['layout']['yaxis' + str(int(cols * (rows-2) + 1))].update(plots[-1]['layout']['yaxis1'])\n\n #check which plots are visible/invisible according to menu selection\n trace_visibility_ab = {}\n for ab in range(len(ab_list)):\n trace_visibility_ab[ab] = []\n for i, plot in enumerate(plots):\n if 'updatemenus' not in plot['layout'].keys():\n trace_visibility_ab[ab].extend([True] * len(plot['data']))\n else:\n trace_visibility_ab[ab].extend(plot['layout']['updatemenus'][0]['buttons'][ab]['args'][1])\n\n\n #use menu of last plot (=vis of mixture) as template for multiplot menu\n fig['layout']['updatemenus'] = plots[-1]['layout']['updatemenus']\n for ab in range(len(ab_list)):\n fig['layout']['updatemenus'][0]['buttons'][ab]['args'][1] = trace_visibility_ab[ab]\n\n\n fig['layout']['legend']['yanchor'] = 'bottom'\n fig['layout']['legend']['y'] = 0\n fig['layout']['height'] = rows * 250\n fig['layout']['font'] = {'size': 18} # set global font size\n\n plotly_plot(fig, filename=plotname, auto_open=False)",
"def plot(self, **kwargs):\n\n # get colors\n colors = kwargs.get(\"colors\", GW_OBSERVATORY_COLORS)\n\n # get Result samples\n self._samples = {\n label: value.posterior\n for label, value in self.results.items()\n if isinstance(value, Result)\n }\n\n # get Grid posteriors\n self._grids = {\n label: [value, value.ln_evidence] # store grid and log evidence\n for label, value in self.results.items()\n if isinstance(value, Grid)\n }\n\n # apply offsets for slightly nicer plots axes\n self.parameter_offsets = {parameter: 0.0 for parameter in self.parameters}\n if len(self._grids) == 0 and len(self._samples) == 1:\n for label in self._samples:\n for parameter in self.parameters:\n srange = [\n np.min(self._samples[label][parameter]),\n np.max(self._samples[label][parameter]),\n ]\n label_suffix = \"\"\n\n # offset values\n median = np.median(self._samples[label][parameter])\n relwidth = np.abs((srange[1] - srange[0]) / median)\n\n if relwidth < 1e-4:\n offsetstr = f\"{median:.4e}\"\n a, b = offsetstr.split(\"e\")\n\n if np.abs(int(b)) < 3:\n offsetstr = f\"{median:.4f}\"\n offset = float(offsetstr)\n else:\n offset = float(offsetstr)\n offsetstr = a + rf\"\\!\\times\\!10^{{{int(b)}}}\"\n\n self.parameter_offsets[parameter] = offset\n\n self._samples[label][parameter] -= offset\n label_suffix = rf\" [${{\\scriptstyle {offsetstr}}}$]\"\n\n self.latex_labels[parameter] += label_suffix\n\n colordicts = []\n for j, res in enumerate([self._samples, self._grids]):\n colordicts.append({})\n for i, key in enumerate(res):\n if key in colors:\n colordicts[-1][key] = colors[key]\n elif key.lower() == \"joint\":\n # if using \"Joint\" as the multi-detector analysis key, set the color to black\n colordicts[-1][key] = \"k\"\n else:\n # use PESummary color cycle\n colordicts[-1][key] = list(colorcycle)[\n (j * 2 + i) % len(colorcycle)\n ]\n\n # store original keywords arguments\n origkwargs = kwargs.copy()\n\n # plot samples\n fig = None\n if len(self._samples) > 0:\n kwargs[\"colors\"] = list(colordicts[0].values())\n if self._num_parameters == 1:\n fig = self._1d_plot_samples(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_samples(**kwargs)\n else:\n fig = self._nd_plot_samples(**kwargs)\n\n # restore keywords\n kwargs = origkwargs\n\n if len(self._grids) > 0:\n kwargs[\"colors\"] = list(colordicts[1].values())\n if fig is not None and \"fig\" not in kwargs:\n kwargs[\"fig\"] = fig\n if self._num_parameters == 1:\n fig = self._1d_plot_grid(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_grid(**kwargs)\n else:\n fig = self._nd_plot_grid(**kwargs)\n\n # add further figure information\n if self._num_parameters == 1:\n ax = fig.gca()\n\n # set figure bounds if outside defaults\n if self.parameters[0] in DEFAULT_BOUNDS:\n _set_axes_limits(ax, self.parameters[0], axis=\"x\")\n\n # add injection values\n if self.injection_parameters is not None:\n if self.injection_parameters[self.parameters[0]] is not None:\n ax.axvline(\n (\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]]\n ),\n color=kwargs.get(\"injection_color\", \"k\"),\n linewidth=1,\n )\n elif self._num_parameters == 2:\n if \"triangle\" in self.plottype:\n a1, a2, a3 = fig[1:]\n order = [\"x\", \"y\"] if self.plottype == \"triangle\" else [\"y\", \"x\"]\n params = (\n self.parameters[:2]\n if self.plottype == \"triangle\"\n else self.parameters[1::-1]\n )\n\n # set figure bounds if outside defaults\n for param, 
axes, axis in zip(params, [[a1, a2], [a2, a3]], order):\n for ax in axes:\n _set_axes_limits(ax, param, axis=axis)\n\n self.fig = fig\n return self.fig",
"def plot_optimization_history(\n study: Study | Sequence[Study],\n *,\n target: Callable[[FrozenTrial], float] | None = None,\n target_name: str = \"Objective Value\",\n error_bar: bool = False,\n) -> \"Axes\":\n\n _imports.check()\n\n info_list = _get_optimization_history_info_list(study, target, target_name, error_bar)\n return _get_optimization_history_plot(info_list, target_name)",
"def plot(self, plot_cmd=None, tf=lambda y: y):\r\n if not plot_cmd:\r\n plot_cmd = self.plot_cmd\r\n colors = 'bgrcmyk'\r\n pylab.hold(False)\r\n res = self.res\r\n\r\n flatx, flatf = self.flattened()\r\n minf = np.inf\r\n for i in flatf:\r\n minf = min((minf, min(flatf[i])))\r\n addf = 1e-9 - minf if minf <= 0 else 0\r\n for i in sorted(res.keys()): # we plot not all values here\r\n if type(i) is int:\r\n color = colors[i % len(colors)]\r\n arx = sorted(res[i].keys())\r\n plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')\r\n pylab.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)\r\n pylab.hold(True)\r\n plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')\r\n pylab.ylabel('f + ' + str(addf))\r\n pylab.draw()\r\n show()\r\n # raw_input('press return')\r\n return self",
"def plot_comparisons(self, exact, blocked, blockederr, axdelta=None):\n if axdelta is None:\n axdelta = plt.gca()\n delta = self.means - exact\n axdelta.errorbar(list(range(1, self.max_dets)), delta[0], yerr=self.stderr[0], label='independent')\n axdelta.errorbar(list(range(1, self.max_dets)), delta[1], yerr=self.stderr[1], label='correlated')\n axdelta.axhline(delta[0, 0], linestyle=':', color='grey', label='reference')\n axdelta.axhline(0, linestyle='-', linewidth=1, color='black')\n if blocked:\n axdelta.axhline(blocked-exact, linestyle='--', color='darkgreen', label='reblocked')\n if blockederr:\n axdelta.fill_between([0, self.max_dets], [blocked-exact-blockederr,blocked-exact-blockederr],\n [blocked-exact+blockederr,blocked-exact+blockederr], color='green', alpha=0.2)\n axdelta.set_xlabel('Number of determinants in estimator')\n axdelta.set_ylabel(r'$E-E_\\mathrm{CCSD}$ / ha')\n axdelta.legend()\n return axdelta",
"def plot(\n self,\n group_delay=False,\n slce=None,\n flim=None,\n dblim=None,\n tlim=None,\n grpdlim=None,\n dbref=1,\n show=False,\n use_fig=None,\n label=None,\n unwrap_phase=False,\n logf=True,\n third_oct_f=True,\n plot_kw={},\n **fig_kw,\n ):\n if use_fig is None:\n fig_kw = {**{\"figsize\": (10, 10)}, **fig_kw}\n fig, axes = plt.subplots(nrows=3, constrained_layout=True, **fig_kw)\n else:\n fig = use_fig\n axes = fig.axes\n\n self.plot_magnitude(\n use_ax=axes[0],\n slce=slce,\n dblim=dblim,\n flim=flim,\n dbref=dbref,\n label=label,\n plot_kw=plot_kw,\n logf=logf,\n third_oct_f=third_oct_f,\n )\n if group_delay:\n self.plot_group_delay(\n use_ax=axes[1],\n slce=slce,\n flim=flim,\n ylim=grpdlim,\n plot_kw=plot_kw,\n logf=logf,\n third_oct_f=third_oct_f,\n )\n else:\n self.plot_phase(\n use_ax=axes[1],\n slce=slce,\n flim=flim,\n plot_kw=plot_kw,\n unwrap=unwrap_phase,\n logf=logf,\n third_oct_f=third_oct_f,\n )\n self.plot_time(\n use_ax=axes[2], tlim=tlim, slce=slce, plot_kw=plot_kw\n )\n\n if show:\n plt.show()\n\n return fig",
"def _show_learning_rate():\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6.4 * 2, 4.8))\n\n # Visualize c_prime\n c_prime_list = np.linspace(1, 100, num=11)\n x_label = f\"c'\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[0]\n x_list = c_prime_list\n\n # MNIST\n y_list = [161, 16, 14, 15, 20, 21, 24, 27, 30, 30, 35]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [63, 12, 12, 15, 18, 19, 22, 25, 26, 28, 30]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [1297, 724, 221, 80, 52, 51, 54, 54, 52, 60, 60]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n # Visualize t0\n t0_list = np.linspace(1, 100, num=11)\n x_label = f\"t0\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[1]\n x_list = t0_list\n\n # MNIST\n y_list = [16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 16]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [765, 765, 767, 772, 772, 773, 789, 789, 793, 796, 799]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n plt.show()",
"def plot_effective_beta(t, recalled_ctx, ctx, ctx_test_env, ax=None):\n if ax is None:\n ax = plt.gca()\n\n ax.set_prop_cycle('color', sns.color_palette(\"husl\", ctx_test_env.n))\n y = np.sum(recalled_ctx * ctx, axis=1)\n for i in range(1, ctx_test_env.n):\n sel = (t > i) & (t <= i + 1)\n ax.plot(t[sel], y[sel])\n\n ax.axhline(y=ctx_test_env.beta, c='k', ls='--')\n ax.set_xlabel(r\"Time $t/\\mathrm{s}$\")\n ax.set_ylabel(r\"$\\beta'$\")\n ax.set_yticks([0, ctx_test_env.beta, 1])"
] | [
"0.57839125",
"0.5279949",
"0.50594544",
"0.50512856",
"0.50476915",
"0.4997148",
"0.4956228",
"0.48934639",
"0.4885822",
"0.4879313",
"0.48789495",
"0.48726746",
"0.48418292",
"0.48268393",
"0.48255506",
"0.4809495",
"0.47626424",
"0.4736917",
"0.47095123",
"0.46958274",
"0.4682187",
"0.46796298",
"0.46700087",
"0.4663738",
"0.464751",
"0.46452853",
"0.46386296",
"0.4635964",
"0.46338877",
"0.46318033"
] | 0.5902508 | 0 |
Plot the evolution of relative evaluations for a target based on increasing absolute evaluations. In other words, for each absolute number of evaluations, determine the target reached and show how much faster the baseline reached it. groupby is the method of aggregating results of multiple instances: a callable, stringable object, GroupByMedian by default. It's not clear whether this will eventually be useful at all, but it offers another perspective that might aid some analysis. | def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label=\"\", baseline2_ds=None, baseline2_label=\"\", dim=None, funcId=None, groupby=None):
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
if baseline1_ds:
baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))
if baseline2_ds:
baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs1 = groupby(ds.detEvals(targets), axis=1)
if baseline1_ds:
fevs1 /= baseline1_fevs
fevs2 = groupby(ds.detEvals(targets), axis=1)
if baseline2_ds:
fevs2 /= baseline2_fevs
infsx = np.nonzero(fevs1 == inf)
infs = infsx[0]
if np.size(infs) > 0:
#print infs
fevs1 = fevs1[:infs[0]-1]
fevs2 = fevs2[:infs[0]-1]
#print name, fevs1, fevs2
style['markevery'] = 64
ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)
ax.grid()
ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. log(runlengths) + 1
ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))
ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evals_by_target(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)\n target_values = pp.RunlengthBasedTargetValues(runlengths,\n reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)\n targets = target_values((funcId, dim))\n\n if baseline_ds:\n baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n fevs = groupby(ds.detEvals(targets), axis=1)\n if baseline_ds:\n fevs /= baseline_fevs\n style['markevery'] = 64\n ax.loglog(targets, fevs, label=name, basey=pfsize, **style)\n ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))\n if baseline_ds:\n ax.set_yticks([2, 3.5], minor=True)\n ax.set_xlabel('Function Value Targets')\n ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')",
"def plot_results(self):\n experiment_utils.plot_exp_metric_comparison(self.experiments(reverse_sort=False))",
"def plot_associative_learning_progress(ax, df):\n\n num_objects_list = sorted(df.curr_num_objects.unique())\n legend_list = []\n for idx in num_objects_list:\n ax.plot(df[df.curr_num_objects == idx].groupby('objects_iter').rewards.mean())\n legend_list.append(f'ns={idx}')\n ax.set_xlabel('Stimulus iteration')\n ax.set_ylabel('P(correct)')\n ax.set_ylim([0.4, 1])\n ax.legend(legend_list)",
"def plot_optimization_history(\n study: Study | Sequence[Study],\n *,\n target: Callable[[FrozenTrial], float] | None = None,\n target_name: str = \"Objective Value\",\n error_bar: bool = False,\n) -> \"Axes\":\n\n _imports.check()\n\n info_list = _get_optimization_history_info_list(study, target, target_name, error_bar)\n return _get_optimization_history_plot(info_list, target_name)",
"def plot_test_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n\n df_i = process_for_test_objective(\n df_i.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = df_i.loc[df_i[\"max_idx\"]][\"timestamp_end\"].values\n y = df_i.loc[df_i[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = exp_config.get(\"best_objective\", 1) - f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n\n else:\n\n exp_df = process_for_test_objective(\n exp_df.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = exp_df.loc[exp_df[\"max_idx\"]][\"timestamp_end\"].values\n y = exp_df.loc[exp_df[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n idx = np.unique(x, return_index=True, axis=0)[1]\n\n x = x[idx]\n y = y[idx]\n\n x = np.clip(np.concatenate([x, [exp_config[\"t_max\"]]]), 0, exp_config[\"t_max\"])\n y = np.clip(exp_config.get(\"best_objective\", 1) - np.concatenate([y, [y[-1]]]), 0, 1)\n \n area = aulc(x, y)\n exp_config[\"data\"][exp_name][\"AULC\"] = area\n \n plt.step(\n x[:],\n y[:],\n where=\"post\",\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n # if MODE == \"min\":\n # plt.legend(loc=\"upper right\")\n # else:\n # plt.legend(loc=\"lower right\")\n plt.legend(loc=exp_config.get(\"legend\", \"best\"))\n\n plt.ylabel(\"Test Regret\")\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid(which=\"minor\", color=\"gray\", linestyle=\":\")\n plt.grid(which=\"major\", linestyle=\"-\")\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()",
"def fval_by_budget(ax, pds, baseline_ds=None, baseline_label=\"\", dim=None, funcId=None, groupby=None):\n if groupby is None: groupby = GroupByMedian()\n pfsize = len(pds.algds.keys())\n\n if baseline_ds:\n baseline_budgets = baseline_ds.funvals[:, 0]\n baseline_funvals = groupby(baseline_ds.funvals[:, 1:], axis=1)\n baseline_safefunvals = np.maximum(baseline_funvals, 10**-8) # eschew zeros\n # fvb is matrix with each row being [budget,funval]\n baseline_fvb = np.transpose(np.vstack([baseline_budgets, baseline_safefunvals]))\n\n for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):\n #print name, ds\n budgets = ds.funvals[:, 0]\n funvals = groupby(ds.funvals[:, 1:], axis=1)\n\n # Throw away funvals after ftarget reached\n try:\n limit = np.nonzero(funvals < 10**-8)[0][0] + 1\n except IndexError:\n limit = np.size(budgets)+1\n budgets = budgets[:limit]\n funvals = funvals[:limit]\n\n fvb = np.transpose(np.vstack([budgets[:limit], funvals[:limit]]))\n\n if baseline_ds:\n # Relativize by baseline\n fvba = ra.alignArrayData(ra.VArrayMultiReader([fvb, baseline_fvb]))\n budgets = fvba[:, 0]\n funvals = fvba[:, 1] / fvba[:, 2]\n\n style['markevery'] = 16\n ax.loglog(budgets, funvals, label=name, basex=pfsize, **style)\n if baseline_ds:\n ax.set_yticks([1], minor=True)\n ax.set_xlabel('Budget')\n ax.set_ylabel(_fval_label(baseline_ds, baseline_label, str(groupby)))\n ax.grid()\n if baseline_ds:\n ax.yaxis.grid(True, which = 'minor')",
"def plot_results(outputs, x, e, t, a, folds, groups,\n quantiles, strat='quantile', adj='KM', plot=True):\n if plot:\n mpl.rcParams['hatch.linewidth'] = 2.0\n\n fig, big_axes = plt.subplots(\n figsize=(8 * (len(groups) + 2), 6 * len(quantiles)),\n nrows=len(quantiles),\n ncols=1)\n\n plt.subplots_adjust(hspace=0.4)\n\n i = 0\n for _, big_ax in enumerate(big_axes, start=1):\n big_ax.set_title(\n 'Receiver Operator Characteristic and Calibration at t=' +\n str(quantiles[i]) + '\\n',\n fontsize=16)\n big_ax.tick_params(\n labelcolor=(1., 1., 1., 0.0),\n top='off',\n bottom='off',\n left='off',\n right='off')\n i += 1\n \n eces = {}\n metrics = {}\n\n for quant in quantiles:\n eces[quant] = {}\n \n for i in range(len(quantiles)):\n\n scores = outputs[quantiles[i]]\n for j in range(len(groups) + 2):\n\n pt = (i * (len(groups) + 2) + j + 1)\n if plot:\n ax = fig.add_subplot(len(quantiles), len(groups) + 2, pt)\n else:\n ax = None\n \n if (j==1):\n eces[quantiles[i]]['all'] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n None,\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot) \n \n if (j>1):\n eces[quantiles[i]][groups[j - 2]] = plot_calibration_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups[j - 2],\n quantiles[i],\n strat=strat,\n adj=adj,\n plot=plot)\n \n if (j==0):\n metrics[quantiles[i]] = plot_roc_curve(ax,\n scores,\n e,\n t,\n a,\n folds,\n groups,\n quantiles[i],\n plot=plot)\n\n for quant in quantiles:\n metrics[quant] = metrics[quant] + (eces[quant], )\n \n if plot: \n plt.show()\n return metrics",
"def plot_ratios(path='/Volumes/OptiHDD/data/pylith/3d/agu2014/output',\n\t\t\t\tsteps=['step01','step02'],\n\t\t\t\t#labels='',\n\t\t\t\tshow=True,\n\t\t\t\txscale=1e3,\n\t\t\t\tyscale=1e-2):\n\tplt.figure()\n\t#path = '/Users/scott/Desktop/elastic'\n\n\t# Deep source\n\t#labels = ['no APMB', 'APMB']\n\t#if labels == '':\n\tlabels = steps\n\tdeep = {}\n\t#uzmax = 0.824873455364\n\t# NOT sure why hardcoded...\n\tuzmax = 1\n\tfor i,outdir in enumerate(steps):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\t\tprint(pointsFile)\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 30.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'o-',ms=4,lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'o--',ms=4,lw=4,color=l.get_color()) #mfc='none' transparent\n\t\tdeep[outdir] = uz_fem/uz_fem\n\n\t'''\n\t# Shallow Source\n\tshallow = {}\n\tuzmax = 0.949652827795\n\tfor i,outdir in enumerate(['step11','step12']):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t#normalize\n\tuz_fem = uz_fem / uzmax\n\tur_fem = ur_fem / uzmax\n\tx_fem = x_fem / 20.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'.-', mfc='w', lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'.--',lw=4, mfc='w',color=l.get_color()) #mfc='none' transparent\n\n\t\tshallow[outdir] = uz_fem/ur_fem\n\t'''\n\n\t# Annotate\n\tplt.axhline(color='k',lw=0.5)\n\t#plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\t#plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.legend()\n\tplt.grid()\n\t#plt.ylim(-0.5, 3.5)\n\t#plt.savefig('deep.png',bbox_inches='tight')\n\t#plt.savefig('shallow.png',bbox_inches='tight')\n\n\t# normalized\n\tplt.ylim(-0.5, 4)\n\tplt.xlim(0,10)\n\tplt.xlabel('Normalized Radial Distance [R / D]')\n\tplt.ylabel('Normalized Displacement [U / Uz_max]')\n\t#plt.savefig('normalized_deep.png',bbox_inches='tight')\n\tplt.savefig('normalized_shallow.png',bbox_inches='tight')\n\n\n\t# Plot ratios of uz versus NOTE: this plot is confusing,,, just keep ratio of uz_max to ur_max\n\t'''\n\tplt.figure()\n\tplt.plot(x_fem, deep['step01'], label='Deep no APMB')\n\tplt.plot(x_fem, deep['step02'], label='Deep w/ APMB')\n\tplt.plot(x_fem, shallow['step11'], label='Shallow no APMB')\n\tplt.plot(x_fem, shallow['step12'], label='Shallow w/ APMB')\n\tplt.xlabel('Distance [km]') #NOTE: maybe plot normailzed X-axis (R-d)\n\t#plt.xlabel('Normalized Distance [R/d]')\n\tplt.ylabel('Ratio [Uz/Ur]')\n\tplt.title('Ratio of vertical to radial displacement')\n\tplt.legend()\n\tplt.show()\n\t'''",
"def plot_metric(df_metrics, name, batch_size=10, epochs=10):\n\n # One groupplot\n fig, axarr = plt.subplots(3, 4, sharey=True, sharex=True)\n plotname = 'apfd'\n subplot_labels = ['(a)', '(b)', '(c)']\n\n for column, nr in enumerate(sorted(df_metrics['negative_ratio'].unique())):\n for row, emb_size in enumerate(df_metrics['emb_size'].unique()):\n for agidx, (labeltext, task, linestyle) in enumerate(\n [('Classification', 'True', '-'), ('Regression', 'False', '-.')]):\n rel_df = df_metrics[\n (df_metrics['emb_size'] == str(emb_size)) & (df_metrics['negative_ratio'] == str(nr)) &\n (df_metrics['batch_size'] == str(batch_size)) & (df_metrics['epochs'] == str(epochs))]\n\n # rel_df[rel_df['agent'] == agent].plot(x='step', y='napfd', label=labeltext, ylim=[0, 1], linewidth=0.8,\n # style=linestyle, color=sns.color_palette()[agidx], ax=axarr[row,column])\n\n apfd = rel_df.loc[rel_df['classification'] == task, 'apfd']\n miu = np.round(np.mean(apfd), 2)\n sigma = np.round(np.std(apfd), 2)\n label = labeltext + '\\n $\\mu$ - ' + str(miu) + ' $\\sigma$ - ' + str(sigma)\n\n # sns.displot(data=rel_df, x=\"apfd\", hue='classification', kde=True, ax=axarr[row, column])\n\n sns.distplot(apfd, kde=True,\n bins=int(180 / 5), color=sns.color_palette()[agidx],\n hist_kws={'edgecolor': 'black'},\n kde_kws={'linewidth': 4, 'clip': (0.0, 1.0)}, label=label, ax=axarr[row, column])\n\n axarr[row, column].xaxis.grid(True, which='major')\n\n axarr[row, column].set_title('Emb_size - %s - Neg_Ratio - %s' % (emb_size, nr), fontsize=10)\n\n if row == 2:\n axarr[row, column].set_xlabel('APFD')\n if column == 0:\n axarr[row, column].set_ylabel('Density')\n\n axarr[row, column].legend(frameon=True, prop={'size': 6})\n\n # Tweak spacing to prevent clipping of ylabel\n fig.suptitle('APFD Parameter Tuning - %d Epochs and batch-size - %d' % (epochs, batch_size))\n fig.tight_layout()\n plt.savefig(name, bbox_inches='tight')\n plt.show()",
"def _report(self, pagerank_by_target):\r\n for target in sorted(pagerank_by_target, key=pagerank_by_target.get, reverse=True):\r\n yield '%f - %s' % (pagerank_by_target[target], target)",
"def make_plot_for_proportion_within_target(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n num_of_trials,\n seed_num,\n target,\n runtime=1440,\n max_threshold=None,\n):\n ambulance_proportions = []\n other_proportions = []\n all_proportions = []\n if max_threshold == None:\n max_threshold = num_of_servers\n for threshold in range(max_threshold + 1):\n mean_ambulance, mean_other, mean_combined = get_mean_waits_of_current_threshold(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n threshold,\n seed_num,\n num_of_trials,\n runtime,\n target,\n )\n ambulance_proportions.append(mean_ambulance)\n other_proportions.append(mean_other)\n all_proportions.append(mean_combined)\n\n plt.figure(figsize=(23, 10))\n proportion_plot = plt.plot(\n ambulance_proportions, \":\", other_proportions, \":\", all_proportions, \"-\"\n )\n plt.title(\n \"Proportion of individuals within target for different capacity thresholds\"\n )\n plt.xlabel(\"Capacity Threshold\")\n plt.ylabel(\"Proportion of Individuals within target\")\n plt.legend(\n [\"Ambulance Patients\", \"Other Patient\", \"All Patients\"], fontsize=\"x-large\"\n )\n\n return proportion_plot",
"def summarize(group, fs=None, include_source=True):\n _line_break = '{0:-<120}\\n'.format('')\n tests = sorted(ComparisonBenchmark.groups[group], key=lambda t: getattr(t, 'time_average_seconds'))\n log = StringIO.StringIO()\n log.write('Call statement:\\n\\n')\n log.write('\\t' + tests[0].stmt)\n log.write('\\n\\n\\n')\n fmt = \"{0: <8} {1: <35} {2: <12} {3: <15} {4: <15} {5: <14}\\n\"\n log.write(fmt.format('Rank', 'Function Name', 'Time', '% of Slowest', 'timeit_repeat', 'timeit_number'))\n log.write(_line_break)\n log.write('\\n')\n\n for i, t in enumerate(tests):\n func_name = \"{}.{}\".format(t.classname, t.callable.__name__) if t.classname else t.callable.__name__\n if i == len(tests)-1:\n time_percent = 'Slowest'\n else:\n time_percent = \"{:.1f}\".format(t.time_average_seconds / tests[-1].time_average_seconds * 100)\n log.write(fmt.format(i+1,\n func_name,\n convert_time_units(t.time_average_seconds),\n time_percent,\n t.timeit_repeat,\n t.timeit_number))\n log.write(_line_break)\n\n if include_source:\n log.write('\\n\\n\\nSource Code:\\n')\n log.write(_line_break)\n for test in tests:\n log.write(test.log.getvalue())\n log.write(_line_break)\n\n if isinstance(fs, str):\n with open(fs, 'w') as f:\n f.write(log.getvalue())\n\n elif fs is None:\n print(log.getvalue())\n else:\n try:\n fs.write(log.getvalue())\n except AttributeError as e:\n print(e)",
"def plot_test(y_test, y_pred, title = None, xlabel = 'Measured $Y = \\log_2(MIC)$', ylabel = 'Predicted $Y = \\log_2(MIC)$', legend = ['Ideal', 'Result'], groups = None):\n \n fig, ax = plt.subplots(1,1)\n fig.set_figheight(5)\n fig.set_figwidth(5)\n if groups is not None:\n groups_obj = pd.concat([y_test,y_pred], axis=1).groupby(np.array(groups))\n cmap=plt.get_cmap('tab10')\n for name, group in groups_obj:\n # Works only for groups with numeric names that are max cmap length:\n ax.plot(group.iloc[:,0], group.iloc[:,1], marker=\"o\", linestyle=\"\", label=int(name), color = cmap.colors[int(name)])\n ax.legend()\n else:\n ax.scatter(y_test,y_pred, color = 'red')\n ax_max = 10\n if np.max(y_test.values)>ax_max:\n ax_max = np.max(y_test).values\n ax_min = 0\n if np.min(y_test.values)<ax_min:\n ax_min = np.min(y_test.values)\n ax.plot([ax_min, ax_max], [ax_min, ax_max], '--', color='black')\n ax.set_aspect('equal', 'box')\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n #plt.savefig(title+'.pdf')\n plt.savefig(title+'.svg')\n #plt.savefig(title+'.png')#, dpi=600)\n #plt.show()",
"def median_absolute_error(self):\n print('Median absolute error regression loss: ' + str(median_absolute_error(self.model.dataset.get_y_test(),\n self.model.get_predicted())))",
"def plot(self, plot_cmd=None, tf=lambda y: y):\r\n if not plot_cmd:\r\n plot_cmd = self.plot_cmd\r\n colors = 'bgrcmyk'\r\n pylab.hold(False)\r\n res = self.res\r\n\r\n flatx, flatf = self.flattened()\r\n minf = np.inf\r\n for i in flatf:\r\n minf = min((minf, min(flatf[i])))\r\n addf = 1e-9 - minf if minf <= 0 else 0\r\n for i in sorted(res.keys()): # we plot not all values here\r\n if type(i) is int:\r\n color = colors[i % len(colors)]\r\n arx = sorted(res[i].keys())\r\n plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')\r\n pylab.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)\r\n pylab.hold(True)\r\n plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')\r\n pylab.ylabel('f + ' + str(addf))\r\n pylab.draw()\r\n show()\r\n # raw_input('press return')\r\n return self",
"def median_absolute_error(y_true, y_pred, *, multioutput=..., sample_weight=...):\n ...",
"def plot_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n df_i = df_i.sort_values(\"timestamp_end\")\n x, y = df_i.timestamp_end.to_numpy(), df_i.objective.cummin().to_numpy()\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n # plt.fill_between(T,\n # y_mean-1.96*y_std,\n # y_mean+1.96*y_std,\n # facecolor=exp_config[\"data\"][exp_name][\"color\"],\n # alpha=0.3)\n else:\n exp_df = exp_df.sort_values(\"timestamp_end\")\n x, y = exp_df.timestamp_end.to_numpy(), exp_df.objective.cummax().to_numpy()\n if \"hartmann6D\" in exp_name:\n y = y + 3.32237 # hartmann6D\n\n plt.plot(\n x,\n y,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n if MODE == \"min\":\n plt.legend(loc=\"upper right\")\n else:\n plt.legend(loc=\"lower right\")\n\n plt.ylabel(exp_config.get(\"ylabel\", \"Objective\"))\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid()\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()",
"def visualization(obj_value):\n for n in range(3):\n plt.loglog(obj_value[n],\".\");\n\n plt.ylabel('objective values');\n plt.xlabel('iteration counter');\n plt.title('objective values for each pair against iterations');\n plt.legend();\n plt.show();",
"def expression_peaks(cluster, magnitude, group1 = [ \"SP\", \"SL06\", \"SL12\", \"SL24\",\"SL48\", \"SL96\" ], group2 = [ \"FL\", \"FP06\", \"FP12\", \"FP24\",\"FP48\", \"FP96\" ]):\n if cluster.averaged == False:\n cluster.average_matrix(group1 + group2)\n verbalise(\"G\", cluster.sample_header)\n peaklist = {}\n\n for gene in range(cluster.genenumber):\n # for group 1:\n datalist = list(cluster.data_matrix[:,gene])\n maxexpression = max(datalist[:len(group1)])\n maxposn = datalist.index(maxexpression)\n\n # check fold change is sufficient:\n if maxexpression >= magnitude + datalist[0]:\n # check adjacent peaks are not too big:\n # difference of 5.64 corresponds to 2% of the untransformed fpkm value\n # difference of 1.00 corresponds to 50% of the untransformed fpkm value\n if maxposn == len(group1) - 1:\n if (maxexpression - 5.64 < datalist[maxposn - 1] < maxexpression - 1):\n peaklist[cluster.gene_header[gene]] = group1[maxposn]\n\n elif (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5) and \\\n (maxexpression * 0.02 < datalist[maxposn + 1] < maxexpression * 0.5):\n\n peaklist[cluster.gene_header[gene]] = group1[maxposn]\n\n # for group 2:\n maxexpression = max(datalist[len(group1):])\n maxposn = datalist.index(maxexpression)\n\n # check fold change is sufficient for reciprocal swap:\n if maxexpression >= magnitude * datalist[len(group1)]:\n # check adjacent peaks are not too big:\n try:\n if maxposn == len(group1+group2) - 1:\n if (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5):\n peaklist[cluster.gene_header[gene]] = (group1 + group2)[maxposn]\n\n elif (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5) and \\\n (maxexpression * 0.02 < datalist[maxposn + 1] < maxexpression * 0.5):\n\n peaklist[cluster.gene_header[gene]] = (group1 + group2)[maxposn]\n except IndexError as inst:\n verbalise(\"R\", inst)\n verbalise(\"R\", datalist)\n verbalise(\"R\", \"Max is %.3f at position %d\" % (maxexpression, maxposn))\n\n verbalise(\"G\", len(peaklist), \"significant peaks found.\")\n return peaklist",
"def plot(self, **kwargs):\n\n # get colors\n colors = kwargs.get(\"colors\", GW_OBSERVATORY_COLORS)\n\n # get Result samples\n self._samples = {\n label: value.posterior\n for label, value in self.results.items()\n if isinstance(value, Result)\n }\n\n # get Grid posteriors\n self._grids = {\n label: [value, value.ln_evidence] # store grid and log evidence\n for label, value in self.results.items()\n if isinstance(value, Grid)\n }\n\n # apply offsets for slightly nicer plots axes\n self.parameter_offsets = {parameter: 0.0 for parameter in self.parameters}\n if len(self._grids) == 0 and len(self._samples) == 1:\n for label in self._samples:\n for parameter in self.parameters:\n srange = [\n np.min(self._samples[label][parameter]),\n np.max(self._samples[label][parameter]),\n ]\n label_suffix = \"\"\n\n # offset values\n median = np.median(self._samples[label][parameter])\n relwidth = np.abs((srange[1] - srange[0]) / median)\n\n if relwidth < 1e-4:\n offsetstr = f\"{median:.4e}\"\n a, b = offsetstr.split(\"e\")\n\n if np.abs(int(b)) < 3:\n offsetstr = f\"{median:.4f}\"\n offset = float(offsetstr)\n else:\n offset = float(offsetstr)\n offsetstr = a + rf\"\\!\\times\\!10^{{{int(b)}}}\"\n\n self.parameter_offsets[parameter] = offset\n\n self._samples[label][parameter] -= offset\n label_suffix = rf\" [${{\\scriptstyle {offsetstr}}}$]\"\n\n self.latex_labels[parameter] += label_suffix\n\n colordicts = []\n for j, res in enumerate([self._samples, self._grids]):\n colordicts.append({})\n for i, key in enumerate(res):\n if key in colors:\n colordicts[-1][key] = colors[key]\n elif key.lower() == \"joint\":\n # if using \"Joint\" as the multi-detector analysis key, set the color to black\n colordicts[-1][key] = \"k\"\n else:\n # use PESummary color cycle\n colordicts[-1][key] = list(colorcycle)[\n (j * 2 + i) % len(colorcycle)\n ]\n\n # store original keywords arguments\n origkwargs = kwargs.copy()\n\n # plot samples\n fig = None\n if len(self._samples) > 0:\n kwargs[\"colors\"] = list(colordicts[0].values())\n if self._num_parameters == 1:\n fig = self._1d_plot_samples(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_samples(**kwargs)\n else:\n fig = self._nd_plot_samples(**kwargs)\n\n # restore keywords\n kwargs = origkwargs\n\n if len(self._grids) > 0:\n kwargs[\"colors\"] = list(colordicts[1].values())\n if fig is not None and \"fig\" not in kwargs:\n kwargs[\"fig\"] = fig\n if self._num_parameters == 1:\n fig = self._1d_plot_grid(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_grid(**kwargs)\n else:\n fig = self._nd_plot_grid(**kwargs)\n\n # add further figure information\n if self._num_parameters == 1:\n ax = fig.gca()\n\n # set figure bounds if outside defaults\n if self.parameters[0] in DEFAULT_BOUNDS:\n _set_axes_limits(ax, self.parameters[0], axis=\"x\")\n\n # add injection values\n if self.injection_parameters is not None:\n if self.injection_parameters[self.parameters[0]] is not None:\n ax.axvline(\n (\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]]\n ),\n color=kwargs.get(\"injection_color\", \"k\"),\n linewidth=1,\n )\n elif self._num_parameters == 2:\n if \"triangle\" in self.plottype:\n a1, a2, a3 = fig[1:]\n order = [\"x\", \"y\"] if self.plottype == \"triangle\" else [\"y\", \"x\"]\n params = (\n self.parameters[:2]\n if self.plottype == \"triangle\"\n else self.parameters[1::-1]\n )\n\n # set figure bounds if outside defaults\n for param, 
axes, axis in zip(params, [[a1, a2], [a2, a3]], order):\n for ax in axes:\n _set_axes_limits(ax, param, axis=axis)\n\n self.fig = fig\n return self.fig",
"def optimizeEps(group, rep, fig=None):\n\tX = group[[\"ae1\", \"ae2\"]].to_numpy()\n\tneigh = NearestNeighbors(n_neighbors=2)\n\tnbrs = neigh.fit(X)\n\tdist, idx = nbrs.kneighbors(X)\n\t\n\tdist = np.sort(dist, axis=0)\n\td = dist[:,1]\n\tdist[:,0] = idx[:,0]\n\t#print(dist)\n\t#if fig is not None:\n\t#ax=fig.add_subplot(10,10,rep)\n\t#ax.plot(d)\n\t#plt.show()\n\t\n\trotor = Rotor()\n\trotor.fit_rotate(dist)\n\telbow_index = rotor.get_elbow_index()\n\t#ax.axhline(dist[elbow_index][1])\n\treturn(dist[elbow_index][1])",
"def plot_comparisons(self, exact, blocked, blockederr, axdelta=None):\n if axdelta is None:\n axdelta = plt.gca()\n delta = self.means - exact\n axdelta.errorbar(list(range(1, self.max_dets)), delta[0], yerr=self.stderr[0], label='independent')\n axdelta.errorbar(list(range(1, self.max_dets)), delta[1], yerr=self.stderr[1], label='correlated')\n axdelta.axhline(delta[0, 0], linestyle=':', color='grey', label='reference')\n axdelta.axhline(0, linestyle='-', linewidth=1, color='black')\n if blocked:\n axdelta.axhline(blocked-exact, linestyle='--', color='darkgreen', label='reblocked')\n if blockederr:\n axdelta.fill_between([0, self.max_dets], [blocked-exact-blockederr,blocked-exact-blockederr],\n [blocked-exact+blockederr,blocked-exact+blockederr], color='green', alpha=0.2)\n axdelta.set_xlabel('Number of determinants in estimator')\n axdelta.set_ylabel(r'$E-E_\\mathrm{CCSD}$ / ha')\n axdelta.legend()\n return axdelta",
"def psi(bench, target, group, print_df=True):\n labels_q = np.percentile(\n bench, [(100.0 / group) * i for i in range(group + 1)], interpolation=\"nearest\")\n\n # This is the right approach when you have not a lot of unique value\n ben_pct = (pd.cut(bench, bins=np.unique(labels_q),\n include_lowest=True).value_counts()) / len(bench)\n target_pct = (pd.cut(target, bins=np.unique(labels_q),\n include_lowest=True).value_counts()) / len(target)\n target_pct = target_pct.sort_index() # sort the index\n ben_pct = ben_pct.sort_index() # sort the index\n psi = sum((target_pct - ben_pct) * np.log(target_pct / ben_pct))\n # Print results for better understanding\n if print_df:\n results = pd.DataFrame({'ben_pct': ben_pct.values,\n 'target_pct': target_pct.values},\n index=ben_pct.index)\n return {'data': results, 'statistic': psi}\n return psi",
"def plot_groups(\n self,\n lim=4,\n center=(0, 0),\n x1='',\n y1='',\n x2='',\n y2='',\n linecolor='k',\n alpha_group=1,\n legend=False,\n pause=False):\n ax = plt.gca()\n ax.clear()\n\n shape = (\n np.sqrt(len(self.contours)).astype(int),\n np.sqrt(len(self.contours)).astype(int))\n\n plt.contour(\n -self.contours.x.values.reshape(shape),\n self.contours.y.values.reshape(shape),\n self.contours.f.values.reshape(shape),\n colors='grey',\n levels=np.arange(2, int(np.max(self.contours.f) + 1), 1),\n linewidths=1,\n zorder=1)\n\n plt.contour(\n -self.contours.x.values.reshape(shape),\n self.contours.y.values.reshape(shape),\n self.contours.f.values.reshape(shape),\n colors='k',\n levels=self.levels,\n linewidths=2,\n zorder=1)\n\n if self.target:\n plt.plot([-lim * 0.05, -lim * 0.025], [0, 0], color='k')\n plt.plot([lim * 0.05, lim * 0.025], [0, 0], color='k')\n plt.plot([0, 0], [-lim * 0.05, -lim * 0.025], color='k')\n plt.plot([0, 0], [lim * 0.05, lim * 0.025], color='k')\n\n plt.scatter(\n -self.df_gxys.loc[self.gxys.group_peak == 1, 'x'],\n self.df_gxys.loc[self.gxys.group_peak == 1, 'y'],\n edgecolor='k',\n facecolor='none',\n linewidth=2,\n s=32,\n zorder=5)\n\n inds = np.argsort(\n np.sqrt(\n self.df_gxys.loc[self.gxys.group_peak == 1, 'x'] ** 2 +\n self.df_gxys.loc[self.gxys.group_peak == 1, 'y'] ** 2))\n\n marker = 'o'\n\n alpha = np.ones_like(self.df_gxys['x'])\n alpha[\n (self.df_gxys['group_no'] > 1) &\n (self.df_gxys['group_no'] < alpha_group)] = 0.25\n\n for group_no in [0, 1]:\n plt.scatter(\n -self.df_gxys.loc[lambda x: x['group_no'] == group_no, 'x'],\n self.df_gxys.loc[lambda x: x['group_no'] == group_no, 'y'],\n c=f'C{group_no}',\n s=30,\n zorder=2,\n marker=marker,\n alpha=alpha[self.df_gxys['group_no'] == group_no][0])\n\n marker_ = np.tile(np.array(['o', 's', 'D', '^', 'x']), 2000)\n\n for i, group_no in enumerate(\n self.df_gxys.loc[lambda x: x['group_no'] > 1, 'group_no']):\n group = self.df_gxys['group_no'] == group_no\n\n color = f'C{(i % 7) + 2}'\n marker = marker_[np.floor((i + 2) / 10).astype(int)]\n\n plt.scatter(\n -self.df_gxys.loc[group, 'x'],\n self.df_gxys.loc[group, 'y'],\n c=color,\n s=30,\n zorder=2,\n marker=marker,\n label=f'Group {group_no}: {group.sum()}',\n alpha=alpha[group][0])\n\n if (x1 != '') & (y1 != '') & (x2 != '') & (y2 != ''):\n plt.plot(\n [-x1, -x2],\n [y1, y2],\n linestyle='--',\n color=linecolor,\n zorder=3)\n if (x1 != '') & (y1 != ''):\n plt.scatter(\n -x1,\n y1,\n marker='o',\n edgecolor='r',\n facecolor='none',\n zorder=4,\n s=80)\n if (x2 != '') & (y2 != ''):\n plt.scatter(-x2, y2, marker='x', color='r', zorder=4, s=80)\n\n plt.title(self.title, zorder=6)\n\n median = np.argsort(self.df_gxys['x'])[len(self.df_gxys['x']) // 2]\n\n if center == (0, 0):\n if not self.target:\n plt.xlim(\n self.df_gxys['x'][median] - lim,\n self.df_gxys['x'][median] + lim)\n plt.ylim(\n self.df_gxys['y'][median] - lim,\n self.df_gxys['y'][median] + lim)\n\n else:\n plt.xlim(-lim, lim)\n plt.ylim(-lim, lim)\n\n else:\n plt.xlim(center[0] - lim, center[0] + lim)\n plt.ylim(center[1] - lim, center[1] + lim)\n\n plt.gca().set_aspect('equal', 'box')\n\n plt.xlabel('x (Mpc)')\n\n if legend:\n plt.legend(loc='lower right', ncol=4)\n\n if self.pause:\n plt.pause(0.001)",
"def _plot_comparison_repeatables(ax_abs, ax_per, ax_mag, pan, field, unit,\n other_program_name, **kw):\n\n #plot absolute error\n h_abs = ax_abs.plot(pan['absolute-difference'][field].index.values,\n pan['absolute-difference'][field].values,\n color=mpl.rcParams['axes.labelcolor'], zorder=-2)\n ax_abs.set_ylabel('Absolute Difference ' + unit)\n #plot percentage error\n h_per = ax_per.plot(pan['percent-difference'][field].index.values,\n pan['percent-difference'][field].values,\n color='firebrick', zorder=-1)\n ax_per.set_ylabel('Percent Difference', color='firebrick')\n #set error axes legend\n #ax_per.legend(h_abs + h_per, ['Absolute Difference','Percent Difference'], **_leg_kw)\n #ax_per.get_legend().set_zorder(1)\n #plot full results profiles\n kw['H'] += [ax_mag.plot(pan['%s-results' % other_program_name][field],\n color=_colormap[1])[0],\n ax_mag.plot(pan['emf.fields-results'][field],\n color=_colormap[0])[0]]\n kw['L'] += [other_program_name + ' Results', 'emf.fields Results']\n ax_mag.set_xlabel('Distance (ft)')",
"def compare_ratios(path='/Volumes/OptiHDD/data/pylith/3d/agu2013/output',\n\t\t\t\tsteps=['step01','step02'],\n\t\t\t\t#labels='',\n\t\t\t\tshow=True,\n\t\t\t\txscale=1e3,\n\t\t\t\tyscale=1e-2):\n\tplt.figure()\n\t#path = '/Users/scott/Desktop/elastic'\n\n\t# Deep source\n\tlabels = ['no APMB', 'APMB']\n\tdeep = {}\n\tuzmax = 0.824873455364\n\t# NOT sure why hardcoded...\n\tuzmax = 1\n\tfor i,outdir in enumerate(steps):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 30.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'o-',ms=4,lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'o--',ms=4,lw=4,color=l.get_color()) #mfc='none' transparent\n\t\tdeep[outdir] = uz_fem/uz_fem\n\n\n\t# Shallow Source\n\tshallow = {}\n\tuzmax = 0.949652827795 # Why?\n\tfor i,outdir in enumerate(['step11','step12']):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 20.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'.-', mfc='w', lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'.--',lw=4, mfc='w',color=l.get_color()) #mfc='none' transparent\n\n\t\tshallow[outdir] = uz_fem/ur_fem\n\n\t# Annotate\n\tplt.axhline(color='k',lw=0.5)\n\t#plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\t#plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.legend()\n\tplt.grid()\n\t#plt.ylim(-0.5, 3.5)\n\t#plt.savefig('deep.png',bbox_inches='tight')\n\t#plt.savefig('shallow.png',bbox_inches='tight')\n\n\t# normalized\n\tplt.ylim(-0.5, 4)\n\tplt.xlim(0,10)\n\tplt.xlabel('Normalized Radial Distance [R / D]')\n\tplt.ylabel('Normalized Displacement [U / Uz_max]')\n\t#plt.savefig('normalized_deep.png',bbox_inches='tight')\n\tplt.savefig('normalized_shallow.png',bbox_inches='tight')\n\n\n\t# Plot ratios of uz versus NOTE: this plot is confusing,,, just keep ratio of uz_max to ur_max\n\t'''\n\tplt.figure()\n\tplt.plot(x_fem, deep['step01'], label='Deep no APMB')\n\tplt.plot(x_fem, deep['step02'], label='Deep w/ APMB')\n\tplt.plot(x_fem, shallow['step11'], label='Shallow no APMB')\n\tplt.plot(x_fem, shallow['step12'], label='Shallow w/ APMB')\n\tplt.xlabel('Distance [km]') #NOTE: maybe plot normailzed X-axis (R-d)\n\t#plt.xlabel('Normalized Distance [R/d]')\n\tplt.ylabel('Ratio [Uz/Ur]')\n\tplt.title('Ratio of vertical to radial displacement')\n\tplt.legend()\n\tplt.show()\n\t'''",
"def plot_posteriors_conditions(self, *args, **kwargs):\n group_nodes = self.get_group_nodes()\n for dep in self.depends_on.keys():\n nodes = group_nodes.loc[group_nodes.knode_name == dep]\n if all(nodes.hidden == True):\n continue\n analyze.plot_posterior_nodes(nodes[\"node\"], *args, **kwargs)",
"def plot_metric_values(self, threshold=0):\n epochs_range = np.arange(threshold, len(self.accuracies), 1)\n plt.plot(epochs_range, self.accuracies[threshold:], color='red', marker='o')\n plt.title('Accuracy on test data. Eta={:.2f} Lambda={:2.2f}'.format(self.eta, self.lambda_r))\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.grid(True)\n plt.show()",
"def decision_plot(self, X, y):\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n explainer, shap_values = self.explainer(X=X)\n shap.decision_plot(base_value=explainer.expected_value, shap_values=shap_values,\n feature_names=list(X.columns), show=self.show)",
"def plot_series(groups, series):\n fig, ax = plt.subplots()\n ax.set_xlabel(\"Iterations\")\n ax.set_ylabel(series)\n\n for gkey, gval in groups.items():\n args = dict(gkey)\n\n series_values = get_series(gval, series)\n interval_size = args['test_interval']\n interval_count = series_values.shape[1] - 1\n\n x = np.arange(0, interval_size * interval_count + 1, step=interval_size)\n mean = np.mean(series_values, axis=0)\n std = np.std(series_values, axis=0)\n\n ax.plot(x, mean, label=format_group_key(gkey))\n ax.fill_between(x, mean + std, mean - std, alpha=0.2)\n\n ax.legend()\n return fig, ax"
] | [
"0.6690005",
"0.52602696",
"0.5226427",
"0.5201476",
"0.5161905",
"0.51192516",
"0.5022657",
"0.49890342",
"0.497622",
"0.49757755",
"0.49593621",
"0.4885093",
"0.4878392",
"0.48618805",
"0.48530275",
"0.48435473",
"0.48297042",
"0.4821337",
"0.47865835",
"0.47860396",
"0.47836027",
"0.47807932",
"0.47694817",
"0.47511947",
"0.47415298",
"0.4731351",
"0.47182968",
"0.4698362",
"0.46884245",
"0.4687587"
] | 0.60396034 | 1 |
pinForward is the forward Pin, so we change its duty cycle according to speed. | def forward(self, speed):
self.pwm_backward.ChangeDutyCycle(0)
self.pwm_forward.ChangeDutyCycle(speed) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, True)\n GPIO.output(11, False)\n GPIO.output(13, True)\n GPIO.output(15, False)\n # time.sleep(sec)\n motor_direction = 'Forward'\n return motor_direction",
"def __init__(self, pinForward, pinBackward, pinControl):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControl = pinControl\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControl, GPIO.OUT)\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n GPIO.output(self.pinControl,GPIO.HIGH)",
"def forward_left(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed) \n self.pwm_right.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(100)",
"def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)",
"def forward(speed, bias, biasDir):\n\t# todo: check directions for me please\n\tif biasDir == 1:\n rightMotor.run_direct(duty_cycle_sp=speed+bias)\n leftMotor.run_direct(duty_cycle_sp=speed)\n elif biasDir == -1:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed+bias)\n else:\n rightMotor.run_direct(duty_cycle_sp=speed)\n leftMotor.run_direct(duty_cycle_sp=speed)",
"def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)",
"def motorDirection(self, motorPin, direction):\n # print \"motorPin: \", motorPin\n # print \"direction: \", direction\n GPIO.output(motorPin, direction)",
"def motor_A(self, direction, speed):\n if direction == -1:\n GPIO.output(self.Motor_A_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_A_Pin2, GPIO.LOW)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)\n if direction == 1:\n GPIO.output(self.Motor_A_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_A_Pin2, GPIO.HIGH)\n self.pwm_A.start(100)\n self.pwm_A.ChangeDutyCycle(speed)",
"def forward(self, speed):\n self.controller.forward(speed)",
"def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError",
"def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)",
"def forward_right(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(100)",
"def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)",
"def fwd(dist=0): #distance is in cm\n try:\n if dist>0:\n # this casting to int doesn't seem necessary\n pulse=int(PPR*(dist//WHEEL_CIRC) )\n enc_tgt(1,1,pulse)\n except Exception as e:\n print (\"gopigo fwd: {}\".format(e))\n pass\n return write_i2c_block(ADDRESS,motor_fwd_cmd+[0,0,0])",
"def startAcceleratingForward(self,event):\n self.isAcceleratingForward=True",
"def steer(direction):\n if direction == 1:\n steerMotor.run(Adafruit_MotorHAT.FORWARD)\n steerMotor.setSpeed(255)\n if direction == -1:\n steerMotor.run(Adafruit_MotorHAT.BACKWARD)\n steerMotor.setSpeed(255)\n if direction == 0:\n steerMotor.setSpeed(0)\n steerMotor.run(Adafruit_MotorHAT.RELEASE)",
"def move_forward(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.anticlockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.clockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.anticlockwise_rotate(speed + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.clockwise_rotate(speed)",
"def forward_button(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=int(left_speed))\n self.right_motor.run_forever(speed_sp=int(right_speed))",
"def forward(self, speed):\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_rl' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fr' + self.postfix], -speed,\n ONE_SHOT_MODE)\n vrep.simxSetJointTargetVelocity(self.client_id, self.handles['rollingJoint_fl' + self.postfix], -speed,\n ONE_SHOT_MODE)",
"def GET_forward(self):\n self.roomba.DriveStraight(pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(pyrobot.VELOCITY_FAST)",
"def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)",
"def turn_90degrees(self, direction):\n if direction == \"right\" or direction == 1:\n self.myspeedctrl.send_speed(0,1)\n elif direction == \"left\" or direction == 2:\n self.myspeedctrl.send_speed(0,-1)\n rospy.sleep(1.61) #value found by trail and error\n self.myspeedctrl.send_speed(0,0)",
"def drive_forward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=left_speed)\n self.right_motor.run_forever(speed_sp=right_speed)",
"def set_pin_direction(self, pin, direction):\n pin = pin - 1\n if pin < 8:\n self.__port_a_direction = self.__helper.updatebyte(\n self.__port_a_direction, pin, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, self.__port_a_direction)\n else:\n self.__port_b_direction = self.__helper.updatebyte(\n self.__port_b_direction, pin - 8, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, self.__port_b_direction)\n return",
"def input_forward(self, joy_input):\n if self.saved[joy_input]:\n value = self.saved[joy_input]\n else:\n value = self.inputs[joy_input]\n yaw_pwm = np.interp(value, [-1, 1], [0, Joystick.MAX_YAW_PWM])\n print(\"(input forward) setting yaw pwm to \" + str(yaw_pwm))\n self.publish(Topic.YAW_PWM, yaw_pwm)",
"def increment_speed(self):\n self.speed += 0.0004",
"def move_forward(self, speed):\n\n # Clamp the speed\n speed = clamp(delta_unit(speed), 0, delta_unit(Car.max_speed))\n\n # Appends the speed according to the direction\n rad = np.radians(self.direction)\n self.fx += speed * np.cos(rad)\n self.fy += speed * np.sin(rad)\n\n # Set marker to move\n self.moved = True",
"def change_motor_speed(self, speed=0.0):\r\n if not self.enabled:\r\n self.set_neutral(braked=False)\r\n return\r\n\r\n # logging.info(\"{} Motor Speed: {}\".format(self.motor_name, speed))\r\n self.current_speed = speed # Store current set speed\r\n\r\n # If speed is < 0.0, we are driving in reverse.\r\n self.forward = True\r\n if speed < 0.0:\r\n # Normalise speed value to be in range [0, 100]\r\n speed = -speed\r\n # Store direction\r\n self.forward = False\r\n\r\n # Apply a factor to the speed to limit speed\r\n speed *= self.speed_factor\r\n\r\n # Set motor directional pins\r\n if self.forward:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 1)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 0)\r\n else:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 0)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 1)\r\n\r\n # Convert speed into PWM duty cycle\r\n # and clamp values to min/max ranges.\r\n dutycycle = speed\r\n if dutycycle < 0.0:\r\n dutycycle = 0.0\r\n elif dutycycle > self.max_speed:\r\n dutycycle = self.max_speed\r\n\r\n # Change the PWM duty cycle based on fabs() of speed value.\r\n self.PWM.ChangeDutyCycle(dutycycle)",
"def skipp(self):\n for x in range(4):\n self.fwd(right=100, left=100)\n time.sleep(.5)\n self.servo(1000)\n time.sleep(.1)\n self.servo(2000)\n time.sleep(.1)\n self.fwd(right=-100, left=-100)\n time.sleep(.1)\n self.servo(-1000)\n self.stop()",
"def drive(self,direction, speed=100) -> None:\n if direction == 1:\n driveMotor.run(Adafruit_MotorHAT.FORWARD)\n driveMotor.setSpeed(speed)\n if direction == -1:\n driveMotor.run(Adafruit_MotorHAT.BACKWARD)\n driveMotor.setSpeed(speed)\n if direction == 0:\n driveMotor.setSpeed(0)\n driveMotor.run(Adafruit_MotorHAT.RELEASE)"
] | [
"0.66786534",
"0.66583955",
"0.66447824",
"0.6522863",
"0.6463652",
"0.6370594",
"0.62973475",
"0.6285964",
"0.6233026",
"0.6130106",
"0.6119961",
"0.6082686",
"0.60227823",
"0.6020035",
"0.5921472",
"0.5857334",
"0.58558404",
"0.57667553",
"0.5758451",
"0.5737794",
"0.571628",
"0.5694123",
"0.56780034",
"0.56552285",
"0.5648163",
"0.5646829",
"0.55609435",
"0.5547905",
"0.5525594",
"0.5504033"
] | 0.726632 | 1 |
Set the duty cycle of both control pins to zero to stop the motor. | def stop(self):
self.pwm_forward.ChangeDutyCycle(0)
self.pwm_backward.ChangeDutyCycle(0)
self.pwm_left.ChangeDutyCycle(0)
self.pwm_right.ChangeDutyCycle(0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop(self):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(0)",
"def stop_motor(self):\n self.output(self.steering_pin, 0)\n self.pi.set_servo_pulsewidth(self.steering_pin, 0)",
"def stop(self):\n\t\tGPIO.output(self._dir_pin_1, GPIO.HIGH)\n\t\tGPIO.output(self._dir_pin_2, GPIO.HIGH)\n\t\tself._last_dir = 's'\n\t\t# self._motor_pwm.ChangeDutyCycle(0)",
"def turnOffMotors(self) -> None:\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)",
"def stop(self):\n self.left_motor.stop()\n self.right_motor.stop()",
"def turnOffMotors(self):\n self.mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)",
"def servo_off(self):\n self.logger.info('Setting servo OFF')\n self.electronics.move_servo(0)\n self.config['servo']['status'] = 0",
"def turn_off(self):\n self.set_pin(0, -1)\n self.set_pin(1, -1)\n self.set_pin(2, -1)",
"def servo_off(self):\n msg = b'\\x0C\\x00'\n self.__bt.write(msg)",
"def emitters_off(self):\n self.wp.digitalWrite(self.LEDON_PIN, self.wp.LOW)\n self.wp.delayMicroseconds(20)",
"def turn_off(self, **kwargs):\n self._attributes['current_speed'] = SPEED_OFF\n self._bond.turnOff(self._deviceId)",
"def stop(self):\n self.motor.stop()",
"def disable_relays(self):\n #ensure clock low and data high\n self.e.clear_bit(7)\n self.e.set_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)\n\n #clear the data line\n self.e.clear_bit(5)",
"def right(self, speed):\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(speed)",
"def stop_motors(self) -> None:\n motor_1 = self.robot.all_services.get('motor_1')\n motor_2 = self.robot.all_services.get('motor_2')\n if motor_1 is not None or motor_1 is not None:\n motor_1.stop_motor()\n motor_2.stop_motor()\n log.info(\"Motors stopped\")\n else:\n log.warning(\"One of the motors is not enabled!\")",
"def _reverseduty(self):\n if self.ir_pin.duty() == 0:\n self.ir_pin.duty(512)\n else:\n self.ir_pin.duty(0)",
"def stop(self):\n self.turnOffMotors()",
"def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)",
"def set_duty_cycle(self, pin, dutycycle):\n raise NotImplementedError",
"def motor_B(self, direction, speed):\n if direction == 1:\n GPIO.output(self.Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(self.Motor_B_Pin2, GPIO.LOW)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)\n if direction == -1:\n GPIO.output(self.Motor_B_Pin1, GPIO.LOW)\n GPIO.output(self.Motor_B_Pin2, GPIO.HIGH)\n self.pwm_B.start(100)\n self.pwm_B.ChangeDutyCycle(speed)",
"def stop(self):\n self.right_motor.stop(stop_action='brake')\n self.left_motor.stop(stop_action='brake')",
"def turn_off(self, **kwargs: Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands = [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)",
"def poweron(self) -> None:\n self.servo_reset()",
"def stop(self) -> None:\n turnOffMotors()",
"def _on_stop_cycle(self, kwargs: dict) -> None:\n self._cancel_automation()\n self.toggle(state=\"off\")",
"def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)",
"def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)",
"def zeroMotor(self):\n\t\tpass",
"def power_down (self, DAC_A =1, DAC_B=1):\n try :\n bus.write_i2c_block_data(self.address, self.__pointer_register( power_down, DAC_A, DAC_B), [0,0])\n \n except IOError:\n print (\"Device is not connected\")",
"def set_pwm(self, duty_cycle):\n PWM.set_duty_cycle(self.pwm_pin, duty_cycle)"
] | [
"0.7451313",
"0.71744835",
"0.70240444",
"0.64211655",
"0.6414792",
"0.6408484",
"0.6395715",
"0.63024884",
"0.6269145",
"0.62610817",
"0.6226628",
"0.6201555",
"0.614517",
"0.61346424",
"0.6122426",
"0.6099769",
"0.6093762",
"0.60788894",
"0.6051263",
"0.60405594",
"0.6035325",
"0.6029236",
"0.6002926",
"0.6000895",
"0.5982927",
"0.5968207",
"0.5968207",
"0.5968001",
"0.58806914",
"0.58674943"
] | 0.736179 | 1 |
Interpolate the points and radii between sections that have too few points. | def interpPoints(self, interpRad=False):
# print(np.shape(long_distances))
long_sections, long_distances, meddist = self.findLongSections()
print('Long inter-point distances found: %i' % len(long_sections))
count = 0
for sec in long_sections:
print('Supposed long section %i has %i nodes' \
% (sec, len(self.sections[sec])))
# set first and last points for interpolation
pt0, pt1 = self.sections[sec][0], self.sections[sec][-1]
# find number of points
numpts = int(long_distances[long_sections.index(sec)]/meddist)
Xs = np.linspace(pt0[0], pt1[0], numpts)
Ys = np.linspace(pt0[1], pt1[1], numpts)
Zs = np.linspace(pt0[2], pt1[2], numpts)
newpts = np.dstack((Xs, Ys, Zs))
newpts = [newpts[0][i] for i in xrange(len(newpts[0]))]
self.sections[sec] = newpts
count = count + 1
rad0, rad1 = self.secRads[sec][0], self.secRads[sec][-1]
# print(rad0, rad1)
rads = np.linspace(rad0, rad1, numpts)
# print(rads)
self.secRads[sec] = rads
long_sections, long_distances, meddist = self.findLongSections()
print('Long sections still remaining: %i' % len(long_sections))
if len(long_sections) > 0:
print(long_distances, meddist)
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __hinterpolate(self):\n \n # Temp. Data holders\n upperint = []\n lowerint = []\n \n # Dont like this, because here we insert points into the rawdata\n # But it creates consisitent results in the interpolation results\n if self.__upper[0][0] != 0: self.__upper.insert(0,(0.,0.))\n if self.__lower[0][0] != 0: self.__lower.insert(0,(0.,0.))\n \n # Create points\n if self.__interpolation_method == \"l\":\n xpointsU = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n xpointsL = list(map(lambda x:x/float(self.__procPointsCount),range(0,self.__procPointsCount+1)))\n elif self.__interpolation_method == \"p\":\n xpointsU = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n xpointsL = [x**2/float(self.__procPointsCount)**2 for x in range(self.__procPointsCount+1)]\n \n # Calculate secants\n uppersec = [(self.__upper[i+1][1]-self.__upper[i][1])/(self.__upper[i+1][0]-self.__upper[i][0]) for i in range(len(self.__upper)-1)]\n lowersec = [(self.__lower[i+1][1]-self.__lower[i][1])/(self.__lower[i+1][0]-self.__lower[i][0]) for i in range(len(self.__lower)-1)]\n \n # Calculate tangents\n uppertan = [(uppersec[k-1]+uppersec[k])/2 for k in range(1,len(uppersec))]\n uppertan.insert(0,uppersec[0])\n uppertan.append(uppersec[-1])\n\n lowertan = [(lowersec[k-1]+lowersec[k])/2 for k in range(1,len(lowersec))]\n lowertan.insert(0,lowersec[0])\n lowertan.append(lowersec[-1])\n \n # Hermite blending functions\n p0 = lambda t: 2*t**3 - 3*t**2 + 1\n m0 = lambda t: t**3 - 2*t**2 + t\n p1 = lambda t: -2*t**3 + 3*t**2\n m1 = lambda t: t**3 - t**2\n \n # Find matching points to improve accuarcy\n matchU = [(i,j) for i in range(len(xpointsU)) for j in range(len(self.__upper)) if xpointsU[i] == self.__upper[j][0]]\n matchL = [(i,j) for i in range(len(xpointsL)) for j in range(len(self.__lower)) if xpointsL[i] == self.__lower[j][0]]\n \n # Reverse match pairs to insure no index errors\n matchU.reverse()\n matchL.reverse()\n\n# print(self.__lower)\n# print(xpointsL)\n # Pop xpoints that dont require interpolation and append the point into the upperint list\n for i in matchU:\n xpointsU.pop(i[0])\n upperint.append(self.__upper[i[1]])\n \n# print(matchL)\n \n # Same process as above but for lower airfoil\n for i in matchL:\n xpointsL.pop(i[0])\n lowerint.append(self.__lower[i[1]])\n \n # Interpolate upper points\n for xp in xpointsU:\n for i in range(len(self.__upper)-1):\n if self.__upper[i][0] < xp < self.__upper[i+1][0]:\n h = self.__upper[i+1][0]-self.__upper[i][0]\n t = (xp - self.__upper[i][0]) / h\n solution = ( p0(t)*self.__upper[i][1] + h*m0(t)*uppertan[i] + p1(t)*self.__upper[i+1][1] + h*m1(t)*uppertan[i+1] )\n upperint.append((xp,solution))\n \n # Interpolate lower points\n for xp in xpointsL:\n for i in range(len(self.__lower)-1):\n if self.__lower[i][0] < xp < self.__lower[i+1][0]:\n h = self.__lower[i+1][0]-self.__lower[i][0]\n t = (xp - self.__lower[i][0]) / h\n solution = ( p0(t)*self.__lower[i][1] + h*m0(t)*lowertan[i] + p1(t)*self.__lower[i+1][1] + h*m1(t)*lowertan[i+1] )\n lowerint.append((xp,solution))\n \n # Sort the points to keep the correct sequence\n upperint.sort(key=lambda x:x[0], reverse=True)\n lowerint.sort(key=lambda x:x[0])\n \n # Do checks to insure no duplicates\n if upperint[0][0] != 1.0: upperint.insert(0,(1.0,0.0))\n if upperint[-1][0] != 0.0: upperint.append((0.0,0.0))\n if lowerint[0][0] == 0.0: lowerint.pop(0)\n if lowerint[-1][0] != 1.0: lowerint.append((1.0,0.0))\n\n self.__ProcPoints = upperint + lowerint",
"def test_interpolation():\n spiral_arm = survey.get_spiral_slice(track = \"perseus\", \n interpolate = True)\n spiral_arm2 = survey.get_spiral_slice(track = \"Per\", \n interpolate = False)\n\n assert np.allclose(spiral_arm[\"INTEN\"], spiral_arm2[\"INTEN\"], equal_nan = True)",
"def interpolate(self, distance, normalized=...): # -> BaseGeometry:\n ...",
"def test_interpolation(self):\n\n ndx1, ndx2 = self.find_partition()\n tessellation = Delaunay(self.grid[ndx2,:])\n\n # initialisation\n results = []\n ndim = self.ndim+1\n\n for j in ndx1:\n nmodels = len(self.tracks[j].models)\n aResult = np.empty((nmodels,ndim+nglb+6),dtype=gtype)\n pt = self.tracks[j].params + [0.0,]\n\n for i in range(nmodels):\n aModel1 = self.tracks[j].models[i]\n pt[-1] = aModel1.glb[iage]\n aModel2 = interpolate_model(self,pt,tessellation,ndx2)\n aResult[i,0:ndim] = pt\n if (aModel2 is None):\n aResult[i,ndim:ndim+nglb+6] = np.nan\n else:\n aResult[i,ndim:ndim+nglb+6] = compare_models(aModel1,aModel2)\n\n results.append(aResult)\n\n return results, ndx1, ndx2, tessellation",
"def interpolate_none(self):\n\n # Reset processed data\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan",
"def _interpolate(self, kps1: List[List[kp]], kps2: List[List[kp]]) -> np.ndarray:\n interpolated_kps = []\n for i in range(len(kps1)):\n # If one of the two points is empty -> Not interpolate\n if len(kps1[i]) != 0 and len(kps2[i]) != 0:\n interpolated_coords = np.linspace(np.array(kps1[i]), np.array(kps2[i]), num=3).tolist()\n interpolated_kps.append(interpolated_coords[1])\n else:\n interpolated_kps.append([None, None, None])\n return np.array(interpolated_kps)",
"def interpolate_hold_9(self):\n\n # Initialize variables\n n_ensembles = self.u_mps.shape[0]\n\n # Get data from object\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan\n\n n_invalid = 0\n # Process data by ensembles\n for n in range(n_ensembles):\n # Check if ensemble is invalid and number of consecutive invalids is less than 9\n if self.valid_data[0, n] == False and n_invalid < 9:\n self.u_processed_mps[n] = self.u_processed_mps[n - 1]\n self.v_processed_mps[n] = self.v_processed_mps[n - 1]\n n_invalid += 1\n else:\n n_invalid = 0",
"def interpolate_nans(self):\n\n signal = self.signal\n\n # check for more than one nan in row\n for i in range(len(signal)-1) :\n if np.isnan(signal[i]) and np.isnan(signal[i+1]) :\n raise Exception('There are two nans in a row ask moritz what to do !')\n\n if np.isnan(signal[0]) :\n np.signal[0] = signal[1]\n if np.isnan(signal[-1]) :\n signal[-1] = signal[-2]\n\n for i in range(1,len(signal)-1) :\n if np.isnan(signal[i]):\n signal[i] = (signal[i-1] + signal[i+1])/2",
"def linear_interpolation(self, pt1, pt2, unknown):\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity",
"def interpolate(self, *point, **kwargs):\n\n # Assume alpha enhancement of 0.4 if not given.\n if len(point) == 3:\n point = [] + list(point) + [0.4]\n warnings.warn(\n \"Assuming [alpha/Fe] = 0.4 composition unless \"\n \"otherwise specified.\", StandardCompositionAssumed)\n elif len(point) == 4:\n point = list(point)\n warnings.warn(\n \"Fourth stellar param is [alpha/Fe] = {}\".format(point[3]))\n\n return super(self.__class__, self).interpolate(*point, **kwargs)",
"def check_interp(self):\n\n points = np.loadtxt(\"skeleton_temp/\" + cell + \"_points.txt\", delimiter=',')\n\n self.initial_scatter = ax.scatter(points[:, 0],\n points[:, 1],\n points[:, 2], s=5, c='r')\n self.cell_points = self.get_cell_xyz()\n ax.scatter(self.cell_points[::5, 0],\n self.cell_points[::5, 1],\n self.cell_points[::5, 2], s=3, c='b', alpha=.03)\n ax.set_xlabel('X (um)')\n ax.set_ylabel('Y (um)')\n ax.set_zlabel('Z (um)')",
"def test_linear_interpolation_outside_domain(self):\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Simple example first for debugging\n xis = numpy.linspace(0.9, 4.0, 4)\n etas = numpy.linspace(5, 9.1, 3)\n points = combine_coordinates(xis, etas)\n refs = linear_function(points[:, 0], points[:, 1])\n\n vals = interpolate2d(x, y, A, points, mode='linear',\n bounds_error=False)\n msg = ('Length of interpolation points %i differs from length '\n 'of interpolated values %i' % (len(points), len(vals)))\n assert len(points) == len(vals), msg\n for i, (xi, eta) in enumerate(points):\n if xi < x[0] or xi > x[-1] or eta < y[0] or eta > y[-1]:\n assert numpy.isnan(vals[i])\n else:\n msg = ('Got %.15f for (%f, %f), expected %.15f'\n % (vals[i], xi, eta, refs[i]))\n assert numpy.allclose(vals[i], refs[i],\n rtol=1.0e-12, atol=1.0e-12), msg\n\n # Try a range of combinations of points outside domain\n # with error_bounds True\n print\n for lox in [x[0], x[0] - 1]:\n for hix in [x[-1], x[-1] + 1]:\n for loy in [y[0], y[0] - 1]:\n for hiy in [y[-1], y[-1] + 1]:\n\n # Then test that points outside domain can be handled\n xis = numpy.linspace(lox, hix, 4)\n etas = numpy.linspace(loy, hiy, 4)\n points = combine_coordinates(xis, etas)\n\n if lox < x[0] or hix > x[-1] or \\\n loy < y[0] or hiy > y[-1]:\n try:\n vals = interpolate2d(x, y, A, points,\n mode='linear',\n bounds_error=True)\n except BoundsError, e:\n assert 'bounds_error was requested' in str(e)\n else:\n msg = 'Should have raised bounds error'\n raise Exception(msg)\n\n # Try a range of combinations of points outside domain with\n # error_bounds False\n for lox in [x[0], x[0] - 1, x[0] - 10]:\n for hix in [x[-1], x[-1] + 1, x[-1] + 5]:\n for loy in [y[0], y[0] - 1, y[0] - 10]:\n for hiy in [y[-1], y[-1] + 1, y[-1] + 10]:\n\n # Then test that points outside domain can be handled\n xis = numpy.linspace(lox, hix, 10)\n etas = numpy.linspace(loy, hiy, 10)\n points = combine_coordinates(xis, etas)\n refs = linear_function(points[:, 0], points[:, 1])\n vals = interpolate2d(x, y, A, points,\n mode='linear', bounds_error=False)\n\n assert len(points) == len(vals), msg\n for i, (xi, eta) in enumerate(points):\n if xi < x[0] or xi > x[-1] or\\\n eta < y[0] or eta > y[-1]:\n msg = 'Expected NaN for %f, %f' % (xi, eta)\n assert numpy.isnan(vals[i]), msg\n else:\n msg = ('Got %.15f for (%f, %f), expected '\n '%.15f' % (vals[i], xi, eta, refs[i]))\n assert numpy.allclose(vals[i], refs[i],\n rtol=1.0e-12,\n atol=1.0e-12), msg",
"def _interpolation(self, video):\n self.F_int = []\n self.mgrid_0 = []\n self.mgrid_1 = []\n for p in range(video.points.shape[0]):\n _m_0, _m_1 = np.meshgrid(self.extended_points_0[p], self.extended_points_1[p])\n _F_int = interp2d(self.extended_points_0[p], self.extended_points_1[p], video.mraw[0, _m_0, _m_1], kind='cubic')\n self.F_int.append(_F_int)\n\n m_0, m_1 = np.meshgrid(self.extended_points_0[p, self.pad:-self.pad], self.extended_points_1[p, self.pad:-self.pad])\n self.mgrid_0.append(m_0)\n self.mgrid_1.append(m_1)",
"def InterpolateDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...",
"def interpolate(timepoint_defined, signal, interp_type, TR):\n\n timepoint_defined = np.array(timepoint_defined)\n\n true_inds = np.where(timepoint_defined == True)[0]\n false_inds = np.where(timepoint_defined == False)[0]\n\n\n signal_copy = np.array(signal)\n\n if interp_type == 'linear':\n\n #Still need to handle beginning/end cases\n\n for temp_timepoint in false_inds:\n\n\n #past_timepoint = true_inds[np.sort(np.where(true_inds < temp_timepoint)[0])[-1]]\n #future_timepoint = true_inds[np.sort(np.where(true_inds > temp_timepoint)[0])[0]]\n\n\n #Be sure there is at least one future timepoint and one past timepoint.\n #If there isn't, then grab either two past or two future timepoints and use those\n #for interpolation. If there aren't even two total past + future timepoints, then\n #just set the output to 0. Could also set the output to be unadjusted, but this\n #is a way to make the issue more obvious.\n temp_past_timepoint = np.sort(np.where(true_inds < temp_timepoint)[0])\n temp_future_timepoint = np.sort(np.where(true_inds > temp_timepoint)[0])\n\n #If we don't have enough data to interpolate/extrapolate\n if len(temp_past_timepoint) + len(temp_future_timepoint) < 2:\n\n signal_copy[temp_timepoint] = 0\n\n #If we do have enough data to interpolate/extrapolate\n else:\n\n if len(temp_past_timepoint) == 0:\n past_timepoint = true_inds[temp_future_timepoint[1]]\n else:\n past_timepoint = true_inds[temp_past_timepoint[-1]]\n\n if len(temp_future_timepoint) == 0:\n future_timepoint = true_inds[temp_past_timepoint[-2]]\n else:\n future_timepoint = true_inds[temp_future_timepoint[0]]\n\n #Find the appopriate past/future values\n past_value = signal_copy[int(past_timepoint)]\n future_value = signal_copy[int(future_timepoint)]\n\n #Use the interp1d function for interpolation\n interp_object = interp.interp1d([past_timepoint, future_timepoint], [past_value, future_value], bounds_error=False, fill_value='extrapolate')\n signal_copy[temp_timepoint] = interp_object(temp_timepoint).item(0)\n\n return signal_copy\n\n\n #For cubic spline interpolation, instead of taking the past/future timepoint\n #we will just take the closest 5 timepoints. If there aren't 5 timepoints, we will\n #set the output to 0\n if interp_type == 'cubic_spline':\n\n sorted_good = np.sort(signal_copy[true_inds])\n min_bound = sorted_good[0]\n max_bound = sorted_good[-1]\n\n #Continue if there are at least 5 good inds\n true_inds_needed = 5\n if len(true_inds) >= true_inds_needed:\n\n for temp_timepoint in false_inds:\n\n closest_inds = true_inds[np.argsort(np.absolute(true_inds - temp_timepoint))]\n closest_vals = signal_copy[closest_inds.astype(int)]\n interp_object = interp.interp1d(closest_inds, closest_vals, kind = 'cubic', bounds_error=False, fill_value='extrapolate')\n signal_copy[temp_timepoint.astype(int)] = interp_object(temp_timepoint).item(0)\n\n min_bound_exceded = np.where(signal_copy < min_bound)[0]\n if len(min_bound_exceded) > 0:\n\n signal_copy[min_bound_exceded] = min_bound\n\n max_bound_exceded = np.where(signal_copy > max_bound)[0]\n if len(max_bound_exceded) > 0:\n\n signal_copy[max_bound_exceded] = max_bound\n\n #If there aren't enough good timepoints, then set the bad timepoints = 0\n else:\n\n signal_copy[false_inds.astype(int)] = 0\n\n\n return signal_copy\n\n\n if interp_type == 'spectral':\n\n signal_copy = spectral_interpolation(timepoint_defined, signal_copy, TR)\n\n return signal_copy",
"def interpolate(self, interpolation=\"nearest\", **kwargs):\n return podpac.interpolators.Interpolate(source=self, interpolation=interpolation, **kwargs)",
"def InterpolationDerivs(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...",
"def test_linear_interpolation_nan_points(self):\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Then test that interpolated points can contain NaN\n xis = numpy.linspace(x[0], x[-1], 10)\n etas = numpy.linspace(y[0], y[-1], 10)\n xis[6:7] = numpy.nan\n etas[3] = numpy.nan\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert nanallclose(vals, refs, rtol=1e-12, atol=1e-12)",
"def test_interpolation_corner_cases(self):\n\n # Define four pixel centers\n x = [2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test that interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 3)\n etas = numpy.linspace(y[0], y[-1], 3)\n points = combine_coordinates(xis, etas)\n\n # Interpolate to cropped grids\n for xc, yc, Ac in [([x[0]], [y[0]], numpy.array([[A[0, 0]]])), # 1 x 1\n ([x[0]], y, numpy.array([A[0, :]])), # 1 x 2\n ]:\n\n vals = interpolate2d(xc, yc, Ac, points, mode='linear')\n msg = 'Expected NaN when grid %s is incomplete' % str(Ac.shape)\n assert numpy.all(numpy.isnan(vals)), msg",
"def interpolate_linear(self, transect):\n\n u = np.copy(self.u_mps)\n v = np.copy(self.v_mps)\n\n valid = np.isnan(u) == False\n\n # Check for valid data\n if sum(valid) > 1 and sum(self.valid_data[0, :]) > 1:\n\n # Compute ens_time\n ens_time = np.nancumsum(transect.date_time.ens_duration_sec)\n\n # Apply linear interpolation\n self.u_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=u[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)\n # Apply linear interpolation\n self.v_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=v[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)",
"def InterpolatePoint(self, p_int, vtkDataSetAttributes, vtkIdList, *float, **kwargs):\n ...",
"def interpolate(self):\n print(\"Interpolating points...\")\n interpolated_points = set()\n if os.cpu_count():\n processes = os.cpu_count()\n print(f\"Running on all {processes} cores.\")\n else:\n processes = 1\n length = len(self.main_cluster)\n delta = math.ceil(length / processes)\n manager = Manager()\n result_map = manager.dict()\n jobs = []\n for index in range(processes):\n start = index * delta\n stop = (index + 1) * delta\n if stop > length:\n stop = length\n p = Process(target=worker, args=(start, stop,\n result_map, index,\n self.distances,\n self.interpolation_threshold,\n self.main_cluster,\n self.color_lookup_table_points))\n jobs.append(p)\n p.start()\n\n for proc in jobs:\n proc.join()\n\n for index in result_map.keys():\n print(index)\n interpolated_points.update(result_map[index])\n\n main_points = [self.get_value_tuple(index) for index in self.main_cluster]\n interpolated_points.update(main_points)\n\n print(\"Finished interpolation!\")\n\n self.interpolated_points = list(interpolated_points)",
"def see_what_its_doing_1d():\n all_points = create_points_with_random_pollution_1d(100, 100, 10)\n picked_points = pick_uniform_random_points(all_points, 20)\n interpolated_points = interpolate_unknown_points(picked_points, all_points)\n\n picked_x = []\n picked_pollution = []\n for label, point in picked_points.items():\n picked_x.append(label)\n picked_pollution.append(point.get_pollution_value())\n\n interp_x = []\n inter_pollution = []\n\n for label, point in interpolated_points.items():\n if not label in picked_x:\n interp_x.append(label)\n inter_pollution.append(point.get_pollution_value())\n\n plt.plot(picked_x, picked_pollution, \"ro\", interp_x, inter_pollution, \"go\")\n plt.xlabel(\"Point Label\")\n plt.ylabel(\"Pollution Value\")\n plt.show()",
"def see_what_its_doing_1d():\n all_points = create_points_with_random_pollution_1d(100, 100, 10)\n picked_points = pick_uniform_random_points(all_points, 20)\n interpolated_points = interpolate_unknown_points(picked_points, all_points)\n\n picked_x = []\n picked_pollution = []\n for label, point in picked_points.items():\n picked_x.append(label)\n picked_pollution.append(point.get_pollution_value())\n\n interp_x = []\n inter_pollution = []\n\n for label, point in interpolated_points.items():\n if not label in picked_x:\n interp_x.append(label)\n inter_pollution.append(point.get_pollution_value())\n\n plt.plot(picked_x, picked_pollution, \"ro\", interp_x, inter_pollution, \"go\")\n plt.xlabel(\"Point Label\")\n plt.ylabel(\"Pollution Value\")\n plt.show()",
"def see_what_its_doing_1d():\n all_points = create_points_with_random_pollution_1d(100, 100, 10)\n picked_points = pick_uniform_random_points(all_points, 20)\n interpolated_points = interpolate_unknown_points(picked_points, all_points)\n\n picked_x = []\n picked_pollution = []\n for label, point in picked_points.items():\n picked_x.append(label)\n picked_pollution.append(point.get_pollution_value())\n\n interp_x = []\n inter_pollution = []\n\n for label, point in interpolated_points.items():\n if not label in picked_x:\n interp_x.append(label)\n inter_pollution.append(point.get_pollution_value())\n\n plt.plot(picked_x, picked_pollution, \"ro\", interp_x, inter_pollution, \"go\")\n plt.xlabel(\"Point Label\")\n plt.ylabel(\"Pollution Value\")\n plt.show()",
"def test_isentropic_pressure_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk)\n trueprs = 936.213 * units.hPa\n assert_almost_equal(isentprs[0][1], trueprs, 3)",
"def test_call_interpolate(self):\r\n # Verified with iNEXT (http://glimmer.rstudio.com/tchsieh/inext/).\r\n # SE estimates differ because they use a different technique. SE\r\n # estimates have been verified against values in Colwell 2012 instead\r\n # (in separate unit tests).\r\n\r\n # Just reference.\r\n obs = self.estimator1(start=15, stop=15, num_steps=1)\r\n self.assertEqual(obs.getSampleCount(), 1)\r\n assert_almost_equal(obs.getEstimates('S1'),\r\n [(15, 5, 0.674199862463, 3.67859255119, 6.32140744881)])\r\n\r\n # start=1 and reference.\r\n obs = self.estimator1(start=1, stop=1, num_steps=1)\r\n self.assertEqual(obs.getSampleCount(), 1)\r\n assert_almost_equal(obs.getEstimates('S1'),\r\n [(1, 1.0, 0.250252397843, 0.509514313183, 1.49048568682),\r\n (15, 5, 0.674199862463, 3.67859255119, 6.32140744881)])\r\n\r\n # Points in between start=1 and reference.\r\n obs = self.estimator1(start=1, stop=15, num_steps=3)\r\n self.assertEqual(obs.getSampleCount(), 1)\r\n assert_almost_equal(obs.getEstimates('S1'),\r\n [(1, 1.0, 0.250252397843, 0.509514313183, 1.49048568682),\r\n (5, 3.40326340326, 0.655024590447, 2.119438797,\r\n 4.68708800953),\r\n (9, 4.4001998002, 0.680106580075,\r\n 3.0672153976, 5.7331842028),\r\n (13, 4.85714285714, 0.665379090563, 3.55302380357,\r\n 6.16126191071),\r\n (15, 5, 0.674199862463, 3.67859255119, 6.32140744881)])",
"def _larange_interpolate(x, points):\n p = PRIME\n k = len(points)\n xs, ys = [], []\n for pt in points:\n xs.append(pt.X)\n ys.append(pt.Y)\n assert k == len(set(xs)), \"Points must be destinct.\"\n nums = [] # numerators\n dens = [] # denominators calculated individually to prevent float div errors\n for i in range(k):\n others = list(xs)\n cur = others.pop(i) # current x value\n nums.append(product(x - o for o in others))\n dens.append(product(cur - o for o in others))\n den = product(dens) # common denominator\n num = sum([_divmod(nums[i] * den * ys[i] % p, dens[i], p) for i in range(k)])\n return _divmod(num, den, p) % p",
"def bilinear_interpolation(self, pt1, pt2, pt3, pt4, unknown):\n\n # Write your code for bilinear interpolation here\n # May b you can reuse or call linear interpolatio method to compute this task\n \n X1,Y1, intensity1 = pt1\n X2,Y2, intensity2 = pt2\n X3,Y3, intensity3 = pt3\n X4,Y4, intensity4 = pt4\n newPointX1,newPointY1 = unknown\n\n newpt1=self.linear_interpolation((X1,intensity1),(X2,intensity2),newPointX1)\n newpt2=self.linear_interpolation((X3,intensity3),(X4,intensity4),newPointX1)\n newpt1=Y1,newpt1\n newpt2=Y4,newpt2\n intensity=self.linear_interpolation(newpt1,newpt2,newPointY1)\n \n \n\n return intensity",
"def test_1d_linear_interpolation_basic(self):\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n\n # Define array with corresponding values\n A = numpy.zeros((len(x)))\n\n # Define values for each xas a linear function\n for i in range(len(x)):\n A[i] = linear_function(x[i], 0)\n\n # Test first that original points are reproduced correctly\n for i, xi in enumerate(x):\n val = interpolate1d(x, A, [xi], mode='linear')[0]\n ref = linear_function(xi, 0)\n assert numpy.allclose(val, ref, rtol=1e-12, atol=1e-12)\n\n # Then test that genuinly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 10)\n points = xis\n\n vals = interpolate1d(x, A, points, mode='linear')\n refs = linear_function(points, 0)\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)\n\n # Exercise bounds_error flag\n vals = interpolate1d(x, A, points, mode='linear',\n bounds_error=True)\n refs = linear_function(points, 0)\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)"
] | [
"0.6352975",
"0.62675714",
"0.61275345",
"0.5952627",
"0.5907965",
"0.58464473",
"0.5796776",
"0.578933",
"0.57794523",
"0.57792443",
"0.5757177",
"0.5749844",
"0.57261205",
"0.5726011",
"0.56745857",
"0.5649165",
"0.56255597",
"0.5618809",
"0.5615824",
"0.55846244",
"0.5575734",
"0.55678564",
"0.5558076",
"0.5558076",
"0.5558076",
"0.55468947",
"0.55329084",
"0.5502345",
"0.55007267",
"0.54945827"
] | 0.6316739 | 1 |
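A hedged note on the trailing score fields (an assumption read off the visible records, not documented in the dump): document_score appears to be the retrieval score of the positive document, negative_scores the scores of the 30 negatives in order, and document_rank the number of negatives that outscore the document. A minimal sketch of that reading:

def document_rank(document_score: float, negative_scores: list) -> int:
    # Count negatives that score above the positive document (assumed semantics).
    return sum(score > document_score for score in negative_scores)

# Spot-check against the record above: only 0.6352975 exceeds 0.6316739, giving rank 1.
assert document_rank(0.6316739, [0.6352975, 0.62675714, 0.61275345]) == 1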
Loads the data set provided in this repository and returns a list of Decks or FuzzyDecks. The deck list is sorted by archetype so the distance matrix is easier to visualize. | def load_data_set(hero_class: str, fuzzy: bool, filename: str = "data/Decks.json", debug: bool = False) \
-> Union[List[Deck], List[FuzzyDeck]]:
if debug:
print("### loading dataset...")
with open(filename) as f:
data = json.load(f)
hero_classes = list(data["series"]["metadata"].keys())
if hero_class not in hero_classes and hero_class != "ALL":
raise Exception("hero class <" + hero_class + "> not available. "
"Consider using one class out of: " + ", ".join(hero_classes))
if debug:
for cl in hero_classes:
print("" + str(len(data["series"]["data"][cl])) + " played decks for hero class " + cl)
played_decks = []
if hero_class == "ALL":
for hero_class in hero_classes:
for i, deck_data in enumerate(data["series"]["data"][hero_class]):
if fuzzy:
played_decks.append(FuzzyDeck(deck_data))
else:
played_decks.append(Deck(deck_data))
else:
for i, deck_data in enumerate(data["series"]["data"][hero_class]):
if fuzzy:
played_decks.append(FuzzyDeck(deck_data))
else:
played_decks.append(Deck(deck_data))
# sort by cluster label for easier visualization of distance matrix
played_decks = sorted(played_decks, key=lambda x: x.archetype[0])
return played_decks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_decks(**options):\n graph = bonobo.Graph()\n\n csv_in = bonobo.noop\n\n graph.add_chain(csv_in, in_use_cards, _input=None)\n\n for deck in listdir('decks'):\n deck_path = join('decks', deck)\n if deck == '.gitignore':\n continue\n\n if isfile(deck_path):\n graph.add_chain(bonobo.CsvReader(deck_path), _output=csv_in)\n\n return graph",
"def get_decks(self, include_cards=True):\n deck_previews = self.data_source.get_decks(self.user_id,\n not include_cards)\n\n return deck_previews",
"def get_deck_list(deckid):\n # Need to know if we're looking at a deckid or deckid tuple\n # TODO: Clean this up a bit (shouldn't need to support deckids or deck)\n # tuples now that I'm using Deck objects.)\n if isinstance(deckid, tuple):\n # The deckid is in deck[0]\n # Format is (deckid, deck_class)\n deckid = deckid[0]\n # http://www.hearthpwn.com/decks/listing/ + /neutral or /class\n url = 'http://www.hearthpwn.com/decks/listing/'\n css = '#cards > tbody > tr > td.col-name'\n\n cards = []\n\n # Class Cards\n pagetree = get_pagetree(url + str(deckid) + '/class')\n elements = get_elements_from_page(pagetree, css)\n for element in elements:\n card = html.tostring(element, method='text', encoding='UTF-8')\n cards.append(card)\n\n # Neutral Cards\n pagetree = get_pagetree(url + str(deckid) + '/neutral')\n elements = get_elements_from_page(pagetree, css)\n for element in elements:\n card = html.tostring(element, method='text', encoding='UTF-8')\n cards.append(card)\n\n regex = re.compile(b'^\\r\\n(.+)\\r\\n\\r\\n\\xc3\\x97 (\\d+)')\n deck = []\n for card in cards:\n match = re.search(regex, card)\n if match:\n cardname = match.group(1).decode('UTF-8')\n amount = int(match.group(2))\n deck.append(Card(cardname, amount))\n\n return deck",
"def get_decks(filtering=None, sorting=None, count=None,\n patch=None, classid=None):\n decks_metainfo = get_deck_metainfo(filtering, sorting, count,\n patch, classid)\n decks = [Deck(deck[0], deck[1], get_deck_list(deck[0]))\n for deck in decks_metainfo]\n return decks",
"def populatePokerDeck():\r\n #At some point, I may want this function, or a function like it, to read from a txt/json or dat file, \r\n #but for now this suffices.\r\n aDeck =\t[\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Ace\",\r\n \"Type\": \"Face\",\r\n \"Value\": 1\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Two\",\r\n \"Type\": \"Number\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Three\",\r\n \"Type\": \"Number\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Four\",\r\n \"Type\": \"Number\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Five\",\r\n \"Type\": \"Number\",\r\n \"Value\": 5\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Six\",\r\n \"Type\": \"Number\",\r\n \"Value\": 6\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Seven\",\r\n \"Type\": \"Number\",\r\n \"Value\": 7\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Eight\",\r\n \"Type\": \"Number\",\r\n \"Value\": 8\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Nine\",\r\n \"Type\": \"Number\",\r\n \"Value\": 9\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Ten\",\r\n \"Type\": \"Number\",\r\n \"Value\": 10\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Jack\",\r\n \"Type\": \"Face\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"Queen\",\r\n \"Type\": \"Face\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Hearts\",\r\n \"Name\": \"King\",\r\n \"Type\": \"Face\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Ace\",\r\n \"Type\": \"Face\",\r\n \"Value\": 1\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Two\",\r\n \"Type\": \"Number\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Three\",\r\n \"Type\": \"Number\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Four\",\r\n \"Type\": \"Number\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Five\",\r\n \"Type\": \"Number\",\r\n \"Value\": 5\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Six\",\r\n \"Type\": \"Number\",\r\n \"Value\": 6\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Seven\",\r\n \"Type\": \"Number\",\r\n \"Value\": 7\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Eight\",\r\n \"Type\": \"Number\",\r\n \"Value\": 8\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Nine\",\r\n \"Type\": \"Number\",\r\n \"Value\": 9\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Ten\",\r\n \"Type\": \"Number\",\r\n \"Value\": 10\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Jack\",\r\n \"Type\": \"Face\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"Queen\",\r\n \"Type\": \"Face\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Clubs\",\r\n \"Name\": \"King\",\r\n \"Type\": \"Face\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Ace\",\r\n \"Type\": \"Face\",\r\n \"Value\": 1\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Two\",\r\n \"Type\": \"Number\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Three\",\r\n \"Type\": \"Number\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Four\",\r\n \"Type\": \"Number\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Five\",\r\n \"Type\": \"Number\",\r\n \"Value\": 5\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Six\",\r\n \"Type\": 
\"Number\",\r\n \"Value\": 6\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Seven\",\r\n \"Type\": \"Number\",\r\n \"Value\": 7\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Eight\",\r\n \"Type\": \"Number\",\r\n \"Value\": 8\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Nine\",\r\n \"Type\": \"Number\",\r\n \"Value\": 9\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Ten\",\r\n \"Type\": \"Number\",\r\n \"Value\": 10\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Jack\",\r\n \"Type\": \"Face\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"Queen\",\r\n \"Type\": \"Face\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Diamonds\",\r\n \"Name\": \"King\",\r\n \"Type\": \"Face\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Ace\",\r\n \"Type\": \"Face\",\r\n \"Value\": 1\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Two\",\r\n \"Type\": \"Number\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Three\",\r\n \"Type\": \"Number\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Four\",\r\n \"Type\": \"Number\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Five\",\r\n \"Type\": \"Number\",\r\n \"Value\": 5\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Six\",\r\n \"Type\": \"Number\",\r\n \"Value\": 6\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Seven\",\r\n \"Type\": \"Number\",\r\n \"Value\": 7\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Eight\",\r\n \"Type\": \"Number\",\r\n \"Value\": 8\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Nine\",\r\n \"Type\": \"Number\",\r\n \"Value\": 9\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Ten\",\r\n \"Type\": \"Number\",\r\n \"Value\": 10\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Jack\",\r\n \"Type\": \"Face\",\r\n \"Value\": 2\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"Queen\",\r\n \"Type\": \"Face\",\r\n \"Value\": 3\r\n },\r\n {\r\n \"Suite\": \"Spades\",\r\n \"Name\": \"King\",\r\n \"Type\": \"Face\",\r\n \"Value\": 4\r\n },\r\n {\r\n \"Suite\": \"Red\",\r\n \"Name\": \"Joker\",\r\n \"Type\": \"Face\",\r\n \"Value\": None\r\n },\r\n {\r\n \"Suite\": \"Black\",\r\n \"Name\": \"Joker\",\r\n \"Type\": \"Face\",\r\n \"Value\": None\r\n }]\r\n \r\n return aDeck",
"def get_deck():\n deck = []\n for suit in Suit:\n for rank in Rank:\n deck.append(Card(suit, rank))\n return deck",
"def deck(self) -> Iterable[CardIdentifier]:\n # for some reason cards are treated quite different by NS api currently\n # so we cant simply make a shards call. for now we make a direct call\n # to the requester shards_xml method, since it does not insert the\n # `nation=name` parameter\n # this request returns a <CARDS><DECK><CARD/>...</DECK><CARDS> structure,\n # so we immedietly retrieve the DECK node (which contains multiple CARD nodes)\n # with [0]\n deck = as_xml(\n self.requester.shard_request(\n shards=[\"cards\", \"deck\"], nationname=self.nationname\n ).text\n )[0]\n return [CardIdentifier.from_xml(node) for node in deck]",
"def prepare_decklists(deck_files, data_path):\n \n deck = {'deckname':[], 'cardname':[], 'card_count':[], 'sideboard':[]}\n for file_name in deck_files:\n companion = 0\n file = open(data_path+file_name, 'r')\n sideboard = False\n for line in file:\n\n items = line.split(\" \")\n if (items[0][:4] == 'Deck'):\n pass\n elif (items[0][:9] == 'Companion'):\n companion = 1\n elif '\\n' in items or items[0][:9] == 'Sideboard':\n if companion == 1:\n companion -= 1\n else:\n sideboard = True\n else:\n try:\n deck['deckname'].append(file_name)\n deck['cardname'].append(\" \".join(items[1:]).replace('\\n',''))\n deck['card_count'].append(int(items[0]))\n deck['sideboard'].append(sideboard)\n except (Exception, ValueError) as error:\n print(f'Unable to process: {error} ; file {filename}')\n return deck",
"def generate_deck(self):\n\t\tsuits = [\"hearts\", \"spades\",\"diamonds\",\"clubs\"]\n\t\tcards = []\n\n\t\tfor suit in suits:\n\t\t\tif self.ace_as_eleven:\n\t\t\t\tace = Card(\"Ace\", 11, suit)\n\t\t\telse:\n\t\t\t\tace = Card(\"Ace\", 1, suit)\n\t\t\tcards.append(ace)\n\n\t\t\ttwo = Card(\"Two\", 2, suit)\n\t\t\tcards.append(two)\n\t\t\t\n\t\t\tthree = Card(\"Three\", 3, suit)\n\t\t\tcards.append(three)\n\n\t\t\tfour = Card(\"Four\", 4, suit)\n\t\t\tcards.append(four)\n\n\t\t\tfive = Card(\"Five\", 5, suit)\n\t\t\tcards.append(five)\n\n\t\t\tsix = Card(\"Six\", 6, suit)\n\t\t\tcards.append(six)\n\n\t\t\tseven = Card(\"Seven\", 7, suit)\n\t\t\tcards.append(seven)\n\n\t\t\teight = Card(\"Eight\", 8, suit)\n\t\t\tcards.append(eight)\n\n\t\t\tnine = Card(\"Nine\", 9, suit)\n\t\t\tcards.append(nine)\n\n\t\t\tten = Card(\"Ten\", 10, suit)\n\t\t\tcards.append(ten)\n\n\t\t\tjack = Card(\"Jack\", 10, suit)\n\t\t\tcards.append(jack)\n\n\t\t\tqueen = Card(\"Queen\", 10, suit)\n\t\t\tcards.append(queen)\n\n\t\t\tking = Card(\"King\", 10, suit)\n\t\t\tcards.append(king)\n\n\t\treturn cards",
"def __init__(self):\n \n self.deck = [Card(suit,rank) for suit in SUITS for rank in RANKS]",
"def loadCardDB():\n with open(CARDS_JSON, 'r') as infofile:\n cards = json.load(infofile)\n with open(PILOT_TEXT_JSON, 'r') as infofile:\n pilotTexts = json.load(infofile)\n with open(UPGRADE_TEXT_JSON, 'r') as infofile:\n upgradeTexts = json.load(infofile)\n with open(MODIFICATION_TEXT_JSON, 'r') as infofile:\n modificationTexts = json.load(infofile)\n with open(TITLE_TEXT_JSON, 'r') as infofile:\n titleTexts = json.load(infofile)\n return _createCardDB(cards, pilotTexts, upgradeTexts, modificationTexts, titleTexts)",
"def sort(self):\n self.deckcards.sort()",
"def get_cards(self):\n return [card.view_model() for card in self._deck.loc]",
"def load():\n\n # Path for the cache-file.\n cache_path = os.path.join(data_dir, \"collisions.pkl\")\n\n # If the DataSet-object already exists in a cache-file\n # then load it, otherwise create a new object and save\n # it to the cache-file so it can be loaded the next time.\n dataset = load_cached(cache_path=cache_path,\n in_dir=data_dir)\n\n return dataset",
"def shuffle(self):\n\n if self.deck:\n self.deck = deque()\n\n max_decks = self.deck_count + 1 # +1 for range function\n\n for deck in range(1, max_decks):\n for suit in self.suits:\n for num, name in enumerate(self.names, start=1):\n card = PlayingCard()\n card.set_attributes(name, suit, num)\n self.deck.append(card)\n\n for deck_shuffle in range(self.shuffle_count):\n random.shuffle(self.deck)",
"def test_list(self):\n\n decks = []\n try:\n decks.extend(scrape_decks())\n except Exception as e:\n self.logger.exception(\n 'Scraper for site TappedOut raised an exception'\n )\n\n print(\"Collected {} decks:\".format(len(decks)))\n for deck in decks:\n print(\"#\", deck.deckType)\n print(\" commander =\", deck.commander)\n print(\" image =\", deck.commander_img)\n print(\" video =\", deck.video)\n if deck.decklist is not None:\n print(\" decklist =\", deck.decklist)\n print()\n\n if os.environ.get(\"JUMBO_WRITE_TO_DB\") is not None:\n self.insert_decks(decks)",
"def populate_deck_db(decks, cursor):\n cursor.execute('DROP TABLE IF EXISTS decks')\n cursor.execute('DROP TABLE IF EXISTS deck_lists')\n cursor.execute('''CREATE TABLE IF NOT EXISTS decks\n (deckid integer primary key, class text)\n WITHOUT ROWID''')\n\n cursor.execute('''CREATE TABLE IF NOT EXISTS deck_lists\n (deckid integer, cardname text, amount integer,\n PRIMARY KEY (deckid, cardname))''')\n for deck in decks:\n cursor.execute('INSERT INTO decks VALUES (?, ?)',\n (deck.deckid, deck.playerclass))\n for card in deck.decklist:\n cursor.execute('INSERT INTO deck_lists VALUES (?, ?, ?)',\n (deck.deckid, card.cardname, card.amount))\n return",
"def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)\n loadAdquires(catalog)\n loadNacionalities(catalog)\n load2DArtworks(catalog)\n loadArtistMediumsTags(catalog)\n loadDptments(catalog)\n catalog['artists'] = sortArtists(catalog, 3)\n fillArtistMediums(catalog)\n fillMostUsedMediums(catalog)\n catalog['artists_tags'] = sortArtistTags(catalog, 3)\n sort_dptments(catalog)",
"def create_deck(self):\n\n deck = []\n\n # Suits and face values\n suits = ['Clubs', 'Diamonds', 'Hearts', 'Spades']\n face_values = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n\n # Creating deck\n for suit in suits:\n for value in face_values:\n deck.append(Card(suit[0], value))\n\n # Adding jokers\n if self.jokers:\n deck.append(Card('Jk', 0))\n deck.append(Card('Jk', 0))\n\n return deck",
"def refresh(self):\n self.deck = []\n\n for _suit in Suit:\n for _face in Face:\n self.insert(Card(_suit, _face, self))",
"def load(self):\n\t\t# Initialize empty list\n\t\tdata_files = []\n\n\t\t# Append the Drusen files to the list\n\t\tfor single_file in os.listdir(self.data_dir):\n\t\t\tdata_files.append(single_file)\n\t\treturn data_files",
"def get_card_sets(self, name: str) -> List:",
"def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))",
"def shuffle_deck(self):\n deck = [i for i in range(0, 52)]\n shuffle(deck)\n self.deck = [cards[c*2:c*2+2] for c in deck]",
"def load(self, source):\n try:\n inputdata = self.__inputmanager.read(source)\n self.__suitables = self.__inputmanager.map(inputdata)\n self.__data = inputdata\n except ValueError as e:\n print (\"Failed to load the dataset: %s\" % e)\n raise\n\n self.__modules = self.import_suitable_visualizations(self.__suitables)\n self.__has_datefields = self.__inputmanager.has_date_points()\n # Converting the datakeys into strings.\n self.__datakeys = [str(i) for i in list(self.__data[0].keys())]\n return self.__suitables",
"def parse_decklist(archidekt_id: str) -> tuple[Decklist, bool, list]:\r\n decklist = Decklist()\r\n warnings = []\r\n ok = True\r\n\r\n r = requests.get(f\"https://archidekt.com/api/decks/{archidekt_id}/\")\r\n if r.status_code != 200:\r\n raise (ValueError(f\"Archidekt returned statuscode {r.status_code}\"))\r\n\r\n data = r.json()\r\n\r\n in_deck = {cat[\"name\"] for cat in data[\"categories\"] if cat[\"includedInDeck\"]}\r\n\r\n for item in data[\"cards\"]:\r\n # Extract relevant data\r\n count = item[\"quantity\"]\r\n card_name = item[\"card\"][\"oracleCard\"][\"name\"]\r\n set_id = item[\"card\"][\"edition\"][\"editioncode\"]\r\n collector_number = item[\"card\"][\"collectorNumber\"]\r\n if len(item[\"categories\"]) > 0 and item[\"categories\"][0] not in in_deck:\r\n continue\r\n\r\n # Validate card name\r\n card_name, warnings_name = validate_card_name(card_name)\r\n if card_name is None:\r\n decklist.append_comment(card_name)\r\n warnings.extend([(decklist.entries[-1], level, msg) for level, msg in warnings_name])\r\n ok = False\r\n continue\r\n\r\n # Validate card print\r\n card, warnings_print = validate_print(card_name, set_id, collector_number)\r\n\r\n decklist.append_card(count, card)\r\n warnings.extend([(decklist.entries[-1], level, msg) for level, msg in warnings_name + warnings_print])\r\n\r\n decklist.name = data[\"name\"]\r\n\r\n return decklist, ok, warnings",
"def populate(cards_info, sets_file=None, session=Session):\n\n s = session()\n\n with sets_file or codecs.open(DEFAULT_SETS_FILE, encoding=\"utf-8\") as file:\n reader = unicode_csv_reader(file, reader=csv.DictReader)\n sets = {}\n for row in reader:\n row[\"released\"] = datetime.datetime.strptime(\n row[\"released\"], u\"%Y/%m/%d\"\n )\n sets[row[\"code\"]] = m.Set(**row)\n\n sts = itertools.chain.from_iterable(types.subtypes.itervalues())\n\n types_ = {type : m.Type(name=type) for type in types.all}\n supertypes = {st : m.Supertype(name=st) for st in types.supertypes}\n subtypes = {st : m.Subtype(name=st) for st in sts}\n\n s.add_all(\n itertools.chain.from_iterable(\n i.itervalues() for i in (sets, types_, supertypes, subtypes)\n )\n )\n\n for card in cards_info:\n # XXX: Split cards / Stupid multiple ability\n if \" // \" in card[u\"name\"] or card[u\"name\"] == u\"Seeds of Strength\":\n continue\n\n t, u, v = (card.pop(k) for k in [u\"supertypes\", u\"types\", u\"subtypes\"])\n\n card[u\"ability_objects\"] = [\n s.query(m.Ability).filter_by(description=d).first() or\n m.Ability(description=d) for d in card.pop(u\"abilities\")\n ]\n\n card[u\"supertype_objects\"] = {supertypes[st] for st in t}\n card[u\"type_objects\"] = {types_[type] for type in u}\n card[u\"subtype_objects\"] = {subtypes[st] for st in v}\n\n appearances = {\n m.SetAppearance(set=sets[set], rarity=rarity)\n for set, rarity in card.pop(u\"appearances\")\n }\n\n card = m.Card(**card)\n card.set_appearances.update(appearances)\n\n s.add(card)\n\n s.commit()",
"def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()",
"def shuffle(self):\r\n random.shuffle(self.deck_of_cards)\r\n return self.deck_of_cards",
"def shuffle(self):\n random.shuffle(self.deckcards)"
] | [
"0.5819562",
"0.5731613",
"0.544646",
"0.53726274",
"0.5366942",
"0.5298741",
"0.5272846",
"0.52254647",
"0.5140814",
"0.50974256",
"0.50899714",
"0.50865227",
"0.5069896",
"0.50649506",
"0.5054334",
"0.50400245",
"0.5027452",
"0.50256103",
"0.50096786",
"0.50049436",
"0.4990609",
"0.49646613",
"0.4954556",
"0.49482644",
"0.49426016",
"0.4938606",
"0.49299636",
"0.4926834",
"0.49254796",
"0.4924566"
] | 0.648076 | 0 |
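A minimal usage sketch for the loader in the record above (hedged: the module name deck_utils is a placeholder; only load_data_set, the Deck/FuzzyDeck classes, and the archetype attribute are taken from the record itself):

from deck_utils import load_data_set  # placeholder import; the real module name is not shown in the dump

# "ALL" and the default filename come straight from the function signature above.
decks = load_data_set(hero_class="ALL", fuzzy=True, debug=True)
labels_true = [deck.archetype[0] for deck in decks]  # archetype[0] is the key the loader sorts by
print(len(decks), "decks across", len(set(labels_true)), "archetypes")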
Calculates the distance matrix of a list of Deck or FuzzyDeck objects. Returns the vector-form (condensed) distance vector. | def calculate_distance_matrix(played_decks: Union[List[FuzzyDeck], List[Deck]], measure: str):
deck_data = np.array(played_decks).reshape(len(played_decks), 1)
if measure == "jaccard":
dist = pdist(deck_data, lambda u, v: u[0].jaccard_distance(v[0]))
elif measure == "euclidean":
dist = pdist(deck_data, lambda u, v: u[0].euclidean_distance(v[0]))
else:
raise ValueError("Unknown distance measure {}. ".format(measure) +
"Please choose one of the following distance measures ['euclidean','jaccard']")
return dist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDistanceMatrix(self):\n v = self.getVectors()\n vLis = v.keys()\n N = len(v.keys())\n D = np.zeros([N, N], dtype=np.float32)\n print(N)\n for i in range(N):\n print(\"%d/%d\" %(i, N))\n D[i, i] = 1\n for j in range(i + 1, N):\n dist = self.cosin_sim_pairs(v[vLis[i]], v[vLis[j]])\n D[i, j] = dist\n D[j, i] = dist\n return D",
"def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist",
"def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)",
"def calculate_distance_matrix(atomlist):\n distlist = []\n for atom in atomlist:\n atomdict = {}\n for partner in atomlist:\n if not str(int(partner[0][1])) in atomdict.keys():\n atomdict[str(int(partner[0][1]))] = []\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n else:\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n atomdict[str(int(partner[0][1]))].sort()\n\n distlist.append(atomdict)\n\n return distlist",
"def _distance_matrix(self):\n\n # Log the type of metric being used in Sequencing\n logger.info('Using {} Distance'.format(self.measure))\n\n # Convert the nodal coordinate tuples to a np.array\n coords = np.vstack(map(np.array, self.coords.values()))\n \n if self.measure == 'haversine':\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n haversine = lambda coord: get_hav_distance(coords[:, 0], coords[:, 1], *coord) \n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(haversine, coords))\n\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n euclidean = lambda coord: get_euclidean_dist(coords, coord)\n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(euclidean, coords))",
"def DistanceMatrices(self):\r\n return self._dms",
"def compute_distances(Ls):\n if not isinstance(Ls, list):\n Ls = [Ls]\n\n dists = []\n for L in Ls:\n N,D = L.shape\n # 1xNxD - Nx1xD (L1 distance)\n dist = (np.abs(L[None,:,:] - L[:,None,:])).sum(axis=2)\n dists.append(dist)\n\n return dists",
"def __build_distance_matrix(self):\n for i in range(0, len(self.__corpus)):\n doc_i = self.__corpus[i]\n for j in range(i + 1, len(self.__corpus)):\n doc_j = self.__corpus[j]\n distance = doc_i.calc_distance(doc_j)\n self.__distance_matrix.append(distance)",
"def distance(self, features, targets):\n cost_matrix = np.zeros((len(targets), len(features)))\n for i, target in enumerate(targets):\n cost_matrix[i, :] = self._metric(self.samples[target], features)\n return cost_matrix",
"def distance(self, features, targets):\n cost_matrix = np.zeros((len(targets), len(features)))\n for i, target in enumerate(targets):\n cost_matrix[i, :] = self._metric(self.samples[target], features)\n return cost_matrix",
"def get_distances(self):\n N = len(self.cells) # Number of cells\n distances = np.zeros([N, N]) # distances between cells\n positions = self.position_matrix() # positions of cells \n \n # get distances between cells (exploit symmetry between upper and lower triangular form)\n for i, position in enumerate(positions[:-1, :]): # Iterate matrix except the last one\n directions = positions[i+1:, :] - position # direction from i to j > i\n distances[i, i+1:] = np.linalg.norm(directions, axis=1) # length of directions\n \n return distances + distances.T # Add lower triangle of matrix to upper ",
"def compute_distances(self, X):\n #print(X.shape, self.Xtr.shape)\n dists = np.zeros((X.shape[0], self.Xtr.shape[0]))\n for i in range(X.shape[0]):\n X_r = np.tile(X[i], (self.Xtr.shape[0], 1))\n dists[i] = np.sqrt(np.sum(np.square(self.Xtr - X_r), axis = 1))\n #print(dists.shape)\n return dists",
"def calcDistance(self):\n # Initialize the distance matrix\n arr = np.repeat(0, self.num_col)\n result_mat = np.repeat(arr, self.num_col)\n result_mat = np.reshape(result_mat, (self.num_col, self.num_col))\n trinary_mat = self.df_trinary.values\n for left_val in TRINARY_VALUES:\n left_func = lambda v: 1 if v==left_val else 0\n left_mat = np.transpose(np.vectorize(left_func)(trinary_mat))\n for right_val in TRINARY_VALUES:\n if left_val == right_val:\n continue\n right_func = lambda v: 1 if v==right_val else 0\n right_mat = np.vectorize(right_func)(trinary_mat)\n # Count the number of occurrences of this combination of values\n # by doing a matrix multiply\n new_mat = np.matmul(left_mat, right_mat)\n # Multiply by the squared distance between the values\n squared_distance = (left_val - right_val)**2\n new_mat = new_mat*squared_distance\n # Accumulate the result\n result_mat = result_mat + new_mat\n # Convert to dataframe\n result_mat = np.vectorize(lambda v: np.sqrt(v)) (result_mat)\n self.df_distance = pd.DataFrame(result_mat, columns=self.columns,\n index=self.columns)",
"def distance_matrix(data):\n D = numpy.zeros( (data.shape[0], data.shape[0]) )\n for i in xrange(data.shape[0]):\n for j in xrange(i):\n D[i,j] = numpy.linalg.norm(data[i,:]-data[j,:])\n D[j,i] = D[i,j]\n\n return D",
"def _calculate_distances(self):\n all_dists = []\n for ref in range(len(self.atoms)):\n if self.atoms[ref].symbol in self.exclude:\n continue\n indices = list(range(ref+1, len(self.atoms)))\n indices = self._filter_excluded(indices)\n if len(indices) == 0:\n continue\n dists = self.atoms.get_distances(ref, indices, mic=True)\n all_dists += list(dists)\n \n # Normalize by the mean distance\n return np.array(all_dists)/np.mean(all_dists)",
"def _calc_distance(self, X):\n distances = np.zeros((X.shape[0], self.n_clusters))\n print(distances.shape)\n for i, centroid in enumerate(self.centroids):\n distances[:, i] = np.linalg.norm(X - centroid, axis=1)\n return distances",
"def getDistancesWithNames(twoDList):\n matrix = []\n for i in range(0,len(twoDList)):\n for j in range(len(twoDList) - len(twoDList) + i):\n SD = determineIdenticalBases(data[i][1], data[j][1])\n temp = []\n if SD[1] != 0:\n p = calculateP(SD[0]+SD[1], SD[1])\n temp.append(data[i][0])\n temp.append(data[j][0]) \n temp.append(estimateMutationsPerSite(p))\n matrix.append(temp)\n return matrix",
"def getDistanceMatrix(self):\n return self.pointcloud.distmat",
"def _calc_distance_features(self):\n d = ()\n for dx, dy in DIRECTIONS:\n if dx and dy:\n d += (list(self.__calc_distance(direction_x=dx, direction_y=dy)), )\n elif dx:\n tmp, _, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n elif dy:\n _, tmp, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n\n self.dist_features = d\n\n self.direc_dist = self.__calc_direc_distance()",
"def compute_distance(self, transpose=False):\n\n # Calculate distance matrix\n if transpose:\n distance_matrix = pdist(self.matrix.T, self.distance)\n else:\n distance_matrix = pdist(self.matrix, self.distance)\n\n # Remove NaNs\n distance_matrix[np.isnan(distance_matrix)] = 1.0\n\n return distance_matrix",
"def get_distance_matrix(self, points):\n return points[:, :, np.newaxis, :]-points[:, np.newaxis, :, :]",
"def test_distances_with_vector_input(self):\n input_vector = self.vectors['dog.n.01']\n distances = self.vectors.distances(input_vector, ['mammal.n.01', 'dog.n.01'])\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))\n\n distances = self.vectors.distances(input_vector)\n self.assertEqual(len(distances), len(self.vectors.vocab))\n self.assertTrue(np.allclose(distances[-1], 10.04756))",
"def norm_dist(face_vectors, f_vector):\n if len(face_vectors) == 0:\n return np.empty((0))\n return np.linalg.norm(face_vectors - f_vector, axis=1)",
"def distance_matrix(d1, d2=None):\n if d2 is None:\n dists = np.zeros(shape=(d1.shape[0], d1.shape[0]))\n for i in range(dists.shape[0]):\n dists[i] = (((d1 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n else:\n dists = np.zeros(shape=(d1.shape[0], d2.shape[0]))\n for i in range(d1.shape[0]):\n dists[i] = (((d2 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n return dists",
"def distances(self):\n self._sort_measurements()\n return self._distances",
"def calcDistanceList(work_list):\n distance_list = []\n for swap in work_list: # for every work item find distance\n distance_list.append(Cluster.calcDistance(*swap))\n return distance_list",
"def distances(self):",
"def nm_dist_mat(self):\n mat = np.zeros([self.N, self.M])\n for n in range(self.N):\n for m in range(self.M):\n mat[n, m] = distance(self.N_coords[n], self.M_coords[m])\n return mat",
"def distance_matrix(sunspots1, sunspots2):\n \n N1 = len(sunspots1)\n N2 = len(sunspots2)\n\n distance_matrix = np.zeros((N1, N2))\n\n for i in list(range(N1)):\n for j in list(range(N2)):\n\n distance_matrix[i, j] = euclidean_dist(sunspots1[i], sunspots2[j])\n\n return distance_matrix",
"def DistanceMatrices(self, dms):\r\n if not isinstance(dms, ListType):\r\n raise TypeError(\"The item passed in as the new list was not a \"\r\n \"list data type.\")\r\n if self._num_dms >= 0 and len(dms) != self._num_dms:\r\n raise ValueError(\"Cannot set %d distance matrices. Must provide \"\r\n \"exactly %d distance matrices.\" % (len(dms),\r\n self._num_dms))\r\n for dm in dms:\r\n if not isinstance(dm, DistanceMatrix):\r\n raise TypeError(\r\n 'Invalid type (%s); expected DistanceMatrix' %\r\n dm.__class__.__name__)\r\n if self._min_dm_size >= 0 and dm.shape[0] < self._min_dm_size:\r\n raise ValueError(\"Distance matrix of size %dx%d is smaller \"\r\n \"than the minimum allowable distance matrix \"\r\n \"size of %dx%d for this analysis.\" %\r\n (dm.shape[0], dm.shape[0], self._min_dm_size,\r\n self._min_dm_size))\r\n self._dms = dms"
] | [
"0.6267775",
"0.60056895",
"0.59760046",
"0.5957225",
"0.5952635",
"0.5888199",
"0.5881407",
"0.5870848",
"0.58344764",
"0.58344764",
"0.5777284",
"0.5726229",
"0.5702075",
"0.5700626",
"0.5683218",
"0.5678633",
"0.56485",
"0.55838466",
"0.5582898",
"0.55652297",
"0.5557965",
"0.5538048",
"0.5524559",
"0.5520437",
"0.55093014",
"0.5492023",
"0.5476906",
"0.54740274",
"0.5468652",
"0.54592836"
] | 0.68410903 | 0 |
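Because calculate_distance_matrix wraps scipy.spatial.distance.pdist, its return value is the condensed distance vector; a short sketch of expanding it into a square matrix (hedged: assumes decks was produced by load_data_set as in the previous record):

from scipy.spatial.distance import squareform

dist_condensed = calculate_distance_matrix(decks, measure="jaccard")
dist_matrix = squareform(dist_condensed)  # (n_decks, n_decks) symmetric matrix with a zero diagonal
assert dist_matrix.shape == (len(decks), len(decks))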
Calculates v-measure, homogeneity, and completeness for each clustering algorithm stored in clustering_alg and adds them to each algorithm's dictionary. | def eval_v_measure_homogeneity_completeness(clustering_alg: List, sdist_euclidean, sdist_jaccard,
labels_true, debug: bool = False):
for i, alg_dict in enumerate(clustering_alg):
if "alg" in alg_dict:
if alg_dict["distance"] == "euclidean":
clustering = alg_dict["alg"].fit(sdist_euclidean)
elif alg_dict["distance"] == "jaccard":
clustering = alg_dict["alg"].fit(sdist_jaccard)
else:
raise ValueError("Unknown distance measure {}. ".format(alg_dict["distance"]) +
"Please choose one of the following distance measures ['euclidean','jaccard']")
labels_predicted = clustering.labels_
alg_dict["labels"] = labels_predicted
else:
labels_predicted = alg_dict["labels"]
alg_dict["homogeneity"], alg_dict["completeness"], alg_dict["v-measure"] = \
homogeneity_completeness_v_measure(labels_true, labels_predicted)
if debug:
print("Alg: " + alg_dict["name"] + "; \t v-measure = " + str(alg_dict["v-measure"])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate_clustering_methods(methods):\r\n results = {}\r\n for m in methods:\r\n res = results[m['name']] = {}\r\n prec = 3\r\n res['Adjusted Rand Score'] = round(sklearn.metrics.adjusted_rand_score(m['target'], m['clustering']),prec)\r\n res['Normalized Mutual Information'] = round(sklearn.metrics.normalized_mutual_info_score(m['target'], m['clustering']),prec)\r\n res['Adjusted Mutual Information'] = round(sklearn.metrics.adjusted_mutual_info_score(m['target'], m['clustering']),prec)\r\n return np.transpose(results)",
"def __init__(self, dictAlg):\n\n # values of dict dictAlg are DataSetList which should have only one\n # element which will be assigned as values in the following lines.\n d = set()\n f = set()\n for i in dictAlg.values():\n d |= set(j.dim for j in i)\n f |= set(j.funcId for j in i)\n\n if len(f) > 1 or len(d) > 1:\n Usage('Expect the data of algorithms for only one function and '\n 'one dimension.')\n\n f = f.pop()\n d = d.pop()\n\n dictMaxEvals = {}\n dictFinalFunVals = {}\n tmpdictAlg = {}\n for alg, i in dictAlg.iteritems():\n if len(i) == 0:\n warnings.warn('Algorithm %s was not tested on f%d %d-D.'\n % (alg, f, d))\n continue\n elif len(i) > 1:\n warnings.warn('Algorithm %s has a problem on f%d %d-D.'\n % (alg, f, d))\n continue\n\n tmpdictAlg[alg] = i[0] # Assign ONLY the first element as value\n dictMaxEvals[alg] = i[0].maxevals\n dictFinalFunVals[alg] = i[0].finalfunvals\n\n dictAlg = tmpdictAlg\n\n sortedAlgs = dictAlg.keys()\n # algorithms will be sorted along sortedAlgs which is now a fixed list\n\n # Align ERT\n erts = list(np.transpose(np.vstack([dictAlg[i].target, dictAlg[i].ert]))\n for i in sortedAlgs)\n res = readalign.alignArrayData(readalign.HArrayMultiReader(erts))\n\n resalgs = []\n reserts = []\n # For each function value\n for i in res:\n # Find best algorithm\n curerts = i[1:]\n assert len((np.isnan(curerts) == False)) > 0\n currentbestert = np.inf\n currentbestalg = ''\n for j, tmpert in enumerate(curerts):\n if np.isnan(tmpert):\n continue # TODO: don't disregard these entries\n if tmpert == currentbestert:\n # TODO: what do we do in case of ties?\n # look at function values corresponding to the ERT?\n # Look at the function evaluations? the success ratio?\n pass\n elif tmpert < currentbestert:\n currentbestert = tmpert\n currentbestalg = sortedAlgs[j]\n reserts.append(currentbestert)\n resalgs.append(currentbestalg)\n\n dictiter = {}\n dictcurLine = {}\n resDataSet = []\n\n # write down the #fevals to reach the function value.\n for funval, alg in zip(res[:, 0], resalgs):\n it = dictiter.setdefault(alg, iter(dictAlg[alg].evals))\n curLine = dictcurLine.setdefault(alg, np.array([np.inf, 0]))\n while curLine[0] > funval:\n try:\n curLine = it.next()\n except StopIteration:\n break\n dictcurLine[alg] = curLine.copy()\n tmp = curLine.copy()\n tmp[0] = funval\n resDataSet.append(tmp)\n\n setalgs = set(resalgs)\n dictFunValsNoFail = {}\n for alg in setalgs:\n for curline in dictAlg[alg].funvals:\n if (curline[1:] == dictAlg[alg].finalfunvals).any():\n # only works because the funvals are monotonous\n break\n dictFunValsNoFail[alg] = curline.copy()\n\n self.evals = resDataSet\n # evals is not a np array but a list of arrays because they may not\n # all be of the same size.\n self.maxevals = dict((i, dictMaxEvals[i]) for i in setalgs)\n self.finalfunvals = dict((i, dictFinalFunVals[i]) for i in setalgs)\n self.funvalsnofail = dictFunValsNoFail\n self.dim = d\n self.funcId = f\n self.algs = resalgs\n self.algId = 'Virtual Best Algorithm'\n self.comment = 'Combination of ' + ', '.join(sortedAlgs)\n self.ert = np.array(reserts)\n self.target = res[:, 0]\n\n bestfinalfunvals = np.array([np.inf])\n for alg in sortedAlgs:\n if np.median(dictAlg[alg].finalfunvals) < np.median(bestfinalfunvals):\n bestfinalfunvals = dictAlg[alg].finalfunvals\n algbestfinalfunvals = alg\n self.bestfinalfunvals = bestfinalfunvals\n self.algbestfinalfunvals = algbestfinalfunvals",
"def get_clustering_algorithm_class(cls):\n return {\n \"spectral\": SpectralClusteringAlgorithm,\n \"dbscan\": DBSCANAlgorithm,\n \"gromos\": GromosAlgorithm,\n \"kmedoids\": KMedoidsAlgorithm,\n \"random\": RandomClusteringAlgorithm,\n \"hierarchical\": HierarchicalClusteringAlgorithm\n }",
"def cluster_analysis(\n clusterers: list,\n hyperparameter_grids: list,\n eval_metrics_grid: list,\n eval_metrics_params: dict,\n word_embeddings: np.ndarray,\n words_vocabulary: list,\n word_to_int: dict,\n word_embeddings_normalized: np.ndarray = None,\n compute_pairwise_word_distances: bool = False,\n compute_pairwise_word_distances_normalized: bool = False,\n return_word_vectors: bool = False,\n save_result_to_disk: bool = False,\n output_dir: Optional[str] = None,\n model_name: Optional[str] = None,\n dataset_name: Optional[str] = None,\n output_filepath_suffix: Optional[str] = None,\n) -> Union[dict, tuple]:\n # Create word vectors from given words/vocabulary\n word_vectors = words_to_vectors(\n words_vocabulary=words_vocabulary,\n word_to_int=word_to_int,\n word_embeddings=word_embeddings,\n )\n\n # Create normalized word vectors from given words/vocabulary if specified.\n word_vectors_normalized = None\n if word_embeddings_normalized is not None:\n word_vectors_normalized = words_to_vectors(\n words_vocabulary=words_vocabulary,\n word_to_int=word_to_int,\n word_embeddings=word_embeddings_normalized,\n )\n\n if compute_pairwise_word_distances:\n word_vectors_pairwise_distances = pairwise_cosine_distances(word_vectors)\n if (\n compute_pairwise_word_distances_normalized\n and word_vectors_normalized is not None\n ):\n normalized_word_vectors_pairwise_distances = euclidean_distances(\n word_vectors_normalized\n )\n\n # Perform cluster analysis\n clusterers_result = {}\n unique_cluster_metrics = set()\n for clusterer_tuple, hyperparameter_grid, eval_metrics in zip(\n clusterers, hyperparameter_grids, eval_metrics_grid\n ):\n if len(clusterer_tuple) == 3:\n (clusterer_name, clusterer_cls, clusterer_use_normalized) = clusterer_tuple\n else:\n clusterer_use_normalized = False\n (clusterer_name, clusterer_cls) = clusterer_tuple\n print(f\"-- Clustering using {clusterer_name} --\")\n clusterers_result[clusterer_name] = {\n \"cluster_labels\": [],\n \"cluster_params\": [],\n \"cluster_metrics\": {},\n }\n\n # Do clustering for each set of hyperparameters\n param_grid = ParameterGrid(hyperparameter_grid)\n for params_idx, params in enumerate(tqdm(param_grid)):\n clusterers_result[clusterer_name][\"cluster_params\"].append(params)\n\n # Add exception for ward linkage clustering.\n if (\n clusterer_cls is AgglomerativeClustering\n and params.get(\"linkage\") == \"ward\"\n and word_vectors_normalized is not None\n ):\n params = {**params, \"affinity\": \"euclidean\"}\n clusterer_instance = clusterer_cls(**params)\n fit_predict_X = word_vectors_normalized\n else:\n clusterer_instance = clusterer_cls(**params)\n if (\n params.get(\"affinity\") == \"precomputed\"\n or params.get(\"metric\") == \"precomputed\"\n ):\n if (\n clusterer_use_normalized\n and compute_pairwise_word_distances_normalized\n ):\n fit_predict_X = normalized_word_vectors_pairwise_distances\n elif compute_pairwise_word_distances:\n fit_predict_X = word_vectors_pairwise_distances\n else:\n if clusterer_use_normalized and word_vectors_normalized is not None:\n fit_predict_X = word_vectors_normalized\n else:\n fit_predict_X = word_vectors\n\n # Use fit_predict if it is available.\n if getattr(clusterer_instance, \"fit_predict\", None) is not None:\n predicted_labels = clusterer_instance.fit_predict(fit_predict_X)\n else:\n clusterer_instance.fit(fit_predict_X)\n predicted_labels = clusterer_instance.predict(fit_predict_X)\n\n # Separate noise labels into clusters\n if clusterer_cls is HDBSCAN:\n predicted_labels = 
separate_noise_labels_into_clusters(predicted_labels)\n\n clusterers_result[clusterer_name][\"cluster_labels\"].append(predicted_labels)\n\n # Evaluate predicted cluster labels using internal evaluation metrics\n for eval_metric_tuple in eval_metrics:\n if len(eval_metric_tuple) == 3:\n (\n eval_metric_key,\n eval_metric,\n eval_metric_use_normalized,\n ) = eval_metric_tuple\n else:\n eval_metric_use_normalized = False\n (eval_metric_key, eval_metric) = eval_metric_tuple\n eval_metric_params = eval_metrics_params.get(eval_metric_key, {})\n if (\n compute_pairwise_word_distances\n and eval_metric_params.get(\"metric\") == \"precomputed\"\n ):\n if (\n eval_metric_use_normalized\n and compute_pairwise_word_distances_normalized\n ):\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=normalized_word_vectors_pairwise_distances,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=word_vectors_pairwise_distances,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n if (\n eval_metric_use_normalized\n and word_vectors_normalized is not None\n ):\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=word_vectors_normalized,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n else:\n metric_name, metric_score, metric_obj_max = eval_metric(\n word_embeddings=word_vectors,\n cluster_labels=predicted_labels,\n clusterer=clusterer_instance,\n **eval_metric_params,\n )\n unique_cluster_metrics.add(metric_name)\n\n # Initialize metric result\n if (\n metric_name\n not in clusterers_result[clusterer_name][\"cluster_metrics\"]\n ):\n clusterers_result[clusterer_name][\"cluster_metrics\"][\n metric_name\n ] = {\n \"metric_scores\": [],\n \"metric_obj_max\": metric_obj_max,\n \"best_metric_score_indices\": [],\n }\n\n clusterers_result[clusterer_name][\"cluster_metrics\"][metric_name][\n \"metric_scores\"\n ].append(metric_score)\n\n # Set best metric score indices\n if params_idx == len(param_grid) - 1:\n best_metric_score_indices = np.argsort(\n clusterers_result[clusterer_name][\"cluster_metrics\"][\n metric_name\n ][\"metric_scores\"]\n )\n if metric_obj_max:\n best_metric_score_indices = best_metric_score_indices[::-1]\n clusterers_result[clusterer_name][\"cluster_metrics\"][metric_name][\n \"best_metric_score_indices\"\n ] = best_metric_score_indices\n\n # Find preferred clusterers for each cluster metric (from best to worst)\n metric_preferred_clusterers = {}\n for cluster_metric_name in unique_cluster_metrics:\n metric_obj_max = None\n metric_best_scores = []\n clusterer_names = []\n for clusterer_name, clusterer_result in clusterers_result.items():\n if cluster_metric_name in clusterer_result[\"cluster_metrics\"]:\n clusterer_names.append(clusterer_name)\n metric_result = clusterer_result[\"cluster_metrics\"][cluster_metric_name]\n if metric_obj_max is None:\n metric_obj_max = metric_result[\"metric_obj_max\"]\n best_metric_score = metric_result[\"metric_scores\"][\n metric_result[\"best_metric_score_indices\"][0]\n ]\n metric_best_scores.append(best_metric_score)\n clusterer_names = np.array(clusterer_names)\n metric_best_scores = np.array(metric_best_scores)\n\n metric_best_scores_sorted_indices = np.argsort(metric_best_scores)\n if metric_obj_max:\n metric_best_scores_sorted_indices = metric_best_scores_sorted_indices[::-1]\n 
metric_preferred_clusterers[cluster_metric_name] = {\n \"clusterer_names\": clusterer_names[metric_best_scores_sorted_indices],\n \"best_metric_scores\": metric_best_scores[metric_best_scores_sorted_indices],\n }\n\n # Return result as dictionary\n cluster_analysis_result = {\n \"clusterers\": clusterers_result,\n \"metric_preferred_clusterers\": metric_preferred_clusterers,\n }\n\n if return_word_vectors:\n if compute_pairwise_word_distances:\n cluster_analysis_result = (\n cluster_analysis_result,\n word_vectors,\n word_vectors_pairwise_distances,\n )\n else:\n cluster_analysis_result = (cluster_analysis_result, word_vectors)\n\n # Save result to disk\n if save_result_to_disk:\n save_cluster_result_to_disk(\n cluster_result=cluster_analysis_result,\n output_dir=output_dir,\n model_name=model_name,\n dataset_name=dataset_name,\n output_filepath_suffix=output_filepath_suffix,\n )\n\n return cluster_analysis_result",
"def eval_cluster_contingency(clustering_alg: List, labels_true, sdist):\n for (alg_name, alg_dict) in clustering_alg:\n if \"alg\" in alg_dict:\n clustering = alg_dict[\"alg\"].fit(sdist)\n labels_pred = clustering.labels_\n alg_dict[\"labels\"] = labels_pred\n else:\n labels_pred = alg_dict[\"labels\"]\n\n pred_label_dict, new_labels = normalize_labels(labels_pred)\n\n alg_dict[\"cm\"] = contingency_matrix(labels_true, new_labels)",
"def _eval_clustering(self, gen_reviews, clusters, embedding_model, clustering):\n result = []\n preds = self.predict_gen(gen_reviews, embedding_model, clustering)\n\n acc = accuracy_score(np.array(clusters), np.array(preds))\n conf = confusion_matrix(np.array(clusters), np.array(preds))\n\n return acc, conf",
"def generate_clustering_info(self, algorithm_type, clustering_parameters, clusterings = []):\n clustering_info = {}\n for i, running_parameters in enumerate(clustering_parameters):\n\n clustering_id = \"clustering_%04d\"%(self.current_clustering_id)\n self.current_clustering_id += 1\n clustering_info[clustering_id] = {\n \"type\":algorithm_type,\n \"clustering\": None,\n \"parameters\": running_parameters\n }\n\n if clusterings != []:\n clustering_info[clustering_id][\"clustering\"] = clusterings[i]\n\n return clustering_info",
"def run_algorithm(algorithm, algorithm_kwargs, clustering_id):\n clustering = algorithm.perform_clustering(algorithm_kwargs)\n return (clustering_id, clustering)",
"def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = fle.split(\"/\")[-1]\n condition = fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - 
trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()",
"def interpret_clusters(self, split=0.7, all_demos=None, num_clusters=None, \n max_depth=CLUSTER_DEPTH, data=None, labels=None, verbose=True):\n all_demos = self.all_data if all_demos is None else all_demos\n clusters = self.get_ordered_clusters(labels, num_clusters)\n data = self.demos if data is None else data\n labels = self.labels if labels is None else labels\n\n cluster_formulas = []\n counter = 0\n sep = \"\\n \"\n for c in clusters:\n counter += 1\n res = self.sample_from_clusters(num_samples=split,\n all_data=all_demos,\n pos_validation=True, \n neg_validation=True,\n which_cluster=counter)\n positive_samples, val_positive_samples = res[0], res[1]\n negative_samples, val_negative_samples = res[2], res[3]\n z = 0\n for d in positive_samples:\n if d[1] == 0: z += 1\n\n cluster_data = {'pos': positive_samples,\n 'neg': negative_samples}\n val_cluster_data = {'pos': val_positive_samples,\n 'neg': val_negative_samples}\n\n if verbose: print(sep +\"Checking formulas \" + \\\n \"with max depth {}\\n\".format(max_depth))\n\n cluster_formula, value_formula = wrapper_train(max_depth,\n cluster_data, \n val_cluster_data,\n verbose=verbose,\n pred_data=[self.pipeline_X,\n self.pipeline_y])\n if cluster_formula is not None:\n print(cluster_formula)\n\n cluster_formulas.append((c, cluster_formula, value_formula))\n self.reset_pipeline()\n\n return cluster_formulas",
"def run_evaluation(self, n_runs=1, n_points=1000, n_iterations=1, min_n_components=2, max_n_components=25,\n\t\t\t\t\t n_splits=3, save_data=False, file_label='',n_microstates=None, all_methods=True,\n\t\t\t\t\t assign_transition_points=True):\n\n\t\tif self.presampled_data is not None:\n\t\t\tsampled_data = self.presampled_data[0]\n\t\t\ttrue_clustering = self.presampled_data[1]\n\t\t\tn_runs = sampled_data.shape[0]\n\n\t\tself.cluster_score_ami_kmeans_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tself.cluster_score_fm_kmeans_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tself.cluster_score_vm_kmeans_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tdata = self.toy_model_.sample(3)\n\n\t\t# Create free energy estimators\n\t\tgmm_FE = GMM_FE.FreeEnergyClustering(data, min_n_components=min_n_components, max_n_components=max_n_components,\n\t\t\t\t\t\t\t\t\t x_lims=self.x_lims_, n_grids=self.n_grids_, stack_landscapes=False,\n\t\t\t\t\t\t\t\t\t n_splits=n_splits, n_iterations=n_iterations,convergence_tol=self.convergence_tol_,\n\t\t\t\t\t\t\t\t\t verbose=self.verbose_)\n\n\t\tkm = kmc.KMeansCluster(min_n_components, max_n_components)\n\t\taw = awc.AWCluster(min_n_components, max_n_components)\n\t\tspectral = sc.SpectralCluster(min_n_components, max_n_components)\n\n\t\tall_data = []\n\t\tfor i_run in range(n_runs):\n\t\t\tprint(\"Run: \"+str(i_run+1)+'/'+str(n_runs))\n\n\t\t\tif self.presampled_data is None:\n\t\t\t\t# Sample data\n\t\t\t\tdata = self.toy_model_.sample(n_points)\n\t\t\telse:\n\t\t\t\tdata = sampled_data[i_run]\n\t\t\t\n\t\t\tall_data.append(data)\n\n\t\t\tprint('Shape data: ' + str(data.shape))\n\n\t\t\t# Set data in model and estimate GMM density\n\t\t\tgmm_FE.data_ = data\n\t\t\tcoords, est_FE_landsc, FE_points = gmm_FE.landscape()\n\n\t\t\t# Get true cluster labels\n\t\t\tif self.presampled_data is None:\n\t\t\t\tif hasattr(self.toy_model_, \"assign_cluster_labels\"):\n\t\t\t\t\tself.true_labels_ = self.toy_model_.assign_cluster_labels(data)\n\t\t\t\telse:\n\t\t\t\t\tprint('Setting true labels.')\n\t\t\t\t\tself.true_labels_, _ = self.true_FE_.cluster(data, np.zeros(data.shape[0]))\n\t\t\telse:\n\t\t\t\tself.true_labels_ = true_clustering[i_run]\n\t\t\t\n\t\t\t# Cluster data with different methods\n\t\t\tif n_microstates is None:\n\t\t\t\tself.FE_min_labels, _ = gmm_FE.cluster(data, FE_points, assign_transition_points=assign_transition_points)\n\t\t\telse:\n\t\t\t\tkmea = KMeans(n_clusters=n_microstates).fit(data[::2])\n\t\t\t\tmicrostate_centers = kmea.cluster_centers_\n\t\t\t\tself.FE_min_labels, _ = gmm_FE.cluster(microstate_centers, FE_points, data, assign_transition_points=assign_transition_points, unravel_grid=False)\n\n\t\t\tif all_methods:\n\t\t\t\tself.km_labels = km.cluster(data)\n\t\t\t\tself.aw_labels = 
aw.cluster(data)\n\t\t\t\tself.spectral_labels = spectral.cluster(data)\n\n\t\t\t# Score clustering using different scoring metrics\n\t\t\t# V-measure score\n\t\t\tself.cluster_score_vm_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'vm')\n\t\t\tprint(self.cluster_score_vm_GMM_FE_min_[i_run])\n\t\t\tif all_methods:\n\t\t\t\tself.cluster_score_vm_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'vm')\n\t\t\t\tself.cluster_score_vm_kmeans_[i_run] = self._score_clustering(self.km_labels,'vm')\n\t\t\t\tself.cluster_score_vm_AW_[i_run] = self._score_clustering(self.aw_labels,'vm')\n\t\t\t\tself.cluster_score_vm_spectral_[i_run] = self._score_clustering(self.spectral_labels,'vm')\n\n\t\t\t\t# Adjusted MI\n\t\t\t\tself.cluster_score_ami_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'ami')\n\t\t\t\tself.cluster_score_ami_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'ami')\n\t\t\t\tself.cluster_score_ami_kmeans_[i_run] = self._score_clustering(self.km_labels,'ami')\n\t\t\t\tself.cluster_score_ami_AW_[i_run] = self._score_clustering(self.aw_labels,'ami')\n\t\t\t\tself.cluster_score_ami_spectral_[i_run] = self._score_clustering(self.spectral_labels,'ami')\n\n\t\t\t\t# Fowlkes Mallows\n\t\t\t\tself.cluster_score_fm_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'fm')\n\t\t\t\tself.cluster_score_fm_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'fm')\n\t\t\t\tself.cluster_score_fm_kmeans_[i_run] = self._score_clustering(self.km_labels,'fm')\n\t\t\t\tself.cluster_score_fm_AW_[i_run] = self._score_clustering(self.aw_labels,'fm')\n\t\t\t\tself.cluster_score_fm_spectral_[i_run] = self._score_clustering(self.spectral_labels,'fm')\n\t\t\n\t\tif save_data:\n\t\t\tif self.presampled_data is None:\n\t\t\t\tnp.save('data_out/sampled_data_'+self.toy_model_.name+file_label+'.npy',all_data)\n\n\t\t\tif False:\n\t\t\t\tnp.save('data_out/cluster_score_fm_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_fm_GMM_FE_min_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_kmeans_' + self.toy_model_.name +file_label +'.npy', self.cluster_score_fm_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_AW_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_spectral_)\n\n\t\t\t\tnp.save('data_out/cluster_score_ami_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_ami_GMM_FE_min_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_kmeans_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_AW_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_spectral_)\n\n\t\t\tnp.save('data_out/cluster_score_vm_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_vm_GMM_FE_min_)\n\t\t\tif all_methods:\n\t\t\t\tnp.save('data_out/cluster_score_vm_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_kmeans_' + self.toy_model_.name 
+ file_label+'.npy', self.cluster_score_vm_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_AW_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_spectral_)\n\t\treturn",
"def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return",
"def getAllContributingAlgorithmsToBest(algnamelist, target_lb=1e-8, \n target_ub=1e2):\n \n print \"Generating best algorithm data from given algorithm list...\\n\", \n customgenerate(algnamelist)\n \n bestalgfilepath = 'bestCustomAlg'\n picklefilename = os.path.join(bestalgfilepath, 'bestalg.pickle')\n fid = open(picklefilename, 'r')\n bestalgentries = pickle.load(fid)\n fid.close()\n print 'loading of best algorithm data done.'\n \n countsperalgorithm = {}\n for (d, f) in bestalgentries:\n print 'dimension:', d, ', function:', f\n print f\n setofalgs = set(bestalgentries[d,f].algs)\n # pre-processing data to only look at targets >= target_lb:\n correctedbestalgentries = []\n for i in range(0,len(bestalgentries[d,f].target)):\n if ((bestalgentries[d,f].target[i] >= target_lb) and\n (bestalgentries[d,f].target[i] <= target_ub)):\n \n correctedbestalgentries.append(bestalgentries[d,f].algs[i])\n print len(correctedbestalgentries)\n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += correctedbestalgentries.count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"",
"def gen_cluster_accuracies():\n accuracies = {}\n with Parallel(n_jobs=morphs.parallel.N_JOBS) as parallel:\n for block_path in morphs.paths.blocks():\n print(block_path)\n spikes = morphs.load.ephys_data(block_path, collapse_endpoints=True)\n\n if len(spikes[\"recording\"].unique()) >= 1:\n template_spikes = spikes[spikes[\"stim_id\"].isin(list(\"abcdefgh\"))]\n assert len(template_spikes) > 0\n cluster_groups = template_spikes.groupby(\"cluster\")\n\n morph_dims = spikes.morph_dim.unique()\n morph_dims = morph_dims[~pd.isnull(morph_dims)]\n morph_dims.sort()\n\n max_num_reps = np.max(\n [\n len(stim_group.groupby(by=[\"recording\", \"stim_presentation\"]))\n for stim_id, stim_group in template_spikes.groupby(\"stim_id\")\n ]\n )\n\n accuracies_list = parallel(\n delayed(cluster_accuracy)(\n cluster, cluster_group, morph_dims, max_num_reps\n )\n for (cluster, cluster_group) in cluster_groups\n )\n\n accuracies[block_path] = pd.concat(accuracies_list)\n\n morphs.paths.PROCESSED_DIR.mkdir(parents=True, exist_ok=True)\n with open(morphs.paths.ACCURACIES_PKL.as_posix(), \"wb\") as f:\n pickle.dump(accuracies, f)",
"def calculate_all_metrcis(self):\n self.calculate_gc_metrcis()\n self.calculate_sam_metrics()\n self.calculate_classification_metrics()\n self.calculate_losses()",
"def run(\n self,\n number_of_clusters=None,\n max_K=8,\n method_clustering=\"pam\",\n init_clustering=\"random\",\n max_iter_clustering=100,\n discart_value_JI=0.6,\n bootstraps_JI=100,\n bootstraps_p_value=100,\n n_jobs=1,\n verbose=1,\n ):\n\n if number_of_clusters is None:\n self.k = optimizer.optimizeK(\n self.distance_matrix,\n self.y.to_numpy(),\n self.model_type,\n max_K,\n method_clustering,\n init_clustering,\n max_iter_clustering,\n discart_value_JI,\n bootstraps_JI,\n self.random_state,\n n_jobs,\n verbose,\n )\n\n if self.k == 1:\n warnings.warn(\"No stable clusters were found!\")\n return\n\n print(f\"Optimal number of cluster is: {self.k}\")\n\n else:\n self.k = number_of_clusters\n print(f\"Use {self.k} as number of cluster\")\n\n self.cluster_labels = (\n kmedoids.KMedoids(\n n_clusters=self.k,\n method=method_clustering,\n init=init_clustering,\n metric=\"precomputed\",\n max_iter=max_iter_clustering,\n random_state=self.random_state,\n )\n .fit(self.distance_matrix)\n .labels_\n )\n\n (\n self._data_clustering_ranked,\n self.p_value_of_features,\n ) = stats.calculate_global_feature_importance(\n self.X, self.y, self.cluster_labels, self.model_type\n )\n self._p_value_of_features_per_cluster = (\n stats.calculate_local_feature_importance(\n self._data_clustering_ranked, bootstraps_p_value\n )\n )",
"def get_sklearn_algorithms(verbose=False):\n from collections import defaultdict\n import importlib\n import sklearn\n algos = defaultdict(list)\n if verbose: print(dir(sklearn))\n for nom_module in dir(sklearn):\n if verbose: print(nom_module)\n try:\n to_import = \"sklearn.%s\" % nom_module\n module = importlib.import_module(to_import)\n for nom_fonction in dir(module):\n fonction = getattr(module, nom_fonction)\n if hasattr(fonction, \"fit\"):\n if verbose: print(\" nom algorithme = \", nom_fonction)\n algos[nom_module].append(fonction)\n except Exception as e:\n if verbose: print(e)\n if verbose: print(\"=\" * 30)\n return algos",
"def build_algorithm(self, algorithm_type):\n distance_matrix = self.matrix_handler.distance_matrix\n algorithm_execution_parameters = {}\n if algorithm_type == \"spectral\":\n # We need to set number of clusters for performance and we get sigma if defined\n algorithm_execution_parameters[\"max_clusters\"] = self.evaluation_parameters[\"maximum_clusters\"]\n if \"sigma\" in self.clustering_parameters[\"algorithms\"][\"spectral\"]:\n algorithm_execution_parameters[\"sigma_sq\"] = self.clustering_parameters[\"algorithms\"][\"spectral\"][\"sigma\"]\n # else it calculates its own sigma\n\n if algorithm_type in [\"spectral\",\"dbscan\",\"gromos\",\"kmedoids\",\"random\",\"hierarchical\"] :\n return ClusteringExplorer.get_clustering_algorithm_class()[algorithm_type](distance_matrix, **algorithm_execution_parameters)\n else:\n print \"[ERROR][ClusteringExplorer::build_algorithms] Not known algorithm type ( %s )\"%(algorithm_type)\n self.notify(\"SHUTDOWN\", \"Not known algorithm type ( %s )\"%(algorithm_type))\n exit()",
"def calculate_all_scores(best_phenotype, clustering_algorithm, dataset, y):\n samples_dist_matrix = distance.squareform(distance.pdist(dataset.values))\n allowed_fitness = list(DICT_ALLOWED_FITNESSES.keys())\n scores = [(fitness_name, fitness_value) for fitness_name, fitness_value in\n zip(allowed_fitness,\n eval_multiple(dataset.values, clustering_algorithm, allowed_fitness, samples_dist_matrix, y,\n best_phenotype))]\n scores = dict(scores)\n return scores",
"def measureAll(authors_texts,sectorialized_agents):\n authors_texts=P.text.aux.textFromAuthors(authors_texts,self.topm_dict[\"sectorialized_agents\"])\n authors_measures={}\n # análise de cada mensagem e de cada autor\n for author in authors_texts:\n authors_measures[author]={}\n texts=authors_texts[author]\n authors_measures[author][\"raw_strings\"]=P.text.raw.analyseAll(texts)\n authors_measures[author][\"pos\"]= P.text.pos.analyseAll(authors_analysis[author][\"raw_analysis\"])\n authors_measures[author][ \"wordnet\" ]=P.text.wordnet.analyseAll(authors_analysis[author][\"pos_analysis\"])\n authors_measures[author][\"tfIdf\"]=P.text.tfIdf.analyseAll(texts) # tfIdf de cada texto e do autor, numeric: mean e std das distancias\n # análise de cada setor e da estrutura toda\n# sectors_texts=P.text.aux.textFromSectors(authors_text,sectorialized_agents)\n sectors_measures={}\n for sector in sectorialized_agents:\n sectors_measures[sector][\"raw_strings\"]=P.text.raw.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"pos\"]= P.text.pos.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"wordnet\"]= P.text.wordnet.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n # tfIdf de cada texto e de cada autor, numeric: mean e std das distancias por texto e por autor, e media e etd dos autores\n sectors_measures[sector][\"tfIdf\"]= P.text.tfIdf.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n\n# texts=[sectors_texts[i] for i in (\"peripherals\",\"intermediaries\",\"hubs\")]\n# sectors_analysis[\"raw_strings\"]=P.text.raw.analyseAll(texts)\n# sectors_analysis[\"pos\"]= P.text.pos.analyseAll(sectors_analysis[\"raw_analysis\"])\n# sectors_analysis[ \"wordnet\" ]=P.text.wordnet.analyseAll(sectors_analysis[\"pos_analysis\"])\n# sectors_analysis[\"tfIdf\"]=P.text.tfIdf.tfIdf(texts)\n\n overall_measures[\"raw_strings\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"pos\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"wordnet\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n # tfIdf measurespor texto, autor e setor, numeric: media e desvio das distancias por cada grupo, media e desvio dos setores e dos autores\n overall_measures[\"tfIdf\"]=P.text.tfIdf.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n\n del authors_texts,sectorialized_agents,author, sector\n return locals()",
"def evaluate(self, clustering):\n # Pca for each one of the clusters\n pca_mean_val = 0.;\n MAX_ELEMENTS = 1000\n for c in clustering.clusters:\n # Pick the coordinates (ensuring that we are copying them)\n element_indexes = c.all_elements\n ###################\n # Performance hack\n ###################\n # As it can be very slow for big clusters (i.e. > 3k elements) we'll compress this clusters \n # before calculating PCA. It should increase variance but will allow calculations.\n # It should use the kmedoids compressor\n if len(c.all_elements) > MAX_ELEMENTS:\n element_indexes = c.get_random_sample(MAX_ELEMENTS)\n print \"[PCA] Random sampling too big cluster to improve performance (%d elements -> %d elements).\"%(len(c.all_elements),MAX_ELEMENTS)\n ###################\n \n fitting_coordinates_of_this_cluster = self.fitting_coordinates[element_indexes]\n \n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster)\n \n if self.calculation_coordinates is not None:\n calculation_coordinates_of_this_cluster = self.calculation_coordinates[element_indexes]\n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster,\n calculationCoordsets = calculation_coordinates_of_this_cluster)\n \n # Make an iterative superposition (to get the minimum RMSD of all with respect to a mean conformation)\n calculator.iterativeSuperposition()\n\n # Calculate the covariance matrix\n if self.calculation_coordinates is None:\n covariance_matrix = PCAMetric.create_covariance_matrix(fitting_coordinates_of_this_cluster)\n else:\n covariance_matrix = PCAMetric.create_covariance_matrix(calculation_coordinates_of_this_cluster)\n \n # And then the eigenvalue we are interested in\n pca_mean_val += PCAMetric.calculate_biggest_eigenvalue(covariance_matrix)\n print \"PCA finished\"\n return pca_mean_val /clustering.total_number_of_elements",
"def _compute_util_data(self):\n\n print(\"Computing PCA of document vectors.\")\n self.pca = PCA(n_components = 3)\n\n print(\"Computing document clusters in PCA basis.\")\n inferred_vecs = np.array([self.model.infer_vector(doc.words) for doc in self.tagged_docs])\n self.pca_reduced_vecs = self.pca.fit_transform(inferred_vecs)\n n_clusters = 25 # TODO find way to determine approx cluster size\n self.kmeans = KMeans(init = 'k-means++', n_clusters = n_clusters, random_state = 0)\n self.kmeans_preds = self.kmeans.fit_predict(self.pca_reduced_vecs)",
"def compute_feature_properties(self):\n\n self.valuecounts = {}\n self.unique_values = {}\n self.missing_ratios = {}\n self.counts = {}\n self.codemaps = {}\n for f in self.features:\n # Compute various things\n all_values = [self.data[l].get(f,\"?\") for l in self.data]\n missing_data_ratio = all_values.count(\"?\") / (1.0*len(all_values))\n non_q_values = [v for v in all_values if v != \"?\"]\n counts = {}\n for v in non_q_values:\n counts[v] = non_q_values.count(v)\n unique_values = list(set(non_q_values))\n # Sort unique_values carefully.\n # Possibly all feature values are numeric strings, e.g. \"1\", \"2\", \"3\".\n # If we sort these as strings then we get weird things like \"10\" < \"2\".\n # This can actually matter for things like ordinal models.\n # So convert these to ints first...\n if all([v.isdigit() for v in unique_values]):\n unique_values = list(map(int, unique_values))\n unique_values.sort()\n unique_values = list(map(str, unique_values))\n # ...otherwise, just sort normally\n else:\n unique_values.sort()\n self.unique_values[f] = unique_values\n\n N = len(unique_values)\n self.valuecounts[f] = N\n self.missing_ratios[f] = missing_data_ratio\n self.counts[f] = counts\n self.codemaps[f] = self.build_codemap(unique_values)",
"def compute_statistics(self):\n for i in range(len(self.wine_matrix[0, :])):\n feature = self.wine_matrix[:, i]\n self.wine_stats['feature ' + str(i)] = {}\n if i == 11: # results column\n self.wine_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.wine_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()\n\n for i in range(len(self.cancer_matrix[0, :])):\n feature = self.cancer_matrix[:, i]\n self.cancer_stats['feature ' + str(i)] = {}\n if i == 10: # results column\n self.cancer_stats['feature ' + str(i)]['positive_class_ratio'] = (feature == 1).sum() / len(feature)\n null, self.cancer_stats['feature ' + str(i)]['pvalue'] = stats.normaltest(feature)\n\n # plot\n # pyplot.hist(feature, bins=50)\n # pyplot.show()",
"def aggregate_results(results):\n\n for (config,con,dec),folds in results.iteritems():\n m = MODEL_PATTERN.match(config)\n if m:\n mode = m.groupdict()['mode'] # mle, rl, mrt, ...\n model = m.groupdict()['model'] # haem, hacm, hard, ...\n align = m.groupdict()['align'] # crp, cls ...\n else:\n mode, model, align = '', '', ''\n # mean accuracies across seeds for each fold\n foldaccuracies = []\n # we count number of models over folds and seeds\n num_individual_models = 0\n\n for foldname,fold in folds.items():\n if 'Q' in options.mode:\n seedaccurracies = fold.values()[:1] if fold.values() else [] # pick one\n# SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n else:\n seedaccurracies = []\n for seed_acc in fold.values():\n seedaccurracies.append(seed_acc)\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n # aggregate on fold level\n fold['__MEAN__'] = float(np.mean(seedaccurracies))\n fold['__SD__'] = float(np.std(seedaccurracies))\n l = len(seedaccurracies)\n num_individual_models += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__MEAN__')] += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__SD__')] += l\n\n # statistics over seeds for this fold\n fold['__STATS__'] = fold['__MEAN__'], fold['__SD__'], l\n foldaccuracies.append(fold['__MEAN__'])\n # aggregate on (config, condition, decoding) level\n folds['__MEAN__'] = float(np.mean(foldaccuracies))\n folds['__SD__'] = float(np.std(foldaccuracies))\n # statistics over folds for this (config, condition, decoding)\n folds['__STATS__'] = folds['__MEAN__'], folds['__SD__'], num_individual_models",
"def compute_metrics(\n Phi, optimal_subspace\n):\n feature_norm = jnp.linalg.norm(Phi) / Phi.shape[0]\n cosine_similarity = compute_cosine_similarity(Phi, optimal_subspace)\n\n metrics = {\n 'cosine_similarity': cosine_similarity,\n 'feature_norm': feature_norm,\n 'eigengame_subspace_distance': eigengame_subspace_distance(\n Phi, optimal_subspace\n ),\n }\n\n _, d = Phi.shape\n if d > 1:\n grassman_distance = compute_grassman_distance(Phi, optimal_subspace)\n metrics |= {'grassman_distance': grassman_distance}\n elif d == 1:\n dot_product = compute_normalized_dot_product(Phi, optimal_subspace)\n metrics |= {'dot_product': dot_product}\n\n return metrics",
"def extractBestAlgorithms(args = algs2009, f_factor=2,\n target_lb=1e-8, target_ub=1e22):\n\n # TODO: use pproc.TargetValues class as input target values\n # default target values:\n targets = pproc.TargetValues(\n 10**np.arange(np.log10(max((1e-8, target_lb))),\n np.log10(target_ub) + 1e-9, 0.2))\n # there should be a simpler way to express this to become the\n # interface of this function\n\n print 'Loading algorithm data from given algorithm list...\\n' \n\n verbose = True\n dsList, sortedAlgs, dictAlg = pproc.processInputArgs(args, verbose=verbose)\n\n print 'This may take a while (depending on the number of algorithms)'\n\n selectedAlgsPerProblem = {}\n for f, i in pproc.dictAlgByFun(dictAlg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n selectedAlgsPerProblemDF = []\n best = BestAlgSet(j)\n \n for i in range(0, len(best.target)):\n t = best.target[i]\n # if ((t <= target_ub) and (t >= target_lb)):\n if toolsstats.in_approximately(t,\n targets((f, d), discretize=True)):\n # add best for this target:\n selectedAlgsPerProblemDF.append(best.algs[i])\n \n # add second best or all algorithms that have an ERT\n # within a factor of f_factor of the best:\n secondbest_ERT = np.infty\n secondbest_str = ''\n secondbest_included = False \n for astring in j:\n currdictalg = dictAlg[astring].dictByDim()\n if currdictalg.has_key(d):\n curralgdata = currdictalg[d][f-1] \n currERT = curralgdata.detERT([t])[0]\n if (astring != best.algs[i]):\n if (currERT < secondbest_ERT):\n secondbest_ERT = currERT\n secondbest_str = astring\n if (currERT <= best.detERT([t])[0] * f_factor):\n selectedAlgsPerProblemDF.append(astring)\n secondbest_included = True\n if not (secondbest_included) and (secondbest_str != ''):\n selectedAlgsPerProblemDF.append(secondbest_str)\n \n if len(selectedAlgsPerProblemDF) > 0:\n selectedAlgsPerProblem[(d, f)] = selectedAlgsPerProblemDF\n \n print 'pre-processing of function', f, 'done.' \n \n print 'loading of best algorithm(s) data done.'\n \n countsperalgorithm = {}\n for (d, f) in selectedAlgsPerProblem:\n print 'dimension:', d, ', function:', f\n setofalgs = set(selectedAlgsPerProblem[d,f])\n \n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += selectedAlgsPerProblem[d,f].count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"\n \n return selectedalgsperdimension",
"def get_clusters(ensemble, grouping, clustering):\n\n\t# Prevent SQL injected since column names cannot be parameterized.\n\tif \";\" in ensemble or \";\" in grouping or \";\" in clustering:\n\t\treturn None\n\n\tensemble = ensemble.replace('EnsEns','Ens')\n\tdf = None;\n\n\tif grouping in ['annotation','cluster']:\n\t\tgroupingu = ensemble+\".\"+grouping+\"_\"+clustering\n\telif grouping in ['NeuN']:\n\t\tgroupingu = \"CONCAT('NeuN',cells.\"+grouping+\")\"\n\telse:\n\t\tgroupingu = \"cells.\"+grouping\n\n\t# Get methylation info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snmC' as modality, \\\n\t\t%(groupingu)s as groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'groupingu': groupingu,\n\t\t\t\t\t'clustering': clustering}\n\ttry:\n\t\tdf = pd.read_sql(query, db.get_engine(current_app, 'methylation_data'))\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\t\t# return None\n\n\t# Get snATAC info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snATAC' AS modality, %(ensemble)s.cluster_ATAC groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_atac = pd.read_sql(query, db.get_engine(current_app, 'snATAC_data'))\n\t\tdf=df.append(df_atac)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\n\t# Get snRNA info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'RNA' AS modality, %(ensemble)s.cluster_RNA groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_rna = pd.read_sql(query, db.get_engine(current_app, 'RNA_data'))\n\t\tdf=df.append(df_rna)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\treturn df",
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def clustering_metrics(clusts, node_assn, node_pred):\n pred_vox = cluster_to_voxel_label(clusts, node_pred)\n true_vox = cluster_to_voxel_label(clusts, node_assn)\n ari = ARI(pred_vox, true_vox)\n ami = AMI(pred_vox, true_vox)\n sbd = SBD(pred_vox, true_vox)\n pur, eff = purity_efficiency(pred_vox, true_vox)\n return ari, ami, sbd, pur, eff"
] | [
"0.6315484",
"0.61245906",
"0.5925279",
"0.5822709",
"0.57913584",
"0.57853943",
"0.57158273",
"0.56992424",
"0.5629585",
"0.56262213",
"0.55483156",
"0.5493547",
"0.5488877",
"0.54682064",
"0.5456372",
"0.5410733",
"0.5409758",
"0.54085684",
"0.53946304",
"0.53820395",
"0.5365618",
"0.5353488",
"0.53240466",
"0.5320415",
"0.5304393",
"0.5303199",
"0.52986354",
"0.5283749",
"0.5268433",
"0.52369326"
] | 0.69420356 | 0 |
Calculates a clustering's contingency matrix for each clustering algorithm stored in the list clustering_alg and adds it to the dict. | def eval_cluster_contingency(clustering_alg: List, labels_true, sdist):
    for (alg_name, alg_dict) in clustering_alg:
        if "alg" in alg_dict:
            # Fit the stored clustering estimator on sdist and cache its labels.
            clustering = alg_dict["alg"].fit(sdist)
            labels_pred = clustering.labels_
            alg_dict["labels"] = labels_pred
        else:
            # No estimator given; reuse the precomputed labels.
            labels_pred = alg_dict["labels"]
        # Normalize the predicted label values, then store the contingency
        # matrix against the ground-truth labels under the "cm" key.
        pred_label_dict, new_labels = normalize_labels(labels_pred)
        alg_dict["cm"] = contingency_matrix(labels_true, new_labels) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_clustering_info(self, algorithm_type, clustering_parameters, clusterings = []):\n clustering_info = {}\n for i, running_parameters in enumerate(clustering_parameters):\n\n clustering_id = \"clustering_%04d\"%(self.current_clustering_id)\n self.current_clustering_id += 1\n clustering_info[clustering_id] = {\n \"type\":algorithm_type,\n \"clustering\": None,\n \"parameters\": running_parameters\n }\n\n if clusterings != []:\n clustering_info[clustering_id][\"clustering\"] = clusterings[i]\n\n return clustering_info",
"def enumerate_clusterings(self):\n\n # Initialize an empty list of clusterings. Each element of the list\n # is a dictionary mapping NOEs to the signatures they are clustered to\n # in a solution. Each clustering is initialize with all uniquely\n # clusterable NOEs as keys mapping to their unique clusters\n\n clusterings = []\n\n while True:\n\n # Run the solver and get a solution back\n\n solution = self.solve()\n\n # If UNSAT, then flush aux clauses from the formula and return\n # all the clusterings we found so far\n\n if not solution:\n self.flush()\n return clusterings\n\n # Iterate over the clustering variables set to true by in the\n # discovered solution. Forbid this clustering from reoccuring and\n # add it to the list of found clusterings\n\n clause = []\n clustering = {}\n for node in self.clustering_variables.keys():\n if len(node.clusters) == 1:\n clustering[node] = list(node.clusters)[0]\n\n for vtype, node, cluster in solution:\n if vtype == Formula.CST_VAR:\n clustering[node] = cluster\n clause.append(-self.clustering_variables[node][cluster])\n\n self.add_clause(clause)\n clusterings.append(clustering)",
"def matrix_dist(self):\n matrix_dic = {}\n for clus in self.clusters:\n for other_clus in self.clusters:\n if clus.samples[0].s_id > other_clus.samples[0].s_id: # avoid duplicates\n matrix_dic[(clus.samples[0].s_id, other_clus.samples[0].s_id)] = clus.samples[0]\\\n .compute_euclidean_distance(other_clus.samples[0])\n return matrix_dic",
"def _eval_clustering(self, gen_reviews, clusters, embedding_model, clustering):\n result = []\n preds = self.predict_gen(gen_reviews, embedding_model, clustering)\n\n acc = accuracy_score(np.array(clusters), np.array(preds))\n conf = confusion_matrix(np.array(clusters), np.array(preds))\n\n return acc, conf",
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def cluster(self):\n\t\tself.index[\"cluster\"] = {}\n\n\t\tfor item in self.index[\"items\"]:\n\t\t\tself.index[\"cluster\"][item] = [{\"weight\" : float(len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))))/float(len(self.index[\"items\"][item])) , \"name\" : id, \"authority\" : set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id])) } for id in self.index[\"items\"] if id != item and len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))) >= 1]\n\n\t\treturn self.index",
"def evaluate_clustering_methods(methods):\r\n results = {}\r\n for m in methods:\r\n res = results[m['name']] = {}\r\n prec = 3\r\n res['Adjusted Rand Score'] = round(sklearn.metrics.adjusted_rand_score(m['target'], m['clustering']),prec)\r\n res['Normalized Mutual Information'] = round(sklearn.metrics.normalized_mutual_info_score(m['target'], m['clustering']),prec)\r\n res['Adjusted Mutual Information'] = round(sklearn.metrics.adjusted_mutual_info_score(m['target'], m['clustering']),prec)\r\n return np.transpose(results)",
"def clustering(distribution, areal_units, classes=None):\n\n # Regroup into classes if specified. Otherwise return categories indicated\n # in the data\n if not classes:\n classes = return_categories(distribution) \n \n ## Get the number of neighbourhoods\n neigh = mb.neighbourhoods(distribution, areal_units, classes)\n num_neigh = {cl: len(neigh[cl]) for cl in classes}\n num_units = {cl: len([a for ne in neigh[cl] for a in ne])\n for cl in classes}\n\n ## Compute clustering values\n clustering = {}\n for cl in classes:\n if num_units[cl] == 0:\n clustering[cl] = float('nan')\n elif num_units[cl] == 1:\n clustering[cl] = 1\n else:\n clustering[cl] = _single_clustering(num_units[cl],\n num_neigh[cl])\n\n clustering[cl] = ((num_neigh[cl] - num_units[cl]) /\n (1 - num_units[cl]))\n return clustering",
"def get_clustering_algorithm_class(cls):\n return {\n \"spectral\": SpectralClusteringAlgorithm,\n \"dbscan\": DBSCANAlgorithm,\n \"gromos\": GromosAlgorithm,\n \"kmedoids\": KMedoidsAlgorithm,\n \"random\": RandomClusteringAlgorithm,\n \"hierarchical\": HierarchicalClusteringAlgorithm\n }",
"def cluster_all_features(feature_mat):\n n_dims = feature_mat.shape[1]\n whitened = whiten(feature_mat.transpose())\n all_codebooks = dict()\n for k in range(n_dims, 0, -1):\n centroids, distortion = kmeans(whitened, k)\n all_codebooks[k] = (distortion, centroids)\n\n return all_codebooks",
"def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters",
"def update(self, clusters):\n centroids = {}\n for cluster, coordinates in clusters.iteritems():\n sumLat = 0\n sumLong = 0\n for coordinate in coordinates:\n sumLat += float(coordinate[0])\n sumLong += float(coordinate[1])\n centroids[cluster] = (sumLat/float(len(coordinates)), sumLong/float(len(coordinates)))\n return centroids",
"def evaulate_clusters(self, pred_dict, model_dir):\n\t\tclustering_dict = {\"Topic\":[], \"Text\":[], \"Keywords\": []}\n\t\tfor cluster_num, sents_list in pred_dict.items():\n\t\t\tprint(\"\\n cluster number : \", cluster_num)\n\t\t\tprint(\"\\n number of sents : \", len(sents_list))\n\t\t\ttfidf_vec = TfidfVectorizer(use_idf=True, sublinear_tf=True, max_df=0.8, max_features=20, ngram_range=(1,5), min_df=1)\n\t\t\tX_tfidf = tfidf_vec.fit_transform(sents_list).toarray()\n\t\t\ttotal_tfidf = tfidf_vec.get_feature_names()\n\t\t\tfor sent in sents_list:\n\t\t\t\tclustering_dict[\"Topic\"].append(cluster_num)\n\t\t\t\tclustering_dict[\"Text\"].append(sent)\n\t\t\t\tclustering_dict[\"Keywords\"].append(\",\".join(total_tfidf))\n\t\t\"\"\" save the clusters to csv file \"\"\"\n\t\tdf_dominant_topic = defaultdict(list) \n\t\tdf_dominant_topic[\"Topic\"] = clustering_dict[\"Topic\"]\n\t\tdf_dominant_topic[\"Text\"] = clustering_dict[\"Text\"]\n\t\tdf_dominant_topic[\"Keywords\"] = clustering_dict[\"Keywords\"]\n\t\tdf_dominant_topic = pd.DataFrame(df_dominant_topic)\n\t\tdf_dominant_topic.to_csv(os.path.join(model_dir, \"cluster_sentence_topic_mapping.csv\"))\n\t\treturn df_dominant_topic",
"def calc_cc(graph):\n\tclustering_coeffs = {}\n\tfor node in graph.nodes():\n\t\tclustering_coeffs[node] = { \"cc\" : nx.clustering(graph, node)}\n\tnx.set_node_attributes(graph, clustering_coeffs)",
"def gen_cluster_accuracies():\n accuracies = {}\n with Parallel(n_jobs=morphs.parallel.N_JOBS) as parallel:\n for block_path in morphs.paths.blocks():\n print(block_path)\n spikes = morphs.load.ephys_data(block_path, collapse_endpoints=True)\n\n if len(spikes[\"recording\"].unique()) >= 1:\n template_spikes = spikes[spikes[\"stim_id\"].isin(list(\"abcdefgh\"))]\n assert len(template_spikes) > 0\n cluster_groups = template_spikes.groupby(\"cluster\")\n\n morph_dims = spikes.morph_dim.unique()\n morph_dims = morph_dims[~pd.isnull(morph_dims)]\n morph_dims.sort()\n\n max_num_reps = np.max(\n [\n len(stim_group.groupby(by=[\"recording\", \"stim_presentation\"]))\n for stim_id, stim_group in template_spikes.groupby(\"stim_id\")\n ]\n )\n\n accuracies_list = parallel(\n delayed(cluster_accuracy)(\n cluster, cluster_group, morph_dims, max_num_reps\n )\n for (cluster, cluster_group) in cluster_groups\n )\n\n accuracies[block_path] = pd.concat(accuracies_list)\n\n morphs.paths.PROCESSED_DIR.mkdir(parents=True, exist_ok=True)\n with open(morphs.paths.ACCURACIES_PKL.as_posix(), \"wb\") as f:\n pickle.dump(accuracies, f)",
"def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n 
robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)",
"def cluster_classification(weblog,classification_column_transaction,\\\n classification_column_diversity, session_data_threshold, cluster_type, classification_wanted_transaction, verbose = False):\n if verbose== True:\n start_time = timelib.time()\n print(\"\\n * Computing cluster matrices ...\") \n browsing_matrix = {}\n diversifying_matrix = {}\n # Selecting sessions from each cluster\n for cluster_id in session_data_threshold[cluster_type].unique():\n sessions_cluster = session_data_threshold[session_data_threshold[cluster_type]==cluster_id].session_id\n divpat_log = weblog[weblog.session_id.isin(sessions_cluster)]\n # Filtering some requests\n divpat_log=divpat_log[divpat_log['requested_'+classification_column_transaction].isin(classification_wanted_transaction)]\n divpat_log=divpat_log[divpat_log['referrer_'+classification_column_transaction].isin(classification_wanted_transaction)]\n \n # Defining matrices\n diversity_columns=('referrer_'+classification_column_diversity,'requested_'+classification_column_diversity)\n browsing_matrix[cluster_id],_ = compute_browsing_matrix(divpat_log,'referrer_'+classification_column_transaction,'requested_'+classification_column_transaction,labels=classification_wanted_transaction)\n diversifying_matrix[cluster_id],_ = compute_diversifying_matrix(divpat_log,'referrer_'+classification_column_transaction,'requested_'+classification_column_transaction,\\\n diversity_columns,labels = classification_wanted_transaction)\n if verbose == True:\n print(\" Cluster matrices computed in %.1f seconds.\"%(timelib.time() - start_time))\n \n return browsing_matrix, diversifying_matrix;",
"def _granger_causality(self):\r\n gc = dict(frequencies={}, gc_xy={}, gc_yx={}, gc_sim={},\r\n spectral_density={})\r\n for i, j in self.ij:\r\n w, f_x2y, f_y2x, f_xy, Sw = \\\r\n alg.granger_causality_xy(self.model_coef[i, j],\r\n self.error_cov[i, j],\r\n n_freqs=self._n_freqs)\r\n\r\n # All other measures are dependent on i, j:\r\n gc['gc_xy'][i, j] = f_x2y\r\n gc['gc_yx'][i, j] = f_y2x\r\n gc['gc_sim'][i, j] = f_xy\r\n gc['spectral_density'][i, j] = Sw\r\n\r\n return gc",
"def compute_clusters(self, documents):\n ###TODO\n for d in range(0, len(documents)):\n maxi = 999999999\n for cid in range(0, len(self.means)):\n dist = self.distance(documents[d], self.means[cid], self.norms[cid])\n if dist < maxi:\n maxi = dist\n clust = cid \n self.cluster[d] = clust",
"def kmode_calculation(self, data):\n col_dict = {}\n\n for col in data.columns:\n data[col] = data[col].astype('category')\n col_dict.update({col: dict(enumerate(data[col].cat.categories))})\n\n # Get all the cols in the DataFrame\n cols = [col for col in data.columns]\n\n # Transform all values into categorical and numerical values\n for col in cols:\n data[col] = data[col].astype('category')\n data[col] = data[col].cat.codes\n\n # Run k-modes using the algorithm\n kmodes_method = KModes(n_clusters=self.n_cluster, init=self.init_method, n_init=self.n_iter, verbose=1)\n kmode_result = kmodes_method.fit_predict(data[cols])\n\n # Attach the output label for each data point\n data['classification'] = pd.Series(kmode_result, index=data.index)\n\n return col_dict, kmodes_method.cluster_centroids_, data",
"def _compute_centroids(self, encodings, labels):\n counts = {}\n centroids = {}\n\n # Copy encodings to avoid ref modification when computing centroid.\n encodings = encodings.copy()\n\n for i, encoding in enumerate(encodings):\n key = int(labels[i])\n if key in centroids:\n centroids[key] += encoding\n counts[key] += 1\n else:\n centroids[key] = encoding\n counts[key] = 1\n for key in centroids:\n centroids[key] /= counts[key]\n self.centroids = centroids",
"def get_clusters_adjacencies(adjacency, clusters: list):\n clusters.sort(key=lambda t: len(t), reverse=True)\n id_to_cluster = get_id_to_cluster(clusters, adjacency.shape[0])\n num_clusters = len(clusters)\n mat = np.zeros((num_clusters, num_clusters))\n rows, cols = adjacency.nonzero()\n for i, j in zip(rows, cols):\n weight = adjacency[i, j]\n src_cluster = id_to_cluster[i]\n dest_cluster = id_to_cluster[j]\n mat[src_cluster, dest_cluster] += weight\n return mat",
"def compute_confusion_matrix(num_clusters, clustered_points_algo, sorted_indices_algo):\r\n seg_len = 400\r\n true_confusion_matrix = np.zeros([num_clusters, num_clusters])\r\n for point in range(len(clustered_points_algo)):\r\n cluster = clustered_points_algo[point]\r\n num = (int(sorted_indices_algo[point]/seg_len) % num_clusters)\r\n true_confusion_matrix[int(num), int(cluster)] += 1\r\n return true_confusion_matrix",
"def clustering_metrics(clusts, node_assn, node_pred):\n pred_vox = cluster_to_voxel_label(clusts, node_pred)\n true_vox = cluster_to_voxel_label(clusts, node_assn)\n ari = ARI(pred_vox, true_vox)\n ami = AMI(pred_vox, true_vox)\n sbd = SBD(pred_vox, true_vox)\n pur, eff = purity_efficiency(pred_vox, true_vox)\n return ari, ami, sbd, pur, eff",
"def get_clusters(ensemble, grouping, clustering):\n\n\t# Prevent SQL injected since column names cannot be parameterized.\n\tif \";\" in ensemble or \";\" in grouping or \";\" in clustering:\n\t\treturn None\n\n\tensemble = ensemble.replace('EnsEns','Ens')\n\tdf = None;\n\n\tif grouping in ['annotation','cluster']:\n\t\tgroupingu = ensemble+\".\"+grouping+\"_\"+clustering\n\telif grouping in ['NeuN']:\n\t\tgroupingu = \"CONCAT('NeuN',cells.\"+grouping+\")\"\n\telse:\n\t\tgroupingu = \"cells.\"+grouping\n\n\t# Get methylation info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snmC' as modality, \\\n\t\t%(groupingu)s as groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'groupingu': groupingu,\n\t\t\t\t\t'clustering': clustering}\n\ttry:\n\t\tdf = pd.read_sql(query, db.get_engine(current_app, 'methylation_data'))\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\t\t# return None\n\n\t# Get snATAC info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snATAC' AS modality, %(ensemble)s.cluster_ATAC groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_atac = pd.read_sql(query, db.get_engine(current_app, 'snATAC_data'))\n\t\tdf=df.append(df_atac)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\n\t# Get snRNA info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'RNA' AS modality, %(ensemble)s.cluster_RNA groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_rna = pd.read_sql(query, db.get_engine(current_app, 'RNA_data'))\n\t\tdf=df.append(df_rna)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\treturn df",
"def index_nodes(self):\n out = {}\n\n #avg = np.mean(list(self.rtype_vectors.values()),axis=0)\n\n\n #for name, node in self.nodes.items():\n # tmp1 = [self.rtype_vectors[rtype]\n # for rtype, dest in node.outgoing_relations] or [NULL_VEC()]\n # tmp2 = [permute_rtype_vector(self.rtype_vectors[rtype])\n # for rtype, prev in node.incoming_relations] or [NULL_VEC()]\n\n # net = tmp1 + tmp2\n\n # #out[name] = np.asarray(net).mean(axis=0)\n # #out[name] = np.asarray(net).sum(axis=0)\n # v = np.asarray(net).sum(axis=0)\n # if v.any():\n # out[name] = v/max(v)#softmax(v/max(v))\n # else:\n # out[name] = v\n\n\n #avg = np.mean(list(out.values()),axis=0)\n\n #maxm = np.max(list(out.values()),axis=0)\n\n ####normalize everything\n #for r,v in out.items():\n # if v.any():\n # #out[r] = v / sqrt(v.dot(v))\n # out[r] = softmax((v-avg)/maxm)\n\n\n\n # PCA method 0001701\n rmap = self.rtype_vectors\n data = np.zeros((len(self.nodes), JACCARD_DIMENSIONS), dtype=np.float)\n ix = 0\n for node in self.nodes.values():\n\n #compute weighted average of each relation type\n tmp = [rmap[rtype] for \n rtype, dest in node.outgoing_relations] + \\\n [permute_rtype_vector(rmap[rtype]) for \n rtype, prev in node.incoming_relations]\n\n v = np.asarray(tmp).mean(axis=0) if tmp else NULL_VEC()\n\n #normalize\n if v.any():\n data[ix] = v / sqrt(v.dot(v))\n else:\n data[ix] = v\n ix += 1\n\n #eliminate projection onto first 7 principal components\n d2 = data - PCA(data, 7)\n\n #order of nodes is preserved\n for i,v in enumerate(self.nodes):\n out[v] = softmax(d2[i])\n\n return out",
"def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], self.model.labels_)\n self.results = results\n return results",
"def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)",
"def _generate_adjacency_matrices(self):\n self.adj_matrices = dict()\n mes = []\n args = []\n for metaedge in self.metaedges:\n mes.append(metaedge)\n args.append(self._prepare_parallel_adj_matrix_args(self.edge_df.query('abbrev == @metaedge')))\n res = parallel_process(array=args, function=mt.get_adj_matrix, use_kwargs=True, n_jobs=self.n_jobs,\n front_num=0)\n for metaedge, matrix in zip(mes, res):\n self.adj_matrices[metaedge] = matrix",
"def clustering(self):\n ret_concepts = []\n clusters = []\n for word in self.words:\n clusters.append(WordCluster(None, word))\n while len(clusters) > 1:\n maxi = -1\n maxj = -1\n max = -1\n m = -1\n for i in range(len(clusters)):\n for j in range(len(clusters)):\n if i == j:\n continue\n # print(\"%d cluster compare with %d cluster\" % (i, j))\n # 1: join 21: i absorb j 22: j absorb i 3: collapse\n # l1: join L(Tm) value l21: A absorb B L(Tm)value\n l1, newtags = self.__calculate_ltm(clusters[i], clusters[j], 1)\n if l1 > max:\n m = 1\n maxi = i\n maxj = j\n max = l1\n print(\"max L(Tm) for clustering in current loop: %lf\" % max)\n if max < ClusterAlgorithm.P_threshold:\n return\n Tm = clusters[maxi].join(clusters[maxj])\n Tm_concepts = self.__select_concepts(self.__getword(Tm))\n for tmp_concept in Tm_concepts.items():\n ret_concepts.append(tmp_concept)\n rm1 = clusters[maxi]\n rm2 = clusters[maxj]\n clusters.remove(rm1)\n clusters.remove(rm2)\n if Tm is not None:\n print(\"merged cluster's words:\")\n print(self.__getword(Tm))\n return ret_concepts"
] | [
"0.60130954",
"0.5818192",
"0.57543993",
"0.5732739",
"0.56639177",
"0.56580245",
"0.56447226",
"0.560482",
"0.552206",
"0.551928",
"0.54748267",
"0.5463208",
"0.5428008",
"0.54264724",
"0.5389764",
"0.535151",
"0.5327543",
"0.53244734",
"0.5321029",
"0.5317272",
"0.5313567",
"0.52784014",
"0.5268973",
"0.52574134",
"0.5225924",
"0.52034247",
"0.5200481",
"0.5199405",
"0.5187148",
"0.51740474"
] | 0.7477364 | 0 |
Modify the column name to make it Python-compatible as a field name | def normalize_col_name(col_name, used_column_names, is_relation):
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find(LOOKUP_SEP) >= 0:
while new_name.find(LOOKUP_SEP) >= 0:
new_name = new_name.replace(LOOKUP_SEP, '_')
if col_name.lower().find(LOOKUP_SEP) >= 0:
# Only add the comment if the double underscore was in the original
# name
field_notes.append(
"Field renamed because it contained more than one '_' in a row."
)
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append(
'Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append(
"Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_column_name(self, column, idx, old_name, name):\n dtype = self.dtype\n # Updating the names on the dtype should suffice\n dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1 :]",
"def py_field_name(self, field):\n name = field.name\n name = as_identifier(name)\n if self.options(field).convert_case:\n name = from_camel_case(name)\n name = self._mangle_name(name)\n return name",
"def col_name(col):\n\n if isinstance(col, str):\n return col\n return col.__name__",
"def encodeColumnName(self, column):\r\n return '\"{}\"'.format(column)",
"def column_name(name):\n # Only needs exceptions to standard token cleanup\n column_map = {\n \"line#\" : \"ignore\",\n \"date\" : \"timestamp\",\n \"rh\" : \"humidity\",\n \"par\" : \"par_ue\"\n }\n\n if name in column_map:\n return column_map[name]\n \n return name",
"def set_column_name(self, name):\r\n self.column_name = name",
"def set_column_name(self, name):\r\n self.column_name = name",
"def _column_original_name(name):\n if ':' in name:\n return name.split(':')[-1]\n else:\n return name",
"def name(self):\n if self.table:\n return \"{}.{}\".format(self.table, self.field_name)\n return self.field_name",
"def wrap_columns_name(self, format_string):\n self._data_frame = self._data_frame.rename(\n columns=lambda column: format_string.format(column)\n )",
"def rename_columns(self, col):\n try:\n self.cleaned_data.columns = col\n except Exception as e:\n raise e",
"def db_field_name(self):\r\n return self.db_field or self.column_name",
"def db_field_name(self):\r\n return self.db_field or self.column_name",
"def short_column(name : str) -> str:\n return name.split(\"-\")[1]",
"def get_name(self):\n return self.col_name",
"def capnp_field_name(self, field):\n name = field.name\n return as_identifier(name)",
"def _valid_column(column_name):\n return str(column_name)",
"def namehack(field):\n if field.endswith((\"attribute\", \"views\")):\n return field + \"__name\"\n else:\n return field",
"def column_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"column_name\")",
"def column_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"column_name\")",
"def _validate_column_name(col_name : str) -> str:\n\n if col_name[0].isdigit():\n return f'\"{col_name}\"'\n return col_name",
"def typed_column(self) -> str:\n\n return \"{}:{}\".format(self.name, self.dtype)",
"def rename(self, newname):\n # set the new column name\n self.colname = newname",
"def __getitem__(self, item):\n if isinstance(item, str):\n name_dict = {n.lower():n for n in self.colnames}\n item = item.lower()\n item = ','.join([name_dict[i] for i in item.split(',')])\n out = APtable.__getitem__(self, item)\n return out",
"def as_field(identifier: str) -> str:\n return identifier.lower()",
"def _remap_column_names(self, frame):\n\n frame[TransactionColumns.BANK.name] = self.INSTITUTION\n frame[TransactionColumns.ACCOUNT.name] = self.account\n frame.rename(columns=self._FIELD_2_TRANSACTION, inplace=True)\n frame[TransactionColumns.CHECK_NO.name] = None\n return frame",
"def column_name(self) -> Optional[str]:\n return pulumi.get(self, \"column_name\")",
"def test_column_name(self):\n field = self.base_field\n sch = SchemaField(field)\n self.assertEqual(sch.name, sch.column_name)\n self.assertNotEqual(sch.column_name, sch.title)",
"def getColName(self, col):\n try:\n return chr(ord('a') + col)\n except:\n return col",
"def getColName(self, col):\n try:\n return chr(ord('a') + col)\n except:\n return col"
] | [
"0.7072573",
"0.7007338",
"0.6994978",
"0.69579434",
"0.68072045",
"0.6802428",
"0.6802428",
"0.67907166",
"0.6732477",
"0.6600031",
"0.65641886",
"0.64931035",
"0.64931035",
"0.64816284",
"0.6459289",
"0.64281356",
"0.63902634",
"0.6334104",
"0.6324554",
"0.6324554",
"0.62862897",
"0.6277314",
"0.62329",
"0.6204517",
"0.6178222",
"0.61457276",
"0.6116452",
"0.61137515",
"0.6037867",
"0.6037867"
] | 0.7277575 | 0 |
Given the database connection, the table name, and the cursor row description, this routine will return the given field type name, as well as any additional keyword parameters and notes for the field. | def get_field_type(connection, table_name, row):
field_params = OrderedDict()
field_notes = []
is_geometry = False
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for data_types_reverse to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = int(row[3])
if field_type == 'DecimalField':
if row[4] is None or row[5] is None:
field_notes.append(
'max_digits and decimal_places have been guessed, as this '
'database handles decimal fields as float')
field_params['max_digits'] = row[4] if row[4] is not None else 10
field_params['decimal_places'] = row[
5] if row[5] is not None else 5
else:
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
if field_type == 'GeometryField':
geo_col = row[0]
# Getting a more specific field type and any additional parameters
# from the `get_geometry_type` routine for the spatial backend.
field_type, geo_params = connection.introspection.get_geometry_type(
table_name, geo_col)
field_params.update(geo_params)
is_geometry = True
return field_type, field_params, is_geometry
# return getattr(models.fields, field_type), field_params | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in self._cursor.description:\n fieldname = des[0]\n results[column] = fieldname\n column = column + 1\n\n return results",
"def get_field_type(self, table_name, field_name):\n \n dtype = self.field_types[(self.field_types.TABNAME == table_name) & (self.field_types.FIELDNAME == field_name)]['DATATYPE'].values[0] \n return dtype",
"def getFieldDetails(self, field_name):\n try:\n value_list = []\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_field_details', [field_name, results])\n\n for row in results:\n # column_name, data_type, desc_or_value, definition, active\n value_list.append((row[0], row[1], row[2], row[3], row[4]))\n \n if len(value_list) == 0:\n # If not found in the dictionary, assume this is a user-created column\n value_list.append((field_name, 'text', '', ''))\n \n return value_list[0]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False",
"def get_geometry_type(self, table_name, description):\n with self.connection.cursor() as cursor:\n cursor.execute(\n \"\"\"\n SELECT t.coord_dimension, t.srid, t.type FROM (\n SELECT * FROM geometry_columns\n UNION ALL\n SELECT * FROM geography_columns\n ) AS t WHERE t.f_table_name = %s AND t.f_geometry_column = %s\n \"\"\",\n (table_name, description.name),\n )\n row = cursor.fetchone()\n if not row:\n raise Exception(\n 'Could not find a geometry or geography column for \"%s\".\"%s\"'\n % (table_name, description.name)\n )\n dim, srid, field_type = row\n # OGRGeomType does not require GDAL and makes it easy to convert\n # from OGC geom type name to Django field.\n field_type = OGRGeomType(field_type).django\n # Getting any GeometryField keyword arguments that are not the default.\n field_params = {}\n if self.postgis_oid_lookup.get(description.type_code) == \"geography\":\n field_params[\"geography\"] = True\n if srid != 4326:\n field_params[\"srid\"] = srid\n if dim != 2:\n field_params[\"dim\"] = dim\n return field_type, field_params",
"def _convert_field_type(row):\n return row",
"def get_column_def(self):\r\n return '{} {}'.format(self.cql, self.db_type)",
"def parse_description(_descriptions, _db_type):\n _field_names = []\n _field_types = []\n\n \"\"\"name, type_code, display_size, internal_size, precision, scale, null_ok\"\"\"\n\n for _column in _descriptions:\n _field_names.append(_column[0])\n if _db_type == DB_MYSQL:\n _field_types.append(mysql_type_to_sql_type(_column[1]))\n else:\n _field_types.append(_column[1])\n\n return _field_names, _field_types",
"def test_get_field_type_text_field(self):\n db_introspection = DatabaseIntrospection(self.connection)\n self.assertEqual(\n db_introspection.get_field_type(\n TypeCode.STRING,\n description=ColumnInfo(\n name=\"name\",\n type_code=TypeCode.STRING,\n internal_size=\"MAX\",\n ),\n ),\n \"TextField\",\n )",
"def field_type(self):\n return \"\"",
"def field_type(name):\n if name not in field_types:\n field_types[name] = records.fields_get([name], attributes=['type'])[name]['type']\n return field_types.get(name)",
"def findMetadataTable(self, field_name, field_type, log, study_id, lock):\n \n # log passed in from writeMetadataValue() - it's a list. At end of function, \n # exception handler will output contents of log to web for viewing if error\n # occurrs.\n \n try:\n table = ''\n field_name = field_name.upper()\n field_name.replace('\"', '')\n\n # Fill out the field list if it's the first call\n log.append('Length of fields is: {0}'.format(str(len(self.fields))))\n if len(self.fields) == 0:\n log.append('Filling out field list for table lookup. Current field is \"{0}\"'.format(field_name))\n lock.acquire()\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.find_metadata_table', [results])\n for tab_name, col_name in results:\n if col_name not in self.fields:\n self.fields[col_name] = []\n self.fields[col_name].append(tab_name)\n lock.release()\n \n log.append('field{} successfully filled out')\n \n if field_name in self.fields:\n # If there's only one hit we can assign it\n tables = self.fields[field_name]\n log.append('Type of variable is: %s' % str(tables))\n if len(self.fields[field_name]) == 1:\n table = self.fields[field_name][0]\n log.append('Field only in one table: %s' % table)\n \n # More than one table was found with this column name. Find the correct one\n # based on the study id\n else:\n log.append('Field in multiple tables(%s): %s' % (len(self.fields[field_name]), str(self.fields[field_name])))\n log.append('Study is is: %s' % study_id)\n for table_name in self.fields[field_name]:\n if str(study_id) in table_name:\n table = table_name\n \n # If table is not found, assume user-defined column\n else:\n \"\"\" Code may look bizarre... but here's why:\n 1. To streamline access and prevent blocking, we first check to see if the field\n does exist in the field list. If it does, we do not have to lock and can simply\n look up the table name.\n \n 2. If field is not in list, it must be a new column. In this case we must lock the \n code that handles new column creation. The catch is that if two threads both hit the lock\n with the same field name, one will get in and the other will block. Once the initial thread \n exists, it will have handled the new column, gotten the appropriate table name, and returned. \n The 2nd thread will now enter the critical section, however if we don't again check to see \n if the field is now in the field list, it will attempt to create the same column again and \n fail. Thus we check a 2nd time to see if the field exists and if so, simply read it from the \n field list. \n \"\"\"\n lock.acquire() \n if field_name in self.fields:\n log.append('Field now exists. Pulling from local list.')\n table = self.fields[field_name][0]\n log.append('Table name exists. Using \"%s\".' % table)\n else:\n log.append('Entities do not exist. Creating...')\n table = self.handleExtraData(study_id, field_name, field_type, log)\n log.append('Entities created. Table name is \"%s\"' % table)\n if field_name not in self.fields:\n self.fields[field_name] = [table]\n else:\n self.fields[field_name].append(table)\n lock.release()\n \n log.append('Returning from findMetadataTable with value: %s' % str(table))\n return table\n\n except Exception, e:\n lock.release()\n log.append('Exception caught: %s' % str(e))\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n raise Exception('\\n'.join(log))",
"def get_column_def(self):\r\n db_type = self.db_type.format(self.value_type.db_type)\r\n return '{} {}'.format(self.cql, db_type)",
"def get_unique_name(self, cursor, field_name=None):\n if cursor.kind in [CursorKind.UNEXPOSED_DECL]:\n return ''\n # covers most cases\n name = cursor.spelling\n if cursor.kind == CursorKind.CXX_BASE_SPECIFIER:\n name = cursor.type.spelling\n # if it's a record decl or field decl and its type is anonymous\n if name == '':\n # if cursor.is_anonymous():\n # a unnamed object at the root TU\n if (cursor.semantic_parent\n and cursor.semantic_parent.kind == CursorKind.TRANSLATION_UNIT):\n name = self.make_python_name(cursor.get_usr())\n log.debug('get_unique_name: root unnamed type kind %s',cursor.kind)\n elif cursor.kind in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL,\n CursorKind.CLASS_DECL,CursorKind.FIELD_DECL]:\n name = self._make_unknown_name(cursor, field_name)\n log.debug('Unnamed cursor type, got name %s',name)\n else:\n log.debug('Unnamed cursor, No idea what to do')\n #import code\n #code.interact(local=locals())\n return ''\n if cursor.kind in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL,\n CursorKind.CLASS_DECL, CursorKind.CXX_BASE_SPECIFIER]:\n names= {CursorKind.STRUCT_DECL: 'struct',\n CursorKind.UNION_DECL: 'union',\n CursorKind.CLASS_DECL: 'class',\n CursorKind.TYPE_REF: '',\n CursorKind.CXX_BASE_SPECIFIER: 'class'\n }\n name = '%s_%s'%(names[cursor.kind],name)\n log.debug('get_unique_name: name \"%s\"',name)\n return name",
"def get_column_def(self):\r\n db_type = self.db_type.format(\r\n self.key_type.db_type,\r\n self.value_type.db_type\r\n )\r\n return '{} {}'.format(self.cql, db_type)",
"def get_eltype_from_sbmfielddesc(hgf_field):\n\tq = \"\"\"SELECT type FROM sbmFIELDDESC where name='%s'\"\"\" %(hgf_field)\n\treturn run_sql(q)[0][0]",
"def getdocfield(fieldname):\t\t\n\tl = [d for d in doctype_dl if d.doctype=='DocField' and d.fieldname==fieldname]\n\treturn l and l[0] or None",
"def test_row_description(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(a int, b char(3))\")\n cursor.execute(\"insert into t1 values(1, 'abc')\")\n row = cursor.execute(\"select * from t1\").fetchone()\n assert cursor.description == row.cursor_description",
"def get_field_from_sbmfielddesc(hgf_field):\n\tq = \"\"\"SELECT * FROM sbmFIELDDESC where name='%s'\"\"\" %(hgf_field)\n\treturn run_sql(q)[0]",
"def get_type(self) -> str:\n return self.row_dict['type']",
"def _read_metadata(self, conn, tbl_name): \n # Split table name in libname and actual table name\n name, schema = tuple(tbl_name.split('.'))\n # Query the Vertica dictionary to get types and formats\n query = \"\"\"\n SELECT column_name as NAME, data_type as TYPE, data_type_length AS LENGTH \n FROM v_catalog.columns \n WHERE table_schema = '{}' AND table_name = '{}'\n \"\"\".format(name, schema)\n \n md = conn.fetch(query)\n if not len(md):\n raise ValueError('No metadata for table {}'.format(tbl_name))\n\n md = (md\n # Use variable names as row names, then remove the NAME column\n .set_index('NAME', inplace=False)\n # Compute the number of bytes for each variable It is given by the LENGTH variable\n .rename({'LENGTH': 'NUM_BYTES'}, axis=1))\n\n # Identify data types\n type_upper = md['TYPE'].str.upper()\n md['IS_TEXT'] = type_upper.str.startswith('VARCHAR')\n md['IS_BOOLEAN'] = type_upper == 'BOOLEAN'\n md['IS_INTEGER'] = type_upper.isin(['INT', 'INTEGER'])\n md['IS_FLOAT'] = (type_upper == 'FLOAT') | type_upper.str.startswith('NUMERIC')\n md['IS_DATE'] = type_upper == 'DATE'\n md['IS_TIMESTAMP'] = type_upper == 'TIMESTAMP'\n md['IS_TIME'] = type_upper == 'TIME'\n # Determine datetime formats for date and time data\n md['DATETIME_FORMAT'] = np.nan\n md.loc[md['IS_DATE'], 'DATETIME_FORMAT'] = 'yyyy-MM-dd'\n md.loc[md['IS_TIME'], 'DATETIME_FORMAT'] = 'HH:mm:ss'\n # Determine datetime formats for timestamp data\n # For timestamp data, the right format is:\n # - yyyy-MM-dd HH:mm:ss.0 with a JDBC connection <-- python default\n # - yyyy-MM-dd HH:mm:ss with an ODBC connection\n md.loc[md['IS_TIMESTAMP'], 'DATETIME_FORMAT'] = 'yyyy-MM-dd HH:mm:ss.0'\n\n # Original type\n md.rename({'TYPE': 'TYPE_IN_SOURCE'}, axis=1, inplace=True)\n # Create the metadata catalog\n md = MetadataCatalog(md, is_case_sensitive=False)\n # Check that all formats have been correctly processed\n format_check = md.check_metadata_completeness()\n if not all(format_check):\n unsupported_format = md.get_type_in_source()\n unsupported_format = unsupported_format[~format_check].unique()\n raise ValueError('Unsupported Vertica format: {}'.format(unsupported_format))\n return md",
"def columns_type(self,table):\n with self.conn.cursor() as cur:\n #_logger.debug('Columns Query. sql: %r', self.table_columns_query)\n cur.execute(self.columns_info_query % (self.dbname,table))\n for row in cur:\n yield row",
"def _get_tabletype(cls) -> str:\n raise NotImplementedError",
"def field_type(self) -> Optional[NameObject]:\n return self.get(\"/FT\")",
"def get_field_type(field):\n if (field < len(Field.FIELD_TYPES)):\n return Field.FIELD_TYPES[field][1]\n return 'unknown'",
"def db_field_name(self):\r\n return self.db_field or self.column_name",
"def db_field_name(self):\r\n return self.db_field or self.column_name",
"def get_column_type(type_name: str) -> object:\n raise NotImplementedError",
"def _get_fields(self, table):\n fields = list()\n for column in table.columns:\n fields.append({'id': column.name, 'type': str(column.type)})\n return fields",
"def db_fields(self):",
"def dict_factory(self, cursor, row):\n results = {}\n for index, col_name in enumerate(cursor.description):\n results[col_name[0]] = row[index]\n\n return results"
] | [
"0.6303345",
"0.6107471",
"0.5995526",
"0.5872125",
"0.5803399",
"0.5616249",
"0.56076336",
"0.5588405",
"0.55621403",
"0.55433136",
"0.55343395",
"0.5498443",
"0.5464399",
"0.5463323",
"0.5434962",
"0.53413516",
"0.53411543",
"0.53399295",
"0.53027624",
"0.52892405",
"0.5286212",
"0.5240713",
"0.523672",
"0.5220382",
"0.5216029",
"0.5216029",
"0.52090305",
"0.52000684",
"0.5199184",
"0.5199134"
] | 0.7295043 | 0 |
Authenticate user based on code. | def authenticate_user(authentication_code):
for suffix in ('', '=', '=='):
attempt = authentication_code + suffix
decoded = base64.decodestring(attempt)
fields = decoded.split('_')
email, user_id, time_stamp, str_hex = fields
if time_stamp < time.time():
# Authentication Code Expired
raise seerpod_exceptions.AuthenticationCodeExpired('Authentication code expired',
response_data=authentication_code)
user = None #business_contact_api.BusinessContacts().get_user_detail_from_email(email)
if not user:
continue
if attempt == generate_authentication_code(
user.id, time_stamp, user.owner_email_id, user.password):
return user
# Invalid authentication code
raise seerpod_exceptions.InvalidAuthenticationCode('Invalid Authentication code',
response_data=authentication_code) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authenticate(self, request, **kwargs):\n\n self.request = request\n if not self.request:\n return None\n\n state = self.request.GET.get('state')\n code = self.request.GET.get('code')\n nonce = kwargs.pop('nonce', None)\n\n if not code or not state:\n return None\n\n reverse_url = import_from_settings('OIDC_AUTHENTICATION_CALLBACK_URL',\n 'oidc_authentication_callback')\n\n token_payload = {\n 'client_id': self.OIDC_RP_CLIENT_ID,\n 'client_secret': self.OIDC_RP_CLIENT_SECRET,\n 'grant_type': 'authorization_code',\n 'code': code,\n 'redirect_uri': absolutify(\n self.request,\n reverse(reverse_url)\n ),\n }\n\n # Get the token\n token_info = self.get_token(token_payload)\n id_token = token_info.get('id_token')\n access_token = token_info.get('access_token')\n refresh_token = token_info.get('refresh_token')\n\n # Validate the token\n payload = self.verify_token(id_token, nonce=nonce)\n\n # Store users tokens\n usertokens, created = UserTokens.objects.update_or_create(\n user=payload['sub'],\n defaults={'access_token': access_token,\n 'refresh_token': refresh_token}\n )\n\n if payload:\n self.store_tokens(access_token, id_token)\n try:\n return self.get_or_create_user(access_token, id_token, payload)\n except SuspiciousOperation as exc:\n LOGGER.warning('failed to get or create user: %s', exc)\n return None\n\n return None",
"def authentication_callback(request):\n code = request.GET.get('code')\n user = authenticate(token=code, request=request)\n if user:\n auth_login(request, user)\n set_session_from_user(request, user)\n region = request.user.endpoint\n region_name = dict(Login.get_region_choices()).get(region)\n request.session['region_endpoint'] = region\n request.session['region_name'] = region_name\n url = getattr(settings, \"LOGIN_REDIRECT_URL\", \"/\")\n resp = HttpResponseRedirect(url)\n\n return resp",
"def authenticate(user, request):",
"def authentication_hook(self):\n pass",
"def authenticate_user(self, email, password):\n authentication = self.client.validate(email, password).decode(\"utf-8\")\n if authentication == \"valid\":\n self.current_email = email\n self.unlock_time = round(datetime.now().timestamp())\n if self.is_user and not self.is_return:\n self.display_successful_unlock_cust()\n elif self.is_user and self.is_return:\n self.return_car()\n else:\n self.display_successful_unlock_eng()\n elif authentication == \"invalid\":\n print(self.INVALID_USER)\n time.sleep(3)\n self.display_main()",
"def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)",
"def authenticate_user():\n\n error = request.args.get(\"error\")\n if error:\n logger.warning(\"Google sent us an error via OAuth2: %s\", error)\n\n return redirect(url_for(\"login\"))\n\n # Get OAuth2 authentication code\n code = request.args.get(\"code\")\n\n # Exchange code for fresh credentials\n credentials = flow.step2_exchange(code)\n\n # Extract email and email verification\n id_token = credentials.id_token\n email = id_token[\"email\"]\n verified_email = id_token[\"email_verified\"]\n\n if verified_email is True:\n # Find the user with the given email\n try:\n user = FlaskUser(User.objects.get(email = email))\n except User.DoesNotExist:\n user = None\n\n if not user:\n flash(\"A Galah account does not exist for this email.\", \"error\")\n\n logger.info(\n \"User %s has attempted to log in via OAuth2 but an account \"\n \"does not exist for them.\", email\n )\n else:\n login_user(user)\n\n logger.info(\n \"User %s has succesfully logged in via OAuth2.\", email\n )\n\n return redirect(url_for(\"home\"))\n\n else:\n flash(\"Sorry, we couldn't verify your email\", \"error\")\n\n logger.info(\"User %s failed to authenticate with OAuth2 because \"\n \"their email has not been verified with google.\", email)\n\n return redirect(url_for(\"login\"))",
"def authenticate(self, msg=\"\"):\n if self.request.user:\n return True\n else:\n templating = self.server.templating # save current templating settings\n templating_path = self.server.templating_path\n self.server.set_templating(\"pystache\")\n self.server.set_templating_path(\".\")\n params = {'hidden_fields': self.request.params} # pass all parameters\n self.response.send_template(self.login_template, params)\n self.server.templating = templating # restore templating settings\n self.server.templating_path = templating_path\n raise AlreadyProcessed()",
"def do_authenticate():\n #try:\n if 1:\n if 'referer' not in self.session:\n path = urlsplit(self.request.url)[2]\n self.session['referer'] = path\n self.session.put()\n #except:\n # pass\n aobj = self.config.auth_obj()\n self.get_controller()\n auth_res = aobj.auth(self.controller, *args, **kws)\n if auth_res:\n return func(*args, **kws)\n aobj.auth_redirect(self.controller, *args, **kws)\n # clear controller for development environment.",
"def accesscode(request, code):\n employee = Employee.objects.get(access_code=code)\n user = employee.user\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n return HttpResponseRedirect('/')",
"def authenticate(credentials):",
"def authenticate_with_github(username=None, password=None, code=None):\n if username is not None and password is not None:\n print(' (auth given as {}:{})'.format(username, '*'*len(password)))\n\n def _2fa_func():\n return code\n\n if code:\n return login(username, password, two_factor_callback=_2fa_func)\n else:\n return GitHub(username, password)",
"def auth(self, user):",
"def handleAuth(self, opcode, data, client):\n \n # Get the data the client sent.\n clientUser = data.getString()\n clientPass = data.getString()\n \n # Flag to be send back after serverside auth\n flag = None\n userpass = False\n loginTries = 0 # Not thought out now, will return to it later...\n \n # Get the data from DB\n try:\n # Here we can add the player to the PLAYERS{} by using a player\n # ID or something\n details = []\n details = Database.getAccountData(clientUser, clientPass)\n \n except:\n print \"Can't connected to ACCOUNT DATABASE\"\n \n # Will make some other checks later... this is just good for now..\n if details == None:\n flag = 2\n print \"Player: \", clientUser, \" Doesn't exist! or Incorrect!\"\n loginTries += 1\n \n # Check if the password/username match\n elif clientPass == details[2] and clientUser == details[1]:\n print details\n userpass = True\n self.network.base.PLAYERS[details[0]] = Player(self, details[0], details[1])\n print \"Player: \", details[1], \" Logged in, ID: \", details[0]\n flag = 1\n \n else:\n userpass = False\n print \"Player: \", clientUser, \" login incorrect\"\n loginTries += 1\n flag = 2\n \n # Create buffer\n pkg = PyDatagram()\n \n # Add response\n pkg.addUint16(SMSG_AUTH_RESPONSE)\n \n # Add the flag\n pkg.addUint16(flag)\n \n # Send the packet\n self.network.tcpWriter.send(pkg, client)",
"def auth():\n pass",
"def auth():\n pass",
"def do_login(self, backend, user):",
"def authenticate():\n if request.environ['PATH_INFO'] == \"/notification\":\n user = getUser()\n \n if user is None:\n raise HTTPResponse(body=\"Forbidden\", status=403)\n \n try:\n if authz.login(user):\n logging.info('Login success: %s', user.username)\n return\n except IOError:\n raise HTTPResponse(body=\"Error reading user file\", status=400)\n except Exception as e:\n raise HTTPResponse(body=\"Unexpected error\", status=400)\n \n raise HTTPResponse(body=\"Invalid username or password\", status=401)",
"def auth_user():\n global token\n app.logger.info(\"Microsoft Planner Service running on /auth port as expected\")\n try:\n request_count = 0\n if request_count == 0:\n token = get_tokens_as_app(client_id, user_code_info, tenant_id)\n request_count = 1 \n if 'access_token' in token:\n app.logger.info('Adding access token to cache...')\n add_token_to_cache(client_id, tenant_id, token)\n return_object = (f\"{token['refresh_token']}\")\n return render_template('token.html', return_object=return_object)\n else:\n return_error = (\"Token response did not result in a proper response. Athenticate again please.\")\n return render_template('token.html', return_error=return_error)\n except AttributeError or TypeError:\n return_error = ('Authentification failed. Please pull and restart your system and authenticate again.')\n return render_template('token.html', return_error=return_error)\n except adal.AdalError as err:\n return_error = (\"You're logged in with the wrong user. Please log out and authenticate again.\")\n return render_template('token.html', return_error=return_error)",
"def doAuth(pamh):\n\tprint('called third eye')\n\t# Abort if third_eye is disabled\n\tif config.getboolean(\"core\", \"disabled\"):\n\t\tsys.exit(0)\n\n\tif \"SSH_CONNECTION\" in os.environ or \"SSH_CLIENT\" in os.environ or \"SSHD_OPTS\" in os.environ:\n\t\tsys.exit(0)\n\tpamh.conversation(pamh.Message(pamh.PAM_TEXT_INFO, \"Attempting a face detection\"))\n\n\t# Run compare as python3 subprocess to circumvent python version and import issues\n\tstatus = subprocess.call([\"/usr/bin/python3\", os.path.dirname(os.path.abspath(__file__)) + \"/new_compare.py\", pamh.get_user()])\n\n\t# Status 12 means we aborted\n\tif status == 12:\n\t\treturn pamh.PAM_AUTH_ERR\n\t# Status 0 is a successful exit\n\telif status == 0:\n\t\t# Show the success message if it isn't suppressed\n\t\tpamh.conversation(pamh.Message(pamh.PAM_TEXT_INFO, \"Identified face as \" + pamh.get_user()))\n\t\treturn pamh.PAM_SUCCESS\n\t#unknown err\n\treturn pamh.PAM_SYSTEM_ERR",
"def test_auth_code_positive(self, api):\n self.builder.add_user(api.get_user())\n resp = api.login_user(api.get_user().username, api.get_user().password)\n self.builder.del_user(api.get_user())\n assert resp.status_code == 200",
"def authenticate(self, username, password):\n user = self.db.get_user(username)\n print(user)\n\n if user is None:\n self.__deny_state()\n\n if not self.argon2.verify(user[1], password):\n self.__deny_state()\n\n self.__accept_state()",
"def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")",
"def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")",
"def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")",
"def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")",
"def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")",
"def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")",
"def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")",
"def auth_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_code\")"
] | [
"0.66076124",
"0.6604477",
"0.6523267",
"0.64544946",
"0.6403558",
"0.6359539",
"0.6357345",
"0.6290257",
"0.6288636",
"0.6280958",
"0.6248023",
"0.6243201",
"0.6242323",
"0.6237753",
"0.6226705",
"0.6226705",
"0.62162906",
"0.61871463",
"0.6182399",
"0.61742467",
"0.6170313",
"0.61575925",
"0.6150889",
"0.6150889",
"0.6150889",
"0.6150889",
"0.6150889",
"0.6150889",
"0.6150889",
"0.6150889"
] | 0.70372474 | 0 |
Activates the specified MFA TOTP device for the user. Activation requires manual interaction with the Console. | def activate_mfa_totp_device(self, user_id, mfa_totp_device_id, mfa_totp_token, **kwargs):
resource_path = "/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/activate"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"activate_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id,
"mfaTotpDeviceId": mfa_totp_device_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=mfa_totp_token,
response_type="MfaTotpDeviceSummary")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=mfa_totp_token,
response_type="MfaTotpDeviceSummary") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def activate_application_token(self, apptoken, temptoken) -> bool:\n await self.raw_request(\n self.URL_ACTIVATE.format(apptoken=apptoken, temptoken=temptoken)\n )\n return True",
"def activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.profile.email_confirmed = True\n user.save()\n login(request, user)\n return redirect('home')\n else:\n return render(request, 'registration/activation_invalid.html')",
"def activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n return render(request, 'accounts/active_done.html')\n else:\n return HttpResponse('Activation link is invalid!')",
"def enable_mfa_device(self, user_name, serial_number,\r\n auth_code_1, auth_code_2):\r\n params = {'UserName' : user_name,\r\n 'SerialNumber' : serial_number,\r\n 'AuthenticationCode1' : auth_code_1,\r\n 'AuthenticationCode2' : auth_code_2}\r\n return self.get_response('EnableMFADevice', params)",
"def activate_token(request, token):\n # Getting environment from settings\n debug = settings.DEBUG\n\n # Based on the debug redirect the user to correct url\n if debug:\n REDIRECT_URL = 'http://localhost:3000'\n else:\n REDIRECT_URL = 'https://leadbook-challenge.herokuapp.com'\n\n try:\n profile = Profile.objects.get(activation_key=token)\n profile.is_verified = True\n profile.save()\n except Profile.DoesNotExist:\n profile = None\n\n if profile:\n return HttpResponseRedirect('{}/activation/success'.format(REDIRECT_URL))\n else:\n return HttpResponseRedirect('{}/activation/failed'.format(REDIRECT_URL))",
"def activate(request, uidb64, token):\r\n\ttry:\r\n\t\tuid = force_text(urlsafe_base64_decode(uidb64))\r\n\t\tuser = User.objects.get(pk=uid)\r\n\texcept (TypeError, ValueError, OverflowError, User.DoesNotExist):\r\n\t\tuser = None\r\n\r\n\tif user is not None and account_activation_token.check_token(user, token):\r\n\t\t# User activated and redirected to the homepage\r\n\t\tuser.is_active = True\r\n\t\tuser.profile.email_confirmed = True\r\n\t\tuser.save()\r\n\t\tlogin(request, user, backend='django.contrib.auth.backends.ModelBackend')\r\n\t\tgames = Game.objects.all()\r\n\t\treturn redirect('/', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})\r\n\telse:\r\n\t\treturn render(request, 'account_activation_invalid.html')",
"def activate_user(request, uidb64, token):\n activation_session_token = '_activation_reset_token'\n activation_url_token = 'user-activation'\n title = \"Account activation\"\n context = {'title': 'Invalid Activation Link', 'isvalid': False}\n\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user and user.is_active:\n messages.success(request, 'The account is active.')\n return redirect('login')\n\n if request.method == 'GET':\n if token == activation_url_token:\n session_token = request.session.get(activation_session_token)\n if default_token_generator.check_token(user, session_token):\n # If the token is valid, display the password reset form.\n form = forms.ActivationForm(user=user)\n return render(request, 'user/activate_user.html', {\n 'form': form, 'title': title})\n else:\n if default_token_generator.check_token(user, token):\n # Store the token in the session and redirect to the\n # password reset form at a URL without the token. That\n # avoids the possibility of leaking the token in the\n # HTTP Referer header.\n request.session[activation_session_token] = token\n redirect_url = request.path.replace(token, activation_url_token)\n return HttpResponseRedirect(redirect_url)\n else:\n if token == activation_url_token:\n session_token = request.session.get(activation_session_token)\n form = forms.ActivationForm(user=user, data=request.POST)\n if form.is_valid() and default_token_generator.check_token(user, session_token):\n with transaction.atomic():\n user.set_password(form.cleaned_data['password1'])\n user.is_active = True\n # Check legacy credentials\n check_legacy_credentials(user, user.email)\n user.save()\n email = user.associated_emails.first()\n email.verification_date = timezone.now()\n email.is_verified = True\n email.save()\n request.session.pop(activation_session_token)\n logger.info('User activated - {0}'.format(user.email))\n messages.success(request, 'The account has been activated.')\n login(request, user)\n return redirect('project_home')\n return render(request, 'user/activate_user.html', {'form': form,\n 'title': title})\n\n return render(request, 'user/activate_user_complete.html', context)",
"def activate():\n try:\n body = request.get_json()\n\n activate_token = body[\"activate_token\"]\n password = body[\"password\"]\n\n if len(password) < 3 or len(password) > 50:\n return bad_request()\n\n if not models.token_exists(activate_token):\n\n return bad_request()\n\n student_hash = models.create_hash(password)\n models.save_hash(student_hash, activate_token)\n\n except KeyError:\n return bad_request()\n except Exception as e:\n print(e)\n return server_error()\n\n return created()",
"def account_activate(request, uidb64, token):\r\n try:\r\n # decode the user's id and get the user by id.\r\n user_id = smart_str(urlsafe_base64_decode(uidb64))\r\n user = get_object_or_404(User, id=user_id)\r\n if user.is_active:\r\n # Display already activated account message\r\n messages.success(request, f'Your Account already activated. You can login.', extra_tags='activation-valid')\r\n # check if the token is valid.\r\n elif account_activation_token.check_token(user, token):\r\n user.is_active = True\r\n # user.previously_logged_in = True\r\n user.save()\r\n # Display activation success message\r\n messages.success(request, f'Your Account has been activated successfully. Now you can login.', extra_tags='activation-valid') \r\n else:\r\n # Display error message.\r\n messages.error(request, f'The activation link is invalid. Please request a new one.', extra_tags='activation-invalid') \r\n except DjangoUnicodeDecodeError:\r\n # Display error message.\r\n messages.error(request, f'The activation link is invalid. Please request a new one.', extra_tags='activation-invalid') \r\n return redirect('accounts:login')",
"def req_display_otp(self):\n\n ret = self.ui_auth.create_new_one_time_pwd()\n if ret is not None:\n self.error_msg_queue_list.append(ret)",
"def activate(request, activation_key, template_name='registration/activate.html'):\n activation_key = activation_key.lower() # Normalize before trying anything with it.\n account = RegistrationProfile.objects.activate_user(activation_key)\n account.is_active = True\n account.save()\n return render(request, template_name,\n { 'account': account,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS })",
"def activate_account(self):\n self.driver.execute_script(\"window.scrollTo(0, 1000)\")\n self.click_on_element_by_css(tep.ACTIVATION_LINK)\n self.click_on_element_by_css(tep.ACTIVATION_BUTTON)",
"def activate_factor(self, state_token, factor_id, passcode, relay_state=None):\n request = {\n 'stateToken': state_token,\n 'passCode': passcode,\n 'relayState': relay_state\n }\n\n response = ApiClient.post_path(self, '/factors/{0}/lifecycle/activate'.format(factor_id), request)\n return Utils.deserialize(response.text, AuthResult)",
"async def set_mfa_and_connect(self, mfa_input: str):\n await self._set_mfa_code(mfa_input)\n await asyncio.sleep(10)\n await self._set_products()",
"def check_for_activate(self):\n try:\n # Attempt to activate. If the user has completed pairing on the,\n # backend, this will succeed. Otherwise it throws and HTTPError()\n\n token = self.data.get(\"token\")\n login = self.api.activate(self.state, token) # HTTPError() thrown\n\n # When we get here, the pairing code has been entered on the\n # backend and pairing can now be saved.\n # The following is kinda ugly, but it is really critical that we\n # get this saved successfully or we need to let the user know that\n # they have to perform pairing all over again at the website.\n try:\n IdentityManager.save(login)\n except Exception as e:\n self.log.debug(\"First save attempt failed: \" + repr(e))\n time.sleep(2)\n try:\n IdentityManager.save(login)\n except Exception as e2:\n # Something must be seriously wrong\n self.log.debug(\"Second save attempt failed: \" + repr(e2))\n self.abort_and_restart()\n\n if mycroft.audio.is_speaking():\n # Assume speaking is the pairing code. Stop TTS of that.\n mycroft.audio.stop_speaking()\n\n self.enclosure.activate_mouth_events() # clears the display\n\n # Notify the system it is paired\n self.gui.show_page(\"pairing_done.qml\", override_idle=False)\n self.bus.emit(Message(\"mycroft.paired\", login))\n\n self.pairing_performed = True\n with self.pair_dialog_lock:\n if self.mycroft_ready:\n # Tell user they are now paired\n self.speak_dialog(self.paired_dialog)\n mycroft.audio.wait_while_speaking()\n else:\n self.speak_dialog(\"wait.for.startup\")\n mycroft.audio.wait_while_speaking()\n\n # Un-mute. Would have been muted during onboarding for a new\n # unit, and not dangerous to do if pairing was started\n # independently.\n self.bus.emit(Message(\"mycroft.mic.unmute\", None))\n\n # Send signal to update configuration\n self.bus.emit(Message(\"configuration.updated\"))\n\n # Allow this skill to auto-update again\n self.reload_skill = True\n except HTTPError:\n # speak pairing code every 60th second\n with self.counter_lock:\n if self.count == 0:\n self.speak_code()\n self.count = (self.count + 1) % 6\n\n if time.monotonic() > self.time_code_expires:\n # After 20 hours the token times out. Restart\n # the pairing process.\n with self.counter_lock:\n self.count = -1\n self.data = None\n self.handle_pairing()\n else:\n # trigger another check in 10 seconds\n self.__create_activator()\n except Exception as e:\n self.log.debug(\"Unexpected error: \" + repr(e))\n self.abort_and_restart()",
"def send_mfa(\n self,\n form: object = None, # noqa: ARG002\n code: str = \"\",\n trusted_device: bool = True,\n ) -> None:\n el_otp = self._driver.find_element(By.CSS_SELECTOR, \"input[name=otc]\", timeout=5)\n el_otp.clear()\n el_otp.send_keys(code)\n\n el_verify = self._driver.find_element(By.CSS_SELECTOR, \"input[type=submit]\", timeout=5)\n if el_verify.accessible_name != \"Verify\":\n msg = f'{self.__class__.__name__}: Cannot find \"Verify\" button'\n raise IdpError(msg)\n\n if trusted_device:\n el_verify.click()\n\n self._stay_signed_in()",
"def activate_user(self, email):\r\n activation_key = Registration.objects.get(user__email=email).activation_key\r\n # and now we try to activate\r\n check_for_get_code(self, 200, reverse('activate', kwargs={'key': activation_key}))\r\n # Now make sure that the user is now actually activated\r\n self.assertTrue(User.objects.get(email=email).is_active)",
"def _activate_user(self, email):\r\n activation_key = registration(email).activation_key\r\n\r\n # and now we try to activate\r\n resp = self.client.get(reverse('activate', kwargs={'key': activation_key}))\r\n return resp",
"def activate(ctx: CLIContext, access_key):\n with Session() as session:\n try:\n data = session.KeyPair.activate(access_key)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='activation',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='activation',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n extra_info={\n 'access_key': access_key,\n },\n )",
"def activate_user(activation_code, new_password):\n um = logic.UserManager()\n try:\n user = um.lookup_user_by_activation_code(activation_code)\n user.activate()\n user.set_password(new_password)\n except ex.UserNotFoundError:\n blogger.debug(\"no user found with activation code %s\" % activation_code)\n transaction.abort()\n return dict(activated=False)\n else:\n transaction.commit()\n return dict(activated=True)",
"def dev_dial(action):\n\n try:\n client = AMIClient(address=AUTH_CREDS['address'], port=AUTH_CREDS['port'])\n client.login(username=AUTH_CREDS['username'], secret=AUTH_CREDS['secret'])\n\n future = client.send_action(action)\n if VERBOSE:\n print(future.response or \"None\")\n\n client.logoff()\n\n except Exception as e:\n print(\"Error: %s\" % e.strerror)\n sys.exit(1)",
"def activate(self, *args, **kwargs):\n username = self.validate_key(kwargs.get(\"activation_key\"))\n user = self.get_user(username)\n user.is_active = True\n user.save()\n return user",
"def add_user(self):\n\n pin, code = self.get_auth_pin() \n print(\"Enter the PIN '{}' into the Add Application window and click Add Application\".format(pin))\n input(\"waiting press enter to continue...\")\n\n access_token, refresh_token = self.get_tokens(code)\n user_id = self.tokens.get_next_user_id()\n self.tokens.insert_user(user_id, access_token, refresh_token)\n tstat_ids = self.get_tstat_ids(access_token)\n for tstat_id in tstat_ids:\n logger.info(\"Adding Thermostat ID: {}\".format(tstat_id))\n self.tokens.insert_tstat(user_id, tstat_id)",
"def send_otp_to_primary_mobile(otp, mobile):\n print('Sending otp to mobile: ', otp, mobile)",
"def user_activation(user):\n act_hash = random_password(32)\n user.set_hashword(act_hash)\n user.save()\n base_url = url_for('public.home', _external=True)\n act_url = url_for(\n 'auth.activate',\n userid=user.id,\n userhash=act_hash,\n _external=True)\n if not 'mailman' in current_app.extensions:\n logging.warning('E-mail extension has not been configured')\n return act_hash\n msg = EmailMessage()\n msg.subject = 'Your dribdat account'\n msg.body = \\\n \"Hello %s,\\n\" % user.username \\\n + \"Thanks for signing up at %s\\n\\n\" % base_url \\\n + \"Tap here to activate your account:\\n\\n%s\" % act_url\n msg.to = [user.email]\n logging.info('Sending activation mail to user %d' % user.id)\n logging.debug(act_url)\n msg.send(fail_silently=True)\n return act_hash",
"def activate(self) -> None:\n self._bot.inject_flows_from(self)\n self.is_activated = True",
"def login_on_activation(sender, user, request, **kwargs):\n user.backend = 'storybase_user.auth.backends.EmailModelBackend'\n login(request, user)",
"def _turn_on(self):\n logger.info(\"Check antenna power\")\n power = yield WaitDBus(self.gsm_device.GetAntennaPower)\n logger.info(\"antenna power is %d\", power)\n if power:\n yield None\n logger.info(\"turn on antenna power\")\n try:\n yield WaitDBus(self.gsm_device.SetAntennaPower, True)\n except dbus.exceptions.DBusException, ex:\n if ex.get_dbus_name() != 'org.freesmartphone.GSM.SIM.AuthFailed':\n raise\n yield self._ask_pin()",
"def turn_on_modem(self):\n if not self.is_power_on():\n self._logger.debug(\"Switching modem on...\")\n self.set_pin()\n # give modem some time to login\n time.sleep(10)\n else:\n self._logger.debug(\"Modem is already powered on...\")",
"def activate_profile(field, code, request):\n try:\n activation = ActivationProfile.objects.get(**{field:code})\n except ActivationProfile.DoesNotExist:\n messages.error(request, _('Activation code expired or not valid!'))\n return False\n if timezone.now() < activation.valid_through:\n activation.user.is_active = True\n activation.user.set_unusable_password()\n activation.user.save()\n if request.user.is_anonymous():\n if field == 'token':\n user = authenticate(username=activation.user.username, token=activation.token)\n elif field == 'sms_key':\n user = authenticate(username=activation.user.username, code=activation.sms_key)\n else:\n user = None\n activation.delete()\n if user:\n login(request, user)\n messages.success(request, _(\"\"\"Profile activated successfully! You should change your password!\"\"\"))\n return True\n else:\n return False\n else:\n messages.success(request, _(\"\"\"You already have an account!\"\"\"))\n return False"
] | [
"0.60715777",
"0.6028133",
"0.59996897",
"0.58769685",
"0.5760404",
"0.5734255",
"0.5616806",
"0.5603392",
"0.5589512",
"0.5500395",
"0.5455279",
"0.5448774",
"0.5440349",
"0.54232854",
"0.5410598",
"0.5386604",
"0.5370613",
"0.53615427",
"0.5324096",
"0.53227437",
"0.5316876",
"0.5257387",
"0.5242015",
"0.5238215",
"0.52204853",
"0.5180888",
"0.5168266",
"0.5167923",
"0.5149471",
"0.51486313"
] | 0.6753613 | 0 |
Moves the specified tag namespace to the specified compartment within the same tenancy. To move the tag namespace, you must have the manage tagnamespaces permission on both compartments. For more information about IAM policies, see `Details for IAM`__. Moving a tag namespace moves all the tag key definitions contained in the tag namespace. | def change_tag_namespace_compartment(self, tag_namespace_id, change_tag_namespace_compartment_detail, **kwargs):
resource_path = "/tagNamespaces/{tagNamespaceId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_tag_namespace_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"tagNamespaceId": tag_namespace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
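        # Besides the JSON accept/content-type headers, only an optional retry token is sent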
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
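        # Prefer a per-call retry strategy when one is provided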
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_tag_namespace_compartment_detail)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_tag_namespace_compartment_detail) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_namespace(self, doc, namespace):\r\n ns = u'{%s}' % namespace\r\n nsl = len(ns)\r\n for elem in doc.getiterator():\r\n if elem.tag.startswith(ns):\r\n elem.tag = elem.tag[nsl:]\r\n else:\r\n pass",
"def remove_namespace(doc, namespace=u\"{http://www.EcoInvent.org/EcoSpold02}\"):\n ns = u'{}'.format(namespace)\n nsl = len(ns)\n for elem in doc.getiterator():\n if elem.tag.startswith(ns):\n elem.tag = elem.tag[nsl:]",
"def update():\n for namespace in metadata.get_namespaces():\n logging.info('Switching namespace: \\'%s\\'', namespace)\n namespace_manager.set_namespace(namespace)\n update_per_namespace()\n\n namespace_manager.set_namespace('')\n return ('', 204)",
"def ReplaceNamespace(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceNamespace')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def ReplaceNamespace(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceNamespace')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def move_to(i3: i3ipc.Connection, workspace: int):\n i3.command(f\"move container to workspace number {workspace}\")",
"async def remove_namespace(self, namespace: str) -> Any:\n if namespace == self.get_namespace(): # if it belongs to this app's namespace\n raise ValueError(\"Cannot remove namespace with the same name as operating namespace\")\n\n return await self.AD.state.remove_namespace(namespace)",
"def clean_up_namespaces(node, namespace=None):\n if namespace is not None:\n Namespaces.delete_namespace(node, namespace)\n return\n\n namespace_copy = deepcopy(Namespaces.__namespaces)\n for namespace_name in namespace_copy:\n Namespaces.delete_namespace(node, namespace_name)",
"def update_tag_namespace(self, tag_namespace_id, update_tag_namespace_details, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}\"\n method = \"PUT\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_tag_namespace got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_namespace_details,\n response_type=\"TagNamespace\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_namespace_details,\n response_type=\"TagNamespace\")",
"def fix(self):\n for namespace in pm.listNamespaces():\n for elem in namespace.ls():\n elem.rename(elem.split(\":\")[-1])\n namespace.remove()\n\n self.run()",
"def removeNamespace(self, *args):\n return _libsbml.XMLToken_removeNamespace(self, *args)",
"def set_target_namespace(self, namespace):\n # do shit\n self.target_namespace = namespace.strip(\":\")",
"def test_replace_namespaced_deployment_config(self):\n pass",
"def sync_namespace(alias, reg_code, authToken, space=None, action=None):\n if space == None:\n action = 'get'\n print(\" ACTION: GET\")\n elif action == None:\n if 'aeskey' not in space:\n print(\"Space not encrypted\")\n quit()\n action = 'update'\n print(\" ACTION: UPDATE\")\n elif action == 'delete':\n print(\" ACTION: DELETE\")\n url = endpoint('namespace')\n headers={'authorizationToken': authToken}\n data = json.dumps({'action': action, 'alias': alias, 'reg_code': reg_code, 'namespace': space})\n payload_size = sys.getsizeof(data)\n print(\" Size of payload is: %s\" % (convert_size(payload_size)))\n print(\" Max payload is: %s\" % (convert_size(max_payload_size)))\n if payload_size >= max_payload_size:\n print(\" OVER MAX PAYLOAD: %s\" % (convert_size(max_payload_size)))\n quit()\n r = requests.post(url, headers=headers, data=data) \n print(\" Request made\")\n if r.status_code == 403:\n print(\" Invalid registration code, exiting\")\n quit()\n elif r.status_code == 406:\n print(\" Namespace mismatch\")\n quit()\n else:\n print(\" └──statusCode:\" + str(r.status_code) )\n return r",
"def delete_namespace(node, namespace):\n cmd_timeout = 5\n cmd = f\"ip netns delete {namespace}\"\n (ret_code, _, delete_errmsg) = \\\n exec_cmd(node, cmd, timeout=cmd_timeout, sudo=True)\n if ret_code != 0:\n cmd = f\"ip netns list {namespace}\"\n (stdout, _) = \\\n exec_cmd_no_error(node, cmd, timeout=cmd_timeout, sudo=True)\n if stdout == namespace:\n raise RuntimeError(f\"Could not delete namespace \"\n f\"({namespace}): {delete_errmsg}\")\n try:\n Namespaces.__namespaces.remove(namespace)\n except ValueError:\n pass",
"def test_replace_net_namespace(self):\n pass",
"def test_replace_namespaced_role(self):\n pass",
"def create_tag_namespace(self, create_tag_namespace_details, **kwargs):\n resource_path = \"/tagNamespaces\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_tag_namespace got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_namespace_details,\n response_type=\"TagNamespace\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_namespace_details,\n response_type=\"TagNamespace\")",
"def removeNamespace(self, *args):\n return _libsbml.SBMLNamespaces_removeNamespace(self, *args)",
"def set_namespace(self, namespace: str) -> None:\n self._namespace = namespace",
"def delete_tag_namespace(self, tag_namespace_id, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\",\n \"opc_request_id\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_tag_namespace got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing),\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)",
"def move_objects(self, src_s3_prefix_path, destination_s3_prefix_path):\n src_bucket_name, src_prefix = S3Util.get_bucket_and_key(src_s3_prefix_path)\n destination_bucket_name, destination_prefix = S3Util.get_bucket_and_key(destination_s3_prefix_path)\n\n src_bucket = self.s3_resource.Bucket(src_bucket_name)\n destination_bucket = self.s3_resource.Bucket(destination_bucket_name)\n\n for obj in src_bucket.objects.filter(Prefix=src_prefix):\n source_obj = self._object_summary_to_copy_source(obj)\n\n # replace the prefix\n new_key = obj.key.replace(src_prefix, destination_prefix)\n destination_bucket.copy(CopySource=source_obj, Key=new_key)\n obj.delete()",
"def replace_namespaced_namespace(self, body, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.replace_namespaced_namespace_with_http_info(body, name, **kwargs)\n else:\n (data) = self.replace_namespaced_namespace_with_http_info(body, name, **kwargs)\n return data",
"def test_delete_net_namespace(self):\n pass",
"def move_tag_seq(words, seq, dest, punc=None):\n if len(seq) > len(words):\n return None\n seq_start = index_tag_seq(words, seq)\n if seq_start > -1:\n move_words = words[seq_start:seq_start+len(seq)]\n words = words[:seq_start] + words[seq_start+len(seq):]\n if dest == 'start':\n words = move_words + words\n if dest == 'end':\n if punc is not None:\n words.append(punc)\n words += move_words\n return words\n return None",
"def test_delete_namespaced_deployment_config(self):\n pass",
"def namespace_delete(cursor, namespace_id):\n haystack = (namespace_id,)\n query = \"DELETE FROM namespaces WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))",
"def move(owner_id=None, target_album_id=None, photo_id=None):\n params = {\n 'owner_id': owner_id,\n 'target_album_id': target_album_id,\n 'photo_id': photo_id\n }\n result = call('photos.move', **params)\n return parse_response(result)",
"def swap_cnames(profile, source_environment, destination_environment):\n client = boto3client.get(\"elasticbeanstalk\", profile)\n params = {}\n params[\"SourceEnvironmentName\"] = source_environment\n params[\"DestinationEnvironmentName\"] = destination_environment\n return client.swap_environment_cnames(**params)",
"def post_namespace_delete(self, resource_id, resource_dict):\n pass"
] | [
"0.55153644",
"0.5485366",
"0.5347061",
"0.52107394",
"0.52107394",
"0.5199132",
"0.5165057",
"0.5079889",
"0.5022554",
"0.49942735",
"0.49675435",
"0.49604434",
"0.49384886",
"0.48304433",
"0.4796919",
"0.4729666",
"0.46716085",
"0.46684968",
"0.4638175",
"0.46381432",
"0.46096462",
"0.45636493",
"0.45625663",
"0.45530888",
"0.45495844",
"0.4545023",
"0.4537018",
"0.45306963",
"0.4528945",
"0.45188636"
] | 0.61622286 | 0 |
Creates a new dynamic group in your tenancy. You must specify your tenancy's OCID as the compartment ID in the request object (remember that the tenancy is simply the root compartment). Notice that IAM resources (users, groups, compartments, and some policies) reside within the tenancy itself, unlike cloud resources such as compute instances, which typically reside within compartments inside the tenancy. For information about OCIDs, see `Resource Identifiers`__. You must also specify a name for the dynamic group, which must be unique across all dynamic groups in your tenancy, and cannot be changed. Note that this name also has to be unique across all groups in your tenancy. You can use this name or the OCID when writing policies that apply to the dynamic group. For more information about policies, see `How Policies Work`__. You must also specify a description for the dynamic group (although it can be an empty string). It does not | def create_dynamic_group(self, create_dynamic_group_details, **kwargs):
resource_path = "/dynamicGroups"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_dynamic_group got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
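        # Fall back to the client-level retry strategy unless one was passed explicitly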
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_dynamic_group_details,
response_type="DynamicGroup")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_dynamic_group_details,
response_type="DynamicGroup") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response",
"def create_group(group_id, group_name):\n\n kwargs = config.DEFAULT_REST_KWARGS\n kwargs[\"data\"] = {\"id\": group_id, \"name\": group_name}\n http_response = call_rest_api(\"/identities/groups/\", \"post\", **kwargs)\n if http_response.status_code != 201: # 201 = 'new group created'\n raise ValueError(http_response.text)\n logger.log(f\"New custom group, {group_name}, with ID: {group_id}, was created successfully.\")",
"def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))",
"def create_group(self, groupname):\n data = {\"groupname\": groupname}\n headers = {\"user-agent\": self.u_agent}\n req_url = self.normalize_admin_url(\"groups\")\n res = requests.post(\n req_url,\n headers=headers,\n auth=self.auth,\n data=json.dumps(data),\n verify=False,\n )\n if res.status_code == 201:\n return Response(0, u\"Group {} has been created\".format(groupname))\n else:\n return Response(res.status_code, res)",
"def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)",
"def create_namespaced_group(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def create_group(self, group_name, group_type):\n grp_data = {\"name\": group_name, \"type\": group_type}\n return requests.post(self.groups_url, data=json.dumps(grp_data),\n headers=self.headers)",
"def create_TestGroup(test_case, # type: AnyMagpieTestCaseType\n override_group_name=null, # type: Optional[Str]\n override_discoverable=null, # type: Optional[bool]\n override_data=null, # type: Optional[JSON]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) -> JSON\n app_or_url = get_app_or_url(test_case)\n data = override_data\n if override_data is null:\n data = {\"group_name\": override_group_name if override_group_name is not null else test_case.test_group_name}\n # only add 'discoverable' if explicitly provided here to preserve original behaviour of 'no value provided'\n if override_discoverable is not null:\n data[\"discoverable\"] = override_discoverable\n grp_name = (data or {}).get(\"group_name\")\n if grp_name:\n test_case.extra_group_names.add(grp_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/groups\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n return check_response_basic_info(resp, 201, expected_method=\"POST\")",
"def __create_new_group(self, group_name) -> None:\n group = Group(name=group_name)\n group.save()\n\n self.__add_permission_to_group(group)",
"def create_group(self, name):\n\t\tdata = {\"name\":name}\n\t\tresponse = self.client.post(self._endpoint + \"/group\", content=data)\n\t\treturn Group(\n\t\t\tresponse.json['group_id'],\n\t\t\tself.user_id,\n\t\t\tself.site_id,\n\t\t\tdata=response.json\n\t\t)",
"def create_placement_group(self, name, strategy='cluster'):\r\n params = {'GroupName':name, 'Strategy':strategy}\r\n group = self.get_status('CreatePlacementGroup', params, verb='POST')\r\n return group",
"def create_group(self, event):\n body = event['body']\n body = json.loads(body)\n\n # Required field in POST body\n if 'group_name' not in body:\n return self.get_bad_request('POST body missing group_name')\n\n group_name = body['group_name']\n user = self.mealShareUsers.get_user_cognito_data(event)\n user_id = user['user_id']\n \n # Add the creator to the group, as the initial member\n group_id = self.mealShareGroups.create_group(group_name)\n success = self.mealShareGroups.add_user_to_group(user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully created group {} with ID {}'.format(group_name, group_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to create group {} by user {}'.format(group_name, user_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }",
"def create(self, group_name):\n METHOD = 'POST'\n API_PATH = '/groups/create'\n\n data = {'group_name': group_name}\n\n # Make REST call\n resp = self._rest_call[METHOD](API_PATH, data=data)\n if resp.status_code == 200:\n return resp.json()\n\n elif resp.status_code == 403:\n raise AuthorizationError(\"User is not authorized or token is incorrect.\")\n\n else:\n if resp.json().get(\"error_code\") in ERROR_CODES:\n raise ERROR_CODES[resp.json().get('error_code')](resp.json().get('message'))\n else:\n raise APIError(\"Response code {0}: {1} {2}\".format(resp.status_code,\n resp.json().get('error_code'),\n resp.json().get('message')))",
"def allocate_group(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_AllocateNewGroupID(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_AllocateNewGroupID(key1, result_val)\n return result_val.i",
"def add_group():\n name = request.form['name']\n data, code, message = FIELD_SERVICE.add_group(name)\n return __result(data, code, message)",
"def create_group(self, identifier: str, group_name: str) -> Group:\n\n # APM-137701 - Namespace for custom device calculation should not be set\n group_id = get_group_id(\"\", identifier)\n if group_id in self._groups:\n raise ValueError(\"Group \" + group_name + \" already exist, id: \" + str(group_id))\n else:\n group = Group(group_id, group_name, self._technologies, self._results_builder)\n\n self._groups[group_id] = group\n return group",
"def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200",
"def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)",
"def create_group(self, create_group_details, **kwargs):\n resource_path = \"/groups\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_group_details,\n response_type=\"Group\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_group_details,\n response_type=\"Group\")",
"def post_groups(\n data: PostGroupIn, tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.create_group\")\n grp = Group(\n description=data.description,\n members=[tkn.owner],\n group_name=data.group_name,\n owner=tkn.owner,\n ).save()\n logging.debug(\n \"Created group %s (%s) owned by %s\",\n data.group_name,\n str(grp.pk),\n tkn.owner.character_name,\n )\n return GetGroupOut.from_record(grp)",
"def create_adgroup(self, account_id, name, campaign_id,\n creative_id, bid_type=None, bid_info=None, max_bid=None,\n tracking_specs=None, view_tags=None, objective=None,\n adgroup_status=None, targeting=None, conversion_specs=None, batch=False):\n path = 'act_%s/adgroups' % account_id\n args = {\n 'name': name,\n 'campaign_id': campaign_id,\n 'creative': json.dumps({'creative_id': creative_id}),\n }\n if bid_type:\n args['bid_type'] = bid_type\n if max_bid:\n # can only use max_bid with CPM bidding\n args['max_bid'] = max_bid\n elif bid_info:\n args['bid_info'] = json.dumps(bid_info)\n\n if tracking_specs:\n args['tracking_specs'] = json.dumps(tracking_specs)\n if view_tags:\n args['view_tags'] = json.dumps(view_tags)\n if objective:\n args['objective'] = objective\n if adgroup_status:\n args['adgroup_status'] = adgroup_status\n if targeting:\n args['targeting'] = json.dumps(targeting)\n if conversion_specs:\n args['conversion_specs'] = json.dumps(conversion_specs)\n return self.make_request(path, 'POST', args, batch=batch)",
"def autoCreateGroup(cleaned_data, cookie_user, isAutoApproved=False, querystring_content=False):\n existingSites = Site.objects.filter(\n domain=cleaned_data['domain'],\n )\n if len(existingSites) > 0:\n\n try:\n site = existingSites[0]\n group = site.group\n except Exception, e:\n raise Exception(\"Site \"+cleaned_data['domain']+\" has no group.\")\n else:\n # make a group and site\n try:\n group = Group.objects.create(\n name=cleaned_data['name'],\n short_name=cleaned_data['short_name'],\n approved=False,\n temp_interact=0,\n requires_approval=False,\n )\n except Exception, e:\n print \"* * * ** * * * * * * * EXCEPTION \"\n print e\n logger.warn(e)\n groups = Group.objects.filter(\n short_name=cleaned_data['short_name']\n )\n if len(groups) == 1:\n group = groups[0]\n elif len(groups) > 1:\n raise Exception(\"More than one group with shortname found: \" + cleaned_data['short_name'])\n else:\n raise Exception(\"No groups found with shortname: \" + cleaned_data['short_name'])\n\n site = Site.objects.create(\n name=cleaned_data['domain'],\n domain=cleaned_data['domain'],\n group=group,\n # this is whether or not a querystring is counted in the url - we should rename this\n querystring_content=querystring_content,\n )\n\n blessed_tags = addDefaultsForNewGroup(group, cookie_user)\n autoApproveUserAsAdmin(group, cookie_user, isAutoApproved=isAutoApproved)\n\n return group, site, blessed_tags",
"def create():\n name = request.json['name']\n level = request.json['level']\n manager = request.json['manager']\n if models.user.Group.get(name):\n raise Conflict('Group already exists.', creation=False)\n else:\n authorize(manager, level=level)\n group = models.user.Group(name=name, level=level, manager=manager)\n models.db.session.add(group)\n models.db.session.commit()\n return response(200, creation=True)",
"def create_ad_group(client, customer_id, campaign_resource_name):\n ad_group_service = client.get_service(\"AdGroupService\")\n\n # Creates the ad group.\n # Note that the ad group type must not be set.\n # Since the advertising_channel_sub_type is APP_CAMPAIGN,\n # 1- you cannot override bid settings at the ad group level.\n # 2- you cannot add ad group criteria.\n ad_group_operation = client.get_type(\"AdGroupOperation\")\n ad_group = ad_group_operation.create\n ad_group.name = f\"Earth to Mars cruises {uuid4()}\"\n ad_group.status = client.enums.AdGroupStatusEnum.ENABLED\n ad_group.campaign = campaign_resource_name\n\n ad_group_response = ad_group_service.mutate_ad_groups(\n customer_id=customer_id, operations=[ad_group_operation]\n )\n\n ad_group_resource_name = ad_group_response.results[0].resource_name\n print(f'Ad Group created with resource name: \"{ad_group_resource_name}\".')\n return ad_group_resource_name",
"def create(person_group_id, name=None, user_data=None):\n name = person_group_id if name is None else name\n url = 'persongroups/{}'.format(person_group_id)\n json = {\n 'name': name,\n 'userData': user_data,\n }\n\n return util.request('PUT', url, json=json)",
"def create_new_group(self, group_id, poll_id, name):\n obj = self.table()\n obj.group_id = str(group_id)\n obj.poll_id = poll_id\n obj.name = name\n self.db.session.add(obj)\n self.db.session.commit()",
"def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))",
"def capacitygroup_create(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_create(cmd_ctx, cpc, options))",
"def group_add_name(org_id, data):\n if data.has_key('groupname'):\n groupname = data['groupname']\n add_group(org_id, groupname, False)",
"def request_group_create():\n return Response(render_template('admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/create\"),\n mimetype='text/html')"
] | [
"0.7186179",
"0.71604204",
"0.7018895",
"0.679185",
"0.6695333",
"0.661491",
"0.6580392",
"0.6452021",
"0.64109707",
"0.64004433",
"0.6369519",
"0.63523793",
"0.6294868",
"0.628068",
"0.6277388",
"0.62702346",
"0.6215326",
"0.6186225",
"0.61719334",
"0.6139862",
"0.61309004",
"0.6128545",
"0.6117724",
"0.6116714",
"0.61166877",
"0.6114094",
"0.61130893",
"0.61098534",
"0.6097828",
"0.60967726"
] | 0.71613544 | 1 |
Creates a new identity provider in your tenancy. For more information, see `Identity Providers and Federation`__. You must specify your tenancy's OCID as the compartment ID in the request object. Remember that the tenancy is simply the root compartment. For information about OCIDs, see `Resource Identifiers`__. You must also specify a name for the `IdentityProvider`, which must be unique across all `IdentityProvider` objects in your tenancy and cannot be changed. You must also specify a description for the `IdentityProvider` (although it can be an empty string). It does not have to be unique, and you can change it anytime with | def create_identity_provider(self, create_identity_provider_details, **kwargs):
resource_path = "/identityProviders"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_identity_provider got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
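        # When a retry strategy is active, an opc-retry-token is added so retried creates remain idempotent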
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_identity_provider_details,
response_type="IdentityProvider")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_identity_provider_details,
response_type="IdentityProvider") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_identity_provider(module, sdk, cloud, name):\n\n if module.check_mode:\n return True, None\n\n description = module.params.get('description')\n enabled = module.params.get('enabled')\n domain_id = module.params.get('domain_id')\n remote_ids = module.params.get('remote_ids')\n\n if enabled is None:\n enabled = True\n if remote_ids is None:\n remote_ids = []\n\n attributes = {\n 'domain_id': domain_id,\n 'enabled': enabled,\n 'remote_ids': remote_ids,\n }\n if description is not None:\n attributes['description'] = description\n\n try:\n idp = cloud.identity.create_identity_provider(id=name, **attributes)\n except sdk.exceptions.OpenStackCloudException as ex:\n module.fail_json(msg='Failed to create identity provider: {0}'.format(str(ex)))\n return (True, idp)",
"def create_provider(\n provider_id:UUID = Form(...),\n name:str = Form(...),\n qualification:str = Form(...),\n speciality:str = Form(...),\n phone:str = Form(...),\n department:Optional[str] = Form(\"N/A\"),\n organization:str = Form(...),\n location:Optional[str] = Form(\"N/A\"),\n address:str = Form(...),\n active:bool = Form(...)\n ):\n\n post_data = {\n \"name\": name,\n \"qualification\": qualification,\n \"speciality\": speciality,\n \"phone\": phone,\n \"department\": department,\n \"organization\": organization,\n \"location\": location,\n \"address\": address,\n \"active\": active\n }\n provider_data = open_for_reading()\n if str(provider_id) in provider_data.keys():\n response = {\"message\": \"ID already exists\"}\n else:\n provider_data[str(provider_id)] = post_data\n open_for_writing(data=provider_data)\n response = {\"message\": \"provider created\"}\n\n return response",
"def create_cloud_provider(providername):\n backend_name = request.get_json().get(\"backend\")\n service_name = request.get_json().get(\"service\")\n response = jsonify(\n admin.create_provider(\n current_app.scoped_session(),\n providername,\n backend=backend_name,\n service=service_name,\n )\n )\n return response",
"def create(self, identity, record=None, data=None, **kwargs):\n data['id'] = data['id'].lower()\n self._validate(data['id'])\n record['id'] = data['id']\n try:\n provider = record.__class__.pid.field._provider.create(record=record)\n except PIDAlreadyExists:\n raise ValidationError(\n 'A community with this identifier already exists.',\n field_name='id',\n )\n setattr(record, 'pid', provider.pid)",
"def m_create_identity(DID, domain_name, website, commercial_name, parent_node_account, password, overwrite):\n\n error, didDoc = create_identity(\n DID, domain_name, website, commercial_name, parent_node_account, password, overwrite)\n if error is not None:\n print(error)\n\n print(f\"Created\")",
"def add_new_provider(self, provider_name, provider_type, endpoints, zone_id, provider_region):\n try:\n result = self.client.post(self.providers_url, name=provider_name,\n type=ManageIQProvider.PROVIDER_TYPES[provider_type],\n zone={'id': zone_id},\n connection_configurations=endpoints,\n provider_region=provider_region)\n provider_id = result['results'][0]['id']\n self.changed = True\n except Exception as e:\n self.module.fail_json(msg=\"Failed to add provider. Error: {!r}\".format(e))\n return provider_id",
"def create(self, identity, data=None, record=None, **kwargs):\n self._populate_access_and_validate(identity, data, record, **kwargs)\n self._init_owners(identity, record, **kwargs)",
"def create(self, identity, data=None, record=None, **kwargs):\n if system_process in identity.provides:\n return\n\n member = {\n \"type\": \"user\",\n \"id\": str(identity.id),\n }\n self.service.members.add(\n # the user is not yet owner of the community (is being added)\n # therefore we cannot use `identity`\n system_identity,\n record.id,\n {\"members\": [member], \"role\": current_roles.owner_role.name},\n uow=self.uow,\n )\n\n # Invalidate the membership cache\n on_user_membership_change(identity=identity)",
"def __init__(__self__, *,\n identity_pool_id: pulumi.Input[str],\n identity_provider_name: pulumi.Input[str],\n principal_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n use_defaults: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"identity_pool_id\", identity_pool_id)\n pulumi.set(__self__, \"identity_provider_name\", identity_provider_name)\n if principal_tags is not None:\n pulumi.set(__self__, \"principal_tags\", principal_tags)\n if use_defaults is not None:\n pulumi.set(__self__, \"use_defaults\", use_defaults)",
"def test_create_identity(self):\n pass",
"def __init__(__self__, *,\n principal_id: Optional[pulumi.Input[str]] = None,\n tenant_id: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[Union[str, 'ManagedIdentityType']]] = None,\n user_assigned_identities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if principal_id is not None:\n pulumi.set(__self__, \"principal_id\", principal_id)\n if tenant_id is not None:\n pulumi.set(__self__, \"tenant_id\", tenant_id)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if user_assigned_identities is not None:\n pulumi.set(__self__, \"user_assigned_identities\", user_assigned_identities)",
"def _create_resource_provider(context, uuid, name,\n parent_provider_uuid=None):\n return {\n 'uuid': uuid,\n 'name': name,\n 'generation': 0,\n 'parent_provider_uuid': parent_provider_uuid\n }",
"def __init__(__self__, *,\n identity_pool_id: Optional[pulumi.Input[str]] = None,\n identity_provider_name: Optional[pulumi.Input[str]] = None,\n principal_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n use_defaults: Optional[pulumi.Input[bool]] = None):\n if identity_pool_id is not None:\n pulumi.set(__self__, \"identity_pool_id\", identity_pool_id)\n if identity_provider_name is not None:\n pulumi.set(__self__, \"identity_provider_name\", identity_provider_name)\n if principal_tags is not None:\n pulumi.set(__self__, \"principal_tags\", principal_tags)\n if use_defaults is not None:\n pulumi.set(__self__, \"use_defaults\", use_defaults)",
"def _create_resource_provider(self, uuid, name):\n url = \"/resource_providers\"\n payload = {\n 'uuid': uuid,\n 'name': name,\n }\n resp = self.post(url, payload)\n if resp.status_code == 201:\n msg = _LI(\"Created resource provider record via placement API \"\n \"for resource provider with UUID {0} and name {1}.\")\n msg = msg.format(uuid, name)\n LOG.info(msg)\n return objects.ResourceProvider(\n uuid=uuid,\n name=name,\n generation=1,\n )\n elif resp.status_code == 409:\n # Another thread concurrently created a resource provider with the\n # same UUID. Log a warning and then just return the resource\n # provider object from _get_resource_provider()\n msg = _LI(\"Another thread already created a resource provider \"\n \"with the UUID {0}. Grabbing that record from \"\n \"the placement API.\")\n msg = msg.format(uuid)\n LOG.info(msg)\n return self._get_resource_provider(uuid)\n else:\n msg = _LE(\"Failed to create resource provider record in \"\n \"placement API for UUID %(uuid)s. \"\n \"Got %(status_code)d: %(err_text)s.\")\n args = {\n 'uuid': uuid,\n 'status_code': resp.status_code,\n 'err_text': resp.text,\n }\n LOG.error(msg, args)",
"async def create_issuer(self, issuer_name: str, provider: str, **kwargs) -> CertificateIssuer:\n\n enabled = kwargs.pop(\"enabled\", None)\n account_id = kwargs.pop(\"account_id\", None)\n password = kwargs.pop(\"password\", None)\n organization_id = kwargs.pop(\"organization_id\", None)\n admin_contacts = kwargs.pop(\"admin_contacts\", None)\n\n if account_id or password:\n issuer_credentials = self._models.IssuerCredentials(account_id=account_id, password=password)\n else:\n issuer_credentials = None\n if admin_contacts:\n admin_details: Optional[List[Any]] = [\n self._models.AdministratorDetails(\n first_name=contact.first_name,\n last_name=contact.last_name,\n email_address=contact.email,\n phone=contact.phone,\n )\n for contact in admin_contacts\n ]\n else:\n admin_details = None\n if organization_id or admin_details:\n organization_details = self._models.OrganizationDetails(id=organization_id, admin_details=admin_details)\n else:\n organization_details = None\n if enabled is not None:\n issuer_attributes = self._models.IssuerAttributes(enabled=enabled)\n else:\n issuer_attributes = None\n\n parameters = self._models.CertificateIssuerSetParameters(\n provider=provider,\n credentials=issuer_credentials,\n organization_details=organization_details,\n attributes=issuer_attributes,\n )\n\n issuer_bundle = await self._client.set_certificate_issuer(\n vault_base_url=self.vault_url, issuer_name=issuer_name, parameter=parameters, **kwargs\n )\n return CertificateIssuer._from_issuer_bundle(issuer_bundle=issuer_bundle)",
"def create_namespaced_identity(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_identity\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_identity`\")\n\n resource_path = '/oapi/v1/identities'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Identity',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def __init__(__self__,\n resource_name: str,\n args: OpenIdConnectProviderArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def create(self, identity, data=None, record=None, **kwargs):\n if record.access.visibility != \"public\":\n return\n\n community_set = self._create_set_from_community(record)\n # NOTE: will be indexed via a listener in oaiserver module\n db.session.add(community_set)",
"def create(self, identity, data=None, record=None, **kwargs):\n self._populate_access_and_validate(identity, data, record, **kwargs)",
"def _create_entity_in_domain(entity_type, domain_id):\n if entity_type == 'users':\n new_entity = unit.new_user_ref(domain_id=domain_id)\n new_entity = self.identity_api.create_user(new_entity)\n elif entity_type == 'groups':\n new_entity = unit.new_group_ref(domain_id=domain_id)\n new_entity = self.identity_api.create_group(new_entity)\n elif entity_type == 'roles':\n new_entity = self._create_role(domain_id=domain_id)\n else:\n # Must be a bad test plan\n raise exception.NotImplemented()\n return new_entity",
"def create_with_instance_principal(iam_auth_uri=None):\n if iam_auth_uri is None:\n return SignatureProvider(\n oci.auth.signers.InstancePrincipalsSecurityTokenSigner())\n else:\n return SignatureProvider(\n oci.auth.signers.InstancePrincipalsSecurityTokenSigner(\n federation_endpoint=iam_auth_uri))",
"def __init__(__self__,\n resource_name: str,\n args: IdentityPoolProviderPrincipalTagArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(self, region, user_pool_id, app_client_id):\n self.region = region\n self.user_pool_id = user_pool_id\n self.client_id = app_client_id\n self.client = boto3.client('cognito-idp', region_name=self.region)",
"def register_provider(args):\n if len(args) == 0:\n click.echo(\"Usage: mephisto register <provider_type> --arg1:value --arg2:value\")\n return\n\n from mephisto.core.local_database import LocalMephistoDB\n from mephisto.core.registry import get_crowd_provider_from_type\n from mephisto.core.argparse_parser import parse_arg_dict, get_extra_argument_dicts\n\n provider_type, requester_args = args[0], args[1:]\n args_dict = dict(arg.split(\":\") for arg in requester_args)\n transformed = dict(\n (key, {\"option_string\": key, \"value\": value})\n for (key, value) in args_dict.items()\n )\n\n crowd_provider = get_crowd_provider_from_type(provider_type)\n RequesterClass = crowd_provider.RequesterClass\n\n if len(requester_args) == 0:\n from tabulate import tabulate\n\n params = get_extra_argument_dicts(RequesterClass)\n for param in params:\n click.echo(param[\"desc\"])\n click.echo(tabulate(param[\"args\"].values(), headers=\"keys\"))\n return\n\n try:\n parsed_options = parse_arg_dict(RequesterClass, transformed)\n except Exception as e:\n click.echo(str(e))\n\n if \"name\" not in parsed_options:\n click.echo(\"No name was specified for the requester.\")\n\n db = LocalMephistoDB()\n requesters = db.find_requesters(requester_name=parsed_options[\"name\"])\n if len(requesters) == 0:\n requester = RequesterClass.new(db, parsed_options[\"name\"])\n else:\n requester = requesters[0]\n try:\n requester.register(parsed_options)\n click.echo(\"Registered successfully.\")\n except Exception as e:\n click.echo(str(e))",
"def add_tomcat7_idp():\n pass",
"def sso_test_create_user(request, idp_slug):\n if settings.SERVER_ENVIRONMENT not in ['staging']:\n raise Http404()\n\n username = request.GET.get('username')\n if username:\n prepare_session_with_sso_username(request, username)\n\n invitation_uuid = request.GET.get('invitation')\n invitation = Invitation.objects.get(uuid=invitation_uuid)\n if invitation:\n prepare_session_for_sso_invitation(request, invitation)\n\n return HttpResponseRedirect(reverse(\"sso_saml_login\", args=(idp_slug,)))",
"def test_create_resource_provider(self):\n uuid = uuids.compute_node\n name = 'computehost'\n resp_mock = mock.Mock(status_code=200)\n self.ks_adap_mock.post.return_value = resp_mock\n\n self.assertEqual(\n resp_mock.json.return_value,\n self.client._create_resource_provider(self.context, uuid, name))\n\n expected_payload = {\n 'uuid': uuid,\n 'name': name,\n }\n\n expected_url = '/resource_providers'\n self.ks_adap_mock.post.assert_called_once_with(\n expected_url, json=expected_payload, microversion='1.20',\n global_request_id=self.context.global_id)",
"def __init__(__self__, *,\n identity_namespace: Optional[pulumi.Input[str]] = None,\n identity_provider: Optional[pulumi.Input[str]] = None,\n workload_pool: Optional[pulumi.Input[str]] = None):\n if identity_namespace is not None:\n pulumi.set(__self__, \"identity_namespace\", identity_namespace)\n if identity_provider is not None:\n pulumi.set(__self__, \"identity_provider\", identity_provider)\n if workload_pool is not None:\n pulumi.set(__self__, \"workload_pool\", workload_pool)",
"def create(self, request, *args, **kwargs):\n \n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n provider = serializer.data.get('provider')\n\n access_token = serializer.data.get('access_token')\n user = None if request.user.is_anonymous else request.user\n\n # strategy sets up the required custom configuration for working with Django\n strategy = load_strategy(request)\n try:\n # Loads backends defined on SOCIAL_AUTH_AUTHENTICATION_BACKENDS,\n # checks the appropriate one by using the provider given\n\n backend = load_backend(strategy=strategy, name=provider, redirect_uri=None)\n access_token = self.update_access_token(backend, request, access_token)\n\n except MissingBackend:\n return Response({\n \"errors\": {\n \"provider\": [\"Invalid provider\"]\n }\n }, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n # creates a user in our user model \n # If the user exists, we just authenticate the user.\n user = backend.do_auth(access_token, user=user)\n\n except BaseException as error:\n return Response({\"error\": str(error)}, status=status.HTTP_400_BAD_REQUEST)\n\n # Since the user is using social authentication, there is no need for email verification.\n # We therefore set the user to active here.\n # And also subscribe them for notifications\n\n user.is_active = True\n user.save()\n\n subscribe_user(user, self.subscription_class)\n\n serializer = UserSerializer(user)\n \n return Response(serializer.data, status=status.HTTP_200_OK)",
"def createTenant(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')"
] | [
"0.6144289",
"0.5990092",
"0.5924737",
"0.5494253",
"0.5485202",
"0.53507775",
"0.53166866",
"0.51905113",
"0.5181825",
"0.51246256",
"0.5108798",
"0.51040316",
"0.5087071",
"0.50794953",
"0.50547856",
"0.50211",
"0.50116605",
"0.5002519",
"0.4999784",
"0.49900097",
"0.49755615",
"0.49569455",
"0.49335718",
"0.49200395",
"0.48801878",
"0.48650733",
"0.48602",
"0.4849936",
"0.48346928",
"0.48288682"
] | 0.61856276 | 0 |
Creates a new MFA TOTP device for the user. A user can have one MFA TOTP device. | def create_mfa_totp_device(self, user_id, **kwargs):
resource_path = "/users/{userId}/mfaTotpDevices"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="MfaTotpDevice")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="MfaTotpDevice") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def activate_mfa_totp_device(self, user_id, mfa_totp_device_id, mfa_totp_token, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/activate\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"activate_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing),\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=mfa_totp_token,\n response_type=\"MfaTotpDeviceSummary\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=mfa_totp_token,\n response_type=\"MfaTotpDeviceSummary\")",
"def post(self, request):\n request_data = request.data\n if 'mobile' not in request_data or not request_data['mobile']:\n return StandardHttpResponse.bad_rsp([], 'Invalid Data.')\n email = None\n mobile = None\n if '@' in request_data['mobile']:\n email = request_data['mobile']\n else:\n mobile = request_data['mobile']\n if mobile:\n user_profile_obj = UserModelQueries.get_user_profile_by_mobile(mobile)\n if not user_profile_obj:\n return StandardHttpResponse.bad_rsp([], 'Mobile is yet not registered or not verified.')\n else:\n user_profile_obj = UserModelQueries.get_user_profile_by_email(email)\n if not user_profile_obj:\n return StandardHttpResponse.bad_rsp([], 'Looks like You haven\\'t registered yet. Please Registered.')\n\n result, response = MobileOtpService.create_otp(user_profile_obj.mobile,\n 'ForgetPassword')\n if not result:\n return StandardHttpResponse.bad_rsp([], response)\n # TODO: Code to send the otp to mobile\n SentOTP.send_otp_to_email(email, response['otp'], 'Forget Password')\n response_data = {'otp_ref': response['otp_ref']}\n return StandardHttpResponse.rsp_200(response_data, 'An OTP Sent to {} to reset the password'\n .format(user_profile_obj.mobile.__str__()))",
"def generate_totp_seed(self, user_id, mfa_totp_device_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/generateSeed\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"generate_totp_seed got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")",
"def post(self, request):\n\n email = request.data.get('email')\n phone_number = request.data.get('phone_number')\n otp = request.data.get('otp')\n\n # check that otp is correct or not (otp should match with email or phone number\n otp_obj = Otp.objects.filter(Q(email_phone=email) | Q(email_phone=phone_number) & Q(code=otp)).first()\n if not otp_obj:\n response_json = {\n 'status': False,\n 'message': 'otp is incorrect',\n 'data': {}\n }\n\n return Response(response_json, status=400)\n\n # create new user\n request_json = {\n \"username\": request.data.get('username'),\n \"password\": make_password(request.data.get('password')),\n \"email\": email,\n \"phone_number\": phone_number\n }\n\n user_serialized = UserProfileSerializer(data=request_json)\n if not user_serialized.is_valid():\n return validate_error(user_serialized)\n user_serialized.save()\n\n user_obj = UserProfile.objects.filter(id=user_serialized.data.get('id')).first()\n if not user_obj:\n return existence_error('user')\n\n # create following and follower object\n following_obj = UserFollowing.objects.create(user=user_obj)\n follower_obj = UserFollower.objects.create(user=user_obj)\n\n token, created = Token.objects.get_or_create(user=user_obj)\n\n otp_obj.delete()\n\n response_json = {\n 'status': True,\n 'message': 'User successfully registered',\n 'data': 'Token {}'.format(token.key)\n }\n\n return Response(response_json, status=201)",
"def create(self, validated_data):\n return MFAMethod.objects.get_or_create(\n user=self.user,\n name=self.context['name'],\n defaults={\n 'secret': create_secret(),\n 'is_active': False,\n }\n )",
"def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)",
"def post(self, request):\n\n email = request.data.get('email')\n phone_number = request.data.get('phone_number')\n otp = request.data.get('otp')\n\n # check that otp is correct or not (otp should match with email or phone number\n otp_obj = Otp.objects.filter(Q(email_phone=email) | Q(email_phone=phone_number) & Q(code=otp)).first()\n if not otp_obj:\n response_json = {\n 'status': False,\n 'message': 'otp is incorrect',\n 'data': {}\n }\n\n return Response(response_json, status=400)\n\n # login user\n user_obj = UserProfile.objects.filter(\n Q(phone_number=request.data.get('phone_number')) | Q(email=request.data.get('email'))).first()\n\n token, created = Token.objects.get_or_create(user=user_obj)\n\n otp_obj.delete()\n\n response_json = {\n 'status': True,\n 'message': 'User successfully Logged in',\n 'data': 'Token {}'.format(token.key)\n }\n\n return Response(response_json, status=200)",
"def create_token(request, user):\n\n key = get_random_string(100)\n data = {}\n ip = get_client_ip_address(request)\n\n return Token.objects.create(user=user, key=key, data=json.dumps(data), ip=ip)",
"def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)",
"def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)",
"def create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)",
"def create_user(self, phone_number, type, password, is_staff):\n return self.__create_user(phone_number, type, password, is_staff, False, False)",
"def create(self, user, token):\n\n session['user'] = {\n 'id': str(user.id),\n 'login': user.login,\n 'token': token\n }\n\n return UserSession.create(session['user'])",
"def create_gateway_device(self, body=None):\r\n return self.post(self.gateway_devices_path, body=body)",
"def create_teacher(username, password, email, preferred_language,skype_id,name, phone_number, country,availability):\n person.create_person(username,password,email,preferred_language,skype_id,name,phone_number,country)\n teacher_account_id = person.get_last()\n query = 'INSERT INTO teacher VALUES( %s,%s );'\n args = (teacher_account_id, availability)\n database.connection.save_data(query, args)",
"def _create_user(self, email, mobile_number, password, **extra_fields):\n\n print('model number')\n print(mobile_number)\n \n user = self.model(email=email,mobile_number = mobile_number, **extra_fields)\n user.set_password(password)\n \n user.save(using=self._db)\n return user",
"def create_user(session, phone_number, name, pass_hash, funds=0.0):\n # Perform the db job\n user = User(phone_number=phone_number, name=name, pass_hash=pass_hash, funds=funds)\n session.add(user)\n session.commit()\n return USER_GET_URI.format(user_id=phone_number)",
"def create(self,request):\n return CustomAuthToken().post(request)",
"def create_new_user(cls, user_email, user_password, user_phone):\n\n new_user = User(email=user_email, password=user_password, mobile_phone=user_phone)\n\n db.session.add(new_user)\n db.session.commit()\n\n print \"Successfully added new user with the email: %s\" % user_email",
"def add_user(self):\n\n pin, code = self.get_auth_pin() \n print(\"Enter the PIN '{}' into the Add Application window and click Add Application\".format(pin))\n input(\"waiting press enter to continue...\")\n\n access_token, refresh_token = self.get_tokens(code)\n user_id = self.tokens.get_next_user_id()\n self.tokens.insert_user(user_id, access_token, refresh_token)\n tstat_ids = self.get_tstat_ids(access_token)\n for tstat_id in tstat_ids:\n logger.info(\"Adding Thermostat ID: {}\".format(tstat_id))\n self.tokens.insert_tstat(user_id, tstat_id)",
"def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')",
"def create_device(self, device_dict):\n devices = {'devices': [device_dict]}\n url = '{}/iot/devices'.format(self.url)\n return self.post(url, data=json.dumps(devices), headers=self.headers)",
"def create_user_device(self, email, device_str):\n if self.database is None:\n raise Exception(\"No database.\")\n if email is None or len(email) == 0:\n raise Exception(\"Email address not provided.\")\n if device_str is None or len(device_str) == 0:\n raise Exception(\"Device string not provided.\")\n user_id, _, _ = self.database.retrieve_user(email)\n return self.database.create_user_device(user_id, device_str)",
"def user(self, user_token, user_device=None):\n self.set('user', user_token)\n self.set('device', user_device)",
"def register_user_device(username: str, password: str, mac_address: str, email: Optional[str] = None) -> \\\n Union[str, Token]:\n ret = register_user(username, password, email)\n if isinstance(ret, str):\n return ret\n else:\n user_id = ret\n token, device_id = _add_update_device(user_id, mac_address)\n client_logger_security().info(f\"Successfully added new device: user_id={user_id}, device_id={device_id}\")\n _set_user_authenticated(user_id, device_id)\n return token",
"def create(self, validated_data):\n request = self._kwargs['context']['request']\n user = User.objects.create(**validated_data)\n user.set_password(validated_data[\"password\"])\n user.save()\n category_list = ['Fuel', 'Bill', 'Entertainment', 'Education', 'Food']\n for category in category_list:\n user.user_categories.create(name=category)\n login(request, user)\n token, created = Token.objects.get_or_create(user=user)\n validated_data[\"token\"] = token.key\n return validated_data",
"def create_user(self, email_or_phone, password=None, **extra_fields):\n return self._create_user(email_or_phone, password, False, False, **extra_fields)",
"def RegisterDevice(self, device_id, machine_id, type, username):\n dmtoken_chars = []\n while len(dmtoken_chars) < 32:\n dmtoken_chars.append(random.choice('0123456789abcdef'))\n dmtoken = ''.join(dmtoken_chars)\n allowed_policy_types = {\n dm.DeviceRegisterRequest.BROWSER: [\n 'google/chrome/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.USER: [\n 'google/chromeos/user',\n 'google/chrome/extension'\n ],\n dm.DeviceRegisterRequest.DEVICE: [\n 'google/chromeos/device',\n 'google/chromeos/publicaccount',\n 'google/chrome/extension',\n 'google/chromeos/signinextension'\n ],\n dm.DeviceRegisterRequest.ANDROID_BROWSER: [\n 'google/android/user'\n ],\n dm.DeviceRegisterRequest.TT: ['google/chromeos/user',\n 'google/chrome/user'],\n }\n if machine_id in KIOSK_MACHINE_IDS:\n enrollment_mode = dm.DeviceRegisterResponse.RETAIL\n else:\n enrollment_mode = dm.DeviceRegisterResponse.ENTERPRISE\n self._registered_tokens[dmtoken] = {\n 'device_id': device_id,\n 'device_token': dmtoken,\n 'allowed_policy_types': allowed_policy_types[type],\n 'machine_name': 'chromeos-' + machine_id,\n 'machine_id': machine_id,\n 'enrollment_mode': enrollment_mode,\n 'username': username,\n }\n self.WriteClientState()\n return self._registered_tokens[dmtoken]",
"def create_user():\n record = request.get_json()\n if record is None:\n return {\"Error\": \"No data Supplied.\"}, 400\n\n schema = user_schema.load(record)\n\n if UserModel.objects(email=schema['email']):\n return {\"Error\": \"User Data already exists.\"}, 400\n user = UserModel(**schema)\n user.hash_password()\n user.save()\n ser_data = user_schema.dump(user)\n token = Auth.generate_token(ser_data[\"_id\"])\n return {\"message\": \"User Created Successfully\", \"Token\": token, \"id\": str(user.id)}, 200",
"def create_user(self, email, mobile_number, password, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n extra_fields.setdefault('is_superuser', False)\n return self._create_user(email, mobile_number, password , **extra_fields)"
] | [
"0.5845079",
"0.58070564",
"0.57539624",
"0.56343126",
"0.5609716",
"0.55635047",
"0.5540228",
"0.5524604",
"0.552029",
"0.552029",
"0.552029",
"0.5508349",
"0.5505848",
"0.5504013",
"0.54644525",
"0.5441662",
"0.53983897",
"0.53840804",
"0.5339161",
"0.5339142",
"0.5335467",
"0.5331013",
"0.5312626",
"0.53079784",
"0.52986294",
"0.52529204",
"0.52490073",
"0.52367395",
"0.5231955",
"0.52287084"
] | 0.7277988 | 0 |
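
The row above pairs the CreateMfaTotpDevice description with the OCI Python SDK implementation. As an illustrative aside, a minimal calling sketch might look like the following; the config profile and user OCID are placeholders, not values drawn from the dataset.

```python
# Hedged sketch: enrolling a TOTP device for a user via the OCI Python SDK.
# The profile and user OCID below are illustrative placeholders.
import oci

config = oci.config.from_file()                      # default ~/.oci/config profile
identity = oci.identity.IdentityClient(config)

user_ocid = "ocid1.user.oc1..exampleuniqueID"        # placeholder OCID

# Creates the (not yet activated) device; activation is a separate call
# (activate_mfa_totp_device) that requires a current TOTP code.
device = identity.create_mfa_totp_device(user_ocid).data
print(device.id)
```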
Creates a new network source in your tenancy. You must specify your tenancy's OCID as the compartment ID in the request object (remember that the tenancy is simply the root compartment). Notice that IAM resources (users, groups, compartments, and some policies) reside within the tenancy itself, unlike cloud resources such as compute instances, which typically reside within compartments inside the tenancy. For information about OCIDs, see `Resource Identifiers`__. You must also specify a name for the network source, which must be unique across all network sources in your tenancy, and cannot be changed. You can use this name or the OCID when writing policies that apply to the network source. For more information about policies, see `How Policies Work`__. You must also specify a description for the network source (although it can be an empty string). It does not | def create_network_source(self, create_network_source_details, **kwargs):
resource_path = "/networkSources"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_network_source got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_network_source_details,
response_type="NetworkSources")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_network_source_details,
response_type="NetworkSources") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)",
"def new_source(self, name):\n params = {\"name\": name}\n return JSONRPCRequest(self, \"newSource\", params)",
"def copy_network(source_net):\n return make_net_model({\"id\": source_net.id,\n \"subnets\": source_net.subnets,\n \"ports\": source_net.ports,\n \"tenant_id\": source_net.tenant_id,\n \"mtu\": source_net.mtu})",
"def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)",
"def network_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_network(**kwargs)",
"def create_network(self, tenant_id, network):\n self.create_network_bulk(tenant_id, [network])",
"def ex_create_network(self, resource_group, network, extra=None, location=None):\n if location is None:\n if self.default_location:\n location = self.default_location\n else:\n raise ValueError(\"location is required.\")\n target = \"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s\" % (\n self.subscription_id, resource_group, network)\n params = {\"api-version\": \"2016-03-30\"}\n data = {\n \"tags\": {},\n \"location\": location.id,\n }\n\n if extra:\n data[\"properties\"] = extra\n\n r = self.connection.request(action=target,\n params=params,\n data=data,\n method=\"PUT\")\n\n while r.object is None:\n time.sleep(1)\n\n return AzureNetwork(r.object[\"id\"], r.object[\"name\"], r.object[\"location\"], r.object[\"properties\"])",
"def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id",
"def create_network(self, context, network):\n LOG.debug(_(\"NeutronRestProxyV2: create_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n with context.session.begin(subtransactions=True):\n # Validate args\n tenant_id = self._get_tenant_id_for_create(context,\n network[\"network\"])\n\n # create network in DB\n new_net = super(NeutronRestProxyV2, self).create_network(context,\n network)\n self._process_l3_create(context, new_net, network['network'])\n mapped_network = self._get_mapped_network_with_subnets(new_net,\n context)\n\n # create network on the network controller\n self.servers.rest_create_network(tenant_id, mapped_network)\n\n # return created network\n return new_net",
"def fusion_api_create_network_set(self, body, api=None, headers=None):\n return self.network_set.create(body, api, headers)",
"def create_default_network(context):\n return [{\n 'type': 'templates/network.py',\n 'name': 'fc-network',\n 'properties': {\n 'resourceName': 'network',\n 'name': 'network',\n 'projectId': '$(ref.fc-project.projectId)',\n 'autoCreateSubnetworks': True,\n # We pass the dependsOn list into the network template as a\n # parameter. Deployment Manager doesn't support dependsOn for\n # template-call nodes, so we can't have this resource itself depend on\n # the project-wide resources.\n 'dependsOn': '$(ref.fc-project.resourceNames)',\n },\n }]",
"def create_network(self, context, network):\n\n LOG.debug(_(\"QuantumRestProxyV2: create_network() called\"))\n\n # Validate args\n tenant_id = self._get_tenant_id_for_create(context, network[\"network\"])\n net_name = network[\"network\"][\"name\"]\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\"), net_name)\n\n # create in DB\n new_net = super(QuantumRestProxyV2, self).create_network(context,\n network)\n\n # create on networl ctrl\n try:\n resource = NET_RESOURCE_PATH % tenant_id\n data = {\n \"network\": {\n \"id\": new_net[\"id\"],\n \"name\": new_net[\"name\"],\n }\n }\n ret = self.servers.post(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2:Unable to create remote \"\n \"network: %s\"), e.message)\n super(QuantumRestProxyV2, self).delete_network(context,\n new_net['id'])\n raise\n\n # return created network\n return new_net",
"def create_network_profile(self, body=None):\r\n return self.post(self.network_profiles_path, body=body)",
"def subnetpool_create(request, name, prefixes, **kwargs):\n LOG.debug(\"subnetpool_create(): name=%(name)s, prefixes=%(prefixes)s, \"\n \"kwargs=%(kwargs)s\", {'name': name, 'prefixes': prefixes,\n 'kwargs': kwargs})\n body = {'subnetpool':\n {'name': name,\n 'prefixes': prefixes,\n }\n }\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnetpool'].update(kwargs)\n subnetpool = \\\n neutronclient(request).create_subnetpool(body=body).get('subnetpool')\n return SubnetPool(subnetpool)",
"def create_from_src(self, cgsnapshot_id, source_cgid, name=None,\n description=None, user_id=None,\n project_id=None):\n body = {'consistencygroup-from-src': {'name': name,\n 'description': description,\n 'cgsnapshot_id': cgsnapshot_id,\n 'source_cgid': source_cgid,\n 'user_id': user_id,\n 'project_id': project_id,\n 'status': \"creating\",\n }}\n\n self.run_hooks('modify_body_for_update', body,\n 'consistencygroup-from-src')\n resp, body = self.api.client.post(\n \"/consistencygroups/create_from_src\", body=body)\n return common_base.DictWithMeta(body['consistencygroup'], resp)",
"def CreateGcpWorkloadSource(\n client,\n messages,\n workload_source_id: str,\n resources: Optional[List[str]],\n attached_service_accounts: Optional[List[str]],\n parent: str,\n for_managed_identity: bool = False,\n):\n conditions = []\n if resources is not None:\n conditions += [\n messages.WorkloadSourceCondition(attribute='resource', value=resource)\n for resource in resources\n ]\n if attached_service_accounts is not None:\n conditions += [\n messages.WorkloadSourceCondition(\n attribute='attached_service_account', value=account\n )\n for account in attached_service_accounts\n ]\n new_workload_source = messages.WorkloadSource(\n conditionSet=messages.WorkloadSourceConditionSet(conditions=conditions)\n )\n if for_managed_identity:\n return client.projects_locations_workloadIdentityPools_namespaces_managedIdentities_workloadSources.Create(\n messages.IamProjectsLocationsWorkloadIdentityPoolsNamespacesManagedIdentitiesWorkloadSourcesCreateRequest(\n parent=parent,\n workloadSource=new_workload_source,\n workloadSourceId=workload_source_id,\n )\n )\n else:\n return client.projects_locations_workloadIdentityPools_namespaces_workloadSources.Create(\n messages.IamProjectsLocationsWorkloadIdentityPoolsNamespacesWorkloadSourcesCreateRequest(\n parent=parent,\n workloadSource=new_workload_source,\n workloadSourceId=workload_source_id,\n )\n )",
"def subnet_create(request, network_id, **kwargs):\n LOG.debug(\"subnet_create(): netid=%(network_id)s, kwargs=%(kwargs)s\",\n {'network_id': network_id, 'kwargs': kwargs})\n body = {'subnet': {'network_id': network_id}}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnet'].update(kwargs)\n subnet = neutronclient(request).create_subnet(body=body).get('subnet')\n return Subnet(subnet)",
"def init_network(session: \"Session\", new_network_name: str) -> None:\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}\"\n _post(session, url_tail, None, params={CoordConstsV2.QP_NAME: new_network_name})",
"def create(self):\n logging.debug(\"%s create called\" % self)\n # networks = self.infra.get(\"networks\")\n notify(\"Creating network %s\" % self.name)\n self.cloudnet = cn.create(self.name, cidr=self.cidr)\n return True",
"def create_source(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_source_with_http_info(**kwargs)\n else:\n (data) = self.create_source_with_http_info(**kwargs)\n return data",
"def create_network_gateway(self, body=None):\r\n return self.post(self.network_gateways_path, body=body)",
"def add_provider_network(network_id, network_type, segmentation_id):\n session = db.get_session()\n if session.query(network_models_v2.ProviderNetwork).filter_by(\n network_id=network_id).first():\n raise c_exc.ProviderNetworkExists(network_id)\n pnet = network_models_v2.ProviderNetwork(network_id=network_id,\n network_type=network_type,\n segmentation_id=segmentation_id)\n session.add(pnet)\n session.flush()",
"def test_03_network_create(self):\n # Validate the following\n # 1. Create a project.\n # 2. Add virtual/direct network resource to the project. User shared\n # network resource for the project\n # 3. Verify any number of Project level Virtual/Direct networks can be\n # created and used for vm deployment within the project.\n # 4. Verify shared networks (zone and domain wide) from outside the\n # project can also be used in a project.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n network_offerings = list_network_offerings(\n self.apiclient,\n projectid=project.id,\n supportedServices='SourceNat',\n type='isolated',\n state='Enabled'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a network with network offering ID: %s\" %\n network_offering.id)\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n network = Network.create(\n self.apiclient,\n self.services[\"network\"],\n networkofferingid=network_offering.id,\n projectid=project.id\n )\n self.debug(\"Created network with ID: %s\" % network.id)\n networks = Network.list(\n self.apiclient,\n projectid=project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check for the valid network list response\"\n )\n\n self.debug(\"Deploying VM with network: %s\" % network.id)\n\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n network_offerings = list_network_offerings(\n self.apiclient,\n state='Enabled',\n guestiptype='Shared',\n name='DefaultSharedNetworkOffering',\n displaytext='Offering for Shared networks'\n )\n self.assertEqual(\n isinstance(network_offerings, list),\n True,\n \"Check for the valid network offerings\"\n )\n network_offering = network_offerings[0]\n\n self.debug(\"creating a shared network in domain: %s\" %\n self.domain.id)\n\n # Getting physical network and free vlan in it\n physical_network, vlan = get_free_vlan(self.apiclient, self.zone.id)\n\n self.services[\"domain_network\"][\"vlan\"] = vlan\n self.services[\"domain_network\"][\"physicalnetworkid\"] = physical_network.id\n\n # Generating random subnet number for shared network creation\n shared_network_subnet_number = random.randrange(1,254)\n\n self.services[\"domain_network\"][\"gateway\"] = \"172.16.\"+str(shared_network_subnet_number)+\".1\"\n self.services[\"domain_network\"][\"startip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".2\"\n self.services[\"domain_network\"][\"endip\"] = \"172.16.\"+str(shared_network_subnet_number)+\".20\"\n\n domain_network = Network.create(\n self.apiclient,\n self.services[\"domain_network\"],\n domainid=self.domain.id,\n networkofferingid=network_offering.id,\n zoneid=self.zone.id\n )\n self.cleanup.append(domain_network)\n self.debug(\"Created network with ID: %s\" % domain_network.id)\n\n 
virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n networkids=[str(domain_network.id)],\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n self.debug(\"Deployed VM with ID: %s\" % virtual_machine.id)\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n # Delete VM before network gets deleted in cleanup\n virtual_machine.delete(self.apiclient, expunge=True)\n return",
"def _create_network_resources(self, tenant_id):\n logger.info(\"Creating network resources...\")\n net_name = \"ostf-autoscaling-test-service-net\"\n net_body = {\n \"network\": {\n \"name\": net_name,\n \"tenant_id\": tenant_id\n }\n }\n ext_net = None\n net = None\n for network in self.neutron_cli.list_networks()[\"networks\"]:\n if not net and network[\"name\"] == net_name:\n net = network\n if not ext_net and network[\"router:external\"]:\n ext_net = network\n if not net:\n net = self.neutron_cli.create_network(net_body)[\"network\"]\n subnet = self.helpers.os_conn.create_subnet(\n \"sub\" + net_name, net[\"id\"], \"10.1.7.0/24\", tenant_id=tenant_id\n )\n router_name = 'ostf-autoscaling-test-service-router'\n router = self.helpers.os_conn.create_router(\n router_name, self.helpers.os_conn.get_tenant(\"admin\"))\n self.neutron_cli.add_interface_router(\n router[\"id\"], {\"subnet_id\": subnet[\"id\"]})\n return net[\"id\"]",
"def create_network(client, overwrite_net=False, network_name=DOCK_NETWORK_NAME, subnetwork=DOCK_NETWORK_SUBNET,\n gw=DOCK_NETWORK_GW):\n\n if overwrite_net:\n try:\n client.networks.get(network_name).remove()\n logging.info(\" Overwriting existing network\")\n except docker.errors.APIError:\n logging.info(\" Warning: Couldn't find network to overwrite (does it exist?)\")\n\n ipam_pool = docker.types.IPAMPool(subnet=subnetwork, gateway=gw)\n ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])\n client.networks.create(network_name, driver=\"bridge\", ipam=ipam_config)",
"def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])",
"def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)",
"def create_network(address=None, **options):\n return NetworkDefinition(address, **options)",
"def create_network(self, *, name: t.Optional[str] = None) -> Network:\n network = Network(self, name=name)\n self._networks.add(network)\n return network",
"def test_networking_project_network_create(self):\n pass"
] | [
"0.65968263",
"0.64328766",
"0.63080657",
"0.60401773",
"0.585405",
"0.5773544",
"0.5750441",
"0.5715569",
"0.569809",
"0.55888134",
"0.5588614",
"0.5553594",
"0.54480547",
"0.5373371",
"0.5347243",
"0.53328407",
"0.5326178",
"0.53198993",
"0.5311947",
"0.52965164",
"0.52929527",
"0.5270218",
"0.5234661",
"0.5220858",
"0.52133614",
"0.52063507",
"0.5205628",
"0.5166636",
"0.51565284",
"0.51444143"
] | 0.6830833 | 0 |
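
The row above documents create_network_source. A minimal, hypothetical usage sketch follows; the tenancy OCID is read from the local config, and the name, description, and CIDR are placeholders rather than values from the dataset.

```python
# Hedged sketch: creating a network source in the tenancy (root compartment).
# Name, description, and the CIDR below are illustrative placeholders.
import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

details = oci.identity.models.CreateNetworkSourceDetails(
    compartment_id=config["tenancy"],            # network sources live in the tenancy
    name="corp-vpn-ranges",                      # must be unique across the tenancy
    description="Allowed corporate source ranges",
    public_source_list=["203.0.113.0/24"],       # documentation-range CIDR
)

network_source = identity.create_network_source(details).data
print(network_source.id, network_source.lifecycle_state)
```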
Creates a subscription to a region for a tenancy. | def create_region_subscription(self, create_region_subscription_details, tenancy_id, **kwargs):
resource_path = "/tenancies/{tenancyId}/regionSubscriptions"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_region_subscription got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"tenancyId": tenancy_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_region_subscription_details,
response_type="RegionSubscription")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_region_subscription_details,
response_type="RegionSubscription") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_subscription(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/subscriptions')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()",
"def test_create_subscription(self):\n pass",
"def post_create_subscription(\n self, response: pubsub.Subscription\n ) -> pubsub.Subscription:\n return response",
"def _create_subscription(self):\n try:\n self.client.create_subscription(\n name=self.subscription_path, topic=self.topic_path\n )\n except NotFound:\n # suitable topic does not exist in the Pitt-Google project\n raise ValueError(\n (\n f\"A subscription named {self.subscription_name} does not exist\"\n \"in the Google Cloud Platform project \"\n f\"{settings.GOOGLE_CLOUD_PROJECT}, \"\n \"and one cannot be automatically create because Pitt-Google \"\n \"does not publish a public topic with the same name.\"\n )\n )\n else:\n self._log_and_print(f\"Created subscription: {self.subscription_path}\")",
"def handle_create(self):\n subscription = self.client().subscription(\n self.properties[self.QUEUE_NAME],\n subscriber=self.properties[self.SUBSCRIBER],\n ttl=self.properties[self.TTL],\n options=self.properties[self.OPTIONS]\n )\n self.resource_id_set(subscription.id)",
"def create_subscription(connection, project_id, body, fields=None, error_msg=None):\n return connection.post(\n url=f'{connection.base_url}/api/subscriptions',\n params={'fields': fields},\n headers={'X-MSTR-ProjectID': project_id},\n json=body,\n )",
"def list_region_subscriptions(self, tenancy_id, **kwargs):\n resource_path = \"/tenancies/{tenancyId}/regionSubscriptions\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_region_subscriptions got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tenancyId\": tenancy_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[RegionSubscription]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[RegionSubscription]\")",
"def do_create_subscription(csp: CloudProviderInterface, environment_id=None):\n environment = Environments.get(environment_id)\n payload = build_subscription_payload(environment)\n try:\n csp.create_subscription(payload)\n except GeneralCSPException as e:\n app.logger.warning(\n \"Unable to create subscription for environment %s.\", environment.id,\n )\n raise e",
"def add_subscription(self):\n schema = schemas.load(schemas.Subscription, self.request)\n subscription = self.customer.add_subscription(**schema)\n self.request.db.flush()\n self.request.response.status_int = 201\n return {'abonnement': subscription}",
"def create_subscription(self, client_URI_endpoint, event_destination_id,\n name, subscription_context):\n self.client_URI_endpoints[client_URI_endpoint] = \\\n Event(event_destination_id, name, subscription_context)\n self.write_subscriptions_to_tmp(self.client_URI_endpoints)",
"def create_subscription_in_snuba(query_subscription_id, **kwargs):\n try:\n subscription = QuerySubscription.objects.get(id=query_subscription_id)\n except QuerySubscription.DoesNotExist:\n metrics.incr(\"snuba.subscriptions.create.subscription_does_not_exist\")\n return\n if subscription.status != QuerySubscription.Status.CREATING.value:\n metrics.incr(\"snuba.subscriptions.create.incorrect_status\")\n return\n if subscription.subscription_id is not None:\n metrics.incr(\"snuba.subscriptions.create.already_created_in_snuba\")\n # This mostly shouldn't happen, but it's possible that a subscription can get\n # into this state. Just attempt to delete the existing subscription and then\n # create a new one.\n try:\n _delete_from_snuba(\n QueryDatasets(subscription.snuba_query.dataset), subscription.subscription_id\n )\n except SnubaError:\n logger.exception(\"Failed to delete subscription\")\n\n subscription_id = _create_in_snuba(subscription)\n subscription.update(\n status=QuerySubscription.Status.ACTIVE.value, subscription_id=subscription_id\n )",
"def test_create_subscription_template(self):\n pass",
"def subscribe(request):\n address = request.POST.get('address')\n\n new_sub = Subscription(**{\n \"address\": address\n })\n new_sub.save()\n\n return HttpResponse(json.dumps({\n \"status\": \"success\"\n }, default=helpers.json_custom_parser), content_type='application/json')",
"def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)",
"def create_subscription(self, user, standard):\r\n\r\n subscription = self.create(\r\n user=user,\r\n standard=standard,\r\n )\r\n\r\n return subscription",
"def create_subscription(self, organization, collaborations, contractors):\r\n\r\n subscription = self.create(\r\n organization=organization,\r\n collaborations=collaborations,\r\n contractors=contractors,\r\n partner_discovery=partner_discovery,\r\n )\r\n return subscription",
"def subscription(self, uuid):\r\n return subs.Subscription(self, uuid)",
"def CreateSubscribeTransaction(self, dest, once=False):\n c = Subscribe(dest, self.node_id, once)\n self.connections.append((\"REACTIVE\", c))\n return c",
"def subscription(self):\r\n return SubscriptionResource(self)",
"def subscribePost() -> object:\n log = logging.getLogger(__name__)\n db = Db()\n\n body = request.get_json()\n\n if body is None:\n return jsonify({\"error\": \"json body is required\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('datasetId') in body:\n return jsonify({\"error\": \"datasetId is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n if not('notificationUrl') in body:\n return jsonify({\"error\": \"notificationUrl is a required attribute\"}), HTTPStatus.HTTPStatus.BAD_REQUEST\n\n\n subscription = db.Subscriptions(\n datasetId=body['datasetId'],\n notificationUrl=body['notificationUrl']\n )\n\n subscription.save()\n\n subscription = json.loads(subscription.to_json())\n subscription['id'] = subscription['_id'][\"$oid\"]\n subscription.pop(\"_id\")\n log.debug(\"subscription created\")\n\n return jsonify(subscription), HTTPStatus.CREATED",
"def post(self):\n data = request.json\n return new_subscription(data=data)",
"def post(self, orgname):\n permission = AdministerOrganizationPermission(orgname)\n request_data = request.get_json()\n subscription_id = request_data[\"subscription_id\"]\n if permission.can():\n organization = model.organization.get_organization(orgname)\n user = get_authenticated_user()\n account_number = marketplace_users.get_account_number(user)\n subscriptions = marketplace_subscriptions.get_list_of_subscriptions(account_number)\n\n if subscriptions is None:\n abort(401, message=\"no valid subscriptions present\")\n\n user_subscription_ids = [int(subscription[\"id\"]) for subscription in subscriptions]\n if int(subscription_id) in user_subscription_ids:\n try:\n model.organization_skus.bind_subscription_to_org(\n user_id=user.id, subscription_id=subscription_id, org_id=organization.id\n )\n return \"Okay\", 201\n except model.OrgSubscriptionBindingAlreadyExists:\n abort(400, message=\"subscription is already bound to an org\")\n else:\n abort(401, message=f\"subscription does not belong to {user.username}\")\n\n abort(401)",
"def create_subscription(chid, use_time=False, use_ctrl=False,\n mask=None, callback=None):\n mask = mask or DEFAULT_SUBSCRIPTION_MASK\n\n ftype = promote_type(chid, use_ctrl=use_ctrl, use_time=use_time)\n\n uarg = ctypes.py_object(callback)\n evid = ctypes.c_void_p()\n poll()\n ret = libca.ca_create_subscription(ftype, 0, chid, mask,\n _CB_EVENT, uarg, ctypes.byref(evid))\n PySEVCHK('create_subscription', ret)\n\n poll()\n return (_CB_EVENT, uarg, evid)",
"def createTenant(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp",
"def create_subscription(self, device_type):\n url = '{}/v2/subscriptions'.format(self.url)\n device_type = device_type.split('.')[0]\n device_pattern = \"urn:ngsi-ld:{}:*\".format(device_type)\n description = \"Notify QuantumLeap with {}\".format(device_type)\n data = {\n \"description\": description,\n \"subject\": {\n \"entities\": [\n {\n \"idPattern\": device_pattern\n }\n ]\n },\n \"notification\": {\n \"http\": {\n \"url\": \"http://quantumleap:8668/v2/notify\"\n },\n \"metadata\": [\"dateCreated\", \"dateModified\"]\n },\n \"throttling\": 1\n }\n return self.post(url, data=json.dumps(data), headers=self.headers_json)",
"def create_pubsub_subscription(client, project, topic, name):\n topic_name = pubsub.topic_name(project, topic)\n full_name = pubsub.subscription_name(project, name)\n if client.get_subscription(full_name):\n return\n\n client.create_subscription(full_name, topic_name)",
"def register(self, region=None, payload=None):\n return self._put_response_body([], payload=payload)",
"def test_create_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def test_create_subscription(self):\n try:\n self.arb.create_subscription(\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n bill_last_name=u\"Pool\"\n )\n except KeyError:\n pass\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n bill_first_name=u\"Michael\",\n bill_last_name=u\"Pool\"\n )\n self.arb.create_subscription(\n trial_amount=5.00,\n trial_occurrences=4,\n interval_length=1,\n interval_unit=arb.MONTHS_INTERVAL,\n start_date=u\"2008-09-09\",\n amount=39.99,\n card_number=u\"4222222222222\",\n expiration_date=u\"2009-10\",\n ship_first_name=u\"valentino\",\n first_name=u\"valentino\",\n bill_first_name=u\"valentino\",\n bill_last_name=u\"Pool\",\n driver_number=u\"55555\",\n driver_state=u\"CA\",\n driver_birth=u\"1990-09-09\"\n )"
] | [
"0.61023337",
"0.6050946",
"0.60171896",
"0.59941614",
"0.5989991",
"0.5985926",
"0.59581876",
"0.59402776",
"0.5903215",
"0.5853864",
"0.5773465",
"0.57645184",
"0.56852347",
"0.5659827",
"0.56107926",
"0.56008047",
"0.55303335",
"0.55104107",
"0.5472032",
"0.5425874",
"0.54192394",
"0.53581506",
"0.53247344",
"0.5322388",
"0.53061765",
"0.5292069",
"0.5279004",
"0.52674073",
"0.5249765",
"0.52019763"
] | 0.74824893 | 0 |
Creates a new SMTP credential for the specified user. An SMTP credential has an SMTP user name and an SMTP password. You must specify a description for the SMTP credential (although it can be an empty string). It does not have to be unique, and you can change it anytime with | def create_smtp_credential(self, create_smtp_credential_details, user_id, **kwargs):
resource_path = "/users/{userId}/smtpCredentials"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_smtp_credential got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_smtp_credential_details,
response_type="SmtpCredential")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_smtp_credential_details,
response_type="SmtpCredential") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_new_credential(account,userName,password):\n new_credential = Credentials(account,userName,password)\n return new_credential",
"def CreateNewSmtpUser(s):\n payload = ['adduser %s %s\\n' % (FLAGS.exploit_user, FLAGS.exploit_password),\n 'quit\\n']\n SendPayload(s, payload)\n logging.info('Created new user %s/%s' % (\n FLAGS.exploit_user, FLAGS.exploit_password))\n s.close()",
"def create_credential(self, body=None):\r\n return self.post(self.credentials_path, body=body)",
"def create_user(BrokerId=None, ConsoleAccess=None, Groups=None, Password=None, Username=None):\n pass",
"def create_user(UserName=None, MessageAction=None, FirstName=None, LastName=None, AuthenticationType=None):\n pass",
"def Create( profile_name,\r\n host,\r\n username=None,\r\n password=None,\r\n port=26,\r\n from_name=None,\r\n from_email=None,\r\n ssl=False,\r\n output_stream=sys.stdout,\r\n ):\r\n\r\n if not from_name and not from_email:\r\n raise CommandLine.UsageException(\"'from_name' or 'from_email' must be provided\")\r\n\r\n mailer = SmtpMailer( host,\r\n username=username,\r\n password=password,\r\n port=port,\r\n from_name=from_name,\r\n from_email=from_email,\r\n ssl=ssl,\r\n )\r\n mailer.Save(profile_name)\r\n\r\n output_stream.write(\"The profile '{}' has been created.\\n\".format(profile_name))",
"def create_user_credentials(storage_type, storage_id, space_name, client_ip,\n user_details):\n user_id = user_details[\"id\"]\n if user_id == \"0\":\n return PosixCredentials(0, 0)\n\n uid = gid = gen_storage_id(user_id)\n return PosixCredentials(uid, gid)",
"def send_new_credentials(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website):\n\n logger.info(\"in send lead mail task\")\n return send_lead_generate(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website)",
"def create_user(email, password, f_name, l_name):\n pass",
"def create_email(user):\n if 'research' in user.get_domains():\n domain = 'research'\n else: domain = 'academic'\n subject = \"ECE/CIS Account Created\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n \n message = \"Your ECE/CIS %s account has been created with the username: %s\\n\\n\" % (domain, user.username)\n message += \"Please do not reply to this message. If you need assistance with your account, please visit:\\n\"\n message += \"%s\\n\\n\" % helprequest\n message += \"-- EE/CIS Labstaff\\n\"\n\n send('[email protected]', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)",
"def create(self, username, password, email):\n pass",
"def create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user",
"def create_user(self, conn, name, password, group):\n user = conn.user.allocate(name, password, \"\", [group])\n return user",
"def m_credential_create(node_name, credential_hash, participantDID):\n pass",
"def create_server(host, port, uid, pwd):\r\n s = smtplib.SMTP(host, port)\r\n s.starttls()\r\n s.login(\r\n uid,\r\n pwd\r\n )\r\n return s",
"def create_service_credentials(user, new_roles=None):\n tenant = config('service-tenant')\n if not tenant:\n raise Exception(\"No service tenant provided in config\")\n\n domain = None\n if get_api_version() > 2:\n domain = DEFAULT_DOMAIN\n passwd = create_user_credentials(user, get_service_password,\n set_service_password,\n tenant=tenant, new_roles=new_roles,\n grants=[config('admin-role')],\n domain=domain)\n if get_api_version() > 2:\n # Create account in SERVICE_DOMAIN as well using same password\n domain = SERVICE_DOMAIN\n passwd = create_user_credentials(user, get_service_password,\n set_service_password,\n tenant=tenant, new_roles=new_roles,\n grants=[config('admin-role')],\n domain=domain)\n return passwd",
"async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}",
"def add_user(self, user, pw):\n self.db.execute(\"INSERT INTO user_credentials VALUES (?, ?)\", [user, pw])\n self.db.commit()",
"def create(self, credentials):\n return User.objects.create_user(\n credentials['username'],\n credentials['email'],\n credentials['password']\n )",
"def newuser(lp, creds, username=None):\n\n names = guess_names_from_smbconf(lp, None, None)\n db = Ldb(url=get_ldb_url(lp, creds, names), session_info=system_session(), \n credentials=creds, lp=lp)\n user_dn = get_user_dn(db, \"CN=Users,%s\" % names.domaindn, username)\n if user_dn:\n extended_user = \"\"\"\ndn: %(user_dn)s\nchangetype: modify\nadd: mailNickName\nmailNickname: %(username)s\nadd: homeMDB\nhomeMDB: CN=Mailbox Store (%(netbiosname)s),CN=First Storage Group,CN=InformationStore,CN=%(netbiosname)s,CN=Servers,CN=First Administrative Group,CN=Administrative Groups,CN=%(firstorg)s,CN=Microsoft Exchange,CN=Services,CN=Configuration,%(domaindn)s\nadd: homeMTA\nhomeMTA: CN=Mailbox Store (%(netbiosname)s),CN=First Storage Group,CN=InformationStore,CN=%(netbiosname)s,CN=Servers,CN=First Administrative Group,CN=Administrative Groups,CN=%(firstorg)s,CN=Microsoft Exchange,CN=Services,CN=Configuration,%(domaindn)s\nadd: legacyExchangeDN\nlegacyExchangeDN: /o=%(firstorg)s/ou=First Administrative Group/cn=Recipients/cn=%(username)s\nadd: proxyAddresses\nproxyAddresses: =EX:/o=%(firstorg)s/ou=First Administrative Group/cn=Recipients/cn=%(username)s\nproxyAddresses: smtp:postmaster@%(dnsdomain)s\nproxyAddresses: X400:c=US;a= ;p=First Organizati;o=Exchange;s=%(username)s\nproxyAddresses: SMTP:%(username)s@%(dnsdomain)s\nreplace: msExchUserAccountControl\nmsExchUserAccountControl: 0\n\"\"\"\n ldif_value = extended_user % {\"user_dn\": user_dn,\n \"username\": username,\n \"netbiosname\": names.netbiosname,\n \"firstorg\": names.firstorg,\n \"domaindn\": names.domaindn,\n \"dnsdomain\": names.dnsdomain}\n db.modify_ldif(ldif_value)\n\n res = db.search(base=user_dn, scope=SCOPE_BASE, attrs=[\"*\"])\n if len(res) == 1:\n record = res[0]\n else:\n raise Exception, \\\n \"this should never happen as we just modified the record...\"\n record_keys = map(lambda x: x.lower(), record.keys())\n\n if \"displayname\" not in record_keys:\n extended_user = \"dn: %s\\nadd: displayName\\ndisplayName: %s\\n\" % (user_dn, username)\n db.modify_ldif(extended_user)\n\n if \"mail\" not in record_keys:\n extended_user = \"dn: %s\\nadd: mail\\nmail: %s@%s\\n\" % (user_dn, username, names.dnsdomain)\n db.modify_ldif(extended_user)\n\n print \"[+] User %s extended and enabled\" % username\n else:\n print \"[!] User '%s' not found\" % username",
"def new_credentials(site_name, user_name, password):\n new_credentials = Credentials(site_name, user_name, password)\n return new_credentials",
"def create_user_credentials(user, passwd_get_callback, passwd_set_callback,\n tenant=None, new_roles=None,\n grants=None, domain=None):\n passwd = passwd_get_callback(user)\n if not passwd:\n log(\"Unable to retrieve password for user '{}'\".format(user),\n level=INFO)\n return\n\n log(\"Creating service credentials for '%s'\" % user, level=DEBUG)\n if user_exists(user, domain=domain):\n log(\"User '%s' already exists\" % (user), level=DEBUG)\n # NOTE(dosaboy): see LP #1648677\n if is_password_changed(user, passwd):\n update_user_password(user, passwd, domain)\n else:\n create_user(user, passwd, tenant=tenant, domain=domain)\n\n passwd_set_callback(passwd, user=user)\n\n if grants:\n for role in grants:\n # grant role on project\n grant_role(user, role, tenant=tenant, user_domain=domain,\n project_domain=domain)\n else:\n log(\"No role grants requested for user '%s'\" % (user), level=DEBUG)\n\n if new_roles:\n # Allow the remote service to request creation of any additional roles.\n # Currently used by Swift and Ceilometer.\n for role in new_roles:\n log(\"Creating requested role '%s'\" % role, level=DEBUG)\n create_role(role, user=user, tenant=tenant, domain=domain)\n\n return passwd",
"def _create_user(self, username, password, domain_id, project_id):\n request = {\n \"user\": {\n \"name\": username,\n \"password\": password,\n \"domain_id\": domain_id,\n \"default_project_id\": project_id,\n \"description\": \"description\",\n \"email\": \"[email protected]\",\n \"enabled\": True,\n }\n }\n response = self.client.post(USER_PATH, data=json.dumps(request),\n headers=HEADERS)\n if response.status_code == 409:\n return\n elif response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create test user.\")",
"def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='[email protected]', )\n u.set_password('test_password')\n u.save()\n self.user = u\n return u",
"def create_user(\n screen_name: str,\n email_address: str,\n password: str,\n first_names: Optional[str],\n last_name: Optional[str],\n site_id: SiteID,\n *,\n consents: Optional[Set[Consent]] = None,\n) -> Tuple[User, UserAccountCreated]:\n # user with details, password, and roles\n user, event = create_basic_user(\n screen_name,\n email_address,\n password,\n first_names=first_names,\n last_name=last_name,\n )\n\n # consents\n if consents:\n for consent in consents:\n # Insert missing user ID.\n consent = consent_service.build_consent(\n user.id,\n consent.subject_id,\n consent.expressed_at,\n )\n db.session.add(consent)\n\n db.session.commit()\n\n request_email_address_confirmation(user, email_address, site_id)\n\n return user, event",
"def createNewUser(name, account, auth, email, pwd, group, expiry, node):\n \n #Check if the user creation was succesful\n if hl.createUser(name, account, auth, email = email, passwd = pwd, group = group, expiry = expiry, node = node):\n user = hl.getUser(\"Email\", email)\n\n if(auth == \"Email\"):\n subjectTitle = \"OneGroup account keys\"\n recipientEmail =[email]\n bodyMessage = \"here are your keys\"\n attachmentName = user['Keys'] + '.ovpn'\n filename = \"{}/{}\".format(keys_dir,attachmentName)\n attachmentFilePath = filename\n emailMessage(subjectTitle, recipientEmail, bodyMessage,attachmentName, attachmentFilePath)\n\n elif(auth == \"Passphrase\"):\n subjectTitle = \"OneGroup account details\"\n recipientEmail = [email]\n bodyMessage = \"Your login details are\\n Email :\" + str(email) + \"\\nPassword :\" + str(pwd)\n emailMessage(subjectTitle, recipientEmail, bodyMessage)\n return True\n else:\n return False",
"def create_user(self, uname, name, password=None):\r\n\r\n if not uname:\r\n return _('Must provide username')\r\n if not name:\r\n return _('Must provide full name')\r\n\r\n email_domain = getattr(settings, 'SSL_AUTH_EMAIL_DOMAIN', 'MIT.EDU')\r\n\r\n msg = u''\r\n if settings.FEATURES['AUTH_USE_CERTIFICATES']:\r\n if not '@' in uname:\r\n email = '{0}@{1}'.format(uname, email_domain)\r\n else:\r\n email = uname\r\n if not email.endswith('@{0}'.format(email_domain)):\r\n msg += u'{0} @{1}'.format(_('email must end in'), email_domain)\r\n return msg\r\n mit_domain = 'ssl:MIT'\r\n if ExternalAuthMap.objects.filter(external_id=email,\r\n external_domain=mit_domain):\r\n msg += _('Failed - email {0} already exists as '\r\n 'external_id').format(email)\r\n return msg\r\n new_password = generate_password()\r\n else:\r\n if not password:\r\n return _('Password must be supplied if not using certificates')\r\n\r\n email = uname\r\n\r\n if not '@' in email:\r\n msg += _('email address required (not username)')\r\n return msg\r\n new_password = password\r\n\r\n user = User(username=uname, email=email, is_active=True)\r\n user.set_password(new_password)\r\n try:\r\n user.save()\r\n except IntegrityError:\r\n msg += _('Oops, failed to create user {0}, '\r\n 'IntegrityError').format(user)\r\n return msg\r\n\r\n reg = Registration()\r\n reg.register(user)\r\n\r\n profile = UserProfile(user=user)\r\n profile.name = name\r\n profile.save()\r\n\r\n if settings.FEATURES['AUTH_USE_CERTIFICATES']:\r\n credential_string = getattr(settings, 'SSL_AUTH_DN_FORMAT_STRING',\r\n '/C=US/ST=Massachusetts/O=Massachusetts Institute of Technology/OU=Client CA v1/CN={0}/emailAddress={1}')\r\n credentials = credential_string.format(name, email)\r\n eamap = ExternalAuthMap(\r\n external_id=email,\r\n external_email=email,\r\n external_domain=mit_domain,\r\n external_name=name,\r\n internal_password=new_password,\r\n external_credentials=json.dumps(credentials),\r\n )\r\n eamap.user = user\r\n eamap.dtsignup = timezone.now()\r\n eamap.save()\r\n\r\n msg += _('User {0} created successfully!').format(user)\r\n return msg",
"def setupUser(con, options, dbName, userName, userInfo):\n if checkUsername(userName):\n trace(\"For dbName='%s', create user '%s'\" % (dbName, userName))\n userPassword = userInfo[\"password\"]\n optionalDbExecute(con, options, \"create user %s with password '%s'\" % (userName, userPassword))",
"def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)",
"def send_email( user, password ):\n \n mail = Mailer( host = EMAIL['host'], \n port = EMAIL['port'],\n use_tls = EMAIL['use_tls'], \n usr = EMAIL['user'], \n pwd = EMAIL['password']\n )\n \n message = Message( From = '[email protected]',\n To = [user.email],\n Subject = \"Password Reset\"\n )\n \n body = \"\"\"Your new password for {} is {}\n You can reset it to what you like on your settings page once you log in with\n this password\n \"\"\".format(__name__, password )\n\n message.Body = body\n try:\n mail.send(message)\n except Exception as e:\n log.error( 'Send mail error: {}'.format( str(e) ) )"
] | [
"0.6836732",
"0.6782684",
"0.66625166",
"0.6148272",
"0.60939145",
"0.59752667",
"0.59635276",
"0.59110373",
"0.5898103",
"0.58977437",
"0.5887567",
"0.58812404",
"0.5815839",
"0.5802219",
"0.57664925",
"0.57609296",
"0.57314557",
"0.57081306",
"0.56840855",
"0.56605315",
"0.5635352",
"0.55870754",
"0.5580626",
"0.5579031",
"0.55513906",
"0.55480254",
"0.55424273",
"0.55354136",
"0.5533354",
"0.5503334"
] | 0.72970504 | 0 |
Creates a new tag in the specified tag namespace. The tag requires either the OCID or the name of the tag namespace that will contain this tag definition. You must specify a name for the tag, which must be unique across all tags in the tag namespace and cannot be changed. The name can contain any ASCII character except the space (_) or period (.) characters. Names are case insensitive. That means, for example, \"myTag\" and \"mytag\" are not allowed in the same namespace. If you specify a name that's already in use in the tag namespace, a 409 error is returned. The tag must have a description. It does not have to be unique, and you can change it with | def create_tag(self, tag_namespace_id, create_tag_details, **kwargs):
resource_path = "/tagNamespaces/{tagNamespaceId}/tags"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_tag got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"tagNamespaceId": tag_namespace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_tag_details,
response_type="Tag")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_tag_details,
response_type="Tag") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(self, name, tag):\n\n\t\turl_json = urllib.urlencode({\"name\": name, \"tag\": tag})\n\t\treturn self._create(\"/tag?json_hash=%s\" % url_json, \"tag\")",
"def create_tag(self, session, tags):\n self._tag(session.put, tags=tags, session=session)",
"def create_tag(self, entry_name, tag):\n return self.__datacatalog.create_tag(parent=entry_name, tag=tag)",
"def create_tag_namespace(self, create_tag_namespace_details, **kwargs):\n resource_path = \"/tagNamespaces\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_tag_namespace got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_namespace_details,\n response_type=\"TagNamespace\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_namespace_details,\n response_type=\"TagNamespace\")",
"def create_tag(name):\n name = name.strip().lower()\n tag = Tags(name)\n try:\n db_session.add(tag)\n db_session.commit()\n except exc.IntegrityError as err:\n db_session.rollback()\n return 'Tag \"%s\" has not been added - already exists: %s.' % (name, err), 'warning', None\n return 'Tag \"%s\" has been added.' % name, 'success', tag",
"def create_or_get_tag(self, tag_name: str, *args, **kwargs):\n\n tag_data = api.create_or_get_tag(\n tag_name,\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return en.Tag(tag_data)",
"def create_a_tag(self, tag_id, contact_id):\n data = {\"contactTag\":{\"contact\":str(contact_id),\"tag\":str(tag_id)}}\n\n return self.client._post(\"/contactTags\", json=data)",
"def createTag(self, authenticationToken, tag):\r\n pass",
"def createTag(self, authenticationToken, tag):\r\n self.send_createTag(authenticationToken, tag)\r\n return self.recv_createTag()",
"def create_tag():\n \n name = request.form['tag_name']\n\n if \"name\" in session:\n return redirect(\"/tags\")\n\n else:\n new_tag = Tag(name = name)\n db.session.add(new_tag)\n db.session.commit()\n return redirect(\"/tags\")",
"def _create_tag_request():\n\n key = helpers.get('Tag.1.Key')\n value = helpers.get('Tag.1.Value')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'createTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key,\n 'tags[0].value': value\n }\n\n response = requester.make_request_async(args)\n\n return response",
"def create(self, params={}, **options):\n return self.client.post(\"/tags\", params, **options)",
"def make_tag(tag_name, text='', tag_attr=None):\n if tag_attr is None:\n tag_attr = {}\n\n doc = xml.dom.minidom.Document()\n element = doc.createElement(tag_name)\n if tag_attr:\n for k, v in izip(list(tag_attr.keys()), list(tag_attr.values())):\n element.setAttribute(unicode(k), unicode(v))\n if text:\n text_node = doc.createTextNode(text.strip())\n element.appendChild(text_node)\n return element",
"def tag ():\n\n tagname = get_tag(comp_versions, 'ACE')\n\n if opts.tag:\n if opts.take_action:\n vprint (\"Placing tag %s on ACE_TAO\" % (tagname))\n ex (\"cd $DOC_ROOT/ACE_TAO && git tag -a \" + tagname + \" -m\\\"\" + tagname + \"\\\"\")\n\n vprint (\"Placing tag %s on MPC\" % (tagname))\n ex (\"cd $DOC_ROOT/MPC && git tag -a \" + tagname + \" -m\\\"\" + tagname + \"\\\"\")\n\n # Update release branches\n latest_branch_helper (update_latest_branch, opts.release_type)\n else:\n vprint (\"Placing tag %s on ACE_TAO\" % (tagname))\n vprint (\"Placing tag %s on MPC\" % (tagname))\n print (\"Creating tags:\\n\")\n print (\"Placing tag \" + tagname + \"\\n\")",
"def create_tag(tag, directory=None):\n execute_command('git tag {0}'.format(tag), shell=True, cwd=directory)",
"def create_in_workspace(self, workspace, params={}, **options):\n path = \"/workspaces/%s/tags\" % (workspace)\n return self.client.post(path, params, **options)",
"def add_tag(self, obj, tag_name):\r\n tag_names = parse_tag_input(tag_name)\r\n if not len(tag_names):\r\n raise AttributeError(_('No tags were given: \"%s\".') % tag_name)\r\n if len(tag_names) > 1:\r\n raise AttributeError(_('Multiple tags were given: \"%s\".') % tag_name)\r\n tag_name = tag_names[0]\r\n if settings.FORCE_LOWERCASE_TAGS:\r\n tag_name = tag_name.lower()\r\n tag, created = self.get_or_create(name=tag_name)\r\n ctype = ContentType.objects.get_for_model(obj)\r\n TaggedItem._default_manager.get_or_create(\r\n tag=tag, content_type=ctype, object_id=obj.pk)",
"def add_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n print filename\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = set(metadata.get(\"tags\", []))\n tags.add(tag_name)\n metadata[\"tags\"] = list(tags)\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"added\", 200",
"def add_tag(session, tag_name, user_id=None, username='system_user'):\n session = validate_session(session)\n date_created=datetime.now()\n try:\n add_tag = TagInfo(tag_name, date_created, user_id)\n session.add(add_tag)\n session.commit()\n return(True, \"Tag %s added\" % (tag_name), add_tag)\n except Exception as e:\n session.rollback()\n return(False, \"Tag %s failed to add\" % (tag_name))",
"def addNode(self, nTag, pkg, exe, args, name, namespace):\r\n try:\r\n validateName(nTag)\r\n except IllegalName:\r\n raise InvalidRequest('Node tag is not a valid.')\r\n\r\n if nTag in self._nodes:\r\n raise InvalidRequest(\"Can not use the same node tag '{0}' in the \"\r\n 'same container twice.'.format(nTag))\r\n\r\n node = self._obj.createNode(pkg, exe, args, name, namespace)\r\n self._nodes[nTag] = node\r\n node.notifyOnDeath(self._nodeDied)",
"def create(self, label_id):\n data = {\n 'type': 'tagit',\n 'rate_count': 0,\n 'rate_range': 'day',\n 'limit_count': 0,\n 'limit_range': 'day',\n 'schedule': [],\n 'enabled': True,\n 'args': {\n 'sn': label_id,\n 'tag_sn': label_id\n }\n }\n # Yes, it's confusing. the `/actions/` endpoint is used for tags, while\n # the /tags/ endpoint is used for labels.\n return self._post(\n request=ApiActions.CREATE.value,\n uri=ApiUri.ACTIONS.value,\n params=data\n )",
"def ex_create_tags(self, node, tags):\n if not tags:\n return\n\n params = { 'Action': 'CreateTags',\n 'ResourceId.0': node.id }\n for i, key in enumerate(tags):\n params['Tag.%d.Key' % i] = key\n params['Tag.%d.Value' % i] = tags[key]\n\n self.connection.request(self.path,\n params=params.copy()).object",
"def add_tag(self, session, tag):\n self._tag(session.put, key=tag, session=session)",
"def test_create_tag_invalid(self):\n tag_data = {'name': ''}\n res = self.client.post(TAGS_URL, data=tag_data)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def create(self, name, description=None, color=None):\n data = {\n 'name': name,\n 'title': name,\n 'description': description or name,\n 'appearance': {\n 'color': color or random_color()\n }\n }\n # Yes, it's confusing. the `/tags/` endpoint is used for labels\n return self._post(\n request=ApiActions.CREATE.value,\n uri=ApiUri.TAGS.value,\n params=data\n )",
"def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def make_new_tag(tag_name, user_path, user_signed_in, current_user):\n if not user_signed_in:\n print('ALERT: -- User not logged in --')\n else:\n user = current_user[0]\n print(is_tag(tag_name, user_path, current_user))\n if is_tag(tag_name, user_path, current_user):\n print('Tag already exist')\n else:\n os.mkdir((user_path + '\\\\' + user + '\\\\' + tag_name).encode('unicode_escape'))\n print('Tag --' + tag_name + '-- Created')",
"def create_tag(case_dict, new_tag, username, password):\n # ---------------------------------------------------------------------\n logger.debug(\"create_tag\")\n\n # create a new trunk tag\n os.chdir(case_dict[\"archive_temp_dir\"])\n svn_repo = \"{0}/trunk\".format(case_dict[\"svn_repo_url\"])\n svn_repo_tag = \"{0}/trunk_tags/{1}\".format(case_dict[\"svn_repo_url\"], new_tag)\n msg = '\"create new trunk tag\"'\n cmd = [\n \"svn\",\n \"copy\",\n \"--username\",\n username,\n \"--password\",\n password,\n svn_repo,\n svn_repo_tag,\n \"--message\",\n msg,\n ]\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as error:\n cmd_nopasswd = [\n \"svn\",\n \"copy\",\n \"--username\",\n username,\n \"--password\",\n \"******\",\n svn_repo,\n svn_repo_tag,\n \"--message\",\n msg,\n ]\n msg = _call_template.substitute(\n function=\"checkin_trunk\",\n cmd=cmd_nopasswd,\n error=error.returncode,\n strerror=error.output,\n )\n logger.warning(msg)\n raise SVNException(msg)",
"def _create_element(tag, text=\"\", attr={}, namespace=Xmlns_path):\n element = Et.Element('.//' + namespace + tag, attr)\n element.text = text\n return element"
] | [
"0.668044",
"0.64789695",
"0.6425235",
"0.63370335",
"0.62885815",
"0.6265026",
"0.6156292",
"0.61304843",
"0.5985835",
"0.5902972",
"0.5860836",
"0.5857317",
"0.58103025",
"0.5736905",
"0.57278633",
"0.571501",
"0.5682197",
"0.56273866",
"0.5566556",
"0.5561892",
"0.5553828",
"0.5545572",
"0.55432534",
"0.55401194",
"0.5506235",
"0.5496501",
"0.54942423",
"0.5488063",
"0.54709196",
"0.5461829"
] | 0.7148086 | 0 |
Creates a new tag default in the specified compartment for the specified tag definition. If you specify that a value is required, a value is set during resource creation (either by the user creating the resource or another tag default). If no value is set, resource creation is blocked. If the `isRequired` flag is set to \"true\", the value is set during resource creation. If the `isRequired` flag is set to \"false\", the value you enter is set during resource creation. | def create_tag_default(self, create_tag_default_details, **kwargs):
resource_path = "/tagDefaults"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_tag_default got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_tag_default_details,
response_type="TagDefault")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_tag_default_details,
response_type="TagDefault") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Option(name: str, value: Union[str, int], default: Optional[bool] = None) -> Dict:\n doc = {'name': name, 'value': value}\n if default is not None:\n doc['isDefault'] = default\n return doc",
"def register_option_pair(key, default_value):\n\n _OPTION_TEMPLATE[key] = default_value",
"def createDevIDAttr(shapefileName, defaultVal):\n\n inputds = ogr.Open(shapefileName,update=True)\n if not inputds:\n sys.exit(\"Unable to open input file '{0}'\".format(shapefileName))\n\n inputlyr = inputds.GetLayer()\n\n # Create field definition(s)\n # Add input Layer Fields to the output Layer if defined in field_names arg.\n inLayerDefn = inputlyr.GetLayerDefn()\n if inLayerDefn.GetFieldIndex(cc.DEV_LAYER_ATTRIBUTE_NAME) == -1:\n print(\"\\tCreating an Attribute '{0}' in vector file '{1}'\".format(cc.DEV_LAYER_ATTRIBUTE_NAME,shapefileName))\n\n inputlyr.CreateField(ogr.FieldDefn(cc.DEV_LAYER_ATTRIBUTE_NAME, ogr.OFTInteger))\n\n for inFeature in inputlyr:\n inFeature.SetField(cc.DEV_LAYER_ATTRIBUTE_NAME,defaultVal)\n inputlyr.SetFeature(inFeature)\n\n inputds.Destroy()\n print(\"\\tCreated an Attribute '{0}' in vector file '{1}'\".format(cc.DEV_LAYER_ATTRIBUTE_NAME,shapefileName))",
"def update_tag_default(self, tag_default_id, update_tag_default_details, **kwargs):\n resource_path = \"/tagDefaults/{tagDefaultId}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\",\n \"opc_request_id\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagDefaultId\": tag_default_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing),\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_default_details,\n response_type=\"TagDefault\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_tag_default_details,\n response_type=\"TagDefault\")",
"def _add_default_tags(self):\n self.tags.add_tag('ban', required=True)",
"def create_object_parameter_from_default(obj, default):\n values = []\n if default.enum:\n for v in DefaultParameterVl.objects.filter(parameter=default).all():\n values.append({'value' : v.value,\n 'caption' : v.caption})\n return create_object_parameter(obj, 'user', False,\n tp = default.tp,\n name=default.name,\n descr=default.descr,\n values=values)",
"def __init__(self, default_value, description, register=None, name=None,\n is_key=False, **kwargs):\n self._default_value = default_value\n self._description = description\n self._register = register\n self._name = name\n self._is_key = is_key\n self._kwargs = kwargs\n\n self._value = default_value\n self._frozen = False",
"def set_default(self, name, default, group=None):\n opt_info = self._get_opt_info(name, group)\n opt_info['default'] = self._get_enforced_type_value(\n opt_info['opt'], default)\n opt_info['location'] = LocationInfo(\n Locations.set_default,\n _get_caller_detail(3), # this function has a decorator to skip\n )",
"def _create_tag_request():\n\n key = helpers.get('Tag.1.Key')\n value = helpers.get('Tag.1.Value')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'createTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key,\n 'tags[0].value': value\n }\n\n response = requester.make_request_async(args)\n\n return response",
"def addDefault(self, name, object):\n if name is None:\n raise ValueError(\"Name cannot be None\")\n self.defaultChoice = name\n self.addObject(name, object)",
"def __init__(self, name=None, values=None, default_value=None):\n self.swagger_types = {\n 'name': 'str',\n 'values': 'list[TagPropertyAllowedValue]',\n 'default_value': 'str'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'values': 'values',\n 'default_value': 'defaultValue'\n }\n\n self._name = name\n self._values = values\n self._default_value = default_value",
"def var(\n default: Any = RAISE,\n converter: Callable | None = None,\n name: str | None = None,\n validator: Callable | None = None,\n help: str | None = None,\n) -> Any:\n return attr.ib(\n default=default,\n metadata={CNF_KEY: _ConfigEntry(name, default, None, None, help)},\n converter=converter,\n validator=validator,\n )",
"def create(self, name, description=None, color=None):\n data = {\n 'name': name,\n 'title': name,\n 'description': description or name,\n 'appearance': {\n 'color': color or random_color()\n }\n }\n # Yes, it's confusing. the `/tags/` endpoint is used for labels\n return self._post(\n request=ApiActions.CREATE.value,\n uri=ApiUri.TAGS.value,\n params=data\n )",
"def validate_default_element(self, value):\n return self.validate_element(value)",
"def validate_default(self, value):\n return self.__validate(value, self.validate_default_element)",
"def test_string_default(self):\n tag = Tag()\n self.assertEqual(tag.value, 'default')",
"def delete_tag_default(self, tag_default_id, **kwargs):\n resource_path = \"/tagDefaults/{tagDefaultId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_request_id\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagDefaultId\": tag_default_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing),\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)",
"def default_arg(default):\n class DefaultArg(argparse.Action):\n def __call__(self, parser, namespace, value, option_string):\n if value is None:\n setattr(namespace, self.dest, default)\n else:\n setattr(namespace, self.dest, value)\n\n return DefaultArg",
"def _set_default(name, value, context):\n if name not in context:\n context[name] = value",
"def __init__(self, name=None, value=None):\n default_attr = dict(name=str(),\n value=str())\n self.name = name\n self.value = value\n self._set_default_attr(default_attr)",
"def createTag(self, authenticationToken, tag):\r\n pass",
"def __init__(self,\n name=None,\n help_text=None,\n fallthroughs=None,\n completer=None,\n completion_request_params=None,\n completion_id_field=None,\n value_type=None,\n parameter_name=None):\n self.attribute_name = name\n self.help_text = help_text\n self.fallthroughs = fallthroughs or []\n # The completer is always None because neither the surface nor the yaml\n # schema allow for specifying completers currently.\n self.completer = completer\n self.completion_request_params = completion_request_params\n self.completion_id_field = completion_id_field\n self.value_type = value_type or six.text_type\n self.parameter_name = parameter_name",
"def get_default_value(self, tag, primitive_type, hint=None):\n # initialize\n default_value = self.get_default_value_of_type(primitive_type)\n\n # use example value as default (if exist)\n if self.use_examples_for_default and self.get_examples_values:\n examples_values = self.get_examples_values(tag)\n if examples_values:\n default_value = list(examples_values)[0]\n\n # use response value as default (if exist)\n if self.use_response_for_default and self.get_response_values:\n response_values = self.get_response_values(tag, hint)\n if response_values:\n default_value = response_values[0]\n\n return default_value",
"def make_tag(tag_name, text='', tag_attr=None):\n if tag_attr is None:\n tag_attr = {}\n\n doc = xml.dom.minidom.Document()\n element = doc.createElement(tag_name)\n if tag_attr:\n for k, v in izip(list(tag_attr.keys()), list(tag_attr.values())):\n element.setAttribute(unicode(k), unicode(v))\n if text:\n text_node = doc.createTextNode(text.strip())\n element.appendChild(text_node)\n return element",
"def create_or_get_tag(self, tag_name: str, *args, **kwargs):\n\n tag_data = api.create_or_get_tag(\n tag_name,\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return en.Tag(tag_data)",
"def _default(self, section, option, default):\r\n if not self.has_section(section):\r\n self.add_section(section)\r\n if not self.has_option(section, option):\r\n self.set(section, option, default)\r\n self.save()",
"def default(default_value, force=False):\n def default_setter(value):\n \"\"\"\n Sets the value to the given default value, assuming the original value\n is not set or the default value is set to forced.\n\n :param Any value: Injected by CKAN core\n :rtype: Any\n \"\"\"\n return value if value and not force else default_value\n\n return default_setter",
"def default_value(self, value: Any) -> None:\n self.sdc_resource.set_input_default_value(self, value)\n self._default_value = value",
"def createElement(tag,attrib={},text={}):\n element = ET.Element(tag,attrib)\n element.text = text\n return element",
"def setdefault(self, value: Any) -> None:\n self.default_factory = value \n return"
] | [
"0.5894807",
"0.55696887",
"0.548009",
"0.52427024",
"0.5172015",
"0.5163364",
"0.5069004",
"0.50466603",
"0.50242513",
"0.5003325",
"0.4976948",
"0.49649096",
"0.49576932",
"0.49290386",
"0.4914476",
"0.49040148",
"0.49038035",
"0.48990166",
"0.48935652",
"0.48932627",
"0.48672876",
"0.4841067",
"0.48255783",
"0.4821933",
"0.48154172",
"0.47908288",
"0.4787615",
"0.47782978",
"0.47718528",
"0.47643954"
] | 0.64231205 | 0 |
Creates a new tag namespace in the specified compartment. You must specify the compartment ID in the request object (remember that the tenancy is simply the root compartment). You must also specify a name for the namespace, which must be unique across all namespaces in your tenancy and cannot be changed. The name can contain any ASCII character except the space (_) or period (.). Names are case insensitive. That means, for example, \"myNamespace\" and \"mynamespace\" are not allowed in the same tenancy. Once you've created a namespace, you cannot change the name. If you specify a name that's already in use in the tenancy, a 409 error is returned. You must also specify a description for the namespace. It does not have to be unique, and you can change it with | def create_tag_namespace(self, create_tag_namespace_details, **kwargs):
resource_path = "/tagNamespaces"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_tag_namespace got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_tag_namespace_details,
response_type="TagNamespace")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_tag_namespace_details,
response_type="TagNamespace") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post_namespace_create(self, resource_dict):\n pass",
"def create_namespace(node, namespace, delete_before_create=True):\n if delete_before_create:\n Namespaces.delete_namespace(node, namespace)\n\n cmd = f\"ip netns add {namespace}\"\n exec_cmd_no_error(node, cmd, sudo=True)\n Namespaces.__namespaces.append(namespace)",
"def createNamespace(self):\r\n raise NotImplementedError('Endpoint can not be used directly.')",
"def create_namespaced_net_namespace(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_net_namespace`\")\n\n resource_path = '/oapi/v1/netnamespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespace',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def change_tag_namespace_compartment(self, tag_namespace_id, change_tag_namespace_compartment_detail, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}/actions/changeCompartment\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"change_tag_namespace_compartment got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=change_tag_namespace_compartment_detail)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=change_tag_namespace_compartment_detail)",
"def create_namespaced_namespace(self, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_namespaced_namespace_with_http_info(body, **kwargs)\n else:\n (data) = self.create_namespaced_namespace_with_http_info(body, **kwargs)\n return data",
"def create_namespaced_namespace_with_http_info(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_namespace`\")\n\n resource_path = '/api/v1/namespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Namespace',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))",
"def create(cls, ns, name, **kwargs):\n key_name = '%s:%s' % (ns, name)\n return cls(key_name=key_name, ns=ns, name=name, **kwargs)",
"def create (self, name, dsspolicyguid, jobguid = \"\", executionparams = {}):\n params =dict()\n params['name'] = name\n params['dsspolicyguid'] = dsspolicyguid\n executionparams['rootobjecttype'] = 'dssnamespace'\n\n \n return q.workflowengine.actionmanager.startRootobjectAction('dssnamespace', 'create', params, jobguid=jobguid, executionparams=executionparams)",
"def create_tag(self, tag_namespace_id, create_tag_details, **kwargs):\n resource_path = \"/tagNamespaces/{tagNamespaceId}/tags\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_tag got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagNamespaceId\": tag_namespace_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_tag_details,\n response_type=\"Tag\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_tag_details,\n response_type=\"Tag\")",
"def _create_namespace(self):\n self.ocp.new_project(self.namespace)",
"def create_namespace(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"create_namespace\")",
"def create_namespace(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"create_namespace\")",
"def create_namespace(self, name, status_wait=True):\n name = name or self.generate_random_name()\n\n manifest = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Namespace\",\n \"metadata\": {\n \"name\": name,\n \"labels\": {\n \"role\": name\n }\n }\n }\n self.v1_client.create_namespace(body=manifest)\n\n if status_wait:\n with atomic.ActionTimer(self,\n \"kubernetes.wait_for_nc_become_active\"):\n wait_for_status(name,\n status=\"Active\",\n read_method=self.get_namespace)\n return name",
"def test_namespace_bucket_creation_rpc(\n self, ns_resource_factory, bucket_factory, platform\n ):\n # Create the namespace resource and verify health\n ns_resource_name = ns_resource_factory(platform=platform)[1]\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resource_name,\n read_ns_resources=[ns_resource_name],\n )",
"def replace_namespaced_net_namespace(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_net_namespace`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_net_namespace`\")\n\n resource_path = '/oapi/v1/netnamespaces/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespace',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def test_create_namespaced_deployment_request_instantiate(self):\n pass",
"def test_namespace_bucket_creation_with_rgw_rpc(\n self, ns_resource_factory, bucket_factory, rgw_deployments\n ):\n # Create the namespace resource and verify health\n ns_resource_name = ns_resource_factory(platform=constants.RGW_PLATFORM)[1]\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resource_name,\n read_ns_resources=[ns_resource_name],\n )",
"def test_create_net_namespace(self):\n pass",
"def sync_namespace(alias, reg_code, authToken, space=None, action=None):\n if space == None:\n action = 'get'\n print(\" ACTION: GET\")\n elif action == None:\n if 'aeskey' not in space:\n print(\"Space not encrypted\")\n quit()\n action = 'update'\n print(\" ACTION: UPDATE\")\n elif action == 'delete':\n print(\" ACTION: DELETE\")\n url = endpoint('namespace')\n headers={'authorizationToken': authToken}\n data = json.dumps({'action': action, 'alias': alias, 'reg_code': reg_code, 'namespace': space})\n payload_size = sys.getsizeof(data)\n print(\" Size of payload is: %s\" % (convert_size(payload_size)))\n print(\" Max payload is: %s\" % (convert_size(max_payload_size)))\n if payload_size >= max_payload_size:\n print(\" OVER MAX PAYLOAD: %s\" % (convert_size(max_payload_size)))\n quit()\n r = requests.post(url, headers=headers, data=data) \n print(\" Request made\")\n if r.status_code == 403:\n print(\" Invalid registration code, exiting\")\n quit()\n elif r.status_code == 406:\n print(\" Namespace mismatch\")\n quit()\n else:\n print(\" └──statusCode:\" + str(r.status_code) )\n return r",
"def pre_namespace_create(self, resource_dict):\n pass",
"def create_resource(self, namespace: \"str\" = None):\n names = [\"create_namespaced_csistorage_capacity\", \"create_csistorage_capacity\"]\n\n _kube_api.execute(\n action=\"create\",\n resource=self,\n names=names,\n namespace=namespace,\n api_client=None,\n api_args={\"body\": self.to_dict()},\n )",
"def _AddCreatedNamespace(self, state_tracker, identifier, line_number,\n namespace=None):\n if not namespace:\n namespace = identifier\n\n if self._HasSuppression(state_tracker, 'missingProvide'):\n return\n\n self._created_namespaces.append([namespace, identifier, line_number])",
"def create_client_by_namespace(\n body: ClientmodelClientCreateRequest,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = CreateClientByNamespace.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def register_namespace(alias, reg_code, pubKey=None, password=None):\n print( \" Registering namespace: %s\" % (alias) )\n if pubKey == None:\n generate_keys()\n pubKey = os.environ[\"pubKey\"]\n\n if check_lspace() == True:\n print(\" Device already registred to a namespace\")\n return False\n \n url = endpoint('register')\n payload = json.dumps({\n \"action\": \"register\",\n \"alias\": alias,\n \"reg_code\": reg_code,\n \"pubKey\": pubKey\n })\n r = requests.post(url, data=payload) \n statusCode = r.status_code\n content = json.loads(r.content)\n\n if statusCode == 201:\n print(\" Namespace registered succesfully\")\n namespace = content[\"namespace\"]\n namespace[\"privKey\"] = os.environ[\"privKey\"]\n save_lspace(namespace, password)\n else:\n print(\" Something went wrong - %s\" % (statusCode))\n quit()\n\n return statusCode",
"def replace_namespaced_namespace_with_http_info(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_namespace`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_namespace`\")\n\n resource_path = '/api/v1/namespaces/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Namespace',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))",
"def new_namespace(key):\n\tif key in REGISTRY:\n\t\traise KeyError(\"key:{0} already exists\".format(key))\n\n\tREGISTRY[key] = Namespace()",
"def create_or_fetch_namespace(self):\n\n def _create_new_namespace():\n logger.info(\n f\"Creating a new namespace: {self.namespace_name} in {self.namespace_region}\"\n )\n\n data = {\n \"name\": self.namespace_name,\n \"resource_group_id\": self.resource_group_id,\n \"resource_plan_id\": \"functions-base-plan\",\n }\n\n res = requests.post(\n self.cf_namespaces_url, headers=self.get_headers(), json=data\n ).json()\n if res.status_code != 200:\n logger.error(res.text)\n namespace_id = res[\"id\"]\n logger.info(f\"Created new namespace with id: {namespace_id}\")\n return namespace_id\n\n def _get_cloud_function_namespaces_metadata(offset=0):\n \"\"\"returns meta data on namespaces of ibm cloud functions within a specified region\n :param offset - offset from the beginning of the list of results attained from the GET request,\n which may contain up to 200 namespaces per http response\"\"\"\n\n res = requests.get(\n f\"{self.cf_namespaces_url}?limit=200&offset={offset}\",\n headers=self.get_headers(),\n )\n return json.loads(res.text)\n\n def _get_cloud_function_namespaces():\n \"\"\"returns relevant metadata on existing namespaces within a given region.\"\"\"\n logger.info(\n f\"Obtaining Cloud Function namespaces in {self.namespace_region}\"\n )\n\n namespaces = []\n\n collecting_namespaces = True\n max_limit = 200\n offset = 0\n\n # request for namespaces is limited to 200 at a time, thus the request is fulfilled in increments of 200s.\n while collecting_namespaces:\n namespace_metadata = _get_cloud_function_namespaces_metadata(offset)\n if namespace_metadata[\"total_count\"] == max_limit:\n offset += max_limit\n else:\n collecting_namespaces = False\n\n for name_space in namespace_metadata[\"namespaces\"]:\n if \"name\" in name_space: # API based namespace\n namespaces.append(\n {\n \"name\": name_space[\"name\"],\n \"type\": \"API_based\",\n \"id\": name_space[\"id\"],\n \"region\": name_space[\"location\"],\n }\n )\n\n else: # cloud foundry based namespace\n namespaces.append(\n {\n \"name\": name_space[\"id\"],\n \"type\": \"CF_based\",\n \"region\": name_space[\"location\"],\n }\n )\n\n return namespaces\n\n namespaces_in_region = _get_cloud_function_namespaces()\n target_namespace_id = None\n if namespaces_in_region:\n target_namespace_id = next(\n (\n namespace[\"id\"]\n for namespace in namespaces_in_region\n if namespace[\"name\"] == self.namespace_name\n ),\n None,\n )\n if not target_namespace_id:\n target_namespace_id = _create_new_namespace()\n else:\n logger.info(f\"Reusing namespace: {target_namespace_id}\")\n return target_namespace_id",
"def createNamespace(self):\r\n if self._namespaces:\r\n raise InternalError('Can not have more than one namespace '\r\n 'in an Environment endpoint at a time.')\r\n\r\n return Environment(self)",
"async def create_client_by_namespace_async(\n body: ClientmodelClientCreateRequest,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = CreateClientByNamespace.create(\n body=body,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )"
] | [
"0.6208053",
"0.61728823",
"0.61027914",
"0.61002004",
"0.59118104",
"0.5871321",
"0.58043087",
"0.5771377",
"0.5746048",
"0.5731766",
"0.5726167",
"0.57059276",
"0.5678216",
"0.55117524",
"0.54968196",
"0.54622465",
"0.54276574",
"0.5421414",
"0.54114",
"0.5391365",
"0.53783107",
"0.53694326",
"0.53572416",
"0.53054005",
"0.5293745",
"0.5259266",
"0.5218361",
"0.5187072",
"0.5183011",
"0.5120911"
] | 0.656771 | 0 |
Deletes the specified compartment. The compartment must be empty. | def delete_compartment(self, compartment_id, **kwargs):
resource_path = "/compartments/{compartmentId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"compartmentId": compartment_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
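# Prefer a retry strategy passed in kwargs for this call; otherwise fall back to the client-level default.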
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def removeCompartment(self, *args):\n return _libsbml.Model_removeCompartment(self, *args)",
"def removeCompartmentReference(self, *args):\n return _libsbml.MultiCompartmentPlugin_removeCompartmentReference(self, *args)",
"def delcomponent(self,\n context=[],\n componentid=None):\n if componentid == None:\n raise ValueError, \"delcomponent: componentid is None\"\n return jsoncall.do_call(\"delcomponent\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password,\\\n 'context':context,\\\n 'componentid':componentid},\n self.connection)",
"def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def delete_address(self) -> object:\n self.delete_button.click()\n\n return DeletionModal(self).wait_for_component_to_be_present()",
"def delete_pcb_component(self, comp_name):\n arg = [\"NAME:Selections\", \"Selections:=\", comp_name]\n\n self.modeler.oeditor.Delete(arg)\n return True",
"def removeCompartmentType(self, *args):\n return _libsbml.Model_removeCompartmentType(self, *args)",
"def removeCompartmentGlyph(self, *args):\n return _libsbml.Layout_removeCompartmentGlyph(self, *args)",
"def delete_composed_node(cls, composed_node_uuid):\n cls.dbdriver.delete_composed_node(composed_node_uuid)",
"def vertree_delete(self):\n if not self.attached:\n raise CastleCollectionNotAttachedException()\n raise Exception(\"TODO\")",
"def _deleteElement(self, identifier):\n self._collection.removeByIdentifier(identifier)\n return Deleted()",
"def delete(self):\n self.log.info('Deleting')\n self._state = PonPort.State.DELETING\n self._cancel_deferred()",
"def unsetCompartment(self):\n return _libsbml.CompartmentReference_unsetCompartment(self)",
"def delete(self):\n if not self.attached:\n raise CastleCollectionNotAttachedException()\n raise Exception(\"TODO\")",
"def delete(self, department_id):\n department = get_department_by_id(department_id)\n db.session.delete(department)\n db.session.commit()\n return {}, 204",
"def unsetCompartment(self):\n return _libsbml.Reaction_unsetCompartment(self)",
"def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()",
"def delete(self, endpoint, content=None, params=None):\n\t\treturn self._call(\"DELETE\", endpoint, content, params)",
"def delete(self, controller, virtual_drive='all'):\n return self.run('/c{} /v{} del force'.format(controller, virtual_drive))",
"def DeleteJob(self, job_urn, token=None):\n aff4.FACTORY.Delete(job_urn, token=token)",
"def delete(self, request, app_id, addon_name):\n addon = Addon.objects.get(app__app_id=app_id, display_name=addon_name)\n provider = get_provider_from_provider_name(addon.provider_name)\n result = provider.deprovision(addon.provider_uuid)\n manager = StateMachineManager()\n with manager.transition(addon.id, AddonEvent.deprovision_success):\n pass\n manager.start_task(addon.id)\n return self.respond({'message': result['message']})",
"def delete(self):\r\n if self.__abstract__:\r\n raise ThunderdomeException('cant delete abstract elements')\r\n if self.eid is None:\r\n return self\r\n query = \"\"\"\r\n g.removeVertex(g.v(eid))\r\n g.stopTransaction(SUCCESS)\r\n \"\"\"\r\n results = execute_query(query, {'eid': self.eid})",
"def delete(self, name):\n instance = self.get_one_instance('name', name)\n\n if type(instance) != self.Component:\n set_session_var('errors', str(instance))\n return None\n\n res = delete_in_db(instance)\n\n if res != 'deleted':\n set_session_var('errors', str(res))\n else:\n set_session_var('success', res)\n\n return True",
"def delete(self, endpoint, params=None):\n params = params or dict()\n return self.request(verb=requests.delete, address=self.project_address + endpoint,\n params=params)",
"def _delete(performer):\n if not isinstance(performer, helper._AelObjectPerformer):\n raise Exception('Invalid delete performer type')\n\n try:\n util.delete(\n obj=performer.getObject(), testmode=performer.isInTestMode()\n )\n except Exception as e:\n raise Exception('Failed to delete %s: %s' % (performer._name, str(e)))\n\n return",
"def remove(self, component) -> None:\n pass",
"def delete(self):\n\n headers = self._default_headers()\n\n return self._request(self.name,\n ok_status=None,\n data=None,\n headers=headers,\n method=\"DELETE\")",
"def delete_provisioning(self, identifier):\n return self.client.call(\"SoftLayer_Provisioning_Hook\", \"deleteObject\", id=identifier)",
"def delete(self, vehicle_id=None):\n raise NotImplementedError()",
"def delete(self,\n tier1_id,\n segment_id,\n port_id,\n ):\n return self._invoke('delete',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'port_id': port_id,\n })"
] | [
"0.6996225",
"0.5918634",
"0.5823131",
"0.573744",
"0.57213587",
"0.571788",
"0.5709205",
"0.56610256",
"0.56202364",
"0.55895793",
"0.5515105",
"0.5480321",
"0.5393466",
"0.5318188",
"0.5316087",
"0.53012276",
"0.5280208",
"0.527711",
"0.527472",
"0.52587646",
"0.5253273",
"0.524758",
"0.5220202",
"0.51949716",
"0.5167984",
"0.5151166",
"0.51484567",
"0.5142402",
"0.513271",
"0.513119"
] | 0.6579754 | 1 |
Deletes the specified MFA TOTP device for the specified user. | def delete_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs):
resource_path = "/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id,
"mfaTotpDeviceId": mfa_totp_device_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
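# Path parameters must be present and must not be empty or whitespace-only strings.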
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_user(self, user):\n self.delete(user)",
"def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)",
"def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass",
"def delete(self, user_id):\r\n return delete_user(request, user_id)",
"def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])",
"def delete(self, user_id):\n return delete_user(user_id)",
"def delete_user():",
"def delete_user(self, user):\n name = utils.get_name(user)\n self._user_manager.delete(name)",
"def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200",
"def test_delete_device_user(self):\n pass",
"def delete_user():\n #TODO user delete\n pass",
"def delete_user(self) -> None:\n table_dictionary = {\n 'Apple': {\n 'table': 'AppleReceipts',\n 'user_id': 'User_id'\n },\n 'ESL': {\n 'table': 'ESLReceipts',\n 'user_id': 'User_id'\n },\n 'Transactions': {\n 'table': 'Transactions',\n 'user_id': 'User_id'\n },\n 'Users': {\n 'table': 'Users',\n 'user_id': 'id'\n },\n }\n\n # delete the current user's information from the db.\n for key in table_dictionary:\n query = f\"\"\"\n DELETE\n FROM {table_dictionary[key]['table']}\n WHERE {table_dictionary[key]['user_id']}=?;\n \"\"\"\n self.db.commit(query, values=(self.id,))\n\n # perform a sign out\n self.sign_out()\n\n log(f\"User:{self.id} has deleted their account.\")",
"def delete_user(self, user_id):\n\n # ask the model to delete the user\n um = User(self.settings)\n status = um.delete(user_id)\n\n # return\n return status",
"def deleteUser(user):\n delete_user(user)\n return redirect(url_for('login'))",
"def delete_user_entitlement(self, user_id):\n route_values = {}\n if user_id is not None:\n route_values['userId'] = self._serialize.url('user_id', user_id, 'str')\n self._send(http_method='DELETE',\n location_id='8480c6eb-ce60-47e9-88df-eca3c801638b',\n version='6.0-preview.3',\n route_values=route_values)",
"def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}",
"def delete_user(self, user_id):\n return self._delete('/users/{0}'.format(user_id))",
"def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)",
"def user_id_delete(user_id):\n user = storage.get(\"User\", user_id)\n\n if user is None:\n abort(404)\n user.delete()\n del user\n return make_response(jsonify({}), 200)",
"def delete_user(id):\n pass",
"def delete(user_id):\n assert isinstance(user_id, ObjectId)\n\n User.objects(id=user_id).delete()",
"def delete_user(self, user_name):\n user = self.get_user(user_name)\n return self.client.delete_resource(user.get('href'))",
"def delete_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"delete_user\")",
"def user_delete(user_id=None):\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200",
"def delete_user(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")",
"def delete_user(self):\n raise NotImplementedError(\"Function not yet implemented contact package creator\")",
"def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))",
"def user_delete(user_id):\n user = storage.get('User', user_id)\n if user is None:\n abort(404)\n user.delete()\n storage.save()\n return jsonify({}), 200",
"def delete_user_by_xng_id(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/xngId/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['xngId'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")",
"def delete_user(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_user(user_id)"
] | [
"0.7167678",
"0.69209623",
"0.6781475",
"0.6747502",
"0.67389566",
"0.66825885",
"0.66802096",
"0.66421336",
"0.66130745",
"0.6571461",
"0.64496547",
"0.64242226",
"0.63735026",
"0.63403934",
"0.6330194",
"0.63215464",
"0.63176155",
"0.63172036",
"0.63106817",
"0.62828964",
"0.6236682",
"0.6191388",
"0.617884",
"0.6178165",
"0.6170782",
"0.61678034",
"0.6151824",
"0.61508304",
"0.6147104",
"0.613341"
] | 0.69395584 | 1 |
Deletes the specified network source. | def delete_network_source(self, network_source_id, **kwargs):
resource_path = "/networkSources/{networkSourceId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_network_source got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"networkSourceId": network_source_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self, source):\n _source = self._source_prefix+source\n assert _source in self.cache.keys()\n del self.cache[_source]",
"def delete_source(self, src_name: SourceName) -> None:\n while True:\n try:\n response = self.genes.query(\n IndexName=\"src_index\",\n KeyConditionExpression=Key(\"src_name\").eq(src_name.value),\n )\n except ClientError as e:\n raise DatabaseReadException(e)\n records = response[\"Items\"]\n if not records:\n break\n with self.genes.batch_writer(\n overwrite_by_pkeys=[\"label_and_type\", \"concept_id\"]\n ) as batch:\n for record in records:\n try:\n batch.delete_item(\n Key={\n \"label_and_type\": record[\"label_and_type\"],\n \"concept_id\": record[\"concept_id\"],\n }\n )\n except ClientError as e:\n raise DatabaseWriteException(e)\n\n try:\n self.metadata.delete_item(Key={\"src_name\": src_name.value})\n except ClientError as e:\n raise DatabaseWriteException(e)",
"def RemoveSource(self,source):\n self._sources.RemoveSource(source)",
"def RemoveSource(self, source):\n self._sources.remove(source)",
"def delete_network(self, network):\r\n return self.delete(self.network_path % (network))",
"def delete_source(username, id, force, token=None):\n if not force:\n click.confirm(\n \"Are you sure you want to delete {0} {1}?\".format(username, id), abort=True\n )\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}/{2}?access_token={3}\".format(\n mapbox_api, username, id, mapbox_token\n )\n r = requests.delete(url)\n if r.status_code == 204:\n click.echo(\"Source deleted.\")\n else:\n raise errors.TilesetsError(r.text)",
"def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)",
"def remove(self, source, graph, dest):\n return self.server.execute(self._execute_operation(\n source, graph, dest,\n ttypes.ExecuteOperationType.Remove))",
"def fusion_api_delete_network_set(self, name=None, uri=None, api=None, headers=None):\n return self.network_set.delete(name, uri, api, headers)",
"def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)",
"def fusion_api_delete_fc_network(self, name=None, uri=None, api=None, headers=None):\n return self.fc_network.delete(name, uri, api, headers)",
"def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]",
"def delete(self, dest, source=None):\n raise NotImplementedYet()",
"def dcnm_network_delete_event(self, network_info):\n seg_id = network_info.get('segmentation_id')\n if not seg_id:\n LOG.error(_LE('Failed to delete network. Invalid network '\n 'info %s.'), network_info)\n query_net = self.get_network_by_segid(seg_id)\n if not query_net:\n LOG.info(_LI('dcnm_network_delete_event: network %(segid)s '\n 'does not exist.'), {'segid': seg_id})\n return\n if self.fw_api.is_network_source_fw(query_net, query_net.name):\n LOG.info(_LI(\"Service network %s, returning\"), query_net.name)\n return\n # Send network delete request to neutron\n try:\n del_net = self.network.pop(query_net.network_id)\n self.neutronclient.delete_network(query_net.network_id)\n self.delete_network_db(query_net.network_id)\n except Exception as exc:\n # Failed to delete network.\n # Put back the entry to the local cache???\n self.network[query_net.network_id] = del_net\n LOG.exception(_LE('dcnm_network_delete_event: Failed to delete '\n '%(network)s. Reason %(err)s.'),\n {'network': query_net.name, 'err': str(exc)})",
"def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network ?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)",
"def remove_source(self, name):\n logger.warning('You are deleting a source. This could have unintended \\\n side effects. If you are replacing values, use get_source(name) \\\n and modify it instead.')\n source = self._sources[name]\n self._pattern_reg.remove_usage(source.strength_timeseries.pattern_name, (source.name, 'Source'))\n self._node_reg.remove_usage(source.node_name, (source.name, 'Source')) \n del self._sources[name]",
"def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def test_delete_network(self):\n pass",
"def src_delete(state):\n _lib.src_delete(state)",
"def remove(self, name, source):\n self.m.path.assert_absolute(source)\n self._run(name, ['remove', source])\n self.m.path.mock_remove_paths(source)",
"def delete(self): \n params = {'command':'deleteNetwork',\n 'id':self.id}\n \n self.logger.debug('Remove network %s' % self.name)\n \n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['deletenetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'deleteNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)",
"def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)",
"def delete_network_segment(context, segment_id):\n with db_api.context_manager.writer.using(context):\n network_obj.NetworkSegment.delete_objects(context, id=segment_id)",
"def delete_network_profile(arn=None):\n pass",
"def deleteRig(self):\n\n allNodes = cmds.ls(\"*\")\n for node in allNodes:\n if cmds.objExists(node + \".sourceModule\"):\n cmds.lockNode(node, lock=False)\n source = cmds.getAttr(node + \".sourceModule\")\n if source == self.name:\n try:\n cmds.delete(node)\n except:\n pass",
"def Delete(self):\n\n if self.network_id:\n self.cs.delete_network(self.network_id)\n\n if self.is_vpc and self.vpc_id:\n self.cs.delete_vpc(self.vpc_id)",
"def removeModelSource(self, modelSource):\n self._modelSources.remove(modelSource)\n if modelSource.isLoaded():\n self._reload()",
"def delete(self):\n \n logging.info(\"Deleting network %s\" % self.cloudnet)\n # res = cn.delete(self.cloudnet)\n res = self.cloudnet.delete()\n return res",
"def delete_order_source(self, order_source_id, **kwargs):\n\n all_params = ['order_source_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_order_source\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'order_source_id' is set\n if ('order_source_id' not in params) or (params['order_source_id'] is None):\n raise ValueError(\"Missing the required parameter `order_source_id` when calling `delete_order_source`\")\n\n resource_path = '/beta/orderSource/{orderSourceId}'.replace('{format}', 'json')\n path_params = {}\n if 'order_source_id' in params:\n path_params['orderSourceId'] = params['order_source_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def deleteNetwork(self, session: Session, id_: str):\n try:\n return NetworkManager().deleteNetwork(session, id_)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)"
] | [
"0.69649154",
"0.67236555",
"0.67101824",
"0.6658213",
"0.66125065",
"0.64707583",
"0.6187282",
"0.6125395",
"0.6124346",
"0.6093754",
"0.6084731",
"0.60805976",
"0.6080252",
"0.6072282",
"0.6040917",
"0.6035688",
"0.6021448",
"0.60198027",
"0.60127157",
"0.6012108",
"0.599546",
"0.59650165",
"0.58980197",
"0.5889944",
"0.58734",
"0.5866181",
"0.5849423",
"0.58390343",
"0.5818585",
"0.5817115"
] | 0.69568324 | 1 |
Deletes the specified SMTP credential for the specified user. | def delete_smtp_credential(self, user_id, smtp_credential_id, **kwargs):
resource_path = "/users/{userId}/smtpCredentials/{smtpCredentialId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_smtp_credential got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id,
"smtpCredentialId": smtp_credential_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
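# Drop any header whose value was not supplied (still the missing sentinel) or is None.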
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_credential(self, credential):\r\n return self.delete(self.credential_path % (credential))",
"def delete_credential(credentials):\n credentials.delete_credentials()",
"def delete_credential(self):\n Credentials.credentials_list.remove(self)",
"def delete_credential(name: str):\n # first load any existing credentials\n try:\n creds = load_auth()\n except FileNotFoundError:\n # if no auth file exists we can just treat that as there being no credentials\n creds = []\n\n if '@' in name:\n username, hostname = name.split('@')\n else:\n username = name\n hostname = None\n\n # next, try to figure out which one we're supposed to remove\n matches = []\n match_indices = []\n\n for idx, cred in enumerate(creds):\n # the username must match\n if cred.username != username:\n continue\n # if specified, the hostname must match\n if hostname is not None and cred.hostname != hostname:\n continue\n\n matches.append(cred)\n match_indices.append(idx)\n\n if len(matches) == 0:\n err = f\"No matching credential found with username '{username}'\"\n if hostname is not None:\n err += f\" with hostname '{hostname}'\"\n raise RuntimeError(err)\n elif len(matches) > 1:\n raise RuntimeError(_construct_ambiguous_deletion_message(username, hostname, matches))\n\n # At this point we should have exactly one match, which we can delete\n del creds[match_indices[0]]\n write_auth_data(configure.get_config_path(\"auth\"), creds)\n prune_outdated_auth()",
"def delete_credential(self):\n\n Credential.credential_list.remove(self)",
"def delete_credential(self, context, id):\n return remove_credential(id)",
"def delete_credential(self):\n Credential.credential_list.remove(self)",
"def remove_credential(self, authenticator_id, credential_id):\n pass",
"def remove_credentials(service: str) -> None:\n\n # SQL query to remove the user servise credentials from the database\n query = f\"DELETE FROM {service}_credentials WHERE user_id=?;\"\n\n # Execute the query\n with connect(DATABASE) as db:\n db.execute(query, (session[\"user_id\"],))\n db.commit()",
"def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))",
"def delete_credentials(self):\n Credentials.credential_list.remove(self)",
"def unset_credentials(ctx, user, store):\n try:\n logger.debug(\"store={store}, user={user}\".format(store=store, user=user))\n _pycred.unset_credentials(store, user)\n except Exception as e:\n logger.debug(e, exc_info=True)\n print('Error: {msg}'.format(msg=str(e)), file=sys.stderr)\n sys.exit(1)",
"def delete_user(self, user):\n self.delete(user)",
"def delete(bot, update):\n chatID = update.message.chat_id\n username = get_user_info(chatID)['PID']\n logger.info(\"Deleting user credentials for {}!\".format(username))\n Chat.query.filter(Chat.chatID == chatID).delete() # Delete the user's record referenced by their ChatID\n Misc.query.filter(Misc.chatID == chatID).delete()\n db_session.commit()\n messageContent = \"Your credentials have been deleted, {}\\nHope to see you back soon!\".format(username[3:-4].title())\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent)\n \n mp.track(username, 'User Left')\n mp.people_set(username, {'active': False })",
"async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")",
"def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)",
"def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass",
"def delete_credentials(self):\n Credentials.credentials_list.remove(self)",
"def delete_credentials(self):\n Credentials.credentials_list.remove(self)",
"def delete_credentials(self):\n Credentials.credentials_list.remove(self)",
"def deleteCredential(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)",
"def _delete_credential(self, key):\n try:\n del self._data[key]\n except KeyError:\n pass\n self._write()",
"def revoke(self):\n # Removes credentialing from the user\n with transaction.atomic():\n self.revoked_datetime = timezone.now()\n\n self.migrated_user.is_credentialed = False\n self.migrated_user.credential_datetime = None\n\n self.migrated_user.save()\n self.save()\n\n logger.info('Credentialing for user {0} has been removed.'.format(\n self.migrated_user.email))",
"def delete_user(UserName=None, AuthenticationType=None):\n pass",
"def delete_user(self, user):\n name = utils.get_name(user)\n self._user_manager.delete(name)",
"def delete(self, user):\n q = \"DELETE FROM profiles WHERE user=?\"\n try:\n self._query(q, (user,), fetch='none')\n except Exception as e:\n raise e",
"def delete_user(BrokerId=None, Username=None):\n pass",
"def delete_user(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_user(user_id)",
"def delete_user_credentials(connection, api_url):\n\n body = {\n 'endpoint': api_url,\n 'user': '',\n 'password': '',\n 'token': '',\n 'type': 'none'\n }\n\n connection.post_obj_as_json('user/credentials', body)"
] | [
"0.7331302",
"0.72613764",
"0.670908",
"0.661661",
"0.65503716",
"0.6531281",
"0.65208536",
"0.6449064",
"0.6350622",
"0.63303417",
"0.63224506",
"0.6295059",
"0.62919253",
"0.6242662",
"0.6233042",
"0.61947227",
"0.6164052",
"0.6137985",
"0.6137985",
"0.6137985",
"0.6135059",
"0.6127839",
"0.6065958",
"0.6057861",
"0.6005234",
"0.59498906",
"0.58769244",
"0.5837494",
"0.58205163",
"0.5817403"
] | 0.72640246 | 1 |
Deletes the specified tag default. | def delete_tag_default(self, tag_default_id, **kwargs):
resource_path = "/tagDefaults/{tagDefaultId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_tag_default got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"tagDefaultId": tag_default_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_default(self):\n if self.default_present:\n self.removeItem(0)\n self.default_present = False",
"def delete_tag(tag):\n tag.destroy()",
"def delete_tag(self,tag):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n del self.tag_dict[tag]",
"def clear_default(self, name, group=None):\n opt_info = self._get_opt_info(name, group)\n opt_info.pop('default', None)",
"def delete_tag(self, *tags: TagReference) -> None:\n return TagReference.delete(self, *tags)",
"def delete(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.delete(path, params, **options)",
"def untag():\n version = git.prompt_tag('Which tag to delete?')\n if not version:\n abort('No available version tag')\n git.delete_tag(version)",
"def delete_tag(self, tag):\n return self.__datacatalog.delete_tag(name=tag.name)",
"def delete_tag(self, session, tag):\n self._tag(session.delete, key=tag, delete=True, session=session)",
"def delete(self):\n request = self.tags_service.delete(path=self._path)\n request.execute()",
"def delete_tag(tag, directory=None):\n execute_command('git tag -d {0}'.format(tag), shell=True, cwd=directory)",
"def delete(self, uuid):\n\n\t\treturn self._delete(\"/tag/%s\" % base.getid(uuid), \"tag\")",
"async def delete(self, ctx: \"IceTeaContext\", *, otag: TagConverter):\n tag: models.Tag = otag\n if tag.alias:\n if ctx.author.guild_permissions.administrator or tag.author == ctx.author.id:\n try:\n await tag.delete()\n await ctx.send(\"aliases deleted\")\n except:\n await ctx.send(\"Alias unsuccessfully deleted\")\n elif not tag.alias:\n if ctx.author.guild_permissions.administrator or tag.author == ctx.author.id:\n try:\n await tag.delete()\n await ctx.send(\"Tag and all aliases deleted\")\n except:\n await ctx.send(\"Tag unsuccessfully deleted\")\n else:\n await ctx.send(\"No Tag with that name found\")",
"def delete_tag_template(self, name):\n self.__datacatalog.delete_tag_template(name=name, force=True)\n logging.info('Tag Template deleted: %s', name)",
"def delete_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tag)\n db.session.commit()\n\n return redirect('/tags')",
"def delete_tag(request):\n try:\n tags = request.POST.getlist('tag_id', 0)\n tag = Tag.objects.filter(pk__in=tags).delete()\n ActionLogger().log(request.user, \"deleted\", \"Knowledgebase Tag %s\" % tags)\n return format_ajax_response(True, \"Knoweldgebase tag deleted successfully.\")\n except Exception as ex:\n logger.error(\"Failed to delete_tag: %s\" % ex)\n return format_ajax_response(False, \"There was an error deleting the specified knowledgebase tag.\")",
"def delete_tag(tag_id):\n\n tag = Tag.query.get_or_404(tag_id)\n db.session.delete(tag)\n db.session.commit()\n\n return redirect(\"/tags\")",
"def delete_tag(tag_id):\n\n tag = Tag.query.get_or_404(tag_id)\n db.session.delete(tag)\n db.session.commit()\n\n return redirect(f\"/tags\")",
"def delete_default_content(site):\n logger.info(u'Apagando conteúdo padrão do Plone')\n for item in DEFAULT_CONTENT:\n if hasattr(site, item):\n api.content.delete(site[item])\n logger.debug(u' {0} apagado'.format(item))",
"def delete_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n db.session.delete(tag)\n db.session.commit()\n\n return redirect(\"/tags\")",
"def __delete__(self, instance):\n raise AttributeError(\"A Default Property cannot be deleted\")",
"def delete_tags(configurationIds=None, tags=None):\n pass",
"def _delete_tag_request():\n key = helpers.get('Tag.1.Key')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'deleteTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key\n }\n\n response = requester.make_request_async(args)\n\n return response",
"def delete_tags(self, session):\n self._tag(session.delete, delete=True, session=session)",
"def delete_default_vpc(session, vpc_id):\n try:\n session.delete_vpc(\n VpcId=vpc_id\n )\n except Exception as e:\n print('Exception: ' + str(e))\n else:\n print(\"Successfully deleted VPC\")",
"def tag_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def delete_tag(user_id, tag_id):\n\n tag = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tag)\n db.session.commit()\n\n return redirect(f'/users/{user_id}')",
"def delete_tag_meta(self, tag_name: str) -> ProjectMeta:\n return self.delete_tag_metas([tag_name])",
"def tags_delete(tag_id):\n\n tags = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tags)\n db.session.commit()\n\n flash(f\"'{tags.name}' tag is deleted.\")\n\n return redirect(\"/tags\")",
"def setdefault(self, key, default=None):\r\n return self.data.setdefault(ref(key, self._remove),default)"
] | [
"0.6711261",
"0.6618538",
"0.62383074",
"0.61715645",
"0.5984171",
"0.5975831",
"0.5970485",
"0.585169",
"0.58504015",
"0.5791107",
"0.57085663",
"0.55927753",
"0.55772495",
"0.5562356",
"0.5456258",
"0.5449171",
"0.5441209",
"0.5433733",
"0.5428394",
"0.5425312",
"0.54240113",
"0.54216796",
"0.53936046",
"0.53900605",
"0.5386305",
"0.5369236",
"0.5367158",
"0.5355882",
"0.53492415",
"0.52725637"
] | 0.6901326 | 0 |
Gets the authentication policy for the given tenancy. You must specify your tenant's OCID as the value for the compartment ID (remember that the tenancy is simply the root compartment). | def get_authentication_policy(self, compartment_id, **kwargs):
resource_path = "/authenticationPolicies/{compartmentId}"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_authentication_policy got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"compartmentId": compartment_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="AuthenticationPolicy")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="AuthenticationPolicy") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authentication_policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"authentication_policy\")",
"def authentication_policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"authentication_policy\")",
"def authentication_policy(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"authentication_policy\")",
"def _get_tenant_ocid(self):\n if isinstance(self._provider, oci.signer.Signer):\n return self._provider.api_key.split('/')[0]",
"def get_tenant_keyring(self) -> Optional[ImaKeyring]:\n return self.keyrings.get(\"tenant_keyring\")",
"def rbac_policy_get(request, policy_id, **kwargs):\n policy = neutronclient(request).show_rbac_policy(\n policy_id, **kwargs).get('rbac_policy')\n return RBACPolicy(policy)",
"def client_access_policy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_access_policy\")",
"def get_tenant_id(self, **kwargs):\n if self.authenticate() == 200:\n return self.tenant_id\n else:\n return None",
"def policies(self, request):\n policies = OtterPolicies(self.store, self.tenant_id, self.group_id,\n self.dispatcher)\n return policies.app.resource()",
"def aad_tenant_id(self) -> Optional[str]:\n return pulumi.get(self, \"aad_tenant_id\")",
"def get_org_policy(self, resource, constraint, fields=None,\n verb='getOrgPolicy', **kwargs):\n arguments = {'resource': resource, 'fields': fields,\n 'body': {'constraint': constraint}}\n if kwargs:\n arguments.update(kwargs)\n return self.execute_query(\n verb=verb,\n verb_arguments=arguments,\n )",
"def get(self):\n policy_number = reqparse.request.args.get('policy_number')\n category = reqparse.request.args.get('category')\n\n dao = ClaimDao()\n return dao.get(policy_number=policy_number, category=category)",
"def get_tenant_config(tenant_id):\n for tenant in tenants:\n if tenant['tenant_id'] == tenant_id:\n return tenant\n raise errors.BaseTapisError(\"invalid tenant id.\")",
"def get_tenant_by_id(tenant_id):\n tenant = identity.Tenant.query.filter_by(id=tenant_id).first()\n if tenant:\n return tenant\n abort(404, f\"Unable to find tenant with id: {tenant_id}\")",
"def get_policy(usage_id):\r\n return policy.get(policy_key(usage_id), {})",
"def tenant_access(self) -> Optional[pulumi.Input['ServiceTenantAccessArgs']]:\n return pulumi.get(self, \"tenant_access\")",
"def tenant_access(self) -> Optional[pulumi.Input['ServiceTenantAccessArgs']]:\n return pulumi.get(self, \"tenant_access\")",
"def get_tenant(key, tenant_name):\n for tenant in key.tenants.list():\n if tenant.name == tenant_name:\n return tenant\n\n return None",
"def get_aad_tenant_id(self) -> Union[str, None]:\n return self._get_aad_tenant_id(enable_validation=True)",
"def get_access_policy(access_policy_id: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccessPolicyResult:\n __args__ = dict()\n __args__['accessPolicyId'] = access_policy_id\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:accesscontextmanager/v1:getAccessPolicy', __args__, opts=opts, typ=GetAccessPolicyResult).value\n\n return AwaitableGetAccessPolicyResult(\n etag=pulumi.get(__ret__, 'etag'),\n name=pulumi.get(__ret__, 'name'),\n parent=pulumi.get(__ret__, 'parent'),\n scopes=pulumi.get(__ret__, 'scopes'),\n title=pulumi.get(__ret__, 'title'))",
"def tenant_access(self) -> pulumi.Output['outputs.ServiceTenantAccess']:\n return pulumi.get(self, \"tenant_access\")",
"def get_account_for_tenant(test_auth, tenant_id):\n return '%s%s' % (test_auth.reseller_prefixes[0], tenant_id)",
"def get_quotas_tenant(self, **_params):\r\n return self.get(self.quota_path % 'tenant', params=_params)",
"def get_key_ring_policy(project_id, location_id, key_ring_id):\n\n # Creates an API client for the KMS API.\n kms_client = googleapiclient.discovery.build('cloudkms', 'v1')\n\n # The resource name of the KeyRing.\n parent = 'projects/{}/locations/{}/keyRings/{}'.format(\n project_id, location_id, key_ring_id)\n\n # Get the current IAM policy.\n request = kms_client.projects().locations().keyRings().getIamPolicy(\n resource=parent)\n response = request.execute()\n\n if 'bindings' in response.keys():\n print('Printing IAM policy for resource {}:'.format(parent))\n for binding in response['bindings']:\n print('')\n print('Role: {}'.format(binding['role']))\n print('Members:')\n for member in binding['members']:\n print(member)\n print('')\n else:\n print('No roles found for resource {}.'.format(parent))",
"def acquire_token_func():\n\tconf = get_conf_from_json()\n\ttenant_name = conf['tenant_name']\n\tauthority_url = f'https://login.microsoftonline.com/{tenant_name}'\n\tapp = msal.ConfidentialClientApplication(\n\t\tauthority=authority_url,\n\t\tclient_id=conf['client_id'],\n\t\tclient_credential=conf['client_secret']\n\t)\n\ttoken = app.acquire_token_for_client(scopes=[\"https://graph.microsoft.com/.default\"])\n\treturn token",
"def check_tenant_authorization(tenant_id, override_permission=None):\n claims = get_jwt_claims()\n if \"id\" in list(claims.keys()):\n tenant_user = identity.TenantUser.query.filter_by(id=claims[\"id\"]).first()\n if (\n tenant_user.tenant_id == tenant_id\n or override_permission in tenant_user.permissions\n ):\n return\n abort(403, \"Unauthorized Tenant\")",
"def get_authentication(connection=\"oracle://ATLAS_COOLPROD/ATLAS_COOLONL_GLOBAL\"):\n \n from os import environ\n from os.path import join as pjoin\n assert \"CORAL_AUTH_PATH\" in environ, \"CORAL_AUTH_PATH environment var not set\"\n \n auth_paths = environ[\"CORAL_AUTH_PATH\"].split(\":\")\n \n for auth_path in auth_paths + [\".\"]:\n file_name = pjoin(auth_path, \"authentication.xml\")\n if exists(file_name):\n authentication = parse_auth_file(file_name, connection)\n if authentication:\n return authentication\n \n raise RuntimeError(\"Unable to locate credentials for %s.\" \n % connection)",
"def get_policy_id(token_name, utxo):\n assets_id = [k.split('.') for k in utxo['balances'].keys() if len(k.split('.')) == 2 and k.split('.')[1] == token_name]\n if len(assets_id) == 1:\n policy_id = assets_id[0][0]\n else:\n policy_id = None\n return policy_id",
"def get_current_tenant():\n return getattr(_thread_locals, \"tenant\", None)",
"def getOrganizationBrandingPolicy(self, organizationId: str, brandingPolicyId: str):\n\n metadata = {\n 'tags': ['Dashboard branding policies'],\n 'operation': 'getOrganizationBrandingPolicy',\n }\n resource = f'/organizations/{organizationId}/brandingPolicies/{brandingPolicyId}'\n\n return self._session.get(metadata, resource)"
] | [
"0.5763793",
"0.5763793",
"0.56437725",
"0.52095336",
"0.51566327",
"0.5049942",
"0.49517995",
"0.49281493",
"0.4919025",
"0.48551175",
"0.48236695",
"0.48195675",
"0.48187912",
"0.47907704",
"0.47624832",
"0.4753156",
"0.4753156",
"0.47355378",
"0.47262105",
"0.4723426",
"0.47186",
"0.4705896",
"0.46974677",
"0.46798822",
"0.4659315",
"0.46489128",
"0.4631749",
"0.46225977",
"0.46120504",
"0.45981133"
] | 0.6030179 | 0 |
Gets the specified compartment's information. This operation does not return a list of all the resources inside the compartment. There is no single API operation that does that. Compartments can contain multiple types of resources (instances, block storage volumes, etc.). To find out what's in a compartment, you must call the \"List\" operation for each resource type and specify the compartment's OCID as a query parameter in the request. For example, | def get_compartment(self, compartment_id, **kwargs):
resource_path = "/compartments/{compartmentId}"
method = "GET"
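# Don't accept unknown kwargs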
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"compartmentId": compartment_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Compartment")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Compartment") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getCompartment(self, *args):\n return _libsbml.Model_getCompartment(self, *args)",
"def getCompartment(self):\n return _libsbml.CompartmentReference_getCompartment(self)",
"def list_compartments(self, compartment_id, **kwargs):\n resource_path = \"/compartments\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"access_level\",\n \"compartment_id_in_subtree\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_compartments got unknown kwargs: {!r}\".format(extra_kwargs))\n\n if 'access_level' in kwargs:\n access_level_allowed_values = [\"ANY\", \"ACCESSIBLE\"]\n if kwargs['access_level'] not in access_level_allowed_values:\n raise ValueError(\n \"Invalid value for `access_level`, must be one of {0}\".format(access_level_allowed_values)\n )\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"accessLevel\": kwargs.get(\"access_level\", missing),\n \"compartmentIdInSubtree\": kwargs.get(\"compartment_id_in_subtree\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Compartment]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Compartment]\")",
"def getCompartment(self):\n return _libsbml.Species_getCompartment(self)",
"def get(self, *args):\n return _libsbml.ListOfCompartmentTypes_get(self, *args)",
"def getCompartmentType(self):\n return _libsbml.Compartment_getCompartmentType(self)",
"def getCompartment(self):\n return _libsbml.Reaction_getCompartment(self)",
"def getCompartment(self):\n return _libsbml.QualitativeSpecies_getCompartment(self)",
"def get(self, *args):\n return _libsbml.ListOfCompartmentReferences_get(self, *args)",
"def getCompartmentType(self):\n return _libsbml.MultiCompartmentPlugin_getCompartmentType(self)",
"def getCompartmentType(self, *args):\n return _libsbml.Model_getCompartmentType(self, *args)",
"def getCompartment(self):\n return _libsbml.MultiSpeciesType_getCompartment(self)",
"def getListOfCompartmentTypes(self, *args):\n return _libsbml.Model_getListOfCompartmentTypes(self, *args)",
"def compartment_id(self):\n return self._compartment_id",
"def compartment_id(self):\n return self._compartment_id",
"def getName(self):\n return _libsbml.CompartmentType_getName(self)",
"def getName(self):\n return _libsbml.Compartment_getName(self)",
"def getCompartmentReference(self, *args):\n return _libsbml.MultiCompartmentPlugin_getCompartmentReference(self, *args)",
"async def getCollectionDetail(self, slug=None):\n payload = {}\n \n if slug:\n payload[\"slug\"] = slug\n \n\n # Parameter validation\n schema = CatalogValidator.getCollectionDetail()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/{slug}/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"slug\",\"description\":\"A `slug` is a human readable, URL friendly unique identifier of an object. Pass the `slug` of the collection which you want to retrieve.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"slug\",\"description\":\"A `slug` is a human readable, URL friendly unique identifier of an object. Pass the `slug` of the collection which you want to retrieve.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", slug=slug)\n query_string = await create_query_string(slug=slug)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/{slug}/\", slug=slug), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def getId(self):\n return _libsbml.Compartment_getId(self)",
"def get(self, *args):\n return _libsbml.ListOfCompartments_get(self, *args)",
"def getCompartmentReference(self):\n return _libsbml.MultiSimpleSpeciesReferencePlugin_getCompartmentReference(self)",
"def getCompartmentReference(self):\n return _libsbml.SpeciesTypeInstance_getCompartmentReference(self)",
"def get(self):\n return GenericGet().get_catalogs()",
"def getId(self):\n return _libsbml.CompartmentType_getId(self)",
"def get_compliment():\n name = request.args.get('name')\n show_compliments = request.args.get('show_compliments')\n compliments_to_show = sample(compliments, 3)\n\n return render_template(\n 'compliments.html',\n name=name,\n show_compliments=show_compliments,\n compliments=compliments_to_show)",
"def getCompartmentId(self):\n return _libsbml.CompartmentGlyph_getCompartmentId(self)",
"def createCompartmentType(self):\n return _libsbml.Model_createCompartmentType(self)",
"def get_catalog():\n return jsonify(getCatalog())",
"def _get_catalog_object(self):\n return self.cluster.catalogd.service.read_debug_webpage(\n \"catalog_object?object_type=TABLE&object_name=functional.alltypes\")"
] | [
"0.651724",
"0.6153286",
"0.6037744",
"0.58690524",
"0.57859063",
"0.5781716",
"0.5763131",
"0.576079",
"0.5643876",
"0.5598771",
"0.5592769",
"0.55235153",
"0.5381384",
"0.5341171",
"0.5341171",
"0.5283982",
"0.5278441",
"0.5222629",
"0.5219247",
"0.5206948",
"0.5200719",
"0.51894313",
"0.51697594",
"0.5166381",
"0.516513",
"0.51017845",
"0.5092011",
"0.506716",
"0.5047575",
"0.5045076"
] | 0.6193012 | 1 |
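A minimal usage sketch for the get_compartment operation documented in the row above; it assumes the oci Python SDK is installed, a default ~/.oci/config profile exists, and the compartment OCID shown is a placeholder rather than a real value:

import oci

# Load credentials from the default OCI config profile (assumed to exist)
config = oci.config.from_file()
identity_client = oci.identity.IdentityClient(config)

# Placeholder compartment OCID; substitute one from your own tenancy
compartment_id = "ocid1.compartment.oc1..exampleuniqueid"
response = identity_client.get_compartment(compartment_id=compartment_id)

# The call returns a Compartment model on response.data
print(response.data.name, response.data.lifecycle_state)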
Get the specified MFA TOTP device for the specified user. | def get_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs):
resource_path = "/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}"
method = "GET"
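# Don't accept unknown kwargs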
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id,
"mfaTotpDeviceId": mfa_totp_device_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="MfaTotpDeviceSummary")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="MfaTotpDeviceSummary") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_mfa_totp_device(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")",
"def delete_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)",
"def list_mfa_totp_devices(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"sort_by\",\n \"sort_order\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_mfa_totp_devices got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n if 'sort_by' in kwargs:\n sort_by_allowed_values = [\"TIMECREATED\", \"NAME\"]\n if kwargs['sort_by'] not in sort_by_allowed_values:\n raise ValueError(\n \"Invalid value for `sort_by`, must be one of {0}\".format(sort_by_allowed_values)\n )\n\n if 'sort_order' in kwargs:\n sort_order_allowed_values = [\"ASC\", \"DESC\"]\n if kwargs['sort_order'] not in sort_order_allowed_values:\n raise ValueError(\n \"Invalid value for `sort_order`, must be one of {0}\".format(sort_order_allowed_values)\n )\n\n query_params = {\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"sortBy\": kwargs.get(\"sort_by\", missing),\n \"sortOrder\": kwargs.get(\"sort_order\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[MfaTotpDeviceSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[MfaTotpDeviceSummary]\")",
"def __retrieve_rt_token(user_id):\n\n slack_user = user_profile(user_id)\n if slack_user['ok']:\n username = slack_user['user']['profile'].get('email', '').split('@')[0]\n user = get_user_model().objects.filter(username=username).first()\n if user:\n prefs = UserPreferences.objects.filter(user=user).first()\n if prefs:\n if prefs.rt_token:\n cipher_suite = Fernet(settings.CRYPTO_KEY)\n return cipher_suite.decrypt(prefs.rt_token.encode('utf-8')).decode('utf-8')\n return None",
"def retrieve_user_devices(self, user_id):\n if user_id is None:\n self.log_error(MongoDatabase.retrieve_user_devices.__name__ + \"Unexpected empty object: user_id\")\n return None\n\n try:\n user_id_obj = ObjectId(user_id)\n user = self.users_collection.find_one({\"_id\": user_id_obj})\n if user is not None:\n if 'devices' in user:\n return user['devices']\n except:\n traceback.print_exc(file=sys.stdout)\n self.log_error(sys.exc_info()[0])\n return None",
"def get_custom_jwt(user, device):\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n payload = jwt_otp_payload(user, device)\n return jwt_encode_handler(payload)",
"def generate_totp_seed(self, user_id, mfa_totp_device_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/generateSeed\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"generate_totp_seed got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")",
"def activate_mfa_totp_device(self, user_id, mfa_totp_device_id, mfa_totp_token, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/activate\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"activate_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing),\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=mfa_totp_token,\n response_type=\"MfaTotpDeviceSummary\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=mfa_totp_token,\n response_type=\"MfaTotpDeviceSummary\")",
"def fetch_token(self, user_id, password):\n url = buildCommandUrl(self.server, \"/as/user/token\")\n result = json_request(\"POST\", url, {\n \"userId\": user_id,\n \"password\": password\n })\n return result[\"token\"]",
"def get_token(self, user_id, token_id):\n query = \"\"\"SELECT yubikeys.attribute_association_id AS yubikeys_attribute_association_id,\n yubikeys.id AS yubikeys_id,\n yubikeys.prefix AS yubikeys_prefix,\n yubikeys.enabled AS yubikeys_enabled\n FROM yubikeys, user_yubikeys\n WHERE user_yubikeys.user_id = %s\n AND yubikeys.prefix = %s\n AND yubikeys.id = user_yubikeys.yubikey_id\n ORDER BY yubikeys.prefix\"\"\"\n self._execute(query, (user_id, token_id))\n return self._dictfetchone()",
"def get_user(self, user_id=None):\n raise NotImplementedError",
"def get(user):\n if user:\n return Member.get_by_key_name(user.user_id())",
"def get_token(self, user_id, token_id):\n query = \"\"\"SELECT yubikeys.attribute_association_id AS yubikeys_attribute_association_id,\n yubikeys.id AS yubikeys_id,\n yubikeys.prefix AS yubikeys_prefix,\n yubikeys.enabled AS yubikeys_enabled\n FROM yubikeys\n INNER JOIN user_yubikeys\n ON user_yubikeys.yubikey_id = yubikeys.id\n WHERE user_yubikeys.user_id = %s\n AND yubikeys.prefix = %s\"\"\"\n self._execute(query, (user_id, token_id))\n return self._dictfetchone()",
"def get_token(self, user):\n\n jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n payload = jwt_payload_handler(user)\n token = jwt_encode_handler(payload)\n return token",
"def for_user(cls, user):\n\n token = super().for_user(user)\n\n TokenMeta.objects.get_or_create(\n jti=token['jti'],\n token=str(token),\n )\n\n return token",
"def get_permission_user(self, user_id):\n return self.execute(TABELLE[\"id_users\"]['select']['from_id'], (user_id,))",
"def _get_device(self, dev_id):\n tuya = self.hass.data[DOMAIN][TUYA_DATA]\n return tuya.get_device_by_id(dev_id)",
"def retrieve_user_devices(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n devices = self.database.retrieve_user_devices(user_id)\n if devices is not None:\n devices = list(set(devices)) # De-duplicate\n return devices",
"def get_user(self, user_id):\n return None # noqa: WPS324",
"def jwt_otp_payload(user, device=None):\n # username_field = get_username_field()\n username = get_username(user)\n\n payload = {\n 'user_id': user.pk,\n 'username': username,\n 'exp': datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA\n }\n\n # Include original issued at time for a brand new token,\n # to allow token refresh\n if api_settings.JWT_ALLOW_REFRESH:\n payload['orig_iat'] = timegm(\n datetime.utcnow().utctimetuple()\n )\n\n if api_settings.JWT_AUDIENCE is not None:\n payload['aud'] = api_settings.JWT_AUDIENCE\n\n if api_settings.JWT_ISSUER is not None:\n payload['iss'] = api_settings.JWT_ISSUER\n\n # custom additions\n is_user_and_device = user is not None and device is not None\n is_users_device = is_user_and_device and device.user_id == user.id\n is_device_confirmed = is_users_device and device.confirmed is True\n if is_device_confirmed:\n payload['otp_device_id'] = device.persistent_id\n else:\n payload['otp_device_id'] = None\n\n return payload",
"def find_token_by_user_id(session, user_id):\n return session.query(Token).filter(Token.user_id == user_id).one_or_none()",
"def getUser(self, user_uuid):\n if user_uuid in self.users.keys():\n return self.users[user_uuid]\n else:\n return None",
"def get_token(user, password):\n url = urljoin(PivotalTrackerService.URI, \"me\")\n auth = (user, password)\n response = PivotalTrackerService.get_response(\"get\", url, auth=auth)\n\n try:\n response.raise_for_status()\n data = response.json()\n ret_val = data[\"api_token\"]\n except RequestException:\n ret_val = None\n\n return ret_val",
"def get_user(self, token: str) -> Optional[User]:",
"def get_user(self, token: str) -> Optional[User]:",
"def get_user(self, user_id):\n oauth_user = OAuthioUser.objects.filter(user__id=user_id)\n if oauth_user.exists():\n return oauth_user.get().user",
"def get(self, user_id):\n user = UserServices(public_id=user_id).get_an_item()\n if not user:\n api.abort(404)\n else:\n return user",
"def get_user(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload={}, request_type=self.REQUEST_GET, version=\"v2\")",
"def get_user(self, user_id):\n uri = 'users/' + user_id\n return self.make_request(uri)",
"def get_user(self, user, instance=None):\n instance = self._get_resource(_instance.Instance, instance)\n return self._get(_user.User, user)"
] | [
"0.6745442",
"0.60613525",
"0.59726477",
"0.5702831",
"0.55748624",
"0.5469353",
"0.54422414",
"0.53837264",
"0.53459823",
"0.53429735",
"0.5318181",
"0.5315078",
"0.5313801",
"0.5306907",
"0.5290798",
"0.52894306",
"0.52770966",
"0.5263118",
"0.5261724",
"0.52087736",
"0.5182853",
"0.5177443",
"0.5173183",
"0.5163499",
"0.5163499",
"0.5140894",
"0.5122385",
"0.51194084",
"0.51137084",
"0.51085293"
] | 0.7053731 | 0 |
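A minimal usage sketch for the get_mfa_totp_device operation documented in the row above, again assuming a default OCI config profile; both OCIDs below are hypothetical placeholders:

import oci

config = oci.config.from_file()
identity_client = oci.identity.IdentityClient(config)

# Hypothetical OCIDs for a user and one of that user's TOTP devices
user_id = "ocid1.user.oc1..exampleuniqueid"
mfa_totp_device_id = "ocid1.credential.oc1..exampleuniqueid"

response = identity_client.get_mfa_totp_device(
    user_id=user_id,
    mfa_totp_device_id=mfa_totp_device_id)

# The call returns an MfaTotpDeviceSummary on response.data
print(response.data.id, response.data.lifecycle_state)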
Get the specified tenancy's information. | def get_tenancy(self, tenancy_id, **kwargs):
resource_path = "/tenancies/{tenancyId}"
method = "GET"
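# Don't accept unknown kwargs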
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_tenancy got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"tenancyId": tenancy_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Tenancy")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Tenancy") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tenants(self):\n # print \"tenant list is %s\" % self.auth.tenants.list()\n if not self._tenancy:\n self._tenancy = {}\n for tenant in self.auth.tenants.list():\n t = Tenant(tenant, self)\n self._tenancy[t[\"name\"]] = t\n return self._tenancy",
"def get_tenants(self):",
"def get_tenants(self, **kwargs):\n url = self.get_url('tenants', kwargs, ['begin', 'end'])\n return self.api_client.get(url).json()",
"def get_tenant_usage(self, tenant_id):\n return self._get(_quota.TenantUsage, tenant_id)",
"def get_tenants():\n # these are the tenant_id strings configured for the service -\n tenants_strings = conf.tenants\n result = []\n # the tenants service is a special case, as it must be a) configured to serve all tenants and b) actually maintains\n # the list of tenants in its own DB. in this case, we return the empty list since the tenants service will use direct\n # db access to get necessary data.\n if conf.service_name == 'tenants' and tenants_strings[0] == '*':\n return result\n\n # in dev mode, services can be configured to not use the security kernel, in which case we must get\n # configuration for a \"dev\" tenant directly from the service configs:\n if not conf.use_sk:\n for tenant in tenants_strings:\n t = {'tenant_id': tenant,\n 'iss': conf.dev_iss,\n 'public_key': conf.dev_jwt_public_key,\n 'default_access_token_ttl': conf.dev_default_access_token_ttl,\n 'default_refresh_token_ttl': conf.dev_default_refresh_token_ttl,\n }\n result.append(t)\n\n else:\n # TODO -- look up tenants in the tenants API, get the associated parameters (including sk location)\n pass\n return result",
"def get_quotas_tenant(self, **_params):\r\n return self.get(self.quota_path % 'tenant', params=_params)",
"def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]",
"def tenant(self):\n return self._tenant",
"def _get(self, path, params=None):\n return self._api.get_json(path, headers={\"Hawkular-Tenant\": self.tenant_id}, params=params)",
"def get_tenant(key, tenant_name):\n for tenant in key.tenants.list():\n if tenant.name == tenant_name:\n return tenant\n\n return None",
"def get(self, tenant_id):\n response = self.client.get('/quotas/%s' % tenant_id)\n\n return response.json()",
"def get(identifier: str):\n if identifier.startswith('T'):\n return {'message': babel('No information on temp registrations.')}, 200\n\n business = Business.find_by_identifier(identifier)\n\n if not business:\n return jsonify({'message': f'{identifier} not found'}), HTTPStatus.NOT_FOUND\n\n # check authorization\n if not authorized(identifier, jwt, action=['view']):\n return jsonify({'message':\n f'You are not authorized to view business {identifier}.'}), \\\n HTTPStatus.UNAUTHORIZED\n\n return jsonify(business=business.json())",
"def tenant_access(self) -> pulumi.Output['outputs.ServiceTenantAccess']:\n return pulumi.get(self, \"tenant_access\")",
"def info(self):\n return self.client.call('GET', self.name + 'info')",
"def get_all_tenants():\n tenants = identity.Tenant.query.all()\n return tenants",
"def get(self) -> Info:\n return InfoService.get()",
"def info(self):\n resp = requests.get(\"%s/api/info\"%self.urlbase, verify=False)\n return resp.json",
"def get_account_info(self):\n resp = requests.get(\n self.URL + 'info/',\n headers={'Authorization': 'Token ' + self.api_key}\n )\n\n return self.__handle_response(resp)",
"def show(self, req, tenant_id, id):\n LOG.info(\"Indexing quota info for tenant '%(id)s'\\n\"\n \"req : '%(req)s'\\n\\n\", {\"id\": id, \"req\": req})\n\n context = req.environ[wsgi.CONTEXT_KEY]\n if id != tenant_id and not context.is_admin:\n raise exception.TroveOperationAuthError(\n tenant_id=tenant_id\n )\n\n usages = quota_engine.get_all_quota_usages_by_tenant(id)\n limits = quota_engine.get_all_quotas_by_tenant(id)\n for key in usages.keys():\n setattr(usages[key], \"limit\", limits[key].hard_limit)\n return wsgi.Result(views.QuotaUsageView(usages).data(), 200)",
"def tenant(self, request):\n warnings.warn(\n '\"tenant\" Quota API method is deprecated, use \"project\" instead')\n return self._project(request, 'tenant')",
"def getTenantByName(self,tenantName,description):\n\n url = CIC_TENANT_ENDPOINT + \"?\" + urllib.urlencode(\n {\n \"instanceName\":tenantName,\n \"description\":description\n })\n\n logger.debug(\"Calling url {}\".format(url))\n\n try:\n response = self.httpHandler.sendHttpRequest(url)\n except urllib2.HTTPError as e:\n\n logger.debug(traceback.format_exc())\n\n if e.code == 404:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise KeyError(\n \"Tenant '{}' could not be found in TMS\".format(tenantName),\n \"CIC_TENANT_LOOKUP_ERROR\")\n\n elif e.code == 403:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise RuntimeError(\n \"User {} has no permission to look up 'tenants' in {} {}\".format(self.cicUser, self.cicUrl, body),\n \"CIC_NO_ACCESS\"\n )\n\n else:\n raise\n else:\n responseString = response.read()\n return json.loads(responseString)",
"def get_current_tenant():\n return getattr(_thread_locals, \"tenant\", None)",
"def info():\n if g.party_id is None:\n # No party is configured for the current site.\n abort(404)\n\n party = party_service.get_party(g.party_id)\n\n return {\n 'party': party,\n }",
"def get_tenant_config(tenant_id):\n for tenant in tenants:\n if tenant['tenant_id'] == tenant_id:\n return tenant\n raise errors.BaseTapisError(\"invalid tenant id.\")",
"def show_quota(self, tenant_id, **_params):\r\n return self.get(self.quota_path % (tenant_id), params=_params)",
"def info(self):\n path = self._get_path('info')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return response",
"def get_account_info(self):\n resource = self.domain + \"/account\"\n self.logger.debug(\"Pulling data from {0}\".format(resource))\n response = self.session.get(resource)\n\n if response.status_code != requests.codes.ok:\n return response.raise_for_status()\n data = response.text\n root = Et.fromstring(data)\n bf = BadgerFish(dict_type=dict)\n account_info = bf.data(root)\n return account_info",
"def info(self):\n return self._fetch_json('/api/info')",
"def tenant(self) -> \"str\":\n return self._attrs.get(\"tenant\")",
"def get_tenant_resources(self):\n resources = self.context[\"tenant\"].get(\"resources\", [])\n if not resources:\n msg = (\"No resources found for tenant: %s\"\n % self.context[\"tenant\"].get(\"name\"))\n raise exceptions.NotFoundException(message=msg)\n for res_id in resources:\n self._get_resource(res_id)"
] | [
"0.633115",
"0.6270229",
"0.6024418",
"0.5914824",
"0.59081745",
"0.5818619",
"0.5763514",
"0.5751513",
"0.5732008",
"0.5686052",
"0.56732315",
"0.5667248",
"0.5657612",
"0.5629676",
"0.561599",
"0.56010634",
"0.559895",
"0.5592223",
"0.5585628",
"0.5580276",
"0.55515385",
"0.55401057",
"0.5529882",
"0.55057806",
"0.5500245",
"0.54857826",
"0.54840803",
"0.54407525",
"0.5438957",
"0.54318666"
] | 0.6393299 | 0 |
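A minimal usage sketch for the get_tenancy operation documented in the row above, assuming a default OCI config profile; the tenancy OCID is normally already present in that config:

import oci

config = oci.config.from_file()
identity_client = oci.identity.IdentityClient(config)

# The loaded config carries the tenancy OCID under the "tenancy" key
response = identity_client.get_tenancy(tenancy_id=config["tenancy"])

# The call returns a Tenancy model on response.data
print(response.data.name, response.data.home_region_key)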
Gets the specified UserGroupMembership's information. | def get_user_group_membership(self, user_group_membership_id, **kwargs):
resource_path = "/userGroupMemberships/{userGroupMembershipId}"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_user_group_membership got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userGroupMembershipId": user_group_membership_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="UserGroupMembership")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="UserGroupMembership") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_their_members(our_group):\n\tgroup_id = our_group[\"groupId\"]\n\turl = f'{BASE_URL}/groups/{group_id}/members'\n\tparams = {'$select': 'userPrincipalName,id'}\n\treturn call_api(url, params)",
"def get_membership_data_for_current_user(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/User/GetMembershipsForCurrentUser/\"))",
"def getGroupMembers(group_id):\r\n return Group.getGroupMembers(group_id)",
"def get_membership_data_by_id_get(self, membershipId, membershipType):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/User/GetMembershipsById/{membershipId}/{membershipType}/\"))",
"def get(self, id):\r\n return UserGroupService.getUserGroup(self, id)",
"def get_groups_for_member_get(self, filter, groupType, membershipId, membershipType):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/User/{membershipType}/{membershipId}/{filter}/{groupType}/\"))",
"def GetGroupMembers(self, group):\n return []",
"def getMembership(config, user):\r\n\r\n seen = set()\r\n for member_of in _getMembership(config, user, seen):\r\n yield member_of\r\n\r\n # everyone is always a member of group \"all\"\r\n yield 'all'",
"def get_group_group_members(self, group_id):\n try:\n group_id = self.quote(group_id)\n return self.g.get('groups/%s/groups/' % group_id)\n except HTTPError as e:\n return self._manage_errors(e)",
"def getMember(self, *args):\n return _libsbml.Group_getMember(self, *args)",
"def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list",
"def get_membersof(self, kwargs):\n group = kwargs[\"group\"]\n verbose = kwargs.get(\"verbose\", False)\n\n results = list(self.engine.query(self.engine.GROUP_DN_FILTER(group), [\"distinguishedName\", \"objectSid\"]))\n if results:\n group_dn = results[0][\"distinguishedName\"]\n else:\n error(\"Group {group} does not exists\".format(group=group))\n\n primary_group_id = results[0][\"objectSid\"].split('-')[-1]\n results = self.engine.query(self.engine.ACCOUNTS_IN_GROUP_FILTER(primary_group_id, group_dn))\n self.display(results, verbose)",
"def get_memberships(self):\n return UnitMembership.objects.filter(unit=self).select_related(\"user\")",
"def get_members_of_group_get(self, currentpage, groupId, memberType, nameSearch):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/{groupId}/Members/\"))",
"def get_groups_details(self, groups):\n assert isinstance(groups, list)\n # It may be require we request the API by splitting the names list\n # If the list is too long to be handled by the Gerrit server (URI)\n query_args = \"?%s\" % \"&\".join([\"q=%s\" % g for g in groups])\n query_args += \"&o=MEMBERS\" if groups else \"o=MEMBERS\"\n\n try:\n ret = self.g.get('groups/%s' % query_args)\n except HTTPError as e:\n return self._manage_errors(e)\n\n return ret",
"def get_group_member(self, group):\n fake_group_obj = SimpleNamespace(id=group[\"id\"])\n current_identity = self.context[\"identity\"]\n avatar = current_groups_service.links_item_tpl.expand(\n current_identity, fake_group_obj\n )[\"avatar\"]\n return {\n \"type\": \"group\",\n \"id\": group[\"id\"],\n \"name\": group.get(\"name\") or group[\"id\"],\n \"description\": group.get(\"description\", \"\"),\n \"avatar\": avatar,\n }",
"def groups_get(self, mar, request):\n if not mar.viewed_user_auth:\n raise exceptions.NoSuchUserException(request.groupName)\n group_id = mar.viewed_user_auth.user_id\n group_settings = self._services.usergroup.GetGroupSettings(\n mar.cnxn, group_id)\n member_ids, owner_ids = self._services.usergroup.LookupAllMembers(\n mar.cnxn, [group_id])\n (owned_project_ids, membered_project_ids,\n contrib_project_ids) = self._services.project.GetUserRolesInAllProjects(\n mar.cnxn, mar.auth.effective_ids)\n project_ids = owned_project_ids.union(\n membered_project_ids).union(contrib_project_ids)\n if not permissions.CanViewGroupMembers(\n mar.perms, mar.auth.effective_ids, group_settings, member_ids[group_id],\n owner_ids[group_id], project_ids):\n raise permissions.PermissionException(\n 'The user is not allowed to view this group.')\n\n member_ids, owner_ids = self._services.usergroup.LookupMembers(\n mar.cnxn, [group_id])\n\n member_emails = list(self._services.user.LookupUserEmails(\n mar.cnxn, member_ids[group_id]).values())\n owner_emails = list(self._services.user.LookupUserEmails(\n mar.cnxn, owner_ids[group_id]).values())\n\n return api_pb2_v1.GroupsGetResponse(\n groupID=group_id,\n groupSettings=api_pb2_v1_helpers.convert_group_settings(\n request.groupName, group_settings),\n groupOwners=owner_emails,\n groupMembers=member_emails)",
"def get_group_info(groupname):\n return jsonify(admin.get_group_info(current_app.scoped_session(), groupname))",
"def get(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('GET', url)",
"def get_group_members(self, group_key):\n try:\n paged_results = self.repository.members.list(group_key)\n result = api_helpers.flatten_list_results(paged_results, 'members')\n LOGGER.debug('Getting all the members for group_key = %s,'\n ' result = %s', group_key, result)\n return result\n except (errors.HttpError, HttpLib2Error) as e:\n raise api_errors.ApiExecutionError(group_key, e)",
"def getGroupInfo(groupId):\n url = f\"https://groups.roblox.com/v1/groups/{groupId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j",
"def users_in_group(self, group_id):\n users = []\n users = self._get(('user', 'group', str(group_id)))\n for user in users:\n if 'dreamdiary.diary.user' in user['saml_permissions']:\n users.append(user)\n return users",
"def list_group_members(self, token, userGroup):\n requestUser = self.get_username_from_token(token)\n dataBase = self.read_database()\n if userGroup not in dataBase['userGroups']:\n raise GroupDoesNotExistException(\"User group does not exist\")\n\n if requestUser not in dataBase['userGroups'][userGroup]['owners']:\n raise UserPermissionException(\"User is not an owner of this group\")\n owners = dataBase['userGroups'][userGroup]['owners']\n members = dataBase['userGroups'][userGroup]['members']\n return {'owners':owners, 'members':members}",
"def view_group(request, group_id):\n users = models.UserProfile.all().order('email')\n if group_id:\n group = models.UserGroup.get_by_id(int(group_id))\n if group.users:\n users = models.UserProfile.get(group.users)\n else:\n users = []\n return utility.respond(request, 'admin/view_group', {'users': users})",
"def get_group_members(self, group):\n members = []\n result = self.search('ou=groups,dc=mozilla',\n filterstr='cn=%s' % (group))\n if result == False:\n raise self.SearchError\n elif result == []:\n return []\n for group in result[1]:\n members = list(set(members) | set(group[1]['memberUid']))\n return members",
"def get(self):\n usergroup_node = graph.find_one(\"Usergroup\",\n property_key='id',\n property_value=self.id)\n return usergroup_node",
"def get_group_info(self, data):\n return self.__form_call('channels.info', data)",
"def get_membership(user):\n raise Exception(\"Someone needs to fix this method to no longer be dependent on model relationship if they're going to use it!\")",
"def get_people(self):\n url = self.base_url + 'memberships'\n\n req = requests.get(headers=self.headers, url=url)\n\n return req.json()",
"def get_group_members(self, group_id, max_results=None, paging_token=None):\n route_values = {}\n if group_id is not None:\n route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')\n query_parameters = {}\n if max_results is not None:\n query_parameters['maxResults'] = self._serialize.query('max_results', max_results, 'int')\n if paging_token is not None:\n query_parameters['pagingToken'] = self._serialize.query('paging_token', paging_token, 'str')\n response = self._send(http_method='GET',\n location_id='45a36e53-5286-4518-aa72-2d29f7acc5d8',\n version='6.0-preview.1',\n route_values=route_values,\n query_parameters=query_parameters)\n return self._deserialize('PagedGraphMemberList', response)"
] | [
"0.6235419",
"0.61992306",
"0.60605794",
"0.60599184",
"0.6059044",
"0.60332453",
"0.6025113",
"0.5984252",
"0.5957381",
"0.5928913",
"0.5920698",
"0.59183306",
"0.5917003",
"0.59147394",
"0.59113026",
"0.58406615",
"0.5840103",
"0.5822994",
"0.5788936",
"0.57630724",
"0.5758944",
"0.57501817",
"0.5692017",
"0.5656566",
"0.5654035",
"0.5626449",
"0.5625217",
"0.56005925",
"0.55951107",
"0.55732036"
] | 0.67357963 | 0 |
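A minimal usage sketch for the get_user_group_membership operation documented in the row above, assuming a default OCI config profile; the membership OCID is a placeholder and would normally come from list_user_group_memberships:

import oci

config = oci.config.from_file()
identity_client = oci.identity.IdentityClient(config)

# Hypothetical membership OCID
membership_id = "ocid1.groupmembership.oc1..exampleuniqueid"
response = identity_client.get_user_group_membership(
    user_group_membership_id=membership_id)

# The call returns a UserGroupMembership model on response.data
print(response.data.user_id, response.data.group_id)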
Gets details on a specified work request. The workRequestId is returned in the opc-work-request-id header for any asynchronous operation in the Identity and Access Management service. |
resource_path = "/workRequests/{workRequestId}"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_work_request got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
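# Reject None or empty-string path parameters before building the request URL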
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WorkRequest")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WorkRequest") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getwork(self, data: Optional[str] = None) -> Dict[str, Any]:\n assert data is None or type(data) == str\n return self.rpc_call(\"getwork\", data)",
"def get_request(request_id=None, workload_id=None, session=None):\n\n try:\n if not request_id and workload_id:\n request_ids = get_request_ids_by_workload_id(workload_id)\n if request_ids and len(request_ids) > 1:\n raise exceptions.IDDSException(\"More than one request with the same workload_id\")\n request_id = request_ids[0]\n\n req_select = \"\"\"select request_id, scope, name, requester, request_type, transform_tag, priority,\n status, locking, workload_id, created_at, updated_at, accessed_at, expired_at, errors,\n request_metadata, processing_metadata\n from atlas_idds.requests where request_id=:request_id\n \"\"\"\n req_stmt = text(req_select)\n result = session.execute(req_stmt, {'request_id': request_id})\n request = result.fetchone()\n\n if request is None:\n raise exceptions.NoObject('request request_id: %s, workload_id: %s cannot be found' % (request_id, workload_id))\n\n request = convert_request_to_dict(request)\n\n return request\n except sqlalchemy.orm.exc.NoResultFound as error:\n raise exceptions.NoObject('request request_id: %s, workload_id: %s cannot be found: %s' % (request_id, workload_id, error))",
"def doi_info(self,doi):\n \n doi = _clean_doi(doi)\n \n url = self.BASE_URL + 'works/' + doi\n \n try:\n return self._make_get_request(url,models.work_single)\n except errors.RequestError:\n #TODO: Check for 404\n #last_response.status_code\n #TODO: Do this only if debugging is enabled\n if self.debug:\n #TODO: Also report code\n print(\"Error msg from server: \" + self.last_response.text)\n raise errors.InvalidDOI('Invalid DOI requested: ' + doi)\n \n #return self._make_get_request(url,models.Work,kwargs)",
"async def request_job_info(self, job_id: str, *args, **kwargs) -> dict:\n # TODO: implement\n raise NotImplementedError('{} function \"request_job_info\" not implemented yet'.format(self.__class__.__name__))",
"def export_getCurrentExecutionOrder(self,requestName):\n\n if type(requestName) in StringTypes:\n result = requestDB._getRequestAttribute('RequestID',requestName=requestName)\n if not result['OK']:\n return result\n requestID = result['Value']\n else:\n requestID = requestName\n\n result = requestDB.getCurrentExecutionOrder(requestID)\n return result",
"def get_ride_request(reqID):\n req = RideRequest.query.get(reqID)\n return req",
"def getworkunit(worker_id):\r\n\r\n worker_data = identify(worker_id)\r\n global time_start\r\n global started_working\r\n global work_status\r\n if work_status == Db.WorkStatusNames.has_work.value:\r\n\r\n saved_work_unit = Db.get_free_work_unit()\r\n if saved_work_unit is None:\r\n work_status = Db.WorkStatusNames.no_work.value\r\n else:\r\n if not started_working:\r\n print(\"Starting to work!\")\r\n time_start = time.time()\r\n started_working = True\r\n #It counts it's\r\n print(str(saved_work_unit[\"work_unit_id\"]) + \" \" + str(saved_work_unit))\r\n Db.assign_work_unit(saved_work_unit[\"work_unit_id\"], worker_id)\r\n return saved_work_unit\r\n\r\n\r\n\r\n return package_data({\"fail_message\": work_status})",
"def getwork(self, data=None):\n if data is None:\n # Only if no data provided, it returns a WorkItem\n return WorkItem(**self.proxy.getwork())\n else:\n return self.proxy.getwork(data)",
"def send_announcement_get_work_request(self):\n self.analysis_id = uuid.uuid4().hex\n while True:\n self.announce_socket.send_json(((self.analysis_id, self.work_addr),))\n try:\n return self.awthread.recv(self.work_socket, 250)\n except six.moves.queue.Empty:\n continue",
"def export_getRequestFileStatus(self,requestName,lfns):\n if type(requestName) in StringTypes:\n result = requestDB._getRequestAttribute('RequestID',requestName=requestName)\n if not result['OK']:\n return result\n requestID = result['Value']\n else:\n requestID = requestName\n return requestDB.getRequestFileStatus(requestID,lfns)",
"def qos_workload_get(self, workload_name, desired_attributes=None):\n return self.request( \"qos-workload-get\", {\n 'workload_name': [ workload_name, 'workload-name', [ basestring, 'None' ], False ],\n 'desired_attributes': [ desired_attributes, 'desired-attributes', [ QosWorkloadInfo, 'None' ], False ],\n }, {\n 'attributes': [ QosWorkloadInfo, False ],\n } )",
"def list_work_requests(self, compartment_id, **kwargs):\n resource_path = \"/workRequests\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"resource_identifier\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_work_requests got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"resourceIdentifier\": kwargs.get(\"resource_identifier\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[WorkRequestSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[WorkRequestSummary]\")",
"def work_order_receipt_retrieve(self, work_order_id, id=None):\n pass",
"def get_tagging_work_request(self, work_request_id, **kwargs):\n resource_path = \"/taggingWorkRequests/{workRequestId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_tagging_work_request got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"workRequestId\": work_request_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"TaggingWorkRequest\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"TaggingWorkRequest\")",
"def request_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"request_id\")",
"def view_request_info(line):\n args = shlex.split(line)\n if not args:\n raise PappyException(\"Request id is required\")\n reqids = args[0]\n\n reqs = yield load_reqlist(reqids)\n\n for req in reqs:\n print ''\n print_request_extended(req)\n print ''",
"def get_request(self):\n\t\t#self.__sem.lock()\n\t\ttry:\n\t\t\t\n\t\t\tr = self.get(thread.get_ident(),None)\n\t\t\tif r:\n\t\t\t\treturn r\n\t\t\traise VDOM_exception(_(\"No request associated with current thread\"))\n\t\texcept:\n\t\t\traise VDOM_exception(_(\"No request associated with current thread\"))\n\t\t#finally:\n\t\t#\tself.__sem.unlock()",
"def vcac_worklfow_request(self):\n logging.info(\"Inside ucsvm_worklfow_request method base class\")\n return None",
"def log_request(self, code='-', size='-'):\n print self._heading(\"HTTP Request\")\n #First, print the resource identifier and desired operation.\n print self.raw_requestline,\n #Second, print the request metadata\n for header, value in self.headers.items(): \n print header + \":\", value",
"def export_getRequestStatus(self,requestName):\n\n if type(requestName) in StringTypes:\n result = requestDB._getRequestAttribute('RequestID',requestName=requestName)\n if not result['OK']:\n return result\n requestID = result['Value']\n else:\n requestID = requestName\n\n result = requestDB.getRequestStatus(requestID)\n return result",
"def read_request(req_id: int, db: Session = Depends(get_db)):\n db_req = crud.get_request(db, req_id=req_id)\n if db_req is None:\n raise HTTPException(status_code=404, detail=\"Request not found\")\n return db_req",
"def queryRequest(self, requestName):\n urlQuery = \"request/%s\" % requestName\n logging.info(\"Querying request '%s'\" % requestName)\n logging.info(\"Query: '%s':\" % urlQuery)\n r = self.restSender.get(urlQuery)\n print str(r)",
"async def get_job_execution_details(\n self,\n request: metrics.GetJobExecutionDetailsRequest = None,\n *,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pagers.GetJobExecutionDetailsAsyncPager:\n # Create or coerce a protobuf request object.\n request = metrics.GetJobExecutionDetailsRequest(request)\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.get_job_execution_details,\n default_timeout=None,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Send the request.\n response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)\n\n # This method is paged; wrap the response in a pager, which provides\n # an `__aiter__` convenience method.\n response = pagers.GetJobExecutionDetailsAsyncPager(\n method=rpc, request=request, response=response, metadata=metadata,\n )\n\n # Done; return the response.\n return response",
"def get_review_request(self, rid):\r\n rsp = self.api_call('api/review-requests/%s/' % rid)\r\n return rsp['review_request']",
"def req_id(self) -> str:\n pass",
"def RetrieveWorkerInCapability(**argd):\n checkSign = argd[\"nsid\"] + \",\" + argd[\"renid\"]\n token = EncryptUtil.DecodeURLSafeBase64(argd[\"token\"])\n try:\n tokenRet = EncryptUtil.VerifySign(checkSign, token, GlobalConfigContext.AUTH_NameService_PublicKey)\n except:\n tokenRet = False\n if tokenRet is False:\n return CGateway._UnauthorizedServiceResponse(token)\n flag1, ret1 = CGateway.core.RetrieveHumanWithCapability(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd[\"capabilityName\"])\n flag2, ret2 = CGateway.core.RetrieveAgentWithCapability(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd[\"capabilityName\"])\n return CGateway._DumpResponse(ret1 + ret2)",
"def request_id(self) -> Optional[str]:\n return self._request_id",
"def work_order_receipt_retrieve(self, work_order_id, id=None):\n if work_order_id is None or not is_hex(work_order_id):\n logging.error(\"Work order id is empty or Invalid\")\n return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER,\n \"Worker id is empty or Invalid\")\n\n json_rpc_request = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"WorkOrderReceiptRetrieve\",\n \"id\": id,\n \"params\": {\n \"workOrderId\": work_order_id\n }\n }\n response = self.__uri_client._postmsg(json.dumps(json_rpc_request))\n return response",
"def get_intake_detail(request, intake_csid):\n return handle_request(request, 'cspace-services/intakes/%s' % intake_csid)",
"def _fetch_request_info(request):\n try:\n subject_id = request.environ['api.cache.subject_id']\n method = request.environ['api.cache.method']\n version = request.environ['api.cache.version']\n except KeyError:\n return None\n else:\n return (subject_id, method, version)"
] | [
"0.56530815",
"0.5529833",
"0.54778296",
"0.5333147",
"0.53069115",
"0.5274904",
"0.51362526",
"0.51026773",
"0.5036096",
"0.49953598",
"0.49753773",
"0.49725127",
"0.4961871",
"0.4939883",
"0.49278948",
"0.4925855",
"0.49189013",
"0.49009278",
"0.4879232",
"0.48540133",
"0.48538992",
"0.4844337",
"0.48351547",
"0.4832383",
"0.4820794",
"0.48182362",
"0.48112178",
"0.48111847",
"0.47723818",
"0.47482735"
] | 0.6257329 | 0 |
Lists the API signing keys for the specified user. A user can have a maximum of three keys. Every user has permission to use this API call for their own user ID. An administrator in your organization does not need to write a policy to give users this ability. | def list_api_keys(self, user_id, **kwargs):
resource_path = "/users/{userId}/apiKeys"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_api_keys got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="list[ApiKey]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="list[ApiKey]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_keys(user_id):\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n keys = []\n try:\n for row in db.execute(\"SELECT public_key FROM public_keys WHERE username=? AND status=?\", [user_id, PK_STATUS_OK]):\n keys.append({\"public\": row[0]})\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n if(keys == []):\n abort(404)\n return jsonify({'user':{'username':user_id, 'keys':keys}})",
"def get_ssh_keys(self, user_id):\n _gu = self.get_user(user_id)\n if _gu is None:\n return []\n\n # build URL and make request\n return self._get('/users/{0}/keys'.format(_gu['id']))",
"def list_user_keys(self):\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"GET\", \"/1/keys\", self.timeout)",
"def get_all_access_keys(self, user_name, marker=None, max_items=None):\r\n params = {'UserName' : user_name}\r\n if marker:\r\n params['Marker'] = marker\r\n if max_items:\r\n params['MaxItems'] = max_items\r\n return self.get_response('ListAccessKeys', params,\r\n list_marker='AccessKeyMetadata')",
"def keys(self, bucket, user=None):\n raise NotImplementedError('TODO')",
"def list_user_keys(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/keys\" % self.url_index_name, self.client.timeout)",
"def list(ctx: CLIContext, user_id, is_active, filter_, order, offset, limit) -> None:\n fields = [\n keypair_fields['user_id'],\n keypair_fields['full_name'],\n keypair_fields['access_key'],\n keypair_fields['secret_key'],\n keypair_fields['is_active'],\n keypair_fields['is_admin'],\n keypair_fields['created_at'],\n keypair_fields['last_used'],\n keypair_fields['resource_policy'],\n keypair_fields['rate_limit'],\n keypair_fields['concurrency_used'],\n ]\n try:\n with Session() as session:\n fetch_func = lambda pg_offset, pg_size: session.KeyPair.paginated_list(\n is_active,\n user_id=user_id,\n fields=fields,\n page_offset=pg_offset,\n page_size=pg_size,\n filter=filter_,\n order=order,\n )\n ctx.output.print_paginated_list(\n fetch_func,\n initial_page_offset=offset,\n page_size=limit,\n )\n except Exception as e:\n ctx.output.print_error(e)\n sys.exit(1)",
"def describe_user_encryption_key_list(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_user_encryption_key_list_with_options(request, runtime)",
"def get_list_keys(rpc_user, rpc_pwd):\n data = '{\"jsonrpc\":\"2.0\",\"id\":\"1\",\"method\":\"listkeys\"}'\n return call_rpc(rpc_user, rpc_pwd, data)",
"def list_customer_secret_keys(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/customerSecretKeys\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_customer_secret_keys got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[CustomerSecretKeySummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[CustomerSecretKeySummary]\")",
"def apikeys(request):\n display = get_boolean_value(request.GET.get('display', False))\n\n return render(request, 'gui/profile/profile_api_keys_list.html', {\n 'user': request.user,\n 'display_keys': display\n })",
"def get_s3_keys(bucket, user_keys = None):\n keys = []\n if user_keys is None:\n \t\t\t\ts3 = boto3.client('s3')\n else:\n s3 = boto3.client('s3', \n aws_access_key_id = user_keys[\"AWS_ACCESS_KEY_ID\"], \n aws_secret_access_key = user_keys[\"AWS_SECRET_ACCESS_KEY\"], \n region_name = user_keys[\"REGION_NAME\"]\n ) \t \n \n resp = s3.list_objects_v2(Bucket= bucket)\n for obj in resp['Contents']:\n keys.append(obj['Key'])\n return keys",
"def get_api_keys(owner):\n api.get_all(owner)",
"async def describe_user_encryption_key_list_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_user_encryption_key_list_with_options_async(request, runtime)",
"def test_get_user_api_keys(self):\n pass",
"def describe_user_encryption_key_list_with_options(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n self.call_api(params, req, runtime)\n )",
"def get_api_keys(self, **kwargs):\n\n all_params = ['page', 'per_page', '_from', 'to', 'sort_dir', 'sort_field', 'filters']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_api_keys\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page' in params:\n query_params['_page'] = params['page']\n if 'per_page' in params:\n query_params['_perPage'] = params['per_page']\n if '_from' in params:\n query_params['_from'] = params['_from']\n if 'to' in params:\n query_params['_to'] = params['to']\n if 'sort_dir' in params:\n query_params['_sortDir'] = params['sort_dir']\n if 'sort_field' in params:\n query_params['_sortField'] = params['sort_field']\n if 'filters' in params:\n query_params['_filters'] = params['filters']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[ApiKey]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_key(self, user):\r\n from delicious_cake.models import ApiKey\r\n\r\n try:\r\n key = ApiKey.objects.get(user=user)\r\n except ApiKey.DoesNotExist:\r\n return False\r\n\r\n return key.key",
"def List(self, user=None):\n with self.acc_lock:\n self._load()\n\n result = []\n if user:\n for k, v in self.tasks.iteritems():\n if v['user'] != user:\n continue\n d = dict(v)\n d['key'] = k\n result.append(d)\n else:\n for k, v in self.tasks.iteritems():\n d = dict(v)\n d['key'] = k\n result.append(d)\n return result",
"def get_api_key_params(user):\n if user and user.is_authenticated():\n api_key, _ = APIKey.objects.get_or_create(user=user)\n return urlencode({'user': user.pk, 'key': api_key.key})\n return ''",
"def get_key(self, user, api_key):\n return True",
"async def list_keys(request: web.Request) -> web.Response:\n keys = [\n {'uri': '/wifi/keys/{}'.format(key.directory),\n 'id': key.directory,\n 'name': os.path.basename(key.file)} for key in wifi.list_keys()\n ]\n return web.json_response({'keys': keys}, status=200)",
"def api_key( self, trans, user_id, **kwd ):\n user = self.get_user( trans, user_id )\n key = self.create_api_key( trans, user )\n return key",
"def get_key_list(self, email=\"\"):\n\t\tif email:\n\t\t\twhere_clause = \" where email = '%s'\" % email\n\t\telse:\n\t\t\twhere_clause = \"\"\n\n\t\treturn self.app.db.query(\n\t\t\t\"\"\"\n\t\t\tselect\n\t\t\t\tapi_key,\n\t\t\t\towner,\n\t\t\t\tapp_name,\n\t\t\t\temail,\n\t\t\t\turl,\n\t\t\t\tcreated\n\t\t\tfrom\n\t\t\t\tapi_keys\n\t\t\t%s\n\t\t\t\"\"\" % where_clause)",
"def ListAppKeys(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def update_keys(user_id):\n\n if not request.json:\n abort(400)\n\n new_pub_keys = request.json[\"public_keys\"]\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n db_pub_keys = []\n try:\n for row in db.execute(\"SELECT public_key FROM public_keys WHERE username=? AND status=?;\", [user_id, PK_STATUS_OK]):\n db_pub_keys.append(row[0])\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n\n to_add = []\n to_revoke = []\n\n # Put the keys not present in the database in the list of keys to add\n for new_key in new_pub_keys:\n if(new_key not in db_pub_keys):\n to_add.append((user_id, new_key, PK_STATUS_OK))\n # Put the keys not in the new list in the list of keys to revoke\n for db_key in db_pub_keys:\n if(db_key not in new_pub_keys):\n to_revoke.append((PK_STATUS_REVOKED, user_id, db_key))\n\n try:\n db.executemany('INSERT INTO public_keys (username, public_key, status) VALUES (?,?,?);', to_add)\n db.executemany('UPDATE public_keys SET status=? WHERE username=? AND public_key=?;', to_revoke)\n db_conn.commit()\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n\n return jsonify({'status':True})",
"def get_api_key_from_user_id(self, user_id: str) -> str:\n response = self.get(self.url + \"/my-account\", params={\"id\": user_id})\n return self.get_api_key_from_response(response)",
"def get_user_auth_keys(self, username):\n if username in self.users_keys:\n return self.users_keys[username]\n\n self.users_keys[username] = []\n\n userdir = os.path.expanduser(\"~\" + username)\n if not userdir:\n return self.users_keys[username]\n\n keyfile = os.path.join(userdir, \".ssh/authorized_keys\")\n if not keyfile or not os.path.exists(keyfile):\n return self.users_keys[username]\n\n with open(keyfile) as f:\n for line in f.readlines():\n line = line.strip()\n if not line or line.startswith(\"#\"):\n continue\n values = [x.strip() for x in line.split()]\n\n exp = None\n try:\n int(values[0]) # bits value?\n except ValueError:\n # Type 1 or type 2, type 1 is bits in second value\n options_ktype = values[0]\n try:\n int(values[1]) # bits value?\n except ValueError:\n # type 2 with options\n ktype = options_ktype\n data = values[1]\n else:\n # Type 1 no options.\n exp = int(values[1])\n data = values[2]\n else:\n # Type 1 no options.\n exp = int(values[1])\n data = values[2]\n\n # XXX For now skip type 1 keys\n if exp is not None:\n continue\n\n if data:\n import base64\n if ktype == \"ssh-rsa\":\n key = ssh.RSAKey(data=base64.decodebytes(data.encode('ascii')))\n elif ktype == \"ssh-dss\":\n key = ssh.DSSKey(data=base64.decodebytes(data.encode('ascii')))\n else:\n key = None\n if key:\n self.users_keys[username].append(key)\n return self.users_keys[username]",
"async def describe_user_encryption_key_list_with_options_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def get_all_signing_certs(self, marker=None, max_items=None,\r\n user_name=None):\r\n params = {}\r\n if marker:\r\n params['Marker'] = marker\r\n if max_items:\r\n params['MaxItems'] = max_items\r\n if user_name:\r\n params['UserName'] = user_name\r\n return self.get_response('ListSigningCertificates',\r\n params, list_marker='Certificates')"
] | [
"0.6866222",
"0.66910446",
"0.6670618",
"0.64299655",
"0.6349597",
"0.6267026",
"0.622857",
"0.62016267",
"0.6186303",
"0.6160741",
"0.60831136",
"0.5966327",
"0.5913713",
"0.58797574",
"0.58408535",
"0.57975286",
"0.5738591",
"0.571278",
"0.5685982",
"0.5669537",
"0.56538075",
"0.5618135",
"0.5544118",
"0.5529223",
"0.5461351",
"0.5461012",
"0.54606354",
"0.541753",
"0.54036325",
"0.5372311"
] | 0.69830835 | 0 |
Lists the availability domains in your tenancy. Specify the OCID of either the tenancy or another of your compartments as the value for the compartment ID (remember that the tenancy is simply the root compartment). See `Where to Get the Tenancy's OCID and User's OCID`__. Note that the order of the results returned can change if availability domains are added or removed; therefore, do not create a dependency on the list order. | def list_availability_domains(self, compartment_id, **kwargs):
resource_path = "/availabilityDomains"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_availability_domains got unknown kwargs: {!r}".format(extra_kwargs))
query_params = {
"compartmentId": compartment_id
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[AvailabilityDomain]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[AvailabilityDomain]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_fault_domains(self, compartment_id, availability_domain, **kwargs):\n resource_path = \"/faultDomains\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_fault_domains got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"availabilityDomain\": availability_domain\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[FaultDomain]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[FaultDomain]\")",
"def show_domains(self):\n show_domains(self.system.cavity_gri)",
"def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))",
"def cb_listdomains(self, cmd):\n for cur in sorted(self.d.listDomains(),\n key=lambda x: _domreverse(x['domain'])):\n print \"%(domain)60s %(expiration_date)15s\" % cur",
"def case_search_enabled_domains():\n return CaseSearchConfig.objects.filter(enabled=True).values_list('domain', flat=True)",
"def list_compartments(self, compartment_id, **kwargs):\n resource_path = \"/compartments\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"access_level\",\n \"compartment_id_in_subtree\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_compartments got unknown kwargs: {!r}\".format(extra_kwargs))\n\n if 'access_level' in kwargs:\n access_level_allowed_values = [\"ANY\", \"ACCESSIBLE\"]\n if kwargs['access_level'] not in access_level_allowed_values:\n raise ValueError(\n \"Invalid value for `access_level`, must be one of {0}\".format(access_level_allowed_values)\n )\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"accessLevel\": kwargs.get(\"access_level\", missing),\n \"compartmentIdInSubtree\": kwargs.get(\"compartment_id_in_subtree\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Compartment]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Compartment]\")",
"def list_zones(self, **kwargs):\r\n return self.client['Account'].getDomains(**kwargs)",
"def listDomains(self):\n reply = self.rpc.getDomains(self.username,\n self.password)\n if reply[0] == 'UNKNOWN_ERROR':\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply",
"def availability_domain(self):\n return self._availability_domain",
"def list_keystone_v3_domains(self):\n LOG_OBJ.debug(\"List the domains.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating domain\")\n print (\"No response from Server while creating domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Listing domains Failed with status %s \"\n \"and error : %s\" % response.status, response.data)\n print (\" Listing domains Failed with status %s and error : %s\" %\n response.status, response.data)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domains list : %s \" % output)\n print (\"Domains list : %s \" % output)\n return output['domains']",
"async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]",
"def list_all():\n\n return (_conn.listDefinedDomains() +\n [_conn.lookupByID(id).name() for id in _conn.listDomainsID()])",
"def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains",
"def describe_availability_options(DomainName=None, Deployed=None):\n pass",
"def get_domains() -> List[str]:\n ret = _call_endpoint(\"v1/domains\")\n # Example response:\n # [{'createdAt': '2016-06-25T03:08:44.000Z',\n # 'domain': 'mydomain.com',\n # 'domainId': 12345678,\n # 'expirationProtected': False,\n # 'expires': '2020-06-25T03:08:44.000Z',\n # 'holdRegistrar': False,\n # 'locked': True,\n # 'nameServers': None,\n # 'privacy': False,\n # 'renewAuto': True,\n # 'renewDeadline': '2020-08-09T03:08:44.000Z',\n # 'renewable': True,\n # 'status': 'ACTIVE',\n # 'transferProtected': False},]\n domains = [d[\"domain\"] for d in ret]\n return domains",
"def list_domain_names():\n pass",
"def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)",
"def company_lists(self):\n return self.client.get('company/named-lists')",
"def get_org_list():\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))\r\n return resp.text",
"def list_orgs(self):\n orgs = list(self.orgs.keys())\n orgs.sort()\n return orgs",
"def getDomains(self, company):\n return self.db.getDomains(company)",
"def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]",
"def get_storage_domains(cohesity_client):\n storage_domain_list = cohesity_client.view_boxes.get_view_boxes()\n for domain in storage_domain_list:\n exported_res_dict[\"Storage Domains\"].append(domain.name)\n return storage_domain_list",
"async def getDepartments(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getDepartments()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def list_all_agencies():\n return JsonResponse.create(StatusCode.OK, get_all_agencies())",
"def get_ad_entries(cohesity_client):\n resp = cohesity_client.active_directory.get_active_directory_entry()\n if resp:\n ad_list = list()\n for each_ad in resp:\n ad_list.append(each_ad.domain_name)\n config_dict[each_ad.domain_name] = [\n \"username\", \"password\", \"machine_accounts\"]\n exported_res_dict[\"Active directories\"] = ad_list\n return resp",
"def listRR(self):\n reply = self.rpc.getSubdomains(self.username,\n self.password,\n self.domain)\n\n if len(reply) and reply[0] in ('UNKNOWN_ERROR',\n 'RATE_LIMITED'):\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply",
"def _list_orgs(self, context):\r\n try:\r\n rtn = {'context': context,\r\n 'orgs': sorted(list(self._bbreader.cache[context].keys()))}\r\n except KeyError:\r\n raise RequestError('Context {} not found'.format(context))\r\n return rtn",
"def companies():\n res = requests.get('http://0.0.0.0:5002/companies')\n return jsonify(res.json())",
"def list_domain_names(self) -> Dict:\n pass"
] | [
"0.5952278",
"0.5752911",
"0.56866777",
"0.5658533",
"0.5655036",
"0.56334436",
"0.5622109",
"0.55967",
"0.5533269",
"0.55126745",
"0.5500697",
"0.5494048",
"0.5493609",
"0.5492394",
"0.54891664",
"0.54583323",
"0.5394912",
"0.5390071",
"0.5349818",
"0.5348451",
"0.53267604",
"0.53245103",
"0.53077286",
"0.53016484",
"0.5293885",
"0.5280641",
"0.5271698",
"0.5207974",
"0.5188195",
"0.5128355"
] | 0.72129303 | 0 |
Lists the compartments in a specified compartment. The members of the list returned depend on the values set for several parameters. With the exception of the tenancy (root compartment), the ListCompartments operation returns only the first-level child compartments in the parent compartment specified in `compartmentId`. The list does not include any subcompartments of the child compartments (grandchildren). The parameter `accessLevel` specifies whether to return only those compartments for which the requestor has INSPECT permissions on at least one resource directly or indirectly (the resource can be in a subcompartment). The parameter `compartmentIdInSubtree` applies only when you perform ListCompartments on the tenancy (root compartment). When set to true, the entire hierarchy of compartments can be returned. To get a full list of all compartments and subcompartments in the tenancy (root compartment), set the parameter `compartmentIdInSubtree` to true and `accessLevel` to ANY. See `Where to Get the Tenancy's OCID and User's OCID`__. | def list_compartments(self, compartment_id, **kwargs):
resource_path = "/compartments"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit",
"access_level",
"compartment_id_in_subtree"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_compartments got unknown kwargs: {!r}".format(extra_kwargs))
if 'access_level' in kwargs:
access_level_allowed_values = ["ANY", "ACCESSIBLE"]
if kwargs['access_level'] not in access_level_allowed_values:
raise ValueError(
"Invalid value for `access_level`, must be one of {0}".format(access_level_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"accessLevel": kwargs.get("access_level", missing),
"compartmentIdInSubtree": kwargs.get("compartment_id_in_subtree", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[Compartment]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[Compartment]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getListOfCompartments(self):\n return self.model.getListOfCompartments()",
"def getListOfCompartments(self, *args):\n return _libsbml.Model_getListOfCompartments(self, *args)",
"def get_compartment(self, compartment_id, **kwargs):\n resource_path = \"/compartments/{compartmentId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_compartment got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"compartmentId\": compartment_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"Compartment\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"Compartment\")",
"def getListOfCompartmentReferences(self, *args):\n return _libsbml.MultiCompartmentPlugin_getListOfCompartmentReferences(self, *args)",
"def get(self, *args):\n return _libsbml.ListOfCompartments_get(self, *args)",
"def getListOfCompartmentTypes(self, *args):\n return _libsbml.Model_getListOfCompartmentTypes(self, *args)",
"def getCompartment(self, *args):\n return _libsbml.Model_getCompartment(self, *args)",
"def list_groups(self, compartment_id, **kwargs):\n resource_path = \"/groups\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_groups got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Group]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Group]\")",
"def list_policies(self, compartment_id, **kwargs):\n resource_path = \"/policies\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_policies got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Policy]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Policy]\")",
"def getCompartment(self):\n return _libsbml.CompartmentReference_getCompartment(self)",
"def get(self, *args):\n return _libsbml.ListOfCompartmentReferences_get(self, *args)",
"def get_compounds(self, ctx, params):\n # ctx is the context object\n # return variables are: out_compounds\n #BEGIN get_compounds\n self._check_param(params, ['compounds'])\n out_compounds = []\n for x in params['compounds']:\n id = x.split('/')[-1]\n comp = self.compounds.get(id, None)\n if comp:\n comp['aliases'] = self.comp_aliases.get(id, '')\n out_compounds.append(comp)\n #END get_compounds\n\n # At some point might do deeper type checking...\n if not isinstance(out_compounds, list):\n raise ValueError('Method get_compounds return value ' +\n 'out_compounds is not type list as required.')\n # return the results\n return [out_compounds]",
"def findcomps():\n try:\n appuser, _ = util.authenticate()\n tlid = dbacc.reqarg(\"tlid\", \"dbid\", required=True)\n where = (\"WHERE tlid = \" + tlid + \" AND userid != \" + appuser[\"dsId\"] +\n \" ORDER BY modified DESC LIMIT 50\")\n tlcs = dbacc.query_entity(\"TLComp\", where)\n except ValueError as e:\n return util.serve_value_error(e)\n return util.respJSON(tlcs)",
"def isSetCompartment(self):\n return _libsbml.CompartmentReference_isSetCompartment(self)",
"def setCompartment(self, *args):\n return _libsbml.CompartmentReference_setCompartment(self, *args)",
"def capacitygroup_list(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_capacitygroup_list(cmd_ctx, cpc, options))",
"def generate_compartments(parameterdict):\n\n refcmpts, model = [parameterdict[i] for i in ['refcmpts', 'model']]\n\n peripherals = [] # List for peripheral compartments\n # Iterates through compartments. Adds peripherals to peripheral list,\n # creates main and optionally sub compartment (if in SC model).\n # Doesn't allow multiple main/sub compartments.\n for cmpt in refcmpts:\n if cmpt[2] == 'Peripheral':\n peripherals.append(Compartment(cmpt[0], cmpt[1]))\n\n elif cmpt[2] == 'Main':\n if 'maincmpt' in locals():\n raise ValueError(\"Can't have two main compartments.\")\n else:\n maincmpt = Compartment(cmpt[0], cmpt[1])\n\n elif cmpt[2] == 'Sub' and model == 'sc':\n if 'subcmpt' in locals():\n raise ValueError(\"Can't have two subcompartments.\")\n else:\n subcmpt = Compartment(cmpt[0], cmpt[1])\n if subcmpt not in locals():\n subcmpt = None\n\n return maincmpt, peripherals, subcmpt",
"def list_availability_domains(self, compartment_id, **kwargs):\n resource_path = \"/availabilityDomains\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_availability_domains got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[AvailabilityDomain]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[AvailabilityDomain]\")",
"def list_components(self, request, context):\n response = ListComponentsResponse()\n for component in self._delegator.list_components():\n response.components.append(component)\n return response",
"def getCompartmentType(self):\n return _libsbml.Compartment_getCompartmentType(self)",
"def get_all_comps(self,only_leaves=False):\n def get_comp_comps(comp):\n if not comp.is_kit_invoice_comp:\n return [comp] # stop condition\n if only_leaves: # meaning we don't want under-kits\n res = []\n else:\n res = [comp]\n for c in comp.child_ids:\n res += get_comp_comps(c) # recursive call\n return res\n self.ensure_one()\n if not self.is_kit_invoice_line:\n return []\n result = []\n for comp in self.direct_child_ids:\n result += get_comp_comps(comp)\n return result",
"def isSetCompartment(self):\n return _libsbml.QualitativeSpecies_isSetCompartment(self)",
"def addCompartment(self, vol=1, comp_id=\"\"):\n\n c1 = self.model.createCompartment()\n self.check(c1, \"create compartment\")\n if len(comp_id) == 0:\n comp_id = \"c\" + str(self.model.getNumCompartments())\n self.check(c1.setId(comp_id), \"set compartment id\")\n self.check(c1.setConstant(True), 'set compartment \"constant\"')\n self.check(c1.setSpatialDimensions(3), \"set compartment dimensions\")\n\n self.check(c1.setSize(vol), 'set compartment \"size\"')\n self.check(c1.setUnits(\"litre\"), \"set compartment size units\")\n return c1",
"def compartment_id(self):\n return self._compartment_id",
"def compartment_id(self):\n return self._compartment_id",
"def List(self, parent_id=None, batch_mode=False, only_generate_request=False):\n\n if batch_mode:\n requests = [self._MakeListRequestTuple(parent_id)]\n if not only_generate_request:\n return self._compute_client.MakeRequests(requests)\n return requests\n\n return [self._service.List(self._MakeListRequestTuple(parent_id)[2])]",
"def components_list(self, mar, _request):\n config = self._services.config.GetProjectConfig(mar.cnxn, mar.project_id)\n components = [api_pb2_v1_helpers.convert_component_def(\n cd, mar, self._services) for cd in config.component_defs]\n return api_pb2_v1.ComponentsListResponse(\n components=components)",
"def getCompartment(self):\n return _libsbml.Reaction_getCompartment(self)",
"def isSetCompartment(self):\n return _libsbml.Reaction_isSetCompartment(self)",
"def get_campus_list(self, conn, offset=0, limit=100):\n path = urls.FLOOR_PLAN[\"GET_CAMPUS_LIST\"]\n params = {\n \"offset\": offset,\n \"limit\": limit\n }\n resp = conn.command(apiMethod=\"GET\", apiPath=path, apiParams=params)\n return resp"
] | [
"0.62096506",
"0.60235274",
"0.5435602",
"0.5372225",
"0.52894926",
"0.5113388",
"0.50968504",
"0.5042258",
"0.49255446",
"0.4908474",
"0.48918715",
"0.4878826",
"0.4840922",
"0.46979246",
"0.46899638",
"0.46821752",
"0.468023",
"0.45857018",
"0.45253602",
"0.4391601",
"0.4379423",
"0.43772116",
"0.43746123",
"0.4372277",
"0.4372277",
"0.4354483",
"0.43497947",
"0.434443",
"0.4339763",
"0.4327338"
] | 0.84061605 | 0 |
Lists all the tags enabled for cost-tracking in the specified tenancy. For information about cost-tracking tags, see `Using Cost-tracking Tags`__. | def list_cost_tracking_tags(self, compartment_id, **kwargs):
resource_path = "/tagNamespaces/actions/listCostTrackingTags"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_cost_tracking_tags got unknown kwargs: {!r}".format(extra_kwargs))
query_params = {
"compartmentId": compartment_id,
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[Tag]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[Tag]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listTags(self, authenticationToken):\r\n pass",
"def list_tags(self, session):\n result = self._tag(session.get, session=session)\n return result['tags']",
"def list_tags():\r\n tags = Tag.query.order_by(Tag.name).all()\r\n return render_template('tags.html', tags=tags)",
"def get_tags_list(*args, **kwargs):\n return Tag.objects.active()",
"def get_tags_list(*args, **kwargs):\n return Tag.objects.active()",
"def list_tags():\n\n tags = Tag.query.all()\n return render_template('tags/list_tags.html', tags=tags)",
"def list_all_tags(self,obs):",
"def tag_list(context, addon, dev_tags=None, user_tags=None):\n if not dev_tags and not user_tags:\n return ''\n if not dev_tags:\n dev_tags = []\n if not user_tags:\n user_tags = []\n\n c = {\n 'request': context['request'],\n 'addon': addon,\n 'dev_tags': dev_tags,\n 'user_tags': user_tags,\n }\n t = env.get_template('tags/tag_list.html').render(**c)\n return jinja2.Markup(t)",
"def getTagList(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALBehaviorManager\")\n return self.proxy.getTagList()",
"def getTags(number=None):",
"def list(self):\n\n\t\treturn self._list(\"/tag\", \"tag\")",
"def tags():",
"def handle_tags(self, request):\n \"\"\"\n @api {get} /tags List tags\n @apiName GetTags\n @apiGroup Misc\n @apiVersion 1.0.0\n\n @apiDescription List currenty used tags\n\n @apiSuccessExample {json} Example response:\n [\n \"tag1\",\n \"tag2\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n tags = []\n\n for task in self.cluster.config.get('tasks').values():\n if 'tags' in task:\n tags += task['tags']\n\n tags = list(set(tags))\n\n return HTTPReply(code = 200, body = json.dumps(tags), headers = headers)",
"def tag_list(request):\r\n rdict = request.matchdict\r\n username = rdict.get(\"username\", None)\r\n if username:\r\n username = username.lower()\r\n\r\n tags_found = TagMgr.find(username=username)\r\n\r\n return {\r\n 'tag_list': tags_found,\r\n 'tag_count': len(tags_found),\r\n 'username': username,\r\n }",
"def tags(self, request, tag_list, group):\n return tag_list",
"async def guild_tags(self, ctx):\n guild_tags = self._tag_dict.get(ctx.guild.id)\n if not guild_tags:\n raise commands.BadArgument(f'This guild does not have any tags!')\n tags = sorted(guild_tags.items(), key=lambda x: x[1]['uses'], reverse=True)\n data = [f'{tag[0]} - {tag[1][\"uses\"]} uses' for tag in tags]\n embed = discord.Embed(colour=self.bot.colour)\n embed.set_author(name=f\"All Tags in {ctx.guild}\", icon_url=ctx.guild.icon_url)\n source = IndexedListSource(data=data, embed=embed, title=\"Tags\")\n await CatchAllMenu(source=source).start(ctx)",
"def ListTags(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def __gitTagList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), True)",
"def show_tags():\n\n tags = Tag.query.all()\n\n return render_template(\"tags/tag_list.html\", tags=tags)",
"def get_all_tags():\n try:\n tags = g.projects.distinct('tags')\n return jsonify(sorted(tags, key=str.lower))\n except Exception as err:\n raise ApiException(str(err), 500)",
"def list_tags(ResourceArn=None):\n pass",
"def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def list(self):\n return self._post(\n request='list',\n uri=ApiUri.TAGS.value,\n ).get('tags')",
"def list_tags_for_resource(Resource=None):\n pass",
"def get_tags(self) -> List:\n LOGGER.info('Get all the tags')\n\n with self.client.create_session() as session:\n tag_count = (func.count(RDSTableTag.table_rk)\n + func.count(RDSDashboardTag.dashboard_rk)).label('tag_count')\n\n records = session.query(\n RDSTag.rk.label('tag_name'),\n tag_count\n )\\\n .outerjoin(RDSTableTag)\\\n .outerjoin(RDSDashboardTag)\\\n .filter(RDSTag.tag_type == 'default')\\\n .group_by(RDSTag.rk)\\\n .having(tag_count > 0)\\\n .all()\n\n results = []\n for record in records:\n results.append(TagDetail(tag_name=record.tag_name,\n tag_count=record.tag_count))\n\n return results",
"def get(self, currency, address):\n check_inputs(address=address, currency=currency) # abort if fails\n address_tags = commonDAO.list_address_tags(currency, address)\n return address_tags # can be empty list",
"def view_tags():\n tags = dict([ [k[8:],v] for k,v in os.environ.items() if k.startswith(\"HTTPBIN_\") ])\n\n if not tags:\n return Response(response=\"{}\", status=404, mimetype=\"application/json\")\n\n return jsonify(tags)",
"def get_all_tags():\n try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code': '200','data': data})",
"def tagging_criteria(self) -> 'outputs.AdhocBasedTaggingCriteriaResponse':\n return pulumi.get(self, \"tagging_criteria\")",
"def display_tags(self):\n from evennia.typeclasses.tags import Tag\n\n qs = (\n Tag.objects.filter(db_tagtype=None, db_category=None, db_data=None)\n .exclude(db_key__icontains=\"barracks\")\n .exclude(db_key__icontains=\"owned_room\")\n .exclude(db_key__icontains=\"_favorite\")\n )\n string = list_to_string([ob.db_key for ob in qs])\n self.msg(\n \"Types of tags (excluding custom ones for individuals, or those with categories): %s\"\n % string\n )"
] | [
"0.63822997",
"0.61948174",
"0.6163116",
"0.6132447",
"0.6132447",
"0.6095878",
"0.60922146",
"0.6062289",
"0.60137403",
"0.6006326",
"0.59625673",
"0.5926329",
"0.5855336",
"0.5804048",
"0.57833034",
"0.57692766",
"0.57686114",
"0.5734323",
"0.57283866",
"0.57195485",
"0.5714692",
"0.56930023",
"0.5682361",
"0.5677101",
"0.5653604",
"0.5633774",
"0.56310505",
"0.56263924",
"0.561428",
"0.561036"
] | 0.6232547 | 1 |
Lists the secret keys for the specified user. The returned object contains the secret key's OCID, but not the secret key itself. The actual secret key is returned only upon creation. | def list_customer_secret_keys(self, user_id, **kwargs):
resource_path = "/users/{userId}/customerSecretKeys"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_customer_secret_keys got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="list[CustomerSecretKeySummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="list[CustomerSecretKeySummary]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def describe_user_encryption_key_list(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_user_encryption_key_list_with_options(request, runtime)",
"def get_ssh_keys(self, user_id):\n _gu = self.get_user(user_id)\n if _gu is None:\n return []\n\n # build URL and make request\n return self._get('/users/{0}/keys'.format(_gu['id']))",
"def get_keys(user_id):\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n keys = []\n try:\n for row in db.execute(\"SELECT public_key FROM public_keys WHERE username=? AND status=?\", [user_id, PK_STATUS_OK]):\n keys.append({\"public\": row[0]})\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n if(keys == []):\n abort(404)\n return jsonify({'user':{'username':user_id, 'keys':keys}})",
"def secret_keys(self):\n return self._secret_keys",
"def list_credentials(user):\n return Credentials.list_credentials(user)",
"async def describe_user_encryption_key_list_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n runtime = util_models.RuntimeOptions()\n return await self.describe_user_encryption_key_list_with_options_async(request, runtime)",
"def list_user_keys(self):\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"GET\", \"/1/keys\", self.timeout)",
"def GetSecretKey(cls, user_id):\n uid = hashlib.sha256(str(user_id)).hexdigest()\n entity = ndb.Key(cls, uid).get()\n if not entity:\n entity = cls(id=uid, secret_key=GenerateRandomHexKey())\n entity.put()\n return entity.secret_key",
"def _all_secrets(cls, *, secretsmanager_client):\n return secretsmanager_client.list_secrets()['SecretList']",
"def list_api_keys(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/apiKeys\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_api_keys got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[ApiKey]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"list[ApiKey]\")",
"def list_secret(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.list_secret_with_http_info(**kwargs)\n else:\n (data) = self.list_secret_with_http_info(**kwargs)\n return data",
"def list_user_keys(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/keys\" % self.url_index_name, self.client.timeout)",
"async def list_secrets(self):\n pass",
"def list(ctx: CLIContext, user_id, is_active, filter_, order, offset, limit) -> None:\n fields = [\n keypair_fields['user_id'],\n keypair_fields['full_name'],\n keypair_fields['access_key'],\n keypair_fields['secret_key'],\n keypair_fields['is_active'],\n keypair_fields['is_admin'],\n keypair_fields['created_at'],\n keypair_fields['last_used'],\n keypair_fields['resource_policy'],\n keypair_fields['rate_limit'],\n keypair_fields['concurrency_used'],\n ]\n try:\n with Session() as session:\n fetch_func = lambda pg_offset, pg_size: session.KeyPair.paginated_list(\n is_active,\n user_id=user_id,\n fields=fields,\n page_offset=pg_offset,\n page_size=pg_size,\n filter=filter_,\n order=order,\n )\n ctx.output.print_paginated_list(\n fetch_func,\n initial_page_offset=offset,\n page_size=limit,\n )\n except Exception as e:\n ctx.output.print_error(e)\n sys.exit(1)",
"def describe_user_encryption_key_list_with_options(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n self.call_api(params, req, runtime)\n )",
"def keys(self, bucket, user=None):\n raise NotImplementedError('TODO')",
"def get_db_secrets():\n secret_response = secrets_client.get_secret_value(SecretId=db_secret_name)\n secrets = json.loads(secret_response['SecretString'])\n return secrets",
"def get_list_keys(rpc_user, rpc_pwd):\n data = '{\"jsonrpc\":\"2.0\",\"id\":\"1\",\"method\":\"listkeys\"}'\n return call_rpc(rpc_user, rpc_pwd, data)",
"def List(self, user=None):\n with self.acc_lock:\n self._load()\n\n result = []\n if user:\n for k, v in self.tasks.iteritems():\n if v['user'] != user:\n continue\n d = dict(v)\n d['key'] = k\n result.append(d)\n else:\n for k, v in self.tasks.iteritems():\n d = dict(v)\n d['key'] = k\n result.append(d)\n return result",
"def view_list_robots_by_user(self, user, userID):\r\n return user._realm.getUser(userID).robots.keys()",
"def get_wishlist_key(self, user):\n user_id = self.auth.get_user_id(user)\n p_key = ndb.Key(Profile, user_id)\n\n wishlists = Wishlist.query(ancestor=p_key).fetch()\n if wishlists:\n return wishlists[0].key\n\n wl_id = Wishlist.allocate_ids(size=1, parent=p_key)[0]\n wl_k = ndb.Key(Wishlist, wl_id, parent=p_key)\n Wishlist(**{'key': wl_k}).put()\n\n return wl_k",
"def get_all(user_id):\n return Bucketlist.query.filter_by(created_by=user_id)",
"def get_bookmarked_items(user_id):\n return list(Bookmark.objects.filter(user=user_id).values_list(\n 'item_id', flat=True))",
"def get_secrets(request):\n secret_keys = (\n 'neptune_sql_credentials',\n 'triton_sql_credentials',\n 'saturn_sql_credentials',\n 'qualtrics_credentials',\n 'rserve_service_account_credentials',\n )\n secrets = {s: json.loads(SecretValue.get(s, 'null'))\n for s in secret_keys}\n\n # Add the mandrill api key, which isn't a JSON string.\n if request.get('send_email', None) != 'false':\n secrets['mandrill_api_key'] = SecretValue.get(\n 'mandrill_api_key', '')\n\n return secrets",
"def view_list_containers_by_user(self, user, userID):\r\n return user._realm.getUser(userID).containers.keys()",
"def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)",
"def get_s3_keys(bucket, user_keys = None):\n keys = []\n if user_keys is None:\n \t\t\t\ts3 = boto3.client('s3')\n else:\n s3 = boto3.client('s3', \n aws_access_key_id = user_keys[\"AWS_ACCESS_KEY_ID\"], \n aws_secret_access_key = user_keys[\"AWS_SECRET_ACCESS_KEY\"], \n region_name = user_keys[\"REGION_NAME\"]\n ) \t \n \n resp = s3.list_objects_v2(Bucket= bucket)\n for obj in resp['Contents']:\n keys.append(obj['Key'])\n return keys",
"async def describe_user_encryption_key_list_with_options_async(\n self,\n request: dds_20151201_models.DescribeUserEncryptionKeyListRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeUserEncryptionKeyListResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.target_region_id):\n query['TargetRegionId'] = request.target_region_id\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeUserEncryptionKeyList',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeUserEncryptionKeyListResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def list(**kwargs):\n cluster_call(\"secret_list\", **kwargs)",
"def list_namespaced_secret(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.list_namespaced_secret_with_http_info(namespace, **kwargs)\n else:\n (data) = self.list_namespaced_secret_with_http_info(namespace, **kwargs)\n return data"
] | [
"0.66501856",
"0.6520959",
"0.6316638",
"0.6299123",
"0.62734264",
"0.627234",
"0.62511283",
"0.606225",
"0.60029274",
"0.59841985",
"0.5940731",
"0.5805145",
"0.5801294",
"0.5739835",
"0.5712983",
"0.5704147",
"0.566274",
"0.5550773",
"0.5525107",
"0.5505345",
"0.5473902",
"0.5453241",
"0.53608596",
"0.5350772",
"0.53441215",
"0.53431183",
"0.53249586",
"0.5310021",
"0.52864754",
"0.52864033"
] | 0.7246356 | 0 |
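The list_customer_secret_keys row above describes a plain GET against /users/{userId}/customerSecretKeys that returns CustomerSecretKeySummary objects (the key's OCID and display name, never the secret value itself). A minimal caller-side sketch in Python, assuming the standard ~/.oci/config credentials file and a placeholder user OCID; neither value comes from the row itself:

import oci

# Load credentials from the default ~/.oci/config profile (assumed to exist).
config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Placeholder user OCID -- substitute a real one.
user_id = "ocid1.user.oc1..exampleuniqueID"

# Each summary carries the key's OCID, display name and lifecycle state; the
# actual secret is only returned by the create call, never by this list call.
response = identity.list_customer_secret_keys(user_id)
for key in response.data:
    print(key.id, key.display_name, key.lifecycle_state)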
Lists the dynamic groups in your tenancy. You must specify your tenancy's OCID as the value for the compartment ID (remember that the tenancy is simply the root compartment). See `Where to Get the Tenancy's OCID and User's OCID`__. | def list_dynamic_groups(self, compartment_id, **kwargs):
resource_path = "/dynamicGroups"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_dynamic_groups got unknown kwargs: {!r}".format(extra_kwargs))
query_params = {
"compartmentId": compartment_id,
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[DynamicGroup]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[DynamicGroup]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list(request):\n return render_to_response('rteacher/manage_groups_list.html', request, **klist(\n request=request\n ))",
"def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))",
"def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)",
"def get_all_groups():\n return jsonify(admin.get_all_groups(current_app.scoped_session()))",
"def list_groups(access_token):\n request_url = OKTA_URL + \"api/v1/groups\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n group_request = requests.get(request_url, headers=headers).json()\n return group_request",
"def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)",
"def ObjectGroups(object_id):\n rhino_object = rhutil.coercerhinoobject(object_id, True, True)\n if rhino_object.GroupCount<1: return []\n group_indices = rhino_object.GetGroupList()\n rc = [scriptcontext.doc.Groups.GroupName(index) for index in group_indices]\n return rc",
"def product_group_list(obj):\n client = get_client(obj)\n\n res = client.product_group_list()\n\n print(json.dumps(res, indent=4))",
"def groups(self):\n return self.get_data(\"groups\")",
"def get_groups():\r\n if 'username' not in flask.session:\r\n return flask.jsonify(**{'message': 'Forbidden', 'status_code': 403})\r\n\r\n context = {}\r\n context['url'] = flask.request.path\r\n context['groups'] = []\r\n\r\n # Retreive query variables\r\n query_num_groups = flask.request.args.get('size') \r\n query_page = flask.request.args.get('page') \r\n num_groups = int(query_num_groups) if query_num_groups != None else 10\r\n page_number = int(query_page) if query_page != None else 0\r\n\r\n groups = get_group_listing(flask.session['username'], \r\n num_groups, page_number)\r\n for g in groups:\r\n context['groups'].append({\r\n 'id': g[0],\r\n 'name': g[1]\r\n })\r\n\r\n if (num_groups == 10):\r\n context['next'] = '{}?page={}'.format(context['url'], page_number + 1)\r\n else:\r\n context['next'] = '{}?page={}&size={}'.format(context['url'], \r\n page_number + 1, num_groups)\r\n\r\n return flask.jsonify(**context)",
"def list_groups(request):\n groups = models.UserGroup.all().order('name')\n return utility.respond(request, 'admin/list_groups', {'groups': groups})",
"def list_groups(self):\n return self.get_admin(\"groups\")",
"def get_groups(self, obj):\n groupsForCompany = get_groups_with_perms(obj)\n return [x.id for x in groupsForCompany]",
"def get_group_list(org_id):\n tList = get_template('app/usermanagementorg/group_list.html')\n groups = get_groups(org_id)\n return tList.render(Context({ 'groups': groups, }))",
"def groups(self):\n return []",
"def list_template_groups(context):\n template_groups = get_oneoffixx_template_groups()\n terms = []\n for group in template_groups:\n terms.append(SimpleVocabulary.createTerm(group.get(\"id\"),\n group.get(\"id\"),\n group.get(\"localizedName\")))\n return MutableObjectVocabulary(terms)",
"def customer_group_list(h):\n global html\n html = h\n\n common_elements = customer_common_elements(group=True)\n\n css_list = common_elements[\"css_list\"]\n\n javascript_list = common_elements[\"javascript_list\"]\n\n all_btn = common_elements[\"all_btn\"]\n\n html.new_header(\"Customer Organization\", \"customer_group_management.py\", all_btn, css_list, javascript_list)\n customer_string = \"\"\"\n <div>\n <table id=\"customers\" cellpadding=\"0\" cellspacing=\"0\" border=\"0\" class=\"display\" style=\"text-align:center\">\n <thead>\n <tr>\n <th>\n Company System Role\n </th>\n <th>\n Company Name\n </th>\n <th>\n Company Address\n </th>\n <th>\n Company Telephone\n </th>\n <th>\n Company Website\n </th>\n <th>\n Company Business\n </th>\n <th>\n Company Registration Number\n </th>\n <th>\n Company VAT Number\n </th>\n <th>\n Company Sales Contact\n </th>\n <th>\n Company Purchase Contact\n </th>\n <th>\n Actions\n </th>\n </tr>\n </thead>\n </table>\n </div>\n \"\"\"\n customer_string += \"\"\"\n <script>\n get_customer_groups();\n </script>\n \"\"\"\n html.write(customer_string)\n html.new_footer()\n pass",
"def get_all(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving groups\", \"/sysaccount/groups/v1\")",
"def getCSPGroups(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n ORG_ID = kwargs['ORG_ID']\n strCSPProdURL = kwargs['strCSPProdURL']\n\n if kwargs['search_term'] is None:\n json_response = get_csp_groups_json(strCSPProdURL, ORG_ID, sessiontoken)\n print(\"Got the groups\")\n if json_response is not None:\n groups = json_response['results']\n num_groups = len(groups)\n if num_groups == 0:\n print(\"No results returned.\")\n else:\n print(str(num_groups) + \" result\" + (\"s\" if num_groups > 1 else \"\") + \" returned:\")\n table = PrettyTable(['ID', 'Name', 'Group Type', 'User Count'])\n for grp in groups:\n table.add_row([grp['id'], grp['displayName'], grp['groupType'], grp['usersCount']])\n print(table)\n else:\n search_term = kwargs['search_term']\n json_response = get_csp_groups_searchterm_json(strCSPProdURL, ORG_ID, sessiontoken, search_term)\n if json_response is not None:\n groups = json_response['results']\n num_groups = len(groups)\n if num_groups == 0:\n print(\"No results returned.\")\n else:\n print(str(num_groups) + \" result\" + (\"s\" if num_groups > 1 else \"\") + \" returned:\")\n table = PrettyTable(['ID', 'Name', 'Group Type', 'User Count'])\n for grp in groups:\n table.add_row([grp['id'], grp['displayName'], grp['groupType'], grp['usersCount']])\n print(table)\n else:\n print(\"API Error\")\n sys.exit(1)",
"def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))",
"def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)",
"def get_list_groups(self):\n list_response = requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]",
"def groups(self):\r\n return resources.Groups(self)",
"def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)",
"def list_groups(self):\n return self._get(\"cloudConnectorGroups\").list",
"def groups_json(request):\n resp = []\n group_list = ResearchGroup.objects.order_by('name')\n for group in group_list:\n resp.append({'name': group.name, 'id': group.id})\n return HttpResponse(json.dumps(resp, ensure_ascii=False), content_type=\"application/json; charset=utf-8\")",
"def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups",
"def get_groups(self):\n return Client._get(self)",
"def get_groups(self):\n response = self._get(\"groups\")\n\n return response.json()",
"def list_groups(args):\n\n for group in get_groups(args):\n print(group)"
] | [
"0.65068406",
"0.6469846",
"0.63963234",
"0.6134552",
"0.6109888",
"0.6099704",
"0.60848904",
"0.60196185",
"0.6018239",
"0.6011199",
"0.5968778",
"0.5935231",
"0.59249777",
"0.587962",
"0.5860688",
"0.58439285",
"0.58141536",
"0.5788746",
"0.5758732",
"0.57442486",
"0.56795675",
"0.56738865",
"0.5656402",
"0.56422925",
"0.5641386",
"0.56351846",
"0.56318486",
"0.5625607",
"0.56103766",
"0.5600625"
] | 0.671684 | 0 |
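The list_dynamic_groups row above takes the tenancy OCID as the compartment ID and exposes page/limit pagination. A short sketch of how a caller might drain every page, assuming the oci.pagination.list_call_get_all_results helper from the same SDK and the tenancy OCID stored in the loaded config:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# The tenancy is the root compartment, so its OCID is the compartment ID here.
tenancy_id = config["tenancy"]

# list_dynamic_groups is paginated; the pagination helper follows the
# opc-next-page tokens and concatenates every page into one list.
all_groups = oci.pagination.list_call_get_all_results(
    identity.list_dynamic_groups, tenancy_id
).data

for group in all_groups:
    print(group.name, group.matching_rule)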
Lists the Fault Domains in your tenancy. Specify the OCID of either the tenancy or another of your compartments as the value for the compartment ID (remember that the tenancy is simply the root compartment). See `Where to Get the Tenancy's OCID and User's OCID`__. | def list_fault_domains(self, compartment_id, availability_domain, **kwargs):
resource_path = "/faultDomains"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_fault_domains got unknown kwargs: {!r}".format(extra_kwargs))
query_params = {
"compartmentId": compartment_id,
"availabilityDomain": availability_domain
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[FaultDomain]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[FaultDomain]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listDomains(self):\n reply = self.rpc.getDomains(self.username,\n self.password)\n if reply[0] == 'UNKNOWN_ERROR':\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply",
"def get_storage_domains(cohesity_client):\n storage_domain_list = cohesity_client.view_boxes.get_view_boxes()\n for domain in storage_domain_list:\n exported_res_dict[\"Storage Domains\"].append(domain.name)\n return storage_domain_list",
"def list_keystone_v3_domains(self):\n LOG_OBJ.debug(\"List the domains.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating domain\")\n print (\"No response from Server while creating domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Listing domains Failed with status %s \"\n \"and error : %s\" % response.status, response.data)\n print (\" Listing domains Failed with status %s and error : %s\" %\n response.status, response.data)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domains list : %s \" % output)\n print (\"Domains list : %s \" % output)\n return output['domains']",
"def list_all():\n\n return (_conn.listDefinedDomains() +\n [_conn.lookupByID(id).name() for id in _conn.listDomainsID()])",
"def cb_listdomains(self, cmd):\n for cur in sorted(self.d.listDomains(),\n key=lambda x: _domreverse(x['domain'])):\n print \"%(domain)60s %(expiration_date)15s\" % cur",
"def get_input_domains():\n df = pandas.read_excel(\"AutoScrapy/files/EBE21 - Top 100 Onlineshops to scrapp.ods\", engine=\"odf\")\n list_of_addresses = df['Domain'].to_list()\n list_of_addresses = [(\"http://\" + address) for address in list_of_addresses]\n print(list_of_addresses)\n return list_of_addresses",
"def show_domains(self):\n show_domains(self.system.cavity_gri)",
"def list_domain_names():\n pass",
"def get_ad_entries(cohesity_client):\n resp = cohesity_client.active_directory.get_active_directory_entry()\n if resp:\n ad_list = list()\n for each_ad in resp:\n ad_list.append(each_ad.domain_name)\n config_dict[each_ad.domain_name] = [\n \"username\", \"password\", \"machine_accounts\"]\n exported_res_dict[\"Active directories\"] = ad_list\n return resp",
"async def getDepartments(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getDepartments()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/departments\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")",
"def get_delta_domains():\n url = os.getenv('DELTAS_URL')\n if url is None:\n raise Exception('Delta report URL configuration not set!')\n\n json = requests.get(url, timeout=10).json()\n return [domain\n for (domain,)\n in json['values']\n if dnstwist.is_valid_domain(domain)]",
"def list_zones(self, **kwargs):\r\n return self.client['Account'].getDomains(**kwargs)",
"def doctors(self) -> DoctorsList:\n data = self.get(\"minhealth_doctors\")\n \n ls = [Doctors(**doc) for doc in data]\n return DoctorsList(items=ls)",
"def list_domain(self, feed_id=None):\n resources = self.list_resource(feed_id=feed_id, resource_type_id='Host Controller')\n domains = []\n if resources:\n for resource in resources:\n resource_data = self.get_config_data(\n feed_id=resource.path.feed_id, resource_id=resource.id)\n domain_data = resource_data.value\n domains.append(Domain(resource.id, resource.name, resource.path, domain_data))\n return domains",
"def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains",
"def getDomains(self, company):\n return self.db.getDomains(company)",
"def list_availability_domains(self, compartment_id, **kwargs):\n resource_path = \"/availabilityDomains\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_availability_domains got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[AvailabilityDomain]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[AvailabilityDomain]\")",
"def districts(self):\n catalog = getToolByName(self.context, 'portal_catalog')\n d = [dict(url=district.getURL(), title=district.Title,\n address=district.Description) for district in\n catalog({'object_provides': IDistrict.__identifier__,\n 'path': dict(query='/'.join(self.context.getPhysicalPath()),\n depth=1), 'sort_on': 'sortable_title'})]\n print d\n return d",
"def get_departments(self) -> list:\n return self.client.departments.get_all()",
"def dict_of_domains(fc):\r\n # need to find root database (GDB or SDE)\r\n db_root = os.path.dirname(fc)\r\n while db_root[-4:].lower() != '.gdb' and db_root[-4:].lower() != '.sde':\r\n old_db_root = db_root # protect against infinite loop\r\n db_root = os.path.dirname(db_root)\r\n if old_db_root == db_root: # protect against infinite loop\r\n break\r\n arcpy.AddMessage(\"Retrieving Domains from \" + str(db_root))\r\n return {domain.name: domain.codedValues for domain in arcpy.da.ListDomains(db_root)}",
"def tracking_domain_list(self):\r\n params = base.get_params(None, locals())\r\n return self._get('tracking_domain_list', params)",
"def handle_domains(\n actapi: act.api.Act, content: Text, domains: List[Text]\n) -> List[act.api.fact.Fact]:\n\n feeds_facts: List[act.api.fact.Fact] = []\n\n for domain in domains:\n\n chain = []\n\n chain.append(\n actapi.fact(\"connectsTo\").source(\"content\", content).destination(\"uri\", \"*\")\n )\n chain.append(\n actapi.fact(\"componentOf\").source(\"fqdn\", domain).destination(\"uri\", \"*\")\n )\n\n feeds_facts += act.api.fact.fact_chain(*chain)\n\n return feeds_facts",
"def GetListDoctors(self):\n\t\treturn self.ClientsMap.values()",
"def listRR(self):\n reply = self.rpc.getSubdomains(self.username,\n self.password,\n self.domain)\n\n if len(reply) and reply[0] in ('UNKNOWN_ERROR',\n 'RATE_LIMITED'):\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply",
"def list(self, domain):\n return request(\n API_LIST.DNS_LIST.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain\n }\n )",
"def domains(self):\n return DomainCollection(self.request)",
"def get_departments() -> list:\n return Department.query.all()",
"def list_domain_names(self) -> Dict:\n pass",
"def get_doctors():\n all_doctors = Doctor.query.all()\n result = doctors_schema.dump(all_doctors)\n return jsonify(result.data)",
"def extract_domains(self, resp):\n return"
] | [
"0.57624483",
"0.5618712",
"0.5579227",
"0.55783147",
"0.5538795",
"0.54575324",
"0.5412407",
"0.53967834",
"0.5350825",
"0.5334975",
"0.52671653",
"0.52478313",
"0.52312326",
"0.52176356",
"0.5216194",
"0.520539",
"0.5197498",
"0.5193738",
"0.51901394",
"0.5165269",
"0.51523757",
"0.5151562",
"0.51459605",
"0.5128849",
"0.5116295",
"0.5092359",
"0.50779206",
"0.5067776",
"0.5048499",
"0.50379896"
] | 0.65855885 | 0 |
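Because the list_fault_domains row above requires both a compartment OCID and an availability domain name, a caller typically chains it behind list_availability_domains (the method that appears among the negatives). A sketch under the same ~/.oci/config assumption:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)
tenancy_id = config["tenancy"]

# Fault domains are scoped to an availability domain, so list the ADs first
# and then ask for the fault domains inside each one.
for ad in identity.list_availability_domains(tenancy_id).data:
    fault_domains = identity.list_fault_domains(tenancy_id, ad.name).data
    print(ad.name, [fd.name for fd in fault_domains])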
Lists all the identity providers in your tenancy. You must specify the identity provider type (e.g., `SAML2` for identity providers using the SAML2.0 protocol). You must specify your tenancy's OCID as the value for the compartment ID (remember that the tenancy is simply the root compartment). See `Where to Get the Tenancy's OCID and User's OCID`__. | def list_identity_providers(self, protocol, compartment_id, **kwargs):
resource_path = "/identityProviders"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_identity_providers got unknown kwargs: {!r}".format(extra_kwargs))
query_params = {
"protocol": protocol,
"compartmentId": compartment_id,
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[IdentityProvider]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[IdentityProvider]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list(conn):\n try:\n return conn.get(url='/auth-providers')['providers']\n except SystemError as e:\n raise e",
"def providers(self) -> List[str]:\n return [\n getattr(auth_account, \"provider\")\n for auth_account in self.auth_accounts # pylint: disable=not-an-iterable\n ]",
"def get_providers(self):\n \n r = requests.get(\n self._url('/dataproviders'),\n headers={'Authorization': self.token},\n proxies=self.proxy)\n r.raise_for_status()\n providers = r.json()\n self.providers = [p['name'] for p in providers if (p['user'] is not None and p['user']!='SCRIPTING ENGINE')]\n log.info('{:d} providers found'.format(len(self.providers)))\n\n return",
"def providers(self):\n return [p for p in self._db.providers.values() if self._dbattr(p.IDATTR)]",
"def providers(self):\n return [p for p in self._db.providers.values() if self._dbattr(p.IDATTR)]",
"def get_providers(providers: list, provider_type: str = 'Author') -> list:\n return [Node('Provider', name=provider, type=provider_type) for provider in providers]",
"def get_auth_providers(cls):\n return [cls.os_primary.auth_provider]",
"def all_providers(self) -> List[ProviderInfo]:\n sp_key = self.__providers_key()\n value = self.get(name=sp_key)\n if value is None:\n return []\n js = utf8_decode(data=value)\n array = json_decode(string=js)\n return ProviderInfo.convert(array=array)",
"def get_providers(self):\n return self.keys",
"def get_providers(self):\n return self.keys",
"def get_providers(self):\n return self.keys",
"def get_providers(self):\n return self.keys",
"def get_providers(self):\n return self.keys",
"def get_providers(self):\n return self.keys",
"def get_tenants():\n # these are the tenant_id strings configured for the service -\n tenants_strings = conf.tenants\n result = []\n # the tenants service is a special case, as it must be a) configured to serve all tenants and b) actually maintains\n # the list of tenants in its own DB. in this case, we return the empty list since the tenants service will use direct\n # db access to get necessary data.\n if conf.service_name == 'tenants' and tenants_strings[0] == '*':\n return result\n\n # in dev mode, services can be configured to not use the security kernel, in which case we must get\n # configuration for a \"dev\" tenant directly from the service configs:\n if not conf.use_sk:\n for tenant in tenants_strings:\n t = {'tenant_id': tenant,\n 'iss': conf.dev_iss,\n 'public_key': conf.dev_jwt_public_key,\n 'default_access_token_ttl': conf.dev_default_access_token_ttl,\n 'default_refresh_token_ttl': conf.dev_default_refresh_token_ttl,\n }\n result.append(t)\n\n else:\n # TODO -- look up tenants in the tenants API, get the associated parameters (including sk location)\n pass\n return result",
"def get_queryset(self):\n if self.requested_enterprise_uuid is None:\n raise ParseError('Required enterprise_customer_uuid is missing')\n enterprise_customer_idp = get_object_or_404(\n EnterpriseCustomerIdentityProvider,\n enterprise_customer__uuid=self.requested_enterprise_uuid\n )\n try:\n saml_provider = SAMLProviderConfig.objects.current_set().get(\n slug=convert_saml_slug_provider_id(enterprise_customer_idp.provider_id))\n except SAMLProviderConfig.DoesNotExist:\n raise Http404('No matching SAML provider found.') # lint-amnesty, pylint: disable=raise-missing-from\n return SAMLProviderData.objects.filter(entity_id=saml_provider.entity_id)",
"def get(self):\n return get_all_provider()",
"def provider_list(cls, args, config):\n # print \"MOLNSProvider.provider_list(args={0}, config={1})\".format(args, config)\n providers = config.list_objects(kind='Provider')\n if len(providers) == 0:\n print \"No providers configured\"\n else:\n table_data = []\n for p in providers:\n table_data.append([p.name, p.type])\n # table_print(['name', 'type'], table_data)\n r = {'type': 'table', 'column_names': ['name', 'type'], 'data': table_data}\n return r",
"def get_registered_providers():\n return _instance.providers_cls.keys()",
"def get_accounts(self):\n uri = '/credentials'\n response = gate_request(uri=uri)\n assert response.ok, 'Failed to get accounts: {0}'.format(response.text)\n\n all_accounts = response.json()\n self.log.debug('Accounts in Spinnaker:\\n%s', all_accounts)\n\n filtered_accounts = []\n for account in all_accounts:\n if account['type'] == self.provider:\n filtered_accounts.append(account)\n\n if not filtered_accounts:\n raise ForemastError('No Accounts matching {0}.'.format(self.provider))\n\n return filtered_accounts",
"def getProvidersReferences(self):\n field = self.getWrappedField('provider')\n providers = list(field._Vocabulary(self).items())\n providers.sort(lambda a, b: cmp(a[1].lower(), b[1].lower()))\n return atapi.DisplayList(providers)",
"def list_accounts(self):\n information = []\n for provider in self._accounts.values():\n information.append({\n 'token': provider.credentials.token,\n 'url': provider.credentials.url,\n })\n\n return information",
"def get_all_tenants():\n tenants = identity.Tenant.query.all()\n return tenants",
"def get_tenants(self):",
"def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]",
"def registered_providers():\n return list(_DEFAULT_PROVIDER.providers)",
"def tenancies(self) -> Iterable[dto.Tenancy]:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def network_service_providers(self):\n path = '/v2.0/service-providers'\n res = self.network.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack network service providers: %s' % \n truncate(res))\n return res[0]['service_providers']",
"def get_all_providers() -> list[str]:\n return list(ALL_PROVIDERS)",
"def ids(self):\n return ['%s:%s' % (p.NAME, self._dbattr(p.IDATTR)) for p in self.providers]"
] | [
"0.6727649",
"0.64191025",
"0.6271538",
"0.62455416",
"0.62455416",
"0.6074249",
"0.6066744",
"0.5960773",
"0.59022737",
"0.59022737",
"0.59022737",
"0.59022737",
"0.59022737",
"0.59022737",
"0.5825487",
"0.5819967",
"0.581062",
"0.5770538",
"0.576516",
"0.5720312",
"0.571423",
"0.56943977",
"0.5692585",
"0.5669317",
"0.5665904",
"0.56125253",
"0.5610321",
"0.557578",
"0.55615956",
"0.54955745"
] | 0.64960575 | 1 |
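The list_identity_providers row above requires the protocol as its first positional argument, with SAML2 being the value called out in the description. A minimal sketch, assuming the tenancy OCID is read from the loaded config:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)
tenancy_id = config["tenancy"]

# Protocol is mandatory; "SAML2" covers SAML 2.0 federations such as IDCS or ADFS.
response = identity.list_identity_providers("SAML2", tenancy_id)
for idp in response.data:
    print(idp.id, idp.name, idp.product_type)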
Lists the group mappings for the specified identity provider. | def list_idp_group_mappings(self, identity_provider_id, **kwargs):
resource_path = "/identityProviders/{identityProviderId}/groupMappings"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_idp_group_mappings got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"identityProviderId": identity_provider_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[IdpGroupMapping]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[IdpGroupMapping]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)",
"def get_identity_groups(self):\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.identity.identitygroup.1.0+xml'})\n\n\t\tresp = self.ise.get('{0}/config/identitygroup'.format(self.url_base))\n\n\t\tif resp.status_code == 200:\n\t\t\tresult['success'] = True\n\t\t\t###\n\t\t\tx = ERS._to_json(resp.text)['ns3:searchResult']['ns3:resources']['ns5:resource']\n\t\t\tprint (\"x\", len(x))\n\t\t\tprint (x[0])\n\t\t\tfor element in x[0]:\n\t\t\t\tprint (element,x[0][element])\n\t\t\t###\n\t\t\tresult['response'] = [(i['@name'], i['@id'], i['@description'],i['link']['@href'])\n\t\t\t\t\t\t\t\t for i in ERS._to_json(resp.text)['ns3:searchResult']['ns3:resources']['ns5:resource']]\n\t\t\treturn result\n\t\telse:\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result",
"def list_identity_provider_groups(self, identity_provider_id, **kwargs):\n resource_path = \"/identityProviders/{identityProviderId}/groups\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_identity_provider_groups got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"identityProviderId\": identity_provider_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n query_params = {\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[IdentityProviderGroupSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[IdentityProviderGroupSummary]\")",
"def get_group_list(org_id):\n tList = get_template('app/usermanagementorg/group_list.html')\n groups = get_groups(org_id)\n return tList.render(Context({ 'groups': groups, }))",
"def get_groups(self):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" ORDER BY $groupname_field$\",{'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: get_groups: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_groupname_field]",
"def list(request):\n return render_to_response('rteacher/manage_groups_list.html', request, **klist(\n request=request\n ))",
"def list_groups(self):\n return self.get_admin(\"groups\")",
"def list_groups(request):\n groups = models.UserGroup.all().order('name')\n return utility.respond(request, 'admin/list_groups', {'groups': groups})",
"def list_groups(self, **params):\n url = 'groups'\n if params:\n url += '?%s' % urllib.urlencode(params)\n resp, body = self.get(url)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)",
"def get_list_groups(self):\n list_response = requests.get(self.groups_url, headers=self.headers)\n return list_response.json()[\"groups\"]",
"def get_map_groups(user_map):\n pg = user_map.permission_group_user_map.all()\n gids = list(pg.values_list('group', flat=True))\n if len(gids) > 0:\n return Group.objects.filter(id__in=gids)\n\n return Group.objects.filter(name=DEFAULT_GROUP)",
"def get_all_groups():\n return jsonify(admin.get_all_groups(current_app.scoped_session()))",
"def nfvi_get_instance_groups(callback):\n cmd_id = _compute_plugin.invoke_plugin('get_instance_groups',\n callback=callback)\n return cmd_id",
"def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))",
"def list_groups(self):\n return self._get(\"cloudConnectorGroups\").list",
"def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()",
"def get_group_list(ip_address, headers):\n group_list = None\n group_url = 'https://%s/api/GroupService/Groups' % ip_address\n response = requests.get(group_url, headers=headers, verify=False)\n if response.status_code == 200:\n group_response = response.json()\n if group_response['@odata.count'] > 0:\n group_list = [x['Id'] for x in group_response['value']]\n else:\n print(\"No groups found at \", ip_address)\n else:\n print(\"No groups found at \", ip_address)\n return group_list",
"def get_groups():\r\n if 'username' not in flask.session:\r\n return flask.jsonify(**{'message': 'Forbidden', 'status_code': 403})\r\n\r\n context = {}\r\n context['url'] = flask.request.path\r\n context['groups'] = []\r\n\r\n # Retreive query variables\r\n query_num_groups = flask.request.args.get('size') \r\n query_page = flask.request.args.get('page') \r\n num_groups = int(query_num_groups) if query_num_groups != None else 10\r\n page_number = int(query_page) if query_page != None else 0\r\n\r\n groups = get_group_listing(flask.session['username'], \r\n num_groups, page_number)\r\n for g in groups:\r\n context['groups'].append({\r\n 'id': g[0],\r\n 'name': g[1]\r\n })\r\n\r\n if (num_groups == 10):\r\n context['next'] = '{}?page={}'.format(context['url'], page_number + 1)\r\n else:\r\n context['next'] = '{}?page={}&size={}'.format(context['url'], \r\n page_number + 1, num_groups)\r\n\r\n return flask.jsonify(**context)",
"def list_groups(self):\n\n for counter, label in enumerate(self.exp_labels_list):\n print('Key {}: {} \\n'.format(str(counter), label))",
"def list_groups(access_token):\n request_url = OKTA_URL + \"api/v1/groups\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n group_request = requests.get(request_url, headers=headers).json()\n return group_request",
"def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)",
"def list_groups_factory(context, request):\n return ListGroupsService(session=request.db,\n request_authority=request.authority,\n route_url=request.route_url)",
"def get_groups(self):\n response = self._get(\"groups\")\n\n return response.json()",
"def list_groups(args):\n\n for group in get_groups(args):\n print(group)",
"def get_groups(self):\n return Client._get(self)",
"def management_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"management_groups\")",
"def groups(self):\n return self.get_data(\"groups\")",
"def get_groups(id_project):\n data = sql.list_groups(id_project)\n names = [(d['id'], d['name']) for d in data]\n return names",
"def get_groups():\n\n # FUTURE: Properly reutrn error, Mongo is giving it's own\n if current_user.groups:\n return Response(response=json.dumps([g.to_dict() for g in current_user.groups]), status=200, mimetype=\"application/json\")\n else:\n return return_json_error('No groups assigned to', 500)",
"def list(self):\n METHOD = 'GET'\n API_PATH = '/groups/list'\n\n # Make REST call\n resp = self._rest_call[METHOD](API_PATH)\n\n if resp.status_code == 200:\n return resp.json().get('group_names')\n\n elif resp.status_code == 403:\n raise AuthorizationError(\"User is not authorized or token is incorrect.\")\n\n else:\n if resp.json().get(\"error_code\") in ERROR_CODES:\n raise ERROR_CODES[resp.json().get('error_code')](resp.json().get('message'))\n else:\n raise APIError(\"Response code {0}: {1} {2}\".format(resp.status_code,\n resp.json().get('error_code'),\n resp.json().get('message')))"
] | [
"0.60451424",
"0.6032505",
"0.5956654",
"0.5918813",
"0.58596",
"0.5801229",
"0.5754967",
"0.5714305",
"0.5660738",
"0.56143993",
"0.55850315",
"0.5576164",
"0.55513936",
"0.5545537",
"0.5543902",
"0.5542496",
"0.5516063",
"0.5498167",
"0.5491729",
"0.546018",
"0.5443648",
"0.5429444",
"0.54240775",
"0.5374941",
"0.53657424",
"0.533331",
"0.5328101",
"0.5327889",
"0.5309783",
"0.52744496"
] | 0.6934423 | 0 |
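The list_idp_group_mappings row above is keyed by the identity provider's OCID rather than by a compartment. A sketch with a purely illustrative placeholder OCID; in practice you would substitute an ID returned by list_identity_providers:

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Placeholder identity provider OCID -- substitute the OCID of the federation
# returned by list_identity_providers.
idp_id = "ocid1.saml2idp.oc1..exampleuniqueID"

# Each mapping ties a group defined in the external IdP to an IAM group OCID.
for mapping in identity.list_idp_group_mappings(idp_id).data:
    print(mapping.idp_group_name, "->", mapping.group_id)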
Lists the MFA TOTP devices for the specified user. The returned object contains the device's OCID, but not the seed. The seed is returned only upon creation or when the IAM service regenerates the MFA seed for the device. | def list_mfa_totp_devices(self, user_id, **kwargs):
resource_path = "/users/{userId}/mfaTotpDevices"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit",
"sort_by",
"sort_order"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_mfa_totp_devices got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[MfaTotpDeviceSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[MfaTotpDeviceSummary]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def retrieve_user_devices(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n devices = self.database.retrieve_user_devices(user_id)\n if devices is not None:\n devices = list(set(devices)) # De-duplicate\n return devices",
"def create_mfa_totp_device(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")",
"def get_all_mfa_devices(self, user_name, marker=None, max_items=None):\r\n params = {'UserName' : user_name}\r\n if marker:\r\n params['Marker'] = marker\r\n if max_items:\r\n params['MaxItems'] = max_items\r\n return self.get_response('ListMFADevices',\r\n params, list_marker='MFADevices')",
"def devices_for_user(self, user, confirmed=None):\n devices = self.model.objects.filter(user=user)\n if confirmed is not None:\n devices = devices.filter(confirmed=bool(confirmed))\n\n return devices",
"def retrieve_user_devices(self, user_id):\n if user_id is None:\n self.log_error(MongoDatabase.retrieve_user_devices.__name__ + \"Unexpected empty object: user_id\")\n return None\n\n try:\n user_id_obj = ObjectId(user_id)\n user = self.users_collection.find_one({\"_id\": user_id_obj})\n if user is not None:\n if 'devices' in user:\n return user['devices']\n except:\n traceback.print_exc(file=sys.stdout)\n self.log_error(sys.exc_info()[0])\n return None",
"def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)",
"def generate_totp_seed(self, user_id, mfa_totp_device_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}/actions/generateSeed\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"generate_totp_seed got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDevice\")",
"def get_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDeviceSummary\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"MfaTotpDeviceSummary\")",
"def list_devices(self):\n response = self.oauth.get(url=f'{self.base_url}/json/devices/list')\n\n result = response.json()['device']\n for device in result:\n print(device)",
"def get_list_of_devices(self, give_json=False):\n\n url = Constants.BASE_URL + 'users/devices'\n response = requests.get(url=url, params={'key': self.user_access_token})\n\n if give_json:\n return response.json()\n else:\n return response.text",
"def get_user_access_tokens(request, user):\n manager = internal_keystoneclient(request).oauth2.access_tokens\n\n return manager.list_for_user(user=user)",
"def list_devices():\n return _lib.SeaTeaseAPI().list_devices()",
"def get_user_devices_adapter(json_response):\n\n if 'devices' in json_response:\n ret = {\"result\": []}\n for device in json_response['devices']:\n ret[\"result\"].append(\n {\"name\": device[\"name\"],\n \"type\": device[\"type\"],\n \"id\": device[\"id\"],\n \"is_active\": device[\"is_active\"]})\n return ret\n return json_response",
"def get_data_source_tokens_by_user(self, user_id: int):\n all_data_source_tokens_array = []\n user = None\n try:\n user: User = UserService.get_user_by_id(self, user_id)\n except Exception:\n raise\n\n try:\n for data_source_token in DataSourceToken.select(\n DataSourceToken,\n user).where(DataSourceToken.user_id == user_id):\n all_data_source_tokens_array.append(\n model_to_dict(data_source_token, recurse=False))\n return all_data_source_tokens_array\n except Exception:\n raise",
"def test_user_get_topteams():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/top/10')\n assert r.status_code == 200\n destroy_ctfd(app)",
"def test_multiple_devices(self) -> None:\n\n self.http_client.request = AsyncMock(\n return_value=FakeResponse.json(\n code=200,\n payload={\n \"active\": True,\n \"sub\": SUBJECT,\n \"scope\": \" \".join(\n [\n MATRIX_USER_SCOPE,\n f\"{MATRIX_DEVICE_SCOPE_PREFIX}AABBCC\",\n f\"{MATRIX_DEVICE_SCOPE_PREFIX}DDEEFF\",\n ]\n ),\n \"username\": USERNAME,\n },\n )\n )\n request = Mock(args={})\n request.args[b\"access_token\"] = [b\"mockAccessToken\"]\n request.requestHeaders.getRawHeaders = mock_getRawHeaders()\n self.get_failure(self.auth.get_user_by_req(request), AuthError)",
"def get_devices(self):\n return self.api_request('GET', self.url + '/device', {})",
"def delete_mfa_totp_device(self, user_id, mfa_totp_device_id, **kwargs):\n resource_path = \"/users/{userId}/mfaTotpDevices/{mfaTotpDeviceId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_mfa_totp_device got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id,\n \"mfaTotpDeviceId\": mfa_totp_device_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)",
"def get_user_tasks(self, user_pk: int) -> APIResponse:\n user_args = {\"pk\": user_pk}\n return self._get(\"user_list\", user_args)",
"def get_tokens_for_user(user):\n\n refresh = RefreshToken.for_user(user)\n\n return {\n 'refresh': str(refresh),\n 'access': str(refresh.access_token),\n }",
"def devices(self, query=None):\n if query is not None:\n query = clean(query, self.devices_parameters)\n query = \"?\" + urllib.parse.urlencode(query)\n else:\n query = \"\"\n return self.get(\"/devices\" + query)",
"def load_devices(self):\n response = self.oauth.get(url=f'{self.base_url}/json/devices/list')\n\n result = response.json()['device']\n return [(device['id'], device['name'], device['state']) for device in result]",
"def _list_tokens(self, user_id, tenant_id=None, trust_id=None,\n consumer_id=None):\n raise exception.NotImplemented() # pragma: no cover",
"def device_list(self, plant_id):\n return self.plant_info(plant_id)['deviceList']",
"def test_for_user(self):\n devices = Device.objects.for_user(self.user)\n self.assertEqual(len(devices), 1)\n self.assertEqual(devices[0], self.device)",
"def list_devices(arn=None, nextToken=None):\n pass",
"def get_devices(self):\n return get_devices(self.api_key)",
"def get_devices():\n devices = []\n for device_id in range(pm.lib.Pm_CountDevices()):\n devices.append(DeviceInfo(device_id))\n\n return devices",
"def plant_list(self, user_id):\n response = self.session.get(self.get_url('PlantListAPI.do'),\n params={'userId': user_id},\n allow_redirects=False)\n if response.status_code != 200:\n raise RuntimeError(\"Request failed: %s\", response)\n data = json.loads(response.content.decode('utf-8'))\n return data['back']",
"def view_list_users(self, user):\r\n return user.realm._users.keys()"
] | [
"0.63648194",
"0.62265784",
"0.6117706",
"0.61175627",
"0.5826038",
"0.574232",
"0.5716294",
"0.5668577",
"0.53647625",
"0.52985567",
"0.5172679",
"0.5028343",
"0.501832",
"0.49658716",
"0.4858226",
"0.48548654",
"0.4841386",
"0.48357037",
"0.48057312",
"0.47699174",
"0.4757789",
"0.47471172",
"0.47447142",
"0.47440553",
"0.4720507",
"0.47186446",
"0.47182205",
"0.47145575",
"0.47067425",
"0.46776956"
] | 0.6962967 | 0 |
Lists the network sources in your tenancy. You must specify your tenancy's OCID as the value for the compartment ID (remember that the tenancy is simply the root compartment). See `Where to Get the Tenancy's OCID and User's OCID`__. | def list_network_sources(self, compartment_id, **kwargs):
resource_path = "/networkSources"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_network_sources got unknown kwargs: {!r}".format(extra_kwargs))
query_params = {
"compartmentId": compartment_id,
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[NetworkSourcesSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[NetworkSourcesSummary]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources:\")\n print(sources)\n return sources",
"def paths_list(ctx):\n for path in ctx.obj['CLIENT'].paths.list():\n if not path.source.name:\n cidr_blocks = [subnetwork.cidr_block for subnetwork in path.source.subnetworks]\n source_name = \",\".join(cidr_blocks)\n network_name = \"external\"\n else:\n source_name = path.source.name\n network_name = path.source.network.name\n click.echo(\"%s:%s -(%s)-> %s:%s\" % (network_name, source_name, path.port,\n path.network.name, path.destination.name))",
"def ListSources(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources\")\n print(sources)\n return sources",
"def show_sources_all():\n response = requests.get(SOURCE_URL)\n json = response.json()\n for source in json['sources']:\n print(u\"{0}: <{1}> {2}\".format(\"News Code\", source['id'], source['name']))",
"def get_datasource_list():\n global datasource_list\n\n if not datasource_list:\n datasource_list = stixhelpers.get_datasources(get_srcs())\n\n return datasource_list",
"def listsources():\n\tmain_url = \" https://newsapi.org/v2/sources?apiKey=5f81b593f35d42a8980313250c03d7e7\"\n\n\t# fetching data in json format \n\topen_source = requests.get(main_url).json() \n\n\t# getting all articles in a string sources\n\tsource = open_source[\"sources\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n\tresults = [] \n\t\n\tfor k in source: \n results.append(k[\"id\"])\n \n \t\n\tfor w in results[0:4]:\n print(w)",
"def network_list(request):\n flatpage = get_flatpage_or_none(request)\n network_list = Network.objects.filter(user_id=0)\n\n return {\n 'flatpage': flatpage,\n 'network_list': network_list,\n }",
"def network_list_for_tenant(request, tenant_id, include_external=False,\n include_pre_auto_allocate=False, page_data=None,\n **params):\n\n # Pagination is implemented consistently with nova and cinder views,\n # which means it is a bit hacky:\n # - it requests X units but displays X-1 units\n # - it ignores the marker metadata from the API response and uses its own\n # Here we have extra hacks on top of that, because we have to merge the\n # results of 3 different queries, and decide which one of them we are\n # actually paginating.\n # The 3 queries consist of:\n # 1. Shared=True networks\n # 2. Project non-shared networks\n # 3. External non-shared non-project networks\n # The main reason behind that order is to maintain the current behavior\n # for how external networks are retrieved and displayed.\n # The include_external assumption of whether external networks should be\n # displayed is \"overridden\" whenever the external network is shared or is\n # the tenant's. Therefore it refers to only non-shared non-tenant external\n # networks.\n # To accomplish pagination, we check the type of network the provided\n # marker is, to determine which query we have last run and whether we\n # need to paginate it.\n\n LOG.debug(\"network_list_for_tenant(): tenant_id=%(tenant_id)s, \"\n \"params=%(params)s, page_data=%(page_data)s\", {\n 'tenant_id': tenant_id,\n 'params': params,\n 'page_data': page_data,\n })\n\n page_data, marker_net = _configure_pagination(\n request, params, page_data, tenant_id=tenant_id)\n\n query_kwargs = {\n 'request': request,\n 'include_external': include_external,\n 'tenant_id': tenant_id,\n 'page_data': page_data,\n **params,\n }\n\n return _perform_query(\n _query_nets_for_tenant, query_kwargs, marker_net,\n include_pre_auto_allocate)",
"def get_results_from_aggregation_sources(self, context):\n sources = context.getContentSources()\n results = []\n for source in sources:\n sresults = source.queryCatalog()\n if not sresults:\n continue\n results.append({\n 'id': source.id,\n 'title': source.Title(),\n 'description': source.Description(),\n 'uid': source.UID(),\n 'portal_type': sresults[0].portal_type,\n 'brains': sresults,\n 'brains_count': len(sresults),\n })\n return results",
"def list_protection_sources(cohesity_client, env=\"kView\"):\n sources = cohesity_client.protection_sources.list_protection_sources(\n environments=env\n )\n sources = sources if sources else []\n return sources",
"def networks(view):\n return \"network?\" \\\n \"_return_fields=\" \\\n \"extattrs,\" \\\n \"comment,\" \\\n \"network,\" \\\n \"network_view,\" \\\n \"utilization&\" \\\n \"network_view=\" + view + \\\n \"&_max_results=-25000\"",
"def show_networks():\n return get_networks()",
"def list_sources(config, base_dir, verbose=False):\n for source in config.sources_under(abspath(base_dir)):\n if verbose:\n print(\"# %s (%s)\" % (source.nicedir, ' '.join(source.info)))\n else:\n print(source.nicedir)",
"def sources(self) -> Sequence[Any]:\n return pulumi.get(self, \"sources\")",
"def do_network_list(cs, args):\n opts = {}\n opts['container'] = args.container\n opts = zun_utils.remove_null_parms(**opts)\n networks = cs.containers.network_list(**opts)\n zun_utils.list_container_networks(networks)",
"def getNetworksList():\n logger.debug('Start.')\n code, res = rest_requests.get(networks_url)\n if code != requests.codes.ok:\n logger.error((code, res))\n return None\n return res[\"networks\"]",
"def load_network_templates(self) -> List:\n try:\n network_templates = self.api.get(host=self.host, endpoint=f\"/api/v1/orgs/{self.oid}/networktemplates\")\n except Exception as e:\n logger.error(f\"{TextColors.FAIL}Error getting network templates:{TextColors.ENDC} {e}\")\n raise e\n self.network_templates = network_templates",
"def list_sources(username, token=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}?access_token={2}\".format(\n mapbox_api, username, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n for source in r.json():\n click.echo(source[\"id\"])\n else:\n raise errors.TilesetsError(r.text)",
"def netlist(self):\n return self._netlist",
"def sources(self) -> Optional[Sequence['outputs.AddressPrefixItemResponse']]:\n return pulumi.get(self, \"sources\")",
"def source_list(self):\n return list(self._client.group.streams_by_name().keys())",
"def list_net(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing the networks\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Network List : %s \" % output)\n return output['networks']",
"def get_all_feed_sources(request):\n feed_sources = FeedSource.objects.all().order_by('-id')\n return get_feed_sources_list(feed_sources)",
"def get_all_host(self, conf, tenant_id, network_id):\n\t\tpass",
"def get_network_list(network = None, include_details = True):\n \n if network == None: \n json_obj = requests.get(api_base_url + 'networks')\n return json.loads(json_obj.content)['networks']\n rq_url = api_base_url + '{}/sites'.format(network)\n json_obj = requests.get(rq_url)\n sites_list = json.loads(json_obj.content)\n d = OrderedDict(zip([x.pop('network_siteid') for x in sites_list['sites']], \n sites_list['sites']))\n if include_details: return d\n return d.keys()",
"def Sources():\n return _sources",
"def list_subnets(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n\n if not verbose:\n attributes = [\"distinguishedName\", \"name\", \"description\"]\n else:\n attributes = ALL\n\n if verbose:\n self.display(\n self.engine.query(\n self.engine.SITES_FILTER(),\n attributes, base=','.join([\"CN=Configuration\", self.engine.base_dn])\n ),\n verbose\n )\n else:\n entries = self.engine.query(self.engine.SITES_FILTER(), attributes, base=','.join([\"CN=Configuration\", self.engine.base_dn]))\n\n site_dn = \"\"\n site_name = \"\"\n site_description = \"\"\n # subnet_dn = \"\"\n subnet_name = \"\"\n subnet_description = \"\"\n for entry in entries:\n site_dn = entry[\"distinguishedName\"] if entry[\"distinguishedName\"] else \"\"\n site_name = entry[\"name\"] if entry[\"name\"] else \"\"\n site_description = entry[\"description\"][0] if entry[\"description\"] else \"\"\n subnet_entries = self.engine.query(self.engine.SUBNET_FILTER(site_dn), attributes, base=','.join([\"CN=Sites,CN=Configuration\", self.engine.base_dn]))\n for subnet in subnet_entries:\n # subnet_dn = subnet[\"distinguishedName\"] if subnet[\"distinguishedName\"] else \"\"\n subnet_name = subnet[\"name\"] if subnet[\"name\"] else \"\"\n subnet_description = subnet[\"description\"][0] if subnet[\"description\"] else \"\"\n servers = self.engine.query(\"(objectClass=server)\", ['cn'], base=site_dn)\n servers_list = [d['cn'] for d in servers]\n\n output = \"Site: {}\".format(site_name)\n output += \" | Subnet: {}\".format(subnet_name) if subnet_name else \"\"\n output += \" | Site description: {}\".format(site_description) if site_description else \"\"\n output += \" | Subnet description: {}\".format(subnet_description) if subnet_description else \"\"\n output += \" | Servers: {}\".format(', '.join(servers_list)) if servers_list else \"\"\n print(output)",
"def fetch_list(self):\n\t\treturn self.fetch(self.list_url % ART_SERVER_HOST)",
"def sources(self):\n return self._sources"
] | [
"0.6069622",
"0.60684687",
"0.6046004",
"0.60339135",
"0.59831995",
"0.59754866",
"0.59675294",
"0.5898026",
"0.586566",
"0.5807168",
"0.5727688",
"0.5714986",
"0.5702389",
"0.5680545",
"0.564806",
"0.5645466",
"0.56405693",
"0.56157684",
"0.55836064",
"0.55833864",
"0.55707765",
"0.5557252",
"0.55565894",
"0.55148464",
"0.54938745",
"0.54872566",
"0.54819494",
"0.5478539",
"0.5476802",
"0.54744625"
] | 0.66614133 | 0 |
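Editorial aside on the record above: the query text describes list_network_sources and the document column carries its SDK implementation, but no caller-side usage appears in the row. A minimal sketch of invoking it is given below; it assumes the oci Python package, a standard ~/.oci/config profile readable by oci.config.from_file(), and that printing the name and id attributes of each summary is merely illustrative — none of this comes from the record itself.

import oci

# Load the default SDK/CLI profile; its tenancy OCID doubles as the root compartment OCID.
config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# List network sources defined at the tenancy (root compartment) level.
response = identity.list_network_sources(config["tenancy"], limit=50)
for source in response.data:
    print(source.name, source.id)

Passing the tenancy OCID as the compartment ID mirrors the docstring's note that the tenancy is simply the root compartment.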
Lists the policies in the specified compartment (either the tenancy or another of your compartments). See `Where to Get the Tenancy's OCID and User's OCID`__. To determine which policies apply to a particular group or compartment, you must view the individual statements inside all your policies. There isn't a way to automatically obtain that information via the API. | def list_policies(self, compartment_id, **kwargs):
resource_path = "/policies"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_policies got unknown kwargs: {!r}".format(extra_kwargs))
query_params = {
"compartmentId": compartment_id,
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[Policy]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[Policy]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_policies(self):\n client = self.connect(VAULT_TOKEN)\n return client.list_policies()",
"def list_policies(policystore_url, verbose):\n\n if verbose:\n logging.info('Listing policies')\n\n list_url = policystore_url + POLICYSTORE_PREFIX + 'ListEntitlementPolicies'\n\n r = requests.post(list_url, headers=headers(), json={})\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n sys.exit('Failed to list policies')\n\n logging.info('SUCCESS: Listed policies')\n\n resp = r.json()\n\n if verbose:\n logging.info('Policies retrieved')\n pprint.pprint(resp)\n\n return resp",
"def list_policies(profile=None, api_key=None):\n return salt.utils.pagerduty.list_items(\n \"escalation_policies\",\n \"id\",\n __salt__[\"config.option\"](profile),\n api_key,\n opts=__opts__,\n )",
"def list_policies(self):\n return self.con.list_policies(\n Scope='Local'\n )",
"def policies(self):\n return self._data.get('policies')",
"def list_auth_policies(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n attributes = ALL if verbose else [\"cn\", \"objectClass\"]\n\n self.display(\n self.engine.query(\n self.engine.AUTH_POLICIES_FILTER(),\n attributes, base=','.join([\"CN=AuthN Policy Configuration,CN=Services,CN=Configuration\", self.engine.base_dn])\n ),\n verbose\n )",
"def ListPolicies(self, request, global_params=None):\n config = self.GetMethodConfig('ListPolicies')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def _get_policies(self):\n flag, response = self._commcell_object._cvpysdk_object.make_request('GET', self._POLICY)\n\n if flag:\n if response.json() and 'taskDetail' in response.json():\n policies = response.json()['taskDetail']\n policies_dict = {}\n\n for policy in policies:\n temp_name = policy['task']['taskName'].lower()\n temp_id = str(policy['task']['taskId']).lower()\n policies_dict[temp_name] = temp_id\n\n return policies_dict\n else:\n raise SDKException('Response', '102')\n else:\n response_string = self._commcell_object._update_response_(response.text)\n raise SDKException('Response', '101', response_string)",
"def policies(self, request):\n policies = OtterPolicies(self.store, self.tenant_id, self.group_id,\n self.dispatcher)\n return policies.app.resource()",
"def get_policies():\r\n policy = policies.values()\r\n return policy",
"def policies(self):\n return self._policies",
"def rbac_policy_list(request, **kwargs):\n policies = neutronclient(request).list_rbac_policies(\n **kwargs).get('rbac_policies')\n return [RBACPolicy(p) for p in policies]",
"def policy_list(request, **kwargs):\n policies = neutronclient(request).list_qos_policies(\n **kwargs).get('policies')\n return [QoSPolicy(p) for p in policies]",
"def list_acl_policies(client, container_name, **kwargs):\n return _get_acl(client, container_name, **kwargs)",
"def list_policies(policies, verbosity):\n print()\n if verbosity < 1:\n rows = []\n for p in sorted_by_name(policies):\n rows.append((p.name, p.generator, p.length, p.frequency))\n print_table(('NAME', 'GEN', 'LEN', 'FREQ'), rows)\n else:\n for policy in sorted_by_name(policies):\n chars = NONE\n if policy.disallowed_characters:\n chars = ''.join(sorted(policy.disallowed_characters))\n print_detail(\n policy.name, (\n ('description', nullable(policy.description)),\n ('specs', get_policy_specs(policy)),\n ('∅ chars', chars),\n ),\n )\n print()",
"def list_workload_policies(self, params=None):\n uri = 'proj/list_workload_policies'\n if params:\n uri += '?%s' % urllib.urlencode(params)\n \n resp, body = self.get(uri)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBodyList(resp, body['workload_policies'])",
"def list_org_policies(self, resource, fields=None, max_results=None,\n verb='listOrgPolicies', **kwargs):\n arguments = {'resource': resource, 'fields': fields, 'body': {}}\n if max_results:\n arguments['body']['pageSize'] = max_results\n\n if kwargs:\n arguments.update(kwargs)\n\n for resp in self.execute_search_query(\n verb=verb,\n verb_arguments=arguments):\n yield resp",
"def list_policy(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/policies'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1PolicyList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def list_compartments(self, compartment_id, **kwargs):\n resource_path = \"/compartments\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"access_level\",\n \"compartment_id_in_subtree\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_compartments got unknown kwargs: {!r}\".format(extra_kwargs))\n\n if 'access_level' in kwargs:\n access_level_allowed_values = [\"ANY\", \"ACCESSIBLE\"]\n if kwargs['access_level'] not in access_level_allowed_values:\n raise ValueError(\n \"Invalid value for `access_level`, must be one of {0}\".format(access_level_allowed_values)\n )\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"accessLevel\": kwargs.get(\"access_level\", missing),\n \"compartmentIdInSubtree\": kwargs.get(\"compartment_id_in_subtree\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Compartment]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Compartment]\")",
"def available_policies(self):\n return tuple(self._policies.keys())",
"def GetPolicies(self):\n policy = {}\n if json is None:\n logging.error('No JSON module, cannot parse policy information')\n else :\n try:\n policy = json.loads(open(self.policy_path).read(), strict=False)\n except IOError:\n logging.error('Failed to load policies from %s' % self.policy_path)\n return policy",
"def test_get_hyperflex_vcenter_config_policy_list(self):\n pass",
"def role_policy_statements(role_name):\n policies = []\n role = iam.Role(role_name)\n \n role_policies = [p.policy_document['Statement'] for p in role.policies.all()]\n for p in role_policies:\n policies.extend(p)\n \n attached_policies = [p.default_version.document['Statement'] for p in role.attached_policies.all()]\n for p in attached_policies:\n policies.extend(p)\n\n return policies",
"def child_policies(self) -> Sequence['outputs.SubResourceResponse']:\n return pulumi.get(self, \"child_policies\")",
"def test_get_hyperflex_ucsm_config_policy_list(self):\n pass",
"def test_list_policy_for_all_namespaces(self):\n pass",
"def describe_service_access_policies(DomainName=None, Deployed=None):\n pass",
"def list_ikepolicies(self, retrieve_all=True, **_params):\r\n return self.list('ikepolicies', self.ikepolicies_path, retrieve_all,\r\n **_params)",
"def test_list_ikepolicy_sort(self):\r\n resources = \"ikepolicies\"\r\n cmd = ikepolicy.ListIKEPolicy(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])",
"def service_endpoint_policies(self) -> Optional[Sequence['outputs.ServiceEndpointPolicyResponse']]:\n return pulumi.get(self, \"service_endpoint_policies\")"
] | [
"0.7189241",
"0.6911353",
"0.69109756",
"0.6786608",
"0.6505484",
"0.6489422",
"0.6399176",
"0.6282769",
"0.6273543",
"0.62666065",
"0.6170152",
"0.61578345",
"0.59558356",
"0.5855535",
"0.5853909",
"0.58492464",
"0.57598",
"0.57497317",
"0.5630949",
"0.56128603",
"0.56102735",
"0.5597646",
"0.55645853",
"0.5524058",
"0.54679096",
"0.54492956",
"0.54452443",
"0.5431996",
"0.5408298",
"0.5382999"
] | 0.7190767 | 0 |
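Editorial aside on the record above: list_policies supports page and limit parameters, so a usage sketch that drains all pages may be clearer than a single call. The sketch assumes the oci SDK's pagination helper and a default config profile; the compartment OCID is a placeholder, not a value taken from the record.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Placeholder OCID; substitute a real compartment OCID or the tenancy OCID.
compartment_id = "ocid1.compartment.oc1..example"

# list_call_get_all_results follows the opc-next-page token until the listing is exhausted.
policies = oci.pagination.list_call_get_all_results(
    identity.list_policies, compartment_id
).data
for policy in policies:
    print(policy.name, len(policy.statements))

As the docstring notes, working out which policies affect a given group still means reading the statements of each returned Policy; the API does not resolve that for you.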
Lists the region subscriptions for the specified tenancy. | def list_region_subscriptions(self, tenancy_id, **kwargs):
resource_path = "/tenancies/{tenancyId}/regionSubscriptions"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_region_subscriptions got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"tenancyId": tenancy_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="list[RegionSubscription]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="list[RegionSubscription]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getAllSubscriptions(self):\n return self.request(\n \"getAllSubscriptions\",\n )",
"def listSubscriptions() -> object:\n\n db = Db()\n return db.Subscriptions.objects().to_json()",
"def GetSubscriptions(self):\n\n return self.__GetJson(\"/subscriptions\", True)",
"def subscriptions(self):\r\n return subs.AccountSubscriptions(self)",
"def list_subscriptions(profile=None):\n if profile is None:\n profile = subscription_profile()\n cred, _, _ = profile.get_login_credentials()\n sub_client = SubscriptionClient(cred)\n return [\n {\"Index\": i, \"Name\": sub.display_name, \"id\": sub.subscription_id}\n for i, sub in enumerate(sub_client.subscriptions.list())\n ]",
"def get(self, orgname):\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n organization = model.organization.get_organization(orgname)\n query = model.organization_skus.get_org_subscriptions(organization.id)\n\n if query:\n subscriptions = list(query.dicts())\n for subscription in subscriptions:\n subscription[\"sku\"] = marketplace_subscriptions.get_subscription_sku(\n subscription[\"subscription_id\"]\n )\n return subscriptions\n else:\n return []\n abort(401)",
"def list_subscriptions(\n connection, project_id, fields=None, offset=0, limit=-1, error_msg=None\n):\n return connection.get(\n url=f'{connection.base_url}/api/subscriptions',\n params={'offset': offset, 'limit': limit, 'fields': fields},\n headers={'X-MSTR-ProjectID': project_id},\n )",
"def ListSubscriptions(): # pylint: disable=unused-variable\n\n try:\n list_request = json_format.Parse(request.get_data(),\n sheriff_config_pb2.ListRequest())\n except json_format.ParseError as error:\n return jsonify(\n {'messages': [{\n 'severity': 'ERROR',\n 'text': '%s' % (error)\n }]}), 400\n list_response = sheriff_config_pb2.ListResponse()\n configs = list(luci_config.ListAllConfigs(datastore_client))\n configs = match_policy.FilterSubscriptionsByIdentity(\n auth_client, list_request, configs)\n for config_set, revision, subscription in configs:\n subscription_metadata = list_response.subscriptions.add()\n subscription_metadata.config_set = config_set\n subscription_metadata.revision = revision\n luci_config.CopyNormalizedSubscription(subscription,\n subscription_metadata.subscription)\n return (json_format.MessageToJson(\n list_response, preserving_proto_field_name=True), 200, {\n 'Content-Type': 'application/json'\n })",
"def get_subscriptions(self):\n url = '{}/v2/subscriptions'.format(self.url)\n r = requests.get(url, headers=self.headers_v2)\n return r.json()",
"def list(cls, **kwargs):\n response = Yola().list_subscriptions(**kwargs)\n return [cls(**sub) for sub in response['results']]",
"def get_all_subscriptions(self, next_token=None):\r\n params = {'ContentType' : 'JSON'}\r\n if next_token:\r\n params['NextToken'] = next_token\r\n response = self.make_request('ListSubscriptions', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)",
"def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass",
"def get_subscriptions(self):\n return self.subscriptions.all()",
"def subscriptions(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"subscriptions\")",
"def subscriptions(self):\r\n return v3.Subscriptions(self)",
"def get_subscriptions(self) -> Iterator[\"Subscription\"]:\n yield from self._subscriptions[self.id]",
"def getSubscriptions(self):\n\n address = self.getAddress()\n if address is None:\n return []\n else:\n return [\n \"shellies/announce\",\n \"{}/online\".format(address),\n \"{}/emeter/{}/energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/returned_energy\".format(address, self.getChannel()),\n \"{}/emeter/{}/power\".format(address, self.getChannel()),\n \"{}/emeter/{}/reactive_power\".format(address, self.getChannel()),\n \"{}/emeter/{}/voltage\".format(address, self.getChannel()),\n \"{}/emeter/{}/total\".format(address, self.getChannel()),\n \"{}/emeter/{}/total_returned\".format(address, self.getChannel())\n ]",
"def list(pat: str, resource_registration_endpoint: str,\n secure: bool = False) -> List[str]:\n headers={\"Authorization\": \"Bearer \"+pat}\n\n disable_warnings_if_debug(secure)\n response = request(\"GET\", resource_registration_endpoint, headers=headers, verify=secure)\n\n if not is_ok(response):\n raise Exception(\"An error occurred while listing resources: \"+str(response.status_code)+\":\"+str(response.reason)+\":\"+str(response.text))\n\n return response.json()",
"def get_all_subscriptions(cls, **kwargs):\n return Subscription.query.filter(**kwargs).all()",
"def subscriptions(self):\n if not hasattr(self, '_subscriptions'):\n subscriptions_resource = self.resource.subscriptions\n self._subscriptions = Subscriptions(\n subscriptions_resource, self.client)\n return self._subscriptions",
"def list_subscriptions_command(client: KeyVaultClient) -> CommandResults:\n response = client.list_subscriptions_request()\n\n readable_output = tableToMarkdown('Subscriptions List',\n response,\n ['subscriptionId', 'tenantId',\n 'state', 'displayName'\n ],\n removeNull=True, headerTransform=string_to_table_header)\n return CommandResults(\n outputs_prefix='AzureKeyVault.Subscription',\n outputs_key_field='id',\n outputs=response,\n raw_response=response,\n readable_output=readable_output,\n )",
"def list_subscriptions_async(\n future_session: \"FuturesSession\",\n connection,\n project_id,\n fields=None,\n offset=0,\n limit=-1,\n):\n params = {'offset': offset, 'limit': limit, 'fields': fields}\n url = f'{connection.base_url}/api/subscriptions'\n headers = {'X-MSTR-ProjectID': project_id}\n\n return future_session.get(url=url, headers=headers, params=params)",
"def _get_cloudwatch_subscriptions(self):\n return self._get_subscriptions(self.cloudwatch_arn)",
"def test_list_all_response_descriptor_subscriptions_subscription_subscription_resource(self):\n pass",
"def list_regions(self, **kwargs):\n resource_path = \"/regions\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_regions got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")",
"def region_clients(self, **kwargs):\n return stats.region_clients(self._host, self._session, **kwargs)",
"def list_subscriptions(self):\n return {'abonnementen': self.customer.abonnementen}",
"def _get_subscriptions(self, topic_arn):\n return self.conn.get_all_subscriptions_by_topic(topic_arn)['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions']",
"def list_tenants(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants\"\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \" tenant list Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant List : %s \" % output)\n return output[\"tenants\"]",
"def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations"
] | [
"0.6887986",
"0.64734304",
"0.6237735",
"0.6100922",
"0.6089093",
"0.6054437",
"0.6008919",
"0.5998848",
"0.5985489",
"0.5876979",
"0.58604753",
"0.58339596",
"0.58058745",
"0.57581854",
"0.57325035",
"0.5700422",
"0.56921446",
"0.56718814",
"0.5643824",
"0.56429935",
"0.5636409",
"0.56253004",
"0.56243",
"0.5569091",
"0.5507758",
"0.5484625",
"0.54808044",
"0.5449644",
"0.5409465",
"0.54038334"
] | 0.7903534 | 0 |
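Editorial aside on the record above: a short sketch of calling list_region_subscriptions, under the same assumptions as the earlier sketches (oci package installed, default ~/.oci/config profile). The is_home_region and status attributes printed here are illustrative choices, not mandated by the record.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Region subscriptions are listed against the tenancy OCID, not a child compartment.
for sub in identity.list_region_subscriptions(config["tenancy"]).data:
    marker = "(home)" if sub.is_home_region else ""
    print(sub.region_name, sub.status, marker)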
Lists the SMTP credentials for the specified user. The returned object contains the credential's OCID, the SMTP user name but not the SMTP password. The SMTP password is returned only upon creation. | def list_smtp_credentials(self, user_id, **kwargs):
resource_path = "/users/{userId}/smtpCredentials"
method = "GET"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_smtp_credentials got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="list[SmtpCredentialSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="list[SmtpCredentialSummary]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_credentials(user):\n return Credentials.list_credentials(user)",
"def get_user_credentials(connection):\n\n response = connection.get_json('user')\n user_data = response.get('user', None)\n if user_data is None:\n raise SAPCliError('gCTS response does not contain \\'user\\'')\n\n config_data = user_data.get('config', None)\n if config_data is None:\n return []\n\n user_credentials = [cred for cred in config_data if cred['key'] == 'USER_AUTH_CRED_ENDPOINTS']\n return json.loads(user_credentials[0]['value'])",
"def list_credentials(self, **_params):\r\n return self.get(self.credentials_path, params=_params)",
"def get_user_cred(self):\n if Config.eap_outer == 'PEAP' or Config.eap_outer == 'TTLS':\n self.__get_credentials_from_config()",
"def create_smtp_credential(self, create_smtp_credential_details, user_id, **kwargs):\n resource_path = \"/users/{userId}/smtpCredentials\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_smtp_credential got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_smtp_credential_details,\n response_type=\"SmtpCredential\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=create_smtp_credential_details,\n response_type=\"SmtpCredential\")",
"def GetCredentials(self):\n return self._session.get(_CREDENTIAL_KEY, credentials.MapdCredentials())",
"def display_credentials(cls):\n return cls.credential_list",
"def display_credentials(cls):\n return cls.credential_list",
"def display_credentials(cls):\n return cls.credential_list",
"def email_user(user, template_path, from_address, context_dict):\n return email_list([user.email], template_path, from_address, context_dict)",
"def get_all_credentials():\n session = db.get_session()\n return (session.query(network_models_v2.Credential).all())",
"def list_o_auth_client_credentials(self, user_id, **kwargs):\n resource_path = \"/users/{userId}/oauth2ClientCredentials\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"lifecycle_state\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_o_auth_client_credentials got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"userId\": user_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n if 'lifecycle_state' in kwargs:\n lifecycle_state_allowed_values = [\"CREATING\", \"ACTIVE\", \"INACTIVE\", \"DELETING\", \"DELETED\"]\n if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:\n raise ValueError(\n \"Invalid value for `lifecycle_state`, must be one of {0}\".format(lifecycle_state_allowed_values)\n )\n\n query_params = {\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"lifecycleState\": kwargs.get(\"lifecycle_state\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[OAuth2ClientCredentialSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[OAuth2ClientCredentialSummary]\")",
"def display_credentials(cls):\n return cls.credentials_list",
"def display_credentials(cls):\n return cls.credentials_list",
"def user_credentials(self):\r\n credentials = {}\r\n if EMAIL_AUTHENTICATION:\r\n credentials[\"email\"] = self.cleaned_data[\"email\"]\r\n else:\r\n credentials[\"username\"] = self.cleaned_data[\"username\"]\r\n credentials[\"password\"] = self.cleaned_data[\"password1\"]\r\n return credentials",
"def user_credentials(self):\r\n credentials = {}\r\n if EMAIL_AUTHENTICATION:\r\n credentials[\"email\"] = self.cleaned_data[\"email\"]\r\n else:\r\n credentials[\"username\"] = self.cleaned_data[\"username\"]\r\n credentials[\"password\"] = self.cleaned_data[\"password\"]\r\n return credentials",
"def credentials(self) -> Sequence['outputs.DeviceCredentialResponse']:\n return pulumi.get(self, \"credentials\")",
"def get_credentials(self):\n return PlainCredentials(self.user_name, self.password)",
"def getCredentials(self):\n if self.result(): # Accepted?\n username = self.username_le.text()\n password = \"\"\n if self.askpassword:\n password = self.password_le.text()\n\n return username, password\n\n raise CredentialDialogReject()",
"def credentials(self) -> pulumi.Output[Optional['outputs.CredentialsResponse']]:\n return pulumi.get(self, \"credentials\")",
"def GetUserCredentials():\n email = options.email\n if email is None:\n email = GetEmail(\"Email (login for uploading to %s)\" % options.server)\n password = getpass.getpass(\"Password for %s: \" % email)\n return (email, password)",
"def credentials(self):\n\n return self._credentials",
"def credentials(self):\n return self._credentials",
"def get_cred(site_id, user_id):\n log = current_app.log\n db = request.db\n Cred = db.tables.Cred\n cred = Cred.query.filter_by(cred_owner=user_id,\n site_id=site_id).first_or_404()\n log.info(\"Fetched cred for user %u at site %u.\", user_id, site_id)\n return jsonify(cred.cred_value)",
"def display_credential(cls):\n return cls.credential_list",
"def GetUserCredentials(self):\r\n # Create a local alias to the email variable to avoid Python's crazy\r\n # scoping rules.\r\n global keyring\r\n email = self.email\r\n if email is None:\r\n email = GetEmail(\"Email (login for uploading to %s)\" % self.server)\r\n password = None\r\n if keyring and not email in self.accounts_seen:\r\n try:\r\n password = keyring.get_password(self.host, email)\r\n except:\r\n # Sadly, we have to trap all errors here as\r\n # gnomekeyring.IOError inherits from object. :/\r\n print \"Failed to get password from keyring\"\r\n keyring = None\r\n if password is not None:\r\n print \"Using password from system keyring.\"\r\n self.accounts_seen.add(email)\r\n else:\r\n password = getpass.getpass(\"Password for %s: \" % email)\r\n if keyring:\r\n answer = raw_input(\"Store password in system keyring?(y/N) \").strip()\r\n if answer == \"y\":\r\n keyring.set_password(self.host, email, password)\r\n self.accounts_seen.add(email)\r\n return (email, password)",
"def get_credentials(self):\n return self.credentials",
"def get_credentials(service_name=\"dataforSeo\", uname=\"[email protected]\"):\n pw = keyring.get_password(service_name, uname)\n return [uname, pw]",
"def list_credentials():\n creds = load_auth()\n max_username_len = max([len(c.username) for c in creds]) if len(creds) > 0 else 1\n long_format = f\"{{:{max_username_len}}} for {{}}\"\n for cred in creds:\n if len(cred.hostname) > 0:\n print(str.format(long_format, cred.username, cred.hostname))\n else:\n print(cred.username)\n if len(creds) == 0 and os.isatty(1):\n print(\"No credentials configured\")",
"def get_stored_credentials(user_id):\n #\n # To instantiate an OAuth2Credentials instance from a Json\n # representation, use the oauth2client.client.Credentials.new_from_json\n # class method.\n user = engine.query(User).filter(userId=user_id).first()\n if user:\n user_dict = user.__dict__\n if user_dict['credentials']:\n # credentials = Credentials.new_from_json(user['credentials'])\n credentials = json.loads(user_dict['credentials'])\n token_expiry = credentials['token_expiry']\n dexp = parser.parse(str(token_expiry))\n dexp = dexp.replace(tzinfo=None)\n dnow = datetime.now()\n\n if dexp > dnow:\n return Credentials.new_from_json(user_dict['credentials'])\n else:\n status_code, data = renew_access_token(client_id=credentials['client_id'],\n client_secret=credentials['client_secret'],\n refresh_token=credentials['refresh_token'],\n )\n if status_code == INT_OK:\n credentials['access_token'] = data['access_token']\n credentials['token_expiry'] = datetime_util(datetime.now() + timedelta(seconds=float(str(data['expires_in']))))\n credentials = Credentials.new_from_json(json_encode(credentials))\n user.update_credentials(credentials.to_json())\n user.sync()\n return credentials\n else:\n return None\n else:\n return None\n return None"
] | [
"0.7158633",
"0.63490164",
"0.5840114",
"0.5356084",
"0.5350056",
"0.5333744",
"0.53025347",
"0.53025347",
"0.53025347",
"0.5278439",
"0.5255396",
"0.52334976",
"0.5210725",
"0.5210725",
"0.5193972",
"0.51926774",
"0.5166273",
"0.51630765",
"0.51324713",
"0.51061445",
"0.51052934",
"0.51003575",
"0.5089874",
"0.5070253",
"0.5051216",
"0.5050097",
"0.502",
"0.49585772",
"0.49421754",
"0.49201402"
] | 0.7803695 | 0 |
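Editorial aside on the record above: a minimal sketch of listing a user's SMTP credentials, assuming the oci SDK and a default config profile; the user OCID below is a placeholder. Consistent with the docstring, the summaries expose the SMTP user name but never the SMTP password, which is only returned when the credential is created.

import oci

config = oci.config.from_file()
identity = oci.identity.IdentityClient(config)

# Placeholder user OCID; replace with the OCID of the user whose credentials you want.
user_id = "ocid1.user.oc1..example"
for cred in identity.list_smtp_credentials(user_id).data:
    print(cred.username, cred.lifecycle_state)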
Lists the tag defaults for tag definitions in the specified compartment. | def list_tag_defaults(self, **kwargs):
resource_path = "/tagDefaults"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit",
"id",
"compartment_id",
"tag_definition_id",
"lifecycle_state"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_tag_defaults got unknown kwargs: {!r}".format(extra_kwargs))
if 'lifecycle_state' in kwargs:
lifecycle_state_allowed_values = ["ACTIVE"]
if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
raise ValueError(
"Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
)
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"id": kwargs.get("id", missing),
"compartmentId": kwargs.get("compartment_id", missing),
"tagDefinitionId": kwargs.get("tag_definition_id", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[TagDefaultSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[TagDefaultSummary]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initDefaults(self):\n return _libsbml.CompartmentGlyph_initDefaults(self)",
"def initDefaults(self):\n return _libsbml.Compartment_initDefaults(self)",
"def defaults(file):\n\n\tUNCAT_TAGID = 47\n\tNOSERIES_TAGID = 375\n\n\treturn [NOSERIES_TAGID, UNCAT_TAGID]",
"def get_default_vpas(self, composition_space):\n\n default_vpas = {}\n for element in composition_space.get_all_elements():\n default_vpas[element.symbol] = self.all_default_vpas[\n element.symbol]\n return default_vpas",
"def init_defaults(self, defaults):\r\n for (sect, opt, default) in defaults:\r\n self._default(sect, opt, default)",
"def defaults(self):\n return self.conf.get(\"defaults\", [])",
"def _add_default_tags(self):\n self.tags.add_tag('ban', required=True)",
"def defaults():\n global __preset_staging\n \n t = TreeDict('Default_Parameter_Tree', __defaultpresettree__ = True)\n __preset_staging[id(t)] = t\n return t",
"def default_kernel_config(defn):\n # XXX(stephentu): should the default config also include cluster_hp?\n return list(it.chain(\n default_assign_kernel_config(defn),\n default_feature_hp_kernel_config(defn)))",
"def defaults():\n return {}",
"def defaults() -> dict:\n pass",
"def show_defaults(context: CreateCommandsContext):\n logger.info(\"Default parameters when creating jobs:\")\n for parameter in context.settings.job_default_parameters:\n logger.info(parameter.describe())",
"def get_defaults(self):\n\t\treturn self.__defaults",
"def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": None,\n \"attribute\": None,\n \"index_annotation\": None,\n }\n )\n return config",
"def get_persisted_default_config_fields(self):\n return []",
"def defaults(self):\n return self._config_parser.defaults()",
"def _get_default_config_list(parm_base=None):\n default_config_list = []\n if parm_base is None:\n parm_base = PARM_BASE\n\n conf_dir = os.path.join(parm_base,\n METPLUS_CONFIG_DIR)\n\n # if both are found, set old base confs first so the new takes precedence\n for base_conf in OLD_BASE_CONFS + BASE_CONFS:\n conf_path = os.path.join(conf_dir,\n base_conf)\n if os.path.exists(conf_path):\n default_config_list.append(conf_path)\n\n if not default_config_list:\n print(f\"FATAL: No default config files found in {conf_dir}\")\n sys.exit(1)\n\n return default_config_list",
"def default_configs():\n configs=configparser.ConfigParser()\n configs.read(default_configfile())\n \n return configs",
"def help_default_values():\n click.echo_via_pager(docgen.generate_default_value_help())",
"def initDefaultChoices(self):\n return [text for text in self.formatList]",
"def _default() -> list:\n if metadata is None or metadata.default is None:\n return []\n\n return self._always_array(metadata.default)",
"def print_defaults():\n print 'area_bounds :', default_area_bounds\n print 'area_bounds_format :', default_area_bounds_format\n print 'area_bounds_range :', default_area_bounds_range\n print 'years_bounds :', default_years_are_bounds\n print 'dates_are_bounds :', default_dates_are_bounds\n print 'init_date_str_format :', default_init_date_str_format\n print 'member_name :', default_member_name\n print 'period_name :', default_period_name\n print 'initialistion_time_name :', default_initialistion_time_name",
"def default_configs(cls):\n return {\n 'redirect_path': None,\n 'nif_page_structure': None,\n 'nif_text_links': None,\n }",
"def default_tags(self) -> str:\n tags = [\"persistent\"]\n if self.id_tag:\n tags.append(self.id_tag)\n tags.append(\"dismissable\" if self.is_dismissable else \"undismissable\")\n tags.append(\"safe\" if self.mark_content_safe else \"unsafe\")\n return \" \".join(tags)",
"def get_config_defaults(self): # pylint: disable=R0201\n return {}",
"def defaults():\n\n dummy = FieldTemplate.dummy\n\n return {\"disease_demographic_id\": dummy(\"demographic_id\"),\n }",
"def replace_defaults(d):\n\n # remove the defaults section\n defaults = d.pop('.defaults')\n\n # look for default tags and replace them\n for k, v in defaults.items():\n recursive_search_replace(d, '!' + k + '!', v)",
"def defaults():\n\n #dummy = FieldTemplate.dummy\n\n return None",
"def get_defaults():\n\n return {\n 'base_types': _get_base_types(),\n 'template_types': _get_template_types(),\n 'refined_types': _get_refined_types(),\n 'humannames': _get_humannames(),\n 'argument_kinds': _get_argument_kinds(),\n 'variable_namespace': {},\n 'type_aliases': _get_type_aliases(),\n 'cpp_types': _get_cpp_types(),\n 'numpy_types': _get_numpy_types(),\n 'from_pytypes': _get_from_pytypes(),\n 'cython_ctypes': _get_cython_ctypes(),\n 'cython_cytypes': _get_cython_cytypes(),\n 'cython_pytypes': _get_cython_pytypes(),\n 'cython_cimports': _get_cython_cimports(),\n 'cython_cyimports': _get_cython_cyimports(),\n 'cython_pyimports': _get_cython_pyimports(),\n 'cython_functionnames': _get_cython_functionnames(),\n 'cython_classnames': _get_cython_classnames(),\n 'cython_c2py_conv': _get_cython_c2py_conv(),\n 'cython_py2c_conv_vector_ref': CYTHON_PY2C_CONV_VECTOR_REF,\n 'cython_py2c_conv': _get_cython_py2c_conv(),\n }",
"def add_default_options(self):\n\n options = getattr(self.parent, \"pyautodoc_set_default_option\", [])\n for option in options:\n self.set_default_option(option)"
] | [
"0.5788659",
"0.5745859",
"0.5657144",
"0.5510926",
"0.5421845",
"0.52667904",
"0.5232069",
"0.52265173",
"0.51869756",
"0.5130844",
"0.51299524",
"0.5102675",
"0.5096235",
"0.50511813",
"0.5039385",
"0.502269",
"0.50160813",
"0.49855888",
"0.49597377",
"0.4956205",
"0.4953227",
"0.49445555",
"0.4942586",
"0.49185464",
"0.4917683",
"0.4894464",
"0.48850566",
"0.487327",
"0.48522192",
"0.48455822"
] | 0.6216055 | 0 |
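A minimal usage sketch for the list_tag_defaults call above, assuming it is invoked on an identity client instance constructed elsewhere and that its response exposes a .data list plus an opc-next-page header for pagination (neither the client construction nor the response shape appears in the row above; the compartment OCID is caller-supplied):

def fetch_active_tag_defaults(identity_client, compartment_id):
    # Collect every ACTIVE tag default in a compartment, one page at a time.
    results = []
    page = None
    while True:
        kwargs = {
            "compartment_id": compartment_id,  # caller-supplied compartment OCID
            "lifecycle_state": "ACTIVE",       # only value the validation above accepts
            "limit": 100,
        }
        if page:
            kwargs["page"] = page
        response = identity_client.list_tag_defaults(**kwargs)
        results.extend(response.data)                   # assumed response attribute
        page = response.headers.get("opc-next-page")    # assumed pagination header
        if not page:
            break
    return results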
Lists the tag namespaces in the specified compartment. | def list_tag_namespaces(self, compartment_id, **kwargs):
    resource_path = "/tagNamespaces"
    method = "GET"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "page",
        "limit",
        "include_subcompartments",
        "lifecycle_state"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_tag_namespaces got unknown kwargs: {!r}".format(extra_kwargs))

    if 'lifecycle_state' in kwargs:
        lifecycle_state_allowed_values = ["ACTIVE", "INACTIVE", "DELETING", "DELETED"]
        if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
            raise ValueError(
                "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
            )

    query_params = {
        "compartmentId": compartment_id,
        "page": kwargs.get("page", missing),
        "limit": kwargs.get("limit", missing),
        "includeSubcompartments": kwargs.get("include_subcompartments", missing),
        "lifecycleState": kwargs.get("lifecycle_state", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[TagNamespaceSummary]")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[TagNamespaceSummary]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def list_namespaces(self) -> list:\n return await self.AD.state.list_namespaces()",
"def namespaces(self):\n return [self._namespace_prefix]",
"def namespaces(self):\n namespaces = set()\n for namespace_package in self.namespace_packages:\n dotted_name = []\n for component in namespace_package.split('.'):\n dotted_name.append(component)\n namespaces.add(tuple(dotted_name))\n return sorted(namespaces, key=lambda n: len(n))",
"def namespaces(self):\n return list(self._namespace_schemas.keys())",
"def get_namespaces():\n return list(StaticAsset._load_namespaces().keys())",
"def list(self, dict_output=False, field_selector=\"\"):\n namespaces_list = self.client_core.list_namespace().items\n logger.info(\"Got namespaces\")\n\n if field_selector:\n namespaces_list = field_filter(obj_list=namespaces_list,\n field_selector=field_selector)\n # convert the list to list of dicts if required\n if dict_output:\n namespaces_list = [convert_obj_to_dict(namespace) for namespace in\n namespaces_list]\n else:\n for namespace in namespaces_list:\n namespace.metadata.resource_version = ''\n return namespaces_list",
"def getNamespaces(self):\n return _libsbml.SBMLDocument_getNamespaces(self)",
"def _fetch_all_namespaces():\n response = _fetch_herd_session() \\\n .get('{}://{}/{}/{}'.format(HERD_REST_PROTOCOL, HERD_BASE_URL,\n HERD_REST_BASE_PATH, 'namespaces')) \\\n .json()\n\n namespaces = []\n for namespaceKey in response['namespaceKeys']:\n namespaces.append(namespaceKey['namespaceCode'])\n\n _print_info('Retrieved {} namespaces.'.format(len(namespaces)))\n return namespaces",
"def test_list_template_for_all_namespaces(self):\n pass",
"def get_namespaces(self, label_selector=None):\n return self.core_client.list_namespace(label_selector=label_selector)",
"def getNamespaces(self):\n return _libsbml.XMLToken_getNamespaces(self)",
"def getNamespaces(self):\n return _libsbml.SBase_getNamespaces(self)",
"def GetNamespaces(self):\n return list(self.type_namespaces_map.values())",
"def list_namespaced_net_namespace(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/netnamespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespaceList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def getNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_getNamespaces(self, *args)",
"def test_list_net_namespace(self):\n pass",
"def ns_list(self):\n return sorted(self.get_ns_name(ns) for ns in self.profile.authoritative_servers)",
"def test_list_deployment_config_for_all_namespaces(self):\n pass",
"def test_list_build_for_all_namespaces(self):\n pass",
"def namespaces(\n self, index: Union[int, str] = \"len\"\n ) -> Union[List[str], int]:\n if index == \"len\":\n return len(self._namespaces)\n try:\n return self._namespaces[index] # type: ignore\n except IndexError:\n return []",
"def get_namespaces():\r\n\r\n print 'Getting namespaces'\r\n tree = etree.parse('http://lesswrong.wikia.com/wiki/Special:AllPages', parser)\r\n options = tree.xpath('//select[@id=\"namespace\"]/option')\r\n namespaces = [option.get('value') for option in options]\r\n pprint(namespaces)\r\n return namespaces",
"def get_all_namespaces():\n cmds.namespace(setNamespace=':')\n return cmds.namespaceInfo(listOnlyNamespaces=True, recurse=True)",
"def _getnamespaces(cls):\n return \" \".join(Kmlable._namespaces)",
"def namespaces(self):\n if not self._namespaces:\n self.update_namespaces_info()\n\n return self._namespaces",
"def get_namespaces(self):\n if self.namespaces is None:\n namespaces = unpack(self.api.get_namespaces())\n self.namespaces = {\n namespace['name']: DevopsSecurityNamespace(namespace)\n for namespace in namespaces\n }\n return self.namespaces",
"def list_services(self, **kwargs: Optional[Any]) -> list:\n\n self.logger.debug(\"list_services: %s\", kwargs)\n\n namespace = kwargs.get(\"namespace\", \"global\")\n\n return self.AD.services.list_services(namespace) # retrieve services",
"def add_namespaces(specification):\n\n for ns in specification[\"namespaces\"]:\n specification[\"namespaces\"][ns][\"list\"] = []\n specification[\"namespaces\"][ns][\"list_long\"] = []\n specification[\"namespaces\"][ns][\"list_short\"] = []\n\n specification[\"namespaces\"][ns][\"to_short\"] = {}\n specification[\"namespaces\"][ns][\"to_long\"] = {}\n\n for obj in specification[\"namespaces\"][ns][\"info\"]:\n specification[\"namespaces\"][ns][\"list\"].extend([obj[\"name\"], obj[\"abbreviation\"]])\n specification[\"namespaces\"][ns][\"list_short\"].append(obj[\"abbreviation\"])\n specification[\"namespaces\"][ns][\"list_long\"].append(obj[\"name\"])\n\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"abbreviation\"]] = obj[\"abbreviation\"]\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"name\"]] = obj[\"abbreviation\"]\n\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"abbreviation\"]] = obj[\"name\"]\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"name\"]] = obj[\"name\"]\n\n # For AminoAcid namespace\n if \"abbrev1\" in obj:\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"abbrev1\"]] = obj[\"abbreviation\"]\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"abbrev1\"]] = obj[\"name\"]",
"def get_services_in_namespace(self, namespace):\n ret = self.v1_service_list.get(namespace=namespace)\n return [each.metadata.name for each in ret.items]",
"def test_get_namespaces_names(self):\n pass",
"def watch_namespaced_net_namespace_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_net_namespace_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/watch/netnamespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response"
] | [
"0.71969026",
"0.61050427",
"0.5985775",
"0.59844434",
"0.59576994",
"0.5934063",
"0.59100467",
"0.5898842",
"0.58896667",
"0.5883841",
"0.58817756",
"0.58812404",
"0.58636045",
"0.584227",
"0.581729",
"0.5809152",
"0.58016217",
"0.5761703",
"0.5708301",
"0.56499183",
"0.5643741",
"0.5632821",
"0.55997247",
"0.5594167",
"0.5523036",
"0.55090964",
"0.55082816",
"0.55039364",
"0.54784137",
"0.54585445"
] | 0.7114337 | 1 |
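For reference, one way the list_tag_namespaces method above might be called is sketched below; the identity_client object and the tenancy OCID are assumptions (the OCID shown is a placeholder), and the keyword names come straight from the expected_kwargs list in the code:

# Hypothetical invocation: list ACTIVE tag namespaces in a tenancy,
# descending into child compartments as well.
response = identity_client.list_tag_namespaces(
    "ocid1.tenancy.oc1..exampleuniqueID",  # placeholder; compartment_id is the positional argument
    include_subcompartments=True,
    lifecycle_state="ACTIVE",
    limit=50,
)
for namespace in response.data:  # assumed response attribute
    print(namespace)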
Lists the tagging work requests in compartment. | def list_tagging_work_requests(self, compartment_id, **kwargs):
    resource_path = "/taggingWorkRequests"
    method = "GET"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "page",
        "limit",
        "resource_identifier"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_tagging_work_requests got unknown kwargs: {!r}".format(extra_kwargs))

    query_params = {
        "compartmentId": compartment_id,
        "page": kwargs.get("page", missing),
        "limit": kwargs.get("limit", missing),
        "resourceIdentifier": kwargs.get("resource_identifier", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[TaggingWorkRequestSummary]")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[TaggingWorkRequestSummary]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list(self, jobguid=\"\", executionparams=None):",
"def tags(self, request, tag_list, group):\n return tag_list",
"def get_jobs_list(self, response):\n pass",
"def listTagsByNotebook(self, authenticationToken, notebookGuid):\r\n pass",
"def handle_tags(self, request):\n \"\"\"\n @api {get} /tags List tags\n @apiName GetTags\n @apiGroup Misc\n @apiVersion 1.0.0\n\n @apiDescription List currenty used tags\n\n @apiSuccessExample {json} Example response:\n [\n \"tag1\",\n \"tag2\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n tags = []\n\n for task in self.cluster.config.get('tasks').values():\n if 'tags' in task:\n tags += task['tags']\n\n tags = list(set(tags))\n\n return HTTPReply(code = 200, body = json.dumps(tags), headers = headers)",
"def list(self, jobguid=\"\", executionparams=dict()):",
"def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)",
"def listTags(self, authenticationToken):\r\n pass",
"def ListTags(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def listTagsByNotebook(self, authenticationToken, notebookGuid):\r\n self.send_listTagsByNotebook(authenticationToken, notebookGuid)\r\n return self.recv_listTagsByNotebook()",
"def list(self):\n\n\t\treturn self._list(\"/tag\", \"tag\")",
"def listRequests(self):\n reqmgr = RequestManagerImpl()\n retval = []\n for request in reqmgr.listRequests(self.endpoint):\n tmpRequest = Request()\n tmpRequest.setReqmgrUrl( self.endpoint )\n tmpRequest.setWorkflowName( request['request_name'] )\n retval.append( tmpRequest )\n return retval",
"def get_job_list(self):\n return self.job_list",
"def get_job_list(self):\n return self.job_list",
"def __gitTagList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), True)",
"def list_all_tags(self,obs):",
"def jobs(self, tags=None, tags_intersect=None):\n return list(self.all_jobs(tags=tags, tags_intersect=tags_intersect))",
"def getTagList(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALBehaviorManager\")\n return self.proxy.getTagList()",
"def worklist():\n from wheelcms_axle.content import Content\n pending = Content.objects.filter(state=\"pending\", node__isnull=False)\n return pending",
"def ListWorkers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def list_requesters():\n from mephisto.core.local_database import LocalMephistoDB\n from tabulate import tabulate\n\n db = LocalMephistoDB()\n requesters = db.find_requesters()\n dict_requesters = [r.to_dict() for r in requesters]\n click.echo(tabulate(dict_requesters, headers=\"keys\"))",
"def tags(self):\r\n url = '{0}/tags/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json",
"def list_tags():\n\n tags = Tag.query.all()\n return render_template('tags/list_tags.html', tags=tags)",
"def tag_list(request):\r\n rdict = request.matchdict\r\n username = rdict.get(\"username\", None)\r\n if username:\r\n username = username.lower()\r\n\r\n tags_found = TagMgr.find(username=username)\r\n\r\n return {\r\n 'tag_list': tags_found,\r\n 'tag_count': len(tags_found),\r\n 'username': username,\r\n }",
"def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def list_work_requests(self, compartment_id, **kwargs):\n resource_path = \"/workRequests\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"resource_identifier\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_work_requests got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"resourceIdentifier\": kwargs.get(\"resource_identifier\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[WorkRequestSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[WorkRequestSummary]\")",
"def get_components(self, req):\n request_name = req.request\n\n names = []\n if(request_name == \"\"):\n comps = self.rt_proxy.get_available_components() # get all\n else:\n comps = self.rt_proxy.get_available_components(request_name)\n\n for c in comps:\n names.append(str(c))\n\n resp = ListComponentsResponse(names)\n\n return resp",
"async def request_jobs_list(self, jobs_list_active_only: bool, *args, **kwargs) -> List[str]:\n # TODO: implement\n raise NotImplementedError('{} function \"request_jobs_list\" not implemented yet'.format(self.__class__.__name__))",
"def list_cost_tracking_tags(self, compartment_id, **kwargs):\n resource_path = \"/tagNamespaces/actions/listCostTrackingTags\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_cost_tracking_tags got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Tag]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Tag]\")",
"def get_tags_list(*args, **kwargs):\n return Tag.objects.active()"
] | [
"0.58259183",
"0.5787486",
"0.578615",
"0.5756901",
"0.57381487",
"0.5711629",
"0.5706951",
"0.5672795",
"0.5647787",
"0.56005555",
"0.5547329",
"0.5543428",
"0.55379766",
"0.55379766",
"0.55000997",
"0.5487424",
"0.5475018",
"0.5472611",
"0.5454506",
"0.54417205",
"0.54269785",
"0.5417054",
"0.5416555",
"0.5367583",
"0.5349937",
"0.53378636",
"0.5318648",
"0.531532",
"0.5305949",
"0.5297729"
] | 0.69553626 | 0 |
Lists the `UserGroupMembership` objects in your tenancy. You must specify your tenancy's OCID as the value for the compartment ID (see `Where to Get the Tenancy's OCID and User's OCID`__). | def list_user_group_memberships(self, compartment_id, **kwargs):
    resource_path = "/userGroupMemberships"
    method = "GET"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "user_id",
        "group_id",
        "page",
        "limit"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_user_group_memberships got unknown kwargs: {!r}".format(extra_kwargs))

    query_params = {
        "compartmentId": compartment_id,
        "userId": kwargs.get("user_id", missing),
        "groupId": kwargs.get("group_id", missing),
        "page": kwargs.get("page", missing),
        "limit": kwargs.get("limit", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[UserGroupMembership]")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[UserGroupMembership]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def view_group(request, group_id):\n users = models.UserProfile.all().order('email')\n if group_id:\n group = models.UserGroup.get_by_id(int(group_id))\n if group.users:\n users = models.UserProfile.get(group.users)\n else:\n users = []\n return utility.respond(request, 'admin/view_group', {'users': users})",
"def get_queryset(self):\n user = self.request.user\n return user.group_set.all()",
"def list_groups(request):\n groups = models.UserGroup.all().order('name')\n return utility.respond(request, 'admin/list_groups', {'groups': groups})",
"def get(self):\r\n return UserGroupService.getAllUserGroups(self)",
"def queryUsersAndGroups(self):\n with self._open_connection() as session:\n #List Users\n pprint(list( [ (x[User.id], x[User.name]) for x in session.query(User) ] ))\n # List groups\n groups = session.query(User).filter( User.type == 'rodsgroup' )\n [x[User.name] for x in groups]\n #More detailed listings\n grp_usr_mapping = [ (iRODSUserGroup ( session.user_groups, result), iRODSUser (session.users, result)) \\\n for result in session.query(UserGroup,User) ]\n pprint( [ (x,y) for x,y in grp_usr_mapping if x.id != y.id ] )",
"def list_users(access_token):\n request_url = OKTA_URL + \"api/v1/users\"\n headers = {\"Authorization\": \"Bearer \" + access_token}\n group_request = requests.get(request_url, headers=headers).json()\n return group_request",
"def get_group_list(org_id):\n tList = get_template('app/usermanagementorg/group_list.html')\n groups = get_groups(org_id)\n return tList.render(Context({ 'groups': groups, }))",
"def get_memberships(self):\n return UnitMembership.objects.filter(unit=self).select_related(\"user\")",
"def get_user_groups(user):\n auth_groups = user.groups.all()\n # groups = [group.profile for group in auth_group] # not working\n # todo implement better\n groups = [GroupProfile.objects.filter(group=group)[0] for group in auth_groups if GroupProfile.objects.filter(group=group).count()]\n return groups",
"def users_groups():\n if request.method == \"GET\":\n query = {\"token\": ciconnect_api_token, \"globus_id\": session[\"primary_identity\"]}\n # Get user info to derive unix name\n user = get_user_info(session)\n unix_name = user[\"metadata\"][\"unix_name\"]\n # Get user's group membership info based on session unix name\n users_group_memberships = get_user_group_memberships(session, unix_name)\n\n multiplexJson = {}\n group_membership_status = {}\n for group in users_group_memberships:\n if group[\"state\"] not in [\"nonmember\"]:\n group_name = group[\"name\"]\n group_query = (\n \"/v1alpha1/groups/\" + group_name + \"?token=\" + query[\"token\"]\n )\n multiplexJson[group_query] = {\"method\": \"GET\"}\n group_membership_status[group_query] = group[\"state\"]\n # POST request for multiplex return\n multiplex = get_multiplex(multiplexJson)\n\n users_groups = []\n for group in multiplex:\n if (\n session[\"url_host\"][\"unix_name\"]\n in (json.loads(multiplex[group][\"body\"])[\"metadata\"][\"name\"])\n ) and (\n len(\n (json.loads(multiplex[group][\"body\"])[\"metadata\"][\"name\"]).split(\n \".\"\n )\n )\n > 1\n ):\n users_groups.append(\n (\n json.loads(multiplex[group][\"body\"]),\n group_membership_status[group],\n )\n )\n # users_groups = [group for group in users_groups if len(group['name'].split('.')) == 3]\n\n # Query user's pending project requests\n pending_project_requests = get_user_pending_project_requests(unix_name)\n # Check user's member status of root connect group\n connect_group = session[\"url_host\"][\"unix_name\"]\n user_status = get_user_connect_status(unix_name, connect_group)\n\n domain_name = domain_name_edgecase()\n\n with open(\n brand_dir\n + \"/\"\n + domain_name\n + \"/form_descriptions/group_unix_name_description.md\",\n \"r\",\n ) as file:\n group_unix_name_description = file.read()\n\n return render_template(\n \"users_groups.html\",\n groups=users_groups,\n project_requests=pending_project_requests,\n user_status=user_status,\n group_unix_name_description=group_unix_name_description,\n )",
"def personal_group_user_listing(request):\n\town_id, page_num = request.user.id, request.GET.get('page', '1')\n\tstart_index, end_index = get_indices(page_num, OBJS_PER_PAGE_IN_USER_GROUP_LIST)\n\tpayload, total_grps = retrieve_user_group_list_contents(own_id,start_index,end_index)\n\tpage_list = get_overall_page_list(total_grps, OBJS_PER_PAGE_IN_USER_GROUP_LIST)\n\treturn render(request,\"personal_group/group_listing/user_group_list.html\",{'payload':payload,'pages':page_list,'num_pages':len(page_list),\\\n\t\t'current_page':page_num,'current_time':time.time(),'own_id':str(request.user.id),'items_in_curr_page':len(payload)})",
"def groups():\n access_token = session['access_token']\n return \"%s\" % list_groups(access_token)",
"def all_memberships(request):\n memberships = Membership.objects.all()\n context = {\n 'memberships': memberships,\n }\n return render(request, \"memberships.html\", context)",
"def get_membersof(self, kwargs):\n group = kwargs[\"group\"]\n verbose = kwargs.get(\"verbose\", False)\n\n results = list(self.engine.query(self.engine.GROUP_DN_FILTER(group), [\"distinguishedName\", \"objectSid\"]))\n if results:\n group_dn = results[0][\"distinguishedName\"]\n else:\n error(\"Group {group} does not exists\".format(group=group))\n\n primary_group_id = results[0][\"objectSid\"].split('-')[-1]\n results = self.engine.query(self.engine.ACCOUNTS_IN_GROUP_FILTER(primary_group_id, group_dn))\n self.display(results, verbose)",
"def list(self, request, *args, **kwargs):\n if not request.user.is_superuser:\n self.queryset = Group.objects.filter(owner__pk=request.user.id)\n\n return super().list(request, args, kwargs)",
"def filter_users(request):\n groups = models.UserGroup.all().order('name')\n return utility.respond(request, 'admin/filter_users', {'groups': groups})",
"def members(self):\n data = UserProfile.objects.filter(\n organization_id=self.id\n ).order_by(\n 'display_name', 'first_name', 'last_name'\n )\n\n return data",
"def get_queryset(self):\n group_info = Group.objects.filter(id__in=Member.objects.filter(\n user=self.request.user).values('group').distinct())\n for data in group_info:\n user_id = Member.objects.get(role_type='owner', group_id=data.id)\n data.owner = user_id.user.phone\n \n return group_info",
"def list_group_members(self, entity):\n\n members = []\n\n for nodePath, node in self.cache.get_tree(self.userProjects).items():\n if nodePath.startswith(entity.path):\n # Check if node is a direct child\n distance = len(pathlib.Path(nodePath).relative_to(pathlib.Path(entity.path)).parts)\n\n if distance == 1:\n if type(node) is gitlab.v4.objects.Group or type(node) is gitlab.v4.objects.Project:\n members.append(node.path)\n elif type(node) is gitlab.v4.objects.User:\n members.append(node.username)\n\n return members",
"def get_memberships(self, kwargs):\n account = kwargs[\"account\"]\n recursive = kwargs.get(\"recursive\", False)\n\n already_printed = set()\n\n def lookup_groups(dn, leading_sp, already_treated):\n results = self.engine.query(self.engine.DISTINGUISHED_NAME(dn), [\"memberOf\", \"primaryGroupID\"])\n for result in results:\n if \"memberOf\" in result:\n for group_dn in result[\"memberOf\"]:\n if group_dn not in already_treated:\n print(\"{g:>{width}}\".format(g=group_dn, width=leading_sp + len(group_dn)))\n already_treated.add(group_dn)\n lookup_groups(group_dn, leading_sp + 4, already_treated)\n\n if \"primaryGroupID\" in result and result[\"primaryGroupID\"]:\n pid = result[\"primaryGroupID\"]\n results = list(self.engine.query(self.engine.PRIMARY_GROUP_ID(pid)))\n if results:\n already_treated.add(results[0][\"dn\"])\n\n return already_treated\n\n results = self.engine.query(self.engine.ACCOUNT_IN_GROUPS_FILTER(account), [\"memberOf\", \"primaryGroupID\"])\n for result in results:\n if \"memberOf\" in result:\n for group_dn in result[\"memberOf\"]:\n print(group_dn)\n if recursive:\n already_printed.add(group_dn)\n s = lookup_groups(group_dn, 4, already_printed)\n already_printed.union(s)\n\n # for some reason, when we request an attribute which is not set on an object,\n # ldap3 returns an empty list as the value of this attribute\n if \"primaryGroupID\" in result and result[\"primaryGroupID\"] != []:\n pid = result[\"primaryGroupID\"]\n results = list(self.engine.query(self.engine.PRIMARY_GROUP_ID(pid)))\n if results:\n print(results[0][\"dn\"])",
"def _get_org_members(self):\n url = f\"{BASE_URL}/orgs/{ORG}/members\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})",
"def showORGusers(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n ORG_ID = kwargs['ORG_ID']\n strCSPProdURL = kwargs['strCSPProdURL']\n jsonResponse = get_csp_users_json(strCSPProdURL, ORG_ID, sessiontoken)\n if jsonResponse == None:\n print(\"API Error\")\n sys.exit(1)\n\n users = jsonResponse['results']\n table = PrettyTable(['First Name', 'Last Name', 'User Name'])\n for i in users:\n table.add_row([i['user']['firstName'],i['user']['lastName'],i['user']['username']])\n print (table.get_string(sortby=\"Last Name\"))",
"def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)",
"def get_members(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n query = (\"SELECT username from \" + ENV_DB + \".Groups WHERE gid='{}'\").format(self.g_id)\r\n cursor.execute(query)\r\n data = cursor.fetchall()\r\n database.close()\r\n return list(i[0] for i in data)",
"def list_user_groups(self, token):\n requestUser = self.get_username_from_token(token)\n dataBase = self.read_database()\n groups = dataBase['userGroups']\n groupList = list()\n for group in groups:\n members = groups[group]['members']\n owners = groups[group]['owners']\n if requestUser in members or requestUser in owners:\n groupList.append(group)\n return groupList",
"def myorgs(request):\n context = RequestContext(request)\n \n user = request.user\n orgs = user.orgusers.get_query_set()\n \n context['orgs'] = orgs\n return render_to_response('myorgs.html', context)",
"def fetch_their_members(our_group):\n\tgroup_id = our_group[\"groupId\"]\n\turl = f'{BASE_URL}/groups/{group_id}/members'\n\tparams = {'$select': 'userPrincipalName,id'}\n\treturn call_api(url, params)",
"def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list",
"def groups(self):\r\n return users.Groups(self)",
"def test_list_my_memberships_owner(self):\n url = '/api/v1/communities/0/list_my_memberships/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(10, data['count'])"
] | [
"0.59523857",
"0.59251404",
"0.5874129",
"0.5672281",
"0.56644803",
"0.56609535",
"0.56514776",
"0.56267077",
"0.5618761",
"0.56177497",
"0.5544431",
"0.54518914",
"0.544957",
"0.5421477",
"0.54123914",
"0.54096514",
"0.5407805",
"0.5396198",
"0.53743017",
"0.53723145",
"0.5355645",
"0.53510547",
"0.53446084",
"0.534089",
"0.53142476",
"0.5313268",
"0.5311054",
"0.52878046",
"0.5286481",
"0.5281303"
] | 0.68893605 | 0 |
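The membership listing above can filter by user_id and/or group_id; a sketch of using that to check whether a user belongs to a group follows, with the identity_client object and all OCIDs treated as assumptions supplied by the caller:

def user_is_in_group(identity_client, tenancy_ocid, user_ocid, group_ocid):
    # Per the description, the compartment ID passed here must be the tenancy OCID.
    response = identity_client.list_user_group_memberships(
        tenancy_ocid,
        user_id=user_ocid,
        group_id=group_ocid,
    )
    return len(response.data) > 0  # assumed response attribute; non-empty means a membership exists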
Lists the work requests in compartment. | def list_work_requests(self, compartment_id, **kwargs):
    resource_path = "/workRequests"
    method = "GET"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "page",
        "limit",
        "resource_identifier"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_work_requests got unknown kwargs: {!r}".format(extra_kwargs))

    query_params = {
        "compartmentId": compartment_id,
        "page": kwargs.get("page", missing),
        "limit": kwargs.get("limit", missing),
        "resourceIdentifier": kwargs.get("resource_identifier", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[WorkRequestSummary]")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[WorkRequestSummary]") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listRequests(self):\n reqmgr = RequestManagerImpl()\n retval = []\n for request in reqmgr.listRequests(self.endpoint):\n tmpRequest = Request()\n tmpRequest.setReqmgrUrl( self.endpoint )\n tmpRequest.setWorkflowName( request['request_name'] )\n retval.append( tmpRequest )\n return retval",
"def worklist():\n from wheelcms_axle.content import Content\n pending = Content.objects.filter(state=\"pending\", node__isnull=False)\n return pending",
"def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)",
"def ListWorkers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def list(self, jobguid=\"\", executionparams=None):",
"def list_requesters():\n from mephisto.core.local_database import LocalMephistoDB\n from tabulate import tabulate\n\n db = LocalMephistoDB()\n requesters = db.find_requesters()\n dict_requesters = [r.to_dict() for r in requesters]\n click.echo(tabulate(dict_requesters, headers=\"keys\"))",
"def list(self, jobguid=\"\", executionparams=dict()):",
"async def request_jobs_list(self, jobs_list_active_only: bool, *args, **kwargs) -> List[str]:\n # TODO: implement\n raise NotImplementedError('{} function \"request_jobs_list\" not implemented yet'.format(self.__class__.__name__))",
"def get_jobs_list(self, response):\n pass",
"def queryAllRequests(self):\n logging.info(\"Querying all requests at ReqMgr instance ...\")\n r = self.reqMgrService.getRequestNames()\n print \"Found %s requests:\" % len(r)\n for req in r:\n print req",
"def list_tagging_work_requests(self, compartment_id, **kwargs):\n resource_path = \"/taggingWorkRequests\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\",\n \"resource_identifier\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_tagging_work_requests got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing),\n \"resourceIdentifier\": kwargs.get(\"resource_identifier\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[TaggingWorkRequestSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[TaggingWorkRequestSummary]\")",
"def list_resources(self):\n self.workersResponded = 0\n print(\"Main thread\", threading.get_ident())\n\n for addr in self.equipment_model.get_addr_list():\n self.equipment_model.reset_index(addr)\n self.equipment_model.set_connected(addr, 2)\n\n if not self.worker_pool.is_init():\n w = self.worker_pool.create_worker(addr)\n #Signals from worker\n w.signal_connected.connect(self.slot_connected)\n w.signal_not_connected.connect(self.slot_not_connected)\n w.signal_write_success.connect(self.parent.slot_write_success)\n w.signal_query_success.connect(self.parent.slot_query_success)\n w.signal_error.connect(self.parent.slot_error)\n\n self.next_connection(addr)\n \n self.worker_pool.set_init(True)",
"def getRequestList(self):\n\n result = RequestsDAO().getRequests()\n mapped_result = []\n\n if not result:\n return jsonify(Error=\"NOT FOUND\"), 404\n\n else:\n for r in result:\n mapped_result.append(self.mapToUserRequestDict(r))\n\n return jsonify(TURN=mapped_result), 200",
"def get_components(self, req):\n request_name = req.request\n\n names = []\n if(request_name == \"\"):\n comps = self.rt_proxy.get_available_components() # get all\n else:\n comps = self.rt_proxy.get_available_components(request_name)\n\n for c in comps:\n names.append(str(c))\n\n resp = ListComponentsResponse(names)\n\n return resp",
"def get_job_list(self):\n return self.job_list",
"def get_job_list(self):\n return self.job_list",
"def list(self):\n self.background_scheduler.print_jobs()",
"def workloads(self):\n return self._workloads",
"def list_jobs(self):\n\n return dict(self._from_json(self.manage.run(override=\"list-jobs\")))",
"def listJobs():\n logger.debug('[FLASKWEB /jobs] Request for job listing')\n jobs = db.getJobs(numdays=2)\n for job in jobs:\n job['time'] = datetime.datetime.strptime(job['time'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if job['complete']:\n job['complete'] = datetime.datetime.strptime(job['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n\n # Garbage Collect Orpahened jobs\n compiles = db.getCompiles()\n for compile in compiles:\n if compile['submit']:\n compile['submit'] = datetime.datetime.strptime(compile['submit'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if compile['complete']:\n compile['complete'] = datetime.datetime.strptime(compile['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n # for c in compiles:\n # if c['uid'] not in compile_tasks.keys():\n # db.updateCompile(c['uid'], status='KILLED', done=True)\n # compiles = db.getCompiles()\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(LaunchJobs=jobs, CompilingJobs=compiles)), 200\n else:\n return render_template(\"jobs.html\", joblist=jobs, compilelist=compiles)",
"def get_requests(self):\n\t\tself.last_processed = self.last_modified\n\t\treturn self.requests",
"def work_devices(self):\n return self._work_devices",
"def get_requests(self):\r\n\t\tself.last_processed = self.last_modified\r\n\t\treturn self.requests",
"def list(self):\n return self.request(\"GET\")",
"def give_workers_list(self):\n return self._workers",
"def need_list():\n operation = request.args['operation']\n timestamp = int(time())\n id_session = request.remote_addr\n keys = ['operation', 'timestamp', 'id_session']\n values = [operation, timestamp, id_session]\n data = dict(zip(keys, values))\n msg = json.dumps(data)\n qm.send(cmdq, msg)\n return \"ok\"",
"def list(self, request):\n jobs = Job.objects.all()\n\n city = self.request.query_params.get('city', None)\n state = self.request.query_params.get('state', None)\n\n # Support filtering jobs by user id\n job = self.request.query_params.get('user', None)\n if job is not None:\n jobs = jobs.filter(user=request.user)\n\n if city is not None:\n jobs = jobs.filter(city=city)\n\n if state is not None:\n jobs = jobs.filter(state=state)\n\n serializer = JobSerializer(\n jobs, many=True, context={'request': request})\n return Response(serializer.data)",
"def _list(self, req):\n list_type = None\n status_prefix = 'STATUS LIST '\n if req:\n list_type = req.pop(0)\n if list_type and list_type == SPECTATE:\n games = self.server.get_unfinished_games()\n status_prefix += SPECTATE + ' '\n else:\n games = self.server.get_open_games()\n self.send_line(status_prefix + ' '.join(\n [str(g.id) for g in games if not self.game or self.game is not g]))",
"def list(self, _request):\n serializer = TaskSerializer(instance=TASKS.values(), many=True)\n return response.Response(serializer.data)",
"def get_jobs(self, *, params: Optional[dict] = None) -> \"resource_types.Jobs\":\n\n return communicator.Jobs(self.__requester).fetch(parameters=params)"
] | [
"0.67798686",
"0.64472514",
"0.6357527",
"0.62949765",
"0.61017513",
"0.606693",
"0.6015134",
"0.59746355",
"0.5971976",
"0.59663254",
"0.596574",
"0.59381527",
"0.5924295",
"0.590886",
"0.5905645",
"0.5905645",
"0.5890049",
"0.5807364",
"0.57771367",
"0.57649094",
"0.5733279",
"0.57244754",
"0.5722624",
"0.5684172",
"0.5675422",
"0.56422913",
"0.5636008",
"0.5620735",
"0.56052774",
"0.5591213"
] | 0.69024056 | 0 |
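A short sketch of calling the list_work_requests method above to inspect the work requests tied to one resource; identity_client and both OCIDs are placeholders, not values taken from the row:

# Hypothetical call: list work requests that touch a specific resource.
response = identity_client.list_work_requests(
    "ocid1.compartment.oc1..exampleuniqueID",
    resource_identifier="ocid1.tagnamespace.oc1..exampleuniqueID",
)
for work_request in response.data:  # assumed response attribute
    print(work_request)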
Move the compartment to a different parent compartment in the same tenancy. When you move a compartment, all its contents (subcompartments and resources) are moved with it. Note that the `CompartmentId` that you specify in the path is the compartment that you want to move. | def move_compartment(self, compartment_id, move_compartment_details, **kwargs):
    resource_path = "/compartments/{compartmentId}/actions/moveCompartment"
    method = "POST"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "if_match",
        "opc_request_id",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "move_compartment got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "compartmentId": compartment_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.retry_strategy
    if kwargs.get('retry_strategy'):
        retry_strategy = kwargs.get('retry_strategy')

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=move_compartment_details)
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=move_compartment_details) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_patch_project_move_child(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, self.category\n )\n self.make_assignment(new_category, self.user, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.category.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)",
"def move_by(self, path, env=None):\n env = self._find_env(env)\n old_pos = self.position(env)\n new_pos = [p + c for p, c in zip(old_pos, path)]\n env.move_agent(self, new_pos)",
"def move_to_node(self,node):\n path=self.get_path(self.current_node,node)\n self.move_to(path)",
"def test_patch_project_move_root(self):\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, None\n )\n new_owner = self.make_user('new_owner')\n self.make_assignment(new_category, new_owner, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'parent': ''}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 200, msg=response.content)",
"def move_node(self, node_id, new_parent_id, connection=None):\n\n connection = connection or self.engine.connect()\n\n self.detach_node(node_id=node_id, connection=connection)\n self.attach_node(node_id=node_id, new_parent_id=new_parent_id, connection=connection)",
"def reparent(self, obj, parent):\n return self.update(obj, parent=parent)",
"def move(self, new_parent):\n\n new_parent.children.append(self)\n new_parent.rebuild_children_dict()\n self.delete()",
"def setCompartment(self, *args):\n return _libsbml.CompartmentReference_setCompartment(self, *args)",
"def move(self, from_id, to_id):\n return self._client.post(\n url=self._client.get_full_url(\n self.get_path(\n 'children', realm=self._realm_name, group_id=to_id\n )\n ),\n data=json.dumps({\n 'id': from_id\n })\n )",
"def _swap_with_parent(self) -> bool:\n if self.parent is None:\n return False\n if self.parent.get_chainwork() >= self.get_chainwork():\n return False\n self.print_error(\"swap\", self.forkpoint, self.parent.forkpoint) #Calvin: We should see in the logs when a swap happens\n parent_branch_size = self.parent.height() - self.forkpoint + 1\n forkpoint = self.forkpoint # type: Optional[int]\n parent = self.parent # type: Optional[Blockchain]\n child_old_id = self.get_id()\n parent_old_id = parent.get_id()\n # swap files\n # child takes parent's name\n # parent's new name will be something new (not child's old name) Calvin: This makes sense, otherwise the hash would be invalid\n self.assert_headers_file_available(self.path())\n child_old_name = self.path()\n with open(self.path(), 'rb') as f:\n my_data = f.read()\n self.assert_headers_file_available(parent.path())\n assert forkpoint > parent.forkpoint, (f\"forkpoint of parent chain ({parent.forkpoint}) \"\n f\"should be at lower height than children's ({forkpoint})\")\n with open(parent.path(), 'rb') as f:\n # Calvin: forkpoint - parent.forkpoint is technically the height of this blockchain, why not use height method?\n # Calvin: Answer: There is a main_chain, this uses the blockchain with the greatest chainwork as the main_chain\n f.seek((forkpoint - parent.forkpoint)*HEADER_SIZE) # Calvin: This excludes the forkpoint_hash, why? Technically the forkpoints have the same first header! Saves a few bytes of writing.\n parent_data = f.read(parent_branch_size*HEADER_SIZE)\n self.write(parent_data, 0) # Calvin: writes the parents block data into this (current child)\n parent.write(my_data, (forkpoint - parent.forkpoint)*HEADER_SIZE) # Calvin: writes the child's block data into parents file\n # swap parameters # Calvin: Swaps the childs parents to be the parent's parent and the parent's parent is now the previous child\n self.parent, parent.parent = parent.parent, self # type: Optional[Blockchain], Optional[Blockchain]\n self.forkpoint, parent.forkpoint = parent.forkpoint, self.forkpoint\n self._forkpoint_hash, parent._forkpoint_hash = parent._forkpoint_hash, hash_raw_header(bh2u(parent_data[:HEADER_SIZE])) # Swaps the forkpoint_hash values\n self._prev_hash, parent._prev_hash = parent._prev_hash, self._prev_hash\n # parent's new name\n os.replace(child_old_name, parent.path())\n self.update_size()\n parent.update_size()\n # update pointers\n blockchains.pop(child_old_id, None)\n blockchains.pop(parent_old_id, None)\n blockchains[self.get_id()] = self\n blockchains[parent.get_id()] = parent\n return True",
"def test_clashing_without_children(self):\n old_url = '/old-url/'\n parent = RouteFactory.create(url=old_url)\n child = ChildRouteFactory.create(slug='leaf', parent=parent)\n occupied_url = '/occupied/'\n RouteFactory.create(url=occupied_url)\n\n with transaction.atomic():\n with self.assertRaises(IntegrityError):\n parent.move_to(occupied_url, move_children=False)\n\n self.assertEqual(parent.url, old_url)\n child.refresh_from_db()\n self.assertEqual(child.url, '/old-url/leaf/')",
"def move_to(i3: i3ipc.Connection, workspace: int):\n i3.command(f\"move container to workspace number {workspace}\")",
"def move(self, path):\n self.current_location = (path[1][1], path[1][0])",
"def _move(self, id: str, parent_id: str) -> MoveFolderResponseModel:\n endpoint: ApiEndpoint = self.api_endpoint_group.move\n request_obj: MoveFolderRequestModel = endpoint.load_request(parent_id=parent_id)\n response: MoveFolderResponseModel = endpoint.perform_request(\n http=self.auth.http,\n request_obj=request_obj,\n id=id,\n )\n return response",
"def move_node(self, job):\n transfer = Transfer(job.jobInfo)\n target = transfer.target\n direction = transfer.direction\n result = None\n # Check uris\n check_uri(target, self.sm, shouldExist = True)\n checks = check_uri(direction, self.sm, shouldExist = False)\n null = direction.endswith(NULL)\n # Retrieve existing record\n node = self.sm.get_node(target)[0]['node']\n node = self.nf.get_node(node)\n # Check whether endpoint is reserved URI\n if null: self.nm.delete_node(target)\n if direction.endswith(AUTO): \n direction = generate_uri(direction)\n result = {'destination': direction}\n if not(null):\n # Check if endpoint is a container\n if checks['exists'] and checks['container']: direction += target[target.rfind('/'):]\n # Change identifier\n node.set_uri(direction)\n # Update db\n self.sm.update_node(target, direction, node.tostring())\n # Check if target is a container\n if isinstance(node, ContainerNode):\n # Move children\n for child in self.sm.get_children(target):\n node = self.nf.get_node(self.sm.get_node(child)[0]['node'])\n if null:\n self.nm.delete_node(node.uri)\n else:\n new_uri = node.uri.replace(target, direction)\n node.set_uri(new_uri)\n self.sm.update_node(child, new_uri, node.tostring())\n return result",
"def move(name, other, newname=None):",
"def removeCompartment(self, *args):\n return _libsbml.Model_removeCompartment(self, *args)",
"def compartment_id(self, compartment_id):\n self._compartment_id = compartment_id",
"def compartment_id(self, compartment_id):\n self._compartment_id = compartment_id",
"def move_to_collection(self, collection):\n if self.collection == collection:\n return\n\n if collection is None:\n raise ValidationError(\n f\"Entity {self}({self.pk}) can only be moved to another container.\"\n )\n self.collection = collection\n self.tags = collection.tags\n self.permission_group = collection.permission_group\n self.save(update_fields=[\"collection\", \"tags\", \"permission_group\"])\n self.data.update(\n permission_group=collection.permission_group,\n collection_id=collection.pk,\n tags=collection.tags,\n )",
"async def move(self, context_path: str):\n NewContext = CONTEXTS[context_path]\n new_context = NewContext(self.session)\n await self.leave()\n await type(self).condition.mark_as_done(self)\n self.session.context = new_context\n await new_context.enter()\n await self.send_messages()",
"def reparentTo(self, objnp):\n\n # if isinstance(objnp, cm.CollisionModel):\n # self.__objcm.objnp.reparentTo(objnp.objnp)\n # elif isinstance(objnp, NodePath):\n # self.__objcm.objnp.reparentTo(objnp)\n # else:\n # print(\"NodePath.reparent_to() argument 1 must be environment.CollisionModel or panda3d.core.NodePath\")\n if objnp is not base.render:\n print(\"This bullet dynamics model doesnt support to plot to non base.render nodes!\")\n raise ValueError(\"Value Error!\")\n else:\n self.__objcm.objnp.reparentTo(objnp)\n # self.setMat(self.__objcm.getMat())\n # print(self.objbdb.gethomomat())\n self.__objcm.objnp.setMat(base.pg.np4ToMat4(self.objbdb.get_homomat()))",
"def test_patch_project_move(self):\n self.assertEqual(\n self.project.full_title,\n self.category.title + ' / ' + self.project.title,\n )\n\n new_category = self.make_project(\n 'NewCategory', PROJECT_TYPE_CATEGORY, None\n )\n self.make_assignment(new_category, self.user_owner_cat, self.role_owner)\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'parent': str(new_category.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.project.refresh_from_db()\n model_dict = model_to_dict(self.project)\n self.assertEqual(model_dict['parent'], new_category.pk)\n owners = [a.user for a in self.project.get_owners()]\n self.assertIn(self.user_owner_cat, owners)\n self.assertIn(self.user_owner, owners)\n\n # Assert child project full title update\n self.assertEqual(\n self.project.full_title,\n new_category.title + ' / ' + self.project.title,\n )\n self.assertEqual(\n json.loads(response.content)['parent'], str(new_category.sodar_uuid)\n )",
"def setCompartment(self, *args):\n return _libsbml.Reaction_setCompartment(self, *args)",
"def test_deletionDisownsParent(self):\n port = self.port(store=self.store, portNumber=self.lowPortNumber, factory=self.factory)\n port.setServiceParent(self.store)\n port.deleteFromStore()\n service = IServiceCollection(self.store)\n self.failIfIn(port, list(service))",
"def move(self, target):\n if target.relto(self):\n raise error.EINVAL(target, \"cannot move path into a subdirectory of itself\")\n try:\n self.rename(target)\n except error.EXDEV: # invalid cross-device link\n self.copy(target)\n self.remove()",
"def move_to(self, mobject_or_point):\n layer_center = self.surrounding_rectangle.get_center()\n if isinstance(mobject_or_point, Mobject):\n target_center = mobject_or_point.get_center() \n else:\n target_center = mobject_or_point\n\n self.shift(target_center - layer_center)",
"def relocate(self, source, destination):\n destination_dir = os.path.dirname(destination)\n if not os.path.exists(destination_dir):\n self.subdir(destination_dir)\n os.rename(source, destination)",
"def relocate(source, destination, move=False):\n venv = api.VirtualEnvironment(source)\n if not move:\n\n venv.relocate(destination)\n return None\n\n venv.move(destination)\n return None",
"def move_to_zone(self, zone):\n if isinstance(zone, basestring):\n zone = self.project.get_flow().get_zone(zone)\n zone.add_item(self)"
] | [
"0.5687158",
"0.542566",
"0.533787",
"0.5328388",
"0.516376",
"0.5099298",
"0.50907147",
"0.5065896",
"0.50041324",
"0.49923396",
"0.49568546",
"0.4937967",
"0.49123746",
"0.4888783",
"0.4883443",
"0.48625642",
"0.48523584",
"0.48384222",
"0.48384222",
"0.48376495",
"0.48334068",
"0.4828661",
"0.47939885",
"0.4793703",
"0.47564533",
"0.4732511",
"0.47204143",
"0.47102433",
"0.4696371",
"0.46860638"
] | 0.6347577 | 0 |
Resets the OAuth2 client credentials for the SCIM client associated with this identity provider. | def reset_idp_scim_client(self, identity_provider_id, **kwargs):
resource_path = "/identityProviders/{identityProviderId}/actions/resetScimClient"
method = "POST"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"reset_idp_scim_client got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"identityProviderId": identity_provider_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ScimClientCredentials")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ScimClientCredentials") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def logout(self):\n self._client.clear_credentials()",
"def reset_secret(self, save=False):\n client = cas.get_client()\n client.revoke_application_tokens(self.client_id, self.client_secret)\n self.client_secret = generate_client_secret()\n\n if save:\n self.save()\n return True",
"def set_credentials(self, client_id=None, client_secret=None):\n self._client_id = client_id\n self._client_secret = client_secret\n\n # make sure to reset session due to credential change\n self._session = None",
"def refresh(self):\n self._request_token(grant_type='client_credentials')",
"def remove_client_credentials(self):\n if self._dry_run:\n return\n os.unlink(self._store_pathname)",
"def reset_credentials(self):\n credentials = {}\n with open(self.credentials_file, 'w') as fh_credentials:\n fh_credentials.write(json.dumps(credentials))",
"def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')",
"def change_authentication(self, client_id=None, client_secret=None,\n access_token=None, refresh_token=None):\n # TODO: Add error checking so you cannot change client_id and retain\n # access_token. Because that doesn't make sense.\n self.client_id = client_id or self.client_id\n self.client_secret = client_secret or self.client_secret\n self.access_token = access_token or self.access_token\n self.refresh_token = refresh_token or self.refresh_token",
"def refresh_credentials():\n global auth_token\n auth_token = get_oauth_token()",
"def logout(client):\n\n return client.post('/v1/auth/revoke')",
"def logOut(self):\n self.client.logout()",
"def disconnect(self):\r\n self._apiSession.close()\r\n self._oAuthSession.close()\r\n \r\n # Check the access token and refresh if expired\r",
"def auth_invalidate_session(self) -> None:\n self.__logger.debug('Eva.auth_invalidate_session called')\n return self.__http_client.auth_invalidate_session()",
"def resetCredentials(self, request, response):\n response.expireCookie('.ASPXAUTH', path='/', domain=COOKIE_DOMAIN)\n response.expireCookie('username', path='/', domain=COOKIE_DOMAIN)",
"def uninit_client(self):\n self.add_msg(\"Connection Lost\")\n self.client = None",
"def refresh(self):\n self._request_token(grant_type='password', username=self._username,\n password=self._password)",
"def on_reset_clientid(self, jdata):\n local_seed = get_rand_char(32).lower()\n config_file_name = MOLO_CONFIGS.get_config_object().get('domain', '')\n #keep Compatibility with old version\n if config_file_name and config_file_name!='molohub':\n config_file_name = CONFIG_FILE_NAME + '_' + config_file_name + '.yaml'\n else:\n config_file_name = CONFIG_FILE_NAME + '.yaml'\n save_local_seed(\n MOLO_CLIENT_APP.hass_context.config.path(config_file_name),\n local_seed)\n LOGGER.debug(\"reset clientid %s to %s\", self.client_id, local_seed)\n self.handle_close()",
"def disconnect_identity(identity):\n session.pop(\"cern_resource\", None)\n key = current_app.config.get(\n \"OAUTHCLIENT_CERN_OPENID_SESSION_KEY\",\n OAUTHCLIENT_CERN_OPENID_SESSION_KEY,\n )\n provides = session.pop(key, set())\n identity.provides -= provides",
"def reset_user(self):\n\n if self.resin.auth.is_logged_in():\n self.wipe_application()\n self.resin.models.key.base_request.request(\n 'user__has__public_key', 'DELETE',\n endpoint=self.resin.settings.get('pine_endpoint'), login=True\n )",
"def remove_credentials(self, conjurrc: ConjurrcData):\n self.credentials_provider.remove_credentials(conjurrc)",
"def re_authenticate(self):\n url = URLS['token']\n data = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret\n }\n r = requests.post(url, data=data)\n r.raise_for_status()\n j = r.json()\n self.access_token = j['access_token']\n self.refresh_token = j['refresh_token']\n self._set_token_expiration_time(expires_in=j['expires_in'])\n return r",
"def test_credentials_set_reset(self):\n empty_setting = {\n 'AccessKeyId': None,\n 'SecretAccessKey': None,\n 'SessionToken': None\n }\n nonempty_setting = {\n 'AccessKeyId': '1',\n 'SecretAccessKey': '2',\n 'SessionToken': '3'\n }\n self.assertEqual(_credentials, empty_setting)\n credentials_set(nonempty_setting)\n self.assertEqual(_credentials, nonempty_setting)\n credentials_reset()\n self.assertEqual(_credentials, empty_setting)",
"def renew_access_token(self):\n self._access_token = self._get_access_token()",
"def _refresh_access_token(self) -> None:\n response = httpx.post(\n f\"{self._base_url}/oauth2/token\",\n proxies=self._proxies,\n data={\n \"grant_type\": \"client_credentials\",\n \"client_id\": self._api_key,\n \"client_secret\": self._api_secret,\n },\n )\n response.raise_for_status()\n token = response.json()[\"access_token\"]\n c = httpx.Client()\n c.close()\n self._authorization_headers = {\"Authorization\": f\"Bearer {token}\"}",
"def _reset_connection(self):\n\n self.__userid = 0\n self.__token = 0\n self.__conn.close()\n\n self.__conn = httplib.HTTPConnection(\"www.slimtimer.com\")\n self._logon()",
"def call_for_auth_reset(self):\n pos.select_dispenser(1)\n crindsim.lift_handle()\n pos.click(\"reset\")\n pos.click(\"yes\")\n crindsim.lower_handle()\n #Checks crind diag to see if reset message is displayed\n if not system.wait_for(lambda: \"reset\" in pos.read_dispenser_diag()[\"Status\"].lower(), verify = False):\n tc_fail(\"CRIND did not reset\")\n #Wait for crind to return to idle\n if not system.wait_for(lambda: \"idle\" in pos.read_dispenser_diag()[\"Status\"].lower(), timeout = 120, verify = False):\n tc_fail(\"CRIND did not return to idle\")\n pos.click(\"back\")",
"def tearDown(self):\n self.client = app.test_client()\n self.salir = logout(self.client)",
"def resetUser(self):\n\t\turl = \"https://habitica.com/api/v4/user/reset\"\n\t\treturn(postUrl(url, self.credentials))",
"def reset(self):\n self.logger.debug(\"Resetting %s\", self.key)\n self.driver.reset(self.key)",
"def revoke(self):\n # Set the application as unsucessful with the current datetime\n self.status = self.Status.REVOKED\n self.revoked_datetime = timezone.now()\n\n # Removes credentialing from the user\n self.user.is_credentialed = False\n self.user.credential_datetime = None\n\n with transaction.atomic():\n self.user.save()\n self.save()\n\n logger.info('Credentialing for user {0} has been removed.'.format(\n self.user.email))"
] | [
"0.63996184",
"0.61857796",
"0.6167417",
"0.61205524",
"0.6116238",
"0.5673805",
"0.5546106",
"0.54928285",
"0.54202366",
"0.53968155",
"0.53470254",
"0.53334624",
"0.5311507",
"0.5289614",
"0.5285055",
"0.5245378",
"0.52309966",
"0.5217204",
"0.5211187",
"0.51890314",
"0.51373774",
"0.5134191",
"0.51086605",
"0.50872236",
"0.50849146",
"0.50816184",
"0.5076356",
"0.50723517",
"0.5056509",
"0.50351644"
] | 0.6584309 | 0 |
Updates the specified compartment's description or name. You can't update the root compartment. | def update_compartment(self, compartment_id, update_compartment_details, **kwargs):
resource_path = "/compartments/{compartmentId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"compartmentId": compartment_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_compartment_details,
response_type="Compartment")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_compartment_details,
response_type="Compartment") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_object(self, name: str) -> None:",
"def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))",
"def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string",
"def update_catalog(self, old_catalog_name, new_catalog_name, description):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n org = self.resource\n links = get_links(\n org, rel=RelationType.DOWN, media_type=EntityType.CATALOG.value)\n for link in links:\n if old_catalog_name == link.name:\n catalog = self.client.get_resource(link.href)\n href = catalog.get('href')\n admin_href = href.replace('/api/catalog/',\n '/api/admin/catalog/')\n admin_view_of_catalog = self.client.get_resource(admin_href)\n if new_catalog_name is not None:\n admin_view_of_catalog.set('name', new_catalog_name)\n if description is not None:\n admin_view_of_catalog['Description'] = E.Description(\n description)\n return self.client.put_resource(\n admin_href,\n admin_view_of_catalog,\n media_type=EntityType.ADMIN_CATALOG.value)\n raise Exception('Catalog not found.')",
"def update(self, job_name, param_name, value, description=None):\n if job_name in self._jobs:\n getattr(self._jobs[job_name], param_name).update(value, description)\n else:\n self.log.error(\"Invalid job name: %s\", job_name)",
"def firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,\n blade_bundle_version=None, descr=None, mode=None,\n org_parent=\"org-root\"):\n\n org_dn = org_parent + \"/org-\" + org_name\n fw_dn= org_dn + \"/fw-host-pack-\" + name\n mo = handle.query_dn(fw_dn)\n if mo is not None:\n if rack_bundle_version is not None:\n mo.rack_bundle_version = rack_bundle_version\n if blade_bundle_version is not None:\n mo.blade_bundle_version = blade_bundle_version\n if mode is not None:\n mo.mode=mode\n if descr is not None:\n mo.descr = descr\n\n handle.set_mo(mo)\n handle.commit()\n else:\n log.info(\"Firmware host pack <%s> not found.\" % name)",
"def _update(self, course_name: str, newdata: ParseType) -> None:\n\n self.courses[course_name] = newdata",
"def updateNameAndDescription(self, name, desc):\n self.magneticfield.name = name\n self.magneticfield.description = desc\n\n self.magneticfield.writeFile()",
"def update(self, container, representation):\n pass",
"def request_description_update():\n global should_update_description\n should_update_description = True",
"def update_description(self, host, baseUrl, description):\n self._host = host\n self._urlBase = baseUrl\n self._description = description\n return",
"def compartment_id(self, compartment_id):\n self._compartment_id = compartment_id",
"def compartment_id(self, compartment_id):\n self._compartment_id = compartment_id",
"def setName(self, *args):\n return _libsbml.Compartment_setName(self, *args)",
"def setCompartment(self, *args):\n return _libsbml.Species_setCompartment(self, *args)",
"def setCompartment(self, *args):\n return _libsbml.CompartmentReference_setCompartment(self, *args)",
"def update(self, title=None, description = None):\n jsonData = self.metaData.jsonObj\n header = self._baseHeader.copy()\n\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n url = self.metaData.getLink(\"edit\")\n assert url is not None\n\n if title is not None: jsonData['title'] = title\n if description is not None: jsonData['description'] = description\n\n response = self._adapter.putRequest(url, header, json.dumps(jsonData))\n\n return Document(self._client, self._client.getUrlFromHeaderLink(response['Headers']['link']))",
"def description(self, new_description):\r\n self.set({\"description\": new_description})",
"def put(self, department_id):\n department = get_department_by_id(department_id)\n department.name = request.json[\"name\"]\n db.session.commit()\n return {}, 200",
"def update(self, name):\n attemptdir = self.attemptdir(name)\n\n try:\n os.makedirs(attemptdir)\n\n except OSError as err:\n self.logger.info('Could not mkdir {} (ignoring): {}'.format(attemptdir, err))\n pass\n\n with open(os.path.join(attemptdir, 'image'), 'w+') as f:\n try:\n f.write(self.bom().content[name].image)\n\n except KeyError:\n self.logger.error('ERROR: component {} is not in the bom'.format(name))\n raise ZDGComponentBlocked(name)\n\n with open(os.path.join(attemptdir, 'owner'), 'w+') as f:\n f.write(self.bom().content[name].owner)\n\n with open(os.path.join(attemptdir, 'version'), 'w+') as f:\n f.write(self.bom().content[name].version)\n\n with open(os.path.join(attemptdir, 'submission'), 'w+') as f:\n f.write(self.bom().content[name].submission)",
"def update(ctx, name, description, tags):\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n update_dict = {}\n\n if name:\n update_dict['name'] = name\n\n if description:\n update_dict['description'] = description\n\n tags = validate_tags(tags)\n if tags:\n update_dict['tags'] = tags\n\n if not update_dict:\n Printer.print_warning('No argument was provided to update the experiment.')\n sys.exit(0)\n\n try:\n response = PolyaxonClient().experiment.update_experiment(\n user, project_name, _experiment, update_dict)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not update experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n Printer.print_success(\"Experiment updated.\")\n get_experiment_details(response)",
"def rename(self, serial, name):\n api_page = \"/configuration/object/ap_rename\"\n url = \"{}{}?{}&UIDARUBA={}\".format(\n self.base_url,\n api_page,\n self.config_path,\n self.uidaruba)\n\n obj_dict = {'serial-num': serial, 'new-name': name}\n obj_json = json.loads(json.dumps(obj_dict))\n\n resp = self.post(url, obj_json)\n\n print(resp.status_code)\n print(resp.text)",
"def setCompartment(self, *args):\n return _libsbml.Reaction_setCompartment(self, *args)",
"def update():\n return 'update api in put'",
"def do_update(cs, args):\n opts = {}\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['name'] = args.name\n if 'auto_heal' in args and args.auto_heal:\n opts['auto_heal'] = True\n if 'no_auto_heal' in args and args.no_auto_heal:\n opts['auto_heal'] = False\n opts = zun_utils.remove_null_parms(**opts)\n if not opts:\n raise exc.CommandError(\"You must update at least one property\")\n container = cs.containers.update(args.container, **opts)\n _show_container(container)",
"def setCompartment(self, *args):\n return _libsbml.QualitativeSpecies_setCompartment(self, *args)",
"def test_update_category_to_existing_name(self):\n sample_category()\n category = sample_category(name='House')\n url = category_details_url(category.id)\n res = self.client.put(url, {\"name\": \"place\"})\n\n category.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n res.data['errors']['name'][0],\n 'This field must be unique.')",
"def update(self, name=None, description=None, tags=None, provenance=None):\n # type: (str, str, List[str], str) -> bool\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n body = {} # type: Dict[str, Union[str, List, Dict]]\n\n if name is not None:\n self.raw_model[\"name\"] = name\n if description is not None:\n self.raw_model[\"description\"] = description\n if tags is not None:\n self.raw_model[\"tags\"] = tags\n if provenance is not None:\n self.raw_model[\"provenance\"] = provenance\n\n body = self.raw_model\n\n log.debug(\"Body %s\", body)\n\n resource_object = self.connection.api_call(\n \"PUT\", [\"v1\", \"resources\", self.id], headers=headers, json=body, model=Resource\n )\n\n self.raw_model = resource_object.raw_model\n\n log.debug(\"Updated dataset %s with content %s\", self.id, self.raw_model)\n return True",
"def setServiceDescription(self, description):\n with self.zeroconf.lock:\n self.zeroconf.outbox.put(description)",
"def update_simple(parent, name, value):\n element = parent.find('./' + name) \n\n if element is None:\n element = ET.SubElement(parent, name)\n element.text = value\n else:\n element.text = value"
] | [
"0.56508",
"0.5638967",
"0.5597009",
"0.55432",
"0.5494998",
"0.5449337",
"0.53832597",
"0.5348151",
"0.53268445",
"0.5285396",
"0.5275306",
"0.5235795",
"0.5235795",
"0.51921105",
"0.5159983",
"0.5123664",
"0.50844306",
"0.50828993",
"0.50785875",
"0.5047809",
"0.5020412",
"0.5014647",
"0.50084394",
"0.49942082",
"0.49363297",
"0.49216577",
"0.49170053",
"0.4913295",
"0.48995313",
"0.48943081"
] | 0.60476094 | 0 |
Updates the specified dynamic group. | def update_dynamic_group(self, dynamic_group_id, update_dynamic_group_details, **kwargs):
resource_path = "/dynamicGroups/{dynamicGroupId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_dynamic_group got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"dynamicGroupId": dynamic_group_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_dynamic_group_details,
response_type="DynamicGroup")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_dynamic_group_details,
response_type="DynamicGroup") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_group():\n _id = request.form['_id']\n name = request.form['name']\n data, code, message = FIELD_SERVICE.update_group(_id, name)\n return __result(data, code, message)",
"def update_group(groupname):\n name = request.get_json().get(\"name\", None)\n description = request.get_json().get(\"description\", None)\n response = jsonify(\n admin.update_group(current_app.scoped_session(), groupname, description, name)\n )\n return response",
"def do_group_update():\n target_group = Group.query.filter_by(id=request.form['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n target_group.name = request.form['name']\n target_group.group_meter_id = request.form['meter']\n target_group.group_production_meter_id_first = request.form['group_production_meter_id_first']\n target_group.group_production_meter_id_second = request.form[\n 'group_production_meter_id_second']\n\n db.session.commit()\n return group_list(\"Updated group \" + target_group.name)",
"def update_group(self, group_id, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.patch('groups/%s' % group_id, post_body)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)",
"def group_update(*, login_manager: LoginManager, group_id: str, **kwargs: Any):\n groups_client = login_manager.get_groups_client()\n\n # get the current state of the group\n group = groups_client.get_group(group_id)\n\n # assemble put data using existing values for any field not given\n # note that the API does not accept the full group document, so we must\n # specify name and description instead of just iterating kwargs\n data = {}\n for field in [\"name\", \"description\"]:\n if kwargs.get(field) is not None:\n data[field] = kwargs[field]\n else:\n data[field] = group[field]\n\n response = groups_client.update_group(group_id, data)\n\n formatted_print(response, simple_text=\"Group updated successfully\")",
"def update_group(self, group_name, new_group_name=None, new_path=None):\r\n params = {'GroupName' : group_name}\r\n if new_group_name:\r\n params['NewGroupName'] = new_group_name\r\n if new_path:\r\n params['NewPath'] = new_path\r\n return self.get_response('UpdateGroup', params)",
"def request_group_update():\n target_group = Group.query.filter_by(id=request.args['id']).first()\n if target_group is None:\n return group_list(\"Unknown group.\")\n\n return Response(\n render_template(\n 'admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/update\",\n id=target_group.id,\n name=target_group.name,\n meter=target_group.group_meter_id,\n group_production_meter_id_first=target_group.group_production_meter_id_first,\n group_production_meter_id_second=target_group.group_production_meter_id_second),\n mimetype='text/html')",
"def update(self,\n provider_id,\n group_id,\n group,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n 'group': group,\n })",
"def update(self):\r\n return self.connection._update_group('UpdateAutoScalingGroup', self)",
"def update_targetgroup(self, group_id, **kwargs):\r\n result = False\r\n if self._db(self._db.targetgroup.id==group_id).select():\r\n result = True\r\n self._db(self._db.targetgroup.id==group_id).update(**kwargs)\r\n self._db.commit()\r\n return result",
"def test_modify_group(self):\n response = self.client.modify_group(\"ABC123\")\n self.assertEqual(response[\"method\"], \"POST\")\n self.assertEqual(response[\"uri\"], \"/admin/v1/groups/ABC123\")\n self.assertEqual(util.params_to_dict(response[\"body\"]), {\"account_id\": [self.client.account_id]})",
"def update_group(self, group_id, new_description):\n url = self.groups_url + \"/\" + group_id\n new_data = json.dumps({\"description\": new_description})\n\n return requests.put(url, new_data, headers=self.headers)",
"def test_update_group(self):\n pass",
"async def update_contact_group(dbcon: DBConnection, contact_group_id: int, data: Dict[str, str]) -> None:\n\n async def _run(cur: Cursor) -> None:\n for key, value in data.items():\n if key not in ['name', 'active']:\n raise errors.IrisettError('invalid contact key %s' % key)\n q = \"\"\"update contact_groups set %s=%%s where id=%%s\"\"\" % key\n q_args = (value, contact_group_id)\n await cur.execute(q, q_args)\n\n if not await contact_group_exists(dbcon, contact_group_id):\n raise errors.InvalidArguments('contact group does not exist')\n await dbcon.transact(_run)",
"def update_group(self, group_id, update_group_details, **kwargs):\n resource_path = \"/groups/{groupId}\"\n method = \"PUT\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"update_group got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"groupId\": group_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_group_details,\n response_type=\"Group\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n body=update_group_details,\n response_type=\"Group\")",
"def update_group_with_http_info(self, bucket_id, group_id, group, **kwargs):\n\n all_params = ['bucket_id', 'group_id', 'group', 'if_match', 'if_none_match', 'fields']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update_group\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'bucket_id' is set\n if ('bucket_id' not in params) or (params['bucket_id'] is None):\n raise ValueError(\"Missing the required parameter `bucket_id` when calling `update_group`\")\n # verify the required parameter 'group_id' is set\n if ('group_id' not in params) or (params['group_id'] is None):\n raise ValueError(\"Missing the required parameter `group_id` when calling `update_group`\")\n # verify the required parameter 'group' is set\n if ('group' not in params) or (params['group'] is None):\n raise ValueError(\"Missing the required parameter `group` when calling `update_group`\")\n\n if 'if_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_match']):\n raise ValueError(\"Invalid value for parameter `if_match` when calling `update_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n if 'if_none_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_none_match']):\n raise ValueError(\"Invalid value for parameter `if_none_match` when calling `update_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n\n collection_formats = {}\n\n resource_path = '/buckets/{bucket_id}/groups/{group_id}'.replace('{format}', 'json')\n path_params = {}\n if 'bucket_id' in params:\n path_params['bucket_id'] = params['bucket_id']\n if 'group_id' in params:\n path_params['group_id'] = params['group_id']\n\n query_params = {}\n if 'fields' in params:\n query_params['_fields'] = params['fields']\n collection_formats['_fields'] = 'csv'\n\n header_params = {}\n if 'if_match' in params:\n header_params['If-Match'] = params['if_match']\n if 'if_none_match' in params:\n header_params['If-None-Match'] = params['if_none_match']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'group' in params:\n body_params = params['group']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Group',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def update_group(\n self,\n group,\n validate_only=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n # Wrap the transport method to add retry and timeout logic.\n if \"update_group\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"update_group\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.update_group,\n default_retry=self._method_configs[\"UpdateGroup\"].retry,\n default_timeout=self._method_configs[\"UpdateGroup\"].timeout,\n client_info=self._client_info,\n )\n\n request = group_service_pb2.UpdateGroupRequest(\n group=group, validate_only=validate_only,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"group.name\", group.name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n\n return self._inner_api_calls[\"update_group\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )",
"def update(person_group_id, name=None, user_data=None):\n url = 'persongroups/{}'.format(person_group_id)\n json = {\n 'name': name,\n 'userData': user_data,\n }\n\n return util.request('PATCH', url, json=json)",
"def ModifyGroup(self, group, reason=None, **kwargs):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/groups/%s/modify\" %\n (GANETI_RAPI_VERSION, group)), query, kwargs)",
"def update(self, consistencygroup, **kwargs):\n if not kwargs:\n return\n\n body = {\"consistencygroup\": kwargs}\n\n return self._update(\"/consistencygroups/%s\" %\n base.getid(consistencygroup), body)",
"def security_group_update(secgroup=None, auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.update_security_group(secgroup, **kwargs)",
"def _mod_group(self, command, group_id, group_type, buckets=None):\n self.datapath.send_msg(\n self.parser.OFPGroupMod(\n datapath=self.datapath,\n command=command,\n group_id=group_id,\n type_=group_type,\n buckets=buckets,\n )\n )",
"def update_groups(self, groups):\n self.fetch_group_messages() # preload messages before updating groups\n self.groups = groups\n self.put()",
"def update_adgroup(self, adgroup_id, name=None, adgroup_status=None,\n bid_type=None, bid_info=None, creative_id=None,\n tracking_specs=None, view_tags=None, objective=None,\n targeting=None, conversion_specs=None,\n batch=False):\n path = \"%s\" % adgroup_id\n args = {}\n if name:\n args['name'] = name\n if bid_type:\n args['bid_type'] = bid_type\n if bid_info:\n args['bid_info'] = json.dumps(bid_info)\n\n if creative_id:\n args['creative'] = json.dumps({'creative_id': creative_id})\n if tracking_specs:\n args['tracking_specs'] = json.dumps(tracking_specs)\n if view_tags:\n args['view_tags'] = json.dumps(view_tags)\n if objective:\n args['objective'] = objective\n if adgroup_status:\n args['adgroup_status'] = adgroup_status\n if targeting:\n args['targeting'] = json.dumps(targeting)\n if conversion_specs:\n args['conversion_specs'] = json.dumps(conversion_specs)\n return self.make_request(path, 'POST', args, batch=batch)",
"def update_research_group(self, employee_id, new_research_group):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET research_group = %s '\n 'WHERE id=%s;',\n (new_research_group, employee_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise",
"def replace_namespaced_group(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_group`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def put(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n cid = self.current_user.cid\n tid = self.current_user.tid\n gid = data.gid\n name = data.name\n logging.info(\"[UWEB] Modify group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n self.db.execute(\"UPDATE T_GROUP\"\n \" SET name = %s\"\n \" WHERE id = %s\",\n name, gid)\n\n # NOTE: wspush to client \n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status)\n except Exception as e:\n logging.exception(\"[UWEB] Modify group failed. cid: %s, Exception: %s\",\n self.current_user.cid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)",
"def patch(self,\n provider_id,\n group_id,\n group,\n ):\n return self._invoke('patch',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n 'group': group,\n })",
"def test_user_group_controller_update(self):\n pass",
"def patch_group_with_http_info(self, bucket_id, group_id, group, **kwargs):\n\n all_params = ['bucket_id', 'group_id', 'group', 'if_match', 'if_none_match', 'fields']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_group\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'bucket_id' is set\n if ('bucket_id' not in params) or (params['bucket_id'] is None):\n raise ValueError(\"Missing the required parameter `bucket_id` when calling `patch_group`\")\n # verify the required parameter 'group_id' is set\n if ('group_id' not in params) or (params['group_id'] is None):\n raise ValueError(\"Missing the required parameter `group_id` when calling `patch_group`\")\n # verify the required parameter 'group' is set\n if ('group' not in params) or (params['group'] is None):\n raise ValueError(\"Missing the required parameter `group` when calling `patch_group`\")\n\n if 'if_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_match']):\n raise ValueError(\"Invalid value for parameter `if_match` when calling `patch_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n if 'if_none_match' in params and not re.search('\\\\\\\"[0-9]+\\\\\\\"', params['if_none_match']):\n raise ValueError(\"Invalid value for parameter `if_none_match` when calling `patch_group`, must conform to the pattern `/\\\\\\\"[0-9]+\\\\\\\"/`\")\n\n collection_formats = {}\n\n resource_path = '/buckets/{bucket_id}/groups/{group_id}'.replace('{format}', 'json')\n path_params = {}\n if 'bucket_id' in params:\n path_params['bucket_id'] = params['bucket_id']\n if 'group_id' in params:\n path_params['group_id'] = params['group_id']\n\n query_params = {}\n if 'fields' in params:\n query_params['_fields'] = params['fields']\n collection_formats['_fields'] = 'csv'\n\n header_params = {}\n if 'if_match' in params:\n header_params['If-Match'] = params['if_match']\n if 'if_none_match' in params:\n header_params['If-None-Match'] = params['if_none_match']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'group' in params:\n body_params = params['group']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json', 'application/merge-patch+json', 'application/json-patch+json'])\n\n # Authentication setting\n auth_settings = ['basicAuth']\n\n return self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Group',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)"
] | [
"0.73748296",
"0.71882457",
"0.7182601",
"0.7062276",
"0.6967742",
"0.6875376",
"0.68748295",
"0.6828043",
"0.6739177",
"0.6604309",
"0.64636046",
"0.6460566",
"0.639448",
"0.6345957",
"0.6323035",
"0.6288533",
"0.6273564",
"0.6173461",
"0.61531174",
"0.61079484",
"0.6106953",
"0.6087284",
"0.60076416",
"0.5996293",
"0.5986506",
"0.5970233",
"0.5953732",
"0.5951706",
"0.5926803",
"0.59185225"
] | 0.7564378 | 0 |
Updates the specified identity provider. | def update_identity_provider(self, identity_provider_id, update_identity_provider_details, **kwargs):
resource_path = "/identityProviders/{identityProviderId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_identity_provider got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"identityProviderId": identity_provider_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_identity_provider_details,
response_type="IdentityProvider")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_identity_provider_details,
response_type="IdentityProvider") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_provider(self, provider_id, provider_name, endpoints, zone_id, provider_region):\n try:\n self.client.post('{api_url}/providers/{id}'.format(api_url=self.api_url, id=provider_id),\n action='edit',\n zone={'id': zone_id},\n connection_configurations=endpoints,\n provider_region=provider_region)\n self.changed = True\n except Exception as e:\n self.module.fail_json(msg=\"Failed to update provider. Error: {!r}\".format(e))",
"def update_identity_provider(module, sdk, cloud, idp):\n\n description = module.params.get('description')\n enabled = module.params.get('enabled')\n domain_id = module.params.get('domain_id')\n remote_ids = module.params.get('remote_ids')\n\n attributes = {}\n\n if (description is not None) and (description != idp.description):\n attributes['description'] = description\n if (enabled is not None) and (enabled != idp.is_enabled):\n attributes['enabled'] = enabled\n if (domain_id is not None) and (domain_id != idp.domain_id):\n attributes['domain_id'] = domain_id\n if (remote_ids is not None) and (remote_ids != idp.remote_ids):\n attributes['remote_ids'] = remote_ids\n\n if not attributes:\n return False, idp\n\n if module.check_mode:\n return True, None\n\n try:\n new_idp = cloud.identity.update_identity_provider(idp, **attributes)\n except sdk.exceptions.OpenStackCloudException as ex:\n module.fail_json(msg='Failed to update identity provider: {0}'.format(str(ex)))\n return (True, new_idp)",
"def update_provider(\n provider_id:UUID = Form(...),\n name:str = Form(...),\n qualification:str = Form(...),\n speciality:str = Form(...),\n phone:str = Form(...),\n department:Optional[str] = Form(\"N/A\"),\n organization:str = Form(...),\n location:Optional[str] = Form(\"N/A\"),\n address:str = Form(...),\n active:bool = Form(...)\n ):\n\n post_data = {\n \"name\": name,\n \"qualification\": qualification,\n \"speciality\": speciality,\n \"phone\": phone,\n \"department\": department,\n \"organization\": organization,\n \"location\": location,\n \"address\": address,\n \"active\": active\n }\n provider_data = open_for_reading()\n provider_data[str(provider_id)] = post_data\n open_for_writing(data=provider_data)\n return {\"msg\": \"updated\"}",
"def update(self,\n provider_id,\n l3_vpn_context,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'l3_vpn_context': l3_vpn_context,\n })",
"def update(self,\n provider_id,\n interface_id,\n provider_interface,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'interface_id': interface_id,\n 'provider_interface': provider_interface,\n })",
"def update(self,\n provider_id,\n provider_deployment_map_id,\n provider_deployment_map,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'provider_deployment_map_id': provider_deployment_map_id,\n 'provider_deployment_map': provider_deployment_map,\n })",
"def update(self,\n provider_id,\n group_id,\n group,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'group_id': group_id,\n 'group': group,\n })",
"def update(self,\n provider_id,\n interface_id,\n service_interface,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'interface_id': interface_id,\n 'service_interface': service_interface,\n })",
"def refresh_provider(self, provider_id):\n try:\n self.client.post('{api_url}/providers/{id}'.format(api_url=self.api_url, id=provider_id),\n action='refresh')\n self.changed = True\n except Exception as e:\n self.module.fail_json(msg=\"Failed to refresh provider. Error: {!r}\".format(e))",
"def update(self,identity,params=None, headers=None):\n path = self._sub_url_params('/payouts/:identity', {\n \n 'identity': identity,\n })\n \n if params is not None:\n params = {self._envelope_key(): params}\n\n response = self._perform_request('PUT', path, params, headers,\n retry_failures=True)\n return self._resource_for(response)",
"def provider(self, provider):\n\n self._provider = provider",
"def update(self, request, *args, **kwargs):\n response = super(ProviderViewSet, self).update(request, *args, **kwargs)\n response.data['message'] = \"Proveedor ha sido editado\"\n return response",
"def add_or_update_provider(self, provider_name, provider_type, endpoints, zone, provider_region,\n validate_provider_auth = True, initiate_refresh = True):\n zone_id = self.find_zone_by_name(zone or 'default')\n # check if provider with the same name already exists\n provider_id = self.find_provider_by_name(provider_name)\n if provider_id: # provider exists\n existing_config = self.get_provider_config(provider_id)\n\n # ManageIQ Euwe / CFME 5.7 API and older versions don't support certificate authority field in endpoint.\n # If it wasn't returned from existing provider configuration this means it is either unsupported or null,\n # in both cases we can remove null/empty certificate_authority from endpoints we want to update.\n self.filter_unsupported_fields_from_config(endpoints, existing_config['endpoints'], {'certificate_authority'})\n\n updates = self.required_updates(provider_id, endpoints, zone_id, provider_region, existing_config)\n\n if not updates:\n return dict(changed=self.changed,\n msg=\"Provider %s already exists\" % provider_name)\n\n old_validation_details = self.auths_validation_details(provider_id)\n operation = \"update\"\n self.update_provider(provider_id, provider_name, endpoints, zone_id, provider_region)\n roles_with_changes = set(updates[\"Added\"]) | set(updates[\"Updated\"])\n else: # provider doesn't exists, adding it to manageiq\n\n # ManageIQ Euwe / CFME 5.7 API and older versions don't support certificate authority field in endpoint.\n # filter empty fields if none on creation - No existing endpoints for new provider\n self.filter_unsupported_fields_from_config(endpoints, [{}], {'certificate_authority'})\n updates = None\n old_validation_details = {}\n operation = \"addition\"\n provider_id = self.add_new_provider(provider_name, provider_type,\n endpoints, zone_id, provider_region)\n roles_with_changes = [e['endpoint']['role'] for e in endpoints]\n\n if validate_provider_auth:\n authtypes_to_verify = []\n for e in endpoints:\n if e['endpoint']['role'] in roles_with_changes:\n authtypes_to_verify.append(e['authentication']['authtype'])\n result, details = self.verify_authenticaion_validation(provider_id, old_validation_details, authtypes_to_verify)\n else:\n result = \"Skipped Validation\"\n details = result\n\n if result == \"Invalid\":\n self.module.fail_json(msg=\"Failed to Validate provider authentication after {operation}. details: {details}\".format(operation=operation, details=details))\n elif result == \"Valid\" or result == \"Skipped Validation\":\n if initiate_refresh:\n self.refresh_provider(provider_id)\n message = \"Successful {operation} of {provider} provider. Authentication: {validation}. Refreshing provider inventory\".format(operation=operation, provider=provider_name, validation=details)\n else:\n message = \"Successful {operation} of {provider} provider. Authentication: {validation}.\".format(operation=operation, provider=provider_name, validation=details)\n elif result == \"Timed out\":\n message = \"Provider {provider} validation after {operation} timed out. Authentication: {validation}\".format(operation=operation, provider=provider_name, validation=details)\n return dict(\n provider_id=provider_id,\n changed=self.changed,\n msg=message,\n updates=updates\n )",
"def testUpdate(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath, useCache=False)\n pD = {self.__provKeyName: self.__provInfoL}\n ok = provU.store(pD)\n self.assertTrue(ok)\n #\n ok = provU.update(pD)\n self.assertTrue(ok)\n #\n fD = provU.fetch()\n self.assertTrue(self.__provKeyName in fD)\n self.assertDictEqual(pD, fD)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def setCurrentUser(self, provider):\n pass",
"def update_ldap_provider(self, body):\n try:\n self.logger.info('update_ldap_provider called.')\n\n # Validate required parameters\n self.logger.info(\n 'Validating required parameters for update_ldap_provider.')\n self.validate_parameters(body=body)\n\n # Prepare query URL\n self.logger.info('Preparing query URL for update_ldap_provider.')\n _url_path = '/public/ldapProvider'\n _query_builder = self.config.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n self.logger.info('Preparing headers for update_ldap_provider.')\n _headers = {\n 'accept': 'application/json',\n 'content-type': 'application/json; charset=utf-8'\n }\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for update_ldap_provider.')\n _request = self.http_client.put(\n _query_url,\n headers=_headers,\n parameters=APIHelper.json_serialize(body))\n AuthManager.apply(_request, self.config)\n _context = self.execute_request(_request,\n name='update_ldap_provider')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info('Validating response for update_ldap_provider.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n # Return appropriate type\n return APIHelper.json_deserialize(\n _context.response.raw_body,\n LdapProviderResponse.from_dictionary)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise",
"def update_user(id):\n pass",
"def provider(self, provider: Provider) -> None:\n self._provider = provider",
"def update_user():",
"def update(self,\n provider_id,\n bgp_routing_config,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'bgp_routing_config': bgp_routing_config,\n })",
"def fusion_api_edit_server_profile(self, body, uri, api=None, headers=None, param=''):\n return self.profile.update(body, uri, api, headers, param=param)",
"def updateUser(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def fusion_api_edit_user(self, body, uri, api=None, headers=None):\n return self.user.update(body, uri, api, headers)",
"def update(self,\n provider_id,\n l3vpn_id,\n l3_vpn,\n ):\n return self._invoke('update',\n {\n 'provider_id': provider_id,\n 'l3vpn_id': l3vpn_id,\n 'l3_vpn': l3_vpn,\n })",
"def on_identity_loaded(sender, identity):\n key = current_app.config.get(\n \"OAUTHCLIENT_CERN_OPENID_SESSION_KEY\",\n OAUTHCLIENT_CERN_OPENID_SESSION_KEY,\n )\n identity.provides.update(session.get(key, []))",
"def put(self, entity, schema):\n profile = entity.profiles.get_or_404(schema=schema)\n try:\n update_data = json.loads(request.data)\n except json.JSONDecodeError as e:\n raise APIBadRequest(str(e))\n\n if 'identity' in update_data:\n profile.identity = update_data['identity']\n if 'servers' in update_data:\n profile.servers = update_data['servers']\n\n profile.save()\n\n return jsonify(profile.to_json()), 200",
"def put(self, id):\n return userDao.update(id, api.payload)",
"def update(self, uuid, android_key):\n try:\n pmanager = PushManager.query.filter_by(\n uuid=uuid\n ).one_or_none()\n if pmanager is None:\n raise GatlinException(\"App not exist\", 404)\n self._provider.app_name = pmanager.app_name\n _ = self._provider.set_android_platform(android_key)\n pmanager.android_key = android_key\n return pmanager.save()\n except GatlinException as exception:\n raise exception",
"def update_user():\n #TODO user update \n pass",
"def test_update_profile(self):\n self.cim.update_profile(\n customer_id=u\"222\",\n description=u\"Foo bar baz quz\",\n email=u\"[email protected]\",\n customer_profile_id=u\"122\"\n )"
] | [
"0.6841701",
"0.6564375",
"0.63220054",
"0.62459624",
"0.623236",
"0.5886005",
"0.58837974",
"0.5625915",
"0.5601719",
"0.55890137",
"0.5554441",
"0.54530036",
"0.54441226",
"0.539471",
"0.5389379",
"0.53829527",
"0.5339994",
"0.5336483",
"0.5261702",
"0.521437",
"0.52008426",
"0.5177207",
"0.51419634",
"0.5112954",
"0.51001453",
"0.50945497",
"0.5057962",
"0.5035924",
"0.5022",
"0.5011586"
] | 0.7050549 | 0 |
Updates the specified network source. | def update_network_source(self, network_source_id, update_network_source_details, **kwargs):
resource_path = "/networkSources/{networkSourceId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_network_source got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"networkSourceId": network_source_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_network_source_details,
response_type="NetworkSources")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_network_source_details,
response_type="NetworkSources") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_sources(self, *args, **kwargs):\n tasks.update_sources()\n return Response({})",
"def set_source(self, source):\n self.data['source'] = source",
"def update_source(wn, old_source, target, new_source, change_list=None):\n rel_type = find_type(old_source, target)\n delete_rel(old_source, target, change_list)\n insert_rel(new_source, rel_type, target, change_list)\n if rel_type in wordnet.inverse_synset_rels:\n inv_rel_type = wordnet.inverse_synset_rels[rel_type]\n delete_rel(target, old_source, change_list)\n insert_rel(target, inv_rel_type, new_source, change_list)",
"def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))",
"def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()",
"def update(self, src, labels): # real signature unknown; restored from __doc__\n pass",
"def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)",
"async def async_set_source(self, source):\n self._source = source\n #self.async_schedule_update_ha_state(True)",
"def update_edge_by_source_target(self, _source, _target, source=None, target=None, name=None, data={}):\n return self.make_request(\"PUT\", \"edges?source=%s&target=%s\"%(_source,_target), { \"id\" : name, \"source\" : source, \"target\" : target, \"data\" : data })",
"def update_feed_source(request):\n try:\n feed = FeedSource.objects.get(id=request.id)\n feed.status = not feed.status\n feed.save()\n except (ValidationError, FeedSource.DoesNotExist) as e:\n exc = e\n logger(__name__, \"Could not update Feed Source due to {}\".format(str(exc)))\n errors = _get_errors(exc)\n return feeds_pb2.OperationStatus(\n op_status=feeds_pb2.Status.Value('FAILURE'),\n details={'errors': feeds_pb2.RepeatedString(data=errors)},\n )\n return feeds_pb2.OperationStatus(\n op_status=feeds_pb2.Status.Value('SUCCESS'),\n )",
"def update_network(self, context, net_id, network):\n LOG.debug(_(\"NeutronRestProxyV2.update_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n session = context.session\n with session.begin(subtransactions=True):\n new_net = super(NeutronRestProxyV2, self).update_network(\n context, net_id, network)\n self._process_l3_update(context, new_net, network['network'])\n\n # update network on network controller\n self._send_update_network(new_net, context)\n return new_net",
"def _set_source(self, source):\n if source != self._source:\n self._source = source\n self._channel = \"\"\n self._channel_name = \"\"\n self._is_forced_val = True\n self._forced_count = 0",
"def update(src):",
"def update_source(self):\n if self.verbose:\n print(\"Updating source\")\n self.source.data = self.source_data\n if self.source.selected is not None:\n self.source.selected.indices = self.selection\n for c in self.callbacks[\"update_source\"]:\n c()\n self.pending_update = False\n if self.update_buffer is not None:\n self.context.doc.add_next_tick_callback(self.update_buffer)\n self.update_buffer = None",
"def update_source(self, id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.update_source_with_http_info(id, **kwargs)\n else:\n (data) = self.update_source_with_http_info(id, **kwargs)\n return data",
"def update_node(self, node, updating_node):\n out_edges = list(self.source_net.edges(node, data=True))\n self.remove_node(node)\n self.source_net.add_node(node, attr_dict=self.source_net.nodes[updating_node]['attr_dict'])\n self.source_net.add_edges_from(out_edges)\n\n # Transfer incoming edges\n for u, v, data in self.source_net.in_edges(updating_node, data=True):\n self.source_net.add_edge(u, node, **data)\n\n self.remove_node(updating_node)",
"def set_source(self, source_name):\n self.source = source_name",
"def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net",
"def update_target_network(self):\n self.target_dqn.set_weights.remote(self.dqn.get_weights.remote())",
"def update(self, ex):\r\n if not self.optimizer:\r\n raise RuntimeError('No optimizer set.')\r\n\r\n # Train mode\r\n self.network.train()\r\n\r\n source_ids = ex['source_ids']\r\n source_pos_ids = ex['source_pos_ids']\r\n source_type_ids = ex['source_type_ids']\r\n source_mask = ex['source_mask']\r\n label = ex['label']\r\n\r\n if self.use_cuda:\r\n label = label.cuda(non_blocking=True)\r\n source_ids = source_ids.cuda(non_blocking=True)\r\n source_pos_ids = source_pos_ids.cuda(non_blocking=True) \\\r\n if source_pos_ids is not None else None\r\n source_type_ids = source_type_ids.cuda(non_blocking=True) \\\r\n if source_type_ids is not None else None\r\n source_mask = source_mask.cuda(non_blocking=True) \\\r\n if source_mask is not None else None\r\n\r\n # Run forward\r\n score = self.network(source_ids=source_ids,\r\n source_pos_ids=source_pos_ids,\r\n source_type_ids=source_type_ids,\r\n source_mask=source_mask)\r\n\r\n # Compute loss and accuracies\r\n loss = self.criterion(score, label)\r\n\r\n if self.args.gradient_accumulation_steps > 1:\r\n loss = loss / self.args.gradient_accumulation_steps\r\n\r\n if self.args.fp16:\r\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\r\n scaled_loss.backward()\r\n else:\r\n loss.backward()\r\n\r\n if (self.updates + 1) % self.args.gradient_accumulation_steps == 0:\r\n if self.args.fp16:\r\n torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.args.grad_clipping)\r\n else:\r\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.args.grad_clipping)\r\n\r\n self.optimizer.step()\r\n self.scheduler.step() # Update learning rate schedule\r\n self.optimizer.zero_grad()\r\n\r\n self.updates += 1\r\n\r\n return loss.item()",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def source(self, source):\n\n self._source = source",
"def update_target_q_network(self):\n assert self.target_network != None\n self.target_network.run_copy()",
"def update_target_net(self, sess):\n sess.run(self.update_target_net_op)",
"def update_target_network(self) -> NoReturn:\n self.target.load_state_dict(self.model.state_dict())"
] | [
"0.6329532",
"0.612555",
"0.60807055",
"0.6027774",
"0.58959955",
"0.5872399",
"0.5833497",
"0.5821681",
"0.58040607",
"0.5792299",
"0.57776225",
"0.5715011",
"0.5686939",
"0.5681682",
"0.5670427",
"0.5601962",
"0.5567387",
"0.5534381",
"0.5495406",
"0.5481051",
"0.54780453",
"0.54780453",
"0.54780453",
"0.54780453",
"0.54780453",
"0.54780453",
"0.54780453",
"0.5462909",
"0.54583734",
"0.54476845"
] | 0.7314297 | 0 |
Updates the specified tag default. If you specify that a value is required, a value is set during resource creation (either by the user creating the resource or another tag default). If no value is set, resource creation is blocked. If the `isRequired` flag is set to \"true\", the value is set during resource creation. If the `isRequired` flag is set to \"false\", the value you enter is set during resource creation. | def update_tag_default(self, tag_default_id, update_tag_default_details, **kwargs):
resource_path = "/tagDefaults/{tagDefaultId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_tag_default got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"tagDefaultId": tag_default_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_tag_default_details,
response_type="TagDefault")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_tag_default_details,
response_type="TagDefault") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_default(self, name, default, group=None):\n opt_info = self._get_opt_info(name, group)\n opt_info['default'] = self._get_enforced_type_value(\n opt_info['opt'], default)\n opt_info['location'] = LocationInfo(\n Locations.set_default,\n _get_caller_detail(3), # this function has a decorator to skip\n )",
"def _update_default(self, default_value):\n if self.type == \"uri_folder\" or self.type == \"uri_file\":\n self.default = default_value\n return\n else:\n if isinstance(default_value, float) and not math.isfinite(default_value):\n # Since nan/inf cannot be stored in the backend, just ignore them.\n # logger.warning(\"Float default value %r is not allowed, ignored.\" % default_value)\n return\n \"\"\"Update provided default values.\n Here we need to make sure the type of default value is allowed or it could be parsed..\n \"\"\"\n if default_value is not None and not isinstance(default_value, self._allowed_types):\n try:\n default_value = self._parse(default_value)\n except Exception as e:\n if self.name is None:\n msg = \"Default value of %s Input cannot be parsed, got '%s', type = %s.\" % (\n self.type,\n default_value,\n type(default_value),\n )\n else:\n msg = \"Default value of %s Input '%s' cannot be parsed, got '%s', type = %s.\" % (\n self.type,\n self.name,\n default_value,\n type(default_value),\n )\n raise MldesignerComponentDefiningError(cause=msg) from e\n self.default = default_value",
"def Option(name: str, value: Union[str, int], default: Optional[bool] = None) -> Dict:\n doc = {'name': name, 'value': value}\n if default is not None:\n doc['isDefault'] = default\n return doc",
"def default(self, default):\n\n self._set_field(\"value\", default)",
"def SetDefaultVersion(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def set_default(self, default):\n\n\t\tif default is not None and not isinstance(default, bool):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: default EXPECTED TYPE: bool', None, None)\n\t\t\n\t\tself.__default = default\n\t\tself.__key_modified['default'] = 1",
"def _set_default(name, value, context):\n if name not in context:\n context[name] = value",
"def delete_tag_default(self, tag_default_id, **kwargs):\n resource_path = \"/tagDefaults/{tagDefaultId}\"\n method = \"DELETE\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_request_id\",\n \"if_match\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"delete_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagDefaultId\": tag_default_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing),\n \"if-match\": kwargs.get(\"if_match\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params)",
"def _update_annotation_with_default(anno, name, default):\n # Create instance if is type class\n complete_annotation = anno\n if _is_dsl_type_cls(anno):\n complete_annotation = anno()\n complete_annotation.name = name\n if default is Input._EMPTY:\n return complete_annotation\n if isinstance(complete_annotation, Input):\n # Non-parameter Input has no default attribute\n if complete_annotation._is_parameter_type and complete_annotation.default is not None:\n # logger.warning(\n # f\"Warning: Default value of f{complete_annotation.name!r} is set twice: \"\n # f\"{complete_annotation.default!r} and {default!r}, will use {default!r}\"\n # )\n pass\n complete_annotation._update_default(default)\n return complete_annotation",
"def setdefault(self, k, d=None): # real signature unknown; restored from __doc__\n pass",
"def create_tag_default(self, create_tag_default_details, **kwargs):\n resource_path = \"/tagDefaults\"\n method = \"POST\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"opc_retry_token\",\n \"opc_request_id\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"create_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"opc-retry-token\": kwargs.get(\"opc_retry_token\", missing),\n \"opc-request-id\": kwargs.get(\"opc_request_id\", missing)\n }\n header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n if not isinstance(retry_strategy, retry.NoneRetryStrategy):\n self.base_client.add_opc_retry_token_if_needed(header_params)\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_default_details,\n response_type=\"TagDefault\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=create_tag_default_details,\n response_type=\"TagDefault\")",
"def default_value(self, value: Any) -> None:\n self.sdc_resource.set_input_default_value(self, value)\n self._default_value = value",
"def _update_annotation_with_default(\n anno: Union[Annotation, Input, Output], name: str, default: Any\n ) -> Union[Annotation, Input, Output]:\n # Create instance if is type class\n complete_annotation = anno\n if _is_dsl_type_cls(anno):\n complete_annotation = anno()\n complete_annotation._port_name = name\n if default is Input._EMPTY:\n return complete_annotation\n if isinstance(complete_annotation, Input):\n # Non-parameter Input has no default attribute\n if complete_annotation._is_primitive_type and complete_annotation.default is not None:\n # logger.warning(\n # f\"Warning: Default value of f{complete_annotation.name!r} is set twice: \"\n # f\"{complete_annotation.default!r} and {default!r}, will use {default!r}\"\n # )\n pass\n complete_annotation._update_default(default)\n if isinstance(complete_annotation, Output) and default is not None:\n msg = (\n f\"Default value of Output {complete_annotation._port_name!r} cannot be set:\"\n f\"Output has no default value.\"\n )\n raise UserErrorException(msg)\n return complete_annotation",
"def _default(self, section, option, default):\r\n if not self.has_section(section):\r\n self.add_section(section)\r\n if not self.has_option(section, option):\r\n self.set(section, option, default)\r\n self.save()",
"def default(self, default):\n\n self._default = default",
"def f_default(self, default = 1) :\n pass",
"def default(default_value, force=False):\n def default_setter(value):\n \"\"\"\n Sets the value to the given default value, assuming the original value\n is not set or the default value is set to forced.\n\n :param Any value: Injected by CKAN core\n :rtype: Any\n \"\"\"\n return value if value and not force else default_value\n\n return default_setter",
"def setdefault(self, name, default):\n return self.data.setdefault(name, default)",
"def validate_default(self, value):\n return self.__validate(value, self.validate_default_element)",
"def _defaulted(cls, value, default):\n return default if value is None else value",
"def default(self, value):\n # save {value} as the default\n self._default = value\n # all done\n return",
"def default_value(self, default_value):\n\n self._default_value = default_value",
"def default_value(self, default_value):\n\n self._default_value = default_value",
"def default_value(self, default_value):\n\n self._default_value = default_value",
"def get_tag_default(self, tag_default_id, **kwargs):\n resource_path = \"/tagDefaults/{tagDefaultId}\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"get_tag_default got unknown kwargs: {!r}\".format(extra_kwargs))\n\n path_params = {\n \"tagDefaultId\": tag_default_id\n }\n\n path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}\n\n for (k, v) in six.iteritems(path_params):\n if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):\n raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"TagDefault\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n path_params=path_params,\n header_params=header_params,\n response_type=\"TagDefault\")",
"def set_default_to_meta(meta, key, default_value):\n if callable(default_value):\n default_value = default_value()\n\n meta.setdefault(key, default_value)",
"def register_option_pair(key, default_value):\n\n _OPTION_TEMPLATE[key] = default_value",
"def _update_default(self, default_value):\n enum_val = self._parse(default_value)\n if self._enum_class and isinstance(enum_val, self._enum_class):\n enum_val = enum_val.value\n self.default = enum_val",
"def _update_default(self, default_value):\n enum_val = self._parse(default_value)\n if self._enum_class and isinstance(enum_val, self._enum_class):\n enum_val = enum_val.value\n self.default = enum_val",
"def SetDefaultVersion(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')"
] | [
"0.6250223",
"0.62021977",
"0.6062567",
"0.60212696",
"0.59388936",
"0.5849943",
"0.58077544",
"0.5761992",
"0.5740362",
"0.57305574",
"0.5730093",
"0.57221216",
"0.5609175",
"0.5585611",
"0.5582013",
"0.5553106",
"0.5544306",
"0.55127645",
"0.55110776",
"0.5498642",
"0.5480265",
"0.546484",
"0.546484",
"0.546484",
"0.54296577",
"0.53810877",
"0.5378663",
"0.5361359",
"0.5361359",
"0.5359547"
] | 0.7059619 | 0 |
Updates the capabilities of the specified user. | def update_user_capabilities(self, user_id, update_user_capabilities_details, **kwargs):
resource_path = "/users/{userId}/capabilities"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_user_capabilities got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_user_capabilities_details,
response_type="User")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_user_capabilities_details,
response_type="User") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_user():",
"def update(self, user: U) -> None:\n ...",
"def update(self, user: 'User', privileges: 'Optional[List[str]]' = None) -> 'Optional[User]':\n return self._update(schema=UserSchema(), entity=user, privileges=privileges)",
"def getCapabilities4User(session_key, user=None):\n\n roles = []\n capabilities = []\n\n # Get user info\n if user is not None:\n logger.debug('Retrieving role(s) for current user: %s', user)\n userEntities = entity.getEntities('authentication/users/%s' % user, count=-1, sessionKey=session_key)\n\n for stanza, settings in userEntities.items():\n if stanza == user:\n for key, val in settings.items():\n if key == 'roles':\n logger.debug('Successfully retrieved role(s) for user: %s', user)\n roles = val\n\n # Get capabilities\n for role in roles:\n logger.debug('Retrieving capabilities for current user: %s', user)\n roleEntities = entity.getEntities('authorization/roles/%s' % role, count=-1, sessionKey=session_key)\n\n for stanza, settings in roleEntities.items():\n if stanza == role:\n for key, val in settings.items():\n if key == 'capabilities' or key == 'imported_capabilities':\n logger.debug('Successfully retrieved %s for user: %s', key, user)\n capabilities.extend(val)\n\n return capabilities",
"def update_user_entitlement(self, document, user_id):\n route_values = {}\n if user_id is not None:\n route_values['userId'] = self._serialize.url('user_id', user_id, 'str')\n content = self._serialize.body(document, '[JsonPatchOperation]')\n response = self._send(http_method='PATCH',\n location_id='8480c6eb-ce60-47e9-88df-eca3c801638b',\n version='6.0-preview.3',\n route_values=route_values,\n content=content,\n media_type='application/json-patch+json')\n return self._deserialize('UserEntitlementsPatchResponse', response)",
"def set_capabilities(self, capabilities: WlSeat.capability) -> None:\n lib.wlr_seat_set_capabilities(self._ptr, capabilities)",
"def update_caps(self, caps, source):\n return ObjectCapabilities.update_capabilities(self, caps, source)",
"def update_user():\n #TODO user update \n pass",
"def user_capacity(self, user_capacity: SmartSsdUserCapacity):\n\n self._user_capacity = user_capacity",
"def update_capabilities(self):\n LOG.debug((\"Store %s doesn't support updating dynamic \"\n \"storage capabilities. Please overwrite \"\n \"'update_capabilities' method of the store to \"\n \"implement updating logics if needed.\") %\n reflection.get_class_name(self))",
"def update_user(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload=user, request_type=self.REQUEST_PUT, version=\"v1\")",
"def update_user(self, user):\n query = TABELLE['id_users']['update']\n return self.execute(query,\n (user['admin'], user['tester'], user['loot_user'], user['loot_admin'], user['banned'],\n user['id']))",
"def set_capabilities(self, *dynamic_capabilites):\n for cap in dynamic_capabilites:\n self._capabilities |= int(cap)",
"def updateUser(self, payload):\n\t\turl = \"https://habitica.com/api/v3/user\"\n\t\treturn(putUrl(url, self.credentials, payload))",
"def sipserver_user_update(self, user: str, password: str) -> None:\n self.update_endpoint_in_sipserver(endpoint=user, password=password)",
"def modify_user(user_data):\r\n raise NotImplementedError()",
"def update_user_data(self, new_user: User):\n self.user_data.update_user_data(new_user)",
"def update_user(user_id, data):\n logging.debug(\"Uptating user: user_id={}\".format(user_id))\n return ask('appusers/{0}'.format(user_id), data, 'put')",
"def update_user_metrics(self,user_id:int)->None:\n with connection.cursor() as cursor:\n cursor.execute(f\"SELECT update_user_metrics({user_id})\")\n ##TODO: this should return something ",
"def update_user(self, instance, user, name=None, password=None, host=None):\n return instance.update_user(user, name=name, password=password,\n host=host)",
"def update(self, user):\n\n\t\tif self == user.classroom:\n\t\t\treturn\n\n\t\tself.size += user.classroom.size\n\t\tuser.set_classroom(self)",
"def update_user(cls, **kwargs):\n return cls._do_call(\n 'PUT', cls.api_endpoint + 'users', params=kwargs)",
"def put(self, user_id):\r\n return update_user(request, user_id)",
"def do_user_update():\n targetUsers = User.query.filter_by(id=request.form['id']).all()\n if not any(targetUsers):\n return user_list(\"Unknown user.\")\n\n targetUser = targetUsers[0]\n\n targetUser.first_name = request.form['first_name']\n targetUser.name = request.form['name']\n targetUser.nick = request.form['nick']\n targetUser.mail = request.form['mail']\n targetUser.role = request.form['role']\n targetUser.state = request.form['state']\n targetUser.gender = request.form['gender']\n targetUser.meter_id = request.form['meter_id']\n targetUser.group_id = request.form['group_id']\n\n db.session.commit()\n return user_list(\"Updated user \" + targetUser.name)",
"def test_040_update_user(self):\n\n testflow.step(\"Updating user %s\", TEST_USER2)\n assert USER_CLI.run(\n 'edit',\n TEST_USER2,\n attribute='firstName=userX2',\n )[0]",
"def update_user(self, user, name=None, password=None, host=None):\n return self._user_manager.update(user, name=name, password=password,\n host=host)",
"def update_user(self):\n self.client.force_authenticate(user=self.user)\n self.response = self.client.patch(\n reverse(\n 'edit_account',kwargs={ 'pk': self.user.id}),\n self.updated_data, format='json'\n )\n self.user = CustomUser.objects.get(username=self.user.username)",
"def update_user(user_id):\n update_usr = request.get_json()\n if not update_usr:\n abort(400, {'Not a JSON'})\n usr = storage.get(User, user_id)\n if not usr:\n abort(404)\n else:\n for key, value in update_usr.items():\n setattr(usr, key, value)\n storage.save()\n return jsonify(usr.to_dict())",
"def update_user_affinity(self, user_id, candidate_with_feedback):\n # Update only user's neighbor that is Candidate with feedback\n neigh = candidate_with_feedback.neighbor_id_rated\n self.user_affinity.update_preference(elem1=user_id,\n elem2=neigh,\n feedback=candidate_with_feedback.feedback)",
"def put(self, user_id):\n data = request.json\n return update_user(data, user_id)"
] | [
"0.5838249",
"0.5771097",
"0.57477796",
"0.57301706",
"0.5712233",
"0.56142646",
"0.5583086",
"0.55510235",
"0.5524724",
"0.5517849",
"0.5512739",
"0.55080456",
"0.54812056",
"0.5419713",
"0.54144186",
"0.5388361",
"0.5363638",
"0.5359393",
"0.5356132",
"0.53485364",
"0.5303966",
"0.52659965",
"0.52228224",
"0.52068657",
"0.5206759",
"0.51840377",
"0.51431036",
"0.51288265",
"0.5102041",
"0.50938725"
] | 0.71365345 | 0 |
Return the classroom that has the given classroomId. Otherwise return None | def getClassroomById(classroomId):
for classroom in classroomEntities:
if classroom["classroomId"] == classroomId:
return classroom.copy()
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_room(self, name=None, id=None):\n \n if(name):\n return self.rooms[name] if name in self.rooms else None\n if(id):\n return next((v for (k,v) in self.rooms.items() if v.id == id), None)\n return None",
"def getRoomById(self, id):\n for room in self.rooms:\n if room.id == id:\n return room\n\n return None",
"def GetRoom(self, id):\n try:\n return self._rooms[id]\n except:\n return None",
"def find_general_class(self, class_id):\n for class_ in my_classes:\n if class_.class_id == class_id:\n return class_\n\n return None",
"def get_room_by_id(self, id):\n if not isinstance(id, int):\n id = int(id)\n if self.rooms.has_key(id):\n return self.rooms[id]\n raise RuntimeError, \"Room not known\"",
"def get_skill_class(cursor, _class):\n cursor.execute('SELECT id FROM classes WHERE temp_id = ?', (_class,))\n data = cursor.fetchone()\n try:\n return data[0]\n except TypeError:\n l.error(\"The Class {} doesn't exists.\".format(_class))",
"def get_course(self, id):\n id = str(id)\n for i in range(len(self.courses)):\n if self.courses[i].id == id:\n return self.courses[i]",
"def deleteClassroom(classroomId):\n for classroom in classroomEntities:\n if classroom[\"classroomId\"] == classroomId:\n selectedClassroom = classroom\n classroomEntities.remove(selectedClassroom)\n return True\n return False",
"def get_room(self, roomName):\n for room in self.rooms:\n if roomName == room.get_name():\n return room",
"def get_room(self, room_name):\r\n try:\r\n return self._rooms[room_name]\r\n except KeyError:\r\n return None",
"def find_category(category_id: TourneyCategoryID) -> Optional[TourneyCategory]:\n return TourneyCategory.query.get(category_id)",
"def find_by_id(self, id_):\n return self.by_id.get(id_)",
"def get_for_type(class_, vehicle):\n Category = class_\n found = session.query(Category).filter_by(name=vehicle.get_category_id()).first()\n return found",
"def get_cell(self, cell_id: str) -> Optional[Cell]:\n\n for cell in self.cells:\n if cell.id == cell_id:\n return cell\n return None",
"def _find_room_helper(room_obj_list, room):\n\tfor r_obj in room_obj_list:\n\t\tif r_obj[0] == room:\n\t\t\treturn r_obj[1]\n\treturn None",
"def helpClassroom(classroomId):\n selectedClassroomCopy = getClassroomById(classroomId)\n print(\"Class Id: \" + selectedClassroomCopy[\"classroomId\"])\n print(\"Name: \" + selectedClassroomCopy[\"classroomName\"])\n print(\"Capacity: \" + selectedClassroomCopy[\"capacity\"])\n print(\"Location: \" + selectedClassroomCopy[\"location\"])\n return True",
"def by_id(cls, id):\n try:\n return DBSession.query(cls).filter(cls.id == id).one()\n except (NoResultFound, MultipleResultsFound):\n return None",
"def get_trainer_by_id(self, id):\n # Validates id\n TrainerManager._int_validator(id)\n # Database Query\n session = self._db_session()\n existing_trainer = session.query(RegularTrainer).filter(\n RegularTrainer.trainer_id == id).first()\n if isinstance(\n existing_trainer,\n AbstractTrainer) and existing_trainer.type == \"Gym Leader\":\n existing_trainer = None\n if existing_trainer is None:\n existing_trainer = session.query(GymLeader).filter(\n GymLeader.trainer_id == id).first()\n session.close()\n\n return existing_trainer",
"def _get_traj_by_id(self, itsid):\n for traj in self._trajlist:\n if traj.id == itsid:\n return traj\n return None",
"def get_room(room_id):\n try:\n room_id = int(room_id)\n room_entry = read_criteria(Room,{\"id\":room_id},session)\n except ValueError:\n room_entry = None\n # if the provided id doesn't match any room in the db, return -1 to indicate not found\n if room_entry is None:\n room = {\"roomId\":-1}\n status_code = 404\n else:\n status_code = 200\n room = room_json(room_entry, session,app.config[\"OFFLINE_TESTING\"], login_session)\n return generate_response(room,status_code)",
"def what_is(self, _id):\n for g in self.groups:\n if _id in self.h_group_ids[g]:\n return g\n return None",
"def get_rel_thread(self, org_id, rel_id):\n for thread in self.get_org_question(org_id).iter('Thread'):\n if thread.attrib['THREAD_SEQUENCE'] == org_id + \"_\" + rel_id:\n return thread\n return None",
"def get_by_id(cls, id):\n return cls.query().get(id)",
"def get_course_by_id(course_id):\n course = Courses.query. \\\n filter_by(id=course_id). \\\n first_or_404()\n\n return course",
"def modifyClassroom(classroomId, classroomName, capacity,location):\n for classroom in classroomEntities:\n if classroom[\"classroomId\"] == classroomId:\n selectedClassroom = classroom\n selectedClassroom[\"classroomName\"] = classroomName\n selectedClassroom[\"capacity\"] = capacity\n selectedClassroom[\"location\"] = location\n return True\n return False",
"async def get_category(cls, session: AsyncSession, id: int) -> Optional[Category]:\n\n stmt = select(Category).where(Category.id == id)\n result = await session.execute(stmt)\n return result.scalars().first()",
"def get_project(self, id):\n for project in self.projects:\n if project.id == int(id):\n ret_val = project\n break\n else:\n ret_val = None\n\n return ret_val",
"def get_course_by_id(course_key, depth=0):\r\n course = modulestore().get_course(course_key, depth=depth)\r\n if course:\r\n return course\r\n else:\r\n raise Http404(\"Course not found.\")",
"def get_room_name(dungeon, room):\n for n in dungeon:\n if dungeon[n] == room:\n return n\n return None",
"def get_by_id(cls, item_id):\n return db_session.query(cls).filter(cls.id == item_id).first()"
] | [
"0.62929714",
"0.62483174",
"0.61793196",
"0.6084332",
"0.6035611",
"0.5742967",
"0.569907",
"0.5663777",
"0.56626433",
"0.5645693",
"0.5644132",
"0.5593693",
"0.55252326",
"0.54916966",
"0.54792434",
"0.5459054",
"0.54231",
"0.5395465",
"0.5352263",
"0.5347712",
"0.5322388",
"0.530201",
"0.5295201",
"0.52760977",
"0.5248419",
"0.524741",
"0.5246495",
"0.5236366",
"0.5229425",
"0.5194463"
] | 0.83576775 | 0 |
Store the classroom inside the classroom data list. Return True if the operation is successful | def addClassroom(classroomName, capacity,location):
for classroom in classroomEntities:
if classroom["classroomName"] == classroomName:
print("Two classrooms can not have same name")
return False
if classroomEntities==[]:
lastSavedIdNumber = "0"
else:
lastSavedId=classroomEntities[-1]["classroomId"] #update classroomId as first element in classroomEntities list
lastSavedIdNumber=lastSavedId[2:]
numberOfDigitsInID = 3
if lastSavedIdNumber == "9" * len(lastSavedIdNumber):
numberOfDigitsInID = len(lastSavedIdNumber) + 1
classroomId="CR"+str(int(lastSavedIdNumber)+1).rjust(numberOfDigitsInID,"0")
# add the new Classroom
newClassroom = {}
newClassroom["classroomId"] = classroomId
newClassroom["classroomName"] = classroomName
newClassroom["capacity"] = capacity
newClassroom["location"] = location
classroomEntities.append(newClassroom)
print(f"Class Room is added into the system, Class Room id is {classroomId}.")
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def modifyClassroom(classroomId, classroomName, capacity,location):\n for classroom in classroomEntities:\n if classroom[\"classroomId\"] == classroomId:\n selectedClassroom = classroom\n selectedClassroom[\"classroomName\"] = classroomName\n selectedClassroom[\"capacity\"] = capacity\n selectedClassroom[\"location\"] = location\n return True\n return False",
"def save(self, force_insert=False, force_update=False, using=None,\n\t\t\t update_fields=None):\n\t\tif (self.capacity - self.occupied_sits) < 0:\n\t\t\traise ValueError(\"all sits in this classroom are occupied try other classes\")\n\t\telse:\n\t\t\tsuper(ClassRoom, self).save()",
"def saveClassroomData():\n with open(\"ClassRoomData.txt\",\"wb\") as classroomData:\n pickle.dump(classroomEntities,classroomData)",
"def store_data(self, data):\n self.data.append(data)",
"def class_to_db(self):",
"def save_data(self):\n db.session.add(self)\n db.session.commit( )",
"def store_all_to_database(self, session):\n\n description = 'Established in 1974, JSM is a family-owned provider of quality apartments. We offer a variety of units from studios to five bedrooms with every location benefitting from our award winning amenities, responsive 24 hour maintenance, and friendly property management staff. JSM Development began in Champaign, IL, and manages roughly 1,500 apartments and 450,000 sq/ft of commercial space. JSM has been a major contributor to the development of Campustown in Champaign and the East Campus area in Urbana at the University of Illinois. These popular locations are now home to major national retailers such as Urban Outfitters, Chipotle, Panera, Cold Stone Creamery, and Noodles & Co.'\n\n # Insert a JSM company instance into the database\n current_company = Company(\n name='JSM',\n baseurl='https://apartments.jsmliving.com/',\n description = description\n )\n session.add(current_company)\n\n # Iterate over the apartments, storing each in the database\n for apartment in self.apartment_data:\n logging.info(\"Inserting %s to database\", apartment['name'])\n new_apartment = Apartment(\n company=current_company,\n url=apartment['url'],\n name=apartment['name'],\n bedrooms=apartment['bedrooms'],\n bathrooms=apartment['bathrooms'],\n price=apartment['price'],\n leasing_period=apartment['leasing_period'],\n description=apartment['description'],\n address=apartment['address'],\n lat=apartment['lat'],\n lng=apartment['lng']\n )\n session.add(new_apartment)\n\n # Insert images for the given apartment\n for index, image_url in enumerate(apartment['image_urls']):\n new_image = Image(\n url=image_url,\n apartment_id=new_apartment.id,\n type=0,\n image_index=index\n )\n session.add(new_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_image)\n\n # Insert floorplan image, if it exists\n if apartment['floorplan_url'] != 0:\n new_floorplan_image = Image(\n url=apartment['floorplan_url'],\n apartment_id=new_apartment.id,\n type=1,\n image_index=len(apartment['image_urls'])\n )\n session.add(new_floorplan_image)\n\n # Connect images to apartment\n new_apartment.images.append(new_floorplan_image)\n\n # Insert amenities for the given apartment\n for amenity in apartment['amenities']:\n new_amenity = Amenity(\n apartment_id=new_apartment.id,\n amenity=amenity\n )\n session.add(new_amenity)\n\n # Connect amenity to apartment\n new_apartment.amenities.append(new_amenity)\n\n # Write all queries to the database\n session.commit()",
"def deleteClassroom(classroomId):\n for classroom in classroomEntities:\n if classroom[\"classroomId\"] == classroomId:\n selectedClassroom = classroom\n classroomEntities.remove(selectedClassroom)\n return True\n return False",
"def add_room(self, data):\n room_id = data['room_id']\n x, y = literal_eval(data['coordinates'])\n room_data = {'id': data['room_id'],\n 'title': data['title'],\n 'description' : data['description'],\n 'coordinates': literal_eval(data['coordinates']),\n 'elevation': data['elevation'],\n 'terrain': data['terrain'],\n 'exits' : {direction: '?' for direction in data['exits']}\n }\n self.rooms.setdefault(room_id, room_data)",
"def store(self):\n\n pass",
"def test_if_data_can_be_saved(self):\n object_count = Room.query.count()\n\n room = Room(name='Jinja', room_type='meeting',\n capacity=5,\n location_id=1,\n calendar_id='[email protected]', # noqa: E501\n image_url=\"https://www.officelovin.com/wp-content/uploads/2016/10/andela-office-main-1.jpg\") # noqa: E501\n room.save()\n\n new_count = Room.query.count()\n\n self.assertNotEquals(object_count, new_count)\n assert object_count < new_count",
"def save_room(self, room_name, room_no_of_members, this_room_type):\n cursor = self.cur()\n cursor.execute('INSERT INTO room (name, no_of_members, room_type) VALUES(?, ?, ?)', (room_name, room_no_of_members, this_room_type)\n )",
"def save(self, db):\n db.query(\n \"INSERT INTO rooms (name, type) VALUES(:name, :type)\",\n name=self.name, type='L'\n )",
"def save(self, case) -> bool:\n if case:\n key = case_key(case)\n case.key = key\n self.cases[key] = case\n the_redis = DARedis()\n the_redis.set_data(self.user_cases_key, self.cases)\n return True",
"def save(self, db):\n db.query(\n \"INSERT INTO rooms (name, type) VALUES(:name, :type)\",\n name=self.name, type='O'\n )",
"async def save(self) -> None:\n if not hasattr(self, 'errors'):\n raise RuntimeError('you must call is_valid() before save instance')\n if self.errors:\n raise RoomValidationError(self.errors)\n if hasattr(self, '_id'):\n data = self.loads()\n room_id = data.pop('_id')\n await room_collection.replace_one({'_id': room_id}, data)\n else:\n result = await room_collection.insert_one(self.loads())\n self._id = result.inserted_id",
"def callback_object(self, data):\n\n try:\n # TODO support multiple of the same object\n # Save an array of object locations\n self.redis.set(self.prefix+\"_\"+data.name, json.dumps([{\n \"name\": data.name,\n \"time\": data.time,\n \"x\": data.x,\n \"y\": data.y,\n \"z\": data.z\n }]))\n except:\n rospy.logerr(\"Cannot insert row\")",
"def save(self, data):\n data['id'] = self.id\n\n self.db.append(data)",
"def test_PUT_room(self):\n\t\t# 1)\n\t\tself.POST_room()\n\t\t# 2)\n\t\tNEW_ROOM_DATA = {'count': '3', 'name': 'NEW-ROOM-NAME'}\n\t\trv = self.PUT_data('/api/room/' + self.room_id, NEW_ROOM_DATA)\n\t\t# 3)\n\t\tdata = self.GET_data('/api/room/' + self.room_id)\n\t\tself.assertDataMatch(TEST_ROOM_DATA, data, ['type'])\n\t\t# 4)\n\t\tself.assertDataMatch(NEW_ROOM_DATA, data, NEW_ROOM_DATA.keys())\n\t\tself.validate_last_modified(data)",
"def put(self,data):\n\n \n try:\n\n db = getDatabase()\n connection = db.connect()\n \n connection.put(self,data)\n except Exception as e:\n raise e\n finally:\n db.dispose()",
"def store(self) -> None:\n # Store the centroids\n if self._centroids != {}:\n with open(self._path_model / f\"{self}\", 'w') as file:\n json.dump({k: v.tolist() for k, v in self._centroids.items()}, file, sort_keys=True)\n else:\n print(\"No centroids created yet to store!\")\n \n # Store the (validation) clusters\n with open(self._path_data / f\"{self}-train\", 'w') as file:\n json.dump(self._clusters, file, indent=2, sort_keys=True)\n with open(self._path_data / f\"{self}-val\", 'w') as file:\n json.dump(self._clusters_val, file, indent=2, sort_keys=True)",
"def save(self):\n self.lock.acquire()\n try:\n self.xml.set(\"name\",self.name)\n self.xml.set(\"room\",self.room)\n self.xml.set(\"type\",self.type)\n self.xml.find(\"address\").text = \":\".join([str(x) for x in self.address])\n if self.pos is not None:\n self.xml.find(\"pos\").text = \" \".join([str(x) for x in self.pos])\n self.xml.find(\"icon\").text = self.icon\n \n finally:\n self.lock.release()\n \n self.house.save_devices()",
"def save_data(self, new):\n db = self.check_db()\n db.append(new)\n\n return db",
"def put(data):",
"def test_insert(self):\n c = city.City(name=\"Freiburg\")\n p1 = city.Citizen(name=\"Peter\")\n p2 = city.Citizen(name=\"Georg\")\n c.add(p1, p2, rel=city.hasInhabitant)\n\n with DataspaceSession(URI) as session:\n wrapper = city.CityWrapper(session=session)\n wrapper.add(c)\n session.commit()\n\n check_state(self, c, p1, p2, db=DB)",
"def booking(self, customer, room):\n self.room[room] = customer\n return True",
"def _provision(self, data):\n count = 0\n for (key, value) in {**data}.items():\n if hasattr(self, key):\n count += 1\n setattr(self, key, data.pop(key))\n return count > 0",
"def store_if_new(self, act_list):\n self.create_connection()\n c = self.get_db_cursor()\n for act in act_list:\n strava_id = act.get_strava_id()\n ride_data = (strava_id, act.get_athlete(), act.get_name(),\n act.get_gmt_date(), act.get_elapsed_time(), act.get_distance(),\n act.get_elevation(), act.get_ride_type(), act.get_trainer_ride())\n sql = 'INSERT INTO rides VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) '\n sql += ' WHERE NOT EXISTS(SELECT id FROM rides WHERE rides.id = %s' % strava_id\n c.execute(sql, ride_data)\n self.commit_and_close()",
"def add(self, workout, database):\n if not database.session:\n logger.error(\"no database session\")\n return False\n\n self.cleanup_sportstype(workout)\n self.associate_sport(database)\n id = database.session.query(SportsType.id).filter(\n SportsType.name == self.name).first()\n if id:\n self.id = id[0]\n return False\n else:\n try:\n database.session.add(self)\n database.session.flush()\n except exc.SQLAlchemyError as e:\n logger.error(\"Database error: {}\".format(e.args))\n return False\n logger.info(\"Adding new sportstype '{}' id {} of sport {}\".format(\n self.name, self.id, self.sport_id))\n return True",
"def _update_class(self, course, semester, year):\n\n if cache_result := cache.get(f'no classes {course.id}'):\n print(f'no classes found for course {course.id} at {cache_result}')\n return\n\n # Get response from SIS class resource\n response = sis_class_resource.get(\n semester=semester,\n year=year,\n course_id=course.id,\n abbreviation=course.abbreviation,\n course_number=course.course_number,\n )\n\n if len(response) == 0:\n cache.add(f'no classes {course.id}', datetime.datetime.now(), timeout=7 * 24 * 60 * 60)\n print(f'no classes found for course {course.id}')\n return\n\n updated_section_ids = set()\n primary_sect_id_to_sections = defaultdict(list)\n\n # Map response to Section and Enrollment objects and persist to database\n section_extras = {\n 'course_id': int(course.id),\n 'abbreviation': course.abbreviation,\n 'course_number': course.course_number,\n 'semester': semester,\n 'year': year,\n }\n for sect in response:\n if not sect:\n continue\n section_dict = section_mapper.map(sect, extras=section_extras)\n section, created = self.update_or_create_from_dict(section_dict)\n if not section:\n continue\n\n updated_section_ids.add(section.id)\n\n if section_dict['primary_section']:\n primary_sect_id_to_sections[section_dict['primary_section']].append(section)\n\n # Update enrollment\n if semester != 'summer' and section.is_primary and not section.disabled:\n enrollment_dict = enrollment_mapper.map(sect, extras={'section_id': section.id})\n enrollment_service.update_or_create_from_dict(enrollment_dict)\n\n # Add associations between primary and non-primary sections\n for related_sections in primary_sect_id_to_sections.values():\n primary_section = [s for s in related_sections if s.is_primary][0]\n other_sections = [s for s in related_sections if not s.is_primary]\n primary_section.associated_sections.add(*other_sections)\n for section in related_sections:\n section.save()\n\n if len(updated_section_ids) > 0:\n print({\n 'message': 'Updated sections for course',\n 'course': course,\n 'sections updated': len(updated_section_ids),\n })\n\n # Disable existing section if data not found in response\n sections_to_disable = Section.objects.filter(\n course_id=course.id,\n semester=semester,\n year=year,\n ).exclude(id__in=updated_section_ids)\n for section in sections_to_disable:\n if not section.disabled:\n section.disabled = True\n section.save()\n print({\n 'message': 'Disabling section not in API response.',\n 'section': section,\n })\n\n # Update derived enrollment fields in course object\n course_service._update_derived_enrollment_fields(course)"
] | [
"0.6729376",
"0.6159639",
"0.6049469",
"0.5342558",
"0.5292166",
"0.52788186",
"0.5239066",
"0.5235893",
"0.5207037",
"0.52025837",
"0.5175275",
"0.5164154",
"0.51413965",
"0.5103515",
"0.51000875",
"0.50629807",
"0.5052926",
"0.5046601",
"0.5038495",
"0.50214577",
"0.49945176",
"0.49926698",
"0.4989717",
"0.49747926",
"0.4972683",
"0.49703386",
"0.49688008",
"0.49534214",
"0.4934908",
"0.49208352"
] | 0.75351626 | 0 |
Modify the content of an already stored classroom. Return True if the operation is successful | def modifyClassroom(classroomId, classroomName, capacity,location):
for classroom in classroomEntities:
if classroom["classroomId"] == classroomId:
selectedClassroom = classroom
selectedClassroom["classroomName"] = classroomName
selectedClassroom["capacity"] = capacity
selectedClassroom["location"] = location
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addClassroom(classroomName, capacity,location):\n for classroom in classroomEntities:\n if classroom[\"classroomName\"] == classroomName:\n print(\"Two classrooms can not have same name\")\n return False\n\n if classroomEntities==[]:\n lastSavedIdNumber = \"0\"\n else:\n lastSavedId=classroomEntities[-1][\"classroomId\"] #update classroomId as first element in classroomEntities list\n lastSavedIdNumber=lastSavedId[2:]\n numberOfDigitsInID = 3\n if lastSavedIdNumber == \"9\" * len(lastSavedIdNumber):\n numberOfDigitsInID = len(lastSavedIdNumber) + 1\n classroomId=\"CR\"+str(int(lastSavedIdNumber)+1).rjust(numberOfDigitsInID,\"0\")\n\n # add the new Classroom\n newClassroom = {}\n newClassroom[\"classroomId\"] = classroomId\n newClassroom[\"classroomName\"] = classroomName\n newClassroom[\"capacity\"] = capacity\n newClassroom[\"location\"] = location\n classroomEntities.append(newClassroom)\n print(f\"Class Room is added into the system, Class Room id is {classroomId}.\")\n return True",
"def save(self, force_insert=False, force_update=False, using=None,\n\t\t\t update_fields=None):\n\t\tif (self.capacity - self.occupied_sits) < 0:\n\t\t\traise ValueError(\"all sits in this classroom are occupied try other classes\")\n\t\telse:\n\t\t\tsuper(ClassRoom, self).save()",
"def update(self):\n return True",
"def test_update(self):\n c = city.City(name=\"Paris\")\n p1 = city.Citizen(name=\"Peter\")\n c.add(p1, rel=city.hasInhabitant)\n\n with DataspaceSession(URI) as session:\n wrapper = city.CityWrapper(session=session)\n cw = wrapper.add(c)\n session.commit()\n\n p2 = city.Citizen(name=\"Georg\")\n cw.add(p2, rel=city.hasInhabitant)\n cw.name = \"Freiburg\"\n session.commit()\n\n check_state(self, c, p1, p2, db=DB)",
"def update_content(self):\n raise NotImplementedError",
"def update(self, user):\n\n\t\tif self == user.classroom:\n\t\t\treturn\n\n\t\tself.size += user.classroom.size\n\t\tuser.set_classroom(self)",
"def do_update(self, arg):\n if len(arg) == 0:\n print(\"** class name missing **\")\n return\n coms = tuple(arg.split())\n if coms[0] not in self.cls:\n print(\"** class doesn't exist **\")\n elif len(coms) < 2:\n print(\"** instance id missing **\")\n return\n obj = coms[0] + \".\" + coms[1]\n if obj not in storage.all().keys():\n print(\"** no instance found **\")\n elif len(coms) < 3:\n print(\"** attribute name missing **\")\n elif len(coms) < 4:\n print(\"** value missing **\")\n else:\n typecast = type(eval(coms[3]))\n form = coms[3].strip('\"')\n form = form.strip(\"'\")\n setattr(storage.all()[obj], coms[2], typecast(form))",
"def do_update(self, arg):\n arg = arg.split()\n try:\n h = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif h not in objects.keys():\n print(\"** no instance found **\")\n elif len(arg) <= 2:\n print(\"** attribute name missing **\")\n elif len(arg) <= 3:\n print(\"** value missing **\")\n else:\n setattr(objects[h], arg[2], arg[3])\n storage.save()",
"def do_update(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n elif args[0] in classes:\n if len(args) > 1:\n k = args[0] + \".\" + args[1]\n if k in models.storage.all():\n if len(args) > 2:\n if len(args) > 3:\n try:\n if isinstance(args[2], datetime) is True:\n pass\n if args[0] in classes:\n if isinstance(args[2], ints) is True:\n args[3] = int(args[3])\n elif isinstance(args[2], floats) is True:\n args[3] = float(args[3])\n except:\n pass\n setattr(models.storage.all()[k], args[2], args[3])\n models.storage.all()[k].save()\n else:\n print(\"** value missing **\")\n else:\n print(\"** attribute name missing **\")\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")",
"def do_update(self, *args):\n if len(args) == 1:\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) < 2:\n print(\"** instance id missing **\")\n return\n elif len(args) < 3:\n print(\"** attribute name missing **\")\n return\n elif len(args) < 4:\n print(\"** value missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n obj = dict_objs[key]\n if args[2] in obj.__class__.__dict__:\n obj.__dict__[args[2]] =\\\n type(obj.__class__.__dict__[args[2]])(args[3])\n else:\n obj.__dict__[args[2]] = args[3]\n storage.save()\n else:\n print(\"** no instance found **\")",
"def do_update(self, args):\n args = args.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if len(args) == 2:\n print(\"** attribute name missing **\")\n return\n if len(args) == 3:\n print(\"** value missing **\")\n return\n if args[0] not in HBNBCommand.valid_classes.keys():\n print(\"** class doesn't exist **\")\n return\n all_objs = storage.all(args[0])\n for k, v in all_objs.items():\n if k == args[1]:\n setattr(v, args[2], args[3])\n storage.save()\n return\n print(\"** no instance found **\")",
"def do_update(self, line):\n if line:\n args = shlex.split(line)\n if len(args) < 2:\n print(\"** instance id missing **\")\n return False\n elif len(args) < 3:\n print(\"** attribute name missing **\")\n return False\n elif len(args) == 3:\n print(\"** value missing **\")\n return False\n else:\n obj_name, obj_id, obj_attr, obj_value = args\n obj_repr = \"{}.{}\".format(obj_name, obj_id)\n data = FileStorage()\n data.reload()\n data_loaded = data.all()\n for key, value in data_loaded.items():\n if key == obj_repr:\n obj = eval(obj_name)(**value.to_dict())\n if obj_name in obj.__dict__.keys():\n obj[obj_name] = obj_value\n else:\n setattr(obj, obj_attr, obj_value)\n d = {}\n for s_key, s_value in data_loaded.items():\n d[s_key] = s_value.to_dict()\n with open(data.path(), mode='w', encoding=\"utf-8\") as file:\n file.write(json.dumps(d))\n break\n else:\n print(\"** class doesn't exist **\")\n else:\n print(\"** class name missing **\")",
"def saveClassroomData():\n with open(\"ClassRoomData.txt\",\"wb\") as classroomData:\n pickle.dump(classroomEntities,classroomData)",
"def do_update(self, arg):\n args = arg.split()\n object_dict = storage.all()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if args[0] in self.class_dict:\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n return\n elif len(args) == 3:\n print(\"** value missing **\")\n return\n else:\n print(\"** class doesn't exist **\")\n return\n\n for i in range(len(args)):\n if args[i].startswith('\"') and args[i].endswith('\"'):\n args[i] = args[i][1:-1]\n\n for full_key in object_dict.keys():\n key = full_key.split('.')\n key_id = key[1]\n if args[0] in self.class_dict:\n if args[1] == object_dict[full_key].id:\n setattr(object_dict[full_key], args[2], args[3])\n setattr(object_dict[full_key], \"updated_at\",\n datetime.now())\n storage.save()\n return\n else:\n print(\"** class doesn't exist **\")\n return\n print(\"** no instance found **\")",
"def Persist(self) -> bool:",
"def Persist(self) -> bool:",
"def update_has_data(self):\n self.main()",
"def class_to_db(self):",
"def do_update(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif not \"{}.{}\".format(args[0], args[1]) in dicti:\n print(\"** no instance found **\")\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n elif len(args) == 3:\n print(\"** value missing **\")\n else:\n key = dicti[\"{}.{}\".format(args[0], args[1])]\n setattr(key, args[2], args[3])\n key.save()",
"def save(self):\n if not connection.connected:\n raise Exception('Not connected to the database.')\n if not self._retrieved:\n self.insert()\n self._retrieved = True\n else:\n self.update()",
"def save(self):\n self.logger.debug(\"In save.\")\n\n if not self.is_valid():\n self.logger.error(\"Cannot save, data is invalid\")\n return False\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n success = False\n\n if self.id is None:\n # The document has not yet been saved\n prep_data = self._get_raw_doc()\n self.logger.info(\"Got the raw JSON document.\")\n\n try:\n self.logger.info(\"Attempting to save a new node.\")\n node_id = session.get_osdf().insert_node(prep_data)\n self.logger.info(\"Save for HostSeqPrep %s successful.\", node_id)\n self.logger.info(\"Setting ID for HostSeqPrep %s.\", node_id)\n\n self._set_id(node_id)\n self._version = 1\n success = True\n except Exception as insert_exception:\n self.logger.error(\"An error occurred while inserting \" + \\\n \"%s %s. Reason: %s\", __name__, self._id,\n insert_exception\n )\n else:\n prep_data = self._get_raw_doc()\n\n try:\n self.logger.info(\"Attempting to update %s with ID: %s.\", __name__, self._id)\n session.get_osdf().edit_node(prep_data)\n self.logger.info(\"Update for %s %s successful.\", __name__, self._id)\n success = True\n except Exception as edit_exception:\n self.logger.error(\"An error occurred while updating %s \" + \\\n \" %s. Reason: %s\", __name__, self._id,\n edit_exception\n )\n\n return success",
"def put(self):\n pass",
"def put(self):\n pass",
"def put(self):\n return",
"def save(self):\n self.logger.debug(\"In save.\")\n\n # If node previously saved, use edit_node instead since ID\n # is given (an update in a way)\n # can also use get_node to check if the node already exists\n if not self.is_valid():\n self.logger.error(\"Cannot save, data is invalid.\")\n return False\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n osdf = session.get_osdf()\n\n success = False\n\n if self._id is None:\n self.logger.info(\"About to insert a new %s OSDF node.\", __name__)\n\n # Get the JSON form of the data and load it\n self.logger.debug(\"Converting %s to parsed JSON form.\", __name__)\n data = json.loads(self.to_json())\n\n try:\n node_id = osdf.insert_node(data)\n\n self._set_id(node_id)\n self._version = 1\n success = True\n except Exception as save_exception:\n self.logger.exception(save_exception)\n self.logger.error(\"An error occurred when saving %s.\", self)\n else:\n self.logger.info(\"%s already has an ID, so we \" + \\\n \"do an update (not an insert).\", __name__)\n\n try:\n node_data = self._get_raw_doc()\n self.logger.info(\"%s already has an ID, so we do an \" + \\\n \"update (not an insert).\", __name__)\n node_id = self._id\n self.logger.debug(\"%s OSDF ID to update: %s.\", __name__, node_id)\n osdf.edit_node(node_data)\n\n node_data = osdf.get_node(node_id)\n latest_version = node_data['ver']\n\n self.logger.debug(\"The version of this %s is now: %s\",\n __name__, latest_version\n )\n self._version = latest_version\n success = True\n except Exception as update_exception:\n self.logger.exception(update_exception)\n self.logger.error(\"An error occurred when updating %s.\", self)\n\n return success",
"def deleteClassroom(classroomId):\n for classroom in classroomEntities:\n if classroom[\"classroomId\"] == classroomId:\n selectedClassroom = classroom\n classroomEntities.remove(selectedClassroom)\n return True\n return False",
"def do_update(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif not args[0] in class_type:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif (\"{}.{}\".format(args[0], args[1]) not in storage.all().keys()):\n print(\"** no instance found **\")\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n elif len(args) == 3:\n print(\"** value missing **\")\n else:\n new_dict = models.storage.all()\n tmp = \"{}.{}\".format(args[0], args[1])\n if tmp in new_dict.keys():\n attr = getattr(new_dict[tmp], args[2], \"\")\n setattr(new_dict[tmp], args[2], type(attr)(args[3]))\n new_dict[tmp].save()",
"def save(self):\n self.session.modified = True",
"def update(self):\n db.session.commit()",
"def update(self):\n db.session.commit()"
] | [
"0.642563",
"0.5946925",
"0.54554087",
"0.5389277",
"0.5383477",
"0.5381614",
"0.53707135",
"0.536546",
"0.5361684",
"0.5358011",
"0.5356025",
"0.5323734",
"0.5304585",
"0.52719533",
"0.5269355",
"0.5269355",
"0.52631587",
"0.5255712",
"0.5246466",
"0.5222454",
"0.5217273",
"0.5204923",
"0.5204923",
"0.5193499",
"0.51909333",
"0.5182403",
"0.5150957",
"0.51488924",
"0.5134032",
"0.5134032"
] | 0.697157 | 0 |
delete a classroom from the system. return True if operation is successful | def deleteClassroom(classroomId):
for classroom in classroomEntities:
if classroom["classroomId"] == classroomId:
selectedClassroom = classroom
classroomEntities.remove(selectedClassroom)
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_remove_classroom_specific_for_coach_pt1(self):\n self.assertTrue(self.coach1.has_perm('auth.remove_classroom', self.classrooms[0]))",
"def test_remove_classroom_specific_for_learner(self):\n self.assertFalse(self.learner1.has_perm('auth.remove_classroom', self.classrooms[1]))",
"def delete_room(context):\n\n room = context.get('spark.room')\n bearer = context.get('spark.CISCO_SPARK_PLUMBERY_BOT')\n\n print(\"Deleting Cisco Spark room '{}'\".format(room))\n\n url = 'https://api.ciscospark.com/v1/rooms'\n headers = {'Authorization': 'Bearer '+bearer}\n response = requests.get(url=url, headers=headers)\n\n if response.status_code != 200:\n print(response.json())\n raise Exception(\"Received error code {}\".format(response.status_code))\n\n actual = False\n for item in response.json()['items']:\n\n if room in item['title']:\n print(\"- found it\")\n print(\"- DELETING IT\")\n\n url = 'https://api.ciscospark.com/v1/rooms/{}'.format(item['id'])\n headers = {'Authorization': 'Bearer '+bearer}\n response = requests.delete(url=url, headers=headers)\n\n if response.status_code != 204:\n raise Exception(\"Received error code {}\".format(response.status_code))\n\n actual = True\n\n if actual:\n print(\"- room will be re-created in Cisco Spark\")\n else:\n print(\"- no room with this name yet\")\n\n context.set('spark.room_id', None)",
"def test_remove_classroom_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_classroom', self.classrooms[1]))",
"def unspawn(self):\n global NodeTypeclass\n if not NodeTypeclass:\n from .room import XYZRoom as NodeTypeclass\n\n xyz = (self.X, self.Y, self.Z)\n\n try:\n nodeobj = NodeTypeclass.objects.get_xyz(xyz=xyz)\n except django_exceptions.ObjectDoesNotExist:\n # no object exists\n pass\n else:\n nodeobj.delete()",
"def delete():",
"def do_destroy(self, arg):\n if len(arg) == 0:\n print(\"** class name missing **\")\n return\n coms = tuple(arg.split())\n if coms[0] not in self.cls:\n print(\"** class doesn't exist **\")\n elif len(coms) < 2:\n print(\"** instance id missing **\")\n else:\n obj = coms[0] + \".\" + coms[1]\n if obj not in storage.all().keys():\n print(\"** no instance found **\")\n else:\n del storage.all()[obj]\n storage.save()",
"def room_delete(room_id):\n room = Room.query.get(room_id)\n if room is None:\n abort(404, 'room not found')\n\n get_db().delete(room)\n get_db().commit()\n\n return '', 204",
"def delete(room_id):\n\n entry = Room.objects.filter(room_id=room_id).first()\n if entry is not None:\n entry.delete()\n\n entries = Players.objects.filter(room_id=room_id)\n if entries.count():\n entries.delete()\n\n round.dialog.delete_rounds(room_id=room_id, called_from=__path__+\":\"+utils.fname())",
"def do_destroy(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n if args[0] in classes:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in models.storage.all():\n models.storage.all().pop(key)\n models.storage.save()\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")",
"def test_delete(self):\n c = city.City(name=\"Freiburg\")\n p1 = city.Citizen(name=\"Peter\")\n p2 = city.Citizen(name=\"Georg\")\n p3 = city.Citizen(name=\"Hans\")\n c.add(p1, p2, p3, rel=city.hasInhabitant)\n\n with DataspaceSession(URI) as session:\n wrapper = city.CityWrapper(session=session)\n cw = wrapper.add(c)\n session.commit()\n\n cw.remove(p3.uid)\n session.prune()\n session.commit()\n\n check_state(self, c, p1, p2, db=DB)",
"def delete(self, registration):\n return Car.delete(registration)",
"def delete():\n Course.print_all_crs()\n course_name = input(\"Please, type course name >\")\n c = Course(course_name)\n if c.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == course_name:\n del db[\"courses\"][crs_i]\n break\n Course._file.write_db(db)\n print(\"{} course is deleted\".format(course_name))\n else:\n print(\"Failed. {} course does not exist\".format(course_name))",
"def do_destroy(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key in storage.all():\n del storage.all()[key]\n storage.save()\n return\n print(\"** no instance found **\")",
"def do_destroy(self, arg):\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif args[0] in class_type:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in models.storage.all():\n models.storage.all().pop(key)\n models.storage.save()\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")",
"def do_destroy(self, arg):\n args = arg.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n elif len(args) < 2 and args[0] in self.class_dict:\n print(\"** instance id missing **\")\n return\n elif len(args) < 2:\n print(\"** class name missing **\")\n return\n\n object_dict = storage.all()\n if args[0] in self.class_dict:\n for full_key in object_dict:\n key = full_key.split(\".\")\n if key[1] == args[1]:\n del object_dict[full_key]\n storage.save()\n return\n print(\"** no instance found **\")\n else:\n print(\"** class doesn't exist **\")",
"def delete(self):\n\n\n try:\n db = getDatabase()\n connection = db.connect()\n\n connection.delete(self)\n except Exception as e:\n raise e\n finally:\n db.dispose()",
"def delete(self, name):\n instance = self.get_one_instance('name', name)\n\n if type(instance) != self.Component:\n set_session_var('errors', str(instance))\n return None\n\n res = delete_in_db(instance)\n\n if res != 'deleted':\n set_session_var('errors', str(res))\n else:\n set_session_var('success', res)\n\n return True",
"def test_delete_lecture(lecture_class, course, valid_datetime):\n id = lecture_class.create_lecture(course, valid_datetime)\n assert id != None\n assert lecture_class.delete_lecture()",
"def do_destroy(self, args):\n args = args.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if args[0] not in HBNBCommand.class_check:\n print(\"** class doesn't exist **\")\n return\n\n all_objs = storage.all()\n key = args[0] + '.' + args[1]\n if key in all_objs:\n all_objs.pop(key)\n storage.save()\n else:\n print(\"** no instance found **\")",
"def delete(self):\n db.session.delete(self)\n try:\n db.session.commit()\n return True\n except Exception as error:\n db.session.rollback()\n print(error.args)\n return False",
"def delete(self):\n ...",
"def test_remove_students():\n classroom = setup_for_test()\n student = Student(\"Andrew Tsukuda\")\n classroom.add_student(student)\n assert len(classroom.student_dir) == 1\n assert classroom.student_dir[0].ID == 1\n classroom.remove_student(\"Andrew Tsukuda\")\n assert len(classroom.student_dir) == 0",
"def do_destroy(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n del dict_objs[key]\n storage.save()\n else:\n print(\"** no instance found **\")",
"def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))",
"def do_destroy(self, arg):\n arg = arg.split()\n try:\n args = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif args not in objects:\n print(\"** no instance found **\")\n else:\n del objects[args]\n storage.save()",
"def delete_game(self, room_code: str) -> None:\n self.games_table.delete_item(Key={\"room_code\": room_code})",
"async def delete(self):\r\n try:\r\n data = await self.request.json()\r\n agent_uuid = data.get(\"agent_uuid\")\r\n agent_to_delete = Agent.filter(Agent.uuid == agent_uuid).first()\r\n sys_id = (\r\n System.select().where(System.agent_uuid == agent_to_delete).execute()\r\n )\r\n if sys_id:\r\n logger.error(\"Agent not deleted\")\r\n return web.Response(text=\"Agent not deleted.\")\r\n else:\r\n agent_to_delete.delete_instance()\r\n logger.info(\"Agent deleted successfully\")\r\n return web.Response(text=\"Agent deleted successfully.\")\r\n except Exception as ex:\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=error_message, status=500)",
"def delete(self):\n DBSESSION.delete(self)\n DBSESSION.commit()\n LOG.info(f\"Register of {self.str_representation} with id = {self.id} was successfully deleted.\")",
"def test_remove_coach_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_COACH, self.classrooms[1]))"
] | [
"0.6959816",
"0.6681781",
"0.6642126",
"0.64679796",
"0.6322488",
"0.6272679",
"0.62258446",
"0.62023646",
"0.6199112",
"0.6172018",
"0.6158727",
"0.6144896",
"0.61320245",
"0.60920924",
"0.60902715",
"0.6075575",
"0.6064415",
"0.6015804",
"0.6012518",
"0.5992586",
"0.59777194",
"0.5955315",
"0.5931716",
"0.5928342",
"0.59209853",
"0.59111434",
"0.58980626",
"0.5897864",
"0.58675337",
"0.5856675"
] | 0.7279858 | 0 |
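A minimal usage sketch for the deleteClassroom document above; the module-level classroomEntities list and the CR-prefixed ids are taken from the record itself, while the sample entries are hypothetical:

# Hypothetical module state, mirroring the structure deleteClassroom iterates over.
classroomEntities = [
    {"classroomId": "CR001", "classroomName": "Physics Lab", "capacity": 30, "location": "Block A"},
    {"classroomId": "CR002", "classroomName": "Chemistry Lab", "capacity": 25, "location": "Block B"},
]

assert deleteClassroom("CR002") is True   # removes the matching entry
assert deleteClassroom("CR999") is False  # unknown id leaves the list untouched
assert len(classroomEntities) == 1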
saves classroomEntities in the ClassRoomData file | def saveClassroomData():
with open("ClassRoomData.txt","wb") as classroomData:
pickle.dump(classroomEntities,classroomData) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(cls):\n playerdata = getAttributes(cls)\n Data.object_dump(playerdata, \"savedata.dat\")\n del playerdata",
"def class_to_db(self):",
"def saveTeachersData():\n with open(\"TeacherData.txt\",\"wb\") as teacherData:\n pickle.dump(teacherEntities,teacherData)",
"def save():",
"def persist(self):\n pass",
"def save_data(self):\n db.session.add(self)\n db.session.commit( )",
"def save(self):\n # TODO (Pierre): code",
"def save_data(self):\n pass",
"def addClassroom(classroomName, capacity,location):\n for classroom in classroomEntities:\n if classroom[\"classroomName\"] == classroomName:\n print(\"Two classrooms can not have same name\")\n return False\n\n if classroomEntities==[]:\n lastSavedIdNumber = \"0\"\n else:\n lastSavedId=classroomEntities[-1][\"classroomId\"] #update classroomId as first element in classroomEntities list\n lastSavedIdNumber=lastSavedId[2:]\n numberOfDigitsInID = 3\n if lastSavedIdNumber == \"9\" * len(lastSavedIdNumber):\n numberOfDigitsInID = len(lastSavedIdNumber) + 1\n classroomId=\"CR\"+str(int(lastSavedIdNumber)+1).rjust(numberOfDigitsInID,\"0\")\n\n # add the new Classroom\n newClassroom = {}\n newClassroom[\"classroomId\"] = classroomId\n newClassroom[\"classroomName\"] = classroomName\n newClassroom[\"capacity\"] = capacity\n newClassroom[\"location\"] = location\n classroomEntities.append(newClassroom)\n print(f\"Class Room is added into the system, Class Room id is {classroomId}.\")\n return True",
"def save(self):\n\n pass",
"def save(self):\n raise NotImplementedError",
"def save(self):\n raise NotImplementedError",
"def save(self):\n raise NotImplementedError",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save (self):\n pass",
"def save(self):\n with open(self.__file_path, \"w\", encoding=\"UTF-8\") as file:\n parsed_dict = {\n key: value.to_dict()\n for key, value in self.__objects.items()\n }\n save_data(parsed_dict, file)",
"def save_class(self):\n with open(self.savefile, \"w\") as f:\n data = {\"name\": self.name, \"host\": self.host, \"port\": self.port}\n json.dump(data, f)",
"def save_file(self):\n # paginate over deputies and senators getting their fields\n fieldnames = set([])\n congressmen = self.deputies + self.senators\n for data in congressmen:\n fieldnames = fieldnames.union(data.dump().keys())\n\n\n with open(IDENTITY_FILE_UPDATED, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=list(fieldnames), delimiter=';')\n writer.writeheader()\n\n for data in congressmen:\n writer.writerow(data.dump())",
"def saveData(self):\n pass",
"def persist(self, file_name, model_dir):\n pass",
"def save_db(self) -> None:",
"def save(self):\n raise NotImplementedError()",
"def save(self):\n joblib.dump(\n self.classifier, \"data/models/badlymappedfinder/badlymappedfinder.joblib\",\n )",
"def save():\n pass",
"def persist(self, file_name, model_dir):\n\n pass",
"def save(self):\n data = (\n self.Joints,\n self.Links,\n self.joint_syms,\n self.global_syms,\n self.name,\n self.sym_prefix,\n )\n cloudpickle.dump(data, open(self.save_filename, \"wb\"))"
] | [
"0.66771996",
"0.66205716",
"0.6545138",
"0.60703945",
"0.60378635",
"0.60299575",
"0.6015636",
"0.5970347",
"0.596366",
"0.590775",
"0.58763117",
"0.58763117",
"0.58763117",
"0.58696264",
"0.58696264",
"0.58696264",
"0.58696264",
"0.58696264",
"0.5866105",
"0.5827522",
"0.5819063",
"0.58154714",
"0.5800954",
"0.5773427",
"0.57664865",
"0.5759937",
"0.575428",
"0.5740315",
"0.57371706",
"0.57309514"
] | 0.8425549 | 0 |
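A small round-trip sketch for saveClassroomData above; the loadClassroomData counterpart is hypothetical and only assumes the pickle file name used in the record:

import pickle

def loadClassroomData():
    # Hypothetical counterpart to saveClassroomData: restores the list that was
    # pickled into ClassRoomData.txt, or returns an empty list if no file exists.
    try:
        with open("ClassRoomData.txt", "rb") as classroomData:
            return pickle.load(classroomData)
    except FileNotFoundError:
        return []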
If the environment variable TERM is unset try with `fallback` if not empty. vt100 is a popular terminal supporting ANSI X3.64. | def load_terminfo(terminal_name=None, fallback='vt100'):
    if terminal_name is None:
        terminal_name = os.getenv('TERM')
if not terminal_name:
if not fallback:
raise TerminfoError('Environment variable TERM is unset and no fallback was requested')
else:
terminal_name = fallback
if os.getenv('TERMINFO'):
# from man terminfo(5):
# if the environment variable TERMINFO is set,
# only that directory is searched
terminfo_locations = [os.getenv('TERMINFO')]
else:
terminfo_locations = [] # from most to least important
if os.getenv('TERMINFO_DIRS'):
for i in os.getenv('TERMINFO_DIRS').split(':'):
# from man terminfo(5)
# An empty directory name is interpreted as /usr/share/terminfo.
terminfo_locations.append(i or '/usr/share/terminfo')
terminfo_locations += [
os.path.expanduser('~/.terminfo'),
'/etc/terminfo',
'/usr/local/ncurses/share/terminfo',
'/lib/terminfo',
'/usr/share/terminfo'
]
# remove duplicates preserving order
terminfo_locations = list(OrderedDict.fromkeys(terminfo_locations))
terminfo_path = None
for dirpath in terminfo_locations:
path = os.path.join(dirpath, terminal_name[0], terminal_name)
if os.path.exists(path):
terminfo_path = path
break
    if not terminfo_path:
raise TerminfoError("Couldn't find a terminfo file for terminal '%s'" % terminal_name)
from terminfo_index import BOOLEAN_CAPABILITIES, NUMBER_CAPABILITIES, STRING_CAPABILITIES
    with open(terminfo_path, 'rb') as terminfo_file:
        data = terminfo_file.read()
# header (see man term(5), STORAGE FORMAT)
header = struct.unpack('<hhhhhh', data[:12]) # 2 bytes == 1 short integer
magic_number = header[0] # the magic number (octal 0432)
size_names = header[1] # the size, in bytes, of the names section
size_booleans = header[2] # the number of bytes in the boolean section
num_numbers = header[3] # the number of short integers in the numbers section
num_offsets = header[4] # the number of offsets (short integers) in the strings section
size_strings = header[5] # the size, in bytes, of the string table
if magic_number != 0o432:
raise TerminfoError('Bad magic number')
# sections indexes
idx_section_names = 12
idx_section_booleans = idx_section_names + size_names
idx_section_numbers = idx_section_booleans + size_booleans
if idx_section_numbers % 2 != 0:
idx_section_numbers += 1 # must start on an even byte
idx_section_strings = idx_section_numbers + 2 * num_numbers
idx_section_string_table = idx_section_strings + 2 * num_offsets
# terminal names
terminal_names = data[idx_section_names:idx_section_booleans].decode('ascii')
terminal_names = terminal_names[:-1].split('|') # remove ASCII NUL and split
terminfo = Terminfo(terminal_names[0], terminal_names[1:])
# booleans
for i, idx in enumerate(range(idx_section_booleans, idx_section_booleans + size_booleans)):
        # flag bytes are 1 when the capability is present
        cap = BooleanCapability(*BOOLEAN_CAPABILITIES[i], value=data[idx:idx+1] != b'\x00')
terminfo.booleans[cap.variable] = cap
# numbers
numbers = struct.unpack('<'+'h' * num_numbers, data[idx_section_numbers:idx_section_strings])
    for i, number in enumerate(numbers):
        cap = NumberCapability(*NUMBER_CAPABILITIES[i], value=number)
terminfo.numbers[cap.variable] = cap
# strings
offsets = struct.unpack('<'+'h' * num_offsets, data[idx_section_strings:idx_section_string_table])
idx = 0
for offset in offsets:
k = 0
string = []
        while offset != -1:
char = data[idx_section_string_table + offset + k:idx_section_string_table + offset + k + 1]
if char == b'\x00':
break
string.append(char.decode('iso-8859-1'))
k += 1
string = u''.join(string)
cap = StringCapability(*STRING_CAPABILITIES[idx], value=string)
terminfo.strings[cap.variable] = cap
idx += 1
terminfo._reset_index()
return terminfo | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_term(fullterm):\n if platform == 'win32':\n return True\n elif platform in ('darwin', 'linux'):\n global _STATIC_VARS\n fd = stdin.fileno()\n if not isatty(fd):\n return\n old = tcgetattr(fd)\n _STATIC_VARS.term_config = (fd, old)\n new = tcgetattr(fd)\n new[3] = new[3] & ~ICANON & ~ECHO\n new[6][VMIN] = 1\n new[6][VTIME] = 0\n if fullterm:\n new[6][VINTR] = 0\n new[6][VSUSP] = 0\n tcsetattr(fd, TCSANOW, new)\n # terminal modes have to be restored on exit...\n register(cleanup_console)\n return True\n else:\n return True",
"def auto_color(stream=sys.stdin):\n term_name = os.environ.get(\"TERM\", \"\").lower()\n if (stream.isatty()\n and (term_name in KNOWN_TERMINAL_TYPES or \"xterm\" in term_name)):\n return VtColor()\n return NoColor()",
"def get_term_colors():\n term = getenv('TERM')\n if not is_term() or not term:\n return 1\n if term in ('xterm-color', 'ansi', 'screen'):\n return 16\n if term in ('xterm-256color'):\n return 256\n return 1",
"def term_support_color():\n return OS_VERSION[0] == \"Linux\" or OS_VERSION[0] == \"Darwin\"",
"def set_terminal_encoding(encoding='utf_8'):\n sys.stdin = codecs.getreader(encoding)(sys.stdin)\n sys.stdout = codecs.getwriter(encoding)(sys.stdout)\n sys.stderr = codecs.getwriter(encoding)(sys.stderr)",
"def fallback(self, kw):\n print(self.fallback_text.format(kw))\n return self.ask()",
"def terminal_configured():\n return lnp.userconfig.get('terminal_type') is not None",
"def reset_term_colors():\n sys.stdout.write(ENDC)",
"def terminal_supports_color():\n plat = sys.platform\n supported_platform = plat != \"Pocket PC\" and (\n plat != \"win32\" or \"ANSICON\" in os.environ\n )\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, \"isatty\") and sys.stdout.isatty()\n if not supported_platform or not is_a_tty:\n return False\n return True",
"def preferredRenderer(*args, fallback: Union[AnyStr, bool]=\"\", makeCurrent: bool=True, q=True,\n query=True, **kwargs)->Union[None, Any]:\n pass",
"def _get_terminal_exec(self):\n\n terminal = None\n\n try:\n with open(CONFIG_FILE_PATH) as conffile:\n config = yaml.load(conffile, yaml.SafeLoader)\n terminal = config.get('terminal', None)\n except yaml.YAMLError:\n print(\"Nautiterm: invalid configuration file at {path}, falling back\" +\n \" to {d}\".format(path=CONFIG_FILE_PATH, d=DEFAULT_TERMINAL_EXEC),\n file=sys.stderr)\n except IOError as ioe:\n # catch-all for permission errors and file not founds to be compatible\n # with Python 2 which doesn't have FileNotFoundError or PermissionError\n pass\n\n if not terminal:\n terminal = DEFAULT_TERMINAL_EXEC\n\n return terminal",
"def _default_color_enabled() -> bool:\n import platform\n\n # If we're not attached to a terminal, go with no-color.\n if not sys.__stdout__.isatty():\n return False\n\n # On windows, try to enable ANSI color mode.\n if platform.system() == 'Windows':\n return _windows_enable_color()\n\n # We seem to be a terminal with color support; let's do it!\n return True",
"def reset_terminal():\n if not mswin:\n subprocess.call([\"tset\", \"-c\"])",
"def get_configured_terminal():\n s = lnp.userconfig.get_string('terminal_type')\n terminals = get_valid_terminals()\n for t in terminals:\n if s == t.name:\n return t\n return CustomTerminal",
"def supports_color():\n plat = sys.platform\n supported_platform = plat != 'Pocket PC' and (plat != 'win32'\n or 'ANSICON' in os.environ)\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n return supported_platform and is_a_tty",
"def test_term_chars_default(self, instrument):\n assert instrument.term_chars == b'\\r'",
"def defaultProcessOutputEncodingDecider(context, executable, **forfutureuse):\n\treturn __DEFAULT_PROCESS_ENCODING # stdout encoding will be None unless in a terminal",
"def consolePrompt(prompt:str, nl:bool = True, default:str = None) -> str:\n\t\tanswer = None\n\t\ttry:\n\t\t\tanswer = Prompt.ask(f'[{Logging.terminalStyle}]{prompt}', console = Logging._console, default = default)\n\t\t\tif nl:\n\t\t\t\tLogging.console()\n\t\texcept KeyboardInterrupt as e:\n\t\t\tpass\n\t\texcept Exception:\n\t\t\tpass\n\t\treturn answer",
"def stdout_supports_color():\r\n plat = sys.platform\r\n supported_platform = plat != 'Pocket PC' and (plat != 'win32' or\r\n 'ANSICON' in os.environ)\r\n\r\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\r\n if not supported_platform or not is_a_tty:\r\n return False\r\n return True",
"def supports_color():\n plat = sys.platform\n supported_platform = plat != 'Pocket PC' and \\\n (plat != 'win32' or 'ANSICON' in os.environ)\n\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n if not supported_platform or not is_a_tty:\n return False\n return True",
"def get_custom_terminal_cmd():\n return lnp.userconfig.get_string('terminal')",
"def setup_locale_environment(locale=None, text_mode=False, prefer_environment=False):\n\n # pylint: disable=environment-modify\n\n # Look for a locale in the environment. If the variable is setup but\n # empty it doesn't count, and some programs (KDE) actually do this.\n # If prefer_environment is set, the environment locale can override\n # the parameter passed in. This can be used, for example, by initial-setup,\n # to prefer the possibly-more-recent environment settings before falling back\n # to a locale set at install time and saved in the kickstart.\n if not locale or prefer_environment:\n for varname in (\"LANGUAGE\", \"LC_ALL\", \"LC_MESSAGES\", \"LANG\"):\n if varname in os.environ and os.environ[varname]:\n locale = os.environ[varname]\n break\n\n # Look for a locale in the firmware if there was nothing in the environment\n if not locale:\n locale = get_firmware_language(text_mode)\n\n # parse the locale using langtable\n if locale:\n env_langs = get_language_locales(locale)\n if env_langs:\n # the first langauge is the best match\n locale = env_langs[0]\n else:\n log.error(\"Invalid locale '%s' given on command line, kickstart or environment\", locale)\n locale = None\n\n # If langtable returned no locales, or if nothing was configured, fall back to the default\n if not locale:\n locale = constants.DEFAULT_LANG\n\n # Save the locale in the environment\n os.environ[\"LANG\"] = locale\n\n # Cleanup the rest of the environment variables\n for varname in (\"LANGUAGE\", \"LC_ALL\", \"LC_MESSAGES\"):\n if varname in os.environ:\n del os.environ[varname]",
"def is_using_terminal(self):\n return self.using_terminal",
"def tty_supports_color():\r\n\t\t\r\n\t\tplat = sys.platform\r\n\r\n\t\tif plat == \"win32\":\r\n\t\t\treturn False\r\n\t\telse:\r\n\t\t\tsupported_platform = plat != 'Pocket PC' and (plat != 'win32' or\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t 'ANSICON' in os.environ)\r\n\t\t# isatty is not always implemented, #6223.\r\n\t\t\tis_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\r\n\t\t\treturn supported_platform and is_a_tty",
"def supports_color(): # pragma: no cover # noqa\n plat = sys.platform\n supported_platform = plat != 'Pocket PC' and (\n plat != 'win32' or 'ANSICON' in os.environ\n )\n\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n if not supported_platform or not is_a_tty:\n return False\n return True",
"def xterm_title(value, max_length=74, bypass_term_check=False):\n TERM = os.getenv('TERM')\n if not bypass_term_check and TERM not in TERM_TITLE_SUPPORTED:\n return\n sys.stderr.write('\\033]2;'+value[:max_length]+'\u0007')\n sys.stderr.flush()",
"def configure_terminal(termname):\n lnp.userconfig['terminal_type'] = termname\n lnp.userconfig.save_data()",
"def getpreferredencoding() -> str:\n return locale.getpreferredencoding() or \"UTF-8\"",
"def supports_color():\n unsupported_platform = (sys.platform in ('win32', 'Pocket PC'))\n # isatty is not always implemented, #6223.\n is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\n if unsupported_platform or not is_a_tty:\n return False\n return True",
"def GetEnvironFallback(var_list, default):\n for var in var_list:\n if var in os.environ:\n return os.environ[var]\n return default"
] | [
"0.5692326",
"0.53151155",
"0.5231032",
"0.52155876",
"0.51974976",
"0.51403886",
"0.5131423",
"0.50917715",
"0.50907224",
"0.5077649",
"0.4965872",
"0.48994294",
"0.4885659",
"0.48755345",
"0.48475754",
"0.48131225",
"0.480396",
"0.47808626",
"0.4763937",
"0.47169244",
"0.47124702",
"0.4706323",
"0.4693541",
"0.468458",
"0.4677504",
"0.46382722",
"0.46357965",
"0.46302018",
"0.46000203",
"0.4575656"
] | 0.5457427 | 1 |
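A hedged usage sketch for load_terminfo above; it relies only on the attributes populated inside the function (the booleans/numbers/strings dicts of capability objects with a .value field), and the capability key names are assumptions based on standard terminfo variable names:

ti = load_terminfo()                 # falls back to 'vt100' when TERM is unset
cols = ti.numbers.get('columns')     # 'columns' is the conventional variable name
if cols is not None:
    print('terminal width:', cols.value)
if 'clear_screen' in ti.strings:     # conventional name for the clear sequence
    print(repr(ti.strings['clear_screen'].value))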
This function will create foreign table under the existing dummy schema. | def create_foreign_table(server, db_name, schema_name, fsrv_name,
foreign_table_name):
try:
connection = get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'])
old_isolation_level = connection.isolation_level
connection.set_isolation_level(0)
pg_cursor = connection.cursor()
pg_cursor.execute(
"CREATE FOREIGN TABLE " + schema_name + "." + foreign_table_name +
"(emp_name text NULL) SERVER %s" % fsrv_name)
connection.set_isolation_level(old_isolation_level)
connection.commit()
# Get 'oid' from newly created foreign table
pg_cursor.execute(
"SELECT ftrelid FROM pg_foreign_table WHERE ftserver = "
"(SELECT oid FROM pg_foreign_server WHERE srvname = '%s') ORDER BY "
"ftrelid ASC limit 1" % fsrv_name)
oid = pg_cursor.fetchone()
ft_id = ''
if oid:
ft_id = oid[0]
connection.close()
return ft_id
except Exception:
traceback.print_exc(file=sys.stderr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_table(self):\n pass",
"def create_tables():\n db.create_all()",
"def create_tables():\n db.create_all()",
"def create_table(self, cursor: sqlite3.Cursor) -> None:\n\n if self.created:\n return\n\n # Preset this to true to work with Foreign key loops.\n self.created = True\n\n for _, model in self.foreigners.values():\n model.create_table(cursor)\n\n compiled_sql = self._create_table_sql()\n\n _LOGGER.debug(compiled_sql)\n\n cursor.execute(compiled_sql)\n\n for smodel in self.submodels.values():\n smodel.model.create_table(cursor)",
"def create_tables():\n db.create_all()",
"def create_example_test_table(conn):\n execute_sql_script(conn, \"06_create_example_test_table.sql\")",
"def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise",
"def test_dummydb_new_table(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)",
"def _create_tables():\n from Model.DataAccessor.DbAccessor.DbOrmAccessor import db\n db.create_tables([SubjectType, SubjectRegion, Subject])",
"def _create_table(self) :\n\n cur = self.con.cursor()\n delete_sql = 'DROP TABLE IF EXISTS \"%s\"' % self.name\n cur.execute(delete_sql)\n\n col_sql = ','.join(['\"%s\" %s' % (self.cols[i], self.types[i])\n for i in range(len(self.cols))])\n create_sql = 'CREATE TABLE \"%s\" ( %s );' % (self.name, col_sql)\n cur.execute(create_sql)",
"def db_create_table(db_in, tablename):\n connection = db_in.connection.cursor()\n connection.execute('CREATE TABLE IF NOT EXISTS %s(id INTEGER PRIMARY KEY);' % tablename)",
"def test_dummydb_new_table_duplicate_name(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)\n db.create_table(\"new_table\", columns)",
"def create_base_table(self, table_name):\n print('new')\n # Create table at first.\n select_stm = self.construct_base_table()\n exec_query('DROP TABLE IF EXISTS %s;' % table_name) \n sql = \"\"\"\n CREATE TABLE %s AS\n %s\n \"\"\" % (table_name, select_stm)\n exec_query(sql)",
"def create_table(self, name: str, columns=None, foreigns=None) -> None:\n\n if foreigns is None:\n foreigns = []\n\n if columns is None:\n columns = []\n\n sql = 'CREATE TABLE ' + name + ' ('\n\n for column in columns:\n sql += column.to_sql() + ','\n\n for foreign in foreigns:\n sql += foreign.to_sql()\n sql += ','\n\n sql = sql[:-1] + ');'\n\n self.cursor.execute(sql)",
"def imp_create_tables():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n # Drop the tables (uncomment if necessary)\n #drop_tables(cur, conn)\n\n # Create the tables\n create_tables(cur, conn)\n\n conn.close()",
"def create_questions_table(conn):\n execute_sql_script(conn, \"04_create_questions_table.sql\")",
"def create_table(self, create_table_sql):\n print('connect')\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n c.execute(create_table_sql)\n conn.close()",
"def check_and_create_table(self) -> None:\n table_ids = [t.table_id for t in self.instance.list_tables()]\n\n if not self.table_id in table_ids:\n self.table.create()\n f = self.table.column_family(self.family_id)\n f.create()\n\n f_inc = self.table.column_family(self.incrementer_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_inc.create()\n\n f_log = self.table.column_family(self.log_family_id)\n f_log.create()\n\n f_ce = self.table.column_family(self.cross_edge_family_id,\n gc_rule=MaxVersionsGCRule(1))\n f_ce.create()\n\n print(\"Table created\")",
"def create_tables(self):\n\n self.cur.execute('''CREATE TABLE IF NOT EXISTS my_business_entry\n (\n id SERIAL PRIMARY KEY,\n url_yes_no boolean,\n url TEXT,\n phone_yes_no boolean,\n phone TEXT,\n rating TEXT,\n nr_of_ratings TEXT,\n myBusiness boolean,\n company TEXT\n );''')\n\n self.connection.commit()",
"def create_table(conn, create_table_sql):\r\n try:\r\n c = conn.cursor()\r\n c.execute(create_table_sql)\r\n except Error as e:\r\n print(e)",
"def create_table(self, conn, create_table_sql):\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n except Error as e:\n print(e)",
"def create_table():\n conn = psycopg2.connect(host=\"localhost\", database=\"integration\", user=\"postgres\", password=\"postgres\")\n cursor = conn.cursor()\n cursor.execute(CREATE_TABLE)\n conn.commit()\n cursor.close()",
"def create_table(self, create_table_sql):\n connection = self.__create_connection()\n try:\n c = connection.cursor()\n c.execute(create_table_sql)\n except Error as e:\n print(e)",
"def create_dataBase(conn, create_cmd):\n if conn:\n cursor = conn.cursor()\n cursor.execute(create_cmd)\n conn.commit()\n #print '[sql management] Table Created...'",
"def create_train_table(conn):\n execute_sql_script(conn, \"03_create_train_table.sql\")",
"def create_example_sample_submission_table(conn):\n execute_sql_script(conn, \"07_create_example_sample_submission_table.sql\")",
"def create_table(conn, sql_create_table):\n try:\n c = conn.cursor()\n c.execute(sql_create_table)\n except Error as e:\n print(e)",
"def create_table(conn, sql_create_table):\n try:\n c = conn.cursor()\n c.execute(sql_create_table)\n except Error as e:\n print(e)",
"def create_table(conn, sql_create_table):\n try:\n c = conn.cursor()\n c.execute(sql_create_table)\n except Error as e:\n print(e)",
"def create_table(self):\n Engine.create_table(self)\n self.connection.commit()"
] | [
"0.68531334",
"0.6769567",
"0.6769567",
"0.67081726",
"0.6704613",
"0.6654913",
"0.6629334",
"0.66245294",
"0.6596241",
"0.65905637",
"0.6554986",
"0.6512326",
"0.64604354",
"0.64442676",
"0.6398214",
"0.63658255",
"0.63251376",
"0.6320983",
"0.6311493",
"0.6310929",
"0.6301367",
"0.6300838",
"0.629358",
"0.62701035",
"0.6254469",
"0.6234495",
"0.62333256",
"0.62333256",
"0.62333256",
"0.6221567"
] | 0.7019781 | 0 |
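A hedged sketch of how the create_foreign_table helper above might be driven from a test; the server dict keys are exactly the ones the function itself reads, while the concrete values are placeholders:

server = {
    "username": "postgres",
    "db_password": "secret",     # placeholder credentials
    "host": "localhost",
    "port": 5432,
}
ft_oid = create_foreign_table(server, "test_db", "dummy_schema",
                              "test_fdw_server", "ft_employees")
print("created foreign table with oid:", ft_oid)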
This function will verify current foreign table. | def verify_foreign_table(server, db_name, fsrv_name):
try:
connection = get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'])
pg_cursor = connection.cursor()
pg_cursor.execute(
"SELECT ftrelid FROM pg_foreign_table WHERE ftserver = "
"(SELECT oid FROM pg_foreign_server WHERE srvname = '%s') ORDER BY "
"ftrelid ASC limit 1" % fsrv_name)
fts = pg_cursor.fetchone()
connection.close()
return fts
except Exception:
traceback.print_exc(file=sys.stderr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def foreign_key_check(self):\n # MyRocks doesn't support foreign key\n if self.is_myrocks_table:\n log.info(\n \"SKip foreign key check because MyRocks doesn't support \" \"this yet\"\n )\n return True\n foreign_keys = self.query(\n sql.foreign_key_cnt,\n (\n self.table_name,\n self._current_db,\n self.table_name,\n self._current_db,\n ),\n )\n if foreign_keys:\n fk = \"CONSTRAINT `{}` FOREIGN KEY (`{}`) REFERENCES `{}` (`{}`)\".format(\n foreign_keys[0][\"constraint_name\"],\n foreign_keys[0][\"col_name\"],\n foreign_keys[0][\"ref_tab\"],\n foreign_keys[0][\"ref_col_name\"],\n )\n raise OSCError(\n \"FOREIGN_KEY_FOUND\",\n {\"db\": self._current_db, \"table\": self.table_name, \"fk\": fk},\n )",
"def verify_table(self):\n metadata = MetaData()\n metadata.reflect(bind = StatusSource.engine)\n mine = str(self.table.columns)\n verified = str(metadata.tables[self.tablename].columns)\n if mine != verified:\n raise DbException(\"Table '%s' in the database has schema %s whereas the query's schema is %s\" % (self.tablename, verified, mine))",
"def verify_table(self):\r\n metadata = MetaData()\r\n metadata.reflect(bind = DbInsertStatusHandler.engine)\r\n mine = str(self.table.columns)\r\n verified = str(metadata.tables[self.tablename].columns)\r\n if mine != verified:\r\n raise DbException(\"Table '%s' in the database has schema %s whereas the query's schema is %s\" % (self.tablename, verified, mine))",
"def check_foreign_key_exists(self, table_name, column_name, referenced_table, referenced_column):\n ans = self.execute(self.commands.foreign_key_exists(self.db.name, table_name, column_name, referenced_table, referenced_column))\n if not ans:\n return False\n return True",
"def validate_table(self, table, table_struct, verbose=True):\n \n assert(self.connected)\n try: \n assert(self.check_table(table, verbose=False)) \n except AssertionError: \n raise TableNotFoundError\n \n GET_SCHEMA_INFORMATION_COMMAND = \"SELECT ORDINAL_POSITION, COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE, COLUMN_KEY, EXTRA \" \\\n \t \"FROM INFORMATION_SCHEMA.COLUMNS \" \\\n \t \"WHERE TABLE_NAME='{0}' ORDER BY ORDINAL_POSITION\".format(table)\n \n GET_SCHEMA_FK_INFORMATION_COMMAND = \"SELECT COLUMN_NAME, CONSTRAINT_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME \" \\\n \"FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE \" \\\n \"WHERE REFERENCED_TABLE_SCHEMA = '{0}' AND TABLE_NAME = '{1}' AND COLUMN_NAME = '{2}'\"\n \n CHANGE_TYPE_COMMAND = \"ALTER TABLE {0} MODIFY {1} {2} {3}\"\n \n ADD_FK_COMMAND = \"ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})\" \n \n DROP_FK_CONSTRAINT_COMMAND = \"ALTER TABLE {0} DROP FOREIGN KEY {1}\" \n \n \n self.cursor.execute(GET_SCHEMA_INFORMATION_COMMAND)\n \n # load all column info from the database \n columns = {}\n for c in self.cursor:\n columns[c[1]] = c\n \n for column,db_col in zip(table_struct,columns):\n \n # load parameter values from the DB \n (ord_pos, name, col_type, isnull, key_type, extra) = columns[db_col]\n \n isnull = isnull == 'YES'\n auto_increment = extra == 'auto_increment'\n foreign_key = key_type == 'MUL'\n \n # parse new parameter values\n struct_type = table_struct[column][0]\n parameters = table_struct[column][1] if ( len(table_struct[column]) > 1) else None\n \n # get parameters values in boolean format\n if (parameters == None):\n new_isnull = True\n new_auto_increment = False\n new_foreign_key = False\n else:\n if 'not_null' in parameters: new_isnull = not parameters['not_null']\n else: new_isnull = True\n \n if 'auto_increment' in parameters: new_auto_increment = parameters['auto_increment']\n else: new_auto_increment = False\n \n if 'foreign_key' in parameters: new_foreign_key = parameters['foreign_key']\n else: new_foreign_key = False\n \n \n \n \n if verbose: \n print(\"\\n---\\n\\nChecking column '{0}'...\".format(column))\n \n # check name, type and each parameter \n if name == column:\n \n # if something doesn't match, change within the database\n if ( col_type != struct_type ): \n if verbose:\n print(\"Column '{0}' found in the correct position with the incorrect type.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, col_type, struct_type),)\n \n cmd = CHANGE_TYPE_COMMAND.format(table, column, struct_type.upper(), '')\n \n if verbose: print(\"\\t\" + cmd)\n \n self.cursor.execute(cmd) \n \n if ( isnull != new_isnull ):\n \n if verbose:\n print(\"Column '{0}' found in the correct position an incorrect parameter.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, \"NOT NULLABLE\" if new_isnull else \"NULLABLE\", \"NULLABLE\" if new_isnull else \"NOT NULLABLE\"))\n \n \n cmd = CHANGE_TYPE_COMMAND.format(table, column, struct_type.upper(), \"NOT NULL\" if not new_isnull else \"\" )\n \n if verbose: print(\"\\t\" + cmd)\n \n \n self.cursor.execute(cmd)\n \n if ( auto_increment != new_auto_increment ):\n \n if verbose:\n print(\"Column '{0}' found in the correct position an incorrect parameter.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, \"AUTO INCREMENT\" if new_auto_increment else \"none\", \"none\" if new_auto_increment else \"AUTO INCREMENT\"))\n \n \n cmd = 
CHANGE_TYPE_COMMAND.format(table, column, struct_type.upper(), \"AUTO INCREMENT\" if new_auto_increment else \"\" )\n \n if verbose: print(\"\\t\" + cmd)\n \n \n self.cursor.execute(cmd)\n \n \n if ( foreign_key != new_foreign_key ):\n \n \n if verbose:\n print(\"Column '{0}' found in the correct position an incorrect parameter.\".format(column))\n print(\"Changing the type of '{0}' from '{1}' to '{2}'\".format(column, \"FOREIGN KEY\" if new_auto_increment else \"none\", \"none\" if new_auto_increment else \"FOREIGN KEY\"))\n \n \n \n if ('foreign_key' in parameters and parameters['foreign_key']):\n \n referenced_table = parameters['references'].split('(')[0]\n referenced_column = parameters['references'].split('(')[1][:-1] \n \n \n if (not self.check_table(referenced_table, verbose=False)):\n raise(TableNotFoundError)\n \n \n if (not self.check_column(referenced_column, referenced_table, verbose=False)):\n raise(ColumnNotFoundError)\n \n \n cmd = ADD_FK_COMMAND.format(table,column,referenced_table, referenced_column)\n\n \n \n if verbose: print(\"\\t\" + cmd)\n \n try:\n self.cursor.execute(cmd) \n except:\n print(\" > Error: Cannot add foreign key constraint to column '{0}' in the table '{1}'. You must remove all data from\\n > this column using the clear_column() command first.\".format(column, table))\n \n else:\n \n # check if column has a foreign key constraint\n \n cmd = GET_SCHEMA_FK_INFORMATION_COMMAND.format(self.config['database'], table, column)\n \n self.cursor.execute(cmd)\n \n fk_name = None\n for row in self.cursor:\n fk_name = row[1]\n break\n \n if fk_name != None:\n cmd = DROP_FK_CONSTRAINT_COMMAND.format(table, fk_name)\n \n if verbose: \n print(\"Column '{0}' involved in foreign key constraint '{1}'\".format(column, fk_name))\n print(\"Dropping foreign key constraint '{0}'\".format(fk_name))\n print(\"\\t\" + cmd)\n \n self.cursor.execute(cmd)\n\n \n \n if verbose: print(\"Done.\")\n \n \n if (len(columns) > len(table_struct)):\n \n if verbose: print(\"\\n---\\n\\nExtra columns found in database\")\n \n for col in columns:\n if (col not in table_struct): \n \n if verbose:\n print(\"Column '{0}' found in the database but not found in the configuration.\".format(col))\n \n self.delete_column(col, table)\n \n \n elif(len(table_struct) > len(columns)):\n \n if verbose: print(\"\\n---\\n\\nExtra columns found in configuration. \")\n\n for col in table_struct:\n if col not in columns:\n if verbose: print(\"Column '{0}' found in configuration but not in database\".format(col))\n self.insert_column(col, table_struct[col][0], table, params = table_struct[col][1] if ( len(table_struct[col]) > 1) else None)",
"def _is_foreign_key(self, key):\n return self._in_keys(key, self._foreign_keys)",
"def check_table(cur, table: str) -> bool:\n table_data = cur.execute(f\"\"\"\n SELECT name \n FROM sqlite_master \n WHERE type='table' \n AND name='{table}'\n \"\"\")\n table_fetch = table_data.fetchall()\n if table_fetch:\n return True\n return False",
"def isForeignKey(self):\n return self._foreignKey",
"def test_foreign_column(self):\n\n table2 = self.h5file.create_table('/', 'other', self.tableDescription)\n self.assertRaises(ValueError, self.table.where,\n 'c_int32_a + c_int32_b > 0',\n {'c_int32_a': self.table.cols.c_int32,\n 'c_int32_b': table2.cols.c_int32})",
"def table_check(tablename, path):\n instance = arcno(path)\n tablelist = [i for i,j in instance.actual_list.items()]\n return True if tablename in tablelist else False",
"def enforce_foreign_keys(self):\n with self._get_db_connection() as conn:\n try:\n c = conn.cursor()\n c.execute('PRAGMA foreign_keys=ON')\n except Exception:\n conn.rollback()\n raise Exception(sys.exc_info())\n else:\n conn.commit()",
"def check_table(self, table_name: str) -> bool:\n try:\n if self.engine.dialect.has_table(self.engine.connect(), table_name):\n return self.get_input(table_name)\n return False\n except Exception as err:\n logger.error(\"check_table [error] -> %s\" % err)\n return False",
"def isForeignKey(cls, _field):\n return isinstance(_field, fields.ForeignKeyField)",
"def test_foreign_key_through_pk(self):\n metadata = MetaData(schema=\"unique\")\n sa_models = construct_models(metadata)\n sa_model = sa_models[RelatedToItemViaPrimaryKey]\n table = sa_model.__table__\n self.assertEqual(len(table.foreign_keys), 1)\n foreign_key, *_ = table.foreign_keys\n foreign_column = foreign_key.column\n item_table = sa_models[Item].__table__\n self.assertIs(foreign_column.table, item_table)\n self.assertEqual(foreign_column.name, \"id\")\n self.assertEqual(foreign_column.type, item_table.c.id.type)",
"def collect_drop_fk(self):\n try:\n conn = self.engine.connect()\n transactional = conn.begin()\n inspector = reflection.Inspector.from_engine(self.engine)\n\n for table_name in inspector.get_table_names():\n if table_name in self.table_list:\n for fk in inspector.get_foreign_keys(table_name):\n if not fk[\"name\"]:\n continue\n self.dest_fk.append(ForeignKeyConstraint((), (), name=fk[\"name\"]))\n self.contraints_columns[table_name].add(*fk[\"constrained_columns\"])\n transactional.commit()\n except Exception as err:\n logger.error(\"collect_drop_fk [error] -> %s\" % err)\n return False\n finally:\n conn.close()",
"def checkTable(self, in_table_name):\n phrase1 = \"SELECT count(*) FROM sqlite_master\"\n phrase2 = \"type='table' AND name='{}';\".format(in_table_name)\n self.cursor.execute(\"{} WHERE {}\".format(phrase1, phrase2))\n return self.cursor.fetchone()[0] == 1",
"def _check_foreign_cols(\n self, join_condition: ColumnElement[bool], primary: bool\n ) -> None:\n\n can_sync = False\n\n foreign_cols = self._gather_columns_with_annotation(\n join_condition, \"foreign\"\n )\n\n has_foreign = bool(foreign_cols)\n\n if primary:\n can_sync = bool(self.synchronize_pairs)\n else:\n can_sync = bool(self.secondary_synchronize_pairs)\n\n if (\n self.support_sync\n and can_sync\n or (not self.support_sync and has_foreign)\n ):\n return\n\n # from here below is just determining the best error message\n # to report. Check for a join condition using any operator\n # (not just ==), perhaps they need to turn on \"viewonly=True\".\n if self.support_sync and has_foreign and not can_sync:\n err = (\n \"Could not locate any simple equality expressions \"\n \"involving locally mapped foreign key columns for \"\n \"%s join condition \"\n \"'%s' on relationship %s.\"\n % (\n primary and \"primary\" or \"secondary\",\n join_condition,\n self.prop,\n )\n )\n err += (\n \" Ensure that referencing columns are associated \"\n \"with a ForeignKey or ForeignKeyConstraint, or are \"\n \"annotated in the join condition with the foreign() \"\n \"annotation. To allow comparison operators other than \"\n \"'==', the relationship can be marked as viewonly=True.\"\n )\n\n raise sa_exc.ArgumentError(err)\n else:\n err = (\n \"Could not locate any relevant foreign key columns \"\n \"for %s join condition '%s' on relationship %s.\"\n % (\n primary and \"primary\" or \"secondary\",\n join_condition,\n self.prop,\n )\n )\n err += (\n \" Ensure that referencing columns are associated \"\n \"with a ForeignKey or ForeignKeyConstraint, or are \"\n \"annotated in the join condition with the foreign() \"\n \"annotation.\"\n )\n raise sa_exc.ArgumentError(err)",
"def __call__(self):\n try:\n _ = self.engine.table_names()\n except OperationalError:\n return False\n else:\n return True",
"def verify(self):\n for col in self.columns:\n if col not in self.table_obj.columns.keys():\n raise Exception('{} column not found in {}'.format(\n col, self.table_obj))",
"def verify(self):\n for col in self._columns:\n if col not in self._table_obj.columns.keys():\n raise GaiaException('{} column not found in {}'.format(\n col, self._table_obj))",
"def test_foreign_key_through_unique_field(self):\n metadata = MetaData(schema=\"unique\")\n sa_models = construct_models(metadata)\n sa_model = sa_models[RelatedToItemViaUniqueField]\n table = sa_model.__table__\n self.assertEqual(len(table.foreign_keys), 1)\n foreign_key, *_ = table.foreign_keys\n foreign_column = foreign_key.column\n item_table = sa_models[Item].__table__\n self.assertIs(foreign_column.table, item_table)\n self.assertEqual(foreign_column.name, \"legacy_id\")\n self.assertEqual(foreign_column.type, item_table.c.legacy_id.type)",
"def verify_database(self):\n super().verify_database(names=schema.zalert_names,\n script=schema.schema)",
"def is_key_failure(e: sqlite3.IntegrityError) -> bool:\n return str(e) == \"FOREIGN KEY constraint failed\"",
"def process(self):\n try:\n # self.alter_columns()\n self.collect_drop_fk()\n self.update_table()\n self.create_tables()\n self.db_operations.create_fk_constraint(self.fk_constraints, self.contraints_columns)\n return True\n except Exception as err:\n logger.error(\"create_tables [error] -> %s\" % err)",
"def check_table(schemaname=settings.DEFAULT_SCHEMA, tablename=settings.STATES):\n\n conn = None\n cur = None\n\n try:\n\n conn = utils.pgconnect(**settings.DEFAULT_CONNECTION)\n cur = conn.cursor()\n cur.execute(\"\"\"SELECT to_regclass('%s.%s');\"\"\", (AsIs(schemaname), AsIs(tablename)))\n result = cur.fetchone()[0]\n\n return (True if result else False)\n\n except Exception as e:\n raise Exception(e)\n\n finally:\n if conn: conn = None\n if cur: cur = None",
"def test_table_false_positives(self):\n pass",
"def check_constraints(self, table_names=None):\r\n if self.connection:\r\n cursor = self.connection.cursor()\r\n else:\r\n cursor = self._cursor()\r\n if not table_names:\r\n cursor.execute('DBCC CHECKCONSTRAINTS WITH ALL_CONSTRAINTS')\r\n if cursor.description:\r\n raise DjangoIntegrityError(cursor.fetchall())\r\n else:\r\n qn = self.ops.quote_name\r\n for name in table_names:\r\n cursor.execute('DBCC CHECKCONSTRAINTS({0}) WITH ALL_CONSTRAINTS'.format(\r\n qn(name)\r\n ))\r\n if cursor.description:\r\n raise DjangoIntegrityError(cursor.fetchall())",
"def _relation_check(self):\n seen = set()\n for entity in self.get_entities():\n for field in entity.fields.itervalues():\n if field.is_relation():\n seen.add(field.remote_name)\n missing = seen - set(self.entities.keys())\n if missing:\n raise exceptions.SchemaError(\n 'undefined entities referenced in relations: %s' % (\n ', '.join(missing)))",
"def is_pure_binary(self, table, follow_naming_convention=True):\n\n # table has only two foreign_key constraints.\n # Each constraint is over only one column.\n if not (len(table.foreign_keys) == 2 and\n len(table.foreign_keys[0].foreign_key_columns) == 1 and\n len(table.foreign_keys[1].foreign_key_columns) == 1):\n return False\n\n fk0 = table.foreign_keys[0].foreign_key_columns[0]['column_name']\n fk1 = table.foreign_keys[1].foreign_key_columns[0]['column_name']\n\n # There is a uniqeness constraint on the pair of fkey columns.\n f = filter(lambda x: len(x.unique_columns) == 2 and fk0 in x.unique_columns and fk1 in x.unique_columns,\n table.keys)\n\n if len(list(f)) != 1:\n return False\n\n # Null is not allowed on the column.\n if table.column_definitions[fk0].nullok or table.column_definitions[fk1].nullok:\n return False\n\n if follow_naming_convention and not (fk0 in table.name and fk1 in table.name):\n return False\n\n return True",
"def check_table(table_name, engine):\n sql = (\"SELECT \"\n \"* \"\n \"FROM information_schema.tables \"\n \"WHERE table_name = '{}'\".format(table_name)\n )\n result = engine.execute(sql)\n\n if len(result.fetchall()) > 0:\n return True\n else:\n return False"
] | [
"0.73069215",
"0.65584505",
"0.65232533",
"0.65034956",
"0.6413673",
"0.61686987",
"0.6062092",
"0.60607314",
"0.5977171",
"0.5951747",
"0.59400225",
"0.5912802",
"0.5910147",
"0.5875509",
"0.5874834",
"0.58528465",
"0.5831443",
"0.5819649",
"0.5797078",
"0.57933843",
"0.57436293",
"0.5734158",
"0.5723164",
"0.5646982",
"0.56053764",
"0.5597663",
"0.55815846",
"0.5563194",
"0.55134875",
"0.55132604"
] | 0.69198555 | 1 |
returns partition sum without electronic contribution, v = vibrational frequency in m^-1, m = mass in kg, I = moment of inertia, either a number or a list of three [kg m^2], V = volume in m^3, sym = number of equivalent rotation axes (symmetry number) | def partition(v,m,I,V,sym):
T = s.Symbol("T")
return qvib(v) + qtrans(m,V) + qrot(I,sym) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vol_uc(x):\r\n return sum([vol(m) for m in metamer(x)])",
"def get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}",
"def get_k(M):\n k = np.arange(1,M+1)*np.pi/(M+1) # vector of all possible quasimomenta\n return k",
"def decomposition_into_s_n_irreducibles(self, n):\r\n w5 = partitions_list(n)\r\n M5 = form_matrix_yt(w5)\r\n card = math.factorial(n)\r\n vec_dic = {}\r\n for k in range(self.dimension()+1):\r\n D = {}\r\n uu = []\r\n vv = []\r\n p = k \r\n A = self.matrix_simmetric_representate(p)\r\n if (p >0 and (p <= self.dimension())):\r\n null = nullspace(A)\r\n w3 = []\r\n for i in range(len(null[0])):\r\n w = []\r\n for j in range(len(null)):\r\n w.append(null[j][i])\r\n w3.append(w) \r\n null = w3\r\n M = np.matrix(w3, dtype= np.float64).transpose()\r\n Mi = np.linalg.pinv(M)\r\n else:\r\n if (p == 0):\r\n M = A\r\n null = []\r\n for i in range(A.shape[0]):\r\n aux = []\r\n for j in range(A.shape[1]):\r\n aux.append(M[i,j])\r\n null.append(aux)\r\n M = np.matrix(null, dtype=np.float64)\r\n Mi = M\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n A1=self.matrix_simmetric_representate(p)\r\n col = columnspace(A1)\r\n w4 = []\r\n for i in range(len(col[0])):\r\n w = []\r\n for j in range(len(col)):\r\n w.append(col[j][i])\r\n w4.append(w)\r\n col = w4\r\n M1 = np.matrix(w4, dtype=np.float64).transpose()\r\n Mii = np.linalg.pinv(M1)\r\n for h in w5:\r\n p = k \r\n if (p >0 and (p <= self.dimension())):\r\n if (all(elem == 0 for elem in null[0])):\r\n l1 = 0\r\n else:\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n if (p == 0):\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n l1 = 0\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n hi = self.basis_group_oriented_p_chains(p-1) \r\n on1i = np.ones(len(list(hi.dic.keys())), dtype=np.float64) \r\n vi = P_chains([],[])\r\n vi = P_chains(list(hi.dic.keys()),on1i)\r\n v1i = permutation_in_simplex_test(vi, make_permutation(h))\r\n D1i={}\r\n c1 = 0\r\n for i in list(v1i.dic.keys()):\r\n c2 = 1\r\n for j in list(hi.dic.keys()):\r\n if (i == j):\r\n if (v1i.dic[i] == hi.dic[j]):\r\n D1i[c1] = c2\r\n else:\r\n D1i[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M1.shape[0]\r\n cc = M1.shape[1]\r\n Mai = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Mai[i,:] = (M1[(abs(D1i[i])-1),:]*(np.sign(D1i[i])))\r\n l2 = 0\r\n for j in range(cc):\r\n l2 = np.dot(Mii[j,:],Mai[:,j])[0,0] + l2\r\n else:\r\n l2 = 0\r\n 
uu.append(l1-l2) \r\n vv.append(size_conjugacy_class(h,n))\r\n for i in range(M5.shape[0]):\r\n Ip = 0\r\n for j in range(M5.shape[1]):\r\n Ip = Ip + M5[i,j]*uu[j]*vv[j]\r\n Ip = Ip/card\r\n D[tuple(w5[i])] = abs(round(Ip))\r\n '''Note that I am using round, only because the results obtained are \r\n not esthetics'''\r\n vec_dic[k] = D\r\n return vec_dic",
"def partition_by_eigenvector(graph):\n ###TODO\n pass",
"def nfw(self, k, m, z):\n RS, rhoS, c = self.rS_rhoS_c(m, z)\n #\n result = np.sin(k * RS) * ( Si((1+c) * k * RS) - Si(k * RS) )\n result += - np.sin(c * k * RS) / ((1+c) * k * RS)\n result += np.cos(k * RS) * ( Ci((1+c) * k * RS) - Ci(k * RS) )\n result /= (np.log(1+c) - c/(1+c))\n return result",
"def nfw(self, k, m, z):\n RS, rhoS, c = self.rS_rhoS_c(m, z)\n #\n result = np.sin(k * RS) * ( Si((1+c) * k * RS) - Si(k * RS) )\n result += - np.sin(c * k * RS) / ((1+c) * k * RS)\n result += np.cos(k * RS) * ( Ci((1+c) * k * RS) - Ci(k * RS) )\n result /= (np.log(1+c) - c/(1+c))\n return result",
"def part(n, show_progress=False):\n # Get partitions as list of tuples\n parts = partitions(n, show_progress=show_progress)\n\n #products = set(map(lambda x: np.prod(x), parts))\n # Only count unique products\n filtered_products = list(set(parts.values()))\n filtered_products.sort()\n\n return format('Range: %d Average: %.2f Median: %.2f' % \n (filtered_products[-1]-filtered_products[0], np.mean(filtered_products), np.median(filtered_products)))",
"def inertia(mus):\n pos, negs, zeros = cluster_eignvalues(mus)\n\n return len(zeros) + min(len(pos), len(negs))",
"def total_electronic_hamiltonian(self):\n return block_diag(*[self.electronic_manifold(n) for n in range(3)])",
"def HarmonicOscillator(inv_mass_matrix, k=1.0, m=1.0):\n\n def potential_energy(q):\n return jnp.sum(0.5 * k * jnp.square(q[\"x\"]))\n\n def kinetic_energy(p):\n v = jnp.multiply(inv_mass_matrix, p[\"x\"])\n return jnp.sum(0.5 * jnp.dot(v, p[\"x\"]))\n\n return potential_energy, kinetic_energy",
"def spectral_modularity_partition(G):\n try:\n import numpy as np\n except:\n raise ImportError(\"spectral_partition() \\\n requires NumPy: http://scipy.org/\")\n\n\n k = np.matrix(G.degree().values())\n m = G.number_of_edges()\n B = nx.adj_matrix(G) - (k.transpose() * k) / (2.0 * m)\n eigenvalues, eigenvectors = np.linalg.eig(B)\n # sort and keep smallest nonzero \n index = np.argsort(eigenvalues)[-1] # -1 index is largest eigenvalue\n v2 = zip(np.real(eigenvectors[:, index]), G)\n \n C = [set(), set()]\n \n for (u, n) in v2:\n if u < 0:\n C[0].add(n)\n else:\n C[1].add(n)\n return C",
"def inv_sym(self, ):\n m = self.m\n n = self.n\n kQ = self.kQ\n iQ = self.iQ\n iA = self.iA\n kA = self.kA\n kAt = self.kAt\n iAt = self.iAt\n bndmark = self.bndmark\n rngmark = self.rngmark\n\n verbose = self.verbose\n pdf = self.pdf\n\n separable = True\n degree = np.empty(n+m, dtype=np.int)\n nbrs = np.empty(n+m, dtype=object)\n\n #/*-----------------------------------------------------+\n #| First check to see if the problem is separable. */\n\n for j in range(n):\n for k in range(kQ[j], kQ[j+1]):\n if iQ[k] != j:\n separable = False\n break\n\n #/*----------------------------------------------------+\n #| Select ordering priority (primal or dual) */\n\n\n _dense, _fraction, pfillin, dfillin = 0.0, 0.0, 0.0, 0.0\n\n _fraction = 1.0e0\n for j in range(n):\n _dense = float(kA[j+1]-kA[j])/(m+1)\n _fraction = _fraction*(1.0e0 - _dense*_dense)\n\n pfillin = 0.5*m*m*(1.0e0-_fraction)\n if verbose>2:\n print(\"primal fillin estimate: {:10.0f}\".format(pfillin))\n\n _fraction = 1.0e0\n for i in range(m):\n _dense = float(kAt[i+1]-kAt[i])/(n+1)\n _fraction = _fraction*(1.0e0 - _dense*_dense)\n\n dfillin = 0.5*n*n*(1.0e0-_fraction)\n if verbose>2:\n print(\"dual fillin estimate: {:10.0f}\\n\".format(dfillin))\n\n if pdf == self._UNSET:\n if 3*pfillin <= dfillin and separable:\n pdf = self._PRIMAL\n if verbose>2:\n print(\"Ordering priority favors PRIMAL\")\n else:\n pdf = self._DUAL\n if verbose>2:\n print(\"Ordering priority favors DUAL\")\n\n\n #/*----------------------------------------------+\n #| Initialize nbrs so that nbrs[col][k] con- |\n #| tains the row index of the k_th nonzero in |\n #| column col. |\n #| Initialize degree so that degree[col] con- |\n #| tains the number of nonzeros in column col. |\n #| */\n\n for j in range(n):\n ne = kA[j+1] - kA[j] + kQ[j+1] - kQ[j]\n nbrs[j] = np.empty(ne, dtype=np.int)\n ne = 0\n for k in range(kA[j], kA[j+1]):\n nbrs[j][ne] = n+iA[k]\n ne+=1\n for k in range(kQ[j],kQ[j+1]):\n if iQ[k] != j:\n nbrs[j][ne] = iQ[k]\n ne+=1\n\n degree[j] = ne\n\n for i in range(m):\n ne = kAt[i+1] - kAt[i]\n nbrs[n+i] = np.empty(ne, dtype=np.int)\n degree[n+i] = ne\n ne = 0\n for k in range(kAt[i], kAt[i+1]):\n nbrs[n+i][ne] = iAt[k]\n ne+=1\n\n #/*----------------------------------------------+\n #| Initialize tier to contain the ordering |\n #| priority scheme. 
|\n #| */\n\n if self.tier is None:\n self.tier = np.empty(n+m, dtype=np.int)\n n1 = 0\n if pdf == self._PRIMAL:\n for j in range(n):\n if bndmark[j] != FREEVAR:\n self.tier[j] = 0 # 0\n else:\n self.tier[j] = 1 # 2\n\n for i in range(m):\n if rngmark[i] == UNCONST:\n self.tier[n+i] = 1 # 4\n n1+=1\n elif rngmark[i] == INFINITE:\n self.tier[n+i] = 1 # 1\n else:\n self.tier[n+i] = 1 # 3\n n1+=1\n\n else:\n for j in range(n):\n if bndmark[j] != FREEVAR:\n self.tier[j] = 1 # 1\n else:\n self.tier[j] = 1 # 3\n n1+=1\n\n for i in range(m):\n if rngmark[i] == UNCONST:\n self.tier[n+i] = 1 # 4\n elif rngmark[i] == INFINITE:\n self.tier[n+i] = 0 # 0\n else:\n self.tier[n+i] = 1 # 2\n\n\n #/*---------------------------------------------------------+\n #| compute maximum column degree of tier zero columns */\n\n if self.dense < 0:\n denfac = 3.0\n colhisto = np.zeros(n+m+1, dtype=np.int)\n\n for i in range(n+m):\n if self.tier[i] == 0:\n colhisto[ degree[i] ] += 1\n\n tot = 0\n _max = n1\n for i in range(n+m):\n tot += colhisto[i]\n if tot >= _max:\n break\n i+=1\n tot = 0\n cnt = 0\n for j in range(n+m):\n if self.tier[j] == 0:\n tot += degree[j]\n cnt+=1\n self.dense = dense = int(denfac*i)\n\n #dense = (int)(denfac*MAX(i,tot/cnt))\n \t\t#printf(\"i = %d, n = %d, m = %d, n1 = %d \\n\", i,n,m,n1)\n \t\t#printf(\"tot = %d, cnt = %d\\n\", tot, cnt)\n del(colhisto)\n\n\n if verbose>2:\n print(\"dense: {:5d}\".format(dense))\n\n #/*----------------------------------------------+\n #| Get memory for mark[]. */\n\n self.mark = np.empty(n+m, dtype=np.int)\n\n self.lltsym(degree,nbrs)\n\n del(degree)\n del(nbrs)\n self.tier = None",
"def _get_vp_totvolume(self, geom, n=None):\n if geom.vp is None:\n geom.voronoi(self.pbc, self.ratio)\n if hasattr(geom.vp, 'vp_volume'):\n return geom.vp.vp_volume\n f = geom.vp.vp_faces()\n v, _ = geom.vp.vp_volumes(f)\n if n is not None:\n v = [v[i] for i in n]\n return v",
"def getPartitionFunction(self, Tlist):\n\t\treturn _modes.harmonicoscillator_partitionfunction(Tlist, self.frequency) ** self.degeneracy",
"def sim_split_sym_mig_all_size(params, ns):\n #12 parameters\t\n nuA, nu1a, nu1b, nu2a, nu2b, nu3a, nu3b, m_1, m_2, m_3, T1, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n fs.integrate(nu_T1, T1)\n ## Population function for T2 \n nu_T2 = [nu1b, nu2b, nu3b]\n mig = numpy.array([[0, m_1, m_3],[m_1, 0, m_2], [m_3, m_2, 0]]) \n fs.integrate(nu_T2, T2, m=mig) \n return fs",
"def mass_from_composition(composition):\n mass = 0.0\n for k, v in composition.items():\n if k == 0: # electron\n mass -= v * 5.489e-4\n else:\n mass += v * relative_atomic_masses[k - 1]\n return mass",
"def find_partitions(V,k):\n k_subs = k_subset(V,k)\n k_subs = uniq_subsets(k_subs)\n\n return k_subs",
"def mass(self):\n\t\treturn self.volume*self.density",
"def inertia_tensor_partial(self, part, masswt=True, zero=ZERO):\n tensor = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\n for i in part:\n if masswt:\n # I(alpha, alpha)\n tensor[0][0] += self.mass(i) * (self.y(i) * self.y(i) + self.z(i) * self.z(i))\n tensor[1][1] += self.mass(i) * (self.x(i) * self.x(i) + self.z(i) * self.z(i))\n tensor[2][2] += self.mass(i) * (self.x(i) * self.x(i) + self.y(i) * self.y(i))\n\n # I(alpha, beta)\n tensor[0][1] -= self.mass(i) * self.x(i) * self.y(i)\n tensor[0][2] -= self.mass(i) * self.x(i) * self.z(i)\n tensor[1][2] -= self.mass(i) * self.y(i) * self.z(i)\n\n else:\n # I(alpha, alpha)\n tensor[0][0] += self.y(i) * self.y(i) + self.z(i) * self.z(i)\n tensor[1][1] += self.x(i) * self.x(i) + self.z(i) * self.z(i)\n tensor[2][2] += self.x(i) * self.x(i) + self.y(i) * self.y(i)\n\n # I(alpha, beta)\n tensor[0][1] -= self.x(i) * self.y(i)\n tensor[0][2] -= self.x(i) * self.z(i)\n tensor[1][2] -= self.y(i) * self.z(i)\n\n # mirror\n tensor[1][0] = tensor[0][1]\n tensor[2][0] = tensor[0][2]\n tensor[2][1] = tensor[1][2]\n\n # Check the elements for zero and make them a hard zero.\n for i in range(3):\n for j in range(3):\n if math.fabs(tensor[i][j]) < zero:\n tensor[i][j] = 0.0\n return tensor",
"def V_angles(atoms):\n \n Va = 0 # this is the variable we will store the sum of all the energies in\n N = len(atoms)\n for i in range(len(atoms)):\n j = (i+1) % N\n k = (i-1) % N\n x_ij = atoms.coords[j] - atoms.coords[i] # vector from atom i to j\n x_ik = atoms.coords[k] - atoms.coords[i] # vector from atom i to k\n theta = np.arccos(np.dot(x_ij, x_ik)/(norm(x_ij)*norm(x_ik))) # angle between the above two\n \n Va += (theta - TH0)**2\n \n return Va",
"def get_element_density(mt):\r\n fraction_matrix = zeros(100)\r\n \r\n composition = Composition(mt['pretty_formula'])\r\n \r\n for element in composition:\r\n fraction = composition.get_atomic_fraction(element) # get the atomic fraction.\r\n fraction_matrix[element.Z] = fraction\r\n \r\n return fraction_matrix",
"def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)",
"def I(material):\n # Lookup table for Z<=13. Key is the \"Z\" of the material. Values are in eV\n lookup_table = {\n 1: 18.9,\n 2: 42.0,\n 3: 38.0,\n 4: 60.0, \n 6: 78.0,\n 7: 85.0,\n 8: 89.0,\n 10: 131.0,\n 13: 163.0\n }\n \n I_list = []\n \n for mat, frac in material.mult_by_mass().items():\n Z = nucname.znum(mat)\n # Check to see if Z is in our table\n I = lookup_table.get(Z)\n\n # If I is not in the table, calculate it\n # Use Anderson Equation 2.33\n if I is None:\n I = 9.73 * Z + 58.8 * Z ** -0.19\n \n I_list.append(I * frac)\n \n I_a = sum(I_list)\n \n # Convert I from eV to MeV\n I_a = I_a * 10**-6.0\n \n return I_a",
"def normal_vol(self, k):\r\n f, s, t = self.f, self.shift, self.t\r\n beta, rho, volvol = self.beta, self.rho, self.volvol\r\n alpha = self.alpha()\r\n v_n = normal_vol(k+s, f+s, t, alpha, beta, rho, volvol)\r\n return v_n",
"def MDL_KLT(data):\n\n eigs = []\n p = 64\n N = len(data[0])//p\n for sig in data:\n splits = np.split(sig, N)\n cov_matrix = np.zeros((p, p), dtype=np.complex128)\n for split in splits:\n split /= np.mean(split)\n cov_matrix += np.outer(split, np.conj(split))\n\n eigv = np.real(scipy.linalg.eigvalsh(cov_matrix)[::-1])\n eigv = eigv/np.mean(eigv)\n \n best_k = 0\n best_MDL = float(\"inf\")\n for k in range(0,p):\n noise_eigs = eigv[k:]\n noise_dim = len(noise_eigs)\n ratio = gmean(noise_eigs)/np.mean(noise_eigs)\n cur_MDL = -np.log(ratio**(noise_dim*N)) + .5*k*(2*p-k)*np.log(N)\n if cur_MDL < best_MDL:\n best_k = k\n best_MDL = cur_MDL\n \n if best_k == 0:\n eigs.append(0)\n else:\n eigs.append(sum(eigv[:best_k]))\n \n return np.real(np.array(eigs))",
"def total_kinetic_energy(V,M):\r\n N = V.shape[0] # number of bodies\r\n K = 0 # initialize kinetic energy\r\n V0 = np.zeros(3) # initialize center of mass velocity\r\n # find refernce velocity\r\n for n in range(N):\r\n V0 = V0 + V[n,:]*M[n]\r\n V0 = V0/np.sum(M)\r\n # find kinetic energy\r\n for n in range(N):\r\n K = K + ((util.enod(V[n,:],V0))**2)* 0.5 * M[n]\r\n \r\n return K",
"def modularity(G, partition):\n m = G.size(weight=\"weight\")\n degrees = dict(G.degree(weight=\"weight\"))\n Q = 0\n for community in partition:\n for u, v in product(community, repeat=2):\n try:\n w = G[u][v].get(\"weight\", 1)\n except KeyError:\n w = 0\n if u == v:\n # Double count self-loop weight.\n w *= 2\n Q += w - degrees[u] * degrees[v] / (2 * m)\n return Q / (2 * m)",
"def Partitioner(q,InvV,Posterior,m_points):\n \n m = InvV.n #get the number of maps being used \n Q = np.zeros([m,m_points.num]) #initialise the partition functions\n \n for j in range(m):\n #backmap the points from the posterior to the intermediate\n backmap = m_points.map(InvV,j)\n #determine the current mixture using a change of variables\n det = InvV.L[j,:,:].diagonal().prod()**2\n Q[j,:] = q[j] * multivariate_normal.pdf(backmap.all,mean=np.zeros(m_points.d),cov=np.eye(m_points.d)) * det\n \n #now we have the total mixture\n g_est = np.sum(Q,axis=0)\n\n for j in range(m):\n #the partitioner can be found from these\n Q[j,:] /= g_est\n #apply the partitioner to the posterior evaluations to get the partitioned components\n \n return Q",
"def zernike_Vnm(rho,theta,n,m):\n\tRnm = 0\n\tfact = lambda x: np.math.factorial(x)\n\tam = abs(m)\n\tfor s in range(0,(n-am)/2):\n\t\tRnm+= (-1)**s*fact(n-s)*rho**(n-2*s)/(\n\t\t\tfact(s)*fact((n+am)/2-s)*fact((n-am)/2-s))\n\tVnm = Rnm*np.exp(1j*m*theta)"
] | [
"0.5707858",
"0.54226637",
"0.5395542",
"0.52172196",
"0.5165793",
"0.5102502",
"0.5102502",
"0.5101261",
"0.50958854",
"0.49969062",
"0.49926218",
"0.49795717",
"0.4946353",
"0.49354938",
"0.49308503",
"0.49158522",
"0.4907569",
"0.4894429",
"0.4889348",
"0.4864072",
"0.48453435",
"0.48379022",
"0.48185363",
"0.48128682",
"0.48117468",
"0.48037538",
"0.48007002",
"0.47995242",
"0.47808802",
"0.4769919"
] | 0.7013808 | 0 |
This function subscribes to the secondaryCam topic and updates its state in the global scope. | def secondayCamCallback(msg):
global secondaryCamString
secondaryCamString = msg.data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _on_connect(self, client, userdata, flags, rc):\n self.subscribe(self.topic)",
"def _subscribe_update_callback(self, client, userdata, message):\n logger.info('Message recieved from {} topic'.format(message.topic))\n payload = message.payload\n try:\n payload_dict = json.loads(payload)\n light_data = payload_dict['current']['state']['desired']\n if self.light.needs_updating(light_data):\n self.light.update_lights(light_data)\n reported_payload = {\n 'state': {\n 'reported': self.light.current_settings()\n }\n }\n JSON_payload = json.dumps(reported_payload)\n self.shadowClient.publish(update_topic, JSON_payload, 0)\n except ValueError:\n logger.error('Value error')\n logger.info(payload)\n except Exception as e:\n logger.error(e.message)",
"def on_connect(client, userdata, flags, rc):\n client.subscribe(mqtt_Light_topic)",
"def subscribeConsumer(consumer):",
"def on_connect():\n # There is now a connection\n subscribe_to_topic(\"pir\",\"Trigger\")",
"def subscribe(receiver):",
"def subscribe(receiver):",
"def subscribe(receiver):",
"def on_subscribe(self, client, userdata, mid, granted_qos):\n\t\tprint (\"[{}] Client subscribed to {}\".format(\n\t\t\tint(time.time()),\n\t\t\tself.topic\n\t\t))\n\t\t#the following lines are here and not in on_connect() only for printing purpose\n\t\tif not self.printed_sub:\n\t\t\tself.printed_sub = True\n\t\t\tself.subscribe(\"measure/people\")",
"def cbMqtt_on_subscribe(client, userdata, mid, granted_qos):\n # logger.debug('Subscribed to MQTT topic with message id %d', mid)\n pass",
"def on_subscribe( client, userdata, mid, granted_qos ):\n logging.info( \"Topic successfully subcribed with QoS: %s\" %granted_qos )",
"def subscribe(receiver, updateInterval=10):",
"def subscribe(self, topic):\n self.topic = topic\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n\n self.client.loop_start()",
"def subscribe(receiver, updateInterval=None):",
"def subscribe(self, topic):\n\t\tself.topic=topic\n\t\tself.client.subscribe(self.topic)",
"def on_connect(self, client, userdata, flags, rc):\n\n logger.info(f'Connected to {self.topic} with result code {rc}')\n # self.client.publish('Augmented/A.V.A.', str(rc)) # For return the connection situation to the subscriber device.\n if rc == 0:\n self.is_connected = True\n self.client.subscribe(self.topic)",
"def subscribe(self):\n if not self._subscribed and self._connected:\n if ATTR_STREAM_ID not in self.data:\n msg = self._create_message(strings.SUB_MSG)\n self.write(msg)\n else:\n msg = self._create_message(strings.RESUB_MSG)\n self.write(msg)\n self._subscribed = True",
"def on_connect(client, userdata, flags, rc):\n print(f\"Re/Suscribing to TOPIC: {TOPIC}\")\n client.subscribe(TOPIC)\n if rc == 0:\n print(f'Connected OK Returned code={rc}')\n else:\n print('Bad connection Returned code={rc}')",
"def output_topic_callback(self, msg):\n with self.callback_lock:\n if self._time_received_input != 0:\n # Get actual time from ROS\n time_now = self.node.get_clock().now().nanoseconds\n\n # Compute the amount of time elapsed from receiving the last\n # message in the input topic\n measure = time_now - self._time_received_input\n\n # Transform from nanoseconds to milliseconds\n measure = measure / (1000 * 1000)\n\n publish_msg = Int64()\n publish_msg.data = int(measure)\n\n # Publish the measurement\n self._publisher.publish(publish_msg)\n\n self._time_received_input = 0",
"def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)",
"def _on_received(self):\n self._call_subscribers(on_level=0xFF)\n publish_topic(self._on_subscriber_topic, on_level=0xFF)",
"def on_connect(self, client, userdata, flags, rc):\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n sleep(5) # quick delay\n self.client.subscribe(self.subControls)\n self.client.subscribe(self.subSettings)",
"def camera_listener(self):\n camera_sub_cb_grp = ReentrantCallbackGroup()\n self.create_subscription(CameraMsg,\n constants.CAMERA_MSG_TOPIC,\n self.camera_callback,\n 10,\n callback_group=camera_sub_cb_grp)\n display_img_sub_cb_grp = ReentrantCallbackGroup()\n self.create_subscription(Image,\n constants.DISPLAY_MSG_TOPIC,\n self.display_callback,\n 10,\n callback_group=display_img_sub_cb_grp)",
"def on_connect(client, userdata, flags, rc):\n\t# subscribe to the LEDs topic when connected\n\tclient.subscribe(\"SNHU/IT697/leds\")",
"def on_connect(self, client, userdata, flags, rc):\n# client.subscribe(\"power_meter/status/#\")\n client.subscribe(self.mqtt_topic_status)\n client.subscribe(self.mqtt_topic_electricity + '/#')\n client.subscribe(self.mqtt_topic_gas + '/#')\n client.subscribe(self.mqtt_topic_water + '/#')\n self.mqtt_client.publish(self.mqtt_topic_last_will, \"online, \" + str(self.dconn), qos=0, retain=True)\n self.connected = True\n self.log.warning(\"Connected with result code: \" + str(rc))\n self.log.info(\"Connected to: \" + MQTT_SERVER)",
"def primaryCamCallback(msg):\n\n global primaryCamString\n primaryCamString = msg.data",
"def on_connect(client, userdata, flags, rc):\n if rc == 0:\n client.subscribe(topic_subscribe)\n print(\"connected OK with returned code=\", rc)\n else:\n print(\"Bad connection with returned code=\", rc)",
"def on_subscribe(self, mqtt_client, userdata, mid, granted_qos):\n logging.debug(\"DEBUG - subscribe ack received\")",
"def starup(self, sender, **kwargs):\n self._initialize_devices()\n for device_topic in device_topic_dict:\n _log.debug('Subscribing to ' + device_topic)\n self.vip.pubsub.subscribe(peer='pubsub',\n prefix=device_topic,\n callback=self.on_analysis_message)",
"def mqtt_sub_callback(self, client, userdata, message):\n\t#def mqtt_sub_callback(self, message):\n\n\t\t# Decode the message using UTF-8 and convert it\n\t\t# to 'string' datatype\n\t\tpayload = str(message.payload.decode(\"utf-8\"))\n\n\t\trospy.loginfo(\"[BRIDGE] Message Received from MQTT\")\n\n\t\t# Give the appropiate values to the contents of the message\n\t\t# that will be published to '/ros_iot_bridge/mqtt/sub'\n\t\tmsg_mqtt_sub = msgMqttSub()\n\t\tmsg_mqtt_sub.timestamp = rospy.Time.now()\n\t\tmsg_mqtt_sub.topic = message.topic\n\t\tmsg_mqtt_sub.message = payload\n\n\t\t# Publish the message\n\t\tself._handle_ros_pub.publish(msg_mqtt_sub)\n\n\t\t# Upload to Google Sheet\n\t\tret = self.update_gsheet(\"None\", True, payload)"
] | [
"0.6671746",
"0.63125056",
"0.6236678",
"0.6203599",
"0.6199879",
"0.61778575",
"0.61778575",
"0.61778575",
"0.6139608",
"0.6105276",
"0.60968137",
"0.60713357",
"0.60546625",
"0.6009579",
"0.59927255",
"0.596567",
"0.59032744",
"0.588785",
"0.5863523",
"0.5775574",
"0.5775574",
"0.57657975",
"0.5757503",
"0.5757264",
"0.5737805",
"0.5737493",
"0.5737411",
"0.5707207",
"0.56977177",
"0.56582916"
] | 0.6567832 | 1 |
This function first determines which camera is the primary and which is secondary. The image streams from the respective primary and secondary cameras are resized and republished. | def resizeAndRepubThread():
# reference globals
global primaryCamString
global secondaryCamString
global armCamImage
global headCamImage
# initialize image publishers
primaryPub = rospy.Publisher(primaryCamRepub, Image, queue_size=1)
secondaryPub = rospy.Publisher(secondaryCamRepub, Image, queue_size=1)
# create CvBridge object for converting CV2 images to sensor_msgs/Image messages
backBridge = CvBridge()
while(True):
primaryImage = np.zeros(shape=[512, 512, 3])
secondaryImage = np.zeros(shape=[512, 512, 3])
# just keep looping until we get images
if(np.sum(headCamImage) == 0 or np.sum(armCamImage) == 0):
rospy.loginfo("still waiting on camera images...")
continue
# get primary image
if(primaryCamString == "head"):
primaryImage = resizeImage(headCamImage, primarySize)
elif(primaryCamString == "leftArm"):
primaryImage = resizeImage(armCamImage, primarySize)
elif(primaryCamString == ""):
pass
else:
rospy.logerr("Invalid Option for primaryCamString recieved!")
# get secondary image
if(secondaryCamString == "head"):
secondaryImage = resizeImage(headCamImage, secondarySize)
elif(secondaryCamString == "leftArm"):
secondaryImage = resizeImage(armCamImage, secondarySize)
elif(secondaryCamString == ""):
pass
else:
rospy.logerr("Invalid Option for secondaryCamString recieved!")
# publish both new images
if(np.sum(primaryImage) != 0 and np.sum(secondaryImage) != 0):
primaryImageMessage = backBridge.cv2_to_imgmsg(primaryImage, "bgr8")
primaryPub.publish(primaryImageMessage)
secondaryImageMessage = backBridge.cv2_to_imgmsg(secondaryImage, "bgr8")
secondaryPub.publish(secondaryImageMessage) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, camera, cameras, settings):\n\n self.cam = None\n self.jpeg_quality = 95 # 0 to 100, higher is better quality, 95 is cv2 default\n # check picamera version\n try:\n picamversion = require('picamera')[0].version\n except:\n picamversion = '0'\n\n if 'threaded_read' in cameras[camera]: # threaded on non-threaded camera reading\n self.threaded_read = cameras[camera]['threaded_read']\n else:\n self.threaded_read = True\n if 'resolution' in cameras[camera]:\n self.resolution = literal_eval(cameras[camera]['resolution'])\n else:\n self.resolution = (320, 240)\n if 'framerate' in cameras[camera]:\n self.framerate = cameras[camera]['framerate']\n else:\n self.framerate = 32\n if 'vflip' in cameras[camera]:\n self.vflip = cameras[camera]['vflip']\n else:\n self.vflip = False\n if 'resize_width' in cameras[camera]:\n # resize_width is a percentage value\n # width in pixels will be computed later after reading a test image\n self.resize_width = cameras[camera]['resize_width']\n else:\n self.resize_width = None\n if 'viewname' in cameras[camera]:\n self.viewname = cameras[camera]['viewname']\n else:\n self.viewname = ' '\n if 'src' in cameras[camera]:\n self.src = cameras[camera]['src']\n else:\n self.src = 0\n if 'exposure_mode' in cameras[camera]:\n self.exposure_mode = cameras[camera]['exposure_mode']\n else:\n self.exposure_mode = None\n if 'iso' in cameras[camera]:\n self.iso = cameras[camera]['iso']\n else:\n self.iso = 0 # default value\n if 'shutter_speed' in cameras[camera]:\n self.shutter_speed = cameras[camera]['shutter_speed']\n else:\n self.shutter_speed = 0 # default value\n if 'sharpness' in cameras[camera]:\n self.sharpness = cameras[camera]['sharpness']\n else:\n self.sharpness = 0 # default value\n if 'contrast' in cameras[camera]:\n self.contrast = cameras[camera]['contrast']\n else:\n self.contrast = 0 # default value\n if 'brightness' in cameras[camera]:\n self.brightness = cameras[camera]['brightness']\n else:\n self.brightness = 50 # default value\n if 'exposure_compensation' in cameras[camera]:\n self.exposure_compensation = cameras[camera]['exposure_compensation']\n else:\n self.exposure_compensation = 0 # 0 default value, integer value between -25 and 25\n if 'awb_mode' in cameras[camera]:\n self.awb_mode = cameras[camera]['awb_mode']\n else:\n self.awb_mode = 'auto' # default value\n\n self.detectors = []\n if 'detectors' in cameras[camera]: # is there at least one detector\n self.setup_detectors(cameras[camera]['detectors'],\n settings.nodename,\n self.viewname)\n if camera[0].lower() == 'p': # this is a picam\n # start PiCamera and warm up; inherits methods from\n # imutils.VideoStream unless threaded_read is False; then uses class\n # PiCameraUnthreadedStream to read the PiCamera in an unthreaded way\n if self.threaded_read:\n self.cam = VideoStream(usePiCamera=True,\n resolution=self.resolution,\n framerate=self.framerate).start()\n else:\n self.cam = PiCameraUnthreadedStream(resolution=self.resolution,\n framerate=self.framerate)\n\n # if an exposure mode has been set in yaml, set it\n if self.exposure_mode:\n self.cam.camera.exposure_mode = self.exposure_mode\n # if an iso has been set in yaml, set it\n if self.iso:\n self.cam.camera.iso = self.iso\n # if an iso has been set in yaml, set it\n if self.shutter_speed:\n self.cam.camera.shutter_speed = self.shutter_speed\n # if an sharpness has been set in yaml, set it\n if self.sharpness:\n self.cam.camera.sharpness = self.sharpness\n # if an contrast has been set in yaml, set it\n if self.contrast:\n 
self.cam.camera.contrast = self.contrast\n # if an brightness has been set in yaml, set it\n if self.brightness:\n self.cam.camera.brightness = self.brightness\n # if an exposure_compensation has been set in yaml, set it\n if self.exposure_compensation:\n self.cam.camera.exposure_compensation = self.exposure_compensation\n # if an awb_mode has been set in yaml, set it\n if self.awb_mode:\n self.cam.camera.awb_mode = self.awb_mode\n self.cam_type = 'PiCamera'\n else: # this is a webcam (not a picam)\n self.cam = VideoStream(src=0).start()\n self.cam_type = 'webcam'\n sleep(3.0) # allow camera sensor to warm up\n\n # self.text is the text label for images from this camera.\n # Each image that is sent is sent with a text label so the hub can\n # file them by nodename, viewname, and send_type\n # example: JeffOffice Window|jpg\n # Nodename and View name are in one field, separated by a space.\n # send_type is in the next field\n # The 2 field names are separaged by the | character\n node_and_view = ' '.join([settings.nodename, self.viewname]).strip()\n self.text = '|'.join([node_and_view, settings.send_type])\n\n # set up camera image queue\n self.cam_q = deque(maxlen=settings.queuemax)",
"def query_camera(self):\n ok, orig_pic = self.vs.read() # Read video stream\n if ok: # If no errors\n orig_pic = imutils.rotate(orig_pic, angle=self.camera_rot)\n curr_pic = imutils.resize(orig_pic, width=self.image_width)\n return curr_pic, orig_pic\n else:\n return None, None",
"def secondary_cam_setup(cam):\n\ttry:\n\t\tresult = True\n\t\tnodemap = cam.GetNodeMap()\n\n\t\t# Configure the camera to allow for chunk data\n\t\tresult &= configure_chunk_data(nodemap)\n\n\t\t# Set up the pixel format\n\t\tresult &= pixel_format(2, cam, 'BGR8')\n\n\t\t# Set up the secondary camera hardware trigger\n\t\tresult &= configure_trigger(2, cam, 'hardware')\n\n\t\tprint(\"\\n\\t*** CONFIGURING CAMERA ***\")\n\t\tresult &= acquisition_mode(2, cam)\t\t\t\t # Continuous\n\t\tresult &= auto_exposure_mode(2, cam, 'Continuous') # Autoexposure = On\n\t\tresult &= auto_gain_mode(2, cam, 'Continuous') # Autogain = On\n\t\tprint('\\n')\n\n\texcept PySpin.SpinnakerException as ex:\n\t\tprint('Error: %s' % ex)\n\t\tresult = False\n\n\treturn result",
"def read_cameras(self):\n for camera in self.camlist:\n image = camera.cam.read()\n if camera.vflip:\n image = cv2.flip(image, -1)\n if camera.resize_width:\n image = imutils.resize(image, width=camera.width_pixels)\n camera.cam_q.append(image)\n for detector in camera.detectors:\n self.run_detector(camera, image, detector)",
"def main(argv):\n # Get default camera id based on current platform.\n if sys.platform == 'linux' or sys.platform == 'linux2':\n default_cam_ids = ['/dev/video0', '/dev/video1', '/dev/video2']\n else: # darwin win32 win64\n default_cam_ids = [0, 1, 2]\n\n # Parse CLI arguments\n ap = argparse.ArgumentParser()\n ap.add_argument('-i', '--cam_ids', default=default_cam_ids,\n help=\"camera ids list (ex: ='[/dev/video0, /dev/video1]'\")\n # TODO: implement dict argument parsing settings\n ap.add_argument('-s', '--settings',\n help=\"camera settings list \"\n \"(ex:[[(3, 640), (4, 480)], [(3, 640), (4, 480)]]\")\n args = vars(ap.parse_args())\n\n # Default camera settings\n if args[\"settings\"]:\n settings = args[\"settings\"]\n else:\n settings = [[(cv2.CAP_PROP_FRAME_WIDTH, 1280),\n (cv2.CAP_PROP_FRAME_HEIGHT, 720),\n (cv2.CAP_PROP_FPS, 30),\n (cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')),\n (cv2.CAP_PROP_AUTOFOCUS, 1)],\n [(cv2.CAP_PROP_FRAME_WIDTH, 1280),\n (cv2.CAP_PROP_FRAME_HEIGHT, 720),\n (cv2.CAP_PROP_FPS, 30),\n (cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')),\n (cv2.CAP_PROP_AUTOFOCUS, 1)],\n [(cv2.CAP_PROP_FRAME_WIDTH, 1280),\n (cv2.CAP_PROP_FRAME_HEIGHT, 720),\n (cv2.CAP_PROP_FPS, 30),\n (cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')),\n (cv2.CAP_PROP_AUTOFOCUS, 1)]]\n\n aruco_dict_num = cv2.aruco.DICT_6X6_1000\n # also available: DICT_5X5_1000, DICT_4X4_50, DICT_ARUCO_ORIGINAL\n\n # Initialize Cameras objects with calibration and lens correction\n cam_ids = args['cam_ids']\n if sys.platform != 'linux' and sys.platform != 'linux2':\n cam_ids = [int(cam_id) for cam_id in cam_ids]\n cameras = []\n for cam_id, setting in zip(cam_ids, settings):\n print('Setting up camera %s.' % cam_id)\n cam = CameraCorrected(\n cam_id=cam_id, aruco_dict_num=aruco_dict_num, settings=setting)\n cam.initialize()\n cameras.append(cam)\n\n cameras_fusion = CamerasFusion(cameras)\n cameras_fusion.initialize()\n\n # Open basic live view\n print('Live view running...')\n print(' k to calibrate correction')\n print(' m to save frame')\n print(' v loop between gray2rgb and blue2rgb fusion')\n print(' ESC or q to exit.')\n\n selected_fused = cameras_fusion.read_blue2rgb_fused\n while True:\n if cameras_fusion.fusion_calibration_is_done:\n frame = selected_fused()\n frame = camera[0].draw_fps(frame)\n else:\n for camera in cameras_fusion.cameras:\n frame = camera.read_undistort()\n frame = camera.draw_text(\n frame, 'Please manually adjust Cameras overlapping, then c'\n 'alibrate.', y=camera.height - (camera.height/20),\n thickness=2)\n k = cv2.waitKey(50) % 256\n if k == 27 or k == ord('q'):\n break\n cv2.imshow(\"Live camera\", frame)\n k = cv2.waitKey(40) % 256\n if k == 27 or k == ord('q'):\n break\n elif k == ord('k'):\n if cameras_fusion.calibrate_fusion():\n print('Calibration done!')\n elif k == ord('m'):\n cv2.imwrite('frame_fused_%s.png' % cam.cam_id, frame)\n elif k == ord('v'):\n if selected_fused == cameras_fusion.read_blue2rgb_fused:\n selected_fused = cameras_fusion.read_gray2rgb_fused\n else:\n selected_fused = cameras_fusion.read_blue2rgb_fused\n\n cameras_fusion.release() # DO NOT FORGET TO RELEASE!\n cv2.destroyAllWindows()",
"def update_display(self):\n \n # check availability of display queue of the wide camera\n# if not hasattr(self,'wide_disp_queue'):\n# pass\n# elif self.wide_disp_queue.empty():\n# pass\n# else:\n# try:\n# wide_disp_image = self.wide_disp_queue.get()\n# \n# self.wide_disp_counter += 1\n# self.wide_disp_counter %= 2\n# if self.wide_disp_counter == 0:\n# if type(wide_disp_image) == np.ndarray:\n# if wide_disp_image.shape == (self.wide_cam.settings.height.value(),self.wide_cam.settings.width.value()):\n# try:\n# self.wide_cam_image.setImage(wide_disp_image)\n# except Exception as ex:\n# print('Error: %s' % ex)\n# except Exception as ex:\n# print(\"Error: %s\" % ex)\n \n # check availability of display queue of the track camera \n if not hasattr(self,'track_disp_queue'):\n pass\n elif self.track_disp_queue.empty():\n pass\n else:\n try:\n track_disp_image = self.track_disp_queue.get()\n self.track_disp_counter += 1\n self.track_disp_counter %= 4\n if self.track_disp_counter == 0:\n if type(track_disp_image) == np.ndarray:\n if track_disp_image.shape == (self.track_cam.settings.height.value(),self.track_cam.settings.width.value()):\n try:\n self.track_cam_image.setImage(track_disp_image)\n except Exception as ex:\n print('Error: %s' % ex)\n \n x = int(self.settings.x.value())\n y = int(self.settings.y.value())\n self.tracker_data[:] = 0\n self.tracker_data[x,y] = 1\n self.tracker_image.setImage(np.copy(self.tracker_data))\n except Exception as ex:\n print(\"Error: %s\" % ex)",
"def update(self):\r\n\r\n # Update the vision frames in the system\r\n self._system.update()\r\n\r\n # Create blank PIL images to hold the video streams\r\n layered = PIL.Image.new('RGBA', (400, 400))\r\n stacked = PIL.Image.new('RGBA', (200, 800))\r\n control = PIL.Image.new('RGBA', (600, 800))\r\n\r\n focalpoint = self._system[self._appString[\"device\"].get()].focalpoint()\r\n # print(focalpoint)\r\n\r\n # Get each vision key and vision for the selected device\r\n visionList = [(visionKey, vision) for visionKey, vision in self._system[self._appString[\"device\"].get()]]\r\n\r\n # Loop through each vision in the vision list\r\n for i, (visionKey, vision) in enumerate(visionList):\r\n\r\n # Grab the frames from the vision when it is \"curr\"\r\n frameList = [frame for frameKey, frame in vision if frameKey==self._appString[\"frame\"].get()]\r\n\r\n # Loop through each frame in the frame list\r\n for frame in frameList:\r\n\r\n # Get the properties and turn the image into RGBA\r\n ratio, size = vision.properties()\r\n rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n\r\n # print(rgbFrame.shape)\r\n width, height, channels = rgbFrame.shape\r\n\r\n # Paste the images together in layered\r\n\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (int(400 * ratio), int(400 * ratio))))\r\n layered.paste(imgFrame, (int(200 * (1 - ratio)), int(200 * (1 - ratio))))\r\n\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 // width)), int(200 * (1 - ratio) - focalpoint[1] * (200 // height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1)), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200/width) / ratio), int(200 * (1 - ratio) - focalpoint[1] * (200/height) / ratio)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1) / 200), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1) / 200)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (400//width * (1- ratio))), int(200 * (1 - ratio) - focalpoint[1] * (400//height * (1 - ratio)))))\r\n\r\n # Paste the images together in stacked\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (200, 200)))\r\n stacked.paste(imgFrame, (0, 200 * i))\r\n\r\n # Add the stacked image to the canvas\r\n self._pilFrames[\"stacked\"] = PIL.ImageTk.PhotoImage(image=stacked)\r\n self._appCanvas[\"stacked\"].create_image(100, 0, image=self._pilFrames[\"stacked\"], anchor=tkinter.NW)\r\n\r\n # Add the layered image to the canvas\r\n self._pilFrames[\"layered\"] = PIL.ImageTk.PhotoImage(image=layered)\r\n self._appCanvas[\"layered\"].create_image(0, 0, image=self._pilFrames[\"layered\"], anchor=tkinter.NW)\r\n\r\n # Add the control image to the canvas\r\n imgFrame = cv2.cvtColor(self._system[self._appString[\"device\"].get()][self._appString[\"vision\"].get()][self._appString[\"frame\"].get()], cv2.COLOR_BGR2RGBA)\r\n control = PIL.Image.fromarray(cv2.resize(imgFrame, (600, 600)))\r\n self._pilFrames[\"control\"] = PIL.ImageTk.PhotoImage(image=control)\r\n self._appCanvas[\"control\"].create_image(100, 90, image=self._pilFrames[\"control\"], anchor=tkinter.NW)\r\n\r\n # Continue to 
update with a delay of 15\r\n self.after(15, self.update)",
"def primary_cam_setup(cam):\n\ttry:\n\t\tresult = True\n\t\tnodemap = cam.GetNodeMap()\n\n\t\t# Configure the camera to allow for chunk data\n\t\tresult &= configure_chunk_data(nodemap)\n\n\t\t# Setup the pixel format\n\t\tresult &= pixel_format(1, cam, 'BGR8')\n\n\t\t# Set up the primary camera output GPIO signal\n\t\tprint('\\n\\t*** CONFIGURING HARDWARE OUTPUT ***')\n\t\tcam.LineSelector.SetValue(PySpin.LineSelector_Line2)\n\t\tcam.V3_3Enable.SetValue(True)\n\t\tprint('\\t\\tCamera 1 Hardware output set to Line 2...')\n\n\t\tresult &= trigger_selector(1, cam, 'FrameStart')\n\t\tresult &= trigger_overlap(1, cam, 'ReadOut')\n\t\tresult &= configure_trigger(1, cam, 'software')\n\n\t\tprint(\"\\n\\t*** CONFIGURING CAMERA ***\")\n\t\tresult &= acquisition_mode(1, cam)\t\t\t# Continuous acquisition\n\t\tresult &= framerate(1, cam)\t\t\t\t\t# Set the framerate\n\t\tresult &= auto_exposure_mode(1, cam, 'Off') # Autoexposure = Off\n\t\tresult &= exposure_change(cam, first_exp) # Set first exposure\n\t\tresult &= auto_gain_mode(1, cam, 'Off')\t\t# Autogain = Off\n\t\tresult &= gain_change(cam, first_gain)\t # Set first gain\n\t\tprint('\\n')\n\n\texcept PySpin.SpinnakerException as ex:\n\t\tprint('Error: %s' % ex)\n\t\tresult = False\n\n\treturn result",
"def compare(image_a, image_b, is_camera_image):\n\n # Generate a unique filename\n filename = uuid.uuid4().hex[:3]\n\n if is_camera_image:\n image_a = imutils.rotate_bound(image_a, 90)\n image_b = imutils.rotate_bound(image_b, 90)\n\n # Store original to show in future\n original = image_a\n\n # Convert to greyscale\n image_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)\n image_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)\n\n # Reduce size and blur to account for shaky handheld camera based images\n if is_camera_image:\n scale_multiplier = 0.03125\n image_a = cv2.resize(image_a, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_b = cv2.resize(image_b, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_a = cv2.GaussianBlur(image_a, (1001, 1001), cv2.BORDER_DEFAULT)\n image_b = cv2.GaussianBlur(image_b, (1001, 1001), cv2.BORDER_DEFAULT)\n\n # Obtain SSIM and determine differences\n try:\n _, differences = structural_similarity(image_a, image_b, full=True, gaussian_weights=True)\n except ValueError:\n print('Images are not the same size')\n return None\n\n # Convert to cv2 array\n differences = (differences * 255).astype('uint8')\n\n # Threshold and find contours (differences)\n thresh = cv2.threshold(differences, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n\n # Draw contours (differences)\n for cont in contours:\n (x, y, w, h) = cv2.boundingRect(cont)\n if is_camera_image:\n multiplier = int(1 / scale_multiplier)\n y *= multiplier\n x *= multiplier\n h *= multiplier\n w *= multiplier\n cv2.rectangle(original, (x, y), (x + w, y + h), (255, 0, 0), 4)\n\n # TODO: Create GIF highlighting differences (instead of statuic image)\n cv2.imwrite('static/images/differences/' + filename + '.jpg', original)\n\n return filename",
"def run_single_camera(cam):\n\n try:\n # Retrieve TL device nodemap and print device information\n #nodemap_tldevice = cam.GetTLDeviceNodeMap()\n\n #result &= print_device_info(nodemap_tldevice)\n\n # Initialize camera\n cam.Init()\n\n # Retrieve GenICam nodemap\n nodemap = cam.GetNodeMap()\n exposures=[2000,4000,8000,16000]\n index=0\n if cam.ExposureAuto.GetAccessMode() != PySpin.RW:\n print(\"Unable to disable automatic exposure. Aborting...\")\n return False\n node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode(\"AcquisitionMode\"))\n if not PySpin.IsAvailable(node_acquisition_mode) or not PySpin.IsWritable(node_acquisition_mode):\n print(\"Unable to set acquisition mode to continuous (enum retrieval). Aborting...\")\n return False\n\n # Retrieve entry node from enumeration node\n node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName(\"Continuous\")\n if not PySpin.IsAvailable(node_acquisition_mode_continuous) or not PySpin.IsReadable(node_acquisition_mode_continuous):\n print(\"Unable to set acquisition mode to continuous (entry retrieval). Aborting...\")\n return False\n\n acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()\n\n node_acquisition_mode.SetIntValue(acquisition_mode_continuous)\n\n print(\"Acquisition mode set to continuous...\")\n\n cam.ExposureAuto.SetValue(PySpin.ExposureAuto_Off)\n '''\n # Set maximum width\n #\n # *** NOTES ***\n # Other nodes, such as those corresponding to image width and height,\n # might have an increment other than 1. In these cases, it can be\n # important to check that the desired value is a multiple of the\n # increment.\n #\n # This is often the case for width and height nodes. However, because\n # these nodes are being set to their maximums, there is no real reason\n # to check against the increment.\n if cam.Width.GetAccessMode() == PySpin.RW and cam.Width.GetInc() != 0 and cam.Width.GetMax != 0:\n cam.Width.SetValue(FRAME_WIDTH)\n print(\"Width set to %i...\" % cam.Width.GetValue())\n\n else:\n print(\"Width not available...\")\n result = False\n\n # Set maximum height\n #\n # *** NOTES ***\n # A maximum is retrieved with the method GetMax(). A node's minimum and\n # maximum should always be a multiple of its increment.\n if cam.Height.GetAccessMode() == PySpin.RW and cam.Height.GetInc() != 0 and cam.Height.GetMax != 0:\n cam.Height.SetValue(FRAME_HEIGHT)\n print(\"Height set to %i...\" % cam.Height.GetValue())\n\n else:\n print(\"Height not available...\")\n result = False\n '''\n print(\"Automatic exposure disabled...\")\n #node_acquisition_framerate = PySpin.CFloatPtr(nodemap.GetNode(\"AcquisitionFrameRate\"))\n\n # if not PySpin.IsAvailable(node_acquisition_framerate) and not PySpin.IsReadable(node_acquisition_framerate):\n # print(\"Unable to retrieve frame rate. 
Aborting...\")\n # return False\n\n # framerate_to_set = node_acquisition_framerate.GetValue()\n\n # print(\"Frame rate to be set to %d...\" % framerate_to_set)\n canvas=np.zeros((FRAME_HEIGHT*2,FRAME_WIDTH*2,3), np.uint8)\n while True:\n exposure=exposures[index]\n \n configure_exposure(cam, exposure)\n # Acquire images\n err, img,width,height = acquire_images(cam, nodemap)\n if err < 0:\n return err\n\n \n img = img.GetData().reshape(height,width,3)\n\n half_height = int(height/2)\n half_width = int(width/2)\n half_frame_height = int(FRAME_HEIGHT/2)\n half_frame_width = int(FRAME_WIDTH/2)\n \n img = img[half_height-half_frame_height:half_height+half_frame_height,half_width-half_frame_width:half_width+half_frame_width]\n #smallimg=cv2.resize(img,(int(FRAME_WIDTH/2),int(FRAME_HEIGHT/2)))\n if index==0:\n #top left\n canvas[0:FRAME_HEIGHT,0:FRAME_WIDTH]=img\n elif index==1:\n #top right\n canvas[0:FRAME_HEIGHT,FRAME_WIDTH:FRAME_WIDTH*2]=img\n elif index==2:\n #bot left\n canvas[FRAME_HEIGHT:FRAME_HEIGHT*2,0:FRAME_WIDTH]=img\n else:\n #bot right\n canvas[FRAME_HEIGHT:FRAME_HEIGHT*2,FRAME_WIDTH:FRAME_WIDTH*2]=img\n index+=1\n if index>=len(exposures):\n index=0\n\n cv2.imshow(\"frame\",canvas)\n if cv2.waitKey(1) &0xff ==ord('q'):\n #stop the feed the 'q'\n break\n cv2.destroyAllWindows()\n # Deinitialize camera\n cam.DeInit()\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n result = False",
"def process_camera():\n\n pic_array = take_picture()\n detections, shapes, descriptors = detect_faces(person_database,pic_array)\n\n names = []\n\n for desc in descriptors:\n name = find_match(person_database, desc)\n names.append(name)\n\n return pic_array, names, detections, shapes, descriptors",
"def set_video_source(self):\n if self.config['camera_device_id'] == 'pi':\n # Raspberry Pi camera as video source\n # only import if needed because it requires specific packages!\n from raspicamera import RasPiCamera\n self.video_stream = RasPiCamera()\n elif self.config['camera_device_id'] == 'network':\n # External camera through network stream as video source\n # only import if needed because it requires specific packages!\n from networkcamera import NetworkCamera\n NetworkCamera.set_url(self.config['camera_stream_url'])\n self.video_stream = NetworkCamera()\n else:\n # Local webcam as video source\n # only import if needed because it requires specific packages!\n from opencvcamera import OpencvCamera\n OpencvCamera.set_video_source(self.config['camera_device_id'])\n self.video_stream = OpencvCamera()",
"def test_generate_camera_info(self):\n data = ET.parse('data/cam_data_0.xml')\n data_str = ET.tostring(data.getroot())\n\n dict = tesse_ros_bridge.utils.parse_cam_data(data_str)\n\n (left, right) = tesse_ros_bridge.utils.generate_camera_info(dict, dict)\n self.assertEqual(left.header.frame_id, \"left_cam\")\n self.assertEqual(right.header.frame_id, \"right_cam\")\n self.assertEqual(left.width, dict['parameters']['width'])\n self.assertEqual(left.height, dict['parameters']['height'])\n self.assertEqual(right.width, dict['parameters']['width'])\n self.assertEqual(right.height, dict['parameters']['height'])\n\n # TODO(marcus): add more checks",
"def _open_capture(self):\n\n plat = platform.system()\n if plat == \"Windows\":\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! appsink sync=false'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n # self.capture = cv2.VideoCapture(self._rtsp, apiPreference=cv2.CAP_FFMPEG)\n elif plat == \"Linux\":\n if platform.machine() == 'aarch64': # Jetson Nano\n gst ='rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! appsink sync=false'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n elif platform.machine() == 'armv6l' or platform.machine() == 'armv7l': # Raspberry Pi\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! queue ! rtph264depay ! h264parse ! v4l2h264dec capture-io-mode=4 ! v4l2convert output-io-mode=5 capture-io-mode=4 ! appsink sync=false'\n # might not need the two queue statements above\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n elif plat == \"MacOS\":\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! appsink'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n else:\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! appsink'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n\n self.capture_open = self.capture.isOpened() \n if not self.capture_open:\n self.logger.log(logging.CRITICAL, \"Status:Failed to open camera!\")",
"def checkCamera(self):\n cameraFound = False\n print(\"[INFO]: Searching for camera...\")\n try:\n for camera in glob.glob(\"/dev/video?\"):\n if camera == \"/dev/video2\":\n cameraIndex = 2\n cameraFound = True\n print(\"[INFO]: Using index 2 for the camera.\")\n return cameraIndex, cameraFound\n elif camera == \"/dev/video1\":\n cameraIndex = 1\n cameraFound = True\n print(\"[INFO]: Using index 1 for the camera.\")\n return cameraIndex, cameraFound\n elif camera == \"/dev/video0\":\n cameraIndex = 0\n cameraFound = True\n print(\"[INFO]: Using index 0 for the camera\")\n return cameraIndex, cameraFound\n else:\n print(\"[ERROR]: No camera found.\")\n cameraFound = False\n cameraIndex = 0\n return cameraIndex, cameraFound\n except(TypeError):\n print(\"[ERROR]: Camera is probably not connected.\")",
"def cameraCallback(self, data):\n if not self.isReady:\n cvImage, self.imageInfo['shape'] = u.getCVImage(data)\n if self.measuring is not None:\n self.list, cvImage, self.isReady = self.measuring.getListObjects(cvImage)\n # preview topic /see_main\n msg_image = u.getMsgImage(cvImage)\n self.pub_view_main.publish(msg_image)\n else:\n if self.imageInfo['shape'] is not None:\n self.init()\n else:\n rospy.logerr(\"no video stream. check camera's topic!\")",
"def let_camera_update_parameters(path_to_images, name_image, video_source=\"/dev/video0\"):\n subprocess_cmd(\"ffmpeg -f video4linux2 -s 1280x720 -i {} -ss 00:00:02 -frames 1 ./{}/{} -loglevel error -nostats\".format(video_source, path_to_images, name_image))",
"def _start_vidmemwriter(self, camType, ip=None, inputres=\"640x480\", outputres=\"640x480\"):\n if not self.__vidmemwriter and not self.__server_mode:\n self.__vidmemwriter = vidmemwriter.VidMemWriter([], [])\n\n if camType in self.__video_sources:\n return True\n\n self.__logger.info(\"I'm starting %s\" % camType)\n\n if ros_pattern.match(camType):\n #The first 4 characters \"ros_\" identify that is a specific ros image\n #The second part *** in \"ros_***/topic\" is the encoding:\n topic = camType[4:]\n encoding = \"passthrough\"\n self.__logger.info(\"camType !!!!!! %s\" % camType)\n if not camType[4] == '/':\n str_list = camType.split(\"_\")\n topic = '_'.join(str_list[2:])\n encoding = str_list[1]\n ros_image_source = rosimage.RosImage(topic, encoding)\n\n if self.__server_mode:\n self.__register_video_source(camType, ros_image_source)\n else:\n self.__vidmemwriter.add_video_source(ros_image_source, camType)\n self.__video_sources.append(camType)\n self.__logger.info(\"rosimage started for topic: %s, with encoding: %s\" % (topic, encoding))\n return True\n elif camType == \"webcam\":\n self.__logger.debug(\"I'm starting webcam\")\n webcamsource = takeimages.TakeImages(self.__camera)\n img = webcamsource.get_image()\n if type(img) is type(\"\"):\n self.__logger.error(\"No camera found. Please check connection!\")\n return False\n\n if webcamsource.Nocamera:\n if self.__camera == -1:\n self.__logger.error(\"No camera found. Please check connection!\")\n else:\n self.__logger.error(\"Camera %d not found. Please check connection!\" % self.__camera)\n return False\n if self.__server_mode:\n self.__register_video_source('webcam', webcamsource)\n else:\n self.__vidmemwriter.add_video_source(webcamsource, \"webcam\")\n self.__video_sources.append(\"webcam\")\n self.__logger.info(\"Webcam started\")\n return True\n elif camType == 'kinect_openni':\n self.__logger.debug(\"I'm starting kinect using openni\")\n import util.openni_kinectvideo as kv\n depth_source = kv.OpenNIKinect(\"depth\")\n rgb_source = kv.OpenNIKinect(\"rgb\")\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. Please check connection!\")\n return False\n\n if self.__server_mode:\n self.__register_video_source('kinect_depth', depth_source)\n self.__register_video_source('kinect_rgb', rgb_source)\n else:\n self.__vidmemwriter.add_video_source(depth_source, \"kinect_depth\")\n self.__vidmemwriter.add_video_source(rgb_source, \"kinect_rgb\")\n\n self.__video_sources.append(\"kinect_depth\")\n self.__video_sources.append(\"kinect_rgb\")\n self.__video_sources.append(\"kinect\")\n self.__video_sources.append(\"kinect_openni\")\n \n self.__logger.info(\"Kinect started\")\n return True\n elif camType == 'kinect' or camType == 'kinect_rgb' or camType == 'kinect_depth':\n if self.__use_openni:\n self.__logger.info(\"I'm starting kinect using openni\")\n import util.openni_kinectvideo as kv\n depth_source = kv.OpenNIKinect(\"depth\")\n rgb_source = kv.OpenNIKinect(\"rgb\")\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. Please check connection!\")\n return False\n else:\n self.__logger.info(\"I'm starting kinect using freenect\")\n try:\n import util.kinectmemwriter\n except:\n self.__logger.error(\"Could not load kinectmemwriter module. 
Check modules.\")\n return False\n\n depth_source = util.kinectmemwriter.KinectDepthSource()\n rgb_source = util.kinectmemwriter.KinectRGBSource()\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. Please check connection!\")\n return False\n\n if self.__server_mode:\n self.__register_video_source('kinect_depth', depth_source)\n self.__register_video_source('kinect_rgb', rgb_source)\n else:\n self.__vidmemwriter.add_video_source(depth_source, \"kinect_depth\")\n self.__vidmemwriter.add_video_source(rgb_source, \"kinect_rgb\")\n\n self.__video_sources.append(\"kinect_depth\")\n self.__video_sources.append(\"kinect_rgb\")\n self.__video_sources.append(\"kinect\")\n \n self.__logger.info(\"Kinect started\")\n return True\n elif camType == \"naovideo\":\n self.__logger.debug(\"I'm starting naovideo\")\n try:\n import util.naovideo as naovideo\n except:\n self.__logger.error(\"Could not load naovideo module. Check modules\")\n return False\n #get ip of nao:\n #TODO: fix this dirty hack (it should be read from the config file)\n naoip = \"129.125.178.232\"\n if ip:\n naoip = ip\n \n self.__logger.warn(\"Using input resolution %s and output resolution %s\" % (inputres, outputres))\n #use the naovideo module:\n if self.__camera != 0 and self.__camera != 1:\n self.__camera = 0\n try:\n naocamsource = naovideo.VideoModule(naoip, inputres, outputres, camera=self.__camera)\n naocamsource.get_image()\n except:\n self.__logger.error(\"Something went wrong using the camera of the nao (check connection!)\")\n traceback.print_exc()\n return False\n\n if self.__server_mode:\n self.__register_video_source('naovideo', naocamsource)\n else:\n self.__vidmemwriter.add_video_source(naocamsource, \"naovideo\")\n self.__video_sources.append(\"naovideo\")\n self.__nao_camera = naocamsource\n self.__logger.info(\"Naovideo started\")\n return True\n else:\n self.__logger.warning(\"Invalid video source specified: %s\" % camType)\n return False",
"def camera(*args, aspectRatio: Union[float, bool]=0.0, cameraScale: Union[float, bool]=0.0,\n centerOfInterest: Union[float, bool]=0.0, clippingPlanes: bool=True, depthOfField:\n bool=True, displayFieldChart: bool=True, displayFilmGate: bool=True,\n displayFilmOrigin: bool=True, displayFilmPivot: bool=True, displayGateMask:\n bool=True, displayResolution: bool=True, displaySafeAction: bool=True,\n displaySafeTitle: bool=True, fStop: Union[float, bool]=0.0, farClipPlane:\n Union[float, bool]=0.0, farFocusDistance: Union[float, bool]=0.0, filmFit:\n Union[AnyStr, bool]=\"\", filmFitOffset: Union[float, bool]=0.0, filmRollOrder:\n Union[AnyStr, bool]=\"\", filmRollValue: Union[float, bool]=0.0, filmTranslateH:\n Union[float, bool]=0.0, filmTranslateV: Union[float, bool]=0.0, focalLength:\n Union[float, bool]=0.0, focusDistance: Union[float, bool]=0.0, homeCommand:\n Union[AnyStr, bool]=\"\", horizontalFieldOfView: Union[float, bool]=0.0,\n horizontalFilmAperture: Union[float, bool]=0.0, horizontalFilmOffset: Union[float,\n bool]=0.0, horizontalPan: Union[float, bool]=0.0, horizontalRollPivot: Union[float,\n bool]=0.0, horizontalShake: Union[float, bool]=0.0, journalCommand: bool=True,\n lensSqueezeRatio: Union[float, bool]=0.0, lockTransform: bool=True, motionBlur:\n bool=True, name: Union[AnyStr, bool]=\"\", nearClipPlane: Union[float, bool]=0.0,\n nearFocusDistance: Union[float, bool]=0.0, orthographic: bool=True,\n orthographicWidth: Union[float, bool]=0.0, overscan: Union[float, bool]=0.0,\n panZoomEnabled: bool=True, position: Union[List[float, float, float], bool]=None,\n postScale: Union[float, bool]=0.0, preScale: Union[float, bool]=0.0, renderPanZoom:\n bool=True, rotation: Union[List[float, float, float], bool]=None, shakeEnabled:\n bool=True, shakeOverscan: Union[float, bool]=0.0, shakeOverscanEnabled: bool=True,\n shutterAngle: Union[float, bool]=0.0, startupCamera: bool=True,\n stereoHorizontalImageTranslate: Union[float, bool]=0.0,\n stereoHorizontalImageTranslateEnabled: bool=True, verticalFieldOfView: Union[float,\n bool]=0.0, verticalFilmAperture: Union[float, bool]=0.0, verticalFilmOffset:\n Union[float, bool]=0.0, verticalLock: bool=True, verticalPan: Union[float, bool]=0.0,\n verticalRollPivot: Union[float, bool]=0.0, verticalShake: Union[float, bool]=0.0,\n worldCenterOfInterest: Union[List[float, float, float], bool]=None, worldUp:\n Union[List[float, float, float], bool]=None, zoom: Union[float, bool]=0.0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def run(self, live_camera, stream_path):\n\n has_element_err = False\n\n number_sources = 1\n # Standard GStreamer initialization\n GObject.threads_init()\n Gst.init(None)\n # Create gstreamer elements\n # Create Pipeline element that will form a connection of other elements\n print(\"Creating Pipeline \\n \")\n pipeline = Gst.Pipeline()\n\n if not pipeline:\n sys.stderr.write(\" Unable to create Pipeline \\n\")\n has_element_err = True\n if live_camera:\n if constants.RPI_MODE == constants.CAM_MODE:\n print(\"Creating Source \\n \")\n source = Gst.ElementFactory.make(\"nvarguscamerasrc\", \"src-elem\")\n if not source:\n sys.stderr.write(\" Unable to create Source \\n\")\n has_element_err = True\n else:\n print(\"Creating Source \\n \")\n source = Gst.ElementFactory.make(\"v4l2src\", \"usb-cam-source\")\n if not source:\n sys.stderr.write(\" Unable to create Source \\n\")\n has_element_err = True\n\n caps_v4l2src = Gst.ElementFactory.make(\"capsfilter\", \"v4l2src_caps\")\n if not caps_v4l2src:\n sys.stderr.write(\" Unable to create v4l2src capsfilter \\n\")\n has_element_err = True\n print(\"Creating Video Converter \\n\")\n # videoconvert to make sure a superset of raw formats are supported\n vidconvsrc = Gst.ElementFactory.make(\"videoconvert\", \"convertor_src1\")\n if not vidconvsrc:\n sys.stderr.write(\" Unable to create videoconvert \\n\")\n has_element_err = True\n # nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)\n nvvidconvsrc = Gst.ElementFactory.make(\"nvvideoconvert\", \"convertor_src2\")\n if not nvvidconvsrc:\n sys.stderr.write(\" Unable to create Nvvideoconvert \\n\")\n has_element_err = True\n caps_vidconvsrc = Gst.ElementFactory.make(\"capsfilter\", \"nvmm_caps\")\n if not caps_vidconvsrc:\n sys.stderr.write(\" Unable to create capsfilter \\n\")\n has_element_err = True\n else:\n # Source element for reading from the file\n print(\"Creating Source \\n \")\n source = Gst.ElementFactory.make(\"filesrc\", \"file-source\")\n if not source:\n sys.stderr.write(\" Unable to create Source \\n\")\n has_element_err = True\n # Since the data format in the input file is elementary h264 stream,\n # we need a h264parser\n print(\"Creating H264Parser \\n\")\n h264parser = Gst.ElementFactory.make(\"h264parse\", \"h264-parser\")\n if not h264parser:\n sys.stderr.write(\" Unable to create h264 parser \\n\")\n has_element_err = True\n # Use nvdec_h264 for hardware accelerated decode on GPU\n print(\"Creating Decoder \\n\")\n decoder = Gst.ElementFactory.make(\"nvv4l2decoder\", \"nvv4l2-decoder\")\n if not decoder:\n sys.stderr.write(\" Unable to create Nvv4l2 Decoder \\n\")\n has_element_err = True\n # Create nvstreammux instance to form batches from one or more sources.\n streammux = Gst.ElementFactory.make(\"nvstreammux\", \"Stream-muxer\")\n if not streammux:\n sys.stderr.write(\" Unable to create NvStreamMux \\n\")\n has_element_err = True\n # Use nvinfer to run inferencing on decoder's output,\n # behaviour of inferencing is set through config file\n pgie = Gst.ElementFactory.make(\"nvinfer\", \"primary-inference\")\n if not pgie:\n sys.stderr.write(\" Unable to create pgie \\n\")\n has_element_err = True\n\n # Use nv-tracker to keep track of the detected objects\n tracker = Gst.ElementFactory.make(\"nvtracker\", \"NV-Tracker\")\n if not tracker:\n sys.stderr.write(\" Unable to create tracker \\n\")\n has_element_err = True\n\n # Add nvvidconv1 and filter1 to convert the frames to RGBA\n # which is easier to work with in Python.\n print(\"Creating 
nvvidconv1 \\n \")\n nvvidconv1 = Gst.ElementFactory.make(\"nvvideoconvert\", \"convertor1\")\n if not nvvidconv1:\n sys.stderr.write(\" Unable to create nvvidconv1 \\n\")\n has_element_err = True\n print(\"Creating filter1 \\n \")\n caps1 = Gst.Caps.from_string(\"video/x-raw(memory:NVMM), format=RGBA\")\n filter1 = Gst.ElementFactory.make(\"capsfilter\", \"filter1\")\n if not filter1:\n sys.stderr.write(\" Unable to get the caps filter1 \\n\")\n has_element_err = True\n #filter1.set_property(\"caps\", caps1)\n print(\"Creating tiler \\n \")\n tiler = Gst.ElementFactory.make(\"nvmultistreamtiler\", \"nvtiler\")\n if not tiler:\n sys.stderr.write(\" Unable to create tiler \\n\")\n has_element_err = True\n print(\"Creating nvvidconv \\n \")\n nvvidconv = Gst.ElementFactory.make(\"nvvideoconvert\", \"convertor\")\n if not nvvidconv:\n sys.stderr.write(\" Unable to create nvvidconv \\n\")\n has_element_err = True\n print(\"Creating nvosd \\n \")\n nvosd = Gst.ElementFactory.make(\"nvdsosd\", \"onscreendisplay\")\n if not nvosd:\n sys.stderr.write(\" Unable to create nvosd \\n\")\n has_element_err = True\n print(\"Creating Fake sink \\n\")\n # sink = Gst.ElementFactory.make(\"nveglglessink\", \"nvvideo-renderer\")\n sink = Gst.ElementFactory.make(\"fakesink\", \"fakesink\")\n if not sink:\n sys.stderr.write(\" Unable to create fake sink \\n\")\n has_element_err = True\n print(\"Playing file %s \" %stream_path)\n\n\n if has_element_err:\n\n process_result = False\n\n else:\n\n if live_camera:\n if constants.RPI_MODE == constants.CAM_MODE:\n source.set_property('bufapi-version', True)\n else:\n source.set_property('device', stream_path)\n caps_v4l2src.set_property('caps', \\\n Gst.Caps.from_string(\"video/x-raw, framerate=30/1\"))\n caps_vidconvsrc.set_property('caps', \\\n Gst.Caps.from_string(\"video/x-raw(memory:NVMM)\"))\n else:\n source.set_property('location', stream_path)\n\n streammux.set_property('width', 1920)\n streammux.set_property('height', 1080)\n streammux.set_property('batch-size', 1)\n streammux.set_property('batched-push-timeout', 4000000)\n\n tiler_rows = int(math.sqrt(number_sources))\n tiler_columns = int(math.ceil((1.0*number_sources)/tiler_rows))\n tiler.set_property(\"rows\", tiler_rows)\n tiler.set_property(\"columns\", tiler_columns)\n tiler.set_property(\"width\", constants.FRAME_WIDTH)\n tiler.set_property(\"height\", constants.FRAME_HEIGHT)\n\n if is_aarch64():\n sink.set_property(\"sync\", 0)\n else:\n sink.set_property(\"sync\", 1)\n\n # Use CUDA unified memory in the pipeline so frames\n # can be easily accessed on CPU in Python.\n mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)\n streammux.set_property(\"nvbuf-memory-type\", mem_type)\n nvvidconv.set_property(\"nvbuf-memory-type\", mem_type)\n nvvidconv1.set_property(\"nvbuf-memory-type\", mem_type)\n tiler.set_property(\"nvbuf-memory-type\", mem_type)\n\n filter1.set_property(\"caps\", caps1)\n\n #Set properties of pgie\n pgie.set_property('config-file-path', \"dstest1_pgie_config.txt\")\n\n #Set nv-tracker properties\n tracker.set_property('ll-lib-file', \\\n '/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_nvdcf.so')\n tracker.set_property('tracker-width', 20*32)\n tracker.set_property('tracker-height', 20*32)\n tracker.set_property('enable-past-frame', 1)\n tracker.set_property('enable-batch-process', 1)\n tracker.set_property('ll-config-file', 'config/tracker_config.yml')\n\n print(\"Adding elements to Pipeline \\n\")\n pipeline.add(source)\n if live_camera:\n if constants.RPI_MODE != constants.CAM_MODE:\n 
pipeline.add(caps_v4l2src)\n pipeline.add(vidconvsrc)\n pipeline.add(nvvidconvsrc)\n pipeline.add(caps_vidconvsrc)\n else:\n pipeline.add(h264parser)\n pipeline.add(decoder)\n pipeline.add(streammux)\n pipeline.add(pgie)\n pipeline.add(tracker)\n pipeline.add(tiler)\n pipeline.add(nvvidconv)\n pipeline.add(filter1)\n pipeline.add(nvvidconv1)\n pipeline.add(nvosd)\n pipeline.add(sink)\n\n # we link the elements together\n # file-source -> h264-parser -> nvh264-decoder ->\n # nvinfer -> nvvidconv -> nvosd -> video-renderer\n print(\"Linking elements in the Pipeline \\n\")\n if live_camera:\n if constants.RPI_MODE == constants.CAM_MODE:\n source.link(nvvidconvsrc)\n else:\n source.link(caps_v4l2src)\n caps_v4l2src.link(vidconvsrc)\n vidconvsrc.link(nvvidconvsrc)\n nvvidconvsrc.link(caps_vidconvsrc)\n else:\n source.link(h264parser)\n h264parser.link(decoder)\n\n sinkpad = streammux.get_request_pad(\"sink_0\")\n if not sinkpad:\n sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n if live_camera:\n srcpad = caps_vidconvsrc.get_static_pad(\"src\")\n else:\n srcpad = decoder.get_static_pad(\"src\")\n if not srcpad:\n sys.stderr.write(\" Unable to get source pad of decoder \\n\")\n srcpad.link(sinkpad)\n streammux.link(pgie)\n pgie.link(tracker)\n tracker.link(nvvidconv1)\n nvvidconv1.link(filter1)\n filter1.link(tiler)\n tiler.link(nvvidconv)\n nvvidconv.link(nvosd)\n nvosd.link(sink)\n\n # create and event loop and feed gstreamer bus mesages to it\n loop = GObject.MainLoop()\n\n bus = pipeline.get_bus()\n bus.add_signal_watch()\n bus.connect(\"message\", bus_call, loop)\n\n # Lets add probe to get informed of the meta data generated, we add probe to\n # the sink pad of the osd element, since by that time, the buffer would have\n # had got all the metadata.\n tiler_sink_pad = nvvidconv.get_static_pad(\"sink\")\n if not tiler_sink_pad:\n sys.stderr.write(\" Unable to get src pad \\n\")\n else:\n tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, self.__metadata_process, 0)\n\n print(\"Starting pipeline \\n\")\n # start play back and listed to events\n pipeline.set_state(Gst.State.PLAYING)\n\n calib_result = Common.get_instance().check_calibration_file()\n\n if calib_result != constants.V_CALIB_OK:\n\n self.__calibration_mode = constants.ON\n ScreenCalibration.get_instance().run()\n self.__calibration_mode = constants.OFF\n\n # start play back and listed to events\n try:\n loop.run()\n except KeyboardInterrupt:\n pass\n\n # cleanup\n pipeline.set_state(Gst.State.NULL)\n\n process_result = True\n\n return process_result",
"def cameraType(self):\r\n cls = mxs.classof(self._nativePointer)\r\n if cls in (mxs.FreeCamera, mxs.TargetCamera):\r\n return CameraType.Standard\r\n\r\n elif cls == mxs.Physical:\r\n return CameraType.Physical\r\n\r\n elif cls == mxs.VRayPhysicalCamera:\r\n return CameraType.Physical\r\n return 0",
"def _process_cameras(dataset_info, example, is_raw):\n raw_cameras = example['cameras']\n raw_cameras = tf.reshape(raw_cameras, (-1, dataset_info.sequence_size, _NUM_POSE_PARAMS))\n\n if not is_raw:\n position = raw_cameras[:, :, 0:3]\n yaw = raw_cameras[:, :, 3:4]\n pitch = raw_cameras[:, :, 4:5]\n cameras = tf.concat([position, tf.sin(yaw), tf.cos(yaw), tf.sin(pitch), tf.cos(pitch)], axis=2)\n return cameras\n \n else:\n return raw_cameras",
"def concatenate_images(img_1, img_2):\n res_4 = None;\n if not (img_1 is None):\n # Resize Camera and Satellite Image:\n res_1 = cv2.resize(img_2, None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)\n res_2 = cv2.resize(img_1, None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)\n\n #Concatenate Camera and Satellite view on single image\n h_1 = res_1.shape[0];\n w_1 = res_1.shape[1];\n h_2 = res_2.shape[0];\n w_2 = res_2.shape[1];\n scale = float(h_1)/float(h_2);\n\n h_2 = h_1;\n w_2 = int(w_2*scale)\n dim = (w_2, h_2);\n res_3 = cv2.resize(res_2, dim, interpolation = cv2.INTER_CUBIC)\n\n res_4 = np.concatenate((res_1, res_3), axis=1)\n\n return res_4;",
"def getAsSource(self, cameraType):\n res = {}\n for cam in self.__data[cameraType]:\n if cam[\"work\"] == 0:continue\n if cameraType == \"DroneCamera\":\n droneModel = cam[\"droneController\"]\n res[cam[\"id\"]] = {\"source\": initDrone(droneModel), \"altName\": cam['altName'], \"work\":cam[\"work\"]}\n elif cameraType == \"FixedCamera\":\n sourceDetected = False\n if \"ip\" in cam.keys():\n source = cam[\"ip\"]\n sourceDetected = True\n if sourceDetected == False:\n raise KeyError(\"Unable to detect source keys\")\n alertZones = []\n if \"alerts\" in cam.keys():\n alertZones = cam[\"alerts\"]\n res[cam[\"id\"]] = {\"source\": source, \"altName\": cam['altName'], \"alerts\": alertZones, \"work\":cam[\"work\"]}\n elif cameraType == \"Video\":\n res[cam[\"id\"]] = {\"path\" : cam['path'], \n \"altName\":cam['altName'], \n \"timestamp\":cam['timestamp'], \n \"work\":cam[\"work\"]}\n else:\n print(cam)\n print(cameraType)\n raise ValueError('unable to parse cameraType')\n return res",
"def get_camera_streaming(cam_id, w, h, fps):\n capture = cv2.VideoCapture(cam_id)\n capture.set(cv2.CAP_PROP_FRAME_WIDTH, w)\n capture.set(cv2.CAP_PROP_FRAME_HEIGHT, h)\n capture.set(cv2.CAP_PROP_FPS, fps)\n if not capture:\n print(\"Failed to initialize camera\")\n sys.exit(1)\n return capture",
"def main():\n\n # Retrieve singleton reference to system object\n system = PySpin.System.GetInstance()\n\n # Retrieve list of cameras from the system\n cam_list = system.GetCameras()\n\n num_cameras = cam_list.GetSize()\n\n print(\"Number of cameras detected:\", num_cameras)\n # Finish if there are no cameras\n if num_cameras == 0:\n # Clear camera list before releasing system\n cam_list.Clear()\n\n # Release system\n system.ReleaseInstance()\n\n print(\"Not enough cameras!\")\n \n return False\n\n cam = cam_list.GetByIndex(0)\n run_single_camera(cam)\n\n\n # Release reference to camera\n del cam\n\n # Clear camera list before releasing system\n cam_list.Clear()\n\n # Release instance\n system.ReleaseInstance()",
"def generate_test_cameras(self):\n def generate_cameras_for_block(names, block_name, data):\n item_dict = {}\n for name in names:\n item_dict['{}_{}'.format(name, block_name)] = {\n 'block': block_name,\n **data,\n }\n return item_dict\n\n camera_data = {\n 'ip_addr': 'rtsp://192.168.1.1',\n 'coords': [0, 0],\n 'point_coords_in_frame': [0, 1, 2, 3, 4, 5, 6, 7],\n 'point_coords_in_image': [0, 1, 2, 3, 4, 5, 6, 7],\n }\n\n self.cs_b1_f0_l1_o1_dict =\\\n generate_cameras_for_block(\n ['c0', 'c1', 'c2_del', 'c3_del', 'c4_del', 'c5_del'],\n 'b1_f0_l1_o1',\n camera_data)\n self.cs_b1_f0_l1_o2_dict =\\\n generate_cameras_for_block(\n ['c0', 'c1', 'c2_del', 'c3_del', 'c4_del', 'c5_del'],\n 'b1_f0_l1_o2',\n camera_data)\n self.cs_b1_f0_l1_sub1_o1_dict =\\\n generate_cameras_for_block(\n ['c0', 'c1', 'c2_del', 'c3_del', 'c4_del', 'c5_del'],\n 'b1_f0_l1_sub1_o1',\n camera_data)\n self.cs_b1_f0_l1_sub1_o2_dict =\\\n generate_cameras_for_block(\n ['c0', 'c1', 'c2_del', 'c3_del'],\n 'b1_f0_l1_sub1_o2',\n camera_data)\n\n self.cs_dict = {\n **self.cs_b1_f0_l1_o1_dict,\n **self.cs_b1_f0_l1_o2_dict,\n **self.cs_b1_f0_l1_sub1_o1_dict,\n **self.cs_b1_f0_l1_sub1_o2_dict,\n }\n\n # generate blocks in database\n self.cameras = \\\n self.create_cameras_from_data(self.cs_dict, self.blocks)",
"def spinupcvstreams():\n global _riverprocess\n global _cityprocess\n if __name__ == \"__main__\":\n _riverprocess = CVStream(OPENCV_STREAM_RIVER)\n CVPROCESSES.append(_riverprocess)\n _cityprocess = CVStream(OPENCV_STREAM_CITY)\n CVPROCESSES.append(_cityprocess)\n _riverprocess.start()\n _cityprocess.start()",
"def main():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-hgt\", \"--imgHeight\", help=\"The height of the images, default=720.\",\n type=int, default=720)\n\n parser.add_argument(\"-wd\", \"--imgWidth\", help=\"The width of the images, default=1280.\",\n type=int, default=1280)\n\n parser.add_argument(\"-r\", \"--chessboardRows\", help=\"The rows of the chessboard calibration images, default=6.\",\n type=int, default=6)\n\n parser.add_argument(\"-c\", \"--chessboardCols\", help=\"The cols of the chessboard calibration images, default=9.\",\n type=int, default=9)\n\n parser.add_argument(\"-cp\", \"--calibrationPath\", help=\"The height of the images, default=720.\",\n type=str, default='')\n\n parser.add_argument(\"-in\", \"--inputVideoPath\", help=\"The path to the input video to be processed.\",\n type=str, default='')\n\n parser.add_argument(\"-out\", \"--outputVideoPath\", help=\"The path to the where to store output video.\",\n type=str, default='')\n\n args = parser.parse_args()\n\n print(args)\n\n assert args.calibrationPath != '', \"The path to calibration images can't be empty\"\n assert args.inputVideoPath != '', \"The path to input video can't be empty\"\n assert args.outputVideoPath != '', \"The path to output video can't be empty\"\n\n camera_mtx, dist_coeff = CameraCalibration((args.imgHeight, args.imgWidth),\n (args.chessboardRows, args.chessboardCols),\n args.calibrationPath).calibrate()\n print(\"Camera Mtx\", camera_mtx)\n print(\"Distortion Coefficient\", dist_coeff)\n # img = cv2.imread('test_images/test5.jpg')\n # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n AdvancedLaneDetection(args.inputVideoPath, camera_mtx, dist_coeff).process_video(args.outputVideoPath)\n\n # cv2.imwrite(\"output.jpg\", result)",
"def initialize_camera(self):\n if Rescue_PI.input_video_file_path is None:\n print(\"[INFO] starting threaded video stream...\")\n self.vs = VideoStream(src=VID_CAM_INDEX).start()\n else:\n self.vs = cv2.VideoCapture(Rescue_PI.input_video_file_path)"
] | [
"0.6055959",
"0.594556",
"0.5910366",
"0.58318543",
"0.5761805",
"0.5758764",
"0.57067573",
"0.56581026",
"0.5576743",
"0.5523943",
"0.5506011",
"0.53973216",
"0.5318885",
"0.5317909",
"0.5289555",
"0.5289161",
"0.52827555",
"0.527724",
"0.52749354",
"0.5268248",
"0.52553976",
"0.52499425",
"0.52488345",
"0.5244013",
"0.52201676",
"0.52106977",
"0.5205049",
"0.5200981",
"0.51871383",
"0.5183824"
] | 0.6135356 | 0 |
Checkout code for CESM. If sandbox exists, check that the right tag has been checked out. Otherwise, download the code, check out the tag and run manage_externals. The scripts don't seem to like multiple applications of manage_externals. | def code_checkout(cesm_repo, coderoot, tag):
sandbox = os.path.split(coderoot)[-1]
if os.path.exists(coderoot):
print('Check for right tag: '+coderoot)
p = Popen('git status', shell=True, cwd=coderoot, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode('UTF-8')
stderr = stderr.decode('UTF-8')
print(stdout)
print(stderr)
if tag not in stdout.split('\n')[0]:
raise ValueError('tag does not match')
else:
stat = check_call(['mkdir', '-p', coderoot])
if stat != 0: sys.exit(1)
# clone the repo
p = Popen('git clone '+cesm_repo+' '+sandbox, shell=True,
cwd=coderoot+'/..', stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if stdout:
print(stdout)
if stderr:
print(stderr)
if p.returncode != 0:
raise Exception('git error')
# check out the right tag
p = Popen('git checkout %s'%tag, shell=True, cwd=coderoot)
stdout, stderr = p.communicate()
if stdout:
print(stdout)
if stderr:
print(stderr)
if p.returncode != 0:
raise Exception('git error')
# check out externals
p = Popen('./manage_externals/checkout_externals -v', shell=True, cwd=coderoot)
stdout, stderr = p.communicate()
if stdout:
print(stdout)
if stderr:
print(stderr)
if p.returncode != 0:
raise Exception('git error') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n sandbox = create_sandbox()\n directory = download_package_to_sandbox(\n sandbox,\n 'https://pypi.python.org/packages/source/c/checkmyreqs/checkmyreqs-0.1.6.tar.gz'\n )\n print(directory)\n destroy_sandbox(sandbox)",
"def checked_out_MPS():\n\n checked_out_packages = os.path.join(os.environ[\"CMSSW_BASE\"], \"src\", \".git\",\n \"info\", \"sparse-checkout\")\n checked_out = False\n git_initialized = False\n try:\n with open(checked_out_packages, \"r\") as f:\n packages = (\"/Alignment/\", \"/Alignment/MillePedeAlignmentAlgorithm/\",\"/*/\")\n for line in f:\n if line.strip() in packages:\n checked_out = True\n break\n git_initialized = True # since the sparse checkout file is there\n except IOError as e:\n if e.args != (2, 'No such file or directory'): raise\n\n return checked_out, git_initialized",
"def execute(self):\r\n _logger.info(\"=== Stage=checkout = %s\" % self._config.name)\r\n _logger.info(\"++ Started at %s\" % time.strftime(\"%H:%M:%S\", time.localtime()))\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n result = self.__find_project(project)\r\n # for testing: result = session.create(\"ppd_sw-fa1f5132#wbernard2:project:sa1spp#1\")\r\n if (result != None):\r\n _logger.info(\"Project found: '%s'\" % result)\r\n\r\n # setting up the project\r\n self.__setup_project(project, result)\r\n else:\r\n _logger.info(\"Checking out from '%s'.\" % project)\r\n \r\n purpose = None\r\n if self._config.has_key('purpose'):\r\n purpose = self._config['purpose']\r\n _logger.info(\"Using purpose: '%s'\" % purpose)\r\n \r\n version = None\r\n if self._config.has_key('version'):\r\n version = self._config['version']\r\n _logger.info(\"Using version: '%s'\" % version)\r\n\r\n try:\r\n if (not self._config.get_boolean('use.default_wa_path', True)):\r\n wa_path = self._config['dir']\r\n _logger.info(\"Using work area path to checkout directly\")\r\n result = project.checkout(session.create(self._config['release']), version=version, purpose=purpose, path=wa_path)\r\n else:\r\n result = project.checkout(session.create(self._config['release']), version=version, purpose=purpose)\r\n ccm.log_result(result, ccm.CHECKOUT_LOG_RULES, _logger)\r\n self.__setRole(session)\r\n except ccm.CCMException, exc:\r\n ccm.log_result(exc.result, ccm.CHECKOUT_LOG_RULES, _logger)\r\n raise exc\r\n finally:\r\n self.__restoreRole(session)\r\n _logger.info('Checkout complete')\r\n \r\n if result.project != None and result.project.exists(): \r\n _logger.info(\"Project checked out: '%s'\" % result.project)\r\n \r\n try:\r\n self.__setRole(session)\r\n _logger.info(\"Maintaining the workarea...\")\r\n if self.get_threads() == 1:\r\n output = result.project.work_area(True, True, True, self._config['dir'], result.project.name)\r\n else:\r\n output = ccm.extra.FastMaintainWorkArea(result.project, self._config['dir'], result.project.name, self.get_threads())\r\n ccm.log_result(output, ccm.CHECKOUT_LOG_RULES, _logger)\r\n finally:\r\n self.__restoreRole(session)\r\n self.__setup_project(project, result.project)\r\n else:\r\n raise Exception(\"Error checking out '%s'\" % project)\r\n\r\n _logger.info(\"++ Finished at %s\" % time.strftime(\"%H:%M:%S\", time.localtime()))",
"def checkout_qmk():\n if exists('qmk_firmware'):\n rmtree('qmk_firmware')\n\n if not fetch_source(repo_name(QMK_GIT_URL)):\n git_clone(QMK_GIT_URL, QMK_GIT_BRANCH)",
"def lifecycle_approve_for_my_org(self, orderer_url, orderer_tls_rootcert, channel_name, cc_name,\n chaincode_version, policy, sequence=1):\n res, installed = self.lifecycle_query_installed(\"3s\")\n cc_label = cc_name+\"_\"+chaincode_version\n package_id = \"\"\n for each in installed['installed_chaincodes']:\n if each['label'] == cc_label:\n package_id = each['package_id']\n break\n if package_id == \"\":\n return 1, \"not exist the chaincode, please check chaincode_name and chaincode_version\"\n\n if os.getenv(\"CORE_PEER_TLS_ENABLED\") == \"false\" or os.getenv(\"CORE_PEER_TLS_ENABLED\") is None:\n if self.version in BasicEnv.binary_versions_v2:\n res = os.system(\"./../bin/{}/bin/peer lifecycle chaincode approveformyorg -o {} \"\n \" --channelID {} --name {} --version {} --init-required --package-id {} --sequence {}\"\n \" --signature-policy {} > ./approve.txt\"\n .format(self.version, orderer_url, channel_name, cc_name,\n chaincode_version, package_id, sequence, policy))\n else:\n if self.version in BasicEnv.binary_versions_v2:\n res = subprocess.Popen(\"./../bin/{}/bin/peer lifecycle chaincode approveformyorg -o {} --tls \"\n \"--cafile {} --channelID {} --name {} --version {} --init-required --package-id \"\n \"{} --sequence {} --signature-policy {}\"\n .format(self.version, orderer_url, orderer_tls_rootcert, channel_name,\n cc_name, chaincode_version, package_id, sequence, policy), shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = res.communicate()\n return_code = res.returncode\n\n if return_code == 0:\n content = str(stdout, encoding=\"utf-8\")\n else:\n stderr = str(stderr, encoding=\"utf-8\")\n return return_code, stderr\n return return_code, content",
"def test_checkout_repository(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.contribtool.checkout_repository(TOOLNAME,username,userpass)",
"def sync_code_to_masters(\n cluster: Cluster,\n dcos_checkout_dir: Path,\n sudo: bool,\n) -> None:\n local_packages = dcos_checkout_dir / 'packages'\n local_test_dir = local_packages / 'dcos-integration-test' / 'extra'\n if not Path(local_test_dir).exists():\n message = (\n 'DCOS_CHECKOUT_DIR must be set to the checkout of a DC/OS '\n 'repository.\\n'\n '\"{local_test_dir}\" does not exist.'\n ).format(local_test_dir=local_test_dir)\n raise click.BadArgumentUsage(message=message)\n\n dcos_checkout_dir_variant = _dcos_checkout_dir_variant(\n dcos_checkout_dir=dcos_checkout_dir,\n )\n\n node_test_dir = Path('/opt/mesosphere/active/dcos-integration-test')\n\n test_tarstream = _tar_with_filter(\n path=local_test_dir,\n tar_filter=_cache_filter,\n )\n\n dcos_variant = get_cluster_variant(cluster=cluster)\n if dcos_variant is None:\n message = (\n 'The DC/OS variant cannot yet be determined. '\n 'Therefore, code cannot be synced to the cluster.'\n )\n click.echo(message, err=True)\n sys.exit(1)\n\n syncing_oss_to_ee = bool(\n dcos_variant == DCOSVariant.ENTERPRISE\n and dcos_checkout_dir_variant == DCOSVariant.OSS,\n )\n\n node_active_dir = Path('/opt/mesosphere/active')\n node_test_dir = node_active_dir / 'dcos-integration-test'\n\n if syncing_oss_to_ee:\n # This matches part of\n # https://github.com/mesosphere/dcos-enterprise/blob/master/packages/dcos-integration-test/ee.build\n for master in cluster.masters:\n master.run(args=['rm', '-rf', str(node_test_dir / 'util')])\n\n # This makes an assumption that all tests are at the top level.\n master.run(\n args=[\n 'rm',\n '-rf',\n str(node_test_dir / 'open_source_tests' / '*.py'),\n ],\n # We use a wildcard character, `*`, so we need shell expansion.\n shell=True,\n sudo=sudo,\n )\n\n master.run(\n args=[\n 'mkdir',\n '--parents',\n str(node_test_dir / 'open_source_tests'),\n ],\n sudo=sudo,\n )\n\n _send_tarstream_to_node_and_extract(\n tarstream=test_tarstream,\n node=master,\n remote_path=node_test_dir / 'open_source_tests',\n sudo=sudo,\n )\n master.run(\n args=[\n 'rm',\n '-rf',\n str(node_test_dir / 'open_source_tests' / 'conftest.py'),\n ],\n sudo=sudo,\n )\n master.run(\n args=[\n 'mv',\n str(node_test_dir / 'open_source_tests' / 'util'),\n str(node_test_dir),\n ],\n sudo=sudo,\n )\n else:\n _sync_bootstrap_to_masters(\n cluster=cluster,\n dcos_checkout_dir=dcos_checkout_dir,\n sudo=sudo,\n )\n\n for master in cluster.masters:\n # This makes an assumption that all tests are at the top level.\n master.run(\n args=['rm', '-rf', str(node_test_dir / '*.py')],\n # We use a wildcard character, `*`, so we need shell expansion.\n shell=True,\n sudo=sudo,\n )\n _send_tarstream_to_node_and_extract(\n tarstream=test_tarstream,\n node=master,\n remote_path=node_test_dir,\n sudo=sudo,\n )",
"def compile_code(self,toolname,adminuser,adminpass):\n\n # ssh into a tool session container as the tools manager\n # compile and install the code\n\n # get into a tool session container.\n cm = ContainerManager()\n ws = cm.access(host=self.hubname,username=adminuser,password=adminpass)\n\n session_number,es = ws.execute('echo $SESSION')\n\n # catch errors that happen in the shell\n # so we can properly exit and close the workspace\n try:\n # become the apps user\n ws.send('sudo su - apps')\n ws.start_bash_shell()\n output,es = ws.execute('whoami')\n exit_apps = True\n if output != 'apps':\n exit_apps = False\n msg = \"doesn't look like we were able to become the apps user\"\n self.logger.error(msg)\n raise Exception(msg)\n\n # catch compile and install errors\n # so we can report them back to the developer\n\n # navigate to the tool directory\n cmd = 'cd /apps/%(toolname)s/dev/src' \\\n % { 'toolname' : toolname, }\n ws.execute(cmd)\n\n # if there is a makefile available\n # run:\n # make clean\n # make all\n # make install\n # don't fail if there is no clean or all targets\n if ws.bash_test('-e Makefile'):\n # allow 30 minutes for the code to compile\n ws.timeout = 1800\n output,es = ws.execute('make clean',False)\n output,es = ws.execute('make all',False)\n no_make_all_text = \"make: *** No rule to make target `all'. Stop.\"\n if es > 0:\n if es == 2 and output == no_make_all_text:\n output,es = ws.execute('make')\n else:\n self.logger.exception(output)\n raise ExitCodeError(output)\n output,es = ws.execute('make install')\n ws.timeout = 10\n else:\n msg = \"No Makefile found\"\n print msg\n self.logger.info(msg)\n\n finally:\n # exit sudo\n ws.stop_bash_shell()\n if exit_apps:\n ws.send('exit')\n\n # shut down the ssh connection\n ws.close()",
"def __getFromJEMpage(self):\n\n if not self.__download(self.repo, self.version, self.lib_tar, self.dest_dir): return False\n if not self.__extract(self.lib_tar): return False\n\n if not self.__download(self.repo, self.version, self.launcher_tar, self.dest_dir): return False\n if not self.__extract(self.launcher_tar): return False\n\n self.logger.info(\"successfully downloaded and extracted JEM ver %s from repo %s\" % (self.version, self.repo))\n\n if os.path.exists(self.dest_dir + \"/JEM.py\"):\n os.environ[\"JEM_PACKAGEPATH\"] = self.dest_dir\n\n\n return True",
"def main():\n # Parse command line arguments\n configfile = parse_arguments()\n # Parse config file\n (basedir, gituser, add_own_forks, forks, branches) = parse_config(configfile)\n # Check that base directory exists\n if not os.path.exists(basedir):\n raise Exception('Base directory {0} does not exist'.format(basedir))\n # Configure working directory\n workdir = setup_workdir(basedir)\n # Check out the code\n checkout_code(workdir, gituser, add_own_forks, forks, branches)\n print \"Location of code: {0}\".format(workdir)",
"def test_error_when_student_code_is_incorrectly_packaged(\n self, default_hooks\n ):\n result = default_hooks.act_on_cloned_repo(NO_DIR_STRUCTURE_REPO)\n\n assert result.status == Status.ERROR",
"def checkout(self, checkout, *args):\n return self.cmd('checkout', checkout, *args)",
"def test_link_to_checkout(self):\n self.browser.find_element_by_link_text('Checkout').click()\n self.assertEqual(self.browser.current_url,\n self.live_server_url + self.CHECKOUT_URL)",
"def update_openblock():\n\n tf = tempfile.mktemp(suffix='-openblock')\n local('git clone git://github.com/openplans/openblock.git {0}'.format(tf))\n dest = os.path.join(PROJECT_ROOT, 'requirements', 'sdists')\n for name in ('obadmin', 'ebdata', 'ebpub'):\n package = os.path.join(tf, name)\n os.chdir(package)\n local('pip install -e {source} -d {dest}'.format(source=package,\n dest=dest))\n shutil.rmtree(tf)",
"def start(buildout):\n check = Check(buildout)\n check.extends_cache()\n check.eggs_directory()\n check.download_cache()",
"def test_ML_check_cms_aem_emvevex(self):\n\n self.setup_logFile_for_logger('madgraph.check_cmd')\n files = ['acceptance_test_aem_emvevex.pkl',\n 'acceptance_test_aem_emvevex.log',\n 'acceptance_test_aem_emvevex_widths_increased.pkl',\n 'acceptance_test_aem_emvevex_widths_increased.log']\n output_name = 'SAVEDTMP_CHECK_acceptance_test_aem_emvevex__%s__'\n \n try:\n cwd = os.getcwd()\n \n # Change this when we will make the CMS-ready EW model the default\n self.do('import model loop_qcd_qed_sm')\n for mode in ['NWA','CMS']:\n if path.isdir(pjoin(MG5DIR,output_name%mode)):\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n \n # Make sure it works for an initial run\n command = 'check cms -reuse a e- > e- ve ve~ [virt=QCD QED] '\n options = {'name':'acceptance_test_aem_emvevex',\n 'lambdaCMS':'(1.0e-6,2)',\n 'show_plot':'False',\n 'seed':'666',\n 'resonances':'2',\n 'recompute_width':'first_time',\n 'report':'full'}\n cmd = command+' '.join('--%s=%s'%(opt, value) for opt, value in \n options.items())\n # print \"Running first CMS check cmd: \",cmd\n self.do(cmd)\n self.assertEqual(cwd, os.getcwd())\n for mode in ['NWA','CMS']:\n self.assertTrue(path.isdir(pjoin(MG5DIR,output_name%mode)))\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex.pkl')))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue(res.count('=== FAILED ===')==0)\n self.assertTrue(res.count('=== PASSED ===')==2)\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex.log')))\n res = open(pjoin(MG5DIR,'acceptance_test_aem_emvevex.log')).read()\n self.assertTrue(res.count('=== FAILED ===')==0)\n self.assertTrue(res.count('=== PASSED ===')==2)\n \n # Now for a Reuse-run with the widths modified by 1%\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n self.setup_logFile_for_logger('madgraph.check_cmd')\n # Now copy the card with recomputed widths in it\n for mode in ['NWA','CMS']:\n self.assertTrue(path.isfile(pjoin(MG5DIR,output_name%mode,\n 'Cards','param_card.dat_recomputed_widths')))\n shutil.copy(pjoin(MG5DIR,output_name%mode,'Cards',\n 'param_card.dat_recomputed_widths'),\n pjoin(MG5DIR,output_name%mode,'Cards','param_card.dat'))\n options['tweak']='allwidths->1.1*allwidths(widths_increased)'\n options['recompute_width']='never'\n cmd = command+' '.join('--%s=%s'%(opt, value) for opt, value in \n options.items())\n # print \"Running second CMS check cmd: \",cmd\n self.do(cmd)\n self.assertEqual(cwd, os.getcwd())\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.pkl')))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue(res.count('=== FAILED ===')==2)\n self.assertTrue(res.count('=== PASSED ===')==0)\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.log')))\n res = open(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.log')).read()\n self.assertTrue(res.count('=== FAILED ===')==2)\n self.assertTrue(res.count('=== PASSED ===')==0)\n \n # Clean up duties\n for mode in ['NWA','CMS']:\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n for file in files:\n try:\n os.remove(pjoin(MG5DIR,file))\n except:\n pass\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n\n except KeyError as e:\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n for mode in ['NWA','CMS']:\n 
try:\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n except:\n pass\n for f in files:\n try:\n os.remove(pjoin(MG5DIR,f))\n except:\n pass\n raise e\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)",
"def check_workspace ():\n\n try:\n ex (\"cd $DOC_ROOT/ACE_TAO && git pull -p\")\n print (\"Successfully updated ACE/TAO working copy\")\n except:\n print (\"Unable to update ACE/TAO workspace at \" + doc_root)\n raise\n\n try:\n ex (\"cd $DOC_ROOT/MPC && git pull -p\")\n print (\"Successfully updated MPC working copy to revision \")\n except:\n print (\"Unable to update the MPC workspace at \" + doc_root + \"/ACE/MPC\")\n raise\n\n vprint (\"Repos root URL = \" + opts.repo_root + \"\\n\")\n vprint (\"Repos MPC root URL = \" + opts.mpc_root + \"\\n\")",
"def code(ctx):\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n try:\n code_ref = PolyaxonClient().experiment.get_code_reference(user, project_name, _experiment)\n commit = None\n if code_ref:\n commit = code_ref.commit\n Printer.print_header(\n 'Experiment has code ref: `{}`, downloading ...'.format(commit))\n else:\n Printer.print_warning(\n 'Experiment has no code ref, downloading latest code...')\n PolyaxonClient().project.download_repo(user, project_name, commit=commit)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not download outputs for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n Printer.print_success('Files downloaded.')",
"def checkGit(directory):",
"def test_source_package_exists(self):\n response = self.client.head(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n self.assertEqual(response.status_code, status.OK)",
"def setup(token_addr: str, box_addr: str) -> None:\n print(f\"\\nSetting up Contracts....\")\n tokenContract = pytezos.contract(token_addr)\n print(f\"-- Performing Initial Mint to Admin : {pub_key_hash}\")\n tokenContract.initialMint(None).inject(_async=False)\n print(\"-- Funding Fishcake Box Contract\")\n tokenContract.transfer([{\"from_\": pub_key_hash, \"txs\": [\n {\"to_\": box_addr, \"token_id\": 0, \"amount\": default_fsck_box_fund}]}]).inject(_async=False)",
"def forced_checkout_with_real_obstructions_and_unversioned_files(sbox):\n\n # Make a local tree that partially obstructs the paths coming from the\n # repos, make the obstructing files different from the standard greek\n # tree, and finally add some files that don't exist in the stardard tree.\n expected_output = make_local_tree(sbox, True, True)\n\n expected_wc = svntest.main.greek_state.copy()\n expected_wc.tweak('A/mu',\n contents=\"This is the local version of the file 'mu'.\\n\")\n expected_wc.tweak('iota',\n contents=\"This is the local version of the file 'iota'.\\n\")\n expected_wc.add({'sigma' : Item(\"unversioned sigma\"),\n 'A/upsilon' : Item(\"unversioned upsilon\"),\n 'A/Z' : Item(),\n })\n\n svntest.actions.run_and_verify_checkout(sbox.repo_url,\n sbox.wc_dir, expected_output,\n expected_wc, [], '--force')",
"def test_empty_code_for_verification(self, cred):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number))\n assert resp.status_code == 200\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n resp = requests.get(check_url.format('json', cred[0], cred[1],\n request_id, ''))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '2'\n assert resp.json()['error_text'] == missing_specific_mandatory_parm_msg.format('code')\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']",
"def testToolchainDownload(self):\n self.assertEqual('https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.9.0/x86_64-gcc-4.9.0-nolibc_arm-unknown-linux-gnueabi.tar.xz',\n self.toolchains.LocateArchUrl('arm'))",
"def checkout_v8():\n if not OFFLINE_MODE:\n exec_cmd('git fetch --tags',\n cwd=V8_HOME,\n msg='Fetch the release tag information')\n\n exec_cmd('git checkout', V8_GIT_TAG,\n cwd=V8_HOME,\n msg='Checkout Google V8 v' + V8_GIT_TAG)",
"def checkout_book(book):\n\tno_token = 'Y'\n\tif no_token == 'Y':\n\t\tsuccessful = 200\n\t\treturn successful\n\telse:\n\t\tlist_of_books = check_out_book(book)\n\t\treturn list_of_books\n\t#end if",
"def check_installation():\n print(\n 'Hooray! CCurl is installed correctly!'\n if is_installed()\n else 'Aww, man! CCurl is NOT installed correctly!'\n )\n print('For support, visit the #iota-libs-pyota channel on the IOTA Slack.')\n print('https://slack.iota.org/')",
"def deploy():\n with cd(\"~/public_html/\"):\n run(\"/usr/local/cpanel/3rdparty/bin/git pull\")\n\n with cd(\"~/public_html/skin/frontend/gemz/default/tools/\"):\n run(\"grunt default\")\n #sudo(\"/scripts/enablefileprotect\")",
"def forced_checkout_with_real_obstructions(sbox):\n\n # Make a local tree that partially obstructs the paths coming from the\n # repos and make the obstructing files different from the standard greek\n # tree.\n expected_output = make_local_tree(sbox, True, False)\n\n expected_wc = svntest.main.greek_state.copy()\n expected_wc.tweak('A/mu',\n contents=\"This is the local version of the file 'mu'.\\n\")\n expected_wc.tweak('iota',\n contents=\"This is the local version of the file 'iota'.\\n\")\n\n svntest.actions.run_and_verify_checkout(sbox.repo_url,\n sbox.wc_dir, expected_output,\n expected_wc, [], '--force')",
"def _get_code_version():\n git_dir = os.path.dirname(os.path.realpath(__file__))\n cwd = os.getcwd()\n file = os.path.join(cwd, VERSION_FILENAME)\n bash_command = f'cd {git_dir}; ' + \\\n f'git rev-parse HEAD > {file}; ' + \\\n f'cd {cwd}; '\n success = False\n try:\n subprocess.check_call(\n bash_command, stderr=subprocess.DEVNULL, shell=True)\n sucess = True\n except subprocess.CalledProcessError:\n # not a git directory\n bash_command = f'rm {file}; cd {cwd}; '\n subprocess.check_call(bash_command, shell=True)\n except OSError:\n # git command not found\n pass\n return success"
] | [
"0.57098264",
"0.5443898",
"0.52824104",
"0.5211898",
"0.50929743",
"0.5078513",
"0.5051536",
"0.50458026",
"0.5009098",
"0.50035506",
"0.49857637",
"0.49795955",
"0.490863",
"0.4892152",
"0.4889629",
"0.4881726",
"0.48687592",
"0.48336747",
"0.4761207",
"0.47558445",
"0.47543064",
"0.47429267",
"0.47189486",
"0.47159085",
"0.4707934",
"0.46640337",
"0.46609634",
"0.46394268",
"0.46389657",
"0.46382028"
] | 0.6543128 | 0 |
Test adding basic Deterministic InnerNode. | def test_addInner(self):
print("\nTest 1: Adding InnerNode")
try:
builder = StaticBuilder()
builder.addInput(10, name="In")
enc_name = builder.addInner(3, name="In")
except AttributeError:
print("\nCAUGHT! Trying to assign the same name to two nodes! "
"AttributeError exception\n")
builder = StaticBuilder()
builder.addInput(10, name="In")
enc_name = builder.addInner(3, name="Det")
enc1 = builder.nodes[enc_name]
print('\nNode keys in builder:', list(builder.nodes.keys()))
print("This node's key:", enc_name)
self.assertEqual(enc1.label, 1, "The label has not been assigned correctly")
self.assertEqual(builder.num_nodes, 2, "The number of nodes has not been "
"assigned correctly")
self.assertEqual(enc1.num_declared_outputs, 0, "The number of outputs of the "
"DeterministicNode has not been assigned correctly")
self.assertEqual(enc1.num_declared_inputs, 0, "The number of inputs of the "
"DeterministicNode has not been assigned correctly") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_znode(self):\n z = self.test_start_empty()\n self.test_start_one_value(z)",
"def test_add_new_child(self):\n root = netapp_api.NaElement('root')\n self.mock_object(netapp_api.NaElement,\n '_convert_entity_refs',\n return_value=zapi_fakes.FAKE_INVOKE_DATA)\n\n root.add_new_child('options', zapi_fakes.FAKE_INVOKE_DATA)\n\n self.assertEqual(zapi_fakes.FAKE_XML2, root.to_string())",
"def test_Tree():",
"def _gen_test_tree_1():\n tree = BinaryNode(5)\n tree.left = BinaryNode(5)\n return tree",
"def _gen_test_tree_5():\n tree = BinaryNode(30)\n tree.right = BinaryNode(30)\n return tree",
"def test_add_node_with_children(self):\n root = netapp_api.NaElement('root')\n self.mock_object(netapp_api.NaElement,\n 'create_node_with_children',\n return_value=zapi_fakes.FAKE_INVOKE_DATA)\n mock_invoke = self.mock_object(root, 'add_child_elem')\n\n root.add_node_with_children('options')\n\n mock_invoke.assert_called_with(zapi_fakes.FAKE_INVOKE_DATA)",
"def test_add_network(self):\n pass",
"def create_test_node():\n node = cmds.createNode(\"unknown\")\n _add_test_attrs_to_node(node)\n return node",
"def _gen_test_tree_6():\n tree = BinaryNode(20)\n tree.left = BinaryNode(10)\n tree.right = BinaryNode(30)\n tree.left.right = BinaryNode(25)\n return tree",
"def test_tree_two_nodes_right(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1",
"def test_instantiate_leaf_node(self):\n try:\n LeafNode('my_label')\n except Exception:\n message = \"LeafNode instantiation failed\"\n self.fail(message)",
"def test_insert_node(self):\r\n myObj = DLinkedList()\r\n myObj.append(120)\r\n myObj.append(100)\r\n self.assertEqual(myObj.insert_node(Node(1000), myObj.head), [120, 1000, 100])",
"def testAppendChildDecision(self):\n self.node.append_child(self.color_decisions[0])\n\n self.assertEqual(\n [self.color_decisions[0]],\n self.node.color_decisions\n )",
"def add_node(self, node):",
"def test_init_node():\n from dll import Node\n new_node = Node(5)\n assert new_node.value == 5",
"def test_create_node_with_children(self):\n root = netapp_api.NaElement('root')\n self.mock_object(root, 'add_new_child', return_value='abc')\n\n result_xml = str(root.create_node_with_children(\n 'options', test1=zapi_fakes.FAKE_XML_STR,\n test2=zapi_fakes.FAKE_XML_STR))\n\n # No ordering is guaranteed for elements in this XML.\n self.assertTrue(result_xml.startswith(\"<options>\"), result_xml)\n self.assertIn(\"<test1>abc</test1>\", result_xml)\n self.assertIn(\"<test2>abc</test2>\", result_xml)\n self.assertTrue(result_xml.rstrip().endswith(\"</options>\"), result_xml)",
"def test_tree_binary_tree() -> None:\n t = generate_binary_tree_resources(4, 3)\n field(t, (\"root\", \"ds\", \"f1\")).identity = \"email\"\n field(t, (\"root.0.1.0\", \"ds.0.1.0\", \"f1\")).identity = \"ssn\"\n field(t, (\"root.1.1\", \"ds.1.1\", \"f1\")).identity = \"user_id\"\n assert generate_traversal({\"email\": \"X\"}, *t)\n assert generate_traversal({\"ssn\": \"X\"}, *t)\n assert generate_traversal({\"user_id\": \"X\"}, *t)",
"def test_append_left_head_is_new_node(dq_1):\n dq_1.append_left('threve')\n assert dq_1._dll.head.data == 'threve'",
"def test_add_node(num_mutations):\n net = WeightAgnosticNetwork(10, 2, 0.5)\n for _ in range(num_mutations):\n net.mutate()\n\n num_connections_pre, num_neurons_pre, num_layers_pre = get_network_stats(net)\n net.add_node()\n assert net.get_num_connections() == num_connections_pre + 1\n assert net.num_neurons == num_neurons_pre + 1\n assert len(net.neurons_in_layer) == num_layers_pre or len(\n net.neurons_in_layer) == num_layers_pre + 1",
"def _gen_test_tree_2():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.left.left = BinaryNode(1)\n tree.left.right = BinaryNode(4)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n tree.right.right.right = BinaryNode(9)\n return tree",
"def test_dummy():\n dummyblock = DummyBlockNode(\n name=\"None\",\n parameters=(),\n ancestor=None,\n dirty=False,\n filepath=\"/some/random/path\"\n )\n dummydirective = DummyDirectiveNode(\n name=\"Name\",\n ancestor=None,\n filepath=\"/another/path\"\n )\n dummycomment = DummyCommentNode(\n comment=\"Comment\",\n ancestor=dummyblock,\n filepath=\"/some/file\"\n )",
"def test_tree_two_nodes_left_has_depth_one(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1",
"def addChild(node):",
"def test_add():\n # Test for addition with scalar Rnode object and float value\n x = Rnode(0.11)\n z = x**2 + x\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value **2 + x.value\n assert x.grad() == sum(weight * var.grad()\n for weight, var in x.children)\n except AssertionError as e:\n print(e)",
"def testAppendChildren(self):\n self.node.append_children(\n self.color_corrections + self.color_decisions\n )\n\n self.assertEqual(\n self.color_corrections,\n self.node.color_corrections\n )\n\n self.assertEqual(\n self.color_decisions,\n self.node.color_decisions\n )",
"def _gen_test_tree_4():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(10)\n tree.right = BinaryNode(9)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n return tree",
"def test_node_neighbors(graph_no_edges):\n graph_no_edges.add_edge('BB', 82, 5)\n assert graph_no_edges.neighbors('BB') == {82: 5}",
"def test_imbalanced_addition(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n failure_callback = False\n handler = self.new_handler()\n new_vals = [randint(1, POINT_CAP) for _ in range(randint(HEIGHT[0], HEIGHT[1]))]\n for val in new_vals:\n handler.addNewNode(val, b=False)\n true_bal = check_balance(handler.root)\n if handler.balanced is not true_bal:\n failures += 1\n failure_callback = True\n break\n\n if failure_callback:\n break\n state = handler.get_gamestate()\n for val in new_vals:\n if val not in state['node_points'].values():\n failures += 1\n break\n\n successes += 1\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to correctly add new nodes (non-balancing addition) ' +\n f'{failures}/{iterations} failures! {BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated adding nodes in non-balancing mode in {successes} trees.{BColors.ENDC}\")",
"def test_node_instantiation(create_empty_node):\n from linked_list import Node\n assert create_empty_node.value is None",
"def test_create_trienode():\n node = TrieNode(\"h\")\n assert node.value == \"h\"\n assert node.children == {}"
] | [
"0.634626",
"0.61077243",
"0.60818183",
"0.6013245",
"0.5986549",
"0.597251",
"0.595647",
"0.58965737",
"0.5876589",
"0.5845326",
"0.5809255",
"0.5755622",
"0.5737071",
"0.5720968",
"0.5700372",
"0.5690431",
"0.56798387",
"0.56767136",
"0.56564647",
"0.5654355",
"0.56480926",
"0.564055",
"0.5635012",
"0.56203693",
"0.5614329",
"0.559403",
"0.5586549",
"0.55817217",
"0.55782926",
"0.5563603"
] | 0.751408 | 0 |
Test adding basic OutputNode | def test_addOutput(self):
print("\nTest 2: Adding OutputNode")
builder = StaticBuilder()
builder.addInput(10, name="In")
builder.addInner(3, name="Det")
o_name = builder.addOutput(name="Out")
o1 = builder.nodes[o_name]
print("\nNode keys in builder:", list(builder.nodes.keys()))
print("This node's key:", o_name)
self.assertEqual(o1.label, 2, "The label has not been assigned correctly")
self.assertEqual(builder.num_nodes, 3, "The number of nodes has not been "
"assigned correctly")
self.assertEqual(o1.num_declared_outputs, 0, "The number of outputs of the "
"OutputNode has not been assigned correctly")
self.assertEqual(o1.num_declared_inputs, 0, "The number of inputs of the "
"OutputNode has not been assigned correctly") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_node_outputs(self):\n pass",
"def addOutputsNode():\n return render_template(\"addOutputsNode.html\")",
"def testNewOutputModule(self):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n output_module = manager.OutputManager.NewOutputModule('test_output')\n self.assertIsInstance(output_module, TestOutput)\n\n with self.assertRaises(ValueError):\n manager.OutputManager.NewOutputModule(1)\n\n with self.assertRaises(KeyError):\n manager.OutputManager.NewOutputModule('bogus')\n\n manager.OutputManager.DeregisterOutput(TestOutput)",
"def add_output_ops(self, graph, output):\n return output",
"def test_output(self):\n new_route = self.route.output(\"test data\", transform=\"transformed\")\n assert new_route != self.route\n assert new_route.route[\"output\"] == \"test data\"\n assert new_route.route[\"transform\"] == \"transformed\"",
"def write_output(self):",
"def writeOutput(self, output):",
"def _generate_output(self):\n raise NotImplementedError()",
"def testHasOutputClass(self):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n self.assertTrue(manager.OutputManager.HasOutputClass('test_output'))\n self.assertFalse(manager.OutputManager.HasOutputClass('bogus'))\n self.assertFalse(manager.OutputManager.HasOutputClass(1))\n\n manager.OutputManager.DeregisterOutput(TestOutput)",
"def test_node_write_to_output_buffer(graph):\n a = sf.Node()\n graph.render_subgraph(a)\n assert a.output_buffer[0][3] == 0.0\n a.output_buffer[0][3] = 1.0\n assert a.output_buffer[0][3] == 1.0\n\n #--------------------------------------------------------------------------------\n # Why is the output buffer of length 256 (SIGNALFLOW_DEFAULT_BLOCK_SIZE)\n # rather than 2048 (SIGNALFLOW_NODE_BUFFER_SIZE)? Because the output buffer's\n # length is reported by the Python bindings as `last_num_frames`.\n # Whether this is a good idea is open to debate.\n #\n # Better would be to have a precise and rigorous block size throughout, which\n # would mean adding a block buffer between the audio I/O and the Graph.\n #--------------------------------------------------------------------------------\n assert a.output_buffer.shape == (32, 256)\n a.output_buffer[31][255] = 1.0\n assert a.output_buffer[31][255] == 1.0\n with pytest.raises(IndexError):\n a.output_buffer[32][255] == 1.0\n with pytest.raises(IndexError):\n a.output_buffer[31][256] == 1.0",
"def test_create_named_output_edge(self):\n n1, n2 = Node('a'), Node('b')\n result = n1 * 'foo' | n2\n self.assertEqual(result, n2)\n self.assertEqual(n1.eout, [Edge(n1, n2, 'foo')])\n self.assertEqual(n1.ein, [])\n self.assertEqual(n2.ein, [Edge(n1, n2, 'foo')])\n self.assertEqual(n2.eout, [])",
"def testGetOutput(self):\n #f = open(\"src_output.root\", 'w')\n #f.close()\n\n #1) missing required -d option (the other required option, -r, is ignored)\n go = getoutput(self.logger, self.maplistopt)\n res = go()\n expRes = CommandResult(2001, 'ERROR: Task option is required')\n self.assertEquals(expRes, res)\n\n #2) -d option is present but -r is missing\n analysisDir = self.reqarea\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir])\n res = go()\n expRes = CommandResult(2002, 'ERROR: Range option is required')\n self.assertEquals(expRes, res)\n\n #3) request passed with the -d option does not exist\n #res = go([\"-d\", analysisDir + \"asdf\"])\n #TODO we expect an appropriate answer from the server.\n #By now, the server just answer an empty list\n\n #4) check correct behaviour without specifying output directory\n #N.B.: -p options is required for tests to skip proxy creation and delegation\n destDir = os.path.join(analysisDir, 'results')\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir(destDir))\n self.assertTrue(os.path.isfile(os.path.join(destDir, '1.root')))\n #Remove the directory\n shutil.rmtree(destDir)\n self.assertFalse(os.path.isdir(destDir))\n self.assertEquals(expRes, res)\n\n #5) correct behavior and output directory specified which exists\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"/tmp\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('/tmp'))\n destFile = os.path.join('/tmp', '1.root')\n self.assertTrue(os.path.isfile(destFile))\n os.remove(destFile)\n self.assertFalse(os.path.isfile(destFile))\n self.assertEquals(expRes, res)\n\n #6) correct behavior and output directory specified which does not exists\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"/tmp/asdf/qwerty\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('/tmp/asdf/qwerty'))\n #Remove the directory\n shutil.rmtree('/tmp/asdf/qwerty')\n self.assertEquals(expRes, res)\n\n #7) correct behavior and output directory specified which does not exists (relative path)\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"qwerty\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('qwerty'))\n #Remove the directory\n shutil.rmtree('qwerty')\n self.assertEquals(expRes, res)",
"def create_test_node():\n node = cmds.createNode(\"unknown\")\n _add_test_attrs_to_node(node)\n return node",
"def add_output(self, output, number, logid='default-log'):\n cell = self.get_cell(number, logid)\n out_element = ET.SubElement(cell, 'output')\n out_element.text = output",
"def _add_output(self, node_entries):\n\n for node_entry in node_entries:\n for node_type, output_name in zip(node_entry[\"types\"], node_entry[\"output_names\"]):\n dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(node_type.dtype)]\n output = onnx.helper.make_tensor_value_info(\n output_name, dtype, shape=get_node_shape(node_type)\n )\n self._mc.add_outputs([output])",
"def save_output_node(out):\n out_wc = out.clone()\n return out_wc",
"def test_5(self):\r\n r1, r2, r3, r4 = MyVariable(1), MyVariable(2), MyVariable(3), MyVariable(4)\r\n o0 = MyOp.make_node(r1, r2)\r\n o1 = MyOp.make_node(o0.outputs[0], r4)\r\n all = io_toposort([], o0.outputs)\r\n assert all == [o0]",
"def test_set_output_implicitly(self):\n self.command.output = \"\"\n self.command.package = self.input_ovf\n self.assertEqual(self.command.output, \"\")\n self.command.run()\n self.assertEqual(self.command.output, self.input_ovf)",
"def test_debug_output(self):\n assert output(self.msg) is not None",
"def add_output_param(self, name, ptype, default_value=NULL_VALUE): \n param_name = self._get_unique_param_name(name, NodeParam.OUTPUT)\n p = NodeParam(self, param_name, ptype, NodeParam.OUTPUT, \n default_value=default_value, user_param=self._params_created) \n self._output_params[param_name] = p\n return p",
"def __init__(self, name, node):\n super(OutputPlug, self).__init__(name, node, (InputPlug, ))\n self.node.outputs[self.name] = self",
"def output(self):\r\n self.logic ( )\r\n return self.output",
"def _create_outputs(self) -> ComponentOutputs:\n raise NotImplementedError",
"def handle_output(self, workunit, label, s):\r\n pass",
"def handle_output(self, workunit, label, s):\r\n pass",
"def addOutput(self, *args):\n return _libsbml.Transition_addOutput(self, *args)",
"def test_default_output(self):\n env = pike.Environment()\n output = pike.Graph('output')\n output.sink = pike.noop()\n with patch.object(output, 'run') as run:\n run.return_value = []\n env.set_default_output(output)\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n env.add(graph)\n env.run_all()\n run.assert_called_with([])",
"def add_node(self, node):",
"def test_add_znode(self):\n z = self.test_start_empty()\n self.test_start_one_value(z)",
"def test_as_output(self):\n self.assertEqual(render('{% output_as 1 %}-{{ out }}'), '1-')\n self.assertEqual(render('{% output_as 1 as out %}-{{ out }}'),\n 'yes_as-1')"
] | [
"0.75490946",
"0.666691",
"0.63000286",
"0.6119677",
"0.6104275",
"0.6039189",
"0.59738773",
"0.5940442",
"0.5934479",
"0.5925732",
"0.5917337",
"0.5912333",
"0.5910766",
"0.58941495",
"0.5879698",
"0.5866728",
"0.5860062",
"0.5858629",
"0.58093554",
"0.5797671",
"0.57790154",
"0.57656026",
"0.57635814",
"0.57453835",
"0.57453835",
"0.57427186",
"0.5732115",
"0.57124406",
"0.5703054",
"0.5690593"
] | 0.78752226 | 0 |
Test building a model with 2 outputs. Test cloning an output | def test_BuildModel1(self):
print("\nTest 5: Building a Model with cloning")
builder = StaticBuilder("Clone")
in1 = builder.addInput(10)
enc1 = builder.addInner(3)
out1 = builder.addOutput(name="Out1")
out2 = builder.addOutput(name="Out2")
builder.addDirectedLink(in1, enc1)
builder.addDirectedLink(enc1, out1)
builder.addDirectedLink(enc1, out2)
builder.build() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_default_output_2():\n #input_file = os.path.join('.', 'test_files', 'test.input')\n actual = os.path.join('.', 'test_files', 'rc_actual.out')\n times = list(range(0, 30, 5))\n inputs = {\"names\": ['V'],\n \"values\": [\n [1],\n [0],\n [-1],\n [0],\n [1]\n ]\n }\n params=None\n\n default_model_2 = ModelBCMD('rc',\n inputs,\n params,\n times,\n input_file = None,\n input_required=True,\n testing=True,\n workdir=os.path.join('.','test_files'),\n debug=True,\n basedir=BASEDIR)\n\n default_model_2.write_default_input_2()\n print(default_model_2.input_file.encode())\n print(default_model_2.input_file.encode().decode())\n default_model_2.run_2()\n assert_true(filecmp.cmp(default_model_2.output_coarse, actual), msg='Coarse output files do not match actual.')\n os.remove(default_model_2.output_coarse)\n os.remove(default_model_2.output_detail)",
"def test_BuildModel0(self):\n print(\"\\nTest 4: Building a Basic Model\")\n builder = StaticBuilder(scope=\"Basic\")\n in_name = builder.addInput(10)\n enc_name = builder.addInner(3)\n out_name = builder.addOutput()\n builder.addDirectedLink(in_name, enc_name)\n builder.addDirectedLink(enc_name, out_name)\n \n self.assertEqual(builder.num_nodes, 3, \"The number of nodes has not been \"\n \"assigned correctly\")\n \n builder.build()\n inn, enc, out = ( builder.nodes[in_name], builder.nodes[enc_name],\n builder.nodes[out_name] )\n self.assertEqual(inn._oslot_to_otensor[0].shape.as_list()[-1],\n enc._islot_to_itensor[0].shape.as_list()[-1], \n \"The input tensors have not been assigned correctly\")\n self.assertEqual(enc._oslot_to_otensor[0].shape.as_list()[-1],\n out._islot_to_itensor[0].shape.as_list()[-1], \n \"The input tensors have not been assigned correctly\")",
"def test_part_1(arguments, output):\n # assert part_1.solution(arguments) == output\n assert part_1.solution(arguments) == output",
"def test_BuildModel2(self):\n print(\"\\nTest 6: Building a Model with Concat\")\n builder = StaticBuilder(\"Concat\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3, num_islots=2)\n out1 = builder.addOutput()\n\n builder.addDirectedLink(in1, enc1, islot=0)\n builder.addDirectedLink(in2, enc1, islot=1)\n builder.addDirectedLink(enc1, out1)\n \n builder.build()",
"def test_simple_merge(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(6)(x1)\n x4 = merge([x2, x3], mode=\"concat\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )",
"def test_clone_scenario(self):\n pass",
"def test_BuildModel3(self):\n print(\"\\nTest 7: Building a more complicated Model\")\n builder = StaticBuilder(\"BreakIt\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3)\n enc2 = builder.addInner(5, num_islots=2)\n out1 = builder.addOutput()\n out2 = builder.addOutput()\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(in2, enc2, islot=0)\n builder.addDirectedLink(enc1, enc2, islot=1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc2, out2)\n \n builder.build()",
"def _test_output_shapes(model):\n assert model.r == r\n assert model.m == m\n assert model.c_.shape == (r,)\n assert model.A_.shape == (r,r)\n assert model.Hc_.shape == (r,r*(r+1)//2)\n assert model.H_.shape == (r,r**2)\n assert model.Gc_.shape == (r,r*(r+1)*(r+2)//6)\n assert model.G_.shape == (r,r**3)\n assert model.B_.shape == (r,m)\n assert hasattr(model, \"datacond_\")\n assert hasattr(model, \"dataregcond_\")\n assert round(model.dataregcond_, 6) <= round(model.datacond_, 6)\n assert hasattr(model, \"residual_\")\n assert hasattr(model, \"misfit_\")\n assert round(model.misfit_, 6) <= round(model.residual_, 6)",
"def test_build_model(arguments):\n ...",
"def test(self):\n img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)\n ## test flow ##\n\n self.save_results(img_gen, data_name='vis')\n if self.opt.save_input or self.opt.phase == 'val':\n self.save_results(self.input_P1, data_name='ref')\n self.save_results(self.input_P2, data_name='gt')\n result = torch.cat([self.input_P1, img_gen, self.input_P2], 3)\n self.save_results(result, data_name='all')",
"def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass",
"def test_output(data,idx,model):\n x,y = data[idx]\n out = model(x)\n return y.data.cpu().numpy(), out.data.cpu().numpy()",
"def test_prepare_sample_to_forward(self):\n sample = [\n {\"src\": \"ola mundo\", \"ref\": \"hi world\", \"mt\": \"hey world!\", \"score\": 0.8},\n {\"src\": \"ola mundo\", \"ref\": \"hi world\", \"mt\": \"hey world!\", \"score\": 0.8},\n ]\n\n model_input, target = self.estimator.prepare_sample(sample)\n model_output = self.estimator(**model_input)\n self.assertTrue(model_output[\"score\"].shape[0] == 2)\n self.assertTrue(model_output[\"score\"].shape[1] == 1)",
"def test_simple_creation():\n # Get model file\n create.main(\"mlp\", \"10:12:8\", \"model_test.tar\")",
"def test_copied_models_are_equal(dbdiskrepo):\n original = fit_model()\n\n shallow = copy(original)\n assert original.artifact.id == shallow.artifact.id\n assert original.artifact.value_id == shallow.artifact.value_id\n assert hash(original) == hash(shallow)\n\n deep = deepcopy(original)\n assert original.artifact.id == deep.artifact.id\n assert original.artifact.value_id == deep.artifact.value_id\n assert hash(original) == hash(deep)",
"def testModel( self, classTest, classPred):",
"def test_copying_layout(empty_model):\n assert 1 == 0 # TODO",
"def test_reproducible(self):\n model_1 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_1.train(epochs=2)\n\n model_2 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_2.train(epochs=2)\n self.assertTrue(np.allclose(model_1.kv.syn0, model_2.kv.syn0))",
"def test():\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--name', type=str, help='name of the model',\n default='model_new_o')\n parser.add_argument('-f', '--filename', type=str,\n help='name of the dataset (.h5 file)', default='./dataset.h5')\n parser.add_argument('-bs', '--batch-size', type=int,\n help='size of the batches of the training data', default=256)\n args = parser.parse_args()\n\n name = args.name\n filename = args.filename\n batch_size = args.batch_size\n\n out_channels = 400\n model_path = './model/' + name\n checkpoint_path = model_path + '/checkpoints'\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n for k, v in vars(args).items():\n print('{0} = \"{1}\"'.format(k, v))\n print('device = \"' + device + '\"')\n\n if not os.path.exists(checkpoint_path):\n print('Model parameters not found: ' + checkpoint_path)\n exit()\n\n # Dataset\n\n input_cols = ['camera', 'pos_x', 'pos_y', 'theta']\n target_cols = ['target_map']\n train_test_split = 11\n\n dataset = get_dataset(filename, device=device, augment=False,\n input_cols=input_cols, target_cols=target_cols)\n split_index = dataset.cumulative_sizes[train_test_split]\n\n # Model\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model = NN(in_channels=3, out_channels=out_channels).to(device)\n model.load_state_dict(torch.load(checkpoint_path + '/best.pth'))\n summary(model, (3, 64, 80), device=device)\n\n auc_function = MaskedAUROC()\n\n # Testing\n\n aucs = []\n for x, px, py, pt, y in dataset.batches(batch_size, start=split_index, shuffle=False):\n pose = torch.stack([px, py, pt], dim=-1).to(device)\n mask = y > -1\n\n preds = model(x)\n\n aucs.append(auc_function(preds, y, mask).cpu().numpy())\n\n auc = np.nanmean(aucs, axis=0).reshape(20, 20)\n auc = np.rot90(auc, 1)\n auc = np.fliplr(auc) * 100\n\n print('AUC: ' + str(auc.mean().item()))\n\n print(auc)\n\n rounded = (100 * coords).round(2).astype(int)\n fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(7, 5.8))\n sns.distplot(auc, bins=int(np.ceil(auc.max() - auc.min())),\n ax=ax[0], kde=False, rug=False, color='red', hist_kws={'rwidth': 0.75})\n sns.heatmap(auc, cmap='gray', annot=True, cbar_kws={'shrink': .8},\n vmin=50, vmax=100, linewidths=0, ax=ax[1])\n plt.yticks(.5 + np.arange(20), np.unique(rounded[:, 0])[::-1])\n plt.xticks(.5 + np.arange(20), np.unique(rounded[:, 1]))\n plt.xlabel('Y [cm]')\n plt.ylabel('X [cm]')\n plt.setp(ax[1].xaxis.get_majorticklabels(), rotation=0)\n plt.setp(ax[1].yaxis.get_majorticklabels(), rotation=0)\n plt.axis('equal')\n plt.tight_layout()\n plt.show()",
"def test09(self):\n model = self.setup_model02()\n\n model.dual[model.g] = 1\n model.ipopt_zL_out[model.x[1]] = 1\n model.ipopt_zL_out[model.x[2]] = 1\n model.ipopt_zU_out[model.x[1]] = 1\n model.ipopt_zU_out[model.x[2]] = 1\n\n wts = StoreSpec.suffix(suffix_filter=(\"dual\",))\n to_json(model, fname=self.fname, wts=wts)\n\n model.dual[model.g] = 10\n model.ipopt_zL_out[model.x[1]] = 10\n model.ipopt_zL_out[model.x[2]] = 10\n model.ipopt_zU_out[model.x[1]] = 10\n model.ipopt_zU_out[model.x[2]] = 10\n\n from_json(model, fname=self.fname, wts=wts)\n assert model.dual[model.g] == 1\n assert model.ipopt_zL_out[model.x[1]] == 10\n assert model.ipopt_zL_out[model.x[2]] == 10\n assert model.ipopt_zU_out[model.x[1]] == 10\n assert model.ipopt_zU_out[model.x[2]] == 10",
"def test_set_output_implicitly(self):\n self.command.output = \"\"\n self.command.package = self.input_ovf\n self.assertEqual(self.command.output, \"\")\n self.command.run()\n self.assertEqual(self.command.output, self.input_ovf)",
"def test_merge_multiply(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(5)(x1)\n x4 = merge([x2, x3], mode=\"mul\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )",
"def test10(self):\n model = self.setup_model02()\n\n model.dual[model.g] = 1\n model.ipopt_zL_out[model.x[1]] = 1\n model.ipopt_zL_out[model.x[2]] = 1\n model.ipopt_zU_out[model.x[1]] = 1\n model.ipopt_zU_out[model.x[2]] = 1\n\n wts = StoreSpec.suffix(suffix_filter=(\"dual\",))\n to_json(model, fname=self.fname, wts=wts)\n\n model.dual[model.g] = 10\n model.ipopt_zL_out[model.x[1]] = 10\n model.ipopt_zL_out[model.x[2]] = 10\n model.ipopt_zU_out[model.x[1]] = 10\n model.ipopt_zU_out[model.x[2]] = 10\n\n from_json(model, fname=self.fname, wts=StoreSpec.suffix())\n assert model.dual[model.g] == 1\n assert model.ipopt_zL_out[model.x[1]] == 10\n assert model.ipopt_zL_out[model.x[2]] == 10\n assert model.ipopt_zU_out[model.x[1]] == 10\n assert model.ipopt_zU_out[model.x[2]] == 10",
"def testOutputs(self):\n # Remember original (correct) example outputs\n old_files = self.read_outputs()\n\n # Set up and run Xanthos\n ini = 'example/pm_abcd_mrtm.ini'\n xth = Xanthos(ini)\n res = xth.execute()\n\n # Check result dimensions\n self.assertEqual(res.Q.shape, (67420, 372))\n\n # Test that new outputs equal old outputs.\n new_files = self.read_outputs()\n for k in new_files.keys():\n pd.testing.assert_frame_equal(new_files[k], old_files[k])",
"def _post_model_build(self):\r\n with tf.variable_scope('copy2test'):\r\n all_variables = tf.global_variables()\r\n train_vars = dict([(v.name, v) for v in all_variables\r\n if not v.name.startswith('test/')])\r\n test_vars = dict([(v.name, v) for v in all_variables\r\n if v.name.startswith('test/')])\r\n self._copy_variables_to_test_model_op = tf.tuple([\r\n test_vars['test/' + k].assign(train_vars[k]) for k in train_vars.keys()\r\n if 'test/' + k in test_vars\r\n ])\r\n\r\n # Begin testing thread\r\n self._coordinator = tf.train.Coordinator()\r\n self._thread = threading.Thread(target=self.test_job,\r\n name='%s_tester' % self.model.identifier)\r\n self._thread.daemon = True\r\n self._thread.start()\r\n\r\n # Pick tensors we need to evaluate\r\n all_tensors = dict(self.model.loss_terms['test'], **self.model.metrics['test'])\r\n self._tensors_to_evaluate = dict([(n, t) for n, t in all_tensors.items()])\r\n loss_terms_to_evaluate = dict([(n, t) for n, t in self.model.loss_terms['test'].items()\r\n if t in self._tensors_to_evaluate.values()])\r\n metrics_to_evaluate = dict([(n, t) for n, t in self.model.metrics['test'].items()\r\n if t in self._tensors_to_evaluate.values()])\r\n\r\n # Placeholders for writing summaries at end of test run\r\n self._placeholders = {}\r\n for type_, tensors in (('loss', loss_terms_to_evaluate),\r\n ('metric', metrics_to_evaluate)):\r\n for name in tensors.keys():\r\n name = '%s/test/%s' % (type_, name)\r\n placeholder = tf.placeholder(dtype=np.float32, name=name + '_placeholder')\r\n self.summary.scalar(name, placeholder)\r\n self._placeholders[name.split('/')[-1]] = placeholder",
"def test_output(self):\n new_route = self.route.output(\"test data\", transform=\"transformed\")\n assert new_route != self.route\n assert new_route.route[\"output\"] == \"test data\"\n assert new_route.route[\"transform\"] == \"transformed\"",
"def test_main_modular_reuse_model(tmpdir_factory: TempdirFactory) -> None:\n\n output_directory = Path(tmpdir_factory.mktemp('output'))\n\n input_filename = OPEN_API_DATA_PATH / 'modular.yaml'\n output_path = output_directory / 'model'\n\n with freeze_time(TIMESTAMP):\n main(\n [\n '--input',\n str(input_filename),\n '--output',\n str(output_path),\n '--reuse-model',\n ]\n )\n main_modular_dir = EXPECTED_MAIN_PATH / 'main_modular_reuse_model'\n for path in main_modular_dir.rglob('*.py'):\n result = output_path.joinpath(path.relative_to(main_modular_dir)).read_text()\n assert result == path.read_text()",
"def output_model(output_dir=\"./output\", model_out=None): \n # Find the path of MODEL_INIT via the parameter file\n par_file = os.path.join(output_dir, \"seisflows_paths.json\")\n with open(par_file) as f:\n model_init = json.load(f)[\"MODEL_INIT\"]\n\n assert(os.path.exists(model_init)), \\\n f\"MODEL_INIT does not exist\\n{model_init}\"\n print(f\"MODEL INIT: {model_init}\")\n\n # Determine the model number, only choose numbers, no 'init' or 'true'\n if model_out is None:\n available_models = glob(os.path.join(output_dir, \"model_[0-9]???\"))\n model_out = sorted(available_models)[-1]\n else:\n model_out = os.path.join(output_dir, model_out)\n\n assert(os.path.exists(model_out)), f\"MODEL_OUT does not exist\\n{model_out}\"\n print(f\"MODEL OUT: {model_out}\")\n\n # Quick check to make sure NPROC is the same for each directory\n nproc_check = [0, 0]\n for i, m in enumerate([model_init, model_out]):\n nprocs = [os.path.basename(_) for _ in glob(os.path.join(m, \"*\"))]\n # list comprehension strips string parts, e.g. 'proc000001_vp.bin' -> 1\n nproc_check[i] = max([int(_.split('_')[0][4:]) for _ in nprocs])\n assert(nproc_check[0] == nproc_check[1]), f\"NPROCS differ {nproc_check}\"\n print(f\"NPROC: {nproc_check[0]}\")\n \n # Symlink all available files that don't already exist in model_out\n model_init_files = glob(os.path.join(model_init, \"*\"))\n for src in model_init_files:\n dst = os.path.join(model_out, os.path.basename(src))\n if os.path.exists(dst):\n continue\n else:\n os.symlink(src, dst)",
"def main():\n parser = argparse.ArgumentParser(description='Behavioral Cloning Training Program')\n parser.add_argument('-d', help='data directory', dest='data_dir', type=str, default='data')\n parser.add_argument('-t', help='test size fraction', dest='test_size', type=float, default=0.2)\n parser.add_argument('-k', help='drop out probability', dest='keep_prob', type=float, default=0.5)\n parser.add_argument('-n', help='number of epochs', dest='nb_epoch', type=int, default=5)\n parser.add_argument('-c', help='steering correction', dest='correction', type=float, default=0.2)\n parser.add_argument('-b', help='batch size', dest='batch_size', type=int, default=32)\n parser.add_argument('-o', help='save best models only', dest='save_best_only', type=s2b, default='true')\n parser.add_argument('-l', help='learning rate', dest='learning_rate', type=float, default=1.0e-3)\n args = parser.parse_args()\n\n print('-' * 30)\n print('Parameters')\n print('-' * 30)\n for key, value in vars(args).items():\n print('{:<20} := {}'.format(key, value))\n print('-' * 30)\n\n data = load_data(args)\n model = build_model(args)\n train_model(model, args, *data)",
"def test_second_keras_model_created():\n X, _, _, _ = get_data()\n tf.random.set_seed(12345)\n initializer = tf.keras.initializers.Zeros()\n input_data = Input(shape=X[0].shape)\n xx = Dense(128, activation=\"relu\", kernel_initializer=initializer)(input_data)\n xx = Dense(128, activation=\"relu\", kernel_initializer=initializer)(xx)\n xx = Dense(64, activation=\"relu\", kernel_initializer=initializer)(xx)\n output = Dense(n_classes, activation=\"softmax\", kernel_initializer=initializer)(xx)\n _ = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n model2 = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n rightname = \"KerasModel\"\n assert (\n model2.model_type == rightname\n ), \"failed check for second model type being set in init()\"\n # noise multiplier should have been reset from default to one that matches rules.json\n assert model2.noise_multiplier == 0.7"
] | [
"0.6694175",
"0.6504223",
"0.63788325",
"0.6321789",
"0.6308089",
"0.6245688",
"0.6215992",
"0.61850905",
"0.6116955",
"0.6110523",
"0.609222",
"0.6081803",
"0.60653377",
"0.6058217",
"0.6056005",
"0.6037613",
"0.60202926",
"0.5977124",
"0.5976717",
"0.59375304",
"0.59170985",
"0.5893511",
"0.58933586",
"0.58925146",
"0.5884767",
"0.58826715",
"0.5853007",
"0.5833269",
"0.583173",
"0.5831216"
] | 0.7202675 | 0 |
Builds a model with 2 inputs. Tests ConcatNode | def test_BuildModel2(self):
print("\nTest 6: Building a Model with Concat")
builder = StaticBuilder("Concat")
in1 = builder.addInput(10)
in2 = builder.addInput(20)
enc1 = builder.addInner(3, num_islots=2)
out1 = builder.addOutput()
builder.addDirectedLink(in1, enc1, islot=0)
builder.addDirectedLink(in2, enc1, islot=1)
builder.addDirectedLink(enc1, out1)
builder.build() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def concat_model():\n x = tf.keras.Input(shape=[10, 10, 3, ])\n x1 = tf.keras.layers.Conv2D(5, (2, 2))(x)\n x2 = tf.keras.layers.Conv2D(6, (2, 2))(x)\n x3 = tf.keras.layers.Conv2D(7, (2, 2))(x)\n z = tf.keras.layers.concatenate([x2, x1, x3], axis=-1)\n z1 = tf.keras.layers.Conv2D(10, (2, 2))(z)\n z2 = tf.keras.layers.Conv2D(10, (2, 2))(z)\n z = tf.add(z1, z2)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"concat_model\")(z)\n return output",
"def _create_concat(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.attrs[\"axis\"]\n if factor < 0:\n factor = len(inputs[0].shape\n ) + factor # in order to support the negative axis\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)",
"def create_helper_concat_node(inputs, output_name, axis=0):\n concat_node = onnx.helper.make_node(\n \"Concat\",\n inputs=inputs,\n outputs=[output_name],\n name=output_name,\n axis=axis,\n )\n return [concat_node]",
"def convert_concat(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"dim\", 1))\n concat_node = onnx.helper.make_node(\n \"Concat\",\n input_nodes,\n [name],\n axis=axis,\n name=name\n )\n return [concat_node]",
"def build(data_shape_1, data_shape_2):\n # create NN model \n # design network\n \n inputs = keras.Input(shape=(data_shape_1, data_shape_2), name='inp')\n cnn1 = layers.Conv1D(16, 5, activation='relu')(inputs)\n cnn2 = layers.Conv1D(32, 3, activation='relu')(cnn1)\n cnn3 = layers.Conv1D(64, 3, activation='relu')(cnn2)\n cnn3 = layers.Flatten()(cnn3)\n lstm = layers.LSTM(100,return_sequences = True, activation='relu')(inputs)\n lstm = layers.Flatten()(lstm)\n x = layers.concatenate([cnn3,lstm])\n x = layers.Dense(100, activation='sigmoid')(x)\n outputs = layers.Dense(24)(x)\n\n model = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model')\n \n return model",
"def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node",
"def test_BuildModel1(self):\n print(\"\\nTest 5: Building a Model with cloning\")\n builder = StaticBuilder(\"Clone\")\n in1 = builder.addInput(10)\n enc1 = builder.addInner(3)\n out1 = builder.addOutput(name=\"Out1\")\n out2 = builder.addOutput(name=\"Out2\")\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc1, out2)\n \n builder.build()",
"def build_model(model_id1='bert-base-multilingual-cased',\n model_id2='bert-base-multilingual-uncased',\n max_len=192, dropout=0.2,\n **_):\n print(model_id1, model_id2)\n\n transformer1 = TFAutoModel.from_pretrained(model_id1)\n transformer2 = TFAutoModel.from_pretrained(model_id2)\n\n input_word_ids1 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids1\")\n out1 = transformer1(input_word_ids1)\n\n input_word_ids2 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids2\")\n out2 = transformer2(input_word_ids2)\n\n sequence_output1 = out1[0]\n sequence_output2 = out2[0]\n cls_token1 = sequence_output1[:, 0, :]\n cls_token2 = sequence_output2[:, 0, :]\n\n x = Dropout(dropout)(cls_token1) + Dropout(dropout)(cls_token2)\n out = Dense(1, activation='sigmoid')(x)\n\n model = Model(inputs=[input_word_ids1, input_word_ids2], outputs=out)\n\n return model",
"def convert_concat(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[i]) for i in range(len(op.input(\"X\")))]\n axis = op.attr(\"axis\")\n inputs = _dtype_shape_promotion(inputs)\n out = _op.concatenate(inputs, axis=axis)\n g.add_node(op.output(\"Out\")[0], out)",
"def test_BuildModel0(self):\n print(\"\\nTest 4: Building a Basic Model\")\n builder = StaticBuilder(scope=\"Basic\")\n in_name = builder.addInput(10)\n enc_name = builder.addInner(3)\n out_name = builder.addOutput()\n builder.addDirectedLink(in_name, enc_name)\n builder.addDirectedLink(enc_name, out_name)\n \n self.assertEqual(builder.num_nodes, 3, \"The number of nodes has not been \"\n \"assigned correctly\")\n \n builder.build()\n inn, enc, out = ( builder.nodes[in_name], builder.nodes[enc_name],\n builder.nodes[out_name] )\n self.assertEqual(inn._oslot_to_otensor[0].shape.as_list()[-1],\n enc._islot_to_itensor[0].shape.as_list()[-1], \n \"The input tensors have not been assigned correctly\")\n self.assertEqual(enc._oslot_to_otensor[0].shape.as_list()[-1],\n out._islot_to_itensor[0].shape.as_list()[-1], \n \"The input tensors have not been assigned correctly\")",
"def test_add_02():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()",
"def multiple_input_model():\n\n input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))\n input2 = tf.keras.Input(name='input2', shape=(12, 12, 3))\n x1 = tf.keras.layers.Conv2D(8, (1, 1), name='conv1a')(input1)\n x2 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1b')(input2)\n x = tf.keras.layers.add([x1, x2])\n x = tf.keras.layers.Conv2D(4, (1, 1), name='conv2')(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax, name=\"multiple_input_model\")(x)\n\n return outputs",
"def convert_rnn_param_concat(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n axis = int(attrs.get(\"dim\"))\n\n # mxnet RNN node and ONNX RNN/LSTM/GRU nodes\n # use different ways to store their parameters\n\n # The conversion between these formats is broken into 2 steps\n # The first step (performed here in _rnn_param_concat) regroups the\n # flattened parameters according to the table below.\n # The second step corrects the shapes and orders of gates and is\n # performed and described in more detail in the RNN node\n\n # mxnet [ONNX] -> ONNX (group)\n # i2h_weights [W (+ WB)] -> W (input weights)\n # h2h_weights [R (+ RB)] -> R (recurrence weights)\n # i2h_biases [Wb (+ WBb)] -> B = [Wb + Rb (+ WBb + RBb)]\n # h2h_biases [Rb (+ RBb)] -> (biases)\n\n split = len(input_nodes) // 2\n weights, biases = input_nodes[:split], input_nodes[split:]\n i2h_weights = weights[::2]\n h2h_weights = weights[1::2]\n i2h_biases = biases[::2]\n h2h_biases = biases[1::2]\n reordered_biases = [\n bias\n for pair in zip(i2h_biases, h2h_biases)\n for bias in pair\n ]\n\n # The order of mxnet parameters in the inputs is:\n # [\n # '{}{}_{}_{}'.format(d, l, g, t)\n # for t in ['weight', 'bias']\n # for l in range(num_layers)\n # for d in ['l', 'r'][:num_directions]\n # for g in ['i2h', 'h2h']\n # ]\n\n w = onnx.helper.make_node(\n \"Concat\",\n inputs=i2h_weights,\n outputs=[name + \"__W\"],\n axis=axis,\n name=name + \"__W\"\n )\n r = onnx.helper.make_node(\n \"Concat\",\n inputs=h2h_weights,\n outputs=[name + \"__R\"],\n axis=axis,\n name=name + \"__R\"\n )\n b = onnx.helper.make_node(\n \"Concat\",\n inputs=reordered_biases,\n outputs=[name + \"__B\"],\n axis=axis,\n name=name + \"__B\"\n )\n return [w, r, b]",
"def build_model(input_classes,output_classes):\n dimensions = 20\n inputs = []\n embedded_outputs = []\n for i in input_classes:\n input_layer = Input((1,))\n inputs.append(input_layer)\n embedder = Embedding(input_dim=i,output_dim=dimensions,input_length=1,embeddings_constraint=UnitNorm(axis=0))\n embedded_layer = embedder(input_layer)\n embedded_outputs.append(embedded_layer)\n\n embedded_concats = Concatenate()(embedded_outputs)\n flatten_layer = Flatten()\n\n dense_layer = Dense(output_classes)\n\n flattened_output = flatten_layer(embedded_concats)\n dense_output = dense_layer(flattened_output)\n\n # dense_output = dense_layer(embedded_concats)\n\n model = Model(inputs,dense_output)\n print(model.summary())\n model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')\n\n return model",
"def build_model():",
"def test_add_01():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (1, 1, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()",
"def test_add_03():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (3, 4)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()",
"def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]",
"def __build_model_pyramid(name, model, features):\n return keras.layers.Concatenate(axis=1, name=name)([model(f) for f in features])",
"def construct(self, x1, x2):\n x1 = self.up(x1)\n x = self.concat((x1, x2))\n return self.conv(x)",
"def create_split_concat_net_const(self, input_shape, output_shapes, axis, ir_version):\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n import numpy as np\n\n concat_axis = 0\n concat_output_shape = input_shape.copy()\n concat_output_shape[concat_axis] *= 2\n\n const_number = np.prod(input_shape)\n constant = np.random.randint(-127, 127, const_number).astype(np.float)\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)\n outputs, split = [], []\n for id, output_shape in enumerate(output_shapes):\n helper.make_tensor_value_info('output_{}'.format(id), TensorProto.FLOAT, output_shape)\n outputs.append('output_{}'.format(id))\n split.append(output_shape[axis])\n\n # Output for concat\n output_concat = helper.make_tensor_value_info('output_dyn_concat', TensorProto.FLOAT, concat_output_shape)\n\n node_const_def = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['const1'],\n value=helper.make_tensor(\n name='const_tensor',\n data_type=TensorProto.FLOAT,\n dims=input_shape,\n vals=constant,\n ),\n )\n\n node_split_def = onnx.helper.make_node(\n 'Split',\n inputs=['const1'],\n outputs=outputs,\n axis=axis,\n split=split\n )\n\n node_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=outputs,\n outputs=['output_concat'],\n axis=axis\n )\n\n node_dyn_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=['input', 'output_concat'],\n outputs=['output_dyn_concat'],\n axis=concat_axis\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_const_def, node_split_def, node_concat_def, node_dyn_concat_def],\n 'test_split_model',\n [input],\n [output_concat],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_split_model')\n\n #\n # Create reference IR net\n # Please, spesify 'type': 'Input' for inpit node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return onnx_net, ref_net",
"def get_model_concat(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n inputs_cond = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n cont_train = True\n if pretrain == \"pre\":\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1), # input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n embedded_inputs_cond = tf.nn.embedding_lookup(embedding_matrix, inputs_cond)\n\n embedded_inputs_all = tf.concat(1, [embedded_inputs, embedded_inputs_cond]) # concatenating the two embeddings\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length*2, embedded_inputs_all)]\n\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n\n start_state = tf.zeros([batch_size, lstm_encoder.state_size])\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n outputs, states = lstm_encoder(inputs_list, start_state, \"LSTM\")\n\n outputs_fin = outputs[-1]\n\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh)(outputs_fin) #tf.nn.softmax\n else:\n model = Projector(target_size, non_linearity=tf.nn.softmax)(outputs_fin) # tf.nn.softmax\n\n\n return model, [inputs, inputs_cond]",
"def test_add_00():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [\n info(\"A\", TensorProto.FLOAT, a_shape),\n info(\"B\", TensorProto.FLOAT, b_shape),\n ]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n b = np.random.rand(*b_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a, \"B\": b}, outputs).run()",
"def test_concat_get_op_product_graph(self):\n\n tf.compat.v1.reset_default_graph()\n\n _ = concat_model()\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['input_1'], ['concat_model/Softmax'])\n self.assertTrue(validate_branch_ops(conn_graph))\n self.assertTrue(validate_product_tensor_lists(conn_graph))\n self.assertEqual(2, conn_graph.branch_count)\n self.assertEqual(13, len(conn_graph.get_all_ops()))\n self.assertEqual(12 + len(tf.compat.v1.get_default_graph().get_collection('variables')),\n len(conn_graph.get_all_products()))\n\n # Check that the order of input products to the concat op matches the order of input tensors in the tf graph\n concat_tf_op = tf.compat.v1.get_default_graph().get_operation_by_name(\"concatenate/concat\")\n concat_op = conn_graph.get_all_ops()['concatenate/concat']\n for index, product in enumerate(concat_op.get_input_products()):\n self.assertTrue(len(product.consumers) == 1)\n self.assertEqual(product.tensor_dict[product.consumers[0]], concat_tf_op.inputs[index])",
"def convert_concat(self, op):\n try:\n from tflite.Operator import Operator\n from tflite.ConcatenationOptions import ConcatenationOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) > 1, \"input tensors length should be greater than 1\"\n\n data_nodes = [self.tensor_tab[t.tensor_idx] for t in input_tensors]\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n assert op.BuiltinOptionsType() == BuiltinOptions.ConcatenationOptions\n op_options = op.BuiltinOptions()\n concat_options = ConcatenationOptions()\n concat_options.Init(op_options.Bytes, op_options.Pos)\n concat_dim = concat_options.Axis()\n fused_activation_fn = concat_options.FusedActivationFunction()\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Concat operator with fused activation is not supported yet.'\n\n out_nodes = self.nn_concat(concat_dim, data_nodes, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes",
"def test_merge_add(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(5)(x1)\n x4 = merge([x2, x3], mode=\"sum\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )",
"def build_model(hype_space):\n print(\"Hyperspace:\")\n print(hype_space)\n\n input = Input(shape=(MAXLEN_SEQ, int(hype_space['embed_dim']) ))\n\n profiles_input = Input(shape=(MAXLEN_SEQ, NB_FEATURES,))\n x1 = concatenate([input, profiles_input])\n x2 = concatenate([input, profiles_input])\n inp = [input, profiles_input]\n\n x1 = Dense(1200, activation=\"relu\")(x1)\n x1 = Dropout(0.5)(x1)\n\n # x1 = Bidirectional(CuDNNGRU(units=100, return_sequences=True))(x1)\n # Defining a bidirectional LSTM using the embedded representation of the inputs\n x2 = Bidirectional(CuDNNGRU(units=500, return_sequences=True))(x2)\n # x2 = Dropout(0.5)(x2)\n x2 = Bidirectional(CuDNNGRU(units=100, return_sequences=True))(x2)\n # x2 = Dropout(0.5)(x2)\n COMBO_MOVE = concatenate([x1, x2])\n w = Dense(500, activation=\"relu\")(COMBO_MOVE) # try 500\n w = Dropout(0.4)(w)\n w = tcn.TCN(return_sequences=True)(w)\n\n y = TimeDistributed(Dense(NB_CLASSES_Q8, activation=\"softmax\"))(w)\n\n # Defining the model as a whole and printing the summary\n model = Model(inp, y)\n # model.summary()\n\n # Setting up the model with categorical x-entropy loss and the custom accuracy function as accuracy\n adamOptimizer = Adam(lr=0.001, beta_1=0.8, beta_2=0.8, epsilon=None, decay=0.0001, amsgrad=False)\n model.compile(optimizer=adamOptimizer, loss=\"categorical_crossentropy\", metrics=[accuracy])\n\n return model",
"def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b",
"def model_build(self):\n\n # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!\n X_input = Input(self.inputData[0].shape)\n\n '''\n # CONV -> BN -> RELU Block applied to X\n X = Conv2D(8, (8, 8), name='conv0')(X_input)\n X = BatchNormalization(name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool0')(X)\n X = Dropout(0.1, name='dropout0')(X)\n\n X = Conv2D(16, (16, 16), name='conv1')(X)\n X = BatchNormalization(name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool1')(X)\n X = Dropout(0.1, name='dropout1')(X)\n\n X = Conv2D(16, (32, 32), name='conv2')(X)\n X = BatchNormalization(name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool2')(X)\n X = Dropout(0.1, name='dropout2')(X)\n' '''\n\n X = Dense(500, activation='relu', name='fc0')(X_input)\n X = Dropout(0.1, name='dropout1')(X)\n X = Dense(500, activation='relu', name='fc1')(X)\n X = Dropout(0.1, name='dropout2')(X)\n X = Dense(3, activation='softmax', name='fc2')(X)\n\n # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.\n self.model = Model(inputs=X_input, outputs=X, name='acouModel')",
"def create_split_concat_net(self, input_shape, output_shapes, axis, ir_version):\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)\n outputs, split = [], []\n for id, output_shape in enumerate(output_shapes):\n helper.make_tensor_value_info('output_{}'.format(id), TensorProto.FLOAT, output_shape)\n outputs.append('output_{}'.format(id))\n split.append(output_shape[axis])\n\n # Output for concat\n output_concat = helper.make_tensor_value_info('output_concat', TensorProto.FLOAT, input_shape)\n\n node_split_def = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=outputs,\n axis=axis,\n split=split\n )\n\n node_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=outputs,\n outputs=['output_concat'],\n axis=axis\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_split_def, node_concat_def],\n 'test_split_model',\n [input],\n [output_concat],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_split_model')\n\n #\n # Create reference IR net\n # Please, spesify 'type': 'Input' for inpit node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return onnx_net, ref_net"
] | [
"0.6844526",
"0.6768665",
"0.66675013",
"0.65246445",
"0.6511792",
"0.6232399",
"0.6185943",
"0.61621577",
"0.60782385",
"0.60532033",
"0.6042736",
"0.6000108",
"0.5988612",
"0.59775037",
"0.5974451",
"0.5970998",
"0.59662324",
"0.59449714",
"0.59398437",
"0.58934414",
"0.5888277",
"0.58700323",
"0.58603",
"0.5837908",
"0.58329976",
"0.5818866",
"0.58050555",
"0.5792099",
"0.5768973",
"0.5734099"
] | 0.77477473 | 0 |
Given the IP ADDRESS of the camera to which you are connected and the ACQUISITION MODE into which you want to put the camera, this command will send the corresponding request to the camera. | def command(mode, ip, log):
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging_config[log])
# Using the default dict to get a valid format string no matter what
phantom_socket = PhantomSocket(ip)
phantom_socket.connect()
click.echo('CONNECTED TO THE PHANTOM CAMERA')
mode_identifier = _modes[mode]
phantom_socket.set_mode(mode_identifier)
click.echo('PHANTOM WILL TRANSIT INTO THE MODE "%s" NOW!' % mode_identifier)
click.echo('THIS WILL CAUSE A REBOOT OF THE CAMERA, SO PLEASE HAVE PATIENCE')
click.echo('IN CASE A CONNECTION CANNOT BE ESTABLISHED EVEN AFTER SOME TIME, HARD RESET THE CAMERA')
click.echo('AFTER THE HARD RESET, THE MODE SHOULD BE CHANGED')
phantom_socket.disconnect() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def camera_control(camera_host, camera_port, camera_user, camera_pass, q):\n\n try:\n camera = IPCamera(camera_host, camera_port, camera_user, camera_pass)\n q.put(camera.get_rtsp_url())\n except RuntimeError as exc:\n q.put(exc)\n\n try:\n while True:\n camera.move_to(*q.get())\n except KeyboardInterrupt:\n pass",
"def event_btn_confirm_ip(self):\n\n print(\"attempting to open camera\")\n self.change_state(States.ACTIVATE_CAMERA)",
"def camstart():\n\n\trespond = send_command('camstart')",
"def __init__(self, local_ip, local_port, command_timeout=7, tello_ip='192.168.10.1',\r\n tello_port=8889):\r\n \r\n self.abort_flag = False\r\n self.command_timeout = command_timeout\r\n self.response = None \r\n\r\n self.frame = None # numpy array BGR -- current camera output frame\r\n self.last_frame = None\r\n\r\n # self.cap = cv2.VideoCapture(\"udp://@0.0.0.0:11111\")\r\n\r\n self.tello_ip = tello_ip\r\n self.tello_address = (tello_ip, tello_port)\r\n \r\n # Commands\r\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for sending cmd\r\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n self.socket.bind((local_ip, local_port))\r\n\r\n # thread for receiving cmd ack\r\n self.receive_thread = threading.Thread(target=self._receive_thread)\r\n self.receive_thread.daemon = True\r\n self.receive_thread.start()\r\n\r\n self.socket.sendto(b'command', self.tello_address)\r\n\r\n # Video\r\n # self.socket_video = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for receiving video stream\r\n # self.local_video_port = 11111 # port for receiving video stream\r\n # self.socket_video.bind((local_ip, self.local_video_port))\r\n\r\n # thread for receiving video\r\n # self.receive_video_thread = threading.Thread(target=self._receive_video_thread)\r\n # self.receive_video_thread.daemon = True\r\n # self.receive_video_thread.start() \r\n\r\n # to receive video -- send cmd: command, streamon\r\n self.socket.sendto(b'streamon', self.tello_address)\r\n\r\n self.stream_state = True\r\n\r\n # TELLO STATE\r\n self.state = {}\r\n\r\n self.socket_state = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for receiving state\r\n self.state_port = 8890 # port for receiving state\r\n self.socket_state.bind((local_ip, self.state_port))\r\n\r\n # thread for receiving state\r\n self.receive_state_thread = threading.Thread(target=self._receive_state_thread)\r\n self.receive_state_thread.daemon = True\r\n self.receive_state_thread.start() \r\n\r\n self.socket_state.sendto('command'.encode('utf-8'), self.tello_address)",
"def camera_start(self):\n mycam = ONVIFCamera(self.__cam_ip, 80, self.__cam_user, self.__cam_password)\n logging.info('Create media service object')\n media = mycam.create_media_service()\n logging.info('Get target profile')\n media_profile = media.GetProfiles()[0]\n logging.info('Camera working!')\n\n self.mycam = mycam\n self.camera_media_profile = media_profile\n self.camera_media = media\n self.mycam = mycam\n\n return self.mycam",
"def pibooth_setup_camera(cfg):",
"def command(self, value):\n for ii in range(0, len(exposure_mode_names)):\n if value == exposure_mode_names[ii]: break\n self.tcp_comms.tcp_params.exposureMode = ii\n self.tcp_comms.send_exposure_mode(self.tcp_comms.tcp_params.exposureMode)",
"def set_camera_module(action):\n\n endpoint = CAMERA_CAPTURE_URL + \"/camera/\" + action\n if DEBUG:\n print(\"Calling endpoint '%s'\" % endpoint)\n\n response = requests.post(endpoint)\n \n if DEBUG:\n print(\"Call to endpoint '%s' returned status code %s. Reason: %s\" % (endpoint, str(response.status_code), response.content))",
"def change_IP(self,server_IP,MAC):\n content = {'server_IP':server_IP,'MAC_address':MAC}\n content = json.dumps(content)\n headers = {\"Content-Type\":\"application/json\"}\n #address will be given by the api\n r = requests.post(f\"http://{self.webserver_address}/api/camera/update_ip\", data = content,headers = headers,verify=False)\n if(r.status_code == 200):\n return True\n return False",
"def setMode(self, request, context):\n \n self.vehicle.mode = VehicleMode(str(request.mode))\n self.vehicle.wait_ready('mode')\n \n return droneconnect_pb2.Null()",
"def initialCamera(self, cmd):\n\n pass",
"def run_single_camera(cam):\n\n try:\n # Retrieve TL device nodemap and print device information\n #nodemap_tldevice = cam.GetTLDeviceNodeMap()\n\n #result &= print_device_info(nodemap_tldevice)\n\n # Initialize camera\n cam.Init()\n\n # Retrieve GenICam nodemap\n nodemap = cam.GetNodeMap()\n exposures=[2000,4000,8000,16000]\n index=0\n if cam.ExposureAuto.GetAccessMode() != PySpin.RW:\n print(\"Unable to disable automatic exposure. Aborting...\")\n return False\n node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode(\"AcquisitionMode\"))\n if not PySpin.IsAvailable(node_acquisition_mode) or not PySpin.IsWritable(node_acquisition_mode):\n print(\"Unable to set acquisition mode to continuous (enum retrieval). Aborting...\")\n return False\n\n # Retrieve entry node from enumeration node\n node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName(\"Continuous\")\n if not PySpin.IsAvailable(node_acquisition_mode_continuous) or not PySpin.IsReadable(node_acquisition_mode_continuous):\n print(\"Unable to set acquisition mode to continuous (entry retrieval). Aborting...\")\n return False\n\n acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()\n\n node_acquisition_mode.SetIntValue(acquisition_mode_continuous)\n\n print(\"Acquisition mode set to continuous...\")\n\n cam.ExposureAuto.SetValue(PySpin.ExposureAuto_Off)\n '''\n # Set maximum width\n #\n # *** NOTES ***\n # Other nodes, such as those corresponding to image width and height,\n # might have an increment other than 1. In these cases, it can be\n # important to check that the desired value is a multiple of the\n # increment.\n #\n # This is often the case for width and height nodes. However, because\n # these nodes are being set to their maximums, there is no real reason\n # to check against the increment.\n if cam.Width.GetAccessMode() == PySpin.RW and cam.Width.GetInc() != 0 and cam.Width.GetMax != 0:\n cam.Width.SetValue(FRAME_WIDTH)\n print(\"Width set to %i...\" % cam.Width.GetValue())\n\n else:\n print(\"Width not available...\")\n result = False\n\n # Set maximum height\n #\n # *** NOTES ***\n # A maximum is retrieved with the method GetMax(). A node's minimum and\n # maximum should always be a multiple of its increment.\n if cam.Height.GetAccessMode() == PySpin.RW and cam.Height.GetInc() != 0 and cam.Height.GetMax != 0:\n cam.Height.SetValue(FRAME_HEIGHT)\n print(\"Height set to %i...\" % cam.Height.GetValue())\n\n else:\n print(\"Height not available...\")\n result = False\n '''\n print(\"Automatic exposure disabled...\")\n #node_acquisition_framerate = PySpin.CFloatPtr(nodemap.GetNode(\"AcquisitionFrameRate\"))\n\n # if not PySpin.IsAvailable(node_acquisition_framerate) and not PySpin.IsReadable(node_acquisition_framerate):\n # print(\"Unable to retrieve frame rate. 
Aborting...\")\n # return False\n\n # framerate_to_set = node_acquisition_framerate.GetValue()\n\n # print(\"Frame rate to be set to %d...\" % framerate_to_set)\n canvas=np.zeros((FRAME_HEIGHT*2,FRAME_WIDTH*2,3), np.uint8)\n while True:\n exposure=exposures[index]\n \n configure_exposure(cam, exposure)\n # Acquire images\n err, img,width,height = acquire_images(cam, nodemap)\n if err < 0:\n return err\n\n \n img = img.GetData().reshape(height,width,3)\n\n half_height = int(height/2)\n half_width = int(width/2)\n half_frame_height = int(FRAME_HEIGHT/2)\n half_frame_width = int(FRAME_WIDTH/2)\n \n img = img[half_height-half_frame_height:half_height+half_frame_height,half_width-half_frame_width:half_width+half_frame_width]\n #smallimg=cv2.resize(img,(int(FRAME_WIDTH/2),int(FRAME_HEIGHT/2)))\n if index==0:\n #top left\n canvas[0:FRAME_HEIGHT,0:FRAME_WIDTH]=img\n elif index==1:\n #top right\n canvas[0:FRAME_HEIGHT,FRAME_WIDTH:FRAME_WIDTH*2]=img\n elif index==2:\n #bot left\n canvas[FRAME_HEIGHT:FRAME_HEIGHT*2,0:FRAME_WIDTH]=img\n else:\n #bot right\n canvas[FRAME_HEIGHT:FRAME_HEIGHT*2,FRAME_WIDTH:FRAME_WIDTH*2]=img\n index+=1\n if index>=len(exposures):\n index=0\n\n cv2.imshow(\"frame\",canvas)\n if cv2.waitKey(1) &0xff ==ord('q'):\n #stop the feed the 'q'\n break\n cv2.destroyAllWindows()\n # Deinitialize camera\n cam.DeInit()\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n result = False",
"def startCamera(self):\n if self.video == \"camera\":\n self.cap = cv2.VideoCapture(gstreamer_pipeline(\n capture_width=416, capture_height=416, flip_method=0), cv2.CAP_GSTREAMER)\n else:\n video_path = Path(self.video)\n if not video_path.exists():\n raise Exception(\"Video file not found\")\n self.cap = cv2.VideoCapture(str(video_path))",
"def cozmo_app(coz_conn):\n coz = coz_conn.wait_for_robot()\n coz.camera.image_stream_enabled = True\n coz_ros = CozmoRos(coz)\n coz_ros.run()",
"def camera(ctx, cam_id, analytic_addr, width, height):\n if not analytic_addr:\n analytic_addr = [\"localhost:50051\"]\n db = ctx.obj.db\n client = aceclient.AnalyticMultiClient()\n cap = cv2.VideoCapture(int(cam_id))\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, int(width))\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, int(height))\n classes = {}\n window_names = []\n f_req = analytic_pb2.FrameRequest()\n for a in analytic_addr:\n analytic = analytic_pb2.AnalyticData()\n analytic.addr = a\n f_req.analytics.append(analytic)\n try:\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n print(\"Stream unavailable. Exiting.\")\n break\n resp = analytic_pb2.CompositeResults()\n resp = client.process_frame(frame, f_req, resp)\n print(len(window_names))\n render(resp, window_names, classes, frame, db)\n finally:\n cv2.destroyAllWindows()\n print(\"Shutting down\")",
"def sendCommand(self, command, code):\r\n if self.visprotocol is not None:\r\n self.visprotocol.RequestArm(command.lower(), code)",
"def setCameraToCOM(self):\n pass",
"def dst_nat_into_vrf():\n\t\n device_params = {\n 'device_type': 'mikrotik_routeros',\n 'port': '11209',\n 'username': 'admin'}\n \t\t\n device_params['ip'] = input('IP Address of managed device: ')\n nd_port = input('SSH port. Blank, if default(11209): ')\n if nd_port:\n device_params['port'] = nd_port\n nd_user = input('Username. Blank, if default (admin): ')\n if nd_user:\n device_params['username'] = nd_user\n device_params['password'] = getpass.getpass()\n outside_address = input('Put outside address for dstnat(default - 93.189.145.82): ')\n if not outside_address:\n outside_address = '93.189.145.82'\n #outside_int = input('Put outside interface (default - ether2(DC Kraud outside int)): ')\n #if not outside_port:\n # outside_port = 'ether2'\n outside_port_dstnat = input('Put outside port for dstnat(Public port): ')\n inside_port = input('Put destination port(only port):') \n inside_address = input('Put inside address for dstnat (Inside adress): ')\n commands = []\n commands.append(f'/ip firewall mangle add action=mark-connection chain=prerouting connection-state=new dst-address={outside_address} dst-port={outside_port_dstnat} in-interface=ether2 new-connection-mark=into-vrf passthrough=yes protocol=tcp comment=\"DST_NAT_MANGLE_RULE_BY_SCRIPT FOR LEAKING FROM VRF\"')\n commands.append(f'/ip firewall nat add action=dst-nat chain=dstnat comment=\"DST_NAT_MANGLE_RULE_BY_SCRIPT FOR LEAKING FROM VRF\" dst-address={outside_address} dst-port={outside_port_dstnat} in-interface=ether2 protocol=tcp to-addresses={inside_address} to-ports={inside_port}')\n \n with ConnectHandler(**device_params) as ssh:\n for comm in commands:\n ssh.send_command(comm)\n return print(f'\"{commands[0]}\" and \"{commands[1]}\" are sent to device')",
"def start_cam(cam = 'pi1', host = ' ', port = ' '):\n try:\n # using systemd to manage daemons. {space} is for weird systemd escaping\n space = '\\\\\\\\x20'\n remote_command = f\"ssh -f {cam} systemctl --user restart picamera@'{host}.local{space}{port}'\" \n print(remote_command)\n os.system(remote_command)\n except Exception as exc:\n sys.exit(f'SSH connection to {cam} failed with: {exc}')",
"def __init__(self):\n self.available_angles = [-30, -15, 0, 15, 30]\n self.ros_service = rospy.Service(\"turn_camera\", TurnCamera, self.send_image)",
"def gen():\n global ASK_NAME\n curr_frame = 0\n user_id = None\n mask_on_off = None\n\n cap = cv2.VideoCapture(0)\n # cap = cv2.VideoCapture(\"rtsp://192.168.22.146:8554/mjpeg/1\")\n\n def detect_and_predict_mask(frame, maskNet):\n nonlocal mask_on_off\n face = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n face = cv2.resize(face, (224, 224))\n face = img_to_array(face)\n face = preprocess_input(face)\n\n faces = [face]\n faces = np.array(faces, dtype=\"float32\")\n\n preds = maskNet.predict(faces, batch_size=32)\n # print(preds)\n (mask, withoutMask) = preds[0]\n label = \"Mask\" if mask > withoutMask else \"No Mask\"\n label = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n if mask > withoutMask and mask > 0.9:\n print(label)\n mask_on_off = 1\n r = requests.post(f'http://{BASE_URL}:5000/mask', json={\"mask\": 1})\n if withoutMask > mask and withoutMask > 0.9:\n print(label)\n r = requests.post(f'http://{BASE_URL}:5000/mask', json={\"mask\": 0})\n mask_on_off = 0\n\n def mark_attendance(user_id_detected):\n nonlocal user_id\n print(user_id_detected, 'was seen')\n user_id = user_id_detected\n r = requests.post(\n f'http://{BASE_URL}:5000/face_info', json={\"name\": user_id})\n\n # Read until video is completed\n while CAM_ON:\n # Capture frame-by-frame\n ret, img = cap.read()\n mask_frame = imutils.resize(img, width=400)\n if ret:\n imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)\n imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)\n\n facesCurFrame = face_recognition.face_locations(imgS)\n encodesCurFrame = face_recognition.face_encodings(\n imgS, facesCurFrame)\n\n for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):\n matches = face_recognition.compare_faces(\n encodeListKnown, encodeFace)\n faceDis = face_recognition.face_distance(\n encodeListKnown, encodeFace)\n # print(faceDis)\n matchIndex = np.argmin(faceDis)\n\n if matches[matchIndex]:\n # name = classNames[matchIndex].upper()\n name = label_names[faceLabels[matchIndex]]\n y1, x2, y2, x1 = faceLoc\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n cv2.rectangle(img, (x1, y2 - 35), (x2, y2),\n (0, 255, 0), cv2.FILLED)\n cv2.putText(img, name.upper(), (x1 + 6, y2 - 12),\n cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\n detect_and_predict_mask(mask_frame, maskNet)\n mark_attendance(name)\n else:\n if curr_frame < FRAMES_TO_CAPTURE:\n print(\"Saving Frame\")\n cv2.imwrite(f\"{datetime.datetime.now()}.jpg\", img)\n curr_frame += 1\n else:\n ASK_NAME = True\n cap.release()\n\n frame = cv2.imencode('.jpg', img)[1].tobytes()\n yield b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n'\n time.sleep(0.01)\n else:\n break\n if user_id is not None and mask_on_off is not None:\n print(\"Stopping stream...\")\n cap.release()\n break",
"def _start_vidmemwriter(self, camType, ip=None, inputres=\"640x480\", outputres=\"640x480\"):\n if not self.__vidmemwriter and not self.__server_mode:\n self.__vidmemwriter = vidmemwriter.VidMemWriter([], [])\n\n if camType in self.__video_sources:\n return True\n\n self.__logger.info(\"I'm starting %s\" % camType)\n\n if ros_pattern.match(camType):\n #The first 4 characters \"ros_\" identify that is a specific ros image\n #The second part *** in \"ros_***/topic\" is the encoding:\n topic = camType[4:]\n encoding = \"passthrough\"\n self.__logger.info(\"camType !!!!!! %s\" % camType)\n if not camType[4] == '/':\n str_list = camType.split(\"_\")\n topic = '_'.join(str_list[2:])\n encoding = str_list[1]\n ros_image_source = rosimage.RosImage(topic, encoding)\n\n if self.__server_mode:\n self.__register_video_source(camType, ros_image_source)\n else:\n self.__vidmemwriter.add_video_source(ros_image_source, camType)\n self.__video_sources.append(camType)\n self.__logger.info(\"rosimage started for topic: %s, with encoding: %s\" % (topic, encoding))\n return True\n elif camType == \"webcam\":\n self.__logger.debug(\"I'm starting webcam\")\n webcamsource = takeimages.TakeImages(self.__camera)\n img = webcamsource.get_image()\n if type(img) is type(\"\"):\n self.__logger.error(\"No camera found. Please check connection!\")\n return False\n\n if webcamsource.Nocamera:\n if self.__camera == -1:\n self.__logger.error(\"No camera found. Please check connection!\")\n else:\n self.__logger.error(\"Camera %d not found. Please check connection!\" % self.__camera)\n return False\n if self.__server_mode:\n self.__register_video_source('webcam', webcamsource)\n else:\n self.__vidmemwriter.add_video_source(webcamsource, \"webcam\")\n self.__video_sources.append(\"webcam\")\n self.__logger.info(\"Webcam started\")\n return True\n elif camType == 'kinect_openni':\n self.__logger.debug(\"I'm starting kinect using openni\")\n import util.openni_kinectvideo as kv\n depth_source = kv.OpenNIKinect(\"depth\")\n rgb_source = kv.OpenNIKinect(\"rgb\")\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. Please check connection!\")\n return False\n\n if self.__server_mode:\n self.__register_video_source('kinect_depth', depth_source)\n self.__register_video_source('kinect_rgb', rgb_source)\n else:\n self.__vidmemwriter.add_video_source(depth_source, \"kinect_depth\")\n self.__vidmemwriter.add_video_source(rgb_source, \"kinect_rgb\")\n\n self.__video_sources.append(\"kinect_depth\")\n self.__video_sources.append(\"kinect_rgb\")\n self.__video_sources.append(\"kinect\")\n self.__video_sources.append(\"kinect_openni\")\n \n self.__logger.info(\"Kinect started\")\n return True\n elif camType == 'kinect' or camType == 'kinect_rgb' or camType == 'kinect_depth':\n if self.__use_openni:\n self.__logger.info(\"I'm starting kinect using openni\")\n import util.openni_kinectvideo as kv\n depth_source = kv.OpenNIKinect(\"depth\")\n rgb_source = kv.OpenNIKinect(\"rgb\")\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. Please check connection!\")\n return False\n else:\n self.__logger.info(\"I'm starting kinect using freenect\")\n try:\n import util.kinectmemwriter\n except:\n self.__logger.error(\"Could not load kinectmemwriter module. 
Check modules.\")\n return False\n\n depth_source = util.kinectmemwriter.KinectDepthSource()\n rgb_source = util.kinectmemwriter.KinectRGBSource()\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. Please check connection!\")\n return False\n\n if self.__server_mode:\n self.__register_video_source('kinect_depth', depth_source)\n self.__register_video_source('kinect_rgb', rgb_source)\n else:\n self.__vidmemwriter.add_video_source(depth_source, \"kinect_depth\")\n self.__vidmemwriter.add_video_source(rgb_source, \"kinect_rgb\")\n\n self.__video_sources.append(\"kinect_depth\")\n self.__video_sources.append(\"kinect_rgb\")\n self.__video_sources.append(\"kinect\")\n \n self.__logger.info(\"Kinect started\")\n return True\n elif camType == \"naovideo\":\n self.__logger.debug(\"I'm starting naovideo\")\n try:\n import util.naovideo as naovideo\n except:\n self.__logger.error(\"Could not load naovideo module. Check modules\")\n return False\n #get ip of nao:\n #TODO: fix this dirty hack (it should be read from the config file)\n naoip = \"129.125.178.232\"\n if ip:\n naoip = ip\n \n self.__logger.warn(\"Using input resolution %s and output resolution %s\" % (inputres, outputres))\n #use the naovideo module:\n if self.__camera != 0 and self.__camera != 1:\n self.__camera = 0\n try:\n naocamsource = naovideo.VideoModule(naoip, inputres, outputres, camera=self.__camera)\n naocamsource.get_image()\n except:\n self.__logger.error(\"Something went wrong using the camera of the nao (check connection!)\")\n traceback.print_exc()\n return False\n\n if self.__server_mode:\n self.__register_video_source('naovideo', naocamsource)\n else:\n self.__vidmemwriter.add_video_source(naocamsource, \"naovideo\")\n self.__video_sources.append(\"naovideo\")\n self.__nao_camera = naocamsource\n self.__logger.info(\"Naovideo started\")\n return True\n else:\n self.__logger.warning(\"Invalid video source specified: %s\" % camType)\n return False",
"def __init__(self, source, ip='localhost', port=12345):\n self.ip = ip\n self.port = port\n self.frame = 1\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.frame_buffer = PriorityQueue()\n self.capture_thread = Thread(target=self.capture_video)\n self.sending_thread = Thread(target=self.send_video)\n self.capture_thread.setDaemon(True)\n self.sending_thread.setDaemon(True)\n self.capturing = False\n self.source = source\n self.addr = (self.ip, self.port)\n self.encode_param = [1, 90]#[int(cv2.IMWRITE_JPEG_QUALITY), 90]",
"def send_rtsp_request(self, request_code):\n last_fileName = ''\n if request_code == self.PLAY:\n last_fileName = self.fileName\n self.fileName = self.movie_name_edit.text()\n if request_code == self.SETUP and self.state == self.INIT:\n threading.Thread(target=self.recv_rtsp_reply).start()\n self.rtsp_seq += 1\n self.rtp_port = int(self.rtp_port_edit.text())\n self.rtcp_port = int(self.rtcp_port_edit.text())\n request = 'SETUP ' + self.fileName + ' RTSP/1.0\\nCSeq: ' + str(\n self.rtsp_seq) + '\\nTransport: RTP/UDP; client_port= ' + str(self.rtp_port) + \\\n '\\nProtect: RTCP/TCP; rtcp_port= ' + str(self.rtcp_port)\n self.rtsp_command_send = request_code\n elif request_code == self.PLAY and self.state == self.READY:\n if last_fileName != self.fileName:\n self.play_seconds = 0\n self.curr_frame = 0\n self.time_label.setText('00:00:00')\n self.rtsp_seq += 1\n self.timer.start(1000)\n\n request = 'PLAY ' + self.fileName + ' RTSP/1.0\\nCSeq: ' + str(self.rtsp_seq) + '\\nlevel: ' + \\\n str(self.video_level) + '\\nSession: ' + str(self.session_id) + '\\nRange: ' + str(self.percent)\n self.rtsp_command_send = request_code\n elif request_code == self.PAUSE and self.state == self.PLAYING:\n self.rtsp_seq += 1\n request = 'PAUSE ' + self.fileName + ' RTSP/1.0\\nCSeq: ' + str(self.rtsp_seq) + '\\nSession: ' + str(\n self.session_id)\n self.rtsp_command_send = request_code\n elif request_code == self.TEARDOWN and not self.state == self.INIT:\n self.rtsp_seq += 1\n request = 'TEARDOWN ' + self.fileName + ' RTSP/1.0\\nCSeq: ' + str(self.rtsp_seq) + '\\nSession: ' + str(\n self.session_id)\n self.rtsp_command_send = request_code\n else:\n return\n self.rtsp_socket.send(request.encode())",
"def start_camera(config):\n print(\"Starting {} on {}\".format(config.name, config.path))\n cs = CameraServer.getInstance()\n camera = cs.startAutomaticCapture(name=config.name, path=config.path)\n\n camera.setConfigJson(json.dumps(config.config))\n\n return cs, camera",
"def cameraOn():\n cap = cv2.VideoCapture(CAM0, cv2.CAP_DSHOW) # use camera to monitor the motor-mirror assemnbly by DirectShow\n while(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Display the resulting frame\n cv2.imshow(\" Real-Time Video. Press 'q' to exist.\",frame)\n if cv2.waitKey(8) & 0xFF == ord('q'): #display a frame for 8ms, ~120Hz\n break\n \n cap.release() # release the capture\n cv2.destroyAllWindows()",
"def cam():\n\treturn Response(gen(camera),\n\t\t\t\t\tmimetype='multipart/x-mixed-replace; boundary=frame'), 200",
"def request_capturing(self):\n self.socket.sendall(pack('B', codes['request_pokemon']))\n self.receive_pokemon_suggestion()",
"def homeCameraService(req):\n\n global robot\n\n # home the camera\n robot.camera.reset()\n\n # return status\n return homeCameraResponse(True)",
"def __init__(self, name, location, device_id=uuid.uuid4(), ip=None, cam_user=None, cam_password=None,\n capture_path=None, payload=None, authentication = None, cam_ctrl=None):\n Device.__init__(self, name, \"ipcam\", location, device_id)\n self.ip = ip\n self.cam_user = cam_user\n self.cam_password = cam_password\n self.capture_path = capture_path\n self.payload = payload\n self.authentication = authentication\n self.cam_ctrl = cam_ctrl"
] | [
"0.657208",
"0.59614706",
"0.5739002",
"0.5577688",
"0.5528365",
"0.5405239",
"0.54035485",
"0.5352449",
"0.5316245",
"0.53074706",
"0.5200834",
"0.5174972",
"0.51566505",
"0.51535857",
"0.50904423",
"0.50857383",
"0.50825894",
"0.5082426",
"0.507766",
"0.5062548",
"0.5052165",
"0.50233257",
"0.50120944",
"0.4998169",
"0.49683338",
"0.49636835",
"0.49560326",
"0.49514472",
"0.4903696",
"0.48935607"
] | 0.689005 | 0 |
Attempts to insert the supplied genome. If the genome is inserted, this method will return True, otherwise it will return False. | def try_insert_genome(self, genome):
raise Exception("called abstract insert_genome method") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inserted(self):\n return True",
"def insert(self, row):\n if not self.loaded:\n print(\"Database is not loaded\")\n return False\n\n self.rows.append(row)\n return True",
"def _can_insert(self, index, value):\n return not bool(self._insert_callback(index, value))",
"def test_verify_insert(self):\n self._verify([self.applied_commands['insert']])",
"def is_insert(self) -> bool:\n return self.statement.is_dml and self.statement.is_insert",
"def can_insert(data):\n return hasattr(data, 'read')",
"def insert(self, val):\n if val in self.record:\n return False\n \n self.record[val] = len(self.data)\n self.data.append(val)\n return True",
"def add_genome(self, genome):\n self.genomes.append(genome)",
"def insert(self, val: int) -> bool:",
"def insert(self, val):\n if val in self.dic:\n return False\n else:\n self.data.append(val)\n self.dic[val]=len(self.data)-1\n return True",
"def insert(self, val: int) -> bool:\n if val not in self.set:\n self.set.add(val)\n return True\n return False",
"def insert_node(self, node):\n if self._is_node_reserved(node):\n return False\n\n # Put node in map\n self._node_map[node.get_id()] = node\n return True",
"def insert(self, val: int) -> bool:\n if self.d.get(val):\n return False\n else:\n self.d[val] = True\n return True",
"def insert(self, val):\n if val in self.d:\n return False\n self.d[val] = len(self.l)\n self.l.append(val)\n return True",
"def insert(self, val: int) -> bool:\n if val in self.randomized_hash:\n self.randomized_hash[val].append(len(self.array))\n self.array.append(val)\n return False\n else:\n self.randomized_hash[val] = [len(self.array)]\n self.array.append(val)\n return True",
"def test_0_data_insertion(self):\n s = self.fitness.insert_in_database(self.fitness_dict, date_time=self.dt1)\n self.assertTrue(s)",
"def insert(self, val: int) -> bool:\n if(val not in self.randomSet):\n self.randomSet[val] = 1\n return True\n else:\n return False",
"def test_insert_will_not_duplicate_value(bst_balanced):\n bst_balanced.insert(6)\n assert bst_balanced.size() == 6",
"def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True",
"def insert(self, val: int) -> bool:\n if val in self.map:\n return False\n index = len(self.keys)\n self.map[val] = index\n self.keys.append(val)\n return True",
"def insert(self, val):\n new_item = False\n if val not in self.ds:\n self.ds.add(val)\n self.keys.append(val)\n new_item = True\n return new_item",
"def insert(self):\n pass",
"def has_insert(self, shape):\n for insert in self.inserts:\n if insert.shape == shape:\n return True\n return False",
"def insert(self, val):\n if val in self.map:\n return False\n \n self.nums.append(val)\n self.map[val] = len(self.nums) - 1\n \n return True",
"def insert(self, val):\n res = val in self.map\n idx = len(self.vec)\n if res:\n self.map[val].append(idx)\n self.vec.append(val)\n else:\n self.map[val] = [idx]\n self.vec.append(val)\n return not res",
"def insert(self, val: int) -> bool:\n \n retVal = True if val not in self.map else False\n if retVal:\n self.map[val] = len(self.arr)\n self.arr.append(val)\n return retVal",
"def insert(self, val):\n if val not in self.posFind or self.posFind[val] == -1:\n self.nums.append(val)\n self.posFind[val] = len(self.nums) - 1\n return True\n return False",
"def insert(self, val: int) -> bool:\n if self.store_dict.get(val) != None:\n return False\n self.store_list.append(val)\n self.store_dict[val] = len(self.store_list) - 1\n return True",
"def inject_genome(self, genome: Genome):\n self.population[genome.key] = genome",
"def isPostInsert(self):\n raise ProofException.ProofNotImplementedException( \\\n \"IdGenerator.isPostInsert: need to be overrided.\" )"
] | [
"0.6346422",
"0.6159592",
"0.60494566",
"0.57980555",
"0.578149",
"0.57695425",
"0.5667096",
"0.56136864",
"0.5602667",
"0.5582492",
"0.5562881",
"0.5556916",
"0.55056053",
"0.54988134",
"0.54925364",
"0.54842436",
"0.5480843",
"0.547767",
"0.54687303",
"0.54569113",
"0.5446923",
"0.5444414",
"0.5444406",
"0.54370433",
"0.54211825",
"0.54141146",
"0.53800344",
"0.5377163",
"0.5376591",
"0.5349561"
] | 0.8167088 | 0 |
Instantiate an evaluator class. | def build_evaluator(cfg: CfgNode) -> EvaluatorBase:
name = cfg["name"]
evaluator = simple_build(name, cfg, EVALUATORS)
return evaluator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluator(self, evaluator):\n self.__evaluator = evaluator",
"def _create_evaluators(self):\n pass",
"def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)",
"def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)",
"def __init__(\r\n self,\r\n generator,\r\n mode,\r\n tensorboard=None,\r\n verbose=1,\r\n **kwargs\r\n ):\r\n self.generator = generator\r\n\r\n if mode == 'recall':\r\n self.evaluate = eval_recall\r\n elif mode == 'accuracy':\r\n self.evaluate = eval_accuracy\r\n elif mode == 'mAP':\r\n self.evaluate = eval_mAP\r\n else:\r\n raise ValueError('unsupported evaluation callback mode')\r\n self.mode = mode\r\n\r\n self.tensorboard = tensorboard\r\n self.verbose = verbose\r\n self.kwargs = kwargs\r\n\r\n super(Evaluate, self).__init__()",
"def sub_evaluator(self, ast: lark.Tree) -> 'Evaluator':\n return Evaluator(ast, activation=self.activation, functions=self.functions)",
"def __new__(cls,\n input_fn,\n steps=100,\n name=None,\n hooks=None,\n exporters=None,\n delay_secs=120,\n throttle_secs=600):\n # Validate input_fn.\n _validate_input_fn(input_fn)\n\n # Validate steps.\n if steps is not None and steps <= 0:\n raise ValueError('Must specify steps > 0, given: {}'.format(steps))\n\n # Validate name.\n if name is not None and not isinstance(name, six.string_types):\n raise TypeError('`name` must be string, given: {}'.format(name))\n\n # Validate hooks.\n hooks = _validate_hooks(hooks)\n\n # Validate exporters.\n exporters = _validate_exporters(exporters)\n\n # Validate delay_secs.\n if delay_secs < 0:\n raise ValueError(\n 'Must specify delay_secs >= 0, given: {}'.format(delay_secs))\n\n # Validate throttle_secs.\n if throttle_secs < 0:\n raise ValueError(\n 'Must specify throttle_secs >= 0, given: {}'.format(throttle_secs))\n\n return super(EvalSpec, cls).__new__(\n cls,\n input_fn=input_fn,\n steps=steps,\n name=name,\n hooks=hooks,\n exporters=exporters,\n delay_secs=delay_secs,\n throttle_secs=throttle_secs)",
"def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type == \"sem_seg\":\n return SemSegEvaluator(\n dataset_name,\n distributed=True,\n output_dir=output_folder,\n num_classes=4,\n ignore_label=255\n )\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)",
"def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)",
"def create_eval(self):\n self.ev_id = \"ev-\" + base64.b32encode(os.urandom(10)).decode(\"ascii\")\n self.ev_name = \"Evaluation: \" + self.ml_name\n self._ml.create_evaluation(\n EvaluationId=self.ev_id,\n EvaluationName=self.ev_name,\n MLModelId=self.ml_id,\n EvaluationDataSourceId=self.fold.eval_ds_id\n )\n logger.info(\"Created Evaluation \" + self.ev_id)",
"def evaluator(self):\n return self.__evaluator",
"def setup_evaluation(evalfile, solufile, tolerance, evalstring=False):\n if evalstring:\n evaluation = IPETEvaluation.fromXML(evalfile)\n else:\n evaluation = IPETEvaluation.fromXMLFile(evalfile[\"path\"])\n\n evaluation.set_grouptags(True)\n evaluation.set_validate(solufile)\n evaluation.set_feastol(tolerance)\n return evaluation",
"def __init__(self, generators: List[Generator] = None, evaluators: List[Evaluator] = None): # noqa: E501\n self.swagger_types = {\n 'generators': List[Generator],\n 'evaluators': List[Evaluator]\n }\n\n self.attribute_map = {\n 'generators': 'generators',\n 'evaluators': 'evaluators'\n }\n self._generators = generators\n self._evaluators = evaluators",
"def __init__(self, grid_points, metrics_eval_func=None):\n self.grid_points = grid_points\n self.metrics_eval_func = metrics_eval_func or self._create_default_metrics_eval_func(grid_points)",
"def eval(self):\n raise NotImplementedError('Must define eval function to use this base class')",
"def instantiate(cls):\n default_xml = '<condition class=\"{0}\" plugin=\"[email protected]\"/>'\n default_xml = default_xml.format(cls.get_jenkins_plugin_name())\n root_node = ElementTree.fromstring(default_xml)\n\n return cls(root_node)",
"def clf_eval():\n y_true = np.random.randint(2, size=10000)\n y_pred = np.clip(np.random.normal(0.25, 0.3, size=y_true.shape) + y_true * 0.5, 0, 1)\n\n model_eval = ClassificationEvaluation(\n y_true=y_true,\n y_pred=y_pred,\n class_names=['a', 'b'],\n model_name='foo',\n )\n return model_eval",
"def __init__(self, array: Tuple[int, ...]) -> None:\n self.evaluate: Callable[[str], int] = \\\n lambda program: FitnessEvaluator._evaluate(array, program)",
"def __init__(self, expr: typing.Callable[[], typing.Any]):\n\n self.expr = expr",
"def evaluator(evaluate):\r\n @functools.wraps(evaluate)\r\n def ecspy_evaluator(candidates, args):\r\n fitness = []\r\n for candidate in candidates:\r\n fitness.append(evaluate(candidate, args))\r\n return fitness\r\n ecspy_evaluator.single_evaluation = evaluate\r\n return ecspy_evaluator",
"def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None):\n self.scorer = UniEvaluator(\n model_name_or_path='MingZhong/unieval-fact' if model_name_or_path == \"\" else model_name_or_path,\n max_length=max_length,\n device=device,\n cache_dir=cache_dir)\n self.task = 'fact'\n self.dim = 'consistency'",
"def __init__(\n self,\n eval_fn: Callable[[Posting], Union[str, None]] = lambda p: None\n ):\n self.eval_fn = eval_fn",
"def eval(cls, *args):\n raise NotImplementedError(\"subclasses need to override this method\")",
"def build_evaluate_helper(cfg: CfgNode) -> EvaluateHelper:\n evaluator = build_evaluator(cfg.evaluator)\n helper = EvaluateHelper(evaluator)\n return helper",
"def __init__(self, plant, orderList, simulator, evaluator):\n\t\tassert plant != None\n\t\tassert orderList != None\n\t\t\n\t\tself.plant = plant\n\t\tself.orderList = orderList\n\t\tself.simulator = simulator\n\t\tself.evaluator = evaluator\n\t\t\n\t\t# used for benchmarking\n\t\tself.simulatorTime = 0\n\t\t\n\t\t# enable/disable console output\n\t\tself.printing = True\n\t\t\n\t\t# parameters for the evolution strategy algorithm\n\t\tself.populationSize = 0\n\t\tself.indivMutationRate = 0\n\t\tself.selectionRate = 0\n\t\tself.mutationRange = 0\n\t\tself.iterations = 0",
"def __init__(self, md, ev=None, var=None, out=None):\n self.model = md\n\n ## Construct default evaluator\n if ev is None:\n\n def _ev(md, df):\n df_res = md.evaluate_df(df)\n return df_res[md.out]\n\n self.ev = _ev\n self.var = self.model.var\n self.out = self.model.out\n\n ## Use given evaluator\n else:\n self.ev = ev\n self.var = var\n self.out = out\n\n ## Copy model data\n self.runtime = md.runtime(1)\n self.name = copy.copy(md.name)",
"def _instantiate(cls, **kwargs):\n return cls(**kwargs)",
"def _evaluation():\n return {\n 'type' : 'class',\n 'name' : 'evaluation',\n 'base' : None,\n 'is_abstract' : False,\n 'doc' : None,\n 'properties' : [\n ('date', 'datetime', '0.1', None),\n ('description', 'str', '0.1', None),\n ('did_pass', 'bool', '0.1', None),\n ('explanation', 'str', '0.1', None),\n ('specification', 'str', '0.1', None),\n ('specification_hyperlink', 'str', '0.1', None),\n ('type', 'str', '0.1', None),\n ('type_hyperlink', 'str', '0.1', None),\n ('title', 'str', '0.1', None),\n ],\n 'decodings' : [\n ('date', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date'),\n ('description', 'gmd:evaluationMethodDescription/gco:CharacterString'),\n ('did_pass', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean'),\n ('explanation', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:explanation/gco:CharacterString'),\n ('type', 'child::gmd:result/@xlink:title'),\n ('type_hyperlink', 'child::gmd:result/@xlink:href'),\n ('specification', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/@xlink:title'),\n ('specification_hyperlink', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/@xlink:href'),\n ('title', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString'),\n ]\n }",
"def evaluator(self, candidates, args):\n\t\traise NotImplementedError",
"def evaluator(self, candidates, args):\r\n raise NotImplementedError"
] | [
"0.66867024",
"0.6603871",
"0.6451372",
"0.644928",
"0.6330437",
"0.6274948",
"0.61652106",
"0.6148524",
"0.6130907",
"0.5967345",
"0.58819443",
"0.5654034",
"0.56327844",
"0.56159383",
"0.55843073",
"0.5571741",
"0.5563956",
"0.5515962",
"0.5513957",
"0.5500064",
"0.5477984",
"0.5454834",
"0.54501665",
"0.54188055",
"0.5409975",
"0.53916436",
"0.5371526",
"0.53121454",
"0.5309488",
"0.5308969"
] | 0.68942183 | 0 |
Instantiate an evaluate helper class. | def build_evaluate_helper(cfg: CfgNode) -> EvaluateHelper:
evaluator = build_evaluator(cfg.evaluator)
helper = EvaluateHelper(evaluator)
return helper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_evaluators(self):\n pass",
"def evaluator(evaluate):\r\n @functools.wraps(evaluate)\r\n def ecspy_evaluator(candidates, args):\r\n fitness = []\r\n for candidate in candidates:\r\n fitness.append(evaluate(candidate, args))\r\n return fitness\r\n ecspy_evaluator.single_evaluation = evaluate\r\n return ecspy_evaluator",
"def build_evaluator(cfg: CfgNode) -> EvaluatorBase:\n name = cfg[\"name\"]\n evaluator = simple_build(name, cfg, EVALUATORS)\n return evaluator",
"def eval(self):\n raise NotImplementedError('Must define eval function to use this base class')",
"def __new__(cls,\n input_fn,\n steps=100,\n name=None,\n hooks=None,\n exporters=None,\n delay_secs=120,\n throttle_secs=600):\n # Validate input_fn.\n _validate_input_fn(input_fn)\n\n # Validate steps.\n if steps is not None and steps <= 0:\n raise ValueError('Must specify steps > 0, given: {}'.format(steps))\n\n # Validate name.\n if name is not None and not isinstance(name, six.string_types):\n raise TypeError('`name` must be string, given: {}'.format(name))\n\n # Validate hooks.\n hooks = _validate_hooks(hooks)\n\n # Validate exporters.\n exporters = _validate_exporters(exporters)\n\n # Validate delay_secs.\n if delay_secs < 0:\n raise ValueError(\n 'Must specify delay_secs >= 0, given: {}'.format(delay_secs))\n\n # Validate throttle_secs.\n if throttle_secs < 0:\n raise ValueError(\n 'Must specify throttle_secs >= 0, given: {}'.format(throttle_secs))\n\n return super(EvalSpec, cls).__new__(\n cls,\n input_fn=input_fn,\n steps=steps,\n name=name,\n hooks=hooks,\n exporters=exporters,\n delay_secs=delay_secs,\n throttle_secs=throttle_secs)",
"def eval(cls, *args):\n raise NotImplementedError(\"subclasses need to override this method\")",
"def __init__(\r\n self,\r\n generator,\r\n mode,\r\n tensorboard=None,\r\n verbose=1,\r\n **kwargs\r\n ):\r\n self.generator = generator\r\n\r\n if mode == 'recall':\r\n self.evaluate = eval_recall\r\n elif mode == 'accuracy':\r\n self.evaluate = eval_accuracy\r\n elif mode == 'mAP':\r\n self.evaluate = eval_mAP\r\n else:\r\n raise ValueError('unsupported evaluation callback mode')\r\n self.mode = mode\r\n\r\n self.tensorboard = tensorboard\r\n self.verbose = verbose\r\n self.kwargs = kwargs\r\n\r\n super(Evaluate, self).__init__()",
"def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)",
"def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)",
"def evaluate(self):\n pass",
"def evaluate(self):\n pass",
"def eval(self, *args, **kwargs):\n raise NotImplementedError",
"def evaluate(self):\n raise NotImplementedError()",
"def sub_evaluator(self, ast: lark.Tree) -> 'Evaluator':\n return Evaluator(ast, activation=self.activation, functions=self.functions)",
"def evaluator(self, candidates, args):\n\t\traise NotImplementedError",
"def evaluator(self, candidates, args):\r\n raise NotImplementedError",
"def evaluate(self) :\n pass",
"def evaluator(self, evaluator):\n self.__evaluator = evaluator",
"def eval(self):\n raise NotImplementedError",
"def evaluate(self):\n raise NotImplementedError(\"Abstract method\")",
"def _evaluation():\n return {\n 'type' : 'class',\n 'name' : 'evaluation',\n 'base' : None,\n 'is_abstract' : False,\n 'doc' : None,\n 'properties' : [\n ('date', 'datetime', '0.1', None),\n ('description', 'str', '0.1', None),\n ('did_pass', 'bool', '0.1', None),\n ('explanation', 'str', '0.1', None),\n ('specification', 'str', '0.1', None),\n ('specification_hyperlink', 'str', '0.1', None),\n ('type', 'str', '0.1', None),\n ('type_hyperlink', 'str', '0.1', None),\n ('title', 'str', '0.1', None),\n ],\n 'decodings' : [\n ('date', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date'),\n ('description', 'gmd:evaluationMethodDescription/gco:CharacterString'),\n ('did_pass', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean'),\n ('explanation', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:explanation/gco:CharacterString'),\n ('type', 'child::gmd:result/@xlink:title'),\n ('type_hyperlink', 'child::gmd:result/@xlink:href'),\n ('specification', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/@xlink:title'),\n ('specification_hyperlink', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/@xlink:href'),\n ('title', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString'),\n ]\n }",
"def evaluator(self):\n return self.__evaluator",
"def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type == \"sem_seg\":\n return SemSegEvaluator(\n dataset_name,\n distributed=True,\n output_dir=output_folder,\n num_classes=4,\n ignore_label=255\n )\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)",
"def eval(self):\n pass",
"def eval(self):\n pass",
"def eval(self):\n pass",
"def custom_build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n dump_train = cfg.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)",
"def clf_eval():\n y_true = np.random.randint(2, size=10000)\n y_pred = np.clip(np.random.normal(0.25, 0.3, size=y_true.shape) + y_true * 0.5, 0, 1)\n\n model_eval = ClassificationEvaluation(\n y_true=y_true,\n y_pred=y_pred,\n class_names=['a', 'b'],\n model_name='foo',\n )\n return model_eval",
"def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)",
"def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception(\"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn)"
] | [
"0.64568245",
"0.6257798",
"0.6237631",
"0.6131028",
"0.6065035",
"0.60551",
"0.60094994",
"0.5996954",
"0.59962183",
"0.59601563",
"0.59601563",
"0.59439415",
"0.5902795",
"0.589313",
"0.5882588",
"0.58795404",
"0.58464766",
"0.5829064",
"0.5801519",
"0.5770252",
"0.57448375",
"0.5722661",
"0.5693077",
"0.5693014",
"0.5693014",
"0.5693014",
"0.5674838",
"0.56657946",
"0.56647134",
"0.5660649"
] | 0.70881444 | 0 |
Plot the y column against the x column for each df on a common graph. | def plot(x, y, *dfs):
ax = None
for df in dfs:
ax = df[[x, y]].set_index(x).plot(kind='line', ylim=(0, None), xlim=(0, None), ax=ax) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_plot(ax, dfs, legend, x, y, xscale, yaxis_max):\n xticks = dfs_all_values(dfs, x)\n # loop over all pandas.DataFrame objects\n for df in dfs:\n # setting the x-column as an index is required to draw the y-column\n # as a function of x argument\n df = df.set_index(x)\n # plot line on the subplot\n df[y].plot.line(ax=ax, rot=45, marker='.')\n\n if xscale == \"linear\":\n ax.set_xscale(xscale)\n else:\n ax.set_xscale(xscale, base=2)\n ax.xaxis.set_major_formatter(ScalarFormatter())\n\n ax.set_xticks(xticks)\n ax.set_xlabel(get_label(x))\n ax.set_ylabel(get_label(y))\n ax.set_ylim(bottom=0)\n if yaxis_max is not None:\n ax.set_ylim(top=float(yaxis_max))\n ax.legend(legend, fontsize=6)\n ax.grid(True)",
"def plot2D(*dfs, columns=None, figsize=(5, 5), plot_titles=False):\n fig, ax = plt.subplots(figsize=figsize)\n\n for df, color in zip(dfs, cycle(COLORS)):\n X, Y = (df[col] for col in columns)\n plt.scatter(X, Y, c=color, marker=MARKER)\n\n for axis, col in zip(['x', 'y'], columns):\n getattr(ax, f'set_{axis}label')(col)\n\n if plot_titles:\n for df in dfs:\n for i, j, text in zip(df.iloc[:, 0], df.iloc[:, 1], df.index):\n corr = 2\n ax.annotate(text, xy=(i + corr, j + corr))\n\n plt.show()",
"def plot_graph(self) -> None:",
"def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()",
"def graph(df):\n df.plot()\n plt.show()",
"def show_graphs ():\n plt.ylim = (0, 300)\n plt.xlim = (0, 300)\n #Set up lidar plot to figure 1\n lidar_plot = plt.figure (1)\n #Assign title\n plt.title ('Lidar data')\n #Assign data\n plt.imshow (lidar_clean)\n #Set up radar plot to figure 2\n radar_plot = plt.figure (2)\n #Assign title\n plt.title ('Radar data')\n #Assign data\n plt.imshow (radar_clean)\n #Show plots\n plt.show ()",
"def show_graphs(self):\n self.frequency_plot_graph.show()\n self.resistance_graph.show()\n self.temperature_plot_graph.show()\n self.pressure_plot_graph.show()\n self.humidity_plot_graph.show()\n self.overview_graph.show()\n self.overview_graph.setXRange(-1000, 5000)",
"def plot_dat_file(dat_paths: [str]):\n import pandas as pd\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots(1, 3, sharey=\"all\", sharex=\"col\", figsize=(8, 6))\n for i, dat_path in enumerate(dat_paths):\n if i == i:\n skipfoot = 11 + 9\n else:\n skipfoot = 11\n dat_file = pd.read_csv(\n dat_path,\n skiprows=3,\n skipfooter=skipfoot,\n header=None,\n delim_whitespace=True,\n engine=\"python\",\n )\n depth = dat_file.values[:, 0]\n vp = dat_file.values[:, 1]\n vs = dat_file.values[:, 3]\n dens = dat_file.values[:, 5]\n\n ax[0].plot(vp, depth, label=f\"nr {i}\")\n\n ax[1].plot(vs, depth)\n ax[2].plot(dens, depth)\n ax[0].set_ylim(ax[0].get_ylim()[::-1])\n ax[0].legend()\n plt.show()",
"def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)",
"def plot_2D(df):\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,6))\n fig.clf()\n #Get the current Axes instance on the current figure matching the given \n #keyword args, or create one.\n ax = fig.gca()\n df.plot(kind = 'scatter', x = 'x', y = 'y', ax = ax, alpha = 0.5)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_title('X vs. Y')\n return 'Done'",
"def setup_axes(self):\n fig = plt.figure(1)\n axs = fig.add_subplot(1, 1, 1)\n fig.clf()\n axs = plt.subplots(1, 2)\n ax1 : plt.axis = axs[0]\n ax2 : plt.axis = axs[1]\n fig.canvas.draw()\n \n line1_t, = ax1.plot([], label='train')\n line1_v, = ax1.plot([], label='val')\n\n ax1.set_title('Loss vs Iterations')\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Loss')\n ax1.grid(True)\n ax1.autoscale()\n # ax1.legend()\n\n line2_t, = ax2.plot([], label='train')\n line2_v, = ax2.plot([], label='val')\n\n ax2.set_title('Accuracy vs Iterations')\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Percent Accuracy')\n ax2.grid(True)\n ax2.autoscale()\n # ax2.legend()\n\n lines = [line1_t, line1_v, line2_t, line2_v]\n\n return fig, ax1, ax2, lines",
"def plot_xdop_distribution(dRtk: dict, dfXDOP: pd.DataFrame, dfXDOPdisp: pd.DataFrame, logger: logging.Logger, showplot: bool = False):\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n logger.info('{func:s}: creating XDOP distribution plot'.format(func=cFuncName))\n\n # select colors for xDOP coordinate difference\n colors = ('blue', 'green', 'cyan', 'red')\n\n # set up the plot\n plt.style.use('ggplot')\n\n # subplots\n fig = plt.figure(figsize=(14.0, 9.0), tight_layout=False)\n fig.suptitle('{syst:s} - {posf:s} - {date:s}: XDOP'.format(posf=dRtk['info']['rtkPosFile'], syst=dRtk['syst'], date=dRtk['Time']['date']))\n\n # create a grid for lotting the XDOP line plots and 6 XDOP distribution plots\n gs = GridSpec(2, 4)\n\n # plot the XDOPs and #SVs on the first axis\n ax = fig.add_subplot(gs[0, :]) # first row, span all columns\n plot_xdop_svs(dfDops=dfXDOP, colors=colors, axis=ax, logger=logger)\n\n # add the xDOP distributions\n axisShare = None\n for col, xdop, color in zip((0, 1, 2, 3), dfXDOPdisp.columns[-4:], colors):\n # create exis for this figure\n if axisShare is None:\n ax = fig.add_subplot(gs[1, col])\n axisShare = ax\n else:\n ax = fig.add_subplot(gs[1, col], sharey=axisShare)\n # ax.get_yaxis().set_ticklabels([])\n ax.tick_params(labelleft=False)\n\n # plot distribution for a DOP value\n plot_xdop_histogram(dfDopsDist=dfXDOPdisp, xdop=xdop, color=color, axis=ax, logger=logger)\n\n # save the plot in subdir png of GNSSSystem\n amutils.mkdir_p(os.path.join(dRtk['info']['dir'], 'png'))\n pngName = os.path.join(dRtk['info']['dir'], 'png', os.path.splitext(dRtk['info']['rtkPosFile'])[0] + '-XDOP.png')\n fig.savefig(pngName, dpi=fig.dpi)\n\n if showplot:\n plt.show(block=True)\n else:\n plt.close(fig)",
"def plot_datasets(datasets):\n\n\t# plt.grid(True)\n\n\tfor ds in datasets:\n\t\t(f, ax) = plt.subplots()\n\n\t\tax.grid(True)\n\n\t\tif 'xl' in ds:\n\t\t\tax.set_xlabel(ds['xl'])\n\t\tif 'yl' in ds:\n\t\t\tax.set_ylabel(ds['yl'])\n\n\t\tif 'xl' in ds and 'yl' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl'])\n\t\t\tf.canvas.set_window_title(title)\n\n\t\tif 'x' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl']) if 'title' not in ds else ds['title']\n\t\t\tf.canvas.set_window_title(title)\n\t\t\tmarker = 'y1m' in ds and ds['y1m'] or None\n\t\t\tax.plot(ds['x'], ds['y'], label=ds['yl'], marker=marker)\n\t\tif 'x2' in ds:\n\t\t\t# label = \"y2\" if 'y2l' not in ds else ds['y2l']\n\t\t\tlabel = 'y2l' in ds and ds['y2l'] or 'y2'\n\t\t\tmarker = 'y2m' in ds and ds['y2m'] or None\n\t\t\tax.plot(ds['x2'], ds['y2'], label=label, marker=marker)\n\t\t\tax.legend()\n\t\tif 'x3' in ds:\n\t\t\t# label = \"y3\" if 'y3l' not in ds else ds['y3l']\n\t\t\tlabel = 'y3l' in ds and ds['y3l'] or 'y3'\n\t\t\tmarker = 'y3m' in ds and ds['y3m'] or None\n\t\t\tax.plot(ds['x3'], ds['y3'], label=label, marker=marker)\n\t\t\tax.legend()\n\n\t\tif 'sub' in ds:\n\t\t\tfor sub in ds['sub']:\n\t\t\t\t# ax.set_ylabel(sub['yl'])\n\t\t\t\t# ax.set_xlabel(sub['xl'])\n\t\t\t\t# title = \"%s from %s\" % (sub['yl'], sub['xl']) if 'title' not in sub else sub['title']\n\t\t\t\t# f.canvas.set_window_title(title)\n\n\t\t\t\tlabel = 'yl' in sub and sub['yl']\n\t\t\t\tmarker = 'ym' in sub and sub['ym'] or None\n\t\t\t\tax.plot(sub['x'], sub['y'], label=label, marker=marker)\n\t\t\t\tax.legend()\n\n\t\tax.spines['left'].set_position('zero')\n\t\tax.spines['bottom'].set_position('zero')\n\t\tax.spines['left'].set_smart_bounds(True)\n\t\tax.spines['bottom'].set_smart_bounds(True)\n\n\tplt.show()",
"def plot_all(self):\n self.plot_ramps()\n self.plot_groupdq()",
"def plot_all_df_columns(df, col_nums, title='', xlabel=''):\n i = 1\n values = df.values\n for col in col_nums:\n plt.subplot(len(col_nums), 1, i)\n plt.plot(values[:, col])\n plt.title(title)\n plt.ylabel(dr_df.columns[col])\n plt.xlabel(xlabel)\n i += 1\n plt.tight_layout()\n plt.show()",
"def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()",
"def plot(self, data_frame):\n self.axes.plot(data_frame, 'o-')\n self.axes.set_ylim(0.0, 200.0)\n self.fig.autofmt_xdate()\n self.draw()",
"def show(dfs):\n\n for df in dfs:\n print('{} -> {}'.format(df[0], df[1]))",
"def showVs(df, feat1, feat2):\n colors = ['blue', 'red', 'green', 'coral']\n for u in range(len(cBouts)):\n plt.plot(f[f['clust_ind'] == u][feat1],\n f[f['clust_ind'] == u][feat2], 'o', color=colors[u],\n alpha=0.6, markeredgecolor='none')\n plt.xlabel(feat1)\n plt.ylabel(feat2)\n plt.show()\n return",
"def plot_xdop_svs(dfDops: pd.DataFrame, colors: tuple, axis, logger: logging.Logger):\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n logger.info('{func:s}: creating XDOP / #SVs vs time plot'.format(func=cFuncName))\n\n axis.set_ylim([0, 24])\n axis.set_ylabel('#SVs [-]', fontsize='large', color='grey')\n # axis.set_xlabel('Time [sec]', fontsize='large')\n\n axis.fill_between(dfDops['DT'], 0, dfDops['#SVs'], alpha=0.5, linestyle='-', linewidth=3, color='grey', label='#SVs', interpolate=False)\n # plot PDOP on second y-axis\n axRight = axis.twinx()\n\n axRight.set_ylim([0, 15])\n axRight.set_ylabel('XDOP [-]', fontsize='large')\n\n # plot XDOPs (last 4 columns)\n for dop, color in zip(dfDops.columns[-4:], colors):\n axRight.plot(dfDops['DT'], dfDops[dop], linestyle='-', marker='.', markersize=1, color=color, label=dop)\n\n # add the legend to the plot\n axRight.legend(loc=\"upper right\")\n\n # set title\n axis.set_title('Visible satellites & XDOP', fontsize='x-large')\n\n # create the ticks for the time axis\n dtFormat = plot_utils.determine_datetime_ticks(startDT=dfDops['DT'].iloc[0], endDT=dfDops['DT'].iloc[-1])\n\n if dtFormat['minutes']:\n # axis.xaxis.set_major_locator(dates.MinuteLocator(byminute=range(10, 60, 10), interval=1))\n pass\n else:\n axis.xaxis.set_major_locator(dates.HourLocator(interval=dtFormat['hourInterval'])) # every 4 hours\n axis.xaxis.set_major_formatter(dates.DateFormatter('%H:%M')) # hours and minutes\n\n axis.xaxis.set_minor_locator(dates.DayLocator(interval=1)) # every day\n axis.xaxis.set_minor_formatter(dates.DateFormatter('\\n%d-%m-%Y'))\n\n axis.xaxis.set_tick_params(rotation=0)\n for tick in axis.xaxis.get_major_ticks():\n # tick.tick1line.set_markersize(0)\n # tick.tick2line.set_markersize(0)\n tick.label1.set_horizontalalignment('center')",
"def plot_main(self):\n\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n self.data_plot(ax=axes[0, 0])\n self.model_plot(ax=axes[0, 1])\n self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)\n self.source_plot(ax=axes[1, 0], convolution=False, deltaPix_source=0.01, numPix=100)\n self.convergence_plot(ax=axes[1, 1], v_max=1)\n self.magnification_plot(ax=axes[1, 2])\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes",
"def plot_graph():\n name = request.args.get('instance')\n name = str(name)\n distance = request.args.get('distance')\n path = request.args.get('path')\n if name == 'Custom':\n coords = request.args.get('coords')\n coords = str(coords)\n nodes = custom_nodes(coords)\n else:\n nodes = create_nodes(name)\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n axis.set_title(name + \" - Distance: \"+ str(distance))\n path = str(path).split(',')\n path = [int(i) for i in path]\n for i in range(len(path) - 1):\n\n start_node = nodes[path[i]]\n x1, y1 = start_node.x, start_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[i]))\n axis.text(x1,y1, str(path[i]))\n end_node = nodes[path[i+1]]\n x2, y2 = end_node.x, end_node.y\n axis.plot([x1,x2], [y1, y2])\n\n last_node = nodes[path[len(path)-1]]\n x1, y1 = last_node.x, last_node.y\n axis.text(x1,y1, str(path[len(path)-1]))\n\n begin_node = nodes[path[0]]\n x2, y2 = begin_node.x, begin_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[len(path)-1]))\n axis.plot([x1,x2], [y1, y2])\n\n output = io.BytesIO()\n FigureCanvas(fig).print_png(output)\n return Response(output.getvalue(), mimetype=\"image/png\")",
"def multi_plot(data, fname=None):\n for entry in data['data']:\n plt.plot(entry['x'], entry['y'], label=entry['label'])\n\n plt.title(data['title'])\n plt.xlabel(data['x_label'])\n plt.ylabel(data['y_label'])\n\n #plt.legend(loc='best')\n\n Plotter.show(data['title'], fname=fname)",
"def dyplot(self, x, y, name, dir):\n fig, ax1 = plt.subplots(figsize=(6, 4), dpi=500, facecolor='white')\n ax1.plot(x, '-b*', ms=2, linewidth=1)\n ax1.set_xlabel('Epoch', fontsize=9)\n ax1.set_ylabel('Discriminator Loss per Epoch', fontsize=9, color='b')\n ax1.tick_params('y', colors='b')\n\n ax2 = ax1.twinx()\n ax2.plot( y, '-r*', ms=2, linewidth=1)\n ax2.set_ylabel('Generator Loss per Epoch', fontsize=9, color='r')\n ax2.tick_params('y', colors='r')\n fig.tight_layout()\n plt.savefig('{}/{}.png'.format(dir, 'Loss-Adversarial-' + name))\n plt.close()",
"def plotLines( self ):\n \n ## plot tree in dfs manner\n def plotLines( node_id ):\n\n node = self.mTree.node( node_id )\n\n left = self.mNodeWidthsStart[node_id]\n right = self.mNodeWidthsEnd[node_id]\n height = self.mNodeHeights[node_id] \n\n if right != left and node_id != self.mTree.root:\n self.addElements( self.mDecoratorHorizontalBranches.getElements(\n node_id,\n self.getHeaderWidth() + left,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height ))\n \n\n for s in node.succ:\n\n new_height = self.mNodeHeights[s]\n self.addElements( self.mDecoratorVerticalBranches.getElements(\n node_id,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height,\n self.getHeaderHeight() + new_height ))\n \n TreeTools.TreeDFS( self.mTree, self.mTree.root,\n pre_function = plotLines )",
"def plot_graph(self):\n g = self.get_graph()\n plt.title(\"Our graph:\" + g.__str__())\n plt.xlabel(\"X\")\n plt.ylabel(\"-<\") # I should flip 'Y' letter so I decided to write it by a tricky way. :)\n for src, node in g.get_all_v().items():\n # Print the node point\n if node.location is None:\n pos = self.get_random_location() # get a elegant location\n node.location = GeoLocation(pos)\n plt.plot(node.location.x, node.location.y, marker='o', markerfacecolor='red', markersize=3, color='yellow')\n plt.text(node.location.x, node.location.y, str(node.key))\n # Print the edge line\n for dest in g.all_out_edges_of_node(src).keys():\n x1 = g.get_all_v()[src].location.x\n y1 = g.get_all_v()[src].location.y\n if g.get_all_v()[dest].location is None:\n pos = self.get_random_location()\n g.get_all_v()[dest].location = GeoLocation(pos)\n g.get_all_v()[dest].location = GeoLocation(pos)\n x2 = g.get_all_v()[dest].location.x\n y2 = g.get_all_v()[dest].location.y\n plt.arrow(x1, y1, x2 - x1, y2 - y1, width=0.00001, linewidth=0.05)\n plt.show()",
"def plot_graphs(x_values, y_values, x_label, y_label, title, path, legend=None):\n # If `x_values.ndim` and `y_values.ndim` are equal to 2\n # and `x_values.shape[0]` is equal to `y_values.shape[1]`\n # for instance, `plot_graphs` does not crash and saves\n # a wrong plot. That is why `x_values.ndim` and `y_values.ndim`\n # are checked.\n if x_values.ndim != 1:\n raise ValueError('`x_values.ndim` is not equal to 1.')\n if y_values.ndim != 2:\n raise ValueError('`y_values.ndim` is not equal to 2.')\n \n # Matplotlib is forced to display only\n # whole numbers on the x-axis if the\n # x-axis values are integers. Matplotlib\n # is also forced to display only whole\n # numbers on the y-axis if the y-axis\n # values are integers.\n current_axis = plt.figure().gca()\n if numpy.issubdtype(x_values.dtype, numpy.integer):\n current_axis.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))\n if numpy.issubdtype(y_values.dtype, numpy.integer):\n current_axis.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))\n \n # For the x-axis or the y-axis, if the range\n # of the absolute values is outside [1.e-4, 1.e4],\n # scientific notation is used.\n plt.ticklabel_format(style='sci',\n axis='both',\n scilimits=(-4, 4))\n \n # `plt.plot` returns a list.\n handle = []\n for i in range(y_values.shape[0]):\n handle.append(plt.plot(x_values, y_values[i, :])[0])\n plt.title(title)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n if legend is not None:\n plt.legend(handle, legend)\n plt.savefig(path)\n plt.clf()",
"def display_feds(list1, list2):\n if len(list1) != len(list2):\n print(\"In display_feds: lists must be of the same length\")\n return \n fig = plt.figure(dpi=128, figsize=(10, 6))\n fed_list_answer = fed_list(list1, list2)\n plt.plot(range(len(fed_list_answer)), fed_list_answer, c='red', alpha=0.5)\n \n plt.title(\"Feature edit distances between corresponding pairs\", fontsize = 24)\n plt.xlabel('', fontsize =16)\n #fig.autofmt_xdate()\n plt.ylabel(\"Distance\", fontsize =16)\n plt.tick_params(axis='both', which = 'major', labelsize=16)\n\n plt.show()",
"def scree_plot(self, ev):\n plt.scatter(range(1,len(ev)+1), ev)\n plt.plot(range(1,len(ev)+1), ev)\n plt.title(\"Scree Plot\")\n plt.xlabel(\"Factors\")\n plt.ylabel(\"Eigenvalue\")\n plt.grid()\n plt.show()",
"def plot(self):\n t = np.linspace(0, self.days, self.days + 1)\n fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(nrows=5, sharex='all')\n ax1.plot(t, self.S, label=\"Susceptible\", color='r')\n ax1.set_ylabel(\"Number of Susceptible People\")\n ax1.set_title(\"Strong Infecitous Model SEIRV Simulation\")\n ax3.plot(t, self.I, label=\"Active Cases\", color='b')\n ax3.set_ylabel(\"Active Cases\")\n ax2.plot(t, self.E, label=\"Exposed\", color='c')\n ax2.set_ylabel(\"# of Exposed\")\n ax4.plot(t, self.R, label=\"Recovered\", color='m')\n ax5.set_xlabel(\"Days\")\n ax4.set_ylabel('Number of Recovered')\n ax5.plot(t, self.V, label=\"Vaccinated\")\n ax5.set_ylabel(\"# Vaccinated\")\n ax1.legend()\n ax2.legend()\n ax3.legend()\n ax4.legend()\n plt.show()\n return fig"
] | [
"0.7042768",
"0.64889604",
"0.6424103",
"0.62358296",
"0.62245584",
"0.6214347",
"0.6187972",
"0.61645967",
"0.6090626",
"0.6083194",
"0.60742915",
"0.60631174",
"0.6021211",
"0.60032755",
"0.5985849",
"0.5971447",
"0.59706855",
"0.5946714",
"0.59466743",
"0.593274",
"0.5930092",
"0.5907409",
"0.5893017",
"0.5870112",
"0.584623",
"0.5845617",
"0.58428013",
"0.5823288",
"0.5816765",
"0.57937473"
] | 0.7176058 | 0 |
8 microed stepping by faking distance twice as long. | def micro_8(steps, a):
df = pd.DataFrame(index=np.arange(0, steps * 16), columns=('v', 's', 'd', 't'))
t = 0.0
m = 8 # micro level
d = d0 = math.sqrt(1/a/m) # faster accel since distance is longer
s = 0 # steps
p = 0 # position
p_d = 1/m # position delta
for s in range(800):
if s == 0:
d = d0 * 0.676
else:
d -= d * 2 / (4 * s + 1)
s += 1
p += p_d
t += d
df.loc[s] = [1/d/m, p, d, t]
# m = 1
# p_d = 1/m
# d = d * 8
# for s in range(100, 200):
# if s == 0:
# d = d0 * 0.676
# else:
# d -= d * 2 / (4 * s + 1)
# s += 1
# p += p_d
# t += d
# df.loc[s] = [1/d/m, p, d, t]
return df.dropna() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def drive_eight(n):\n # Variables for the go_diff function\n fast_speed = 80 \n slow_speed = 25\n # Half a lap time, this is the time the robot turns in a direction before switching\n half_lap_time =6.2 \n # To avoid having tu manually stop the robot we set it to drive continuously for x amount of seconds.\n elapsedSecs = 0\n while elapsedSecs < half_lap_time * 2 * n:\n arlo.go_diff(fast_speed, slow_speed, 1, 1)\n sleep(half_lap_time)\n arlo.go_diff(slow_speed, fast_speed, 1, 1)\n sleep(half_lap_time)\n elapsedSecs += half_lap_time * 2",
"def get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]",
"def _get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]",
"def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA = velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = mltA[startA:stopA]\n mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC = velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n 
self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]",
"def test_pos_1024() -> None:\n assert sw.walk_to(1024).distance == 31",
"def dist_to_stop(speed):\n return speed ** 2 / 4",
"def _step(self) -> None:",
"def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4",
"def step(self, move):",
"def jump(distance):\r\n t.penup()\r\n t.forward(200)\r\n t.pendown()\r\n return None",
"def move_coarse(self, direction, count=1):\n if self._direction != direction and self.simulate_backlash:\n self._direction = direction\n backlash_offset = randint(-maximum_backlash, maximum_backlash)\n self._move(direction, 1, 8 + backlash_offset)\n self._move(direction, count - 1, 8)\n self.backlash_count += 1\n else:\n self._direction = direction\n self._move(direction, count, 8)",
"def _TIME2STEPS(time):\n return int(time*1000)",
"def foward_shimmey(self):\n for x in range(6):\n self.right(primary=60, counter=30)\n time.sleep(.5)\n self.left(primary=70, counter=30)\n time.sleep(.5)\n self.back()\n time.sleep(2) \n self.stop()",
"def Advance():\n warp.step()",
"def warmup_step(ckpt_step: int) -> float:\n return ckpt_step * 10",
"def step(self, delta_l11, delta_l12, delta_l13, delta_l21, delta_l22, delta_l23):\n self.l11 += delta_l11; self.l12 += delta_l12; self.l13 += delta_l13\n self.l21 += delta_l11; self.l22 += delta_l12; self.l23 += delta_l13\n self.l21 += delta_l21; self.l22 += delta_l22; self.l23 += delta_l23\n # check that all tendon lenghts are within limit\n self.l11 = self.l1min if self.l11 < self.l1min else self.l11\n self.l12 = self.l1min if self.l12 < self.l1min else self.l12\n self.l13 = self.l1min if self.l13 < self.l1min else self.l13\n self.l11 = self.l1max if self.l11 > self.l1max else self.l11\n self.l12 = self.l1max if self.l12 > self.l1max else self.l12\n self.l13 = self.l1max if self.l13 > self.l1max else self.l13\n self.l21 = self.l2min if self.l21 < self.l2min else self.l21\n self.l22 = self.l2min if self.l22 < self.l2min else self.l22\n self.l23 = self.l2min if self.l23 < self.l2min else self.l23\n self.l21 = self.l2max if self.l21 > self.l2max else self.l21\n self.l22 = self.l2max if self.l22 > self.l2max else self.l22\n self.l23 = self.l2max if self.l23 > self.l2max else self.l23\n old_tip_vec = self.tip_vec2 # used for potential reward\n self.update_variables()\n new_tip_vec = self.tip_vec2 # used for potential reward\n reward = self.r_static\n return reward",
"def get_step():\n\n # Decide which direction to go and how far to go in that direction.\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4, 5, 6, 7, 8])\n step = direction * distance\n\n # Reject moves that go nowhere.\n if step == 0:\n get_step()\n else:\n return step",
"def _step(self, board, elapsedTime):\n\t\tpass",
"def fifteen():\r\n\r\n currentcell = 1.0\r\n cellpaths = 2.0\r\n \r\n while currentcell < 20.0:\r\n currentcell += 1.0\r\n cellpaths = cellpaths * (4.0 - 2.0/currentcell)\r\n \r\n return cellpaths",
"def cooroutine_helper(self):\n prev = yield\n running_distance = 0\n while True:\n nxt = yield running_distance\n running_distance += distance(prev, nxt).meters\n prev = nxt",
"def WarpStep(iters=5):\n MSG(\"WarpStep\")\n for j in range(iters):\n warp.step()\n return",
"def shiftAsideMark(state, opp, distDemar):\n dest = None\n while True:\n dest = Vector2D.create_random(low=-1, high=1)\n dest.norm = distDemar\n dest += opp.position\n if state.is_valid_position(dest) and \\\n distance_horizontale(dest, state.my_goal) > 10.+distance_horizontale(opp.position, state.my_goal):\n break\n return goTo(state, dest)",
"def target_position(self, time):\n \"\"\"\n start_pos = self.points[self.cur_start]\n seg_time = time - self.last_checkpoint_time\n\n #The arguement of target-velocity dosent matter\n cur_pos = self.target_velocity(time)*seg_time + start_pos\n\n \n # or time > (self.total_time / 4)*(self.cur_start + 1)\n cur_pos_norm = length(cur_pos - start_pos)\n\n next_corner = self.points[(self.cur_start + 1)%4]\n \n seg_norm = length(next_corner - start_pos)\n print(\"cur_pos : \", cur_pos, \"segment: \", self.cur_start, seg_norm - cur_pos_norm)\n\n if cur_pos_norm >= seg_norm:\n self.cur_start = (self.cur_start + 1) % 4\n self.last_checkpoint_time = time\n return cur_pos\n \"\"\"\n\n #Possibly use rospy.sleep()\n total_time = self.total_time\n\n\n if time < total_time/4:\n return self.path1.target_position(time)\n\n elif time - total_time/4 == 0:\n rospy.sleep(0.5)\n\n elif time < total_time/2:\n return self.path2.target_position(time - (total_time/4 + 0.5))\n # return self.path2.target_position(time - (total_time/4 ))\n\n\n elif time - total_time/2 == 0:\n rospy.sleep(0.5)\n\n elif time <= total_time/4*3:\n return self.path3.target_position(time - (total_time/2 + 1))\n # return self.path3.target_position(time - (total_time/2))\n\n\n elif time - total_time/4*3 == 0:\n rospy.sleep(0.5)\n\n else:\n return self.path4.target_position(time - (total_time/4*3 + 1.5))\n # return self.path4.target_position(time - (total_time/4*3))",
"def compute_step(X):\n return MOVING_STEP",
"def step_forward(self):",
"def walk(self):\n self.speed = self.speed + (0.2 * self.legs)",
"def takeoff(self, n, e, d):\n pass",
"def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance",
"def nearest_test_pulse(self):",
"def _step(self, start):\n #angle = np.random.uniform(0,2*np.pi) # only 2-dim\n #direction = angle2vec(angle)\n\n angle = np.random.randn(self.dim)\n direction = angle / la.norm(angle)\n \n if not self.query(start):\n print(f\"Given an invalid point! {start}\")\n \n testCounter = 0\n max_iter = 1000\n \n ## Case for adding to direction ##\n high = 1\n testCounter = 0\n while(self.query(start + high*direction)):\n high = high*2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus high loop with: \\n\\\n high = {high}\\n\")\n \n low = high/2\n testCounter = 0\n while(not self.query(start + low*direction)):\n low = low/2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus low loop with: \\n\\\n low = {low}\\n\")\n \n # now we know that (start + low * direction) is inside\n #assert(zonoid_membership_def(A, start+low*direction))\n # and that (start + high * direction) is outside\n #assert(not zonoid_membership_def(A, start+high*direction))\n \n tol = 1e-5\n t_plus = (high-low)/2\n old_t = 1\n current = start\n testCounter = 0\n while(abs(t_plus-old_t) > tol):\n old_t = t_plus\n t_plus = (high+low)/2\n testpoint = current + t_plus*direction\n if( self.query(testpoint) ):\n low = t_plus\n else:\n high = t_plus\n \n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus loop with: \\n\\\n t_plus = {t_plus}\\n\\\n t_old = {t_old}\\n\\\n high = {high}\\n\\\n low = {low}\\n\")\n t_plus = old_t\n \n ## Case for subtracting from direction\n high = -1\n testCounter = 0\n while(self.query(start + high*direction)):\n high = high*2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus high loop with: \\n\\\n high = {high}\\n\")\n \n low = high/2\n testCounter = 0\n while(not self.query(start + low*direction)):\n low = low/2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus low loop with: \\n\\\n low = {low}\\n\")\n \n # now we know that (start + low * direction) is inside\n #assert(zonoid_membership_def(A, start+low*direction))\n # and that (start + high * direction) is outside\n #assert(not zonoid_membership_def(A, start+high*direction))\n \n tol = 1e-10\n t_minus = (high-low)/2\n old_t = 1\n current = start\n testCounter = 0\n while(abs(t_minus-old_t) > tol):\n old_t = t_minus\n t_minus = (high+low)/2\n testpoint = current + t_minus*direction\n if( self.query(testpoint) ):\n low = t_minus\n else:\n high = t_minus\n \n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus loop with: \\n\\\n t_minus = {t_minus}\\n\\\n t_old = {t_old}\\n\\\n high = {high}\\n\\\n low = {low}\\n\")\n t_minus = old_t\n \n # Make the step\n final_t = np.random.uniform(t_minus, t_plus)\n #print(f\"Final t = {final_t}\")\n \n # remove extra returns for now for other compatibility\n return start + final_t*direction #, start+t_plus*direction, start+t_minus*direction"
] | [
"0.6142764",
"0.5769263",
"0.57421744",
"0.5703275",
"0.5681504",
"0.5650031",
"0.5599577",
"0.5527048",
"0.55249375",
"0.55100065",
"0.55055285",
"0.549204",
"0.54680324",
"0.5440701",
"0.54385084",
"0.54321384",
"0.5418194",
"0.54137677",
"0.54083496",
"0.5375917",
"0.5365879",
"0.5364452",
"0.53611696",
"0.5351481",
"0.5342448",
"0.5340236",
"0.52958703",
"0.52939165",
"0.5287813",
"0.5273693"
] | 0.6169425 | 0 |
Add the elements in ref_gen to an existing index. | def update_index(self, ref_gen):
testing = True
logging.warning('Updating index')
es_insert.index(es, ref_gen, self.index_name, testing, action="update")
logging.warning('Finished updating') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)",
"def build_index(self):\n self.rebuild_index()",
"def insert_index(self):\n pass",
"def index_add(all_index, this_index, samples, caller):\n for key, record in this_index.iteritems():\n if key not in all_index:\n all_index[key] = {}\n for sample_id in samples:\n if sample_id not in all_index[key]:\n all_index[key][sample_id] = {caller: []}\n elif caller not in all_index[key][sample_id]:\n all_index[key][sample_id][caller] = []\n # NB: If caller was run twice, will have 2 records here\n all_index[key][sample_id][caller].append(record)",
"def build_index():\n pass",
"def create_index():",
"def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))",
"def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))",
"def _es_push_indexes(self, content):\n for c in self.es_clients:\n c.create_index(content)",
"def append(self):\n target_index = get_index_from_alias(self.alias_name)\n if not target_index:\n self.replace()\n else:\n self.index_all(target_index)",
"def add_index(self, idx, subproblem_shape):\n self.indices.append(int(idx))\n self.subproblem_shapes.append(subproblem_shape)",
"def add_index_sig(self, index_sig):\n self.index_sigs.append(index_sig)",
"def add_index_sig(self, index_sig):\n self.index_sigs.append(index_sig)",
"def store_index(self, index, doc_type, source_list, init_id):\n\n bulk_actions = []\n doc_id = init_id\n\n for source in source_list:\n data_body = ElasticSearchUtility.__index_data_body(index, doc_type, doc_id, source[\"_source\"])\n bulk_actions.append(data_body)\n doc_id += 1\n\n print 'inserting - ', len(bulk_actions)\n helpers.bulk(self.es, bulk_actions)",
"def index(self, index):\n index.column_protein[self.column].add((self.protein,self.protein_res))\n index.protein_domain[(self.protein.id,self.protein_res)] = (self.domain,self.domain_res)\n index.domain_structure[(self.domain.id,self.domain_res)].add((self.structure,self.structure_res))\n index.structure[(self.structure.index, self.structure_res)] = self",
"def typesense_index_referral(ref, client=None):\n if not client:\n client = typesense_client()\n\n ref_document = {\n 'id': str(ref.pk),\n 'created': ref.created.timestamp(),\n 'type': ref.type.name,\n 'referring_org': ref.referring_org.name,\n 'regions': [i.name for i in ref.regions.all()],\n 'reference': ref.reference if ref.reference else '',\n 'description': ref.description if ref.description else '',\n 'address': ref.address if ref.address else '',\n 'lga': ref.lga.name if ref.lga else '',\n 'dop_triggers': [i.name for i in ref.dop_triggers.all()],\n }\n if ref.point:\n ref_document['point'] = [ref.point.x, ref.point.y]\n client.collections['referrals'].documents.upsert(ref_document)",
"def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()",
"def reindex(self):",
"def reindex(self):",
"def gene(self, idx, value):\r\n self.genes[idx] = value",
"def add_to_index(self, term_, doc_id_):\n\n if(term_ not in self.inverted_index.keys()):\n postingsList=LinkedList()\n postingsList.insert_at_end(doc_id_)\n #Doc freq\n postingsList.length=postingsList.length+1\n self.inverted_index[term_]=postingsList\n# self.inverted_index[term_].start_node.term_frequency += 1\n elif(not self.is_doc_id_in_posting_list(self.inverted_index[term_],doc_id_,term_)):\n self.inverted_index[term_].insert_at_end(doc_id_)\n self.inverted_index[term_].length=self.inverted_index[term_].length+1",
"def add(self, name, index = None):\n if index is None:\n while self.indexDict.has_key(self.count):\n self.count += 1\n index = self.count\n self.fieldDict[name] = index\n self.indexDict[index] = name",
"def add_read_to_vec_using_ref(self, read):\n\t\ti = read.offset\n\t\tfor p in self.refmap.gap_map[read.ref_seq_id][read.offset:(read.offset+len(read.seq))]:\n\t\t\ts = self.refmap.fasta[read.ref_seq_id].seq[i]\n\t\t\tif s=='U': s='T'\n\t\t\tif s not in ('A','T','C','G'): s='N'\n\t\t\tDF.add_to_vec(self, nt=s, positions=[p], counts=[read.copy])\n\t\t\ti += 1",
"def create_reference_index(target, sclass):\n # Retrieve reference & store in FileStoreID\n ref_path = sclass.unavoidable_download_method(target, 'ref.fasta')\n\n # Tool call\n command = 'samtools faidx {}'.format(sclass.docker_path(ref_path))\n sclass.docker_call(command, tool_name='samtools')\n\n # Update FileStoreID of output\n target.updateGlobalFile(sclass.ids['ref.fai'], ref_path + '.fai')",
"def build_index(self):\n \n \n geoids = self.partitions.find_or_new(table='facilities_geoids')\n addresses = self.partitions.find_or_new(table='facilities_addresses')\n facilities = self.partitions.find(table='facilities')\n \n facilities.attach(addresses,'addresses')\n facilities.attach(geoids,'geoids')\n \n q = \"\"\"\n SELECT year, type, oshpd_id, facility_name, dba_city, dba_zip_code, blockgroup_gvid, tract_gvid, county_gvid\n FROM facilities\n JOIN geoids.facilities_geoids AS geoids ON geoids.facilities_id = facilities.id\n JOIN addresses.facilities_addresses AS addresses ON addresses.facilities_id = facilities.id\n \"\"\"\n \n p = self.partitions.find_or_new(table='facilities_index')\n p.clean()\n lr = self.init_log_rate()\n \n with p.inserter() as ins:\n for row in facilities.query(q):\n ins.insert(row)\n lr(str(p.identity))",
"def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')",
"def add_target_and_index(self, name, sig, signode):\n key = normalize_object_name(name)\n if key in self.state.document.ids:\n return\n\n signode['names'].append(name)\n signode['ids'].append(key)\n signode['first'] = not self.names\n self.indexnode['entries'].append(\n ('single', 'JSON Objects; {}'.format(name), key, '', None))",
"def store(self, doc):\n if doc is None:\n return\n assert isinstance(doc, Document)\n idx = doc.features.get(self.idxfeatname())\n if idx is None:\n raise Exception(\"Cannot append document, no __idx_ID feature\")\n self.__setitem__(idx, doc)",
"def add_ref(self, irsb_addr, stmt_idx, insn_addr):\n\n ref = (irsb_addr, stmt_idx, insn_addr)\n if ref not in self.refs:\n self.refs.add(ref)",
"def addIndex(self, index):\r\n assert type(index)==int\r\n assert 0<=index and index < self._dataset.getSize()\r\n\r\n if not (index in self._indices):\r\n self._indices.append(index)"
] | [
"0.6223327",
"0.6165989",
"0.6162208",
"0.6124569",
"0.5892641",
"0.5827929",
"0.5818835",
"0.5818835",
"0.5806102",
"0.57331675",
"0.5731806",
"0.57138515",
"0.57138515",
"0.569752",
"0.5685238",
"0.5673912",
"0.56449544",
"0.5584928",
"0.5584928",
"0.55495024",
"0.5501107",
"0.54773134",
"0.54759943",
"0.54496276",
"0.5429104",
"0.5428337",
"0.54098165",
"0.5406296",
"0.5398407",
"0.5395309"
] | 0.7487727 | 0 |
Print crossword assignment to the terminal. | def print(self, assignment):
letters = self.letter_grid(assignment)
for i in range(self.crossword.height):
for j in range(self.crossword.width):
if self.crossword.structure[i][j]:
print(letters[i][j] or " ", end="")
else:
print("█", end="")
print() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_word_scheme(self) -> None:\n print(\"\".join(self.word2))",
"def print(self):\n\n def format_guessed_word(word):\n return ' '.join(list(word))\n\n def format_blank_word(word):\n return ' '.join(list('_' * len(word)))\n\n print('\\n' + \"Board\" + '=' * 75)\n for word in self._words:\n word_str = format_guessed_word(word) \\\n if word in self._words_guessed \\\n else format_blank_word(word)\n print(word_str)\n print(\"{}/{} words remaining\".format(self._num_words - len(self._words_guessed),self._num_words))\n print('=' * 80 + '\\n')",
"def print(self):\n self.print_avec_separateur(\" \")",
"def show(self):\n print('\\n'+'\\n'.join([' '.join([['.', 'O', 'X'][self.board[3*j + i]]\n for i in range(3)]) for j in range(3)]))",
"def printBoard(self):\n\t\tkey = [' ', 'X', 'O']\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[0][0]] + ' | ' + key[self.state[0][1]] + ' | ' + key[self.state[0][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[1][0]] + ' | ' + key[self.state[1][1]] + ' | ' + key[self.state[1][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[2][0]] + ' | ' + key[self.state[2][1]] + ' | ' + key[self.state[2][2]])\n\t\tprint(' | |')",
"def text_output(self):\n print(self.board)\n print()",
"def print_colored(word):\n for char in word:\n print(c.rc() + char + c.x, end='')",
"def __str__(self):\n s = 'word chain: ' + '\\n'\n for word in self._used_words[:-1]:\n s += word + ' -> '\n s += self._used_words[-1] + '\\ntarget word: ' + self._target\n return s",
"def printBoard(self):",
"def printBoard(self):\n print(\"\"\"\nSpace 1 Space 2 Space 3 Space 4 Space 5 Space 6\n------- ------- ------- ------- ------- -------\"\"\")\n print(\"{:>4}{:>10}{:>10}{:>10}{:>10}{:>10}\".format(str(self.space1), str(self.space2), str(self.space3), str(self.space4), str(self.space5), str(self.space6)))\n print()",
"def print_board(self):\n \n # How to show empty/p1/p2\n VALS = \".XO\"\n\n print(\"\\n a b c d e f g\")\n print(\" /--+-+-+-+-+-+--\\\\\")\n for r in range(_HEIGHT - 1, -1, -1):\n s = \"%s |\" % r\n for c in range(_WIDTH):\n # Print mark next to most recent move\n mark = \">\" if self.last_play_rc == (r, c) else \" \"\n s += mark + VALS[self.board[r * 7 + c]]\n print(s + \" |\")\n print(\" \\\\--+-+-+-+-+-+--/\")\n print(\" a b c d e f g\\n\")",
"def verse_2():\n print(\"Old MacDonald had a farm\")\n print(\"E-I-E-I-O\")",
"def shout(word):\n print(word+\"!\")",
"def print_line():\n print('+ - - - - + - - - - +'),",
"def print(self):\n for word in self.words:\n print(word)",
"def show( self):\n def symbol( i):\n return i<0 and (i==-2 and ' ' or '0') or chr(ord('a') + i)\n \n X, Y = np.max( self.board.positions, 0)\n # -2 to indicate outside board.\n display = np.zeros( (X+1,Y+1), dtype=int) - 2 \n for x, y in self.board.positions:\n display[x, y] = -1 # -1 to indicate unoccupied\n for p, i in self.occupation.items():\n x, y = self.board.positions[p]\n display[x, y] = i\n for x in xrange(X+1):\n s = ''.join( [ symbol( display[x, y]) for y in xrange(Y+1) ])\n print s",
"def print_full_phrase(self):\n print(\"The phrase was...\")\n print(\"\\t\", end=\"\")\n for i in self.active_phrase:\n print(\"*\", end=\"\")\n print(f\"\\n\\t{self.active_phrase}\")\n print(\"\\t\", end=\"\")\n for i in self.active_phrase:\n print(\"*\", end=\"\")\n print()",
"def print_instructions(self):\n\t\tprint('\\n\\n==========================================================================')\n\t\tprint('==========================================================================\\n')\n\t\tprint('Welcome to Tic Tac Toe, the came you know and love. \\nThe rules are the same ones you know and love. \\nTo make a move just type the coordinates of the spot like so - row,column. \\nNo spaces please! Lets go ahead and start! Here is a picuter of the board with some coordinates just in case!\\n')\n\t\tprint('=====================')\n\t\tprint('|| 0,0 | 0,1 | 0,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 1,0 | 1,1 | 1,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 2,0 | 2,1 | 2,2 ||')\n\t\tprint('=====================')\n\t\tprint('\\n==========================================================================')\n\t\tprint('==========================================================================\\n\\n')",
"def print(self):\r\n self.print_avec_separateur()",
"def _print(txt):\n\n # Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\n # Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\n # Style: DIM, NORMAL, BRIGHT, RESET_ALL\n print('{0}{1}'.format(Style.BRIGHT + txt, Fore.RESET + Back.RESET + Style.RESET_ALL))",
"def print_post():\n print('| | |'),",
"def print_chars(self):\n for v in voc.split('\\n'):\n pair = v.split(',')\n print(pair[0], pair[1], '\\t', self.epi.xsampa_list(pair[0]))",
"def print(self):\n print(\" a b c d e f g h \")\n print(\" ┼───┼───┼───┼───┼───┼───┼───┼───┼\")\n for row in range(8, 0, -1):\n pieces = \" │ \".join(self.state[row - 1])\n print(f\"{row} │ {pieces} │ {row}\")\n print(\" ┼───┼───┼───┼───┼───┼───┼───┼───┼\")\n print(\" a b c d e f g h \")",
"def show_word(self):\n self.display_word = len(self.chosen_word) * \"_ \"\n Donatello.draw_word(self.display_word)\n return self.display_word",
"def print(self):\r\n base = 8 * self.width\r\n print(base * \"-\")\r\n for x in range(self.height):\r\n output = \"\"\r\n for y in range(self.width):\r\n output = output + self.board[x][y] + \"|\"\r\n print(\"|\" + output)\r\n print(base * \"-\")",
"def print(self) -> str:\n if self.is_unoccupied():\n return \"\"\n return str(\"%s-%s\" % (self.piece.color.name, self.piece.name.name))",
"def print_trail(word):\n if len(word) == 0:\n return\n print(word, end = ' ')\n t = is_reducible(word, word_dict)\n print_trail(t[0])",
"def space():\n print(' ', end='')",
"def print_board(self):\n print(\n self.BOARD_TEMPLATE.format(\n *[self.COUNTER_REPRESENTATION[counter] for counter in self.board])\n )",
"def print_possibility_space():\n\n print(\"Possibility space:\")\n print(\" {} unique sword images\".format(calculate_image_possibilities()))"
] | [
"0.6418134",
"0.6091784",
"0.60144675",
"0.5868591",
"0.58374834",
"0.5763381",
"0.5702663",
"0.56456727",
"0.56032985",
"0.5602622",
"0.5599726",
"0.55596346",
"0.5540012",
"0.55338246",
"0.5492764",
"0.54739493",
"0.5469291",
"0.5463655",
"0.54530674",
"0.54021096",
"0.54003537",
"0.5392449",
"0.53673506",
"0.53634274",
"0.5343367",
"0.5339937",
"0.5328883",
"0.53235555",
"0.5316245",
"0.5312369"
] | 0.72022206 | 1 |
Save crossword assignment to an image file. | def save(self, assignment, filename):
from PIL import Image, ImageDraw, ImageFont
cell_size = 100
cell_border = 2
interior_size = cell_size - 2 * cell_border
letters = self.letter_grid(assignment)
# Create a blank canvas
img = Image.new(
"RGBA",
(self.crossword.width * cell_size,
self.crossword.height * cell_size),
"black"
)
font = ImageFont.truetype("assets/fonts/OpenSans-Regular.ttf", 80)
draw = ImageDraw.Draw(img)
for i in range(self.crossword.height):
for j in range(self.crossword.width):
rect = [
(j * cell_size + cell_border,
i * cell_size + cell_border),
((j + 1) * cell_size - cell_border,
(i + 1) * cell_size - cell_border)
]
if self.crossword.structure[i][j]:
draw.rectangle(rect, fill="white")
if letters[i][j]:
w, h = draw.textsize(letters[i][j], font=font)
draw.text(
(rect[0][0] + ((interior_size - w) / 2),
rect[0][1] + ((interior_size - h) / 2) - 10),
letters[i][j], fill="black", font=font
)
img.save(filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, filename):\n print(\"Saving...\", end=\"\\r\")\n canvas = self.canvas[self.N:self.S,self.W:self.E]\n cv2.imwrite(\"./Output/\"+filename, canvas)\n print(\"Saved:\",filename)",
"def save_detection(self, image):\n\t\timg = self.visualize_detection(image)\n\t\timg = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\t\tcv2.imwrite(f'{SAVE_PATH}{self.clip}{self.num_save}.jpg', img)",
"def save_as(self, filename):\n opencv.imwrite(filename, self.img)",
"def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")",
"def save_groudtruth(im, coords, filename):\n print 'Saving ground truth ......{0}'.format(filename)\n img_draw = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))\n draw = ImageDraw.Draw(img_draw)\n for coord in coords:\n draw.polygon([(float(coord[0]), float(coord[1])), (float(coord[2]), float(coord[3])),\n (float(coord[4]), float(coord[5])), (float(coord[6]), float(coord[7]))],\n outline=\"red\", fill=\"blue\")\n img_draw = np.array(img_draw)\n img_draw = cv2.cvtColor(img_draw, cv2.COLOR_RGB2BGR)\n bname_excludepoint = filename.split('/')[-1].split('.')[0]\n image_path = '/home/yuquanjie/Documents/deep-direct-regression/result/' + bname_excludepoint + '_gt.jpg'\n cv2.imwrite(image_path, img_draw[0: img_draw.shape[0], 0: img_draw.shape[1]])",
"def _save(filename, img):\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n # filename = filename+'.png'\n filename = os.path.join(OUTPUT_DIR, filename)\n # print(filename, img.shape)\n cv.imwrite(filename, img)",
"def save(self):\n filename = os.path.expanduser(\"~/\" + self.name)\n print(filename)\n np.savetxt(filename + \"_left.txt\", self.central)\n np.savetxt(filename + \"_right.txt\", self.boundaries)",
"def img_save(self):\n file_name, extension = return_folder_file_extension(self.img_name)[1:]\n image_name_save = \"%s_D=%s_Rs=%s_size=%s_offset=%i%s\" % (file_name, self.D, self.Rs, self.axe_X, self.offset_X+self.offset_X2, extension)\n\n if self.img2 is not None:\n self.img2.save(image_name_save)\n print(\"Saved \"+image_name_save)\n else:\n print(\"No image to save\")",
"def writeImage(image, filename):\n Sky = [128,128,128]\n Building = [128,0,0]\n Pole = [192,192,128]\n Road_marking = [255,69,0]\n Road = [128,64,128]\n Pavement = [60,40,222]\n Tree = [128,128,0]\n SignSymbol = [192,128,128]\n Fence = [64,64,128]\n Car = [64,0,128]\n Pedestrian = [64,64,0]\n Bicyclist = [0,128,192]\n Unlabelled = [0,0,0]\n r = image.copy()\n g = image.copy()\n b = image.copy()\n label_colours = np.array([Sky, Building, Pole, Road_marking, Road, Pavement, Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])\n for l in range(0,12):\n r[image==l] = label_colours[l,0]\n g[image==l] = label_colours[l,1]\n b[image==l] = label_colours[l,2]\n rgb = np.zeros((image.shape[0], image.shape[1], 3))\n rgb[:,:,0] = r/1.0\n rgb[:,:,1] = g/1.0\n rgb[:,:,2] = b/1.0\n im = Image.fromarray(np.uint8(rgb))\n # im.save('/Users/koheiyamamoto/Desktop/SegNet/out/' + filename)\n im.save('./out/' + filename)",
"def save_image(image, file_name):\n io.imsave(file_name,image)",
"def img_save(name,img):\n cv2.imwrite(name,img)",
"def write_image(self, filename):\n cv2.imwrite(filename, self.image)",
"def save_PPM(self, fileName, imageComment = \"\"):\n file = open(add_ext_if_needed(fileName, \"ppm\"), \"w\")\n file.write(\"P3\\n\"); \n file.write(\"# \" + imageComment + \"\\n\")\n file.write(str(self._image.width()) + \" \" + str(self._image.height()) + \"\\n\")\n file.write(\"255\\n\")\n for y in range(self._image.height()):\n for x in range(self._image.width()):\n r,g,b = self.getPixel(x,y);\n file.write(str(r) + \" \" + str(g) + \" \" + str(b) + \"\\n\")\n file.close()",
"def save_image(name, image):\n image_name = 'output/' + name + '.png'\n cv2.imwrite(image_name, image)",
"def save(self, filepath):\n self.drawer.flush()\n self.img.save(filepath)",
"def save(self, x, y, names, path=\"\", zoom=False):\n for i in range(len(x)):\n image = self.generate(x[i], label=np.argmax(y[i]), zoom=zoom)\n image = Image.fromarray((image*255).astype(\"uint8\"))\n image.save(path + names[i] + \".png\", \"PNG\")",
"def save_pca(self, filepath):\n mean_beam, principal_components, variances = self.pca()\n image_shape = np.array(self.image_shape)\n with open(filepath, 'wb') as f:\n np.save(f, image_shape)\n np.save(f, mean_beam)\n np.save(f, principal_components)\n np.save(f, variances)\n np.save(f, self.mask)",
"def save_image(self, filename):\n if filename[-4:] != '.pkl':\n filename + '.pkl'\n with open(filename, 'wb') as output: # Overwrites any existing file.\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)",
"def write(self, filename):\n\n self.__image.save(filename)",
"def saveauto(self):\n self.inp.getedge()\n ss=ss=strftime(\"_%Y-%m-%d_%H:%M:%S\", gmtime())\n fn=os.environ['VMEWORKDIR'] +\"/WORK/phases/\"+self.name+ss+self.inp.edge+\"_\"+self.inp.inpnum+\"_\"+self.inp.ctpnum+\".ps\"\n rc=self.c1.postscript(file=fn)\n if rc is not '':\n MywError(errmsg=\"File \"+fn+\" cannot be created.\")\n print \"rc=\",rc,len(rc)\n else:\n print \"File \",fn, \" saved.\"",
"def save(self, fn):\n plt.imsave(fn, self.image)",
"def save(self, filename):\n \n path, name = os.path.split(filename)\n ext = name.split(\".\")[-1]\n _tkExec(self.image.write, filename, format=ext)",
"def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n else: # newer shot\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)",
"def save(self):\n img = Image.new(\"1\", (self.container.width, self.container.height))\n draw = ImageDraw.Draw(img)\n for item in self.items:\n draw.ellipse(item.box_coordinates(), fill=1)\n del draw\n img.save(\"plot.bmp\", \"bmp\")",
"def saveCanvas(self,fileName):\n if self.sync==False:\n self._drawOnCanvas()\n fileName=fileName+'.bmp'\n cv.imwrite(fileName,self.canvas)",
"def save_image(self):\n self.save()",
"def save(self):\n pickle.dump([self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word], open(self.save_file, 'wb'), protocol=4)",
"def write(filename, data):\r\n with open(filename, \"wb\") as f:\r\n pic.dump(data, f)",
"def store_image(self):\n cv2.imwrite(self.__diff_filename(), self.__diff_image())",
"def save_image(self, image_file):\r\n self.ensure_pyplot()\r\n command = 'plt.gcf().savefig(\"%s\")'%image_file\r\n #print 'SAVEFIG', command # dbg\r\n self.process_input_line('bookmark ipy_thisdir', store_history=False)\r\n self.process_input_line('cd -b ipy_savedir', store_history=False)\r\n self.process_input_line(command, store_history=False)\r\n self.process_input_line('cd -b ipy_thisdir', store_history=False)\r\n self.process_input_line('bookmark -d ipy_thisdir', store_history=False)\r\n self.clear_cout()"
] | [
"0.65461063",
"0.6395748",
"0.63688743",
"0.6313389",
"0.62285316",
"0.61451906",
"0.6133265",
"0.6110629",
"0.61080384",
"0.60995334",
"0.6096377",
"0.60782754",
"0.60167956",
"0.60059804",
"0.59735614",
"0.5961569",
"0.5951604",
"0.5941784",
"0.591758",
"0.5905942",
"0.58904254",
"0.58610684",
"0.5856268",
"0.5855799",
"0.5845757",
"0.58446544",
"0.5829126",
"0.5824401",
"0.5798292",
"0.5796669"
] | 0.78399444 | 1 |
Return True if `assignment` is complete (i.e., assigns a value to each crossword variable); return False otherwise. | def assignment_complete(self, assignment):
# print("Entered assignment_complete Function")
for var in assignment:
if assignment[var] is None:
return False
return self.consistent(assignment)
# raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True",
"def assignment_complete(self, assignment):\n if len(assignment) == len(self.domains):\n return True\n\n else:\n return False",
"def is_complete(self, variables):\n for var in variables:\n if not self.has_assignment_for(var):\n return False\n\n return True",
"def consistent(self, assignment):\n # print(\"Entered consistent Function\")\n # print(\"assignment\")\n # print(assignment)\n\n overlaps = self.crossword.overlaps\n value_set = set()\n for variable in assignment: \n #checking overlaps with neighbors\n neighbors = self.crossword.neighbors(variable)\n for neighbor in neighbors:\n overlap = overlaps[(variable, neighbor)]\n if (neighbor in assignment):\n # print(\"var 1 overlap letter\")\n # print(assignment[variable][overlap[0]])\n # print(\"var 2 overlap letter\")\n # print(assignment[neighbor][overlap[1]])\n if (assignment[variable][overlap[0]] is not assignment[neighbor][overlap[1]]):\n return False\n \n # print(\"neighbors\")\n # print(neighbors)\n\n #checking that the assignment is the correct length for the variable\n if (variable.length != len(assignment[variable])):\n return False\n\n #the set to check for distinct variables later\n value_set.add(assignment[variable])\n\n #Checking that all variables are distinct\n #these should be the same length unless two or more variables share an value\n if( len(value_set) is not len(assignment)): \n return False\n \n return True\n\n # raise NotImplementedError",
"def isAssignment(self):\n return _libsbml.Rule_isAssignment(self)",
"def isAssigned(self):\n if self.getProton1Assignments() and self.getProton2Assignments():\n return 1\n else:\n return 0",
"def is_assignment(*args):\n return _ida_hexrays.is_assignment(*args)",
"def is_complete(self, assignment):\n for a in self.agents:\n if self.calc_agent_budget(a, assignment):\n return False\n for t in self.tasks:\n if self.calc_task_budget(t, assignment):\n return False\n return True",
"def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if asname is not None:\n if asname.name is node:\n return True\n return False",
"def has_assignment_for(self, var):\n return self.variable_to_value.get(var) != None",
"def consistent(self, assignment):\n # for each of the current assignments\n for word in assignment:\n # if the word does not fit in the gaps\n if len(assignment[word]) != word.length:\n # reject attempt\n return False\n # if the word is already in the assignment\n if list(assignment.values()).count(assignment[word]) > 1:\n # reject attempt\n return False\n # for each of the overlaps\n for overlap in self.crossword.overlaps:\n # if the overlap isn't empty and is an overlap for the word\n # overlaps are a superset: if the overlap of (x, y) is in the set, so is (y, x), so we can just go by the first overlap element\n if self.crossword.overlaps[overlap] is not None and overlap[0] == word:\n # try to access the word assignment for the other overlap target\n try:\n test_word = assignment[overlap[1]]\n # if it does not exist in the assignment\n except KeyError:\n # continue to the next overlap\n continue\n # if the other overlap target has been assigned\n else:\n # extract the letter we want to match for the overlap\n test_letter = test_word[self.crossword.overlaps[overlap][1]]\n # if the letters do not match\n if assignment[word][self.crossword.overlaps[overlap][0]] != test_letter:\n # reject attempt\n return False\n return True",
"def backtrack(self, assignment):\n # As stated above, if all variables in assignment is 1\n # then all values have been set and we return assignment \n if all(len(l) == 1 for l in assignment.values()):\n return assignment\n\n # Pick the next unnassigned variable that we are going to check \n key, values = self.select_unassigned_variable(assignment)\n # Loop through all the allowed values of this square in the sudoku board\n for value in values:\n # Do a deepcopy cuz otherwise R.I.P\n deep = copy.deepcopy(assignment)\n # Checks if this current value is consistent with the rest\n # of the sudoku board \n if self.check_consistency(deep, key, value):\n # IF it is consistent then we set this square to have this value \n deep[key] = [value]\n # Do inference check for hyper optimized code\n if self.inference(deep, self.get_all_arcs()):\n self.counter += 1\n result = self.backtrack(deep)\n if result is not False:\n return result\n else:\n self.fails += 1\n else:\n # Continue looping through the values of the currently selected \n # sudoku-square if the value was inconsistent with the board \n continue\n return False",
"def backtrack(self, assignment):\n # if the assignment is complete\n if self.assignment_complete(assignment):\n # return the assignment, crossword is complete\n return assignment\n # pick a variable to try to assign\n var = self.select_unassigned_variable(assignment)\n # for each value in the variable's domain\n for value in self.order_domain_values(var, assignment):\n # attempt to assign this value and fit it into the crossword\n # make a copy of the current assignments\n trial = assignment.copy()\n # add the trial value to the test assignment\n trial[var] = value\n # if the test assignment is consistent\n if self.consistent(trial):\n # add the trial assignment to the current list of assignments\n assignment[var] = value\n # take the next backtrack step with this new assign,ent\n result = self.backtrack(assignment)\n # if the backtrack is a success\n if result is not None:\n # we have a match\n return result\n # a backtrack further down failed, so remove the trial assignment\n assignment.pop(var)\n # no assignment was possible, return None\n return None",
"def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True",
"def goal_test(self, state):\r\n assignment = dict(state)\r\n return (len(assignment) == len(self.variables)\r\n and all(self.nconflicts(variables, assignment[variables], assignment) == 0\r\n for variables in self.variables))",
"def holds(self,assignment):\n return self.condition(*tuple(assignment[v] for v in self.scope))",
"def consistent(self,assignment):\n return all(con.holds(assignment)\n for con in self.constraints\n if all(v in assignment for v in con.scope))",
"def assignment(self):\n shards = self.line.split('=')\n if len(shards) == 2:\n return True",
"def is_assign(self):\n return self.var.initializer is not None",
"def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0",
"def fold_assignment(self):\n return self._parms.get(\"fold_assignment\")",
"def backtrack(csp):\n\n if len(csp.assignment) == len(csp.variables):\n return True\n\n variable = select_unassigned_variable(csp)\n value = order_domain_values(csp, variable)\n #print variable\n #print value\n flag = 0\n for x in value:\n csp.variables.begin_transaction()\n if is_consistent(csp, variable, x):\n #print \"past is_consistent\"\n for var in csp.variables:\n if var == variable:\n var.assign(x)\n var.is_assigned()\n solution = backtrack(csp)\n if solution != False:\n return True\n csp.variables.rollback()\n return False",
"def _check_if_satisfiable(self):\n # Search for a satisfying assignment\n all_variables = self.all_variables()\n\n # Try to find some assignment of the constrained vars\n counter = count()\n next_count = next(counter)\n queue = [(0, 0, next_count, {})]\n\n while queue:\n num_attempts, _, _, assignments = hq.heappop(queue)\n num_attempts += 1\n # Full assignment?\n # keep out of loop for empty constraint edge case\n if len(assignments) == len(all_variables):\n return True\n for v in sorted(all_variables - set(assignments.keys())):\n if isinstance(v, DiscreteVariable):\n possible_assignments = self.get_possible_assignments(v)\n else:\n possible_assignments = [v.sample() \\\n for _ in range(10*(1+num_attempts))]\n for assignment in possible_assignments:\n new_assignments = assignments.copy()\n new_assignments[v] = assignment\n # Constraint violated\n if not self.check(new_assignments):\n continue\n # Finish early\n if len(new_assignments) == len(all_variables):\n return True\n next_count = next(counter)\n hq.heappush(queue, (num_attempts, -len(new_assignments),\n -next_count, new_assignments))\n\n if next_count > gc.max_satisfy_tries:\n import ipdb; ipdb.set_trace()\n break\n\n return False",
"def check_assignment(assignments: dict, point: Point, value: str) -> bool:\n\n # check base condition: do the constraints hold for current point\n if not check_constraint_satisfied(assignments, point, value):\n print(' → base constraint failed:', point, '=', value)\n return False\n\n # check neighbouring conditions: do the constraints (still) hold for other points\n temp_assignment = copy.deepcopy(assignments)\n temp_assignment[point] = value\n\n # loop through points that can attack the current point, as kings\n print(' > checking neighbouring kings')\n for pt in filter(lambda p: p in assignments and assignments[p] == 'king', attack_points_king[point]):\n if not check_constraint_satisfied(temp_assignment, pt, assignments[pt]):\n print(' → neighbouring constraint failed for neighbour', pt, '=', assignments[pt])\n return False\n\n # loop through points that can attack the current point, as knights\n print(' > checking neighbouring knights')\n for pt in filter(lambda p: p in assignments and assignments[p] == 'knight', attack_points_knight[point]):\n if not check_constraint_satisfied(temp_assignment, pt, assignments[pt]):\n print(' → neighbouring constraint failed for neighbour', pt, '=', assignments[pt])\n return False\n\n # all constraints are satisfied!\n return True",
"def __forward_check(self, assigned_var, assigned_value, unassigned_vars):\n for unassigned_neighbor in self.__unassigned_neighbors(assigned_var, unassigned_vars):\n consistent_values = self.__consistent_domain_values(assigned_var, assigned_value, unassigned_neighbor)\n if len(consistent_values) == 0:\n return False\n else:\n unassigned_neighbor.domain = consistent_values\n return True",
"def _are_last_assignments_valid(assignments, output_vars, ignore_exception=True):\n assert isinstance(assignments, collections.OrderedDict)\n if len(assignments) == 0:\n return False\n last_assignments = []\n for assign_outvar in reversed(assignments):\n last_assignments.append([assign_outvar, assignments[assign_outvar]])\n if len(last_assignments) == len(output_vars):\n break\n last_assignments = list(reversed(last_assignments)) # proper order\n for i, (assign_outvar, expr) in enumerate(last_assignments):\n if not(assign_outvar == output_vars[i]) or \\\n not(isinstance(expr, SSAReturn)) or \\\n not(expr.args[0] not in output_vars):\n if not ignore_exception:\n last_assignments_vrepr = [(k.vrepr(), v.vrepr()) for (k, v) in last_assignments]\n raise ValueError(\"last assignments are not of the form \"\n \"output_var <- SSAReturn(non_output_var)\"\n f\"\\noutput vars = {output_vars}\"\n f\"\\nlast assignments {last_assignments_vrepr}\"\n f\"\\n{assignments}\")\n return False\n return True",
"def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")",
"def _check_assigned(self):\n\n if self.values is None and self.lazy:\n raise ValueError(\"This instance has not been assigned any data.\")",
"def __inferences(self, assigned_var, assigned_value, unassigned_vars, do_forward_checking):\n if do_forward_checking:\n return self.__forward_check(assigned_var, assigned_value, unassigned_vars)\n else:\n return True",
"def are_all_jobs_assigned(self, numjobsassigned):\n if not self.uniform_mode:\n return numjobsassigned == len(self.jobs)\n return True"
] | [
"0.8660917",
"0.7774996",
"0.7236496",
"0.715314",
"0.6777561",
"0.67663616",
"0.6716149",
"0.6668017",
"0.6563448",
"0.65057194",
"0.64147335",
"0.6396104",
"0.6369181",
"0.63129765",
"0.63119334",
"0.6073311",
"0.6054601",
"0.603511",
"0.6031876",
"0.5906102",
"0.5893759",
"0.5834775",
"0.58189565",
"0.5803393",
"0.5753126",
"0.56860167",
"0.56567794",
"0.5616562",
"0.5597932",
"0.55476034"
] | 0.81902444 | 1 |
Return True if `assignment` is consistent (i.e., words fit in crossword puzzle without conflicting characters); return False otherwise. | def consistent(self, assignment):
# print("Entered consistent Function")
# print("assignment")
# print(assignment)
overlaps = self.crossword.overlaps
value_set = set()
for variable in assignment:
#checking overlaps with neighbors
neighbors = self.crossword.neighbors(variable)
for neighbor in neighbors:
overlap = overlaps[(variable, neighbor)]
if (neighbor in assignment):
# print("var 1 overlap letter")
# print(assignment[variable][overlap[0]])
# print("var 2 overlap letter")
# print(assignment[neighbor][overlap[1]])
if (assignment[variable][overlap[0]] is not assignment[neighbor][overlap[1]]):
return False
# print("neighbors")
# print(neighbors)
#checking that the assignment is the correct length for the variable
if (variable.length != len(assignment[variable])):
return False
#the set to check for distinct variables later
value_set.add(assignment[variable])
#Checking that all variables are distinct
#these should be the same length unless two or more variables share an value
if( len(value_set) is not len(assignment)):
return False
return True
# raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def consistent(self, assignment):\n # for each of the current assignments\n for word in assignment:\n # if the word does not fit in the gaps\n if len(assignment[word]) != word.length:\n # reject attempt\n return False\n # if the word is already in the assignment\n if list(assignment.values()).count(assignment[word]) > 1:\n # reject attempt\n return False\n # for each of the overlaps\n for overlap in self.crossword.overlaps:\n # if the overlap isn't empty and is an overlap for the word\n # overlaps are a superset: if the overlap of (x, y) is in the set, so is (y, x), so we can just go by the first overlap element\n if self.crossword.overlaps[overlap] is not None and overlap[0] == word:\n # try to access the word assignment for the other overlap target\n try:\n test_word = assignment[overlap[1]]\n # if it does not exist in the assignment\n except KeyError:\n # continue to the next overlap\n continue\n # if the other overlap target has been assigned\n else:\n # extract the letter we want to match for the overlap\n test_letter = test_word[self.crossword.overlaps[overlap][1]]\n # if the letters do not match\n if assignment[word][self.crossword.overlaps[overlap][0]] != test_letter:\n # reject attempt\n return False\n return True",
"def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True",
"def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True",
"def assignment_complete(self, assignment):\n if len(assignment) == len(self.domains):\n return True\n\n else:\n return False",
"def assignment_complete(self, assignment):\n # print(\"Entered assignment_complete Function\")\n for var in assignment:\n if assignment[var] is None:\n return False\n return self.consistent(assignment)\n\n # raise NotImplementedError",
"def consistent(self,assignment):\n return all(con.holds(assignment)\n for con in self.constraints\n if all(v in assignment for v in con.scope))",
"def isAssignment(self):\n return _libsbml.Rule_isAssignment(self)",
"def is_assignment(*args):\n return _ida_hexrays.is_assignment(*args)",
"def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if asname is not None:\n if asname.name is node:\n return True\n return False",
"def assignment(self):\n shards = self.line.split('=')\n if len(shards) == 2:\n return True",
"def backtrack(self, assignment):\n # As stated above, if all variables in assignment is 1\n # then all values have been set and we return assignment \n if all(len(l) == 1 for l in assignment.values()):\n return assignment\n\n # Pick the next unnassigned variable that we are going to check \n key, values = self.select_unassigned_variable(assignment)\n # Loop through all the allowed values of this square in the sudoku board\n for value in values:\n # Do a deepcopy cuz otherwise R.I.P\n deep = copy.deepcopy(assignment)\n # Checks if this current value is consistent with the rest\n # of the sudoku board \n if self.check_consistency(deep, key, value):\n # IF it is consistent then we set this square to have this value \n deep[key] = [value]\n # Do inference check for hyper optimized code\n if self.inference(deep, self.get_all_arcs()):\n self.counter += 1\n result = self.backtrack(deep)\n if result is not False:\n return result\n else:\n self.fails += 1\n else:\n # Continue looping through the values of the currently selected \n # sudoku-square if the value was inconsistent with the board \n continue\n return False",
"def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True",
"def goal_test(self, state):\r\n assignment = dict(state)\r\n return (len(assignment) == len(self.variables)\r\n and all(self.nconflicts(variables, assignment[variables], assignment) == 0\r\n for variables in self.variables))",
"def check_assignment_consistency(self, assign_df=None, threshold=0.1):\n \n # If the user hasn't specified an assign_df, use one already calculated \n # for this NAPS_assigner instance\n if assign_df is None:\n set_assign_df = True\n assign_df = self.assign_df\n else:\n set_assign_df = False\n \n # First check if there are any sequential atoms\n carbons = pd.Series([\"C\",\"CA\",\"CB\"])\n carbons_m1 = carbons + \"m1\"\n seq_atoms = carbons[carbons.isin(assign_df.columns) & \n carbons_m1.isin(assign_df.columns)]\n seq_atoms_m1 = seq_atoms+\"m1\"\n #seq_atoms = list(seq_atoms)\n \n if seq_atoms.size==0:\n # You can't do a comparison\n assign_df[\"Max_mismatch_prev\"] = np.NaN\n assign_df[\"Max_mismatch_next\"] = np.NaN\n assign_df[\"Num_good_links_prev\"] = np.NaN\n assign_df[\"Num_good_links_next\"] = np.NaN\n return(assign_df)\n else:\n # First, get the i and i-1 shifts for the preceeding and \n # succeeding residues\n tmp = assign_df.copy()\n tmp = tmp.loc[tmp[\"Dummy_res\"]==False,]\n tmp.index = tmp[\"Res_N\"]\n tmp = tmp[list(seq_atoms)+list(seq_atoms_m1)]\n tmp_next = tmp.copy()\n tmp_next.index -= 1\n tmp_prev = tmp.copy()\n tmp_prev.index += 1\n tmp = tmp.join(tmp_next, rsuffix=\"_next\")\n tmp = tmp.join(tmp_prev, rsuffix=\"_prev\")\n # Calculate mismatch for each atom type\n for atom in seq_atoms:\n tmp[\"d\"+atom+\"_prev\"] = tmp[atom+\"m1\"] - tmp[atom+\"_prev\"]\n tmp[\"d\"+atom+\"_next\"] = tmp[atom] - tmp[atom+\"m1_next\"]\n # Calculate maximum mismatch\n tmp[\"Max_mismatch_prev\"] = tmp[\"d\"+seq_atoms+\"_prev\"].max(axis=1, \n skipna=True)\n tmp[\"Max_mismatch_next\"] = tmp[\"d\"+seq_atoms+\"_next\"].max(axis=1,\n skipna=True)\n \n # Calculate number of consistent matches\n tmp[\"Num_good_links_prev\"] = (tmp[\"d\"+seq_atoms+\"_prev\"]<threshold).sum(axis=1)\n tmp[\"Num_good_links_next\"] = (tmp[\"d\"+seq_atoms+\"_next\"]<threshold).sum(axis=1)\n \n # Join relevant columns back onto assign_df\n tmp[\"Res_N\"] = tmp.index\n assign_df = assign_df.join(tmp.loc[:,[\"Max_mismatch_prev\", \n \"Max_mismatch_next\", \n \"Num_good_links_prev\", \n \"Num_good_links_next\"]], \n on=\"Res_N\")\n if set_assign_df:\n self.assign_df = assign_df\n return(assign_df)",
"def isAssigned(self):\n if self.getProton1Assignments() and self.getProton2Assignments():\n return 1\n else:\n return 0",
"def win_condition(self):\n if self.letters_wrong < 5:\n if '__ ' in self.new_string:\n return False\n else:\n return True\n else:\n return True",
"def FullCheck(field):\n temp_list = field[:]\n field_copy = field[:]\n if temp_list == Transform(field_copy, \"w\"):\n if temp_list == Transform(field_copy, \"a\"):\n if temp_list == Transform(field_copy, \"s\"):\n if temp_list == Transform(field_copy, \"d\"):\n return True\n return False",
"def _consistentWithWA_(self, span, lan):\n\t\tif lan == 'src':\n\t\t\twordAlign = self.waMatrix\n\t\telse:\n\t\t\twordAlign = [[self.waMatrix[i][j] for i in xrange(len(self.waMatrix))] for j in xrange(len(self.waMatrix[0]))] \n\n\t\tpos1 = [j for i in xrange(span[0], span[1]) for j in xrange(len(wordAlign[i])) if wordAlign[i][j] == 1]\n\t\tif pos1 == []: return True\n\n\t\tfor i in xrange(span[0], span[1]):\n\t\t\tfor j in xrange(min(pos1), max(pos1) + 1):\n\t\t\t\tif sum([wordAlign[row][j] for row in xrange(len(wordAlign[:span[0]]))]) == 0 and \\\n\t\t\t\t\t\tsum([wordAlign[row][j] for row in xrange(span[1], len(wordAlign))]) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\t\t#print >> debug_log, 'consistent:', span\n\t\treturn True",
"def check_correctness(self):\n\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = gt_file.readlines()\n\n # Check for inequality\n if len(out_lines) != len(gt_lines):\n return 0\n\n # Check for inequality\n for i in range(len(out_lines)):\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n\n return 1",
"def viableWord(self, word, filledInSpaces):\r\n \r\n # Check if it fits blanks\r\n for (index, letter) in filledInSpaces:\r\n if letter != word[index]:\r\n return False\r\n\r\n # Check if it fits unused\r\n for letter in word:\r\n if letter in self.wrongLetters:\r\n return False\r\n\r\n return True",
"def checkPermutation(string1, string2):\n string1_content = {}\n # Hash the first string\n for i in string1:\n if string1_content.get(i) is None:\n string1_content[i] = 1\n else:\n string1_content[i] += 1\n\n # For each character in the section string, search for it\n for i in string2:\n if string1_content.get(i) is None:\n return False\n string1_content[i] -= 1\n\n # Make sure every character in the first string had a matching character in the second string\n for key, value in string1_content.items():\n if value != 0:\n return False\n return True",
"def inconsistent(p, guesses):\n for guess in guesses:\n res = check(guess[0], p)\n (rightly_positioned, permutated) = guess[1]\n if res != [rightly_positioned, permutated]:\n return True # inconsistent\n return False # i.e. consistent",
"def check_combination(self, combination):\n\n # we first check if there are any pieces of the right value well placed.\n for j in range(0, 4):\n if combination[j] == self.answer[j]:\n self.try_return['well_placed'] += 1\n self.already_checked += [combination[j]]\n self.avoid += [j]\n\n for p in range(0, 4):\n for s in range(0, 4):\n if not p in self.avoid:\n if combination[s] == self.answer[p] and not combination[s] in self.already_checked:\n\n self.try_return['misplaced'] += 1\n self.duplicate += [combination[s]]\n if self.duplicate.count(combination[s]) > 1:\n self.try_return['misplaced'] -= 1",
"def check_permutation_of(string1,string2):\n if len(string1) != len(string2): #O(1)\n return False\n return collections.Counter(string1) == collections.Counter(string2) #O(n+n) to make the dictionaries\n #O(n+n) to compare equality?\n #so O(4n) == O(n).",
"def same_as(self, space, in_space):\n if self.marks == space.marks and self.genus == space.genus:\n return True\n space = space.complementary_component(in_space)\n if self.marks == space.marks and self.genus == space.genus:\n return True\n return False",
"def backtrack(self, assignment):\n # if the assignment is complete\n if self.assignment_complete(assignment):\n # return the assignment, crossword is complete\n return assignment\n # pick a variable to try to assign\n var = self.select_unassigned_variable(assignment)\n # for each value in the variable's domain\n for value in self.order_domain_values(var, assignment):\n # attempt to assign this value and fit it into the crossword\n # make a copy of the current assignments\n trial = assignment.copy()\n # add the trial value to the test assignment\n trial[var] = value\n # if the test assignment is consistent\n if self.consistent(trial):\n # add the trial assignment to the current list of assignments\n assignment[var] = value\n # take the next backtrack step with this new assign,ent\n result = self.backtrack(assignment)\n # if the backtrack is a success\n if result is not None:\n # we have a match\n return result\n # a backtrack further down failed, so remove the trial assignment\n assignment.pop(var)\n # no assignment was possible, return None\n return None",
"def test_get_consensus_assignment_overlapping_names(self):\r\n # here the 3rd level is different, but the 4th level is the same\r\n # across the three assignments. this can happen in practice if\r\n # three different genera are assigned, and under each there is\r\n # an unnamed species\r\n # (e.g., f__x;g__A;s__, f__x;g__B;s__, f__x;g__B;s__)\r\n # in this case, the assignment should be f__x.\r\n in1 = [['Ab', 'Bc', 'De', 'Jk'],\r\n ['Ab', 'Bc', 'Fg', 'Jk'],\r\n ['Ab', 'Bc', 'Hi', 'Jk']]\r\n\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp}\r\n expected = (['Ab', 'Bc'], 1., 3)\r\n t = UclustConsensusTaxonAssigner(params)\r\n self.assertEqual(t._get_consensus_assignment(in1),\r\n expected)\r\n\r\n # here the third level is the same in 4/5 of the\r\n # assignments, but one of them (z, y, c) refers to a\r\n # different taxa since the higher levels are different.\r\n # the consensus value should be 3/5, not 4/5, to\r\n # reflect that.\r\n in2 = [['a', 'b', 'c'],\r\n ['a', 'd', 'e'],\r\n ['a', 'b', 'c'],\r\n ['a', 'b', 'c'],\r\n ['z', 'y', 'c']]\r\n expected = (['a', 'b', 'c'], 0.6, 5)\r\n t = UclustConsensusTaxonAssigner(params)\r\n self.assertEqual(t._get_consensus_assignment(in2),\r\n expected)",
"def is_permutation(a, b):\n a, b = str(a), str(b)\n return(len(a) == len(b) and Counter(a) == Counter(b))",
"def check_win(puzzle: str, solution: str) -> bool:\r\n # Check if every character besides the last is the same\r\n return puzzle[:-1] == solution[:-1]",
"def _does_words_matches(original_word: str, encoded_word: str) -> bool:\n return(\n len(original_word) == len(encoded_word) and\n original_word[0] == encoded_word[0] and\n original_word[-1] == encoded_word[-1] and\n sorted(original_word[1:-1]) == sorted(encoded_word[1:-1])\n )"
] | [
"0.8402212",
"0.8299707",
"0.702421",
"0.6465896",
"0.64497894",
"0.6389745",
"0.6059358",
"0.6028566",
"0.58353883",
"0.5827617",
"0.5789568",
"0.5764622",
"0.57099473",
"0.5694708",
"0.5602207",
"0.55700195",
"0.5525498",
"0.5517784",
"0.5466885",
"0.546393",
"0.54609096",
"0.54444313",
"0.54313844",
"0.54272515",
"0.54264945",
"0.5421803",
"0.5414445",
"0.5414093",
"0.5387538",
"0.5380882"
] | 0.83261746 | 1 |
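
The overlap bookkeeping in the `consistent` implementations above reduces to comparing single letters at paired indices. A minimal standalone sketch of that check, with hypothetical words and an assumed `(i, j)` overlap pair as in the snippets above:

# Minimal sketch of the pairwise overlap check used above.
# Assumes an overlap is an (i, j) pair: letter i of the first word
# must equal letter j of the second word.
def letters_match(word_a, word_b, overlap):
    i, j = overlap
    return word_a[i] == word_b[j]

# "SEVEN" crossing "NINE":
print(letters_match("SEVEN", "NINE", (2, 0)))   # False: 'V' vs 'N' -> inconsistent
print(letters_match("SEVEN", "NINE", (4, 0)))   # True:  'N' vs 'N' -> letters agree
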
Using Backtracking Search, take as input a partial assignment for the crossword and return a complete assignment if possible to do so. `assignment` is a mapping from variables (keys) to words (values). If no assignment is possible, return None. | def backtrack(self, assignment):
# print("Entered backtrack Function")
# Check if assignment is complete
if len(assignment) == len(self.domains):
return assignment
# Try a new variable
var = self.select_unassigned_variable(assignment)
word_list = self.order_domain_values(var, assignment)
for word in word_list:
new_assignment = assignment.copy()
new_assignment[var] = word[0]
if self.consistent(new_assignment):
result = self.backtrack(new_assignment)
if result is not None:
return result
return None
# raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backtrack(self, assignment):\n # if the assignment is complete\n if self.assignment_complete(assignment):\n # return the assignment, crossword is complete\n return assignment\n # pick a variable to try to assign\n var = self.select_unassigned_variable(assignment)\n # for each value in the variable's domain\n for value in self.order_domain_values(var, assignment):\n # attempt to assign this value and fit it into the crossword\n # make a copy of the current assignments\n trial = assignment.copy()\n # add the trial value to the test assignment\n trial[var] = value\n # if the test assignment is consistent\n if self.consistent(trial):\n # add the trial assignment to the current list of assignments\n assignment[var] = value\n # take the next backtrack step with this new assign,ent\n result = self.backtrack(assignment)\n # if the backtrack is a success\n if result is not None:\n # we have a match\n return result\n # a backtrack further down failed, so remove the trial assignment\n assignment.pop(var)\n # no assignment was possible, return None\n return None",
"def backtrack(self, assignment):\n #if a solution has been found, returns the solution, this is used for recursive purposes\n if self.assignment_complete(assignment) and self.consistent(assignment):\n return assignment\n #select the most optimal variable/node\n var = self.select_unassigned_variable(assignment)\n #assigns a word left in the domain of var and assigns it to var\n for word in self.order_domain_values(var, assignment):\n assignment[var]= word\\\n #if the assignment is consistent, recursively call backtrack\n if self.consistent(assignment):\n result= self.backtrack(assignment)\n if result != False:\n return assignment\n #if the assignment is not consistent at any point, remove the latest assignment\n assignment.pop(var)\n\n return None",
"def backtrack(self, assignment):\n # As stated above, if all variables in assignment is 1\n # then all values have been set and we return assignment \n if all(len(l) == 1 for l in assignment.values()):\n return assignment\n\n # Pick the next unnassigned variable that we are going to check \n key, values = self.select_unassigned_variable(assignment)\n # Loop through all the allowed values of this square in the sudoku board\n for value in values:\n # Do a deepcopy cuz otherwise R.I.P\n deep = copy.deepcopy(assignment)\n # Checks if this current value is consistent with the rest\n # of the sudoku board \n if self.check_consistency(deep, key, value):\n # IF it is consistent then we set this square to have this value \n deep[key] = [value]\n # Do inference check for hyper optimized code\n if self.inference(deep, self.get_all_arcs()):\n self.counter += 1\n result = self.backtrack(deep)\n if result is not False:\n return result\n else:\n self.fails += 1\n else:\n # Continue looping through the values of the currently selected \n # sudoku-square if the value was inconsistent with the board \n continue\n return False",
"def select_unassigned_variable(self, assignment):\n var_list= []\n #add unassigned variabled to a list along with the number of words left in its domain\n for var in self.domains:\n if var not in assignment:\n var_list.append((var, len(self.domains[var])))\n #sort this list by the number of words left in its domain\n var_list.sort(key= lambda x:x[1])\n\n #list for variables that are tied for least words left in domain\n equal_vars= [list(var_list[0])]\n for i in range(len(var_list)):\n #adds variables with same number of words left in domain\n if var_list[0][1] == var_list[i][1] and var_list[i] != var_list[0]:\n equal_vars.append(list(var_list[i]))\n\n \n #change the encoded information for words left in domain to the number of neighbors the variable had (highest degree)\n for i in range(len(equal_vars)):\n equal_vars[i][1]= len(self.crossword.neighbors(equal_vars[i][0]))\n\n #sort the list by the highest degree\n equal_vars.sort(key= lambda x:x[1])\n \n #return var with highest degree\n return equal_vars[0][0]",
"def backtracking_search(csp):\n if backtrack(csp):\n return csp.assignment\n else:\n return None",
"def backtracking_search(csp):\n if backtrack(csp):\n return csp.assignment\n else:\n return None",
"def consistent(self, assignment):\n # for each of the current assignments\n for word in assignment:\n # if the word does not fit in the gaps\n if len(assignment[word]) != word.length:\n # reject attempt\n return False\n # if the word is already in the assignment\n if list(assignment.values()).count(assignment[word]) > 1:\n # reject attempt\n return False\n # for each of the overlaps\n for overlap in self.crossword.overlaps:\n # if the overlap isn't empty and is an overlap for the word\n # overlaps are a superset: if the overlap of (x, y) is in the set, so is (y, x), so we can just go by the first overlap element\n if self.crossword.overlaps[overlap] is not None and overlap[0] == word:\n # try to access the word assignment for the other overlap target\n try:\n test_word = assignment[overlap[1]]\n # if it does not exist in the assignment\n except KeyError:\n # continue to the next overlap\n continue\n # if the other overlap target has been assigned\n else:\n # extract the letter we want to match for the overlap\n test_letter = test_word[self.crossword.overlaps[overlap][1]]\n # if the letters do not match\n if assignment[word][self.crossword.overlaps[overlap][0]] != test_letter:\n # reject attempt\n return False\n return True",
"def select_unassigned_variable(self, assignment):\n # print(\"Entered select_unassigned_variable Function\")\n # print(\"Assignment\")\n # print(assignment)\n variables = set()\n variables.update(self.domains.keys())\n unassigned_variables = set()\n unassigned_variables.update(variables.difference(assignment.keys()))\n # print(\"All Variables\")\n # print(variables)\n # print(\"Unassigned Variables\")\n # print(unassigned_variables)\n\n # This chooses the variables with the smallest domain from this list (unassigned_variables)\n var_list = []\n for variable in unassigned_variables:\n var_list.append( (variable, len(self.domains[variable]), len(self.crossword.neighbors(variable)) ) )\n \n var_list.sort(key = self.sort_by_domain)\n var_list.sort(reverse=True, key = self.sort_by_neighbors)\n\n # print(\"var_list\")\n # print(var_list) \n \n return var_list[0][0]\n\n # raise NotImplementedError",
"def satisfying_assignment(formula):\n if len(formula) == 0:\n return {}\n solution = find_solution(formula)\n if solution != {}:\n return solution\n return None",
"def backtracking_search(self):\n # Make a so-called \"deep copy\" of the dictionary containing the\n # domains of the CSP variables. The deep copy is required to\n # ensure that any changes made to 'assignment' does not have any\n # side effects elsewhere.\n assignment = copy.deepcopy(self.domains)\n\n # Run AC-3 on all constraints in the CSP, to weed out all of the\n # values that are not arc-consistent to begin with\n self.inference(assignment, self.get_all_arcs())\n # Call backtrack with the partial assignment 'assignment'\n\n return self.backtrack(assignment)",
"def satisfying_assignment(formula):\n # convert the formula to a list of sets.\n formula = [set(i) for i in formula]\n\n # call the helper starting with the givne formula and an empty assignments\n # dictionary.\n result = sat_helper(formula, {})\n if result[0]:\n return result[1] # result[1] will be the dictionary of assignments.\n else:\n return None",
"def select_unassigned_variable(self, assignment):\n # sort crossword variables that are not in assignment by the length of their domain lists\n available = sorted([x for x in self.crossword.variables if x not in assignment], key=lambda x: len(self.domains[x]))\n # sort the list of available variables that have the same size domain as the shortest by the number of neighbors they have\n available = sorted([x for x in available if len(self.domains[x]) == len(self.domains[available[0]])], key=lambda x: len(self.crossword.neighbors(x)))\n # return the last element of the array\n return available.pop()",
"def select_unassigned_variable(self, assignment):\n # Simply just pick the next value that has more than one value\n # in the variable list\n for key, value in assignment.iteritems():\n if len(value) > 1:\n return key, value",
"def fold_assignment(self):\n return self._parms.get(\"fold_assignment\")",
"def consistent(self, assignment):\n # print(\"Entered consistent Function\")\n # print(\"assignment\")\n # print(assignment)\n\n overlaps = self.crossword.overlaps\n value_set = set()\n for variable in assignment: \n #checking overlaps with neighbors\n neighbors = self.crossword.neighbors(variable)\n for neighbor in neighbors:\n overlap = overlaps[(variable, neighbor)]\n if (neighbor in assignment):\n # print(\"var 1 overlap letter\")\n # print(assignment[variable][overlap[0]])\n # print(\"var 2 overlap letter\")\n # print(assignment[neighbor][overlap[1]])\n if (assignment[variable][overlap[0]] is not assignment[neighbor][overlap[1]]):\n return False\n \n # print(\"neighbors\")\n # print(neighbors)\n\n #checking that the assignment is the correct length for the variable\n if (variable.length != len(assignment[variable])):\n return False\n\n #the set to check for distinct variables later\n value_set.add(assignment[variable])\n\n #Checking that all variables are distinct\n #these should be the same length unless two or more variables share an value\n if( len(value_set) is not len(assignment)): \n return False\n \n return True\n\n # raise NotImplementedError",
"def holds(self,assignment):\n return self.condition(*tuple(assignment[v] for v in self.scope))",
"def recurse(assignment, states, domains, neighbors, user_dict):\n\t\tif len(unassigned) == 0:\n\t\t\treturn assignment\n\n\t\tvarr[0] = randomchooseanddelete()\n\n\t\tfor val in stardomain(varr[0], curr_domains):\n\t\t\tassignment[varr[0]] = val\n\t\t\tforwardcheck(varr[0], val, assignment, user_dict)\n\t\t\tnextstep = recurse(assignment, states, domains, neighbors, user_dict)\n\t\t\tif nextstep != None:\n\t\t\t\treturn nextstep\n\t\treturn None",
"def satisfying_assignment(formula):\n return solver(convert_formula(formula))",
"def eval_assignment(assignment, motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None",
"def _select_unassigned_variable(self, assignment, csp):\n for var in csp.get_variables():\n if not assignment.has_assignment_for(var):\n return var\n\n return None",
"def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True",
"def solve_crossword(vocab, blanks):\n # this value can be freely adjusted\n attempts = len(blanks)**2\n # attempts to solve puzzle with random restart if a \"failure\" occurs\n # this is one way to deal getting stuck at a local maximum or plateau when hill climbing\n for i in range(attempts):\n # print(\"Attempt \" + str(i) + \": \")\n solution = solve_crossword_helper(vocab, blanks)\n if solution:\n return solution\n return None",
"def first_unassigned_variable(assignment, csp):\r\n # print(first([var for var in csp.variables if var not in assignment]))\r\n\r\n return first([var for var in csp.variables if var not in assignment])",
"def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True",
"def solve_part_one(self):\n self.initialize_values_and_rules()\n current_bot = None\n ret = None\n while True:\n for k in self.bots:\n if len(self.bots[k]) == 2:\n current_bot = k\n if current_bot is None:\n break\n\n low_type, dest_low, high_type, dest_high = self.rules[current_bot]\n chips = sorted(self.bots[current_bot])\n if chips[0] == 17 and chips[1] == 61:\n ret = current_bot\n\n del self.bots[current_bot]\n current_bot = None\n\n self.assign(low_type, dest_low, chips[0])\n self.assign(high_type, dest_high, chips[1])\n return ret",
"def solver(formula):\n # dictionary initializing output solution\n assignments={}\n\n # check and simplify unit clauses\n for clause in formula:\n # if clause is a unit clause\n if len(clause)==1:\n # extract random literal from clause\n var,val=get_from_set(clause)\n # make assignment such that unit clause is true\n assignments[var] = val\n # update rest of the formula with such assignment\n formula = expand(formula,var,val)\n\n # RECURSION BASE CASE 1: found one of possible solutions\n # NOTE: since I eliminate clauses once satisfied, list is \n # empty when all clauses are satisfied. \n if not formula:\n return assignments\n\n # RECURSION BASE CASE 2: impossible due to contradiction\n # NOTE: if any of the clauses is false, then no solution\n if not all(formula):\n return None\n\n # CORE OF RECURSION: recursive simplification of CNF formula\n var, val = get_from_set(formula[0])\n for attempt in (val, not val): # e.g try True, if no success try False \n assignments[var] = attempt\n new_assignments = solver(expand(formula,var,attempt))\n if new_assignments is not None:\n assignments.update(new_assignments)\n return assignments\n\n # if we get to this line, neither attempt yields a solution\n return None",
"def __set_has_homework_or_assignment(text=str, replacement_text=str, word_list=list):\n word_set = set()\n tokenized_text = nltk.word_tokenize(text)\n # loop through all the words to see if it contains homework or its synonyms\n for word in tokenized_text:\n word_lem = wordnet.morphy(word, wordnet.NOUN)\n if (word_lem is not None) and (word_lem in word_list):\n word_set.add(word)\n # convert to list and sort based on length\n word_set = list(word_set)\n word_set.sort(key=len, reverse=True)\n # replace those words, if any, with the replacement text\n for word in word_set:\n text = text.replace(word, replacement_text)\n return text",
"def check(self,word):\n if self.pre:\n def sub_word(chars):\n if re.match('^'+chars+'.*',word):\n return word[len(chars):]\n else:\n return None\n else:\n def sub_word(chars):\n if re.match('^.*'+chars+'$',word):\n return word[:-len(chars)]\n else:\n return None\n\n if word == '':\n return self\n for chars in self.branches.keys():\n res = sub_word(chars)\n if res:\n return self.branches[chars].check(res)\n elif res == '':\n return self.branches[chars]\n return None",
"def select_unassigned_variable(csp:list,assignment:set,method=0) -> variable:\n if(method not in range(3)):\n return \"method out of bounds\"\n \n if(method == 0):\n y = rdint(0,len(csp)-1) #rdint is inclusive, hence the -1\n var = csp[y]\n while(var in assignment):\n y = rdint(0,len(csp)-1) #rdint is inclusive, hence the -1\n var = csp[y]\n return var\n \n elif(method == 1):\n #1:minimum-remaining value\n least_domain = math.inf\n low_var = None\n for var in csp:\n if(var not in assignment):\n dm_size = var.domain_size()\n if(dm_size == 0):\n return False\n if(dm_size < least_domain):\n least_domain = dm_size\n low_var = var\n return low_var\n \n elif(method == 2):\n #2:minimum-remaining value together with degree\n #the degree of the node works as a tie breaker, otherwise it works\n #just like minimum remaining value\n least_domain = math.inf\n low_var = None\n for var in csp:\n if(var not in assignment):\n dm_size = var.domain_size()\n if(dm_size == 0):\n return False\n if(dm_size < least_domain):\n least_domain = dm_size\n low_var = var\n elif(dm_size == least_domain and var.constraint_size() > low_var.constraint_size()):\n least_domain = dm_size\n low_var = var\n return low_var",
"def solve(puzzle_input):\r\n return {'a': part_a(puzzle_input), 'b': part_b(puzzle_input)}"
] | [
"0.7360267",
"0.70080864",
"0.6352615",
"0.62211263",
"0.6217823",
"0.6217823",
"0.5960085",
"0.59551066",
"0.5912701",
"0.58554476",
"0.5837058",
"0.5765993",
"0.57650095",
"0.5723373",
"0.55428743",
"0.5452668",
"0.5425594",
"0.5342919",
"0.53365654",
"0.5329358",
"0.53284115",
"0.52099735",
"0.51919985",
"0.5122795",
"0.5088097",
"0.4996302",
"0.49825743",
"0.49695617",
"0.49614692",
"0.49407268"
] | 0.70967716 | 1 |
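
Taken together, the rows above (`assignment_complete`, `consistent`, `backtrack`) follow the standard backtracking-search pattern. A self-contained sketch of that pattern on a toy CSP (graph colouring rather than a crossword; all names here are illustrative, not from the dataset):

# Minimal backtracking search over a toy CSP, mirroring the
# complete/consistent/backtrack structure shown above.
VARIABLES = ["A", "B", "C"]
DOMAIN = ["red", "green"]
NEIGHBORS = {"A": ["B", "C"], "B": ["A"], "C": ["A"]}   # adjacent vars must differ

def complete(assignment):
    return len(assignment) == len(VARIABLES)

def consistent(assignment):
    return all(assignment[v] != assignment[n]
               for v in assignment for n in NEIGHBORS[v] if n in assignment)

def backtrack(assignment):
    if complete(assignment):
        return assignment
    var = next(v for v in VARIABLES if v not in assignment)
    for value in DOMAIN:
        trial = dict(assignment, **{var: value})
        if consistent(trial):
            result = backtrack(trial)
            if result is not None:
                return result
    return None

print(backtrack({}))   # e.g. {'A': 'red', 'B': 'green', 'C': 'green'}
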
Draws text onto a given surface. | def draw_text(self, text, font, color, surface, x, y): #use for narrative in end sequence
text_obj = font.render(text, True, color)
text_rect = text_obj.get_rect()
text_rect.center = (x, y)
surface.blit(text_obj, text_rect) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_text(screen, font, text, surfacewidth, surfaceheight):\n\tfw, fh = font.size(text) # fw: font width, fh: font height\n\tsurface = font.render(text, True, (0, 0, 255))\n\t# // makes integer division in python3 \n\tscreen.blit(surface, (0,0))",
"def drawText(text, font, surface, x, y, textcolour):\r\n textobj = font.render(text, 1, textcolour)\r\n textrect = textobj.get_rect()\r\n textrect.topleft = (x, y)\r\n surface.blit(textobj, textrect)",
"def render_text_on_surface(text, surface, font, color=BLACK, top_padding=0, left_pading=0):\n rect = surface.get_rect()\n \n last_top = rect.top + top_padding\n for index, line in enumerate(text.split(\"\\n\")):\n text_surf = font.render(line, True, color)\n text_rect = text_surf.get_rect()\n text_rect.topleft = (rect.left + left_pading, last_top)\n surface.blit(text_surf, text_rect)\n \n last_top += text_rect.h",
"def draw_text(display, font_name, text, size, color, x, y):\n font = pg.font.Font(font_name, size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n display.blit(text_surface, text_rect)",
"def draw_text(\n self, text: str, size: int, color: Tuple[int, int, int], x: int, y: int\n ) -> None:\n # TODO: Select and use a better font\n font = pg.font.Font(pg.font.get_default_font(), size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n self.screen.blit(text_surface, text_rect)",
"def draw_text(screen, text, size, x, y):\r\n font = pygame.font.Font(font_name, size)\r\n text_surface = font.render(text, True, WHITE)\r\n text_rect = text_surface.get_rect()\r\n text_rect.midtop = (x, y)\r\n screen.blit(text_surface, text_rect)\r\n pygame.display.update()",
"def draw_text(self, text, size, x, y ,color=pygame.Color(\"white\")):\n font = pygame.font.Font(self.font_name,size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.center = (x,y)\n self.display.blit(text_surface,text_rect)",
"def draw_text(surf, font, text, pos,\n antialiasing=True,\n color=(255, 255, 255),\n anchor=\"northwest\"):\n x, y = pos\n s = font.render(text, antialiasing, color)\n s_rect = s.get_rect()\n\n if \"north\" in anchor:\n s_rect.y = y\n elif \"south\" in anchor:\n s_rect.y = y - s_rect.h\n else:\n s_rect.y = y - s_rect.h/2\n\n if \"west\" in anchor:\n s_rect.x = x\n elif \"east\" in anchor:\n s_rect.x = x - s_rect.w\n else:\n s_rect.x = x - s_rect.w/2\n\n surf.blit(s, s_rect)",
"def display_text(text, x, y, size):\r\n font = pygame.font.Font('freesansbold.ttf', size)\r\n text_surf, text_rect = text_objects(text, font)\r\n text_rect.center = (x, y)\r\n display.blit(text_surf, text_rect)",
"def draw_pos_text(self, text):\n fw, fh = self.font.size(text) # fw: font width, fh: font height\n surface = self.font.render(text, True, (0, 255, 0))\n # // makes integer division in python3\n self.screen.blit(surface, ((self.width - fw) // 2, (self.height - fh) // 2))",
"def draw_text(self, text, color = (100, 255, 100), dw = 0, dh = 0):\n fw, fh = self.font.size(text) # fw: font width, fh: font height\n surface = self.font.render(text, True, color)\n # // makes integer division in python3\n self.screen.blit(surface, ((self.width - fw - dw) // 2, (self.height - dh) // 2))",
"def draw_text(self, display, text, size, x, y , mode):\n font = pygame.font.Font(self.font_name, size)\n text_surface = font.render(text, True, (0,0,0))\n text_rect = text_surface.get_rect()\n if mode == \"left\":\n text_rect.topleft = (x,y)\n elif mode == \"center\":\n text_rect.center = (x,y)\n display.blit(text_surface, text_rect)",
"def text_draw(self, x, y, text, style={}):",
"def draw_text(self, i, j, text, col, bg=None):\n txt = self.font.render(text, True, col, bg)\n rect = txt.get_rect()\n rect.center = self.get_rect(i, j).center\n self.screen.blit(txt, rect)",
"def draw_text(window, text, size, text_pos, color=WHITE, bold=False):\n font = pygame.font.Font(FONT_PATH, size)\n if bold:\n font.set_bold(1)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = text_pos\n window.blit(text_surface, text_rect)",
"def drawtxt(txt,font,fs,clr,x,y,w,h,tf):\n if tf == True:\n pygame.draw.rect(screen, BLACK, (x,y,w,h))\n if pygame.font:\n font = pygame.font.Font(font,fs)\n text = font.render(txt, False, clr)\n screen.blit(text, (x,y))\n pygame.display.update(x,y,w,h)",
"async def outline_text(draw_surface, coords, draw_text, font):\n draw = partial(draw_surface.text, text=draw_text, font=font,\n fill=\"black\")\n for offset_pair in product(range(-1, 2), repeat=2):\n draw((coords[0]+offset_pair[0], coords[1]+offset_pair[1]))\n draw(coords, fill=\"white\")",
"def showText(self, surface, point, text, color=None, size=20):\n if not color: color = self.color\n v = self / 2\n point = v(point)\n surface.print(text, tuple(point), color=color, size=size)",
"def draw_text(SCREEN, text, x, y):\n text = constants.CALIBRI_25.render(text, True, constants.BLACK)\n SCREEN.blit(text, (x, y))",
"def draw_text(text: str, surface: Surface, rect: Rect, font: Font, color: Color, line_spacing: int = -2, center: bool = True) -> list:\n\n font_height = font.size(\"Tg\")[1]\n if not isinstance(text, list):\n text = wrap_text(text, font, rect.width)\n\n printable_lines = 1\n for i in range(1, len(text)):\n if ((font_height + line_spacing) * (i + 1)) <= rect.height:\n printable_lines += 1\n\n y = rect.top\n if center:\n y = (rect.height / 2) - (((font_height + line_spacing) * printable_lines) / 2)\n\n for line in text[:printable_lines]:\n # render the line\n image = font.render(line, True, color)\n\n x = rect.left\n if center:\n x = (rect.width / 2) - (image.get_width() / 2)\n\n # blit the line\n surface.blit(image, (x, y))\n y += font_height + line_spacing\n\n return text[printable_lines:]",
"def print_text(TINY_FONT, x, y, text, color = white):\n text_image = TINY_FONT.render(text, True, color)\n gameDisplay.blit(text_image, (x,y))",
"def render_text(self, text, x_pos, y_pos, z_pos):\n GL.glDisable(GL.GL_LIGHTING)\n GL.glRasterPos3f(x_pos, y_pos, z_pos)\n font = GLUT.GLUT_BITMAP_HELVETICA_10\n\n for character in text:\n if character == '\\n':\n y_pos = y_pos - 20\n GL.glRasterPos3f(x_pos, y_pos, z_pos)\n else:\n GLUT.glutBitmapCharacter(font, ord(character))\n\n GL.glEnable(GL.GL_LIGHTING)",
"def draw_text(self, text, i, j, **params):",
"def render(cls, surface, text, font, position, anchor=Anchor.top_left, blend=0) -> None:\n x, y, w, h = cls.measure(text, font, position, anchor)\n gw = font[GLY][2]\n gh = font[GLY][3]\n\n for n, char in enumerate(text):\n if char in font[CHR]:\n ind = font[CHR].index(char)\n else:\n ind = 0\n\n # the char glyph tile x,y position in the grid\n tile = Vec.swap_xy(divmod(ind, font[GRD][0]))\n\n gx = (tile.x * font[CEL][0]) + font[GLY][0]\n gy = (tile.y * font[CEL][1]) + font[GLY][1]\n\n surface.blit(font[BMP], (x, y), (gx, gy, gw, gh), blend)\n\n x += gw",
"def create_surface_with_text(text, font_size, text_rgb, bg_rgb):\r\n font = pygame.freetype.SysFont(\"Courier\", font_size, bold=True)\r\n surface, _ = font.render(text=text, fgcolor=text_rgb, bgcolor=bg_rgb)\r\n return surface.convert_alpha()",
"def add_text(self, text, color, pos, font):\n text = font.render(text, True, color)\n text_rec = text.get_rect(center=pos)\n self.window.blit(text, text_rec)",
"def _draw_text(self, screen: Surface, changes: List[Rect]) -> None:\n orignal_rect = self._text_image.get_rect()\n\n centered_rect = orignal_rect.copy()\n centered_rect.center = self._rect.center\n\n clip_rect = centered_rect.clip(self._rect)\n centered_clip_rect = clip_rect.copy()\n centered_clip_rect.center = orignal_rect.center\n\n changes.append(screen.blit(self._text_image,\n clip_rect, centered_clip_rect))",
"def create_text(text, font_size, bold, text_color):\n myfont = pygame.font.SysFont(\"Courier\", font_size, bold)\n surface = myfont.render(text,True,text_color)\n return surface",
"def create_surface_with_text(text, font_size, text_rgb, bg_rgb):\n font = pygame.freetype.SysFont(\"Courier\", font_size, bold=True)\n surface, _ = font.render(text=text, fgcolor=text_rgb, bgcolor=bg_rgb)\n return surface.convert_alpha()",
"def drawText(font, textstr, clear_screen=True, color=(250, 10, 10)):\n if clear_screen:\n screen.fill(black) # black screen\n\n # Render font\n pltText = font.render(textstr, 1, color)\n\n # Center text\n textpos = pltText.get_rect()\n textpos.centerx = screen.get_rect().centerx\n textpos.centery = screen.get_rect().centery\n\n # Blit onto screen\n screen.blit(pltText, textpos)\n\n # Update\n pygame.display.update()"
] | [
"0.8523884",
"0.8420744",
"0.82012075",
"0.81627655",
"0.80817306",
"0.7885264",
"0.78392565",
"0.78264725",
"0.7806616",
"0.77619326",
"0.77171636",
"0.7670391",
"0.7642468",
"0.7597258",
"0.7542746",
"0.7457794",
"0.745692",
"0.7357429",
"0.7349292",
"0.7319174",
"0.729991",
"0.72916424",
"0.728086",
"0.7253746",
"0.7253538",
"0.7242685",
"0.7224358",
"0.72200406",
"0.7219605",
"0.7218411"
] | 0.85818034 | 0 |
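
The `draw_text` helpers above share the same call shape: render the string to a temporary surface, position its rect, then blit onto the target surface. A minimal sketch of a call site, assuming pygame is installed and a display is available; the window size, font, and text are placeholders:

# Minimal sketch of driving a draw_text-style helper with pygame.
import pygame

def draw_text(surface, text, font, color, x, y):
    text_obj = font.render(text, True, color)
    text_rect = text_obj.get_rect(center=(x, y))
    surface.blit(text_obj, text_rect)

pygame.init()
screen = pygame.display.set_mode((320, 200))
font = pygame.font.Font(None, 24)          # default pygame font, size 24
screen.fill((0, 0, 0))
draw_text(screen, "hello", font, (255, 255, 255), 160, 100)
pygame.display.flip()
pygame.time.wait(500)
pygame.quit()
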
Split the file and save chunks to separate files | def split(self):
print 'Splitting file', self.__filename
print 'Number of chunks', self.__numchunks, '\n'
try:
f = open(self.__filename, 'rb')
except (OSError, IOError), e:
raise FileSplitterException, str(e)
bname = (os.path.split(self.__filename))[1]
# Get the file size
fsize = os.path.getsize(self.__filename)
# Get size of each chunk
self.__chunksize = int(float(fsize)/float(self.__numchunks))
chunksz = self.__chunksize
total_bytes = 0
for x in range(self.__numchunks):
chunkfilename = bname + '-' + str(x+1) + self.__postfix
# if reading the last section, calculate correct
# chunk size.
if x == self.__numchunks - 1:
chunksz = fsize - total_bytes
try:
print 'Writing file',chunkfilename
data = f.read(chunksz)
total_bytes += len(data)
chunkf = file(chunkfilename, 'wb')
chunkf.write(data)
chunkf.close()
except (OSError, IOError), e:
print e
continue
except EOFError, e:
print e
break
print 'Done.' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_file(self, input_file):\r\n file_list = [] \r\n with open(input_file, 'r', encoding='GB18030', errors='ignore') as f_in:\r\n data = f_in.readlines()\r\n lines_num = len(data)\r\n size = lines_num // self.num_workers # lines splitted in a chunk\r\n start = 0\r\n end = size\r\n w_path = \"../data/\"\r\n for i in range(lines_num//size):\r\n chunk_name = \"chunk_\" + str(i) + \".dat\"\r\n with open(w_path + chunk_name, 'w', encoding='utf-8') as f_out:\r\n f_out.write(''.join(data[start:end]))\r\n start = start + size\r\n end = end + size\r\n file_list.append(\"../data/chunk_\" + str(i) + \".dat\")\r\n \r\n print(f\"File splitted into {self.num_workers} chunks.\")\r\n return file_list, size",
"def write_chunks(file, chunks):\n\n\tfor c in chunks:\n\n\t\tchunk(file, c[0], c[1])",
"def split_file(filename, split_num):\n root, ext = os.path.splitext(filename)\n with open(filename) as f:\n lines = f.readlines()\n total_line = len(lines)\n\n print lines[0].split('\\t')\n\n size = total_line / split_num\n\n print 'Total line: %d, splited file line number: %d' % (total_line, size)\n\n total_line - size * split_num\n for i in range(0, split_num):\n split_file = root + '_' + str(i+1) + ext\n\n start = i * size;\n end = (i+1) * size;\n if i == split_num - 1:\n end = total_line\n\n print 'splite file %s: line from %d to %d' % (split_file, start, end)\n\n with open(split_file, 'w') as fw:\n for j in range(start, end):\n fw.write('%s' % lines[j])",
"def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()",
"def split_single_file(self, filename):\n file_size = os.path.getsize(filename)\n chunk_size = (file_size + self.worker_num - 1) / self.worker_num\n file_handler = open(filename, \"r\")\n chunks = []\n pos = 0\n while pos < file_size:\n next_pos = min(pos + chunk_size, file_size)\n if pos == 0:\n chunks.append((filename, pos, self.find_next_newline(file_handler, next_pos)))\n else:\n chunks.append((filename, self.find_next_newline(file_handler, pos), self.find_next_newline(file_handler, next_pos)))\n pos = next_pos\n file_handler.close()\n return chunks",
"def combine(self):\n\n import re\n \n print 'Creating file', self.__filename\n \n bname = (os.path.split(self.__filename))[1]\n bname2 = bname\n \n # bugfix: if file contains characters like +,.,[]\n # properly escape them, otherwise re will fail to match.\n for a, b in zip(['+', '.', '[', ']','$', '(', ')'],\n ['\\+','\\.','\\[','\\]','\\$', '\\(', '\\)']):\n bname2 = bname2.replace(a, b)\n \n chunkre = re.compile(bname2 + '-' + '[0-9]+')\n \n chunkfiles = []\n for f in os.listdir(\".\"):\n print f\n if chunkre.match(f):\n chunkfiles.append(f)\n\n\n print 'Number of chunks', len(chunkfiles), '\\n'\n chunkfiles.sort(self.sort_index)\n\n data=''\n for f in chunkfiles:\n\n try:\n print 'Appending chunk', os.path.join(\".\", f)\n data += open(f, 'rb').read()\n except (OSError, IOError, EOFError), e:\n print e\n continue\n\n try:\n f = open(bname, 'wb')\n f.write(data)\n f.close()\n except (OSError, IOError, EOFError), e:\n raise FileSplitterException, str(e)\n\n print 'Wrote file', bname",
"def split(self):\n if(self.back == 'y'):\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1\n backNames = [self.file_path + str(num) + 'b' for num in range(len(files))]\n for num,file in enumerate(files):\n open(backNames[num],'w').write(file)\n else:\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1",
"def splitter(file_name: str, MAX_SIZE: int = 7):\n\n # convertion to MB\n MAX_SIZE = MAX_SIZE * 1024 * 1024\n\n # index go throught the bit stream\n start_index: int = 0\n\n # harvested data\n data: bytes = None\n\n created_files: int = 0\n\n with open(file_name, \"rb\") as input_stream:\n # while we didn't go out the file\n while data != b'':\n # we place the cursor at start index\n input_stream.seek(start_index)\n # read a chunk of size MAX_SIZE bytes\n data = input_stream.read(MAX_SIZE)\n\n if data == b'':\n break\n # then we open an output file\n with open(str(start_index) + \"_\" + file_name, \"wb\") as ouput_stream:\n # A write the related chunk in it\n ouput_stream.write(data)\n\n created_files += 1\n\n # we translate the cursor\n start_index += MAX_SIZE\n\n print(\"Done! \", created_files, \" files created\")",
"def split_file(self):\n # process lines into blocks with Parser until EOF triggers StopIteration\n while self.maf_lines:\n try:\n # rest counters and open new file at the top of the loop AFTER\n # the most recent yield\n if self._stop:\n self._yield(new_file=True)\n # try to get next block from Parser and write to current file\n block_string = self.parser.get_block(self.maf_lines).next()\n self.current_file.write(block_string)\n # update char count for the current file\n self.char_count += len(block_string)\n # if char count crosses limit, yield current file name start new file\n if self._stop:\n yield self.current_filename\n\n except StopIteration:\n self._yield(new_file=False)\n yield self.current_filename",
"def split_file(self, input_file, buffer=1024) -> str:\n file_size = os.stat(input_file).st_size\n with create_pg(total=file_size, leave=False, unit='B', unit_scale=True, unit_divisor=1024,\n desc='Splitting file') as t:\n\n with open(input_file, 'rb') as src:\n while True:\n with tempfile.NamedTemporaryFile() as f:\n with open(f.name, 'wb') as dest:\n written = 0\n while written < self.max_size:\n data = src.read(buffer)\n if data:\n dest.write(data)\n written += buffer\n t.update(len(data))\n else:\n if written == 0:\n return # file has ended on split size - don't yield\n\n break\n\n yield f.name",
"def join_chunks(self):\n if self.state == self.STATE_UPLOADING and self.total_chunks_uploaded == self.total_chunks:\n\n # create file and write chunks in the right order\n temp_file = open(self.full_path, \"wb\")\n for chunk in self.chunks.all():\n chunk_bytes = chunk.file.read()\n temp_file.write(chunk_bytes)\n temp_file.close()\n\n # set state as completed\n self.state = self.STATE_COMPLETED\n super(FlowFile, self).save()\n\n # delete chunks automatically if is activated in settings\n if FLOWJS_AUTO_DELETE_CHUNKS:\n self.chunks.all().delete()",
"def split_decode_file():\n # split files by chromosome\n header = []\n current_chrom = 'chr1'\n # file_template = decode_folder + '/{}.deCODE_2019.GRCh38.txt'\n file_template = decode_folder + '/{}.deCODE_2019_hg19.txt'\n decode_file = decode_folder + '/aau1043_DataS3_hg19_liftOver.bed'\n w = open(file_template.format(current_chrom), 'a')\n print('NOTE: appending to map files, not overwriting. may cause duplicates')\n with open(decode_file, 'r') as f:\n for line in f:\n # save the header info\n if line.startswith('#'):\n header.append(line)\n # save the column labels\n elif line.startswith('Chr'):\n header.append('# ' + line)\n # write header to first file now\n w.write(''.join(header))\n # the remaining lines are data\n else:\n # get the chromosome for the current line\n ch = line.split()[0]\n # if the chromosome matches the open file, write to it\n if ch == current_chrom:\n w.write(line)\n # if a new chromosome arises, switch to a new writefile\n else:\n w.close()\n current_chrom = ch\n w = open(file_template.format(current_chrom), 'a')\n # write header to file\n w.write(''.join(header))\n w.write(line)\n\n # close the last open file\n w.close()",
"def splitFile(f, rootdir=\"/tmp\", splitCmd=\"/usr/bin/split\", chunkSize=\"100m\"):\n d = str(uuid.uuid4())\n path = os.path.join(rootdir, d)\n # I want it to fail hard here\n os.makedirs(path)\n prefix = os.path.join(path, \"chunk-\")\n subprocess.check_call([splitCmd, \"-b\", chunkSize, \"-d\", \"-a\", \"5\", f, prefix])\n chunks = glob.glob(os.path.join(path, \"chunk-*\"))\n chunks.sort()\n return chunks",
"def split_file(in_file, num_splits, split_dir, mut_file):\n\n # create the output directory if it does\n # not exist\n if not os.path.exists(split_dir):\n os.mkdir(split_dir)\n\n # open the info file\n f = open(in_file)\n pdb_header = f.readline()\n\n # open the mutation file\n m = open(mut_file)\n mut_header = m.readline()\n\n # read into a dictionary containing\n # structure ids as keys and lines pertaining\n # to it as values\n pdb_dict = read_file(f)\n mut_dict = read_file(m)\n\n # determine total num of ids in file\n total_ids = len(list(pdb_dict.keys()))\n print(total_ids)\n # determine num of ids to put in each split\n num_ids = int(total_ids/num_splits)\n\n # counters\n count_file = 0\n count_id = num_ids\n\n # randomize order of insertions\n keys = list(pdb_dict.keys())\n random.shuffle(keys)\n\n # iterate through dict and write to files\n #for key in sorted(pdb_dict):\n for key in keys:\n\n # check if we need a new file\n if (count_id == num_ids and count_file < num_splits):\n count_id = 0\n pdb_out = open(split_dir + \"/pdb_info_split_\" + str(count_file) + \".txt\", 'w')\n pdb_out.write(pdb_header)\n mut_out = open(split_dir + \"/mut_info_split_\" + str(count_file) + \".txt\", 'w')\n mut_out.write(mut_header)\n count_file += 1\n\n # write all lines pertaining to the structure id\n for line in pdb_dict[key]:\n pdb_out.write(line)\n if key in mut_dict:\n for line in mut_dict[key]:\n mut_out.write(line)\n\n count_id += 1",
"def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)",
"def go(self):\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n for isplit,fof_split in enumerate(fof_splits):\n logger.info('%s %s' % (isplit,fof_split))\n self._write_split(isplit, fof_split)",
"def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]",
"def split_file(self):\n title = \"row_id,x,y,accuracy,time,place_id\\n\"\n print \"splitting files into grid files...\"\n sub_folder = os.path.join(Setting.grid_path, str(self.xsplit)+\"_\"+str(self.ysplit))\n if not os.path.exists(sub_folder):\n os.mkdir(sub_folder)\n for m in range(self.xsplit):\n # to avoid open too many files (ysplit should less than 1000 here)\n print \"starting No.\", m, \" subprocess...\"\n train_writers = []\n for n in range(self.ysplit):\n xfolder = os.path.join(sub_folder, str(m))\n if not os.path.exists(xfolder):\n os.mkdir(xfolder)\n yfolder = os.path.join(xfolder, str(n))\n if not os.path.exists(yfolder):\n os.mkdir(yfolder)\n train_file = os.path.join(yfolder, \"train.csv\")\n train_writers.append(open(train_file, \"w\"))\n train_writers[-1].write(title)\n\n for record in read_record(self.train_path):\n place_id = record[-1]\n rec_str = \",\".join([str(x) for x in record])\n for n in range(self.ysplit):\n row_id = 1\n slot = m*self.ysplit + n\n if place_id in self.grid_place[slot]:\n train_writers[n].write(str(row_id) + \",\" + rec_str + \"\\n\")\n row_id += 1\n\n for writer in train_writers:\n writer.close()\n\n test_writers = []\n for n in range(self.ysplit):\n test_file = os.path.join(sub_folder, str(m), str(n), \"test.csv\")\n test_writers.append(open(test_file, \"w\"))\n test_writers[-1].write(title)\n\n for record in read_record(self.test_path):\n x_ind, y_ind = grid_cut(record[0], record[1], self.xsplit, self.ysplit)\n grid_slot = x_ind*self.ysplit + y_ind\n for n in range(self.ysplit):\n row_id = 1\n slot = m*self.ysplit + n\n if grid_slot == slot:\n rec_str = \",\".join([str(x) for x in record])\n test_writers[n].write(str(row_id) + \",\" + rec_str + \"\\n\")\n row_id += 1\n\n for writer in test_writers:\n writer.close()",
"def split_start(infiles, outfiles):\n\n # split always runs exactly one job (unlike @subdivide)\n # So it implicitly combines all its inputs before running and generating multiple output\n # @originate generates multiple output so the input for @split is a list...\n infile = infiles[0]\n\n # clean up previous\n for f in outfiles:\n os.unlink(f)\n\n\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n #\n # Create more files than the previous invocation\n #\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n n_to_produce = len(outfiles) + 1\n for i in range(n_to_produce):\n f = '{}{}.split'.format(tempdir, i)\n open(f, 'a').close()",
"def split_train_into_chunks(chunk_size):\n for syscall_type in SYSCALLS:\n syscalls_split_file = open(f\"{TEMP_DIR}/{syscall_type}-split.train\", \"w\")\n snd_train_path = f\"{FILE_PATH}/{syscall_type}/{syscall_type}.train\"\n with open(snd_train_path) as train_file:\n for syscall in train_file:\n # Generate all n-grams of the current syscall\n n_grams = extract_n_grams(syscall.strip(),chunk_size,unique=True)\n if len(n_grams)==0:\n continue\n # Write n-grams to syscall chunks file\n syscalls_split_file.writelines(n_grams)\n syscalls_split_file.close()",
"def splitFileIntoShards(filename, shardsize):\n os.popen('split -a 4 -d --additional-suffix=_shard -l{} {}'.format(shardsize, filename))",
"def split_file(file_path, chunk_output_dir, compress=True, encrypt_key=None):\n\n file_size = os.stat(file_path).st_size\n logger.debug('original file size is %s' % file_size)\n chunk_sizes = []\n data_stream = ''\n if encrypt_key:\n esalt = encrypt_key.binary_salt\n if esalt is None:\n esalt = ''\n assert isinstance(encrypt_key, AESKey)\n # account for iv\n # size should be same as key\n iv = os.urandom(16)\n assert len(iv) == len(encrypt_key.binary_key)\n encryptor = Cipher(\n CIPHER_MODE, encrypt_key.binary_key, iv, CIPHER_ENCODE\n )\n data_stream = data_stream + esalt + iv\n if not compress:\n chunk_sizes = calc_chunk_sizes(file_size + len(data_stream))\n logger.debug('splitting %s into %s' % (file_path, chunk_output_dir))\n logger.debug('compress: %s' % compress)\n if encrypt_key:\n logger.debug('encrypt: True')\n f = open(file_path, 'rb')\n chunks = []\n chunk_prefix = 'chunk'\n if compress:\n compressor = zlib.compressobj(9)\n chunk_prefix = 'tmp_chunk'\n # figure out the size of the first chunk\n if chunk_sizes:\n chunk_size = chunk_sizes.pop(0)\n else:\n chunk_size = CHUNK_SIZE_MAX\n\n def chunk_stream(data_stream, chunk_size, check_size=True):\n # check_size is for the last bit of data that is smaller than a\n # chunk when data is compressed and the data sizes are\n # unpredictable.\n min_size = chunk_size\n if not check_size:\n min_size = 1\n while len(data_stream) >= min_size:\n chunk_data = data_stream[:chunk_size]\n # If compressing, will have to create new chunks later.\n chunks.append(ClientChunk.create(chunk_data, chunk_output_dir,\n prefix=chunk_prefix))\n data_stream = data_stream[chunk_size:]\n if chunk_sizes:\n # next chunk size may be different\n chunk_size = chunk_sizes.pop(0)\n return (data_stream, chunk_size)\n\n while f.tell() < file_size:\n data = f.read(CHUNK_SIZE_MAX)\n if compress:\n data = compressor.compress(data)\n if encrypt_key:\n data = encryptor.update(data)\n assert not encryptor.final()\n data_stream += data\n data_stream_len = len(data_stream)\n logger.debug('data stream length: %s' % data_stream_len)\n (data_stream, chunk_size) = chunk_stream(data_stream, chunk_size)\n # process data not chunked yet\n logger.debug('%s bytes left over' % len(data_stream))\n if compress:\n # may have compressed data left.\n flushed_data = compressor.flush()\n if flushed_data:\n logger.debug(\n 'another %s bytes of flushed data' % len(flushed_data))\n if encrypt_key:\n flushed_data = encryptor.update(flushed_data)\n assert not encryptor.final()\n data_stream += flushed_data\n if data_stream:\n (data_stream, chunk_size) = chunk_stream(data_stream, chunk_size,\n False)\n assert not chunk_sizes\n f.close()\n # finished initial data chunking.\n new_size = sum((c.size - 4) for c in chunks)\n if not compress:\n emsg = ('original size was %s. 
expected new size to be '\n '%s, but it is %s')\n expected_size = file_size\n if encrypt_key:\n expected_size = file_size + len(esalt) + len(iv)\n emsg = emsg % (file_size, expected_size, new_size)\n assert expected_size == new_size, emsg\n else:\n # must reorganize the chunks.\n new_chunks = []\n chunk_sizes = calc_chunk_sizes(new_size)\n # just replace the old chunk with the new one.\n data_stream = ''\n for chunk_size in chunk_sizes:\n # read the old chunks until there is enough to write.\n while len(data_stream) < chunk_size:\n old_chunk = chunks.pop(0)\n data_stream += old_chunk.read(raw=True)[:-4]\n # free up the space\n os.unlink(old_chunk.file_path)\n # small files will not fill a chunk\n if not chunks:\n break\n chunk_data = data_stream[:chunk_size]\n new_chunks.append(ClientChunk.create(chunk_data, chunk_output_dir))\n data_stream = data_stream[chunk_size:]\n chunks = new_chunks\n # There should not be anything left over.\n assert not data_stream\n # size for comparison\n size_ratio = 1.0 * new_size / file_size\n logger.debug('new size (combined chunks) is %s bytes' % new_size)\n logger.debug('size ratio is %f' % size_ratio)\n logger.debug('split file into %s chunks' % len(chunks))\n return chunks",
"def mergeAndSaveFile(dumpMetaFile, chunkSizeFile, outFile):\n dump = open (dumpMetaFile, \"r\")\n chunk = open (chunkSizeFile, \"r\")\n out = open (outFile, \"w\")\n \n cline = \"\"\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n\n while dump:\n dline = dump.readline()\n if not dline:\n break\n dline = dline.rstrip(\"\\n\")\n \n # Split line parts \n dlineParts = dline.split(' ')\n \n # Read lines from chunkSize\n numEntries = int(dlineParts[2])\n \n entries = []\n for i in range(numEntries):\n entries.append([dlineParts[i*3 + 3], dlineParts[i*3 + 4], dlineParts[i*3 + 5], 0])\n #entries[i][0] = dlineParts[i*3 + 3]\n #entries[i][1] = dlineParts[i*3 + 4]\n #entries[i][2] = dlineParts[i*3 + 5]\n #entries[i][3] = 0\n\n while True:\n clineParts = cline.split(' ')\n if ((dlineParts[0] == clineParts[0]) and (dlineParts[1] == clineParts[1])):\n for i in range(numEntries):\n if ((entries[i][0] == clineParts[3]) and (entries[i][1] == clineParts[4])):\n entries[i][3] = clineParts[2]\n else:\n break\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n if not cline:\n break\n\n # Print output\n out.write(dlineParts[0]+\" \"+dlineParts[1]+\" \"+dlineParts[2]+\" \")\n for i in range(numEntries):\n out.write(str(entries[i][3])+\" \"+entries[i][0]+\" \"+entries[i][1]+\" \"+entries[i][2]+\" \")\n out.write(\"\\n\")\n out.close()",
"def split(self):\n overall_chunks = []\n for filename in self.get_all_files():\n file_chunks = self.split_single_file(filename)\n overall_chunks.extend(file_chunks)\n return overall_chunks",
"def split(self):\n\n # FIXME: user should be able to change the default behavior of\n # this function (for instance user may require one filter not\n # to split the content of the input file and the same input \n # to be used by the next filter.\n \n utils.split_file(self.files['hit_ids'],\n self.files['input'],\n self.files['filtered_reads'],\n self.files['survived_reads'])",
"def chunk_input(self, input_files, chunksize):\n part_lists = [] # Lists of partial files\n known_nlines = None\n part_suffix = \"\"\n chunk_nlines = chunksize * 2\n\n for input_file in input_files:\n # Count number of lines in the file\n nlines = int(command.execute_with_output(\"wc -l %s\" % input_file)\n .strip().split()[0])\n # Number of lines should be the same in paired files\n if known_nlines is not None:\n msg = \"Mismatched line counts in supposedly paired files: {}\".format(\n input_files)\n assert nlines == known_nlines, msg\n known_nlines = nlines\n\n # Set number of pieces and names\n numparts = (nlines + chunk_nlines - 1) // chunk_nlines\n ndigits = len(str(numparts - 1))\n part_suffix = \"-chunksize-%d-numparts-%d-part-\" % (chunksize, numparts)\n out_prefix_base = os.path.basename(input_file) + part_suffix\n out_prefix = os.path.join(self.chunks_result_dir_local, out_prefix_base)\n\n # Split large file into smaller named pieces\n command.execute(\"split -a %d --numeric-suffixes -l %d %s %s\" %\n (ndigits, chunk_nlines, input_file, out_prefix))\n command.execute_with_retries(f\"aws s3 sync --only-show-errors {self.chunks_result_dir_local}/ {self.chunks_result_dir_s3}/ --exclude '*' --include '{out_prefix_base}*'\")\n\n # Get the partial file names\n partial_files = []\n paths = command.execute_with_output(\"ls %s*\" % out_prefix).rstrip().split(\"\\n\")\n for pf in paths:\n partial_files.append(os.path.basename(pf))\n\n # Check that the partial files match our expected chunking pattern\n pattern = \"{:0%dd}\" % ndigits\n expected_partial_files = [(out_prefix_base + pattern.format(i))\n for i in range(numparts)]\n msg = \"something went wrong with chunking: {} != {}\".format(\n partial_files, expected_partial_files)\n assert expected_partial_files == partial_files, msg\n part_lists.append(partial_files)\n\n # Ex: [[\"input_R1.fasta-part-1\", \"input_R2.fasta-part-1\"],\n # [\"input_R1.fasta-part-2\", \"input_R2.fasta-part-2\"],\n # [\"input_R1.fasta-part-3\", \"input_R2.fasta-part-3\"],...]\n input_chunks = [list(part) for part in zip(*part_lists)]\n return part_suffix, input_chunks",
"def splitting():\n n = 1\n with open('numbers.txt', 'r+') as f:\n f.readline()\n seek_2 = f.tell()\n seek_1 = 0\n\n while seek_1 != seek_2:\n print(n)\n n += 1\n with open('numbers.txt', 'r+') as f, open('numbers.txt', 'r+') as f_2:\n f.seek(seek_1)\n f_2.seek(seek_2)\n seek_1, seek_2 = merge(f, f_2)\n\n make_result_file(seek_1)",
"def splitFileContents(f, delimiter, BLOCKSIZE=8192):\n remainder = StringIO()\n while True:\n block = f.read(BLOCKSIZE)\n if not block:\n break\n parts = block.split(delimiter)\n remainder.write(parts[0])\n for part in parts[1:]:\n yield remainder.getvalue()\n remainder = StringIO()\n remainder.write(part)\n yield remainder.getvalue()",
"def split_data(raw_data, output_pref):\n train_data = output_pref + \".train\"\n test_data = output_pref + \".test\"\n random.shuffle(raw_data)\n with open(train_data, \"w\", encoding=\"utf8\") as fw1:\n with open(test_data, \"w\", encoding=\"utf8\") as fw2:\n with open(train_data + \".raw\", \"w\", encoding=\"utf8\") as fw3:\n with open(test_data + \".raw\", \"w\", encoding=\"utf8\") as fw4:\n for idx, (line, item) in enumerate(raw_data):\n if idx < 1000:\n fw2.write(line + \"\\n\")\n fw4.write(\"\\t\".join([str(i) for i in item]) + \"\\n\")\n else:\n fw1.write(line + \"\\n\")\n fw3.write(\"\\t\".join([str(i) for i in item]) + \"\\n\")",
"def _split_and_write(\n path: str,\n saved_model: saved_model_pb2.SavedModel,\n max_size: int,\n export_files: Sequence[str],\n):\n constants.debug_set_max_size(max_size)\n\n if \"pbtxt\" in export_files:\n output_path = f\"{path}.pbtxt\"\n file_io.write_string_to_file(output_path, str(saved_model))\n logging.info(\" %s written\", output_path)\n if \"pb\" in export_files:\n output_path = f\"{path}.pb\"\n file_io.write_string_to_file(output_path, saved_model.SerializeToString())\n logging.info(\" %s written\", output_path)\n if \"cpb\" in export_files:\n splitter = split_saved_model.SavedModelSplitter(saved_model)\n splitter.write(path)\n chunks, _ = splitter.split()\n if len(chunks) > 1:\n logging.info(\" %s.cpb written\", path)\n else:\n raise RuntimeError(\n \"For some reason this graph was not chunked, so a .cpb file was not\"\n \" produced. Raising an error since this should not be the case.\"\n )"
] | [
"0.73668265",
"0.70914865",
"0.7090537",
"0.7067825",
"0.7001469",
"0.68641955",
"0.67107993",
"0.65979683",
"0.65870893",
"0.6580552",
"0.6572284",
"0.65702844",
"0.6551708",
"0.64280057",
"0.6383532",
"0.6352524",
"0.63487375",
"0.629163",
"0.62073165",
"0.6201502",
"0.61509836",
"0.6134791",
"0.61319137",
"0.612877",
"0.6123829",
"0.6093117",
"0.6089306",
"0.6063661",
"0.60080594",
"0.60020274"
] | 0.745905 | 0 |
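For reference, a minimal Python 3 sketch of the same fixed-count splitting approach as the document above (the standalone name split_file and the postfix argument are illustrative assumptions; the original is a method on a splitter class):

import os

def split_file(path, num_chunks, postfix=".chunk"):
    # Write num_chunks pieces named "<basename>-<n><postfix>" next to the working directory.
    fsize = os.path.getsize(path)
    chunk_size = fsize // num_chunks
    base = os.path.basename(path)
    with open(path, "rb") as src:
        for i in range(num_chunks):
            # The last chunk absorbs the remainder left by integer division.
            size = fsize - chunk_size * (num_chunks - 1) if i == num_chunks - 1 else chunk_size
            with open(f"{base}-{i + 1}{postfix}", "wb") as dst:
                dst.write(src.read(size))

For example, split_file("backup.tar", 4) would produce backup.tar-1.chunk through backup.tar-4.chunk, with the fourth file holding any leftover bytes.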