query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
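Each row pairs a natural-language query with a matching positive code document, a per-row training objective in metadata, and 30 candidate negatives together with their similarity scores; document_score and document_rank record how the positive document scored and ranked among those candidates. The sketch below is a minimal, hypothetical restatement of that row shape in Python — the dataclass and its name are illustrative only and not part of the dataset's own tooling.

```python
from dataclasses import dataclass
from typing import Dict, List


@dataclass
class RetrievalRow:
    """One row of the query/document retrieval table sketched in the header above."""
    query: str                  # natural-language request, 9 to ~9.05k characters
    document: str               # positive code snippet, 10 to ~222k characters
    metadata: Dict              # e.g. {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}}
    negatives: List[str]        # 30 candidate code snippets expected to rank below the document
    negative_scores: List[str]  # 30 similarity scores for the negatives, stored as strings
    document_score: str         # similarity score of the positive document
    document_rank: str          # rank of the positive among the candidates (two observed classes)
```

The scores appear to be string-encoded floats (e.g. "0.65154845"), so a consumer would presumably cast them to float before use.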
Create each ability, to ensure no errors are encountered. | def testabilities(self):
for ability in AmuletAbility.typelist:
a = AmuletAbility(ability)
self.assertEqual(a.type, ability)
if ability != 'Attribute':
self.assert_(ability in str(a))
self.assertTrue(isinstance(a.AC, int))
self.assertTrue(isinstance(a.description(), str)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_with_permissions(self):\n permissions = Permission.objects.filter(name__in=('Can add course mode', 'Can change course mode'))\n for permission in permissions:\n self.user.user_permissions.add(permission)\n\n self.assert_can_create_course()",
"def test_control_acl_new_people_create(self):\n control_body = self.prepare_control_request_body()\n access_control_list = {\n \"Admin\": [\n {\n \"email\": \"[email protected]\",\n \"name\": \"user1\",\n },\n {\n \"email\": \"[email protected]\",\n \"name\": \"user2\",\n },\n ]\n }\n control_body.update({\n \"access_control_list\": access_control_list,\n })\n\n response = self.api.post(all_models.Control, data={\n \"control\": control_body\n })\n\n self.assert201(response)\n for expected_person in access_control_list[\"Admin\"]:\n user = all_models.Person.query.filter_by(\n email=expected_person[\"email\"]\n ).one()\n self.assertEqual(user.name, expected_person[\"name\"])\n self.assertEqual([ur.role.name for ur in user.user_roles], [\"Creator\"])\n control = all_models.Control.query.get(123)\n self.assert_obj_acl(control, access_control_list)",
"def testabilities(self):\n for ability in WeaponAbility.typelist:\n a = WeaponAbility(ability)\n self.assert_(ability in str(a))\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def __add_expanded_abilities(self, name):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n abilities = loop.run_until_complete(self.api.process_requests(\n \"ability\", name))\n ability_list = []\n factory = PokemonAbilityFactory(abilities, True)\n for ability in factory.create():\n ability_list.append(ability)\n return ability_list",
"def add_ability(self, ability):\n self.abilities.append(ability)",
"def testrandom(self):\n for i in range(100):\n AmuletAbility()",
"def create(self):\n\n self._calculate_hp()\n self.race.alterAbilities()\n self.race.racialAbilities()",
"def testattributes(self):\n for attr in ('ST', 'DX', 'IQ', 'MA', 'Dam', 'Hit'):\n AttributeAbility([attr,])",
"def test_control_acl_create(self, access_control_list):\n control_body = self.prepare_control_request_body()\n control_body.update({\n \"access_control_list\": access_control_list,\n })\n self.setup_people(access_control_list)\n\n response = self.api.post(all_models.Control, data={\n \"control\": control_body\n })\n\n self.assert201(response)\n control = all_models.Control.query.get(123)\n self.assert_obj_acl(control, access_control_list)",
"def abilities_all_types():\r\n\r\n ability_mods = abilities_gen_mods()\r\n\r\n with patch(\"funclg.utils.data_mgmt.id_gen\", side_effect=ability_ids()):\r\n all_abilities = []\r\n for index, a_type in enumerate(ABILITY_TYPES):\r\n\r\n all_abilities.append(\r\n Abilities(\r\n name=f\"Ability_{index}\",\r\n ability_type=a_type,\r\n description=f\"{a_type} ability\",\r\n mod=ability_mods[a_type],\r\n )\r\n )\r\n\r\n all_abilities.append(\r\n Abilities(\r\n name=\"Ability_Error_NoMod\",\r\n ability_type=\"Error\",\r\n description=\"Error ability\",\r\n )\r\n )\r\n return all_abilities",
"def testrandom(self):\n for i in range(100):\n WeaponAbility()",
"def test_add_facility_pt4(self):\n with self.assertRaises(InvalidPermission):\n self.assertFalse(self.learner1.has_perm('auth.add_facility', obj=[]))",
"def test_ipam_roles_create(self):\n pass",
"def testinvalidenhancements(self):\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=[])\n list = [MentalAbility('Fireball', 3),] * 6\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=list)\n list = [PhysicalAbility('Sword', 3),]\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=list)\n list = [MentalAbility('Fireball', 3),] * 3\n self.assertRaises(AbilityError,\n WeaponAbility, 'Guided', abilities=list)",
"def test_models_organization_get_abilities_preset_role(self):\n access = factories.UserOrganizationAccessFactory(role=\"member\")\n access.organization.user_role = \"member\"\n\n with self.assertNumQueries(0):\n abilities = access.organization.get_abilities(access.user)\n\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": False,\n \"put\": False,\n \"manage_accesses\": False,\n },\n )",
"def test_models_organization_get_abilities_administrator(self):\n access = factories.UserOrganizationAccessFactory(role=\"administrator\")\n abilities = access.organization.get_abilities(access.user)\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": True,\n \"put\": True,\n \"manage_accesses\": True,\n },\n )",
"def testinvalidability(self):\n self.assertRaises(AbilityError, AmuletAbility, 'Invalid')\n self.assertRaises(AbilityError, AmuletAbility, '')",
"def __init__(self, privileges=[ 'can add post', 'can delete post',\n 'can ban user','can add user', \n 'can mute user']):\n self.privileges = privileges",
"def testAmenities(self):\n place = Place()\n self.assertTrue(hasattr(place, \"amenity_ids\"))\n self.assertEqual(type(place.amenity_ids), list)\n self.assertEqual(len(place.amenity_ids), 0)",
"def testinvalidability(self):\n self.assertRaises(AbilityError, WeaponAbility, 'Invalid')\n self.assertRaises(AbilityError, WeaponAbility, '')",
"def create_basic_roles(script):\n roles = script.do(Roles.GetAll())\n roles = [a['name'] for a in roles]\n\n 'developers' in roles or script.do(Roles.Create('developers'))\n 'supervisors' in roles or script.do(Roles.Create('supervisors'))\n 'readers' in roles or script.do(Roles.Create('readers'))",
"def test_bulk_create(self):\n urls = [reverse('api:user-list')]\n data = [\n {\n \"username\": \"newuser1\",\n \"email\": \"[email protected]\",\n \"password\": \"password\"\n },\n {\n \"username\": \"newuser2\",\n \"email\": \"[email protected]\",\n \"password\": \"password\"\n },\n ]\n access = {\n \"forbidden\": [self.admin_client, self.anonymous_client, self.readonly_client, self.custodian_1_client],\n \"allowed\": []\n }\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.post(url, data, format='json').status_code,\n [status.HTTP_400_BAD_REQUEST, status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.post(url, data, format='json').status_code,\n status.HTTP_201_CREATED\n )",
"def setupPermissions( self, p ):\n mp = p.manage_permission\n for entry in Config.PortalPermissions:\n apply( mp, entry )",
"def testsize(self):\n for size in range(5):\n AttributeAbility(size=size + 1)",
"def __init__(self):\n self.privileges=[\"can add post\", \"can delete post\", \"can ban user\"]",
"def create_missions(random, world):\n return [\n missions.AwakeMission(random, world),\n missions.BoardCraftMission(random, world),\n missions.DebriefMission(random, world),\n missions.WinMission(random, world),\n ]",
"def __init__(self):\n\t\tself.privileges = [\"can add post\", \"can delete post\", \"can ban user\"]",
"def test_models_organization_get_abilities_member_user(self):\n access = factories.UserOrganizationAccessFactory(role=\"member\")\n\n with self.assertNumQueries(1):\n abilities = access.organization.get_abilities(access.user)\n\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": False,\n \"put\": False,\n \"manage_accesses\": False,\n },\n )",
"def test_models_organization_get_abilities_authenticated(self):\n organization = factories.OrganizationFactory()\n abilities = organization.get_abilities(factories.UserFactory())\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": False,\n \"put\": False,\n \"manage_accesses\": False,\n },\n )",
"def testAC(self):\n for size in range(5):\n for attr in ('ST', 'DX'):\n a = AttributeAbility([attr,], size + 1)\n self.assertEqual(a.AC, (2000, 4000, 7000, 15000, 25000)[size])\n for attr in ('IQ', 'Dam'):\n a = AttributeAbility([attr,], size + 1)\n self.assertEqual(a.AC, (1000, 2000, 3500, 7000, 15000)[size])\n a = AttributeAbility(['MA',], size + 1)\n self.assertEqual(a.AC, (1000, 2000, 3000, 6000, 12000)[size])\n a = AttributeAbility(['Hit',], size + 1)\n self.assertEqual(a.AC, (1000, 2500, 5000, 10000, 18000)[size])"
] | [
"0.65154845",
"0.6027863",
"0.60043085",
"0.5975127",
"0.59668714",
"0.58903754",
"0.5886313",
"0.5862154",
"0.5807895",
"0.574079",
"0.5737699",
"0.57014215",
"0.5621218",
"0.5620973",
"0.56114036",
"0.5607721",
"0.54945326",
"0.54680264",
"0.5420151",
"0.5416647",
"0.5411306",
"0.5377346",
"0.5366824",
"0.5363845",
"0.534332",
"0.5341681",
"0.53267956",
"0.53256124",
"0.5318006",
"0.53171307"
] | 0.6188402 | 1 |
Provide an invalid element, to generate an error. | def testinvalidelement(self):
self.assertRaises(AbilityError,
AmuletAbility, 'Proof', element='Invalid')
self.assertRaises(AbilityError, AmuletAbility, 'Proof', element='')
self.assertRaises(AbilityError,
AmuletAbility, 'Control NPC', element='Fire') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error(self, msg, elem):\n if elem is not None:\n msg += \" (line %d)\" % elem.sourceline\n if self.ignore_errors:\n return self.warn(msg, elem)\n raise ParserException(msg)",
"def test_invalid_xml(self):\r\n with self.assertRaises(Exception):\r\n self.build_problem(type=\"invalidtextgroup\")",
"def test_invalid_input_tag(self):\r\n with self.assertRaisesRegexp(Exception, \"Error in xml\"):\r\n self.check_group('checkboxtextgroup', 'invalid', 'checkbox')",
"def _setInvalidElementInContent (self, value):\n if not (value in ( self.IGNORE_ONCE, self.GIVE_UP, self.RAISE_EXCEPTION )):\n raise ValueError(value)\n self.__invalidElementInContent = value",
"def invalid(self):\n pass",
"def unknownElement(self, element):\n raise UnknownElementError(\"Invalid element in %s: '%s'\" % (self.__class__.__name__, element.nodeName))",
"def __getInvalidElementInContent (self):\n return self.__invalidElementInContent",
"def validate_element(element, validator, schema=SCHEMA):\n if validator.validate(element, schema) is not True:\n field, errors = next(validator.errors.iteritems())\n message_string = \"\\nElement of type '{0}' has the following errors:\\n{1}\"\n error_string = pprint.pformat(errors)\n \n raise Exception(message_string.format(field, error_string))",
"def validate_element(element, validator, schema=SCHEMA):\n if validator.validate(element, schema) is not True:\n field, errors = next(validator.errors.iteritems())\n message_string = \"\\nElement of type '{0}' has the following errors:\\n{1}\"\n error_string = pprint.pformat(errors)\n \n raise Exception(message_string.format(field, error_string))",
"def validate_element(element, validator, schema=SCHEMA):\n if validator.validate(element, schema) is not True:\n field, errors = next(validator.errors.iteritems())\n message_string = \"\\nElement of type '{0}' has the following errors:\\n{1}\"\n error_string = pprint.pformat(errors)\n \n raise Exception(message_string.format(field, error_string))",
"def validate_element(element, validator, schema=SCHEMA):\n if validator.validate(element, schema) is not True:\n field, errors = next(validator.errors.iteritems())\n message_string = \"\\nElement of type '{0}' has the following errors:\\n{1}\"\n error_string = pprint.pformat(errors)\n \n raise Exception(message_string.format(field, error_string))",
"def validate_element(element, validator, schema=SCHEMA):\n if validator.validate(element, schema) is not True:\n field, errors = next(validator.errors.iteritems())\n message_string = \"\\nElement of type '{0}' has the following errors:\\n{1}\"\n error_string = pprint.pformat(errors)\n\n raise Exception(message_string.format(field, error_string))",
"def validate_element(element, validator, schema=SCHEMA):\n if validator.validate(element, schema) is not True:\n field, errors = next(validator.errors.iteritems())\n message_string = \\\n \"\\nElement of type '{0}' has the following errors:\\n{1}\"\n error_string = pprint.pformat(errors)\n\n raise Exception(message_string.format(field, error_string))",
"def validate_element(element, validator, schema=SCHEMA):\r\n if validator.validate(element, schema) is not True:\r\n print(validator.errors)\r\n for field, errors in validator.errors.items() :\r\n message_string = \"\\nElement of type '{0}' has the following errors:\\n{1}\"\r\n error_string = pprint.pformat(errors)\r\n\r\n raise Exception(message_string.format(field, error_string))",
"def validateElement(self, doc, elem):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n if elem is None: elem__o = None\n else: elem__o = elem._o\n ret = libxml2mod.xmlValidateElement(self._o, doc__o, elem__o)\n return ret",
"def validateElement(self, ctxt, elem):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n if elem is None: elem__o = None\n else: elem__o = elem._o\n ret = libxml2mod.xmlValidateElement(ctxt__o, self._o, elem__o)\n return ret",
"def parse_error(self, error: Union[str, Exception],\n elem: Optional[ElementType] = None,\n validation: Optional[str] = None) -> None:\n if validation is not None:\n check_validation_mode(validation)\n else:\n validation = self.validation\n\n if validation == 'skip':\n return\n elif elem is None:\n elem = self.elem\n elif not is_etree_element(elem):\n msg = \"the argument 'elem' must be an Element instance, not {!r}.\"\n raise XMLSchemaTypeError(msg.format(elem))\n\n if isinstance(error, XMLSchemaParseError):\n error.validator = self\n error.namespaces = getattr(self, 'namespaces', None)\n error.elem = elem\n error.source = getattr(self, 'source', None)\n elif isinstance(error, Exception):\n message = str(error).strip()\n if message[0] in '\\'\"' and message[0] == message[-1]:\n message = message.strip('\\'\"')\n error = XMLSchemaParseError(self, message, elem)\n elif isinstance(error, str):\n error = XMLSchemaParseError(self, error, elem)\n else:\n msg = \"'error' argument must be an exception or a string, not {!r}.\"\n raise XMLSchemaTypeError(msg.format(error))\n\n if validation == 'lax':\n self.errors.append(error)\n else:\n raise error",
"def xpathErr(self, error):\n libxml2mod.xmlXPathErr(self._o, error)",
"def error(self):\n x = first_element_or_none(self.element.xpath('./a:Error', namespaces=namespaces))\n if x is None:\n return\n return ProductError(x, self.identifier)",
"def error(self, code, message=None):\n return xpath_error(code, message, self, self.error_prefix)",
"def xpatherror(self, file, line, no):\n libxml2mod.xmlXPatherror(self._o, file, line, no)",
"def _check_for_errors(etree: ET.ElementTree):\n if etree.getroot().tag == 'error':\n raise APIError(etree.getroot().text)",
"def timeout_element_error(self, selector, name):\n\t\tBasePage.LOGGER.error(\"Timeout - < {1} > element not found: {0} \\n\".format(selector, name))\n\t\traise Exception(\"Timeout - < {1} > element not found: {0}\".format(selector, name))",
"def test_invalid_tag(self):\r\n with self.assertRaises(Exception):\r\n self.check_group('invalid', 'choice', 'checkbox')",
"def test_send_http_request_value_error(self, na_element):\n\n self.assertRaises(ValueError, self.root.send_http_request, na_element)",
"def decorate_error(self, element, error):\n if error is None:\n return element\n\n div = self.div(class_='nagare-error-input')\n div.append(element)\n\n return self.div(\n div,\n self.div(error, class_='nagare-error-message'),\n class_='nagare-error-field'\n )",
"def _missingElement(self, element, request):\n e = errors.MissingResourceError(\"no such element %s\" % (element,))\n\n try:\n encoder, contentType = self._getEncoder(request)\n except errors.UnacceptableRequestError:\n contentType = self.defaultContentType\n encoder = self.encoders[contentType]\n\n return RESTErrorPage(e, encoder, contentType)",
"def __init__(self, message=\"\"):\n super(ElementNotFoundError, self).__init__(message)",
"def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')",
"def test_not_enough_inputs(self):\n n = Node('a')\n with self.assertRaises(ValidationError):\n n.validate()\n n.validate(False)"
] | [
"0.69439214",
"0.6649658",
"0.6549366",
"0.6526067",
"0.6515148",
"0.64893574",
"0.63802725",
"0.6375399",
"0.6375399",
"0.6375399",
"0.6375399",
"0.6364096",
"0.6347471",
"0.63307416",
"0.62093705",
"0.6206851",
"0.6202528",
"0.6188424",
"0.61858267",
"0.61582255",
"0.6085006",
"0.6054694",
"0.6022984",
"0.6021679",
"0.60151273",
"0.5989799",
"0.59840435",
"0.59375626",
"0.59352845",
"0.59312105"
] | 0.6935137 | 1 |
Ensure random abilities may be generated without error. | def testrandom(self):
for i in range(100):
AmuletAbility() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testrandom(self):\n for i in range(100):\n WeaponAbility()",
"def random_die():\n return randrange(1, 6)",
"def test_insufficient_shuffle(self):\n self.deck._deal(1)\n with self.assertRaises(ValueError):\n self.deck.shuffle()",
"def luck_check(chance):\n return randint(0, 100) < chance",
"def testinvalidability(self):\n self.assertRaises(AbilityError, WeaponAbility, 'Invalid')\n self.assertRaises(AbilityError, WeaponAbility, '')",
"def testrange(self):\n for range_ in range(1, 5):\n a = WeaponAbility('Animated', range=range_)\n self.assert_(str(range_) in str(a))",
"def random_legal_move():\n return random.choice(legal_moves())",
"def testinvalidrange(self):\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range=0)\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range=6)\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range='3')\n self.assertRaises(AbilityError, WeaponAbility, 'Changling', range=3)",
"def testinvalidenhancements(self):\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=[])\n list = [MentalAbility('Fireball', 3),] * 6\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=list)\n list = [PhysicalAbility('Sword', 3),]\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=list)\n list = [MentalAbility('Fireball', 3),] * 3\n self.assertRaises(AbilityError,\n WeaponAbility, 'Guided', abilities=list)",
"def randomAction():\n return np.random.randint(0, POSSIBLE_ACTIONS)",
"def check_for_combat():\n if random.randint(1, 4) == 1:\n return True\n else:\n return False",
"def _validate_random_seeds(self):\n if self.random_seeds:\n if len(self.random_seeds) != len(self.sampler):\n raise ValueError(\"Number of given range objects in random_seeds\"\\\n \"and number of sampler objects need to be equal!\")\n if len(set(list(map(len,self.random_seeds)))) != 1:\n raise ValueError(\"Length of range objects in random_seeds\"\\\n \"list must be equal!\")",
"def random_valid(self):\n if random_exp > 0:\n args.exp = random.sample(exp_choices, random_exp)\n elif random_exp < 0:\n args.exp = random.sample(exp_choices, random.randint(0, -random_exp))\n btypes_str = 'T'*8+'S'*4+'U'*(5 - len(args.exp))+'P'*3+'G'*2+'F'*2+'A'*3+'1'*3+'2'*2+'3'*1+'4'*1+'5'*1+'O'*8+'M'*(-args.monuments if args.monuments < 0 else 0)\n btypes_min_str = 'T'*0+'S'*0+'U'*len(args.exp)+'P'*0+'G'*0+'F'*0+'A'*0+'1'*0+'2'*0+'3'*0+'4'*0+'5'*0+'O'*0+'M'*(args.monuments if args.monuments > 0 else 0)\n len_min = len(btypes_min_str)\n while 1:\n ## TSU_PG_FA_12345_OM\n ## tot845_32_23_32111_81\n ## min00E_00_00_00000_00\n bpos = list(range(20))\n self.b = ['_'] * 20\n self.f = [1] * 20\n cnt_b = 0\n btypes_min = list(btypes_min_str)\n random.shuffle(btypes_min)\n while cnt_b < len_min:\n s_bpos = random.choice(bpos)\n c_bding = self.b[s_bpos]\n if c_bding == 'T' or c_bding == 'O':\n if self.f[s_bpos] < 5 and c_bding in btypes_min:\n btypes_min.remove(c_bding)\n cnt_b += 1\n self.f[s_bpos] += 1\n else:\n bpos.remove(s_bpos)\n else:\n s_bding = btypes_min.pop(-1)\n cnt_b += 1\n self.b[s_bpos] = s_bding\n if s_bding != 'T' and s_bding != 'O':\n bpos.remove(s_bpos)\n btypes = list(btypes_str)\n random.shuffle(btypes)\n while cnt_b < 20:\n s_bpos = random.choice(bpos)\n c_bding = self.b[s_bpos]\n if c_bding == 'T' or c_bding == 'O':\n if self.f[s_bpos] < 5 and c_bding in btypes:\n btypes.remove(c_bding)\n cnt_b += 1\n self.f[s_bpos] += 1\n else:\n bpos.remove(s_bpos)\n else:\n s_bding = btypes.pop(-1)\n cnt_b += 1\n self.b[s_bpos] = s_bding\n if s_bding != 'T' and s_bding != 'O':\n bpos.remove(s_bpos)\n self.calc_resources()\n if self.popula_used <= self.popula and self.energy_used <= self.energy:\n break",
"def testinvalidability(self):\n self.assertRaises(AbilityError, AmuletAbility, 'Invalid')\n self.assertRaises(AbilityError, AmuletAbility, '')",
"def confused(self, rand):\n return rand > 0",
"def generate_random_roles():\n\n role_names = set()\n roles_size = 0\n while roles_size != 3:\n role_name = ''\n for i in range(3):\n role_name += random.choice(['0', '1'])\n if role_name not in role_names:\n role_names.add(role_name)\n roles_size += 1\n\n for role_name in role_names:\n delete_access = ActionTypes.DELETE.value if role_name[0] == '1' else ''\n write_access = ActionTypes.WRITE.value if role_name[1] == '1' else ''\n read_access = ActionTypes.READ.value if role_name[2] == '1' else ''\n\n allowed_actions = [access for access in (delete_access, write_access, read_access) if access]\n Role(role_name, allowed_actions)",
"def throw(self):\n self.side = random.randint(1, self.num_sides)",
"def test_sample(self):\n liste = list(range(10))\n extrait = random.sample(liste, 5)\n for element in extrait:\n self.assertIn(element, liste)\n\n with self.assertRaises(ValueError):\n random.sample(liste, 20)",
"def is_exhausted(self):\n return random.random() < 0.5",
"def randomLeggings():\n return random.choice(LEGGINGS)",
"def test_uniform_basic():\r\n yield check_uniform_basic, False\r\n yield check_uniform_basic, False, True\r\n yield check_uniform_basic, True",
"def get_random_excuse():\n return excuses[random.randint(1, len(excuses) - 1)]",
"def test_sample(self):\n extrait = random.sample(self.liste, 5)\n for element in extrait:\n self.assertIn(element, self.liste)\n\n with self.assertRaises(ValueError):\n random.sample(self.liste, 20)",
"def test_sample(self):\n extrait = random.sample(self.liste, 5)\n for element in extrait:\n self.assertIn(element, self.liste)\n\n with self.assertRaises(ValueError):\n random.sample(self.liste, 20)",
"def test_sample(self):\n extrait = random.sample(self.liste, 5)\n for element in extrait:\n self.assertIn(element, self.liste)\n\n with self.assertRaises(ValueError):\n random.sample(self.liste, 20)",
"def random_test(self):\r\n return 1",
"def random_test(self):\r\n return 1",
"def test_rng_invalid_value(self):\n with pytest.raises(ValueError) as exc:\n check_random_state(\"oh_no_oh_no\")\n\n assert \"'oh_no_oh_no' cannot be used to seed\" in str(exc.value)",
"def mover_aleatoriamente(self):\n self.randomizador = random.randint(0,4)",
"def __validate__(self):\n if self.train:\n assert self.random is not None"
] | [
"0.66446924",
"0.6261142",
"0.6225658",
"0.61589783",
"0.61494786",
"0.61290914",
"0.61236346",
"0.61155415",
"0.6106622",
"0.60941434",
"0.60484105",
"0.6033411",
"0.60260797",
"0.60189956",
"0.59546584",
"0.59362406",
"0.59291255",
"0.59213823",
"0.58713335",
"0.5846206",
"0.5846033",
"0.58418804",
"0.5816671",
"0.5816671",
"0.5816671",
"0.5814146",
"0.5814146",
"0.5812342",
"0.57235146",
"0.57134414"
] | 0.71204704 | 0 |
Create each Animated weapon range, to ensure no errors occur. | def testrange(self):
for range_ in range(1, 5):
a = WeaponAbility('Animated', range=range_)
self.assert_(str(range_) in str(a)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setRange(self):\n # first determine ranges\n if len(self.activeWeapons) > 0:\n myPrimaryWeapon = self.activeWeapons[0]\n self.range = myPrimaryWeapon.myWeaponData.range * 1.0\n else:\n # no weapons left RUN\n self.mode = 'escape'\n self.range = 99999",
"def create(self, range):\n raise NotImplementedError",
"def generateEnemyStats(healthRange, powerRange, smartsRating):\n\n stats = {\n 'healthRating': healthRange,\n 'powerRating': powerRange,\n 'smartsRating': smartsRating\n }\n return stats",
"def prep_ammo(self):\r\n self.ammo = Group()\r\n for ammo in range(self.stats.ammo):\r\n ship_ammo = ShipAmmo(self.ai_settings, self.screen)\r\n ship_ammo.rect.x = 20 + ammo * (ship_ammo.rect.width + 10)\r\n ship_ammo.rect.y = 60\r\n self.ammo.add(ship_ammo)",
"def testinvalidrange(self):\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range=0)\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range=6)\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range='3')\n self.assertRaises(AbilityError, WeaponAbility, 'Changling', range=3)",
"def prep_health(self):\r\n self.health = Group()\r\n for ship_number in range(self.stats.ships_left):\r\n ship_health = ShipHealth(self.ai_settings, self.screen)\r\n ship_health.rect.x = 20 + ship_number * (ship_health.rect.width + 10)\r\n ship_health.rect.y = 28\r\n self.health.add(ship_health)",
"def UpgradeWeapon(self):\n label = self.wepSpin.get()\n for index in range(min(self.squad.current_size, self.unitToWeap[label][1])):\n upgradedUnit = next(x for x in self.squad.units if x.name == self.unitToWeap[label][3])\n upgradedUnit.armRangedWeapon(weapon.ranged_weapons[self.unitToWeap[label][0]])\n self.squad.point_cost += self.unitToWeap[label][2] \n self.pointLabel['text'] = self.squad.point_cost",
"def testrandom(self):\n for i in range(100):\n WeaponAbility()",
"def create(self):\n\n self._calculate_hp()\n self.race.alterAbilities()\n self.race.racialAbilities()",
"def prep_shield(self):\r\n self.shield = Group()\r\n for shield in range(self.stats.shields_left):\r\n stats_shield = ShipShield(self.ai_settings, self.screen, self.ship)\r\n stats_shield.rect.x = 20\r\n stats_shield.rect.y = 750\r\n self.shield.add(stats_shield)",
"def __init__(self, attacker, target, damage,\n ticker='Unknown', weapon='Unknown', enemy_ships=[]):\n self._attacker = attacker\n self._target = target\n self._damage = list(damage)\n self._ticker = ticker\n self._weapon = weapon or 'Unknown'\n self._enemy_ships = ', '.join(enemy_ships) or 'Unknown'\n self._total_damage = sum(d[1] for d in self._damage)\n if self._damage:\n self._start_time = self._damage[0][0]\n self._end_time = self._damage[-1][0]\n else:\n self._start_time = None\n self._end_time = None",
"def advance(self):\n\t\tself.level += 1\t\n\t\t\"\"\"Creating the new wave of asteroids. Each level starts with the number of asteroids equal to the level number. So, the first level starts with only one asteroid, the second with two, and so on. Now, creating a bunch of asteroids is easy, but I need to make sure that no new asteroid is created right on top of the ship. Otherwise, the ship will explode just as the new level begins.\"\"\"\n\t\t# amount of space around ship to preserve when creating asteroids\n\t\tBUFFER = 150 #BUFFER is a constant for the amount of safe space needed around the ship. BUFFER=x_min+y_min\n\t\t# create new asteroids\n\t\tfor i in range(self.level):\n\t\t\t# calculate an x and y at least BUFFER distance from the ship\n\t\t\t# choose minimum distance along x-axis and y-axis\n\t\t\tx_min = random.randrange(BUFFER)# x_min is the minimum distance the new asteroid should be from the ship along the x-axis,\n\t\t\ty_min = BUFFER - x_min # y_min is the minimum distance that the new asteroid should be from the ship alongthe y-axis\n\t\t\t# choose distance along x-axis and y-axis based on minimum distance\n\t\t\tx_distance = random.randrange(x_min, games.screen.width - x_min) # x_distance is the distance from the ship for the new asteroid along the x-axis, It is a randomly\n\t\t\t#selected number that ensures that the new asteroid will be at least x_min distance from the ship\n\t\t\ty_distance = random.randrange(y_min, games.screen.height - y_min) # y_distance is the distance from the ship for the new asteroid along the y-axis. It is a randomly #selected number that ensures that the new asteroid will be at least y_min distance from the ship\n\t\t\t# calculate location based on distance\n\t\t\tx = self.ship.x + x_distance #x is the x-coordinate for the new asteroid\n\t\t\ty = self.ship.y + y_distance #y is the y-coordinate for the new asteroid\n\t\t\t# wrap around screen, if necessary\n\t\t\tx %= games.screen.width\n\t\t\ty %= games.screen.height\n\t\t\t# create the asteroid\n\t\t\tnew_asteroid = Asteroid(game = self,x = x, y = y,size = Asteroid.LARGE)\n\t\t\tgames.screen.add(new_asteroid)\n\t\t\t# display level number\n\t\tlevel_message = games.Message(value = \"Level \" + str(self.level),\n\t\t\t\t\t\t\t\t\t\t size = 40, \n\t\t\t\t\t\t\t\t\t\t color = color.yellow,\n\t\t\t\t\t\t\t\t\t\t x = games.screen.width/2,\n\t\t\t\t\t\t\t\t\t\t y = games.screen.width/10,\n\t\t\t\t\t\t\t\t\t\t lifetime = 3 * games.screen.fps,\n\t\t\t\t\t\t\t\t\t\t is_collideable = False)\n\t\tgames.screen.add(level_message)\n\t\t\t\n\t\t\t# play new level sound (except at first level)\n\t\tif self.level > 1:\n\t\t\tself.sound.play()",
"def create_deaths(wof_settings,screen,deaths,levelMap):\n \n death_width = wof_settings.element_width\n death_height = wof_settings.element_height \n \n # Create deaths\n for death_position in levelMap['death']:\n death = Death(wof_settings,screen)\n death.x = death_position[1] * death_width\n death.y = death_position[0] * death_height\n death.rect.x = death.x\n death.rect.y = death.y\n deaths.add(death)",
"def test_data_range(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n lenrange = random.randint(1, 10)\n nreps = random.randint(1, 10)\n\n ex.range = [\"i\", range(lenrange)]\n ex.nreps = nreps\n\n ex.vary[\"X\"][\"along\"] = 0\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n + (nreps - 1) * m], cmds)\n rangeidx = random.randint(0, lenrange - 1)\n repidx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", repidx * m,\n \"X_%d_%d\" % (rangeidx, repidx)], cmds)",
"def _create_fleet(self):\n alien = Alien(self)\n alien_width, alien_height = alien.rect.size\n available_space_y = self.settings.screen_height - (2 * alien_height)\n number_aliens_y = available_space_y // (2 * alien_height)\n sideways_ship_width = self.sideways_ship.rect.width\n available_space_x = (\n self.settings.screen_width - (3 * alien_width) -\n sideways_ship_width\n )\n number_columns = available_space_x // (2 * alien_width) + 1\n for column_number in (range(1, number_columns)):\n for alien_number in range(number_aliens_y):\n self._create_alien(alien_number, column_number)",
"def __init__(self, name, loot, strength):\n self.name = name\n self.x = 0\n self.y = 0\n self.health = 10\n self.strength = strength\n self.loot = loot\n self.is_alive = True\n self.MAX_HEALTH = 15\n self.magic_key = False\n logging.debug(\"{0} created with health of {1} and strength of {2}\"\n .format(self.name, self.health, self.strength))\n \"\"\" Test Results Part A:\n When increasing MAX_HEATH to 100, rounds tended to go on.\n When decreasing MAX_HEATH to 0.05, rounds end very quickly.\n This is expected because the Sprites will be easier or harder \n to defeat depending on how high their health can get. It will \n take more attacks to defeat a Sprite with more health and less\n attacks to defeat a Sprite with less health. \n \n Test Results Part B:\n Test: change strength of Enemy to 20 (higher than Avatar)\n Prediction: the Enemy should win most/all of the time because the player \n with more strength has a harder attack.\n Results: The Enemy won during all trials. If the roles were switched, the \n same could be said about Avatar.\n \n Test: set health of Avatar to 5\n Prediction: the Avatar will die more often than the Enemy because it can \n receive less attacks\n Results: The Avatar died during most trials. \n \n Test: set MAX_HEALTH for Enemy to 5\n Prediction: Enemy will be able to have less health, so it will be defeated\n more often than the Avatar\n Results: The enemy died in almost all trials\n \"\"\"",
"def build(self):\n self._start = np.zeros_like(self._rooms.shape)\n self._start[0] = random.randrange(self._rooms.shape[0])\n position = self._start\n egress = Direction.South\n distance = 1\n while position in self:\n room = self[position]\n room.egress = egress\n room.distance = distance\n yield position\n options = {}\n for direction in Direction.range():\n if direction != egress:\n new_position = position + direction.offset()\n if new_position in self:\n if self.is_sealed(new_position):\n options[direction] = new_position\n if options:\n direction = random.choice(tuple(options.keys()))\n room.remove_wall(direction)\n position = options[direction]\n egress = direction.reverse()\n distance += 1\n else:\n position += egress.offset()\n if position in self:\n egress = self[position].egress\n distance = self[position].distance",
"def make_hh_agents_2016(self):\r\n for hh_row in agents: # agents is a list of ints 1-97 from excel_import\r\n self.hhpos = self.determine_hhpos(hh_row, 'house_latitude', 'house_longitude')\r\n self.hh_id = return_values(hh_row, 'hh_id')\r\n self.admin_village = 1\r\n\r\n # 2016\r\n mig_remittances = return_values(hh_row, 'mig_remittances') # remittances of initial migrant\r\n if mig_remittances is None:\r\n mig_remittances = 0\r\n household_income_list[hh_row - 1] = int(mig_remittances)\r\n household_remittances_list[hh_row - 1] = int(mig_remittances)\r\n\r\n if return_values(hh_row, 'initial_migrants') is not None:\r\n out_mig_list[hh_row - 1] = 1\r\n household_migrants_list.append(self.hh_id)\r\n cumulative_mig_list[hh_row - 1] = 1\r\n\r\n num_labor_list[hh_row - 1] = initialize_labor(hh_row)\r\n hh_size_list[hh_row - 1] = len(return_values(hh_row, 'age'))\r\n\r\n a = HouseholdAgent(hh_row, self, self.hh_id, self.admin_village)\r\n self.space.place_agent(a, self.hhpos) # admin_village placeholder\r\n self.schedule.add(a)",
"def make_land_agents_2016(self):\r\n # add non-gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n\r\n landposlist = self.determine_landpos(hh_row, 'non_gtgp_latitude', 'non_gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.land_time = return_values(hh_row, 'non_gtgp_travel_time')[landposlist.index(landpos)]\r\n try:\r\n self.plant_type = return_values(hh_row, 'non_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'non_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 0\r\n lp = LandParcelAgent(hh_row, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp, landpos)\r\n self.schedule.add(lp)\r\n if self.gtgp_enrolled == 0 and landpos not in nongtgplist and landpos not in gtgplist:\r\n nongtgplist.append(landpos)\r\n # except:\r\n # pass\r\n\r\n # add gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n landposlist = self.determine_landpos(hh_row, 'gtgp_latitude', 'gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_time = return_values(hh_row, 'gtgp_travel_time')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.plant_type = return_values(hh_row, 
'pre_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'pre_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 1\r\n\r\n lp_gtgp = LandParcelAgent(hh_id, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp_gtgp, landpos)\r\n self.schedule.add(lp_gtgp)\r\n if self.gtgp_enrolled == 1 and landpos not in gtgplist and landpos in nongtgplist:\r\n gtgplist.append(landpos)",
"def __init__(self, width, height):\n self.width = width\n self.height = height\n self.plane = Plane(0,200)\n self.background = [Background(0),Background(1024),Background(2048)]\n self.bullets = []\n self.enemies = [Enemy(1080, randint(100,356))]",
"def create_enemies(self, count):\n self.enemies = []\n\n while count > 0:\n # Randomly select a cell\n x = int(random() * self.map_size[0])\n y = int(random() * self.map_size[1])\n\n # If it has been filled with something, choose another cell\n if self.is_filled(x, y):\n continue\n\n # Randomly select a type of enemy to generate\n r = int(random() * 10)\n if 4 < r and r < 8:\n enemy = self.create_enemy_bombeater_at(x, y)\n elif r == 8:\n enemy = self.create_enemy_flying_at(x, y)\n elif r == 9:\n enemy = self.create_enemy_bomber_at(x, y)\n else:\n enemy = self.create_enemy_normal_at(x, y)\n\n # Create dummy objects to prevent enemies \n # from concentrating at one place\n self.create_dummy_obj_at(x - 1, y)\n self.create_dummy_obj_at(x + 1, y)\n self.create_dummy_obj_at(x, y - 1)\n self.create_dummy_obj_at(x, y + 1)\n\n self.enemies.append(enemy)\n count -= 1",
"def move_attack_area(self):\n min_range, max_range = self.curr_unit.get_weapon_range()\n self.attack_area = []\n for (x, y) in self.move_area:\n self.__set_attack_area((x, y), min_range, max_range)",
"def _create_fleet(self):\n # Make an alien and find the amount of aliens in a row\n alien = Alien(self)\n alien_width, alien_height = alien.rect.size\n available_space_x = self.settings.screen_width-(2 * alien_width)\n number_aliens_x = available_space_x // (2* alien_width)\n\n # Determine the amount of alien rows\n ship_height = self.ship.rect.height\n available_space_y = (self.settings.screen_height - (3 * alien_height) - ship_height)\n number_rows = available_space_y // (2 * alien_height)\n\n # Create full fleet\n for row_number in range (number_rows):\n for alien_number in range(number_aliens_x):\n self._create_alien(alien_number, row_number)",
"def create_agents(game_map, km, num_crew, num_imp, num_tasks, num_visuals, cooldown, stat_thres):\n agents = [Crewmate(x, num_crew, num_imp, game_map, km, num_tasks, num_visuals) for x in range(num_crew)]\n # noinspection PyTypeChecker\n [agents.append(Impostor(num_crew + x, num_crew, num_imp, game_map, km, cooldown, stat_thres)) for x in range(num_imp)]\n\n return agents",
"def __init__(self):\n for i in range(100):\n Controller.currentAsteroids.append(Asteroid.generate_random_asteroid())",
"def create_enemies(self):\n if not self.ENEMY_DIST:\n raise ValueError('You must initialize ENEMY_DIST. Import turnable.rooms.FightRoom and '\n 'call FightRoom.set_enemy_dist(your_dist).')\n\n amount = random.randint(1, 3)\n for c in range(amount):\n en = self._get_enemy()(pos=self.pos)\n en.game = self.game\n self.enemies.append(en)",
"def setWeaponStatus(self):\n self.readyWeapons = []\n self.activeWeapons = []\n self.amsWeapons = []\n weaponValues = {}\n self.externalRadius = self.radius\n for position, myQuad in self.quads.iteritems():\n for id, myWeapon in myQuad.weapons.iteritems():\n if myWeapon.operational == 1:\n if myWeapon.myWeaponData.AMS == 0:\n weaponValue = myWeapon.getMyValue()\n weaponValues[myWeapon.myQuad.position + '-' + myWeapon.id] = weaponValue\n i = 0\n for weapon in self.activeWeapons:\n if weaponValues[weapon.myQuad.position + '-' + weapon.id] < weaponValue:\n self.activeWeapons.insert(i, myWeapon)\n break\n i += 1\n if myWeapon not in self.activeWeapons:\n self.activeWeapons.append(myWeapon)\n else:\n self.amsWeapons.append(myWeapon)\n self.externalRadius = myWeapon.myWeaponData.range",
"def _create_fleet(self):\n\t\t#Make an alien\n\t\talien = Alien (self)\n\t\talien_width, alien_height = alien.rect.size\n\t\tship_height = self.ship.rect.height\n\t\t\n\t\tavailable_space = self.settings.screen_width - (2*alien_width)\n\t\tavailable_space_y = self.settings.screen_height - (3*alien_height) - ship_height\n\t\tnumber_aliens_x = available_space // (2*alien_width)\n\t\tnumber_rows = available_space_y // (2*alien_height)\n\t\t\n\n\t\t#create first row of aliens\n\t\tfor row_number in range (number_rows):\n\t\t\tfor alien_number in range(number_aliens_x):\n\t\t\t\tself._create_alien(alien_number, alien_width, alien_height, row_number)",
"def make_individual_agents_2016(self):\r\n for hh_row in agents: # agents is a list of ints 1-94 from excel_import\r\n individual_id_list = return_values(hh_row, 'name')\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.hh_id = hh_id\r\n agelist = return_values(hh_row, 'age') # find the ages of people in hh\r\n genderlist = return_values(hh_row, 'gender')\r\n marriagelist = return_values(hh_row, 'marriage')\r\n educationlist = return_values(hh_row, 'education')\r\n income_local_off_farm = float(return_values(hh_row, 'income_local_off_farm'))\r\n income_local_off_farm_list[hh_row - 1] = income_local_off_farm\r\n household_income_list[hh_row - 1] = household_income_list[hh_row - 1] + income_local_off_farm\r\n if individual_id_list is not None and individual_id_list is not []:\r\n for i in range(len(individual_id_list)):\r\n self.individual_id = str(self.hh_id) + str(individual_id_list[i]) # example: 2c\r\n self.age = int(agelist[i])\r\n # if genderlist is not None and genderlist is not []:\r\n self.gender = int(genderlist[i])\r\n try:\r\n self.education = educationlist[i]\r\n except:\r\n self.education = 0\r\n self.marriage = marriagelist[i]\r\n IndividualAgent.create_initial_migrant_list(self, hh_row)\r\n self.age_at_step_0 = self.age\r\n self.income_local_off_farm = return_values(self.hh_row, 'income_local_off_farm')\r\n ind = IndividualAgent(hh_row, self, self.hh_id, self.individual_id, self.age, self.gender,\r\n self.education, self.marriage, self.past_hh_id, self.non_gtgp_area,\r\n self.step_counter, self.age_at_step_0, self.income_local_off_farm)\r\n self.schedule.add(ind)",
"def shotgenerator():\n return random.randint(0, 9), random.randint(0, 9)"
] | [
"0.6210735",
"0.5866768",
"0.58583087",
"0.578064",
"0.57570106",
"0.56918156",
"0.5606103",
"0.5557127",
"0.5484591",
"0.5478335",
"0.53735477",
"0.53352934",
"0.53319234",
"0.5275543",
"0.5247675",
"0.5247172",
"0.5241505",
"0.5228203",
"0.52197033",
"0.52137804",
"0.521354",
"0.5205826",
"0.52056867",
"0.5204874",
"0.51767117",
"0.51726013",
"0.51704645",
"0.5157545",
"0.5148664",
"0.51438224"
] | 0.66504484 | 0 |
Exercise the specification of abilities for Enhanced weapons. | def testenhancements(self):
list = [MentalAbility('Fireball', 3),]
a = WeaponAbility('Enhanced', abilities=list)
self.assertEqual(a.abilities, list)
self.assertEqual(a.AC, list[0].AC)
list *= 5
a = WeaponAbility('Enhanced', abilities=list)
self.assertEqual(a.abilities, list)
self.assertEqual(a.AC, list[0].AC * (1 + 2 + 4 + 8 + 16)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testabilities(self):\n for ability in WeaponAbility.typelist:\n a = WeaponAbility(ability)\n self.assert_(ability in str(a))\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def _handlespecials(self, secondary, typeset, abilitiesset, \n secondaryweapon):\n\n # Guided weapons must be distance weapons.\n GUIDED = WeaponAbility('Guided')\n if GUIDED in self.abilities and not self.isdistance():\n if typeset and abilitiesset:\n raise ArtifactError(\"Cannot comply with both type/style\" +\n \" and ability requirements.\")\n elif not typeset:\n newstyle = random.choice(('Missile Weapon', 'Thrown Weapon'))\n self._setweapontype(newstyle)\n else: # not abilitiesset\n self.abilities.remove(GUIDED)\n replacement = WeaponAbility()\n while (replacement == GUIDED or\n replacement in self.abilities):\n replacement = WeaponAbility()\n self.abilities.append(replacement)\n _MultiAbilityArtifact._setname(self)\n\n # Changlings are two weapons in one, with two separate ability sets\n CHANGLING = WeaponAbility('Changling')\n self.changling = False\n if not secondary and CHANGLING in self.abilities:\n\n # What we have so far becomes the primary\n self.primaryweapon = Weapon(self.style, self.type,\n abilities=list(self.abilities), \n secondary=True)\n\n # And we roll a new set for the secondary...\n if secondaryweapon is not None:\n self.secondaryweapon = secondaryweapon\n if not isinstance(self.secondaryweapon, Weapon):\n raise ArtifactError('%s is not a weapon!' % \n self.secondaryweapon)\n else:\n if 'Bow' in self.style:\n newstyle = random.choice(('Sword', 'Ax/Mace/Hammer',\n 'Pole Weapon', 'Unusual Weapon'))\n else:\n newstyle = random.choice(('Drawn Bow', 'Cross Bow'))\n self.secondaryweapon = Weapon(style=newstyle, secondary=True)\n\n # ... which must also include Changling (so max four)\n if CHANGLING in self.secondaryweapon.abilities:\n self.secondaryweapon.abilities.remove(CHANGLING)\n if len(self.secondaryweapon.abilities) == 5:\n self.secondaryweapon.abilities = \\\n self.secondaryweapon.abilities[:4]\n\n # Update this weapon to show ALL abilities, types, etc\n self.abilities += self.secondaryweapon.abilities\n self.type = \"%s / %s\" % (self.primaryweapon.type,\n self.secondaryweapon.type)\n self.itemtype = \"Changling %s\" % self.type\n self.value = (5000 + max(self.primaryweapon.value,\n self.secondaryweapon.value) +\n 2 * min(self.primaryweapon.value,\n self.secondaryweapon.value))\n\n # Finally, remove 'Changling' from the primary's list\n # to clean up the display, and update.\n self.primaryweapon.abilities.remove(CHANGLING)\n _MultiAbilityArtifact._setname(self.primaryweapon)\n self.changling = True\n _MultiAbilityArtifact._setname(self)",
"def testinvalidenhancements(self):\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=[])\n list = [MentalAbility('Fireball', 3),] * 6\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=list)\n list = [PhysicalAbility('Sword', 3),]\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=list)\n list = [MentalAbility('Fireball', 3),] * 3\n self.assertRaises(AbilityError,\n WeaponAbility, 'Guided', abilities=list)",
"def cmd_weaponcontrol(self, data, client, cmd=None):\n if not data:\n restrictions = []\n for weap in self._forbiddenWeapons:\n restrictions.append(find_key(self.weaponCodes, weap))\n\n if len(restrictions) == 0:\n client.message('^7No weapon restriction')\n else:\n client.message('^7Weapon restrictions: %s'% (', '.join(restrictions)))\n return True\n else:\n if not data[:4] in ('all', 'rese',\n '+ber', '+de', '+spa', '+mp5', '+ump', '+hk', '+lr', '+g36', '+psg', '+sr8', '+ak', '+neg', '+m4', '+he', '+smo', '+kev', '+hel', '+sil', '+las', '+med', '+nvg', '+xtr',\n '-ber', '-de', '-spa', '-mp5', '-ump', '-hk', '-lr', '-g36', '-psg', '-sr8', '-ak', '-neg', '-m4', '-he', '-smo', '-kev', '-hel', '-sil', '-las', '-med', '-nvg', '-xtr'):\n if client:\n client.message('^7Invalid data, try !help weaponcontrol')\n else:\n self.debug('Invalid data sent to cmd_weaponcontrol')\n return False\n\n if data[:3] == 'all':\n self._forbiddenWeapons = []\n self.console.say('^7All weapons/items allowed')\n elif data[:3] == 'res':\n self._forbiddenWeapons = self._forbiddenWeaponsFromConfig\n restrictions = []\n for weap in self._forbiddenWeapons:\n restrictions.append(find_key(self.weaponCodes, weap))\n self.console.say('^7Weapon restrictions: %s'% (', '.join(restrictions)))\n else:\n if data[1:4] == 'ber':\n bit='F'\n elif data[1:3] == 'de':\n bit='G'\n elif data[1:4] == 'spa':\n bit='H'\n elif data[1:3] == 'mp':\n bit='I'\n elif data[1:4] == 'ump':\n bit='J'\n elif data[1:3] == 'hk':\n bit='K'\n elif data[1:3] == 'lr':\n bit='L'\n elif data[1:4] == 'g36':\n bit='M'\n elif data[1:4] == 'psg':\n bit='N'\n elif data[1:3] == 'sr':\n bit='Z'\n elif data[1:3] == 'ak':\n bit='a'\n elif data[1:4] == 'neg':\n bit='c'\n elif data[1:3] == 'm4':\n bit='e'\n elif data[1:4] == 'hel':\n bit='W'\n elif data[1:3] == 'he':\n bit='O'\n elif data[1:4] == 'smo':\n bit='Q'\n elif data[1:4] == 'kev':\n bit='R'\n elif data[1:4] == 'sil':\n bit='U'\n elif data[1:4] == 'las':\n bit='V'\n elif data[1:4] == 'med':\n bit='T'\n elif data[1:4] == 'nvg':\n bit='S'\n elif data[1:4] == 'xtr':\n bit='X'\n elif data[1:4] == 'ext':\n bit='X'\n else:\n return False\n\n if data[:1] == '-':\n if bit not in self._forbiddenWeapons:\n self._forbiddenWeapons.append(bit)\n self.console.say('^4%s^7 is now ^1disallowed'% find_key(self.weaponCodes, bit))\n else:\n client.message('^4%s^7 is already forbidden' % find_key(self.weaponCodes, bit))\n elif data[:1] == '+':\n if bit in self._forbiddenWeapons:\n try:\n self._forbiddenWeapons.remove(bit)\n self.console.say('^4%s^7 is now ^2allowed'% find_key(self.weaponCodes, bit))\n except:\n pass\n else:\n client.message('^4%s^7 is already allowed' % find_key(self.weaponCodes, bit))\n else:\n client.message('^7Invalid data, try !help weaponcontrol')\n return False\n\n self.checkConnectedPlayers()\n return True",
"def qualifies(self, weapon):\n return True",
"def testattributes(self):\n for attr in ('ST', 'DX', 'IQ', 'MA', 'Dam', 'Hit'):\n AttributeAbility([attr,])",
"def testabilities(self):\n for ability in AmuletAbility.typelist:\n a = AmuletAbility(ability)\n self.assertEqual(a.type, ability)\n if ability != 'Attribute':\n self.assert_(ability in str(a))\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def GroundExcelAddEnemyArmorType(builder, EnemyArmorType):\n return AddEnemyArmorType(builder, EnemyArmorType)",
"def testrandom(self):\n for i in range(100):\n WeaponAbility()",
"def disable_weapon(self, weapon):\n if weapon == \"nothing\":\n weapon = 0\n elif weapon == \"main\":\n weapon = 1\n elif weapon == \"secondary\":\n weapon = 2\n elif weapon == \"everything\":\n weapon = 3\n cmd = '{}testDisableWeaponMode {}'.format(self.console, weapon)\n self.write_command(cmd)",
"def equip(self):\n item_name = input(\"What item do you want to equip?\\n>\")\n if item_name in self.backpack:\n item = self.backpack[item_name]\n else:\n return \"You don't have this\"\n if item.type in self.equipped:\n self.equipped[item.type] = item\n if item.type == \"Weapon\":\n self.strength = item.strength\n return f\"You have equipped {item.name} on {item.type} item slot\"\n else:\n return \"You can not equip this\"",
"def __add_expanded_abilities(self, name):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n abilities = loop.run_until_complete(self.api.process_requests(\n \"ability\", name))\n ability_list = []\n factory = PokemonAbilityFactory(abilities, True)\n for ability in factory.create():\n ability_list.append(ability)\n return ability_list",
"def interpretSkillAdventurerAttack(\n skillEffectsWithName: tuple[str, list], adventurer: \"Adventurer\", enemy: \"Enemy\"\n) -> AdventurerSkill | None:\n # for index_to maybe list {\"modifier\": \"End. & Mag.\", \"target\": \"skill\", \"attribute\": \"indexed_to\",\"speed\": \"None\" }\n\n # test if skill effects empty\n if skillEffectsWithName:\n _, skillEffects = skillEffectsWithName\n else:\n skillEffects = []\n\n damage_skills = [\n x\n for x in skillEffects\n if x.attribute.lower().strip() == \"damage\"\n or (\n (x.element is not None and x.element != \"\")\n and (x.type == \"physical_attack\" or x.type == \"magic_attack\")\n )\n ]\n if len(damage_skills) > 0:\n damage_skill = damage_skills[0]\n # do the damage first if attribute == element and modifier== high/medium etc, type = attack\n index_to_effects = [\n x for x in skillEffects if x.attribute.lower().strip() == \"indexed_to\"\n ]\n index_to_modifier = set()\n # modifier is the index_to target\n for index_to_effect in index_to_effects:\n # \"attribute\" index_to\n index_to_modifier.add(index_to_effect.modifier)\n \"\"\"\n For temp boosts\n {\n \"modifier\": \"normal2_str\",\n \"target\": \"skill\",\n \"attribute\": \"temp_boost\",\n }\n \"\"\"\n temp_boost_effects = [\n x for x in skillEffects if x.attribute.lower().strip() == \"temp_boost\"\n ]\n if len(temp_boost_effects) > 0:\n temp_boost_mod = temp_boost_effects[0].modifier\n else:\n temp_boost_mod = \"none\"\n\n # loop through the variables to check if attribute exists\n extra_boosts_effects = [\n x for x in skillEffects if \"per_each\" in x.attribute.lower().strip()\n ]\n extra_boosts_value = 1.0\n # for example str/mag debuff\n if len(extra_boosts_effects) > 0:\n for extra_boosts in extra_boosts_effects:\n temp_extra_boosts = interpretExtraBoostWrapper(\n extra_boosts, adventurer, enemy\n )\n extra_boosts_value = extra_boosts_value + temp_extra_boosts\n # SELECT ase.AdventurerSkillEffectsid, ase.AdventurerSkillid, ase.duration, e.name AS element, m.value AS modifier, ty.name AS type, ta.name AS target, a.name AS attribute, s.name AS speed, ad.stars, ad.title, ad.alias, ad.limited, c.name\n ret = AdventurerSkill(\n damage_skill.target,\n temp_boost_mod,\n damage_skill.modifier,\n extra_boosts_value,\n 0,\n damage_skill.type,\n damage_skill.element,\n index_to_modifier,\n )\n return ret\n else:\n return None",
"def apply_enhancements(ability: dict, target: Player, self: Player) -> None:\n self.status_effects.append([\"enhancement_sickness\", 1])\n\n for enhancement in ability[\"enhancements\"]:\n if enhancement[\"target\"] == \"target\":\n getattr(combat_effects, \"inflict_\" + enhancement[\"effect\"])(\n value=enhancement[\"value\"], player=target\n )\n elif enhancement[\"target\"] == \"self\":\n getattr(combat_effects, \"inflict_\" + enhancement[\"effect\"])(\n value=enhancement[\"value\"], player=self\n )",
"def use_skill(self, g, i, x, y):\n # @ param g a reference to the game engine\n # @ param i the index of the skill (basically what skill)\n # @ param x the x target coordinate in game pixels\n # @ param y the y target coordinate in game pixels\n if self.attackTimer < self.attackDelay:\n print(\"attack on CD\")\n return\n \n if self.skill[i].skillAttr == 0:\n g.fire_skill_sound.play()\n elif self.skill[i].skillAttr == 1:\n g.ice_skill_sound.play()\n elif self.skill[i].skillAttr == 2:\n g.lightning_skill_sound.play()\n elif self.skill[i].skillAttr == 3:\n g.poison_skill_sound.play()\n \n \n if self.skill[i].skillKey == 0: #Aura\n #turn the aura on/off\n if self.skill[i].active == False:\n #print(\"aura on\")\n self.skill[i].active = True\n else:\n self.skill[i].active = False\n #print(\"aura off\")\n \n elif self.skill[i].skillKey == 1: #Missile\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n target = Target(x, y)\n center_x = self.rect.x + (self.rect.width / 2)\n center_y = self.rect.y + (self.rect.height / 2)\n #bullet types: fire 5, ice 6, lightning 7\n #skill types: fire 0, ice 1, lightning 2\n g.bullets.append(self.bulletFactory.createBullet(g, self.skill[i].skillAttr + 5, 0, self.attack, 1024, target, center_x, center_y))\n #print(\"missile\")\n\n elif self.skill[i].skillKey == 2: #Breath\n #for each creep in the AoE cone, do damage.\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n #get low and high angle (-45 degrees and +45 degrees from player -> point angle)\n lowAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) - 3.1415 / 2.0\n highAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) + 3.1415 / 2.0\n for creep in g.creeps:\n #get angle to creep\n creepAngle = math.atan2(creep.rect.centery - self.rect.centery, creep.rect.centerx - self.rect.centerx)\n \n #if angle to the creep is between the two angles\n if creepAngle > lowAngle and creepAngle < highAngle:\n #and the distance to the creep is below the skill's range\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 ) ** 0.5 < 4 * 24:\n creep.take_damage( self.attack )\n #print(\"breath\")\n #apply debuffs, based on type\n if self.skill[i].skillAttr == 0: #fire\n creep.applyBurning()\n elif self.skill[i].skillAttr == 1: #frost\n creep.applyChilled()\n elif self.skill[i].skillAttr == 2: #lightning\n creep.applyShocked()",
"def hk_armor(self):\n self.name = \"Holy Knight's Armor\"\n self.rarity = \"Common\"\n self.pdef_value = 40\n self.mdef_value = 10\n self.increase_crit = 0\n self.desc = \"Armor of the Holy Guard, you feel the light flowing.\"",
"def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(allies) > 0:\n self.self_id = allies[0].unit_type\n self_weapon_range = weapon_ranges[self.self_id]\n self_radius = unit_sizes[self.self_id] / float(2)\n self_unit_type = unit_type[self.self_id]\n self_speed = unit_speed[self.self_id]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n self.enemy_id = enemies[0].unit_type\n enemy_weapon_range = weapon_ranges[self.enemy_id]\n enemy_radius = unit_sizes[self.enemy_id] / float(2)\n enemy_unit_type = unit_type[self.enemy_id]\n enemy_speed = unit_speed[self.enemy_id]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n in_enemy_range = 0\n for ally in allies:\n for enemy in enemies:\n if self.retrieve_distance_between_positions([enemy.x, enemy.y], [ally.x, ally.y]) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n break\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed, self.self_id,\n self.enemy_id]",
"def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n if len(allies) > 0:\n self_weapon_range = weapon_ranges[allies[0].unit_type]\n self_radius = unit_sizes[allies[0].unit_type] / float(2)\n self_unit_type = unit_type[allies[0].unit_type]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n if len(enemies) > 0:\n enemy_weapon_range = weapon_ranges[enemies[0].unit_type]\n enemy_radius = unit_sizes[enemies[0].unit_type] / float(2)\n enemy_unit_type = unit_type[enemies[0].unit_type]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + enemy_weapon_range + enemy_radius):\n in_enemy_range = 1\n else:\n in_enemy_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, in_enemy_range, prev_cmd, north_bound, south_bound,\n west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type]",
"def SADamageFunction(\n skill: AdventurerSkill | None,\n adventurer: \"Adventurer\",\n enemy: \"Enemy\",\n memboost: dict[str, int | float],\n combo: int,\n saRng: float,\n) -> int:\n if skill is None:\n return 0\n\n # lowercase everything\n target = skill.target.lower()\n tempBoostName = skill.tempBoost.lower()\n powerCoefficientName = skill.powerCoefficient.lower()\n powerCoefficient = 1.0\n\n if tempBoostName == \"none\":\n tempBoost = 1.0\n elif \"normal\" in tempBoostName:\n tempBoost = 1.4\n else:\n tempBoost = 1.7\n\n if skill.target == \"foe\":\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.5\n case \"mid\" | \"medium\":\n powerCoefficient = 1.7\n case \"high\":\n powerCoefficient = 1.9\n case \"super\":\n powerCoefficient = 2.1\n case \"ultra\":\n powerCoefficient = 4.0\n else:\n match powerCoefficientName:\n case \"low\" | \"lo\":\n powerCoefficient = 1.1\n case \"mid\" | \"medium\":\n powerCoefficient = 1.15\n case \"high\":\n powerCoefficient = 1.2\n case \"super\":\n powerCoefficient = 1.4\n case \"ultra\":\n powerCoefficient = 3.6\n\n if \"physical\" in skill.type:\n stat_key = \"strength\"\n resist_key = \"physical\"\n else:\n stat_key = \"magic\"\n resist_key = \"magic\"\n\n tempPower = adventurer.stats[stat_key]\n tempPowerBoostAdv = adventurer.statsBoostAdv[stat_key]\n tempPowerBoostAst = adventurer.statsBoostAst[stat_key]\n tempMemBoost = memboost[stat_key]\n\n tempTypeResistDownBase = enemy.typeResistDownBase[resist_key]\n tempTypeResistDownAdv = enemy.typeResistDownAdv[resist_key]\n tempTypeResistDownAst = enemy.typeResistDownAst[resist_key]\n # check enemy buffs p/m resist\n tempTypeResistBuff = enemy.get_buff_mod(f\"{resist_key}_resist\")\n\n # get strength/magic debuff\n powerDebuff = adventurer.get_boostCheckAdv(False, stat_key)\n tempPowerBoostDebuff = 0.0\n if powerDebuff is not None:\n tempPowerBoostDebuff = abs(powerDebuff.modifier)\n else:\n tempPowerBoostDebuff = 0\n\n if len(skill.index_to) != 0:\n tempPower = 0\n tempPowerBoostAdv = 0.0\n tempPowerBoostAst = 0.0\n tempMemBoost = 0\n powerCoefficient = powerCoefficient * 1.96\n for index_to_attributes in skill.index_to:\n tempPower += adventurer.stats[index_to_attributes]\n tempPowerBoostAdv += adventurer.statsBoostAdv[index_to_attributes]\n tempPowerBoostAst += adventurer.statsBoostAst[index_to_attributes]\n tempMemBoost += memboost[index_to_attributes]\n tempElementBoostDebuff = 0.0\n if skill.element != \"\" and skill.noType != 1:\n # elementResistDownBase\n tempElementResistDownBase = enemy.elementResistDownBase[skill.element]\n # elementResistDownAdv\n tempElementResistDownAdv = enemy.elementResistDownAdv[skill.element]\n # elementResistDownAst\n tempElementResistDownAst = enemy.elementResistDownAst[skill.element]\n # elementDamageBoostAdv[location]\n\n tempElementDamageBoostAdv = adventurer.elementDamageBoostAdv[skill.element]\n if memboost.get(f\"{skill.element}_attack\") is not None:\n tempElementDamageBoostAdv += memboost[f\"{skill.element}_attack\"]\n # elemental damage boost from weapon\n if adventurer.stats.get(skill.element) is not None:\n tempElementDamageBoostAdv += cast(float, adventurer.stats[skill.element])\n # elementDamageBoostAst[location]\n tempElementDamageBoostAst = adventurer.elementDamageBoostAst[skill.element]\n # element debuff\n tempEleDebuff = adventurer.get_boostCheckAdv(False, f\"{skill.element}_attack\")\n if tempEleDebuff is not None:\n tempElementBoostDebuff = abs(tempEleDebuff.modifier)\n else:\n tempElementResistDownBase = 0.0\n 
tempElementResistDownAdv = 0.0\n tempElementResistDownAst = 0.0\n tempElementDamageBoostAdv = 0.0\n tempElementDamageBoostAst = 0.0\n\n if target == \"foe\":\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"st\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"st\"]\n # foes\n else:\n temptargetResistDownAdv = enemy.targetResistDownAdv[\"aoe\"]\n temptargetResistDownAst = enemy.targetResistDownAst[\"aoe\"]\n\n temp_enemy_end = enemy.stats\n\n tempDamage = (\n (\n max(\n 2\n * tempPower\n * tempBoost\n * (\n 1\n + tempPowerBoostAdv\n + tempPowerBoostAst\n + tempMemBoost\n - tempPowerBoostDebuff\n )\n - temp_enemy_end[\"endurance\"],\n 0,\n )\n )\n * (\n 1\n - tempElementResistDownBase\n - tempElementResistDownAdv\n - tempElementResistDownAst\n - tempTypeResistDownBase\n - tempTypeResistDownAdv\n - tempTypeResistDownAst\n - tempTypeResistBuff\n )\n * (\n 1\n + tempElementDamageBoostAdv\n + tempElementDamageBoostAst\n - tempElementBoostDebuff\n )\n * (1 + adventurer.critPenBoost + 0.06)\n * (1 - temptargetResistDownAdv - temptargetResistDownAst)\n * powerCoefficient\n * 1.5\n * (skill.extraBoost)\n * (0.8 + combo * 0.2)\n * saRng\n )\n return int(tempDamage)",
"def test_enhancement(self):\n # Arrange - add statuses here\n expected_statuses = [[2, \"prone\", 1],\n [2, \"disorient\", 1]]\n ability_combos = [[\"disrupt\", \"block\"],\n [\"area\", \"disrupt\"]]\n\n # Act\n for expected_status, ability_combo in zip(expected_statuses, ability_combos):\n player = Character.objects.get(pk=1)\n target = Character.objects.get(pk=2)\n expected_player_hp = player.hit_points\n expected_target_hp = target.hit_points - 100\n\n # Perform a round of combat\n object_to_test = Combat(player=player,\n target=target,\n player_attack_type=ability_combo[0],\n target_attack_type=ability_combo[1],\n player_enhanced=True)\n\n # Act\n # Perform a round of combat\n player, target = object_to_test.do_combat_round()\n status_effects_list = list(StatusEffects.objects.all().values_list())\n\n # Assert\n self.assertEqual(player.hit_points, expected_player_hp)\n self.assertEqual(target.hit_points, expected_target_hp)\n self.assertEqual(expected_status[0], status_effects_list[0][1])\n self.assertEqual(expected_status[1], status_effects_list[0][2])\n self.assertEqual(expected_status[2], status_effects_list[0][3])",
"def testrange(self):\n for range_ in range(1, 5):\n a = WeaponAbility('Animated', range=range_)\n self.assert_(str(range_) in str(a))",
"def testworsethan(self):\n large = WeaponAbility('Animated', range=3)\n small = WeaponAbility('Animated', range=2)\n self.assertFalse(small.worsethan(small))\n self.assertFalse(small.worsethan(WeaponAbility('Animated', range=2)))\n self.assertFalse(large.worsethan(small))\n self.assertTrue(small.worsethan(large))\n large = WeaponAbility('Defender', size=3)\n small = WeaponAbility('Defender', size=2)\n self.assertFalse(small.worsethan(small))\n self.assertFalse(small.worsethan(WeaponAbility('Defender', size=2)))\n self.assertFalse(large.worsethan(small))\n self.assertTrue(small.worsethan(large))\n abil = MentalAbility('Beacon', 1)\n large = WeaponAbility('Enhanced', abilities=[abil,])\n abil = MentalAbility('Avert', 2)\n small = WeaponAbility('Enhanced', abilities=[abil,])\n self.assertFalse(small.worsethan(small))\n self.assertFalse(small.worsethan(\n WeaponAbility('Enhanced', abilities=[abil,])))\n self.assertFalse(large.worsethan(small))\n self.assertTrue(small.worsethan(large))",
"def apply_effects(self, item_name):\n item = self.get(item_name)\n\n # Enable commands\n for command in item.effects.get(\"enable_commands\", []):\n if command not in self.game.state.commands_enabled:\n self.game.alert(\"You unlocked the `{}` command\", command)\n self.game.state.commands_enabled.append(command)\n\n # Enable resouces\n for resources in item.effects.get(\"enable_resources\", []):\n if resources not in self.game.state.resources_enabled:\n self.game.alert(\"You can now mine *{}*.\", resources)\n self.game.state.resources_enabled.append(resources)\n\n # Enable items\n for item_name in item.effects.get(\"enable_items\", []):\n if item_name not in self.game.state.tools_enabled:\n self.game.alert(\"You can now craft ${}$.\", item_name)\n self.game.state.tools_enabled.append(item_name)\n\n # Enable research\n for research in item.effects.get(\"enable_research\", []):\n if research not in self.game.state.research_enabled:\n self.game.alert(\"You can now research @{}@.\", research)\n self.game.state.research_enabled.append(research)\n\n # Trigger flags\n for trigger in item.effects.get(\"triggers\", []):\n if trigger not in self.game.state.triggers:\n self.game.state.triggers.append(trigger)\n\n # Grant resources\n for resource in RESOURCES:\n if resource in item.effects:\n value = to_float(item.effects[resource])\n self.game.resources.add(resource, value)\n if value > 0:\n self.game.alert(\"You found *{} {}*.\", value, resource)\n else:\n self.game.alert(\"You lost *{} {}*.\", -value, resource)\n\n # Change mining difficulty\n for resource in RESOURCES:\n change = item.effects.get(f\"{resource}_mining_difficulty\", None)\n if change:\n change = to_float(change)\n self.game.mining_difficulty.multiply(resource, 1 - change)\n self.game.alert(\n \"*{}* mining difficulty reduced by {:.0%}.\", resource, change\n )\n\n # Trigger events\n self.game.events.trigger(*item.effects.get(\"events\", []))",
"def test_product_stealability_and_explode(self):\n prod = Product('Test Product', weight=200, flammability=5.0)\n stealable = prod.stealability()\n explodable = prod.explode()\n self.assertEqual(stealable, 'Not so stealable...')\n self.assertEqual(explodable, '...BABOOM!!')",
"def retrieve_handcrafted_inputs(self, obs):\n feature_units = obs.observation.feature_units\n allies = [unit for unit in feature_units if unit.alliance == _PLAYER_SELF]\n enemies = [unit for unit in feature_units if unit.alliance == _PLAYER_ENEMY]\n\n current_hp = self.calculate_hitpoints(feature_units, _PLAYER_SELF)\n current_hp = current_hp / self.initial_self_hit_points\n\n weapon_cooldown = 0\n for ally in allies:\n if ally.weapon_cooldown > 0:\n weapon_cooldown += 1\n if weapon_cooldown > (len(allies) / 2):\n # nn input weapon cooldown = 1 means the majority cannot fire\n weapon_cooldown = 1\n else:\n weapon_cooldown = 0\n\n self_weapon_range = 5\n self_radius = 1\n self_unit_type = 1\n self_speed = 1\n if len(allies) > 0:\n self_weapon_range = weapon_ranges[allies[0].unit_type]\n self_radius = unit_sizes[allies[0].unit_type] / float(2)\n self_unit_type = unit_type[allies[0].unit_type]\n self_speed = unit_speed[allies[0].unit_type]\n\n enemy_radius = 1\n enemy_weapon_range = 1\n enemy_unit_type = 0\n enemy_speed = 1\n if len(enemies) > 0:\n enemy_weapon_range = weapon_ranges[enemies[0].unit_type]\n enemy_radius = unit_sizes[enemies[0].unit_type] / float(2)\n enemy_unit_type = unit_type[enemies[0].unit_type]\n enemy_speed = unit_speed[enemies[0].unit_type]\n\n if self.retrieve_distance_between_positions(self.retrieve_enemy_location(obs),\n self.get_current_location(obs)) < (\n self_radius + self_weapon_range + enemy_radius):\n enemy_in_range = 1\n else:\n enemy_in_range = 0\n\n north_bound, south_bound, west_bound, east_bound = self.calculate_distance_to_bounds(obs)\n\n if self.previous_command == \"FIGHT\":\n prev_cmd = 1\n elif self.previous_command == \"FLEE\":\n prev_cmd = 0\n\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence = self.detect_enemies_by_region(obs)\n\n return [current_hp, weapon_cooldown, enemy_in_range, prev_cmd, north_bound, south_bound, west_bound, east_bound,\n nw_enemy_presence, ne_enemy_presence, sw_enemy_presence, se_enemy_presence, self_unit_type,\n enemy_unit_type, self_weapon_range, enemy_weapon_range, self_speed, enemy_speed]",
"def canWield(self, item):\n if (item.weapon or item.spell) and 'no_weapons' in self.status_bundle:\n return False\n if Weapons.TRIANGLE.isMagic(item) and 'no_magic_weapons' in self.status_bundle:\n return False\n # if the item is a weapon\n if item.weapon:\n itemLvl = item.weapon.LVL\n elif item.spell:\n itemLvl = item.spell.LVL\n else:\n return True # does not have a level so it can be used\n\n idx = Weapons.TRIANGLE.name_to_index[item.TYPE]\n unitwexp = self.wexp[idx]\n if itemLvl in Weapons.EXP.wexp_dict and unitwexp >= Weapons.EXP.wexp_dict[itemLvl]:\n return True\n elif itemLvl == self.name: # If this weapon is for me!\n return True\n else:\n return False",
"def powerWeapons(self, interval, availPower):\n if self.allWeaponsPowered == 0:\n weaponList = []\n for position, myQuad in self.quads.iteritems():\n weaponIDList = []\n weaponIDList.extend(funcs.sortStringList(myQuad.weapons.keys()))\n for wID in weaponIDList:\n weaponList.append(myQuad.weapons[wID])\n\n while availPower > 0 and self.allWeaponsPowered == 0:\n toCharge = []\n toChargeAMS = []\n # go through each quadrant looking for weapons to power\n for myWeapon in weaponList:\n if myWeapon.operational == 1 and myWeapon.currentPower < myWeapon.myWeaponData.maxPower:\n if 1 == myWeapon.myWeaponData.AMS:\n toChargeAMS.append(myWeapon)\n else:\n toCharge.append(myWeapon)\n\n if len(toChargeAMS) == 0 and len(toCharge) == 0:\n self.allWeaponsPowered = 1\n return availPower\n\n #AMS are charged first and sequentially\n if len(toChargeAMS) != 0:\n if availPower !=0:\n for myW in toChargeAMS:\n defecit=myW.myWeaponData.maxPower - myW.currentPower\n if defecit >= availPower:\n myW.currentPower+=availPower\n availPower=0\n break\n else:\n myW.currentPower=myW.myWeaponData.maxPower\n availPower-=defecit\n\n #non-AMS weapons are charged concurrently; each gets an equal share of the available power \n if len(toCharge) != 0:\n kW=availPower/len(toCharge)\n if kW !=0:\n #print \"tT:\",len(toCharge),\"aP:\",availPower,\"kW each:\",kW\n for myW in toCharge:\n defecit=myW.myWeaponData.maxPower - myW.currentPower\n if defecit >= kW:\n myW.currentPower+=kW\n availPower-=kW\n else:\n myW.currentPower=myW.myWeaponData.maxPower\n availPower-=kW-defecit\n else:\n availPower=0\n\n return availPower",
"async def play_axe(game_state) -> None:\n if len(game_state.active_player.zombies) > 0:\n play_weapon(game_state, Supply.AXE)\n else:\n game_state.active_player.print(f'You cannot play {Supply.AXE.value} for nothing!')",
"def visit_equipment(self, equipment):",
"def test_visualize_equipment(self):\n pass"
] | [
"0.63859636",
"0.62416124",
"0.62350196",
"0.60346496",
"0.60204816",
"0.59555095",
"0.57886255",
"0.5700426",
"0.56838894",
"0.5664235",
"0.56232107",
"0.5617325",
"0.5582766",
"0.5571561",
"0.55504894",
"0.5511715",
"0.5506456",
"0.54907763",
"0.54576594",
"0.54487455",
"0.5444778",
"0.5428952",
"0.5422938",
"0.5412076",
"0.5411227",
"0.54073536",
"0.5397731",
"0.53910863",
"0.53768027",
"0.53717184"
] | 0.7143327 | 0 |
Provide invalid mental abilities, to generate errors. | def testinvalidenhancements(self):
self.assertRaises(AbilityError,
WeaponAbility, 'Enhanced', abilities=[])
list = [MentalAbility('Fireball', 3),] * 6
self.assertRaises(AbilityError,
WeaponAbility, 'Enhanced', abilities=list)
list = [PhysicalAbility('Sword', 3),]
self.assertRaises(AbilityError,
WeaponAbility, 'Enhanced', abilities=list)
list = [MentalAbility('Fireball', 3),] * 3
self.assertRaises(AbilityError,
WeaponAbility, 'Guided', abilities=list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testinvalidability(self):\n self.assertRaises(AbilityError, AmuletAbility, 'Invalid')\n self.assertRaises(AbilityError, AmuletAbility, '')",
"def testinvalidability(self):\n self.assertRaises(AbilityError, WeaponAbility, 'Invalid')\n self.assertRaises(AbilityError, WeaponAbility, '')",
"def testinvalidrange(self):\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range=0)\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range=6)\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range='3')\n self.assertRaises(AbilityError, WeaponAbility, 'Changling', range=3)",
"def testinvalidattribute(self):\n self.assertRaises(AbilityError, AmuletAbility, 'Attribute', attr='Dam')\n self.assertRaises(AbilityError, AmuletAbility, 'Attribute', attr='')\n self.assertRaises(AbilityError, \n AmuletAbility, 'Control NPC', attr='ST')",
"def testinvalidattribute(self):\n self.assertRaises(AbilityError, AttributeAbility, 'Invalid')\n self.assertRaises(AbilityError, AttributeAbility, '', 3)",
"def testinvalidelement(self):\n self.assertRaises(AbilityError, \n AmuletAbility, 'Proof', element='Invalid')\n self.assertRaises(AbilityError, AmuletAbility, 'Proof', element='')\n self.assertRaises(AbilityError, \n AmuletAbility, 'Control NPC', element='Fire')",
"def invalid(self):\n pass",
"def test_invalid_action(self):\n\n # Prepare.\n app = self.factory()\n request = self.getRequest(app)\n context = model.factory()\n\n # Ask for permission.\n state_changer = request.state_changer\n self.assertFalse(state_changer.can_perform(context, a.COMPLETE))\n\n # Beg for forgiveness.\n err = fysom.FysomError\n self.assertRaises(err, state_changer.perform, context, a.COMPLETE, None)",
"def invalidsize(self):\n self.assertRaises(AbilityError, AmuletAbility, 'Skepticism', size=0)\n self.assertRaises(AbilityError, AmuletAbility, 'Skepticism', size=6)\n self.assertRaises(AbilityError, AmuletAbility, 'Control NPC', size=3)",
"def testinvalidsize(self):\n self.assertRaises(AbilityError, WeaponAbility, 'Defender', size=0)\n self.assertRaises(AbilityError, WeaponAbility, 'Defender', size=6)\n self.assertRaises(AbilityError, WeaponAbility, 'Defender', size='3')\n self.assertRaises(AbilityError, WeaponAbility, 'Changling', size=3)",
"def unable(self):\n response.status = 400\n return {'message':'current state does not allow modification'}",
"def test_check_for_unacceptable_matches_hospitals(game):\n\n hospital = game.hospitals[0]\n resident = Resident(name=\"foo\")\n hospital.matching.append(resident)\n\n issues = game._check_for_unacceptable_matches(\"hospitals\")\n assert len(issues) == 1\n\n issue = issues[0]\n assert issue.startswith(hospital.name)\n assert issue.endswith(f\"{hospital.prefs}.\")\n assert resident.name in issue\n\n with pytest.raises(MatchingError) as e:\n game.check_validity()\n error = e.unacceptable_matches[0]\n assert issue == error",
"def test_health_facilities_endpoint_with_bad_endpoint(self):\n response = self.client.get(\"search/healthfacilities?q=Kitale\")\n self.assertIn(b'\"status\": \"FAILED\"', response.data)",
"def test_get_accessory_invalid_aid(caplog):\n assert get_accessory(None, State('light.demo', 'on'),\n None, config=None) is None\n assert caplog.records[0].levelname == 'WARNING'\n assert 'invalid aid' in caplog.records[0].msg",
"def testinvalidsize(self):\n self.assertRaises(AbilityError, AttributeAbility, size=0)\n self.assertRaises(AbilityError, AttributeAbility, size=6)",
"def test_regularUserCantIlluminate(self):\n objects.LocationLighting(thing=self.location,\n store=self.location.store,\n candelas=100)\n self._test(\n \"illuminate 0\",\n [\"You are insufficiently brilliant to do that directly.\"])\n self.assertEquals(self.store.findUnique(\n objects.LocationLighting,\n objects.LocationLighting.thing == self.location).candelas, 100)",
"def test_kyc_get_validation_legal(self):\n pass",
"def violated(self) -> bool:\n ...",
"def whyNotLegal(self):\r\n return self._getLegalityStatus()[1]",
"def test_check_for_unacceptable_matches_residents(game):\n\n resident = game.residents[0]\n hospital = Hospital(name=\"foo\", capacity=1)\n resident.matching = hospital\n\n issues = game._check_for_unacceptable_matches(\"residents\")\n assert len(issues) == 1\n\n issue = issues[0]\n assert issue.startswith(resident.name)\n assert issue.endswith(f\"{resident.prefs}.\")\n assert hospital.name in issue\n\n with pytest.raises(MatchingError) as e:\n game.check_validity()\n error = e.unacceptable_matches[0]\n assert issue == error",
"def test_gender_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_gender(val))",
"def testInvalidDescriptions(self):\n self.assertFalse(self.app._ignore_jobs(\"telecommuting is not an option\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommuting\"))\n self.assertFalse(self.app._ignore_jobs(\"No telecommute\"))\n self.assertFalse(self.app._ignore_jobs(\"TELECOMMUTE IS NOT AN OPTION\"))",
"def test_invalid_inputs(self):\n f = gtrutils.check_petition_combos\n \n self.assertFalse( f(-1, 1, [], False, False))\n self.assertFalse( f( 0, 1, [], False, False))\n self.assertFalse( f( 1, 0, [], False, False))\n self.assertFalse( f( 1, 1, [-1], False, False))\n self.assertFalse( f( 1,-1, [], False, False))\n self.assertFalse( f( 1, 1, [1], False, False)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1], True, False)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1], False, True)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1], True, True)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1,3], True, True)) # n_off_role can never be 1\n\n self.assertFalse( f( 3, 0, [2,3,3], False, True)) # n_off_role can never be 1\n self.assertFalse( f( 3, 0, [2,3,3], True, False)) # n_off_role can never be 1\n self.assertFalse( f( 2, 0, [2,3,3], False, True)) # n_off_role can never be 1\n self.assertFalse( f( 2, 0, [2,3,3], True, False)) # n_off_role can never be 1\n self.assertFalse( f( 5, 1, [6,6], True, False)) # n_off_role can never be 1",
"def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.careers(student_id)\n self.assertFalse(result)",
"def handle_illegal_action(self, state, illegal_action):\n #############################\n #\n #\n # YOUR CODE HERE\n #\n #\n #############################\n print(\"An illegal action was attempted:\")\n print('State: ' + str(state))\n print('Action: ' + str(illegal_action))\n\n #self.game.set_state(,game.get_last_player())",
"def test_lta_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lta_order_bad)",
"def test_admin_employees_menu_wrong_choice(self, inputs):\n inputs.side_effect = ['9', '5']\n\n assert Admin(object).admin_employees_menu() is True",
"def conditionFailed(self):\n result = Activatable(self.effects, condition=AlwaysFalseCondition()).canActivate(self.game)\n self.assertFalse(result, \"The Activatable should not be activatable\")",
"def test_err_handling(self):\r\n problem = self.build_problem(answer=4)\r\n\r\n errors = [ # (exception raised, message to student)\r\n (calc.UndefinedVariable(\"x\"), r\"You may not use variables \\(x\\) in numerical problems\"),\r\n (ValueError(\"factorial() mess-up\"), \"factorial function evaluated outside its domain\"),\r\n (ValueError(), \"Could not interpret '.*' as a number\"),\r\n (pyparsing.ParseException(\"oopsie\"), \"Invalid math syntax\"),\r\n (ZeroDivisionError(), \"Could not interpret '.*' as a number\")\r\n ]\r\n\r\n with mock.patch('capa.responsetypes.evaluator') as mock_eval:\r\n for err, msg_regex in errors:\r\n\r\n def evaluator_side_effect(_, __, math_string):\r\n \"\"\"Raise an error only for the student input.\"\"\"\r\n if math_string != '4':\r\n raise err\r\n mock_eval.side_effect = evaluator_side_effect\r\n\r\n with self.assertRaisesRegexp(StudentInputError, msg_regex):\r\n problem.grade_answers({'1_2_1': 'foobar'})",
"def test_start_list_of_invalid_mechanisms(self):\r\n self.sasl.start(['invalid1', 'invalid2'])\r\n self.assertEqual(self.sasl.getError(), 'None of the mechanisms listed meet all required properties')"
] | [
"0.69706166",
"0.6763972",
"0.64050066",
"0.63184166",
"0.62757283",
"0.61525935",
"0.59503865",
"0.5758525",
"0.57436514",
"0.5706633",
"0.5680509",
"0.55470926",
"0.5533897",
"0.5517258",
"0.55008376",
"0.5483395",
"0.5479991",
"0.54770255",
"0.5465361",
"0.5460375",
"0.5440214",
"0.54334056",
"0.54251975",
"0.5418449",
"0.5415034",
"0.54010653",
"0.53906095",
"0.53903335",
"0.5388807",
"0.5369715"
] | 0.69791776 | 0 |
Ensure random abilities may be generated without error. | def testrandom(self):
for i in range(100):
WeaponAbility() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testrandom(self):\n for i in range(100):\n AmuletAbility()",
"def random_die():\n return randrange(1, 6)",
"def test_insufficient_shuffle(self):\n self.deck._deal(1)\n with self.assertRaises(ValueError):\n self.deck.shuffle()",
"def luck_check(chance):\n return randint(0, 100) < chance",
"def testinvalidability(self):\n self.assertRaises(AbilityError, WeaponAbility, 'Invalid')\n self.assertRaises(AbilityError, WeaponAbility, '')",
"def testrange(self):\n for range_ in range(1, 5):\n a = WeaponAbility('Animated', range=range_)\n self.assert_(str(range_) in str(a))",
"def random_legal_move():\n return random.choice(legal_moves())",
"def testinvalidrange(self):\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range=0)\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range=6)\n self.assertRaises(AbilityError, WeaponAbility, 'Animated', range='3')\n self.assertRaises(AbilityError, WeaponAbility, 'Changling', range=3)",
"def testinvalidenhancements(self):\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=[])\n list = [MentalAbility('Fireball', 3),] * 6\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=list)\n list = [PhysicalAbility('Sword', 3),]\n self.assertRaises(AbilityError, \n WeaponAbility, 'Enhanced', abilities=list)\n list = [MentalAbility('Fireball', 3),] * 3\n self.assertRaises(AbilityError,\n WeaponAbility, 'Guided', abilities=list)",
"def randomAction():\n return np.random.randint(0, POSSIBLE_ACTIONS)",
"def check_for_combat():\n if random.randint(1, 4) == 1:\n return True\n else:\n return False",
"def _validate_random_seeds(self):\n if self.random_seeds:\n if len(self.random_seeds) != len(self.sampler):\n raise ValueError(\"Number of given range objects in random_seeds\"\\\n \"and number of sampler objects need to be equal!\")\n if len(set(list(map(len,self.random_seeds)))) != 1:\n raise ValueError(\"Length of range objects in random_seeds\"\\\n \"list must be equal!\")",
"def random_valid(self):\n if random_exp > 0:\n args.exp = random.sample(exp_choices, random_exp)\n elif random_exp < 0:\n args.exp = random.sample(exp_choices, random.randint(0, -random_exp))\n btypes_str = 'T'*8+'S'*4+'U'*(5 - len(args.exp))+'P'*3+'G'*2+'F'*2+'A'*3+'1'*3+'2'*2+'3'*1+'4'*1+'5'*1+'O'*8+'M'*(-args.monuments if args.monuments < 0 else 0)\n btypes_min_str = 'T'*0+'S'*0+'U'*len(args.exp)+'P'*0+'G'*0+'F'*0+'A'*0+'1'*0+'2'*0+'3'*0+'4'*0+'5'*0+'O'*0+'M'*(args.monuments if args.monuments > 0 else 0)\n len_min = len(btypes_min_str)\n while 1:\n ## TSU_PG_FA_12345_OM\n ## tot845_32_23_32111_81\n ## min00E_00_00_00000_00\n bpos = list(range(20))\n self.b = ['_'] * 20\n self.f = [1] * 20\n cnt_b = 0\n btypes_min = list(btypes_min_str)\n random.shuffle(btypes_min)\n while cnt_b < len_min:\n s_bpos = random.choice(bpos)\n c_bding = self.b[s_bpos]\n if c_bding == 'T' or c_bding == 'O':\n if self.f[s_bpos] < 5 and c_bding in btypes_min:\n btypes_min.remove(c_bding)\n cnt_b += 1\n self.f[s_bpos] += 1\n else:\n bpos.remove(s_bpos)\n else:\n s_bding = btypes_min.pop(-1)\n cnt_b += 1\n self.b[s_bpos] = s_bding\n if s_bding != 'T' and s_bding != 'O':\n bpos.remove(s_bpos)\n btypes = list(btypes_str)\n random.shuffle(btypes)\n while cnt_b < 20:\n s_bpos = random.choice(bpos)\n c_bding = self.b[s_bpos]\n if c_bding == 'T' or c_bding == 'O':\n if self.f[s_bpos] < 5 and c_bding in btypes:\n btypes.remove(c_bding)\n cnt_b += 1\n self.f[s_bpos] += 1\n else:\n bpos.remove(s_bpos)\n else:\n s_bding = btypes.pop(-1)\n cnt_b += 1\n self.b[s_bpos] = s_bding\n if s_bding != 'T' and s_bding != 'O':\n bpos.remove(s_bpos)\n self.calc_resources()\n if self.popula_used <= self.popula and self.energy_used <= self.energy:\n break",
"def testinvalidability(self):\n self.assertRaises(AbilityError, AmuletAbility, 'Invalid')\n self.assertRaises(AbilityError, AmuletAbility, '')",
"def confused(self, rand):\n return rand > 0",
"def generate_random_roles():\n\n role_names = set()\n roles_size = 0\n while roles_size != 3:\n role_name = ''\n for i in range(3):\n role_name += random.choice(['0', '1'])\n if role_name not in role_names:\n role_names.add(role_name)\n roles_size += 1\n\n for role_name in role_names:\n delete_access = ActionTypes.DELETE.value if role_name[0] == '1' else ''\n write_access = ActionTypes.WRITE.value if role_name[1] == '1' else ''\n read_access = ActionTypes.READ.value if role_name[2] == '1' else ''\n\n allowed_actions = [access for access in (delete_access, write_access, read_access) if access]\n Role(role_name, allowed_actions)",
"def throw(self):\n self.side = random.randint(1, self.num_sides)",
"def test_sample(self):\n liste = list(range(10))\n extrait = random.sample(liste, 5)\n for element in extrait:\n self.assertIn(element, liste)\n\n with self.assertRaises(ValueError):\n random.sample(liste, 20)",
"def is_exhausted(self):\n return random.random() < 0.5",
"def randomLeggings():\n return random.choice(LEGGINGS)",
"def test_uniform_basic():\r\n yield check_uniform_basic, False\r\n yield check_uniform_basic, False, True\r\n yield check_uniform_basic, True",
"def get_random_excuse():\n return excuses[random.randint(1, len(excuses) - 1)]",
"def test_sample(self):\n extrait = random.sample(self.liste, 5)\n for element in extrait:\n self.assertIn(element, self.liste)\n\n with self.assertRaises(ValueError):\n random.sample(self.liste, 20)",
"def test_sample(self):\n extrait = random.sample(self.liste, 5)\n for element in extrait:\n self.assertIn(element, self.liste)\n\n with self.assertRaises(ValueError):\n random.sample(self.liste, 20)",
"def test_sample(self):\n extrait = random.sample(self.liste, 5)\n for element in extrait:\n self.assertIn(element, self.liste)\n\n with self.assertRaises(ValueError):\n random.sample(self.liste, 20)",
"def random_test(self):\r\n return 1",
"def random_test(self):\r\n return 1",
"def test_rng_invalid_value(self):\n with pytest.raises(ValueError) as exc:\n check_random_state(\"oh_no_oh_no\")\n\n assert \"'oh_no_oh_no' cannot be used to seed\" in str(exc.value)",
"def mover_aleatoriamente(self):\n self.randomizador = random.randint(0,4)",
"def __validate__(self):\n if self.train:\n assert self.random is not None"
] | [
"0.71204704",
"0.6261142",
"0.6225658",
"0.61589783",
"0.61494786",
"0.61290914",
"0.61236346",
"0.61155415",
"0.6106622",
"0.60941434",
"0.60484105",
"0.6033411",
"0.60260797",
"0.60189956",
"0.59546584",
"0.59362406",
"0.59291255",
"0.59213823",
"0.58713335",
"0.5846206",
"0.5846033",
"0.58418804",
"0.5816671",
"0.5816671",
"0.5816671",
"0.5814146",
"0.5814146",
"0.5812342",
"0.57235146",
"0.57134414"
] | 0.66446924 | 1 |
Reads the table and returns a dataframe. This is a short convenience script that lets users import data without having to worry about the file type. | def read_table(file_name: Union[str, Path], **kwargs):
file_name = Path(file_name)
extension = file_name.suffix
default_args = {
'.csv': {'delimiter': ','},
'.tsv': {'delimiter': '\t'}
}
# arguments = self._cleanArguments(extension, arguments)
file_name = str(file_name.absolute())
if extension in {'.xls', '.xlsx', '.xlsm'}: # .xlsm is not a typo.
df = pandas.read_excel(file_name, **kwargs)
elif extension in {'.csv', '.tsv', '.fsv', '.txt'}:
arguments = {**default_args.get(extension), **kwargs}
if 'sheetname' in arguments: arguments.pop('sheetname')
df = pandas.read_table(file_name, **arguments)
elif extension == '.pkl':
df = pandas.read_pickle(file_name)
else:
raise NameError("{} does not have a valid extension!".format(file_name))
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse(self):\n if self.filename.endswith('.gz'):\n compression = 'gzip'\n elif self.filename.endswith('.bz2'):\n compression = 'bz2'\n else:\n compression = None\n df = pd.read_table(self.filename, compression=compression)\n\n # drop empty column from extra tab\n df.dropna(axis=1, how='all', inplace=True)\n return df",
"def _read_tab(pth):\n if not os.path.exists(pth):\n raise SampleTableFileException(\n \"File does not exist: {}\".format(pth))\n read_csv_kwargs = {\"engine\": \"python\", \"dtype\": str,\n \"index_col\": False, \"keep_default_na\": False,\n \"na_values\": [\"\"]}\n return pd.read_csv(pth, sep=infer_delimiter(pth), **read_csv_kwargs)",
"def run(self) -> pd.DataFrame:\n with open(self.file_path, 'r') as in_file:\n headers = in_file.readline()\n headers = headers.replace(\"\\n\", \"\")\n\n if ',' in headers:\n headers = headers.split(',')\n else:\n headers = headers.split()\n\n if headers == self.NORMAL_HEADERS:\n return self.normal_csv()\n else:\n return self.read_data_columns_to_rows()",
"def read(tablename: str()):\n return pd.read_csv(tablename, dtype={'source_id': str})",
"def load_raw_table(conf, table):\n confrd = load_config_raw_data(conf)\n path_table = Path(confrd[table][\"path\"])\n sep = confrd[table][\"sep\"]\n encoding = confrd[table][\"encoding\"]\n df = pd.read_csv(path_table, sep=sep, encoding=encoding)\n return df",
"def read_table(file, **kwargs):\n extn = Reader.get_extension(file).lower()\n if extn.startswith('.xls'):\n return read_table_excel(file, **kwargs)\n elif extn == '.gsheet':\n if hasattr(file, 'as_posix'): # a pathlib.Path object\n file = str(file)\n elif hasattr(file, 'name'): # a TextIOWrapper object\n file = file.name\n return read_table_gsheets(file[:-7], **kwargs) # ignore the extension\n else:\n return read_table_text(file, **kwargs)",
"def import_files_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)",
"def load_file(self):\n\n self.df = self.sqlContext.read.csv(self.source, sep=self.sep, header=True, inferSchema=True)",
"def load(file):\n return pq.read_table(file).to_pandas()",
"def load_data(database_filepath, table_name):\r\n # instance to the database engine\r\n engine = create_engine('sqlite:///{}'.format(database_filepath))\r\n\r\n # read form the database table\r\n df = pd.read_sql_table(table_name, con=engine)\r\n\r\n return df # return our df\r",
"def load_table(**kargs):\n from transformer import dehyphenate\n sep = LoincMTRT.delimit # kargs.get('sep', ',')\n input_dir = kargs.get('input_dir', 'data')\n dehyphen = kargs.get('dehyphenate', True)\n deq = kargs.get('dequote', True)\n one_to_one = kargs.get('one_to_one', True)\n\n df = dp.load_generic(input_file=LoincMTRT.table, sep=sep, input_dir=input_dir) \n if dehyphen: \n df = dehyphenate(df, col=LoincMTRT.col_key) # inplace\n # 12345-7 or 123457 \n df = df.drop_duplicates(keep='last') # drop duplicates\n\n if deq: \n df = dequote(df, col=LoincMTRT.col_value)\n\n if one_to_one: \n df = LoincMTRT.resolve_duplicates(df, verbose=1)\n\n return df",
"def read_table(cls, filepath_or_buffer, *args, **vargs):\n if filepath_or_buffer.endswith('.csv') and 'sep' not in vargs:\n vargs['sep'] = ','\n df = pandas.read_table(filepath_or_buffer, *args, **vargs)\n labels = df.columns\n return Table([df[label].values for label in labels], labels)",
"def read(name, db):\n \n # Make connection with the database\n\tconn = sqlite3.connect(db)\n\tdf = pd.read_sql_query(\"select * from \" + name + ';', conn)\n \n # Print loaded data table name and return DataFrame\n\tprint(name + ': loaded')\n\treturn df",
"def load_main_table(table_text):\n\n lines = table_text.split('\\n')\n i = 1\n cols = []\n for thing in lines[1].split('\",\"'):\n if thing in ['C ', 'I ', 'K ', 'E ', 'H ']:\n cols.append(thing.strip() + str(i) + ' ')\n if thing == 'H ':\n i += 1\n else:\n cols.append(thing)\n lines[1] = '\",\"'.join(cols)\n text = \"\\n\".join(lines[1:])\n df = pd.read_csv(StringIO(text))\n df.index = df['Student ID']\n\n return df",
"def load_data(fn):\n return pandas.read_csv(fn, dtype={'Name': str, 'Reason': str, 'Amount': float, 'Day': int})",
"def import_experiments_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)",
"def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")",
"def OSW2df(osw_file, table_name):\n conn = connOSW(osw_file)\n df = pd.read_sql_query(\"SELECT * FROM \" + table_name, conn)\n conn.close()\n return df",
"def create_data_frame(input_filepath):\n df = pd.read_json(input_filepath)\n logger = logging.getLogger(__name__)\n logger.info('Imported dataframe:')\n logger.info(df.info())\n logger.info(df.describe())\n logger.info(df.head())\n return df",
"def getDataframe(file_name):\n # maak pandas dataframe van KNMI zonder comments\n if '.csv' in file_name:\n dataframe = pd.read_csv(file_name, delimiter = ';', comment='#')\n return dataframe\n elif '.txt' in file_name:\n dataframe = pd.read_csv(file_name, delimiter = ',', comment='#')\n return dataframe\n else:\n quit('Usage: use files of .csv or .txt format')",
"def open_data(table):\n engine = create_engine(myDB, encoding='latin1') \n conn = engine.connect()\n select = conn.execute('select * from ' + table)\n\n df = pd.DataFrame(select.fetchall()) \n df.columns = select.keys()\n\n conn.close()\n return df",
"def _get_data(*, from_web: bool) -> pd.DataFrame:\n\n df = read_in_data.SaveFormats.CSV.read(from_web=from_web)\n return df",
"def load_table_as_pd(conn, tablename: str):\n # get table as a pandas dataframe\n statement = f\"\"\"\n SELECT *\n FROM '{tablename}';\n \"\"\"\n df = pd.read_sql_query(statement, conn)\n return df",
"def table_to_dataframe(file):\n columns = ['instrument', 'dataset', 'flowcell', 'well', \n 'well_tile', 'cell', 'blob', 'position_i', 'position_j',\n 'read', 'quality']\n\n columns_drop = ['instrument', 'flowcell', 'dataset', 'well_tile']\n\n df = pd.read_csv(file, sep='\\s+', header=None, quoting=3)\n df.columns = columns\n df['tile'] = df['well_tile'] % 1000\n df = df.drop(columns_drop, axis=1)\n return df",
"def load_data(filepath):\n\tlogging.info(f\"Load data from {filepath}\")\n\tdf = pd.read_csv(filepath)\n\tdf = set_dtypes(df)\n\tdf = df.sort_values(by='query_date')\n\n\treturn df",
"def read_data(name: str) -> pd.DataFrame:\n import_dir = Path.cwd().joinpath('eikon_data_files')\n\n path = Path.joinpath(import_dir, Path(name))\n if path.exists():\n return pd.read_csv(path, sep=',')\n else:\n print('File type \"' + name + '.csv' + ' does not exist. Aborted.')\n quit()",
"def create_table_from_file():\n\n full_path = os.getcwd()\n file_name = full_path + \"/inventory/inventory.csv\"\n\n if os.path.exists(file_name):\n table = data_manager.get_table_from_file(file_name)\n\n else:\n ui.print_error_message(\"There is no file to read!\")\n table = []\n\n return table",
"def load_tsv(path: str, ncols: int, nonames: bool) -> DataFrame:\n cols = range(ncols) if ncols else None\n return pandas.read_csv(path, usecols=cols, sep='\\t', skipinitialspace=True, header='infer' if not nonames else None)",
"def load(self) -> pd.DataFrame:\n if os.path.exists(self.file_name):\n df = pd.read_csv(self.file_name, index_col=0)\n df = self._clean(df)\n else:\n _LOG.debug(\"No file '%s'\", self.file_name)\n df = pd.DataFrame()\n return df",
"def _read_table(hdulist, extname, **kwargs):\n t = _read_ext(Table, hdulist, extname, **kwargs)\n h = hdulist[extname].header\n for i in range(h['TFIELDS']):\n try:\n t.columns[i].unit = h['TUNIT%d' % (i + 1)]\n except Exception:\n pass\n return t"
] | [
"0.73241764",
"0.7220796",
"0.7152453",
"0.7099584",
"0.7081273",
"0.6864048",
"0.6840184",
"0.67669684",
"0.67324567",
"0.67061985",
"0.66576475",
"0.6655316",
"0.6607207",
"0.65883917",
"0.65197957",
"0.65108925",
"0.6483658",
"0.64634323",
"0.6460126",
"0.64505523",
"0.64264697",
"0.641699",
"0.6393391",
"0.63914275",
"0.63660234",
"0.63375115",
"0.63367444",
"0.63293105",
"0.63288397",
"0.63147086"
] | 0.72589296 | 1 |
Saves the table as an Excel spreadsheet, where multiple tables can be given. | def to_spreadsheet(tables: Dict[str, pandas.DataFrame], filename: Path) -> Path:
writer = pandas.ExcelWriter(str(filename))
include_index = False
# python 3.5 or 3.6 made all dicts ordered by default, so the sheets will be ordered in the same order they were defined in `tables`
for sheet_label, df in tables.items():
if df is None: continue
df.to_excel(writer, sheet_label, index = include_index)
writer.save() # otherwise color_table_cells will not be able to load the file
return filename | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_excel(self, filename):\n # convert table to array of rows\n rows = [self.headings]\n for y in range(self.rowcount):\n row = []\n for h in self.headings:\n row.append(self.table[h][y])\n rows.append(row)\n \n sheet = pyexcel.Sheet(rows, self.name, name_columns_by_row=0)\n sheet.save_as(filename)",
"def saveAll(self):\r\n path = saveFile(ftype='xlsx')\r\n writer = pd.ExcelWriter(path)\r\n df = pd.DataFrame(self.saveAll)\r\n df.to_excel(writer, header=False, index=False)\r\n writer.save()\r\n \r\n #Format the excel file\r\n try:\r\n import openpyxl\r\n from openpyxl.styles import Alignment, Font, Border, Side\r\n #Load the workbook and worksheet\r\n wb = openpyxl.load_workbook(filename=path)\r\n ws = wb.get_sheet_by_name(\"Sheet1\")\r\n cells = ['E1','H1','K1','N1','Q1','T1','W1','Z1']\r\n ws.merge_cells('E1:G1')\r\n ws.merge_cells('H1:J1')\r\n ws.merge_cells('K1:M1')\r\n ws.merge_cells('N1:P1')\r\n ws.merge_cells('Q1:S1')\r\n ws.merge_cells('T1:V1')\r\n ws.merge_cells('W1:Y1')\r\n ws.merge_cells('Z1:AB1')\r\n #Bold and center the headers\r\n ft = Font(bold=True)\r\n for cell in cells:\r\n ws[cell].alignment = Alignment(horizontal=\"center\")\r\n ws[cell].font = ft\r\n #Add borders\r\n rows,_ = self.saveAll.shape\r\n for i in range(rows):\r\n for cell in cells:\r\n c = cell[0]+str(i+1)\r\n ws[c].border = Border(left=Side(style='thin'))\r\n\r\n \r\n \r\n wb.save(path)\r\n \r\n except ImportError:\r\n pass",
"def generate_xls(self):\n self.wb = xlwt.Workbook()\n ws = self.wb.add_sheet('Sheet1')\n heading_style = xlwt.easyxf('font: bold true; alignment: horizontal center, wrap true;')\n extra_row = 0\n if self.date:\n date_style = xlwt.easyxf('font: bold true; alignment: horizontal left, wrap true;')\n ws.write_merge(0,0,0,self.table.no_of_columns()-1,'Date : '+self.date,date_style) \n extra_row = 1\n for i in range(len(self.headings)):\n ws.write_merge(i+extra_row,i+extra_row,0,self.table.no_of_columns()-1,self.headings[i],heading_style)\n ws.set_panes_frozen(True)\n ws.set_horz_split_pos(len(self.headings)+extra_row+1)\n ws.set_remove_splits(True)\n self.table.to_xls(ws,start_row=len(self.headings)+extra_row,start_col=0)\n return self.wb",
"def save_table(data, out_file):\n logging.info(\"Saving table\")\n #header, data = data\n #out = pd.DataFrame(data=data, columns = header.keys())\n joblib.dump(data, out_file)",
"def toxlsx(tbl, filename, sheet=None, encoding='utf-8'):\n\n try:\n import openpyxl\n except ImportError as e:\n raise UnsatisfiedDependency(e, dep_message)\n wb = openpyxl.Workbook(optimized_write=True, encoding=encoding)\n ws = wb.create_sheet(title=sheet)\n for row in tbl:\n ws.append(row)\n wb.save(filename)",
"def export_helped_table(db):\r\n # Get current date.\r\n date = datetime.datetime.today().strftime('%Y-%m-%d')\r\n # Create directory and file.\r\n if not os.path.exists(backup_dir):\r\n os.makedirs(backup_dir)\r\n backup_file = backup_dir + \"backup_\" + date + \".xlsx\"\r\n # Create workbook and add worksheet.\r\n workbook = xlsxwriter.Workbook(backup_file)\r\n worksheet = workbook.add_worksheet()\r\n # Add bold format to highlight cells.\r\n bold = workbook.add_format({'bold': True})\r\n # Create data headers.\r\n worksheet.write('A1', 'Customer Number', bold)\r\n worksheet.write('B1', 'Name', bold)\r\n worksheet.write('C1', 'Username', bold)\r\n worksheet.write('D1', 'RU_ID', bold)\r\n worksheet.write('E1', 'OS_Platform', bold)\r\n worksheet.write('F1', 'Description', bold)\r\n # Get number of rows in table.\r\n c = db.cursor()\r\n c.execute(\"SELECT * FROM helped\")\r\n customers = c.fetchall()\r\n # Loop through the data and write it row by row.\r\n for row in range(0, len(customers)):\r\n for col in range(0, 6):\r\n worksheet.write((row + 1), col, customers[row][col])\r\n workbook.close()",
"def save(self, filename):\n writer = pd.ExcelWriter(filename+\".xlsx\")\n for tab in self.group.df_assignment_merge.keys():\n self.group.df_assignment_merge[tab].to_excel(writer,tab)\n\n writer.save()\n\n #df.to_excel(\"filename.xlsx\")",
"def dfs_tabs(df_list, sheet_list, file_name):\n\n writer = pd.ExcelWriter(file_name,engine='xlsxwriter') \n for dataframe, sheet in zip(df_list, sheet_list):\n dataframe.to_excel(writer, sheet_name=sheet, startrow=0 , startcol=0, index=False) \n writer.save()",
"def append_data2xls(filename, table_name, data):\n r_xls = xlrd.open_workbook(filename)\n r_sheet = r_xls.sheet_by_name(table_name)\n end_row_number = r_sheet.nrows\n w_xls = copy(r_xls)\n sheet_write = w_xls.get_sheet(0)\n\n for i, row in enumerate(data):\n for j, value in enumerate(row):\n sheet_write.write(end_row_number + i, j, value)\n w_xls.save(filename)",
"def dumptoexcel(source_html, output_excel):\r\n\r\n arguments = {'srcName' : source_html, 'desName' :output_excel }\r\n\r\n #Reading from HTML file.\r\n soup = BeautifulSoup(open(arguments['srcName']))\r\n table = soup.find('table')\r\n table_rows = table.find_all('tr')\r\n\r\n \r\n #Opening Excel File.\r\n desWorkBook = openpyxl.Workbook()\r\n desSheet = desWorkBook.active\r\n\r\n\r\n #Getting data ready to write.\r\n all_rows = []\r\n\r\n table_head = table.find_all('th')\r\n row = [i.text for i in table_head]\r\n all_rows.append(row)\r\n \r\n for tr in table_rows:\r\n td = tr.find_all('td')\r\n row = [i.text for i in td]\r\n if(len(row) != 0):\r\n all_rows.append(row)\r\n\r\n rowLen = len(all_rows[0])\r\n maxColWidths = [0]*rowLen\r\n \r\n for row in all_rows:\r\n for i in range(0,rowLen):\r\n temp = len(row[i])\r\n if(maxColWidths[i]<temp):\r\n maxColWidths[i] = temp\r\n\r\n \r\n #Writing to Excel File.\r\n rowNo = 1\r\n for row in all_rows:\r\n colNo = 1\r\n row_len = len(row)\r\n for i in xrange(1,row_len):\r\n\r\n desSheet.cell(row=rowNo, column=colNo).value = row[i]\r\n desSheet.column_dimensions[get_column_letter(colNo)].width = maxColWidths[i] \r\n colNo = colNo+1\r\n \r\n rowNo = rowNo+1\r\n\r\n #Saving Excel File.\r\n \r\n desWorkBook.save(arguments['desName'])",
"def save_table_scraperwiki(uniques,table,name):\n for row in table:\n scraperwiki.sqlite.save(\n unique_keys=uniques\n , data=row\n , table_name=name\n )",
"def export_table (self,_w):\n try:\n _data = \"\"\n maxRow = _w.rowCount()\n maxColumn = _w.columnCount()\n for hc in range(0,maxColumn):\n try: _hci = str(_w.horizontalHeaderItem(hc).text())\n except:_hci=\"None\";pass\n if hc == (maxColumn-1) :_data += _hci\n elif hc < maxColumn:_data += \"%s,\" % _hci\n _data += \"\\n\"\n for r in range(0, maxRow):\n for c in range(0, maxColumn):\n _d = str(_w.item(r, c).text())\n if c == (maxColumn-1):_data += _d\n elif c < maxColumn:_data += \"%s,\" % _d\n _data += \"\\n\"\n options = QFileDialog.Options()\n saved_file, _ = QFileDialog.getSaveFileName(self, \"Save Table to file \", \"data\", \"Plain Text (*.txt);;CSV (*.csv);;All Files (*)\", options=options)\n _file = open(saved_file, 'w')\n _file.write(_data)\n _file.close()\n except FileNotFoundError:pass",
"def save_table(date, table):\n if os.path.isfile(date+\".table\"):\n file_using = open(date+\".table\", \"w\")\n else:\n return False\n file_using.seek(0)\n file_using.truncate()\n for line in table:\n file_using.write(\"{},{},{},{},{}\\n\".format(line[0], line[1], line[2], line[3], line[4]))\n file_using.close()",
"def save_table(self, table_name=None,\n year_start=None, year_end=None):\n import pandas as pd\n\n self.sanity_check()\n self.data_loaded_check()\n\n if table_name is None:\n table_name = (f\"{self.config.dir_data}/\"\n f\"table_{self.config.experiment_id}.xlsx\")\n\n print(table_name)\n\n writer = pd.ExcelWriter(table_name)\n\n if (year_start is not None) and (year_end is not None):\n tedges = np.arange(1999.5, 2021.5, 1)\n years = np.arange(2000, 2021, 1)\n else:\n tedges = np.arange(self.history.index[0] - 0.5,\n self.history.index[-1] + 1.5, 1)\n years = np.arange(self.history.index[0],\n self.history.index[-1] + 1, 1)\n\n dfa = pd.DataFrame()\n dfa['year'] = years\n\n Ht = np.zeros(len(years))\n auth_names = list(self.pub_auth_all.author1.unique())\n for a in auth_names:\n df = self.pub_auth_all[self.pub_auth_all['author1'].isin([a])]\n y = [int(i) for i in df.year.values]\n if len(y) == 0:\n H = [[0] * (len(tedges) - 1), None]\n else:\n y = np.array(y)\n H = np.histogram(y, bins=tedges)\n dfa[a] = H[0]\n Ht = Ht + H[0]\n self.history['npapers_all'] = Ht\n dfa.to_excel(writer, sheet_name='top')\n\n Ht = np.zeros(len(years))\n auth_names = list(self.pub_auth_top.author1.unique())\n for a in auth_names:\n df = self.pub_auth_top[self.pub_auth_top['author1'].isin([a])]\n y = [int(i) for i in df.year.values]\n if len(y) == 0:\n H = [[0] * (len(tedges) - 1), None]\n else:\n y = np.array(y)\n H = np.histogram(y, bins=tedges)\n dfa[a] = H[0]\n Ht = Ht + H[0]\n self.history['npapers_top'] = Ht\n dfa.to_excel(writer, sheet_name='top')\n\n writer.save()",
"def write2file(self, save_to):\n headerstyle = xlwt.easyxf(self.header_style.get_style_string())\n missing_val_style = xlwt.easyxf(\n self.missing_value_style.get_style_string())\n row_styles = [xlwt.easyxf(self.first_style.get_style_string()),\n xlwt.easyxf(self.second_style.get_style_string())]\n\n properties, sections, table = self._build_table()\n\n workbook = xlwt.Workbook()\n sheet = workbook.add_sheet(self.sheet_name)\n\n if os.path.splitext(save_to)[-1] == '':\n save_to += '.xls'\n\n max_col_len = []\n\n if (self.switch):\n\n for i, prop in enumerate([''] + properties):\n sheet.write(0, i, prop, headerstyle)\n max_col_len.append(len(str(prop)))\n\n for row_num, sec in enumerate(sections):\n sheet.write(row_num + 1, 0, sec, headerstyle)\n if len(str(sec)) > max_col_len[0]:\n max_col_len[0] = len(str(sec))\n\n for row_num, row in enumerate(table):\n for col_num, elem in enumerate(row):\n\n if elem is None:\n style = missing_val_style\n cell_content = \"\"\n else:\n style = row_styles[row_num % 2]\n cell_content = elem\n\n if isinstance(cell_content, datetime.datetime):\n style.num_format_str = \"DD-MM-YYYY HH:MM:SS\"\n elif isinstance(cell_content, datetime.date):\n style.num_format_str = \"DD-MM-YYYY\"\n elif isinstance(cell_content, datetime.time):\n style.num_format_str = \"HH:MM:SS\"\n else:\n style.num_format_str = \"\"\n\n sheet.write(row_num + 1, col_num + 1, cell_content, style)\n if len(str(cell_content)) > max_col_len[col_num+1]:\n max_col_len[col_num+1] = len(str(cell_content))\n\n else:\n\n for i, sec in enumerate([''] + sections):\n sheet.write(0, i, sec, headerstyle)\n max_col_len.append(len(str(sec)))\n\n for row_num, prop in enumerate(properties):\n sheet.write(row_num + 1, 0, prop, headerstyle)\n if len(str(prop)) > max_col_len[0]:\n max_col_len[0] = len(str(prop))\n\n for col_num, col in enumerate(table):\n for row_num, elem in enumerate(col):\n\n if elem is None:\n style = missing_val_style\n cell_content = \"\"\n else:\n style = row_styles[row_num % 2]\n cell_content = elem\n\n if isinstance(cell_content, datetime.datetime):\n style.num_format_str = \"DD-MM-YYYY HH:MM:SS\"\n elif isinstance(cell_content, datetime.date):\n style.num_format_str = \"DD-MM-YYYY\"\n elif isinstance(cell_content, datetime.time):\n style.num_format_str = \"HH:MM:SS\"\n else:\n style.num_format_str = \"\"\n\n sheet.write(row_num + 1, col_num + 1, cell_content, style)\n if len(str(cell_content)) > max_col_len[col_num+1]:\n max_col_len[col_num+1] = len(str(cell_content))\n\n # adjust width of he columns\n for col_id, col_len in enumerate(max_col_len):\n sheet.col(col_id).width = (256 * (col_len+1))\n\n workbook.save(save_to)",
"def save(file, table):\n pq.write_table(pa.Table.from_pandas(table), file)",
"def save_xls(self,basepath=''): \n self.generate_xls()\n self.wb.save(basepath+self.filename+'.xls')",
"def write_excel(self, filename):\n writer = pd.ExcelWriter(filename)\n self.df_avg.to_excel(writer, 'Simulation')\n self.manager_df.to_excel(writer, 'FleetManagers')\n self.customer_df.to_excel(writer, 'Customers')\n self.transport_df.to_excel(writer, 'Transports')\n writer.save()",
"def export_to_excel(self, worksheet, row_start, col_start, queryset, date_time=timezone.now()):\n\t\tif queryset:\n\t\t\t[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\tfor q in queryset:\n\t\t\t\t# object_excel_write function---date_time uyiin history objectiig excel -ruu horvuulne\n\t\t\t\t[row_write, col_write] = q.object_excel_write(worksheet, row_write, col_write, date_time=date_time)\n\t\telse:\n\t\t\tworksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')",
"def save_new_excel_data(df, req_file_name, sheet):\r\n try:\r\n # select rows for a specific column and save a excel file\r\n dtc_table_ext = ['SW_DTC', 'Diagnosis_IDENTIFIER', 'Symptom', 'SW_Module', 'ISO_Pcode',\r\n 'Cust_Pcode', 'ScanT_Pcode', 'Description', 'Lamp_Manager', 'EPC_Lamp',\r\n 'SnapShot', 'MIL_FUEL_CONF', 'Diagnosis_Enabled', 'Diagnosis_presence',\r\n 'Severity', 'Priority', 'Diag_Call_task', 'Diag_Validation', 'Unit',\r\n 'Diag_DeValidation', 'DTC_available', 'EPC', 'MIL_FuelConf_bit1',\r\n 'MIL_FuelConf_bit0', 'Lamp_Manager_bit2', 'Lamp_Manager_bit1', 'Lamp_Manager_bit0',\r\n 'AUTOyyy', 'Prio_bit3', 'Prio_bit2', 'Prio_bit1', 'Prio_bit0',\r\n 'Snapshot_bit2', 'Snapshot_bit1', 'Snapshot_bit0', 'empty', 'ETC_highbit', 'ETC_lowbit']\r\n # Save df_all_cols extracted to a new excel file\r\n file_to_save = sheet+'_'+req_file_name\r\n with pd.ExcelWriter(file_to_save) as writer: # for writing more than 1 sheet\r\n df.to_excel(writer, sheet_name=sheet, index=False)\r\n # df.to_excel(writer, sheet_name=sheet, columns=dtc_table_ext, index=False)\r\n except PermissionError:\r\n print('DEBUG-->save_new_excel_data: exception raised: ', sys.exc_info())",
"def glue_table(name: str, df: pd.DataFrame, build_path=\"_build\"):\n\n if not os.path.exists(build_path):\n os.mkdir(build_path)\n df.to_excel(os.path.join(build_path, f\"{name}.xlsx\"))\n\n glue(name, df)",
"def write_table(final_df, outfile, stages):\n\n workbook = xlsxwriter.Workbook(\n str(outfile), {\"constant_memory\": False}\n )\n worksheet = workbook.add_worksheet(\"Table 1\")\n\n header_color = \"#F2DCDB\"\n white = \"#000000\"\n black = \"#FFFFFF\"\n loc_cell_width = 20\n data_cell_width = 15\n column_start = 65\n\n header_format = get_format_obj(\n workbook, bg_color=header_color, font_size=12, bold=True\n )\n title_format = get_format_obj(\n workbook, bg_color=white, font_size=13, align=False, bold=True\n )\n title_format.set_font_color(black)\n \n # Column length is basically all columns in the dataframe except 'level'\n col_len = final_df.shape[1]-1\n \n data_cols = final_df.drop([\"level\", \"lancet_label\"], axis=1).columns.values\n \n cols = list(map(chr, range(column_start, column_start+col_len)))\n worksheet.set_column(cols[0]+\":\"+cols[0], loc_cell_width)\n worksheet.set_column(cols[1]+\":\"+cols[-1], data_cell_width)\n\n # place-holder to manually adjust title as needed\n title = (\n \"Title goes here.\"\n )\n curr_row = 1\n end_row = curr_row + CELL_HT[\"title\"]\n row_range = cols[0] + str(curr_row) + \":\" + cols[-1] + str(end_row)\n worksheet.merge_range(row_range, title, title_format)\n\n curr_row = end_row+1\n page_row_count = 1\n page_breaks = []\n\n for _, row in final_df.iterrows():\n page_row_count += 1\n \n ### Insert page break after 20 rows.\n if row[\"level\"] == 0 or (page_row_count != 0 and\n page_row_count % 20 == 0):\n page_row_count = 0\n page_breaks.append(curr_row - 1)\n curr_row = write_header(\n worksheet, curr_row, cols, data_cols,\n header_format, stages\n )\n end_row = curr_row + CELL_HT[\"data_cols\"]\n col_idx = 0\n\n if row[\"level\"] < 3:\n loc_fmt_obj = get_format_obj(\n workbook, font_size=11,\n bg_color=header_color, bold=True,\n align=False\n )\n data_fmt_obj = get_format_obj(\n workbook, font_size=11,\n bg_color=header_color, bold=True\n )\n else:\n loc_fmt_obj = get_format_obj(\n workbook, font_size=11, align=False\n )\n data_fmt_obj = get_format_obj(\n workbook, font_size=11\n )\n\n for col in final_df:\n if col == \"level\":\n continue\n\n row_range = (\n cols[col_idx] + str(curr_row) + \":\" +\n cols[col_idx] + str(end_row)\n )\n if col == \"lancet_label\":\n loc_name = INDENT_MAP[row[\"level\"]] + row[col]\n worksheet.merge_range(row_range, loc_name, loc_fmt_obj)\n else:\n worksheet.merge_range(row_range, row[col], data_fmt_obj)\n\n col_idx += 1\n curr_row = end_row+1\n\n worksheet.set_h_pagebreaks(page_breaks[1:])\n worksheet.fit_to_pages(1, 0)\n workbook.close()",
"def save_comparative_tables(result_best_models, metric):\n models = result_best_models['MODEL'].unique()\n datasets = result_best_models['DATASET'].unique()\n forecast_horizons = result_best_models['FORECAST_HORIZON'].unique()\n\n if not os.path.exists('../results_best/tables/'):\n os.mkdir('../results_best/tables/')\n\n excel_path = '../results_best/tables/table_best_models_' + metric + '.xlsx'\n excel = pd.ExcelWriter(excel_path, engine='openpyxl')\n excel.book = openpyxl.Workbook()\n\n for horizon in forecast_horizons:\n\n res = pd.DataFrame(columns=models)\n for dataset in datasets:\n row = []\n\n for model in models:\n row.append(result_best_models.loc[(result_best_models['DATASET'] == dataset) &\n (result_best_models['FORECAST_HORIZON'] == horizon) &\n (result_best_models['MODEL'] == model), :][metric].values[0])\n res.loc[dataset, :] = row\n res.to_excel(excel, sheet_name=str(horizon))\n\n excel.save()\n default_sheet = excel.book[excel.book.sheetnames[0]]\n excel.book.remove(default_sheet)\n\n excel.close()",
"def dataframe_to_excel(df, sheet_title, project_constants_lst, \n current_date=str(date.today()), force_flag = False, freeze_column='A'):\n \n project_steps_df, max_title, _, report_requisites_sr, *_ = project_constants_lst\n report_type, export_flag, df_decription = project_steps_df.loc[sheet_title, ['report_type', 'export_to_excel', 'description']].values\n \n # check DataFrame report type to save\n if report_type == 'report':\n report_mark = report_requisites_sr['project_title'] + '_tables'\n else:\n report_mark = report_type\n \n # construct excel filename\n file_name = report_requisites_sr['customer_name'] + '_' + report_mark + '_' + current_date + '.xlsx'\n\n # information string\n info = f'Exporting {sheet_title} table to {report_mark} file'\n print(info, end =\" \")\n file_path = os.path.join(report_requisites_sr['today_report_folder'], file_name)\n \n # save DataFrame to excel file if export_to_excel trigger is ON\n # and DataFrame is not empty\n if (force_flag or export_flag) and not df.empty:\n fsop.create_folder(report_requisites_sr['today_report_folder'], max_title, display_status=False)\n file_mode = 'a' if os.path.isfile(file_path) else 'w'\n df = df.apply(pd.to_numeric, errors='ignore')\n try:\n if_sheet_exists_param = 'replace' if file_mode == 'a' else None\n content_df, item_exist = generate_table_of_contents(file_path, file_mode, sheet_title, df_decription)\n df_flat = drop_multindex(df)\n # write table of contents and data dataframe to the excel file\n with pd.ExcelWriter(file_path, mode=file_mode, if_sheet_exists=if_sheet_exists_param, engine='openpyxl') as writer:\n if file_mode == 'w' or not item_exist:\n content_df.to_excel(writer, sheet_name='Содержание', index=False)\n df_flat.to_excel(writer, sheet_name=sheet_title, startrow=2, index=False)\n # format table of contents and data worksheets\n workbook = openpyxl.load_workbook(file_path)\n format_workbook(workbook, sheet_title, df_decription, freeze_column)\n workbook.save(file_path)\n except PermissionError:\n status_info('fail', max_title, len(info))\n print('\\nPermission denied. Close the file.\\n')\n sys.exit()\n else:\n status_info('ok', max_title, len(info))\n return file_path \n else:\n # if save key is on but DataFrame empty\n if project_steps_df.loc[sheet_title, 'export_to_excel'] and df.empty:\n status_info('no data', max_title, len(info))\n else: \n status_info('skip', max_title, len(info))\n return None",
"def rite2xl(df, file_name):\r\n print('writing dataframe to excel',)\r\n writer = pd.ExcelWriter(file_name ,engine = 'xlsxwriter')\r\n df.to_excel(writer,file_name)\r\n writer.save()\r\n print('writing to excel sheet completed')\r\n return(df)",
"def export_all_to_excel(input_hdf5, out_directory_path):\n data_store = pd.HDFStore(input_hdf5) # Opening the HDF5 file\n for each_key in data_store.keys():\n data_store[each_key].to_excel(out_directory_path + each_key + \".xlsx\")\n # '/' missing between folder name and\n # file name because file name already includes it.\n data_store.close()\n\n print(\"-- Dataframes written to Excel files (.xlsx) --\")",
"def export_data(self):\r\n if len(app.entry6.get()) != 0:\r\n\r\n if app.var.get() == 'xls':\r\n\r\n wb = Workbook()\r\n sheet = wb.add_sheet('Sheet1')\r\n self.columns = ['id', 'Name', 'Section', 'Dept.', 'Gpa', 'MP1', 'MP2', 'MP3', 'MT', 'FINAL']\r\n style = xlwt.easyxf('font: bold 1')\r\n for col in range(10):\r\n sheet.write(0, col, self.columns[col], style)\r\n index=0\r\n for row in range(1,162):\r\n sheet.write(row, 1, open_data.sort_list[index])\r\n index += 1\r\n index1 = -1\r\n for row in range(1,162):\r\n index1 += 1\r\n index2=0\r\n for col in range(10):\r\n if col == 1 or index2 == 1:\r\n index2 += 1\r\n continue\r\n if index2 == 0:\r\n sheet.write(row, col, int(open_data.student[open_data.sort_list[index1]][index2]))\r\n index2 += 1\r\n continue\r\n sheet.write(row, col, open_data.student[open_data.sort_list[index1]][index2])\r\n index2 += 1\r\n file_name=app.entry6.get()\r\n if '.xls' not in file_name:\r\n wb.save(file_name+'.xls')\r\n else:\r\n wb.save(file_name)\r\n\r\n elif app.var.get() == 'txt':\r\n\r\n file_name = app.entry6.get()\r\n if '.txt' not in file_name:\r\n file_name = file_name + '.txt'\r\n file = open(file_name, 'w')\r\n index2 = 0\r\n for key in open_data.student:\r\n for index in range(10):\r\n if index == 0:\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n continue\r\n if index == 1:\r\n try:\r\n self.split_names = open_data.sort_list[index2].split(' ')\r\n file.write(self.split_names[0])\r\n file.write(', ')\r\n file.write(self.split_names[1])\r\n file.write(', ')\r\n index2 += 1\r\n except UnicodeEncodeError:\r\n index2 += 1\r\n pass\r\n continue\r\n if index >= 5 and index <= 9:\r\n if open_data.student[key][index] != '':\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n else:\r\n file.write('\\n')\r\n break\r\n if index == 9:\r\n file.write('\\n')\r\n continue\r\n try:\r\n file.write(str(open_data.student[key][index]))\r\n file.write(', ')\r\n except UnicodeEncodeError:\r\n pass\r\n file.close()\r\n\r\n\r\n\r\n elif app.var.get() == 'csv':\r\n app.info.configure(text=\"INFO: Type not Supported\")\r\n # The program does not support saving in 'csv' type. If the user selects 'csv' file type, 'Info' Label\r\n # shows the message: 'INFO: Type not Supported'.\r\n\r\n else:\r\n app.info.configure(text='INFO: Type not chosen!')\r\n # Also, If the user presses on 'Export Data' button, with a file name provided, but without choosing a\r\n # file type, 'Info' Label shows the message: 'INFO: Type not chosen'.\r\n\r\n else:\r\n app.info.configure(text=\"INFO: Please provide the name of the file.\")\r\n # Also, if the user presses 'Export Data' button without giving a file name, 'Info' Label shows the message:\r\n # 'INFO: Please provide the name of the file.'\r",
"def export_to_excel(self, workbook, tailan_queryset):\n\t\t# workbook argumentdaa avna\n\t\tif tailan_queryset:\n\t\t\t#[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\t\n\t\t\tworksheet = workbook.add_worksheet(u'Гүний худаг')\n\t\t\tqueryset = Hudag.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Hudag.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.gunii_hudags:\n\t\t\t\t\tqueryset = tailan.gunii_hudags.hudags.all()\n\t\t\t\t\t[row_write, col_write] = Hudag.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэршүүлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsevershuuleh:\n\t\t\t\t\tqueryset = tailan.tsevershuuleh.tsevershuuleh.all()\n\t\t\t\t\t[row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэрлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tseverleh:\n\t\t\t\t\tqueryset = tailan.tseverleh.tseverleh.all()\n\t\t\t\t\t[row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Усан сан')\n\t\t\tqueryset = UsanSan.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsanSan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.usansan:\n\t\t\t\t\tqueryset = tailan.usansan.usan_sans.all()\n\t\t\t\t\t[row_write, col_write] = UsanSan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны насос станц')\n\t\t\tqueryset = NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_nasos_stants:\n\t\t\t\t\tqueryset = tailan.tsever_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны насос станц')\n\t\t\tqueryset = NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = 
NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_nasos_stants:\n\t\t\t\t\tqueryset = tailan.bohir_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Лаборатори')\n\t\t\tqueryset = Lab.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Lab.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.lab:\n\t\t\t\t\tqueryset = tailan.lab.labs.all()\n\t\t\t\t\t[row_write, col_write] = Lab.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.tsever_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.bohir_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'АХББ')\n\t\t\tqueryset = ABB.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = ABB.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.abb:\n\t\t\t\t\tqueryset = tailan.abb.abbs.all()\n\t\t\t\t\t[row_write, col_write] = ABB.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус, дулаан дамжуулах төв')\n\t\t\tqueryset = UsDamjuulahBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsDamjuulahBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_damjuulah_tov:\n\t\t\t\t\tqueryset = tailan.us_damjuulah_tov.usDamjuulahBair.all()\n\t\t\t\t\t[row_write, col_write] = UsDamjuulahBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус түгээх 
байр')\n\t\t\tqueryset = UsTugeehBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsTugeehBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_tugeeh:\n\t\t\t\t\tqueryset = tailan.us_tugeeh.us_tugeeh_bairs.all()\n\t\t\t\t\t[row_write, col_write] = UsTugeehBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны машин')\n\t\t\tqueryset = WaterCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = WaterCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.water_car:\n\t\t\t\t\tqueryset = tailan.water_car.water_cars.all()\n\t\t\t\t\t[row_write, col_write] = WaterCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны машин')\n\t\t\tqueryset = BohirCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = BohirCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_car:\n\t\t\t\t\tqueryset = tailan.bohir_car.bohir_cars.all()\n\t\t\t\t\t[row_write, col_write] = BohirCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ажилчдын судалгаа')\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ajiltan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.ajiltans:\n\t\t\t\t\tqueryset = tailan.ajiltans.ajiltans.all()\n\t\t\t\t\t[row_write, col_write] = Ajiltan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\t\n\t\telse:\n\t\t\tworksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')",
"def write_xlsx(data):\n workbook = xlsxwriter.Workbook('MyWorkbook.xlsx')\n main_sheet = workbook.add_worksheet('MySheet')\n\n date_format = workbook.add_format(\n {'num_format': 'mm/dd/yy hh:mm:ss AM/PM'})\n length = str(len(data) + 1)\n \n main_sheet.add_table(('A1:D' + length), \n {'data': data,\n 'columns': [{'header': 'Department'}, {'header': 'Students'},\n {'header': 'Cumulative GPA'},\n {'header': 'Final Date',\n 'format': date_format}]})\n\n department_grades = workbook.add_chart({'type':'column'})\n department_grades.set_title(\n {'name':'Department and Grade distribution'})\n department_grades.add_series(\n {'categories':'=MySheet!$A$2:$A$5',\n 'values':'=MySheet!$C$2:$C$5'})\n main_sheet.insert_chart('A8', department_grades)\n workbook.close()",
"def writeTable(table, filename):\n with open(filename, \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(table)"
] | [
"0.7324861",
"0.7021304",
"0.7011961",
"0.69541585",
"0.68894243",
"0.6813231",
"0.67438275",
"0.6703414",
"0.670096",
"0.6673346",
"0.6641729",
"0.66115904",
"0.65920734",
"0.6574278",
"0.65613693",
"0.65596414",
"0.6499027",
"0.64789695",
"0.64779127",
"0.6472889",
"0.6405206",
"0.6340019",
"0.63318795",
"0.63004494",
"0.6267326",
"0.6249265",
"0.62222826",
"0.6215611",
"0.6173375",
"0.61704636"
] | 0.70738363 | 1 |
Connect to the PostgreSQL database, run the selected query and return the result | def psql_connection(query):
try:
conn = psycopg2.connect(database=DB)
cur = conn.cursor()
cur.execute(query)
except Exception:
("Error connecting to database")
else:
print("Calling database...")
print("")
results = cur.fetchall()
conn.close()
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def execute_query(query):\n try:\n # enter your code here to get a database connection and cursor,\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # execute the query\n c.execute(query)\n # store the results\n results = c.fetchall()\n # close the database connection\n db.close()\n # return the results\n return results\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)",
"def run_query(query):\r\n db = psycopg2.connect('dbname=' + database)\r\n connect = db.cursor()\r\n connect.execute(query)\r\n rows = connect.fetchall()\r\n db.close()\r\n return rows",
"def execute_query(query):\n\n db = psycopg2.connect(database=\"news\")\n cursor = db.cursor()\n cursor.execute(query)\n query_result = cursor.fetchall()\n db.close()\n return query_result",
"def pgquery( conn, sqlcmd, args ):\n retval = False\n query_result = []\n with conn:\n with conn.cursor() as cur:\n try:\n if args is None:\n cur.execute(sqlcmd)\n else:\n cur.execute(sqlcmd, args)\n for record in cur:\n query_result.append(record)\n retval = True\n except Exception as e:\n print(\"DB Read Error: \")\n print(e)\n return query_result",
"def execute_query(query):\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(query)\n results = c.fetchall()\n db.close()\n return results",
"def query_conn(sql_request):\n try:\n conn = psycopg2.connect(database=\"news\")\n cursor = conn.cursor()\n cursor.execute(sql_request)\n results = cursor.fetchall()\n conn.close()\n return results\n except psycopg2.Error as e:\n print(\"Unable to connect to the database\")\n print(e.pgerror)\n print(e.diag.message_detail)\n sys.exit(1)",
"def pg_execute(pg_conn, sql):\n print sql\n # XXX execute command",
"def exec_results(query):\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n result = c.fetchall()\n db.close()\n return result",
"def main():\n config = Config.init_from_env()\n\n while True:\n logger.info('running query...')\n\n conn = connect_to_db(config)\n with conn.cursor() as cur:\n cur.execute(\"\"\"SELECT datname from pg_database\"\"\")\n rows = cur.fetchall()\n for row in rows:\n logger.info(f'fetched row: f{row[0]}')\n conn.close()\n\n time.sleep(5)",
"def run(self):\n rows = None\n if self.sql.startswith('select'):\n conn = self.table.connect()\n with conn.cursor() as curs:\n try:\n curs.execute(self.sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {self.sql}:\n {error.code}\"\"\")\n self.excep = exc\n raise exc\n else:\n rows = curs.fetchall()\n # logging.critical(f\"\"\"executed {self.sql}\"\"\")\n self.result_exec = rows",
"def run_query(query_command):\n connection = psycopg3.connect(database=DBNAME)\n cursor = connection.cursor()\n cursor.execute(query_command)\n data = cursor.fetchall()\n connection.close()\n return data",
"def db_connection(query):\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n return c.fetchall()\n db.close()",
"def _run_query(self):",
"def query(sql):\n if (sql is None):\n raise Exception(\"SQL not specified\") \n try:\n database = App.instance().environment.database\n connection = psycopg2.connect(host=database.host, dbname=database.database, \n user=database.user, password=database.password)\n cursor = connection.cursor()\n cursor.execute(sql)\n fields = [ x[0] for x in cursor.description]\n return (fields, cursor.fetchall())\n except(Exception, psycopg2.DatabaseError) as error:\n print(\"Error connecting to database\", error)\n finally:\n if not connection is None:\n cursor.close()\n connection.close()",
"def db_query(query):\n conn = psycopg2.connect(\"dbname=news\")\n cursor = conn.cursor()\n cursor.execute(query)\n response = cursor.fetchall()\n conn.close()\n return response",
"def get_query_results(query):\n try:\n dbconn = psycopg2.connect(database=\"news\")\n cursor = dbconn.cursor()\n cursor.execute(query)\n result = cursor.fetchall()\n dbconn.close()\n return result\n except Exception as e:\n print(e)\n exit(1)",
"def run_select_query(query, args = None):\n cursor = db.get_cursor()\n cursor.execute(query, args)\n return cursor.fetchall()",
"def make_sql_call(self):\n c_data = {'db_host': self.server,\n 'db_user': self.user,\n 'db_password': self.password,\n 'db_database': self.database}\n db_conn = self.SH.sql.helper.sql_conn_obj(c_data)\n result, detail = db_conn.connect()\n self.print_to_log(detail)\n result, detail = db_conn.execute(self.sql)\n db_conn.shutdown()\n self.print_to_log(detail)",
"def _run_query(self, query):\n cursor = self.conn.cursor()\n cursor.execute(query)\n return cursor.fetchall()",
"def run_query_tap_postgres(self, query):\n return db.run_query_postgres(\n query,\n host=self.get_conn_env_var('TAP_POSTGRES', 'HOST'),\n port=self.get_conn_env_var('TAP_POSTGRES', 'PORT'),\n user=self.get_conn_env_var('TAP_POSTGRES', 'USER'),\n password=self.get_conn_env_var('TAP_POSTGRES', 'PASSWORD'),\n database=self.get_conn_env_var('TAP_POSTGRES', 'DB'),\n )",
"def run_query_target_postgres(self, query: object) -> object:\n return db.run_query_postgres(\n query,\n host=self.get_conn_env_var('TARGET_POSTGRES', 'HOST'),\n port=self.get_conn_env_var('TARGET_POSTGRES', 'PORT'),\n user=self.get_conn_env_var('TARGET_POSTGRES', 'USER'),\n password=self.get_conn_env_var('TARGET_POSTGRES', 'PASSWORD'),\n database=self.get_conn_env_var('TARGET_POSTGRES', 'DB'),\n )",
"def db_query(query, db_name, user):\n\n # Attempt connection to DB with given parameters\n try:\n conn = psycopg2.connect(dbname=db_name, user=user)\n except psycopg2.Error as e:\n print(e)\n raise SystemExit\n\n # Create cursor and try to execute given query\n # and return fetched data\n cur = conn.cursor()\n try:\n cur.execute(query)\n except psycopg2.Error as e:\n print(e)\n conn.close()\n else:\n return cur.fetchall()\n cur.close()\n conn.close()",
"def connect_psql(kid, var):\n\n code = f\"\"\"from sqlalchemy import create_engine\nconn_string = f\"postgresql://{cfg.sql_name}:{cfg.sql_password}@localhost/{cfg.sql_dbname}\"\nengine = create_engine(conn_string)\nwith engine.begin() as conn:\n conn.execute(\"INSERT INTO {cfg.sql_schema_name}.{cfg.sql_table_name} (var_value, var_name) VALUES (9,'c')\")\n result = conn.execute(\"select * from {cfg.sql_schema_name}.{cfg.sql_table_name}\")\n for row in result:\n print(row)\n \"\"\"\n\n print(\"---Attempting to execute SQL code---\")\n\n return exec_code(kid, var, code)",
"def run_query(db, query):\n log.debug(\"run query on %s: %s\", db, query)\n conn = _connect(show_dbs(db)[db][\"uri\"])\n return conn.cursor().execute(query).fetchall()",
"def runningwithqueries(query):\n print(\"\\nRunning Query: \" + str(query) + \"\\nResult :\\n\")\n crsr = cnxn.execute(query)\n columns = [column[0] for column in crsr.description]\n print(columns)\n for row in crsr.fetchall():\n print(row)\n crsr.close()",
"def select_sql(command):\n logging.debug(\"Running Select sql \"+str(command))\n try:\n## host, userid, password, database instance\n con = mdb.connect(serverip, username, userpass, schema);\n cursor = con.cursor()\n \n sql = command\n cursor.execute(sql)\n return cursor.fetchall()\n \n con.close()\n\n except mdb.Error, e:\n logger.error(e)",
"def main():\n\n db_config_params = config.get_db_config('psql')\n print(f'Connecting to {db_config_params[\"database\"]} database.')\n\n try:\n # Will commit and close the connection\n with psycopg2.connect(**db_config_params) as connection:\n with connection.cursor() as cursor:\n # Using the sql statements to modify the tables\n cursor.execute('CREATE TABLE If NOT EXISTS student(id SERIAL PRIMARY KEY, name VARCHAR);')\n # cursor.execute(\"INSERT INTO student(name) VALUES (%s);\", ('Jack Smith',))\n # connection.commit()\n cursor.execute('SELECT * FROM student;')\n print(*cursor.fetchall(), sep='\\n')\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)",
"def connect(query, tugas):\n conn = None\n result = []\n try:\n # read connection parameters\n params = config()\n \n # connect to the PostgreSQL server\n # print('Connecting to the PostgreSQL database...')\n conn = psycopg2.connect(**params)\n \n # create a cursor\n cur = conn.cursor(cursor_factory=RealDictCursor)\n # execute a statement\n cur.execute(query)\n \n if tugas == 'GET' :\n columns = ('id', 'oid', 'root', 'approved', 'name')\n result = json.dumps(cur.fetchall(), indent=2)\n else : \n # display the PostgreSQL database server version\n result = cur.fetchone()\n \n # commit the changes to the database\n conn.commit()\n # close the communication with the PostgreSQL\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print('ini error = ', error)\n finally:\n if conn is not None:\n conn.close()\n return str(result)",
"def db_results(self, query):\n try:\n # Connect to database\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n # executing query and getting all results\n c.execute(query)\n db_res = c.fetchall()\n # closing connection\n db.close()\n return db_res\n except Error:\n print (\"Error when attempting to connect to db\")",
"def run_query(conn, query):\n\tcur = conn.cursor()\n\tcur.execute(query)\n\trows = cur.fetchall()\n\treturn rows"
] | [
"0.717092",
"0.7003607",
"0.6933716",
"0.69042325",
"0.6886534",
"0.6875132",
"0.6873246",
"0.6821606",
"0.6772765",
"0.6757581",
"0.67341745",
"0.67148185",
"0.6697957",
"0.6689113",
"0.6687928",
"0.66876215",
"0.6682956",
"0.66637564",
"0.6613967",
"0.6611754",
"0.66006875",
"0.65932673",
"0.65923434",
"0.65810704",
"0.6543423",
"0.6536852",
"0.6534046",
"0.65266526",
"0.6523689",
"0.651845"
] | 0.74556375 | 0 |
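A minimal usage sketch for the psql_connection helper in the row above; the DB value and the example query are assumptions for illustration, not part of the original record:

    DB = "news"  # assumed module-level database name read by psql_connection
    rows = psql_connection("SELECT title FROM articles LIMIT 3")
    if rows:  # the function falls through and returns None when the connection fails
        for (title,) in rows:
            print(title)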
bike_stations contains an array of station objects {name, stationId, bikesAvailable, lat, lon} | def sort_bike_stations(bike_stations, location):
stations = bike_stations.copy()
for index, station in enumerate(stations):
station_location = (station["lat"], station["lon"])
dist = distance.distance(station_location, location).m
stations[index]["distance"] = dist
stations = sorted(stations, key=lambda station: station["distance"])
stations = list(filter(lambda station: station["bikesAvailable"] > 0, stations))
return stations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stations():\n\n return station_list",
"def get_bikes_for_week(cls, dbsession, station_id):\n station = [(\"Day\", \"Available Bikes\")]\n station_data = dbsession.query(func.weekday(cls.last_update),\n func.avg(cls.available_bikes)) \\\n .filter(cls.station_id == station_id) \\\n .group_by(func.weekday(cls.last_update)) \\\n .all()\n\n # this section parses the query return into a readable list.\n # from docs:extend() appends the contents of seq to list.\n if station_data:\n station.extend([(days[a], float(b)) for a, b in station_data])\n else:\n station.extend([(0,0)])\n\n return station",
"def get_bikes_for_weekday(cls, dbsession, weekday, station_id):\n station = [(\"Time\", \"Available Bikes\", \"Available Stands\")]\n\n station_data = dbsession.query(func.hour(cls.last_update),\n func.avg(cls.available_bikes),\n func.avg(cls.available_bike_stands)) \\\n .filter(cls.station_id == station_id,\n func.weekday(cls.last_update) == weekday) \\\n .group_by(func.hour(cls.last_update)) \\\n .all()\n\n # this section parses the query return into a readable list.\n # from docs:extend() appends the contents of seq to list.\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in station_data])\n else:\n station.extend([(0,0,0)])\n return station",
"def find_5near_stations(lon, lat):\r\n engine = get_sql_engine()\r\n bikestation5 = text(\r\n \"\"\"\r\n SELECT name, \"addressStreet\" as address,\r\n \"bikesAvailable\" as available_bikes, geom,\r\n\t ST_X(geom) as lon, ST_Y(geom)as lat,\r\n\t ST_Distance(ST_SetSRID(ST_MakePoint(:lon, :lat), 4326)::geography, geom::geography) AS distance\r\n FROM indego_rt1130\r\n ORDER BY 7 ASC\r\n LIMIT 5\r\n \"\"\"\r\n )\r\n near_bike = gpd.read_postgis(bikestation5, con=engine, params={\"lon\": lon, \"lat\": lat})\r\n return near_bike",
"def get_station_boroughs(self):\\",
"def stations(self):\n stations = []\n f = self._fetch(Citibike.STATION_URL)\n data = json.load(f)\n if 'stationBeanList' not in data or len(data['stationBeanList']) == 0:\n raise BadResponse('Station Fetch Failed', data)\n for station in data['stationBeanList']:\n stations.append(Station._from_json(station))\n logging.debug(\"Retrieved %d stations\" % len(stations))\n return stations",
"def prep_stations(url):\n stations = []\n _stations = requests.get(url).json()\n\n for _station in _stations['stationBeanList']:\n if _station['statusKey'] == 1:\n stations.append([_station['stationName'], _station['id'],\n _station['availableDocks'], _station['totalDocks'],\n _station['latitude'], _station['longitude']])\n\n return stations",
"def get_stations(self):\n return self.__request('stations')['stations']",
"def station_list() -> List[Dict]:\n return STATIONS",
"def stations(): \n # creating the Docstring\n session = Session(engine)\n\n # creat the Query stations\n\n stations_qu = session.query(measurement.station).group_by(measurement.station).all()\n\n # Converting the list of tuples into a normal list\n stations_qu_dict = list(np.ravel(stations_qu))\n session.close()\n\n return jsonify(stations_qu_dict)",
"def get_all_stations():\n latest_scraping_time = db.session \\\n .query(func.max(DublinBike.scraping_time)) \\\n .one()[0]\n\n stations = db.session.query(DublinBike) \\\n .filter(DublinBike.scraping_time == latest_scraping_time) \\\n .order_by(DublinBike.number.asc()) \\\n .all()\n\n return jsonify({\n 'data': [station.serialize for station in stations]\n })",
"def stations():\n # Query all station names from dataset\n station_list = session.query(Measurement.station).distinct().all()\n all_stations = list(np.ravel(station_list))\n\n return jsonify(all_stations)",
"def stations_dict(self):\n return self.__stations_dict",
"def get_stations(self, limit=250):\n\n endpoint = \"/station/getStations\"\n response = self._send(endpoint, \"POST\", {\"pageSize\": limit})\n stations = response.json()[\"stations\"]\n return stations",
"def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}",
"def stations():\n session = Session(engine)\n # Query all Stations\n stations = session.query(Station.station).all()\n\n # Convert list of tuples into normal list\n all_stations = list(np.ravel(stations))\n\n return jsonify(all_stations)",
"def stations():\n # Create a link to the session\n session = Session(engine)\n \n # Query all station records\n results = session.query(Stations.station, Stations.name).all()\n \n session.close()\n\n # Create a dictionary from the query results\n all_stations = []\n for station, name in results:\n station_dict = {}\n station_dict[\"station\"] = station\n station_dict[\"name\"] = name\n all_stations.append(station_dict)\n \n return jsonify(all_stations)",
"def stations():\n list_of_stations = session.query(Station.station, Station.name)\n all_stations = []\n for s, n in list_of_stations:\n station_dict = {}\n station_dict[\"station\"] = s\n station_dict[\"name\"] = n\n all_stations.append(station_dict)\n return jsonify(all_stations)",
"def stations():\n\n active_stations = session.query(Station.station).all()\n\n # Convert list of tuples into normal list \n stations = list(np.ravel(active_stations))\n\n return jsonify(stations)",
"def stations():\n \n # Query all the stations\n results = session.query(Station).all()\n\n # Create a dictionary to append the station data\n stations_info = []\n for stations in results:\n stations_dict = {}\n stations_dict[\"Station\"] = stations.station\n stations_dict[\"Station Name\"] = stations.name\n stations_dict[\"Latitude\"] = stations.latitude\n stations_dict[\"Longitude\"] = stations.longitude\n stations_dict[\"Elevation\"] = stations.elevation\n all_stations.append(stations_dict)\n \n return jsonify(stations_info)",
"def stations(self):\n try:\n stations_api = requests.get(self._stations_url)\n stations = {}\n for station in stations_api.json():\n station_id = station['id']\n station_name = station['name']\n stations[station_id] = station_name\n\n return stations\n except (RequestException, KeyError) as exc:\n LOG.error('could not read from api: %s', exc)\n raise SlfError('could not read from api: %s' % exc) from None",
"def stations ():\n # Query all passengers\n Stns= session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).all()\n\n allStationns = list(np.ravel(Stns))\n\n return jsonify(allStations)",
"def stations_call():\n # Query all stations\n stations_call = session.query(Station.station).all()\n all_stations = list(np.ravel(stations_call))\n \n return jsonify(all_stations)",
"def stations():\n\t\n\n\tstationquery = session.query(Station.station).all()\n\n\tstationlist = list(np.ravel(stationquery))\n\t\n\treturn jsonify(stationlist)",
"def stations():\n # Query all stations\n\n stations = session.query(Station.station).all()\n all_stations = list(np.ravel(stations))\n\n return jsonify(all_stations)",
"def stations():\n \n station_result = session.query(Station.station).all()\n stations = []\n # Convert list of tuples into normal list\n stations = list(np.ravel(station_result))\n return jsonify(stations)",
"def train_stations(self) -> List[str]:\n return sorted([train_info['HE'] for train_info in train_api.stations_info.values()])",
"def stations():\n # Create link from Python to db\n session = Session(engine)\n\n # Query stations.\n stations = session.query(Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation).all()\n\n session.close()\n\n # Convert to a dictionary.\n all_stations = []\n for station, name, latitude, longitude, elevation in stations:\n station_dict = {}\n station_dict[\"station\"] = station\n station_dict[\"name\"] = name\n station_dict[\"latitude\"] = latitude\n station_dict[\"longitude\"] = longitude\n station_dict[\"elevation\"] = elevation\n all_stations.append(station_dict)\n\n # Return JSON\n return jsonify(all_stations)",
"def stations():\n Stationlist = session.query(Station.name).all()\n session.close()\n # Unravel results into a 1D array and convert to a list\n allstations = list(np.ravel(Stationlist))\n return jsonify(allstations)",
"def stations():\n # Query \n results = session.query(Station.station).all()\n \n list = []\n for result in results:\n list.append(result)\n return jsonify(list)"
] | [
"0.691254",
"0.68177104",
"0.66731423",
"0.6667146",
"0.6630423",
"0.64471847",
"0.64053655",
"0.6382136",
"0.62274975",
"0.62242377",
"0.61863405",
"0.6173621",
"0.6167462",
"0.61557275",
"0.61210793",
"0.61108625",
"0.610515",
"0.6101489",
"0.60884637",
"0.6054263",
"0.6051285",
"0.60458803",
"0.60386217",
"0.6026381",
"0.6026343",
"0.6025953",
"0.6008266",
"0.6005729",
"0.6005012",
"0.59994406"
] | 0.69216037 | 0 |
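Illustrative call of the sort_bike_stations helper in the row above; the station dicts and coordinates are made up for the example, and it assumes geopy's distance module is imported as in the original function:

    stations = [
        {"name": "Central", "stationId": "001", "bikesAvailable": 4, "lat": 60.171, "lon": 24.941},
        {"name": "Harbour", "stationId": "002", "bikesAvailable": 0, "lat": 60.167, "lon": 24.952},
    ]
    nearest = sort_bike_stations(stations, (60.170, 24.940))
    # -> stations with bikes available, closest first; "Harbour" is filtered out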
Build speech for the yes intent; takes an array of the next two stations. By default the session is ended after this speech. | def build_next_stations(stations):
station_0_bikes = stations[0]['bikesAvailable']
station_1_bikes = stations[1]['bikesAvailable']
return f"On station {stations[0]['name']} is {station_0_bikes} " \
f"bike{'s' if station_0_bikes > 1 else ''} available and on station" \
f"{stations[1]['name']} is {station_1_bikes} " \
f"bike{'s' if station_1_bikes > 1 else ''} available. Goodbye and happy cycling!" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toggle_next_speech(self):\n self.bot.loop.call_soon_threadsafe(self.play_next_speech.set)",
"def get_speech(self, phrase):\n src = os.path.join(constants.CONFIG_PATH, self.voice)\n text = phrase\n\n def preprocess(syllables):\n temp = []\n for syllable in syllables:\n for p in self.punctuation:\n syllable = syllable.replace(p, \"\")\n if syllable.isdigit():\n syllable = atc.num2chinese(syllable)\n new_sounds = lazy_pinyin(syllable, style=pypinyin.TONE3)\n for e in new_sounds:\n temp.append(e)\n else:\n temp.append(syllable)\n return temp\n \n if not os.path.exists(src):\n logger.error('{} 合成失败: 请先下载 syllables.zip (https://sourceforge.net/projects/hantts/files/?source=navbar) 并解压到 ~/.wukong 目录下'.format(self.SLUG))\n return None\n logger.debug(\"{} 合成中...\".format(self.SLUG))\n delay = 0\n increment = 355 # milliseconds\n pause = 500 # pause for punctuation\n syllables = lazy_pinyin(text, style=pypinyin.TONE3)\n syllables = preprocess(syllables)\n \n # initialize to be complete silence, each character takes up ~500ms\n result = AudioSegment.silent(duration=500*len(text))\n for syllable in syllables:\n path = os.path.join(src, syllable+\".wav\")\n sound_file = Path(path)\n # insert 500 ms silence for punctuation marks\n if syllable in self.punctuation:\n short_silence = AudioSegment.silent(duration=pause)\n result = result.overlay(short_silence, position=delay)\n delay += increment\n continue\n # skip sound file that doesn't exist\n if not sound_file.is_file():\n continue\n segment = AudioSegment.from_wav(path)\n result = result.overlay(segment, position=delay)\n delay += increment\n\n tmpfile = ''\n with tempfile.NamedTemporaryFile() as f:\n tmpfile = f.name\n result.export(tmpfile, format=\"wav\")\n logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))\n return tmpfile",
"def IntroduceNao():\n\n\t# First, wake up\n\t#motionProxy.wakeUp()\n\tpostureProxy.goToPosture(\"Crouch\", 0.5)\n\tturn_on_eye()\n\tmotionProxy.setBreathEnabled('Body', False)\n\tmotionProxy.setBreathEnabled('Arms', True)\n\t#motionProxy.setBreathEnabled('Head', True)\n\t#motionProxy.rest()\n\n\n\t'''if msg.data in story_dict:\n\t\tstoryNum = story_dict[msg.data]\n\t\tprint storyNum'''\n\n\t'''if msg.data == '[0, 1]' or msg.data == '[1, 0]':\n\t\tif pairs_dict['[0, 1]'] == False:\n\t\t\tprint \"test [0, 1]\"\n\t\t\tpairs_dict['[0, 1]'] = True'''\n\tstory.setLanguage('English')\n\t#story.say(\"\\\\rspd=90\\\\ Hello \\\\pau=500\\\\ My name is nao \\\\pau=500\\\\ I really like reading short stories\")\n\t#story.say(\"\\\\rspd=90\\\\ Do you want to listen to them?\")\n\t#story.say(\"\\\\rspd=90\\\\ sometimes I make mistakes, can you help me to correct them?\")\n\ttime.sleep(1)\n\t#story.say(\"\\\\rspd=90\\\\ If you want to read with me, please bring the book\")\n\tstory.say(\"\\\\rspd=90\\\\ Hello\")\n\tpitch_angle = 0.1\n\tLookAtTheBook(pitch_angle)\n\ttime.sleep(2)",
"def get_ok_response():\n\n session_attributes = {}\n card_title = \"Setting the mood\"\n speech_output = \"Setting the mood\"\n should_end_session = True\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, None, should_end_session))",
"def nao_speech(possible_sentences):\n\n print(random.choice(possible_sentences))",
"def y_o_question(analysis):\n\n #init\n phrase = []\n\n #Recovering the subject\n subject = element_rebuilding.nom_struc_rebuilding(analysis.sn)\n\n if analysis.sv:\n #Recovering the end of the sentence\n phrase = element_rebuilding.end_question_rebuilding(phrase, analysis.sv, analysis.sn, analysis.aim)\n\n #We need special processing to find the position of the subject\n if analysis.sv[0].state == VerbalGroup.negative:\n phrase = phrase[0:2] + subject + phrase[2:]\n else:\n phrase = [phrase[0]] + subject + phrase[1:]\n\n #Recovering subsentences\n for s in analysis.sv[0].vrb_sub_sentence:\n phrase = phrase + sub_process(s)\n else:\n phrase = subject\n\n #Eliminate redundancies if there are\n phrase = other_functions.eliminate_redundancy(phrase)\n\n #If it is a question about the origin\n if analysis.aim == 'origin':\n return phrase + ['from'] + ['?']\n\n return phrase + ['?']",
"def get_confirmation(intent, session):\r\n card_title = intent['name']\r\n should_end_session = False\r\n selected_question = get_question()\r\n session_attributes = create_question_attributes(selected_question)\r\n #reprompt_text = speech_output = \"\"\r\n if 'YesNo' in intent['slots']:\r\n confirmation = intent['slots']['YesNo'].get('value')\r\n if confirmation == \"yes\":\r\n speech_output = \"Thanks for confirmation. \" \\\r\n \"While Responding, 'Say Answer, and then tell your answer'. \" \\\r\n \"Let's begin. Your First Question, '\" + selected_question + \"'\"\r\n reprompt_text = \"Thanks for confirmation. \" \\\r\n \"While Responding, 'Say Answer, and then tell your answer'. \" \\\r\n \"Let's begin. Your First Question, '\" + selected_question + \"'\"\r\n else:\r\n speech_output = \"Oops, Sorry to hear you don't want to Play.\" \\\r\n \" Bye Bye, Hope to meet with you soon.\"\r\n reprompt_text = \"Oops, Sorry to hear you don't want to Play.\" \\\r\n \" Bye Bye, Hope to meet with you soon.\"\r\n should_end_session = True\r\n\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))",
"def get_welcome_response():\n\n session_attributes = {}\n card_title = \"Time Phrase\"\n speech_output = \"Welcome to the Time Phrase Alexa Skill. \" \\\n \"I'm going to say few phrases related to time. Ready to tell the equivalent time ?\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Please tell me if you are ready by saying yes.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))",
"def build_speechlet_response(title, output, reprompt_text, should_end_session):\r\n\r\n return {\r\n 'outputSpeech': {\r\n 'type': 'SSML',\r\n 'ssml': \"<speak>\"+output+\"</speak>\"\r\n },\r\n\r\n 'card': {\r\n 'type': 'Simple',\r\n 'title': CardTitlePrefix + \" - \" + title,\r\n 'content': output\r\n },\r\n \r\n 'reprompt': {\r\n 'outputSpeech': {\r\n 'type': 'SSML',\r\n 'ssml': \"<speak>\"+reprompt_text+\"</speak>\"\r\n }\r\n },\r\n 'shouldEndSession': should_end_session\r\n }",
"def respond(self,obs):\n if obs.timestep == 0:\n #If it's the first timestep, we have no clue. Since we don't even know if we are going to ask questions in the\n #future, we go ahead and init the inference engine for future use.\n self.p_obs = copy.deepcopy(obs)\n self.tracking_stations = self.get_remaining_stations(obs)\n self.inference_engine = inference_engine(self.tracking_agent,self.tracking_stations)\n #And set the knowledge source to inference so the next step we know where to look for in the upcoming step.\n self.knowledge.source[0] = ORIGIN.Inference\n\n #And pick a target station at random since we have to move forward.\n target_station = np.random.choice(self.tracking_stations) #pick a station at random.\n\n else:\n curr_k_id = self.knowledge.get_current_job_station_id()\n\n #Checking what knowledge we have.\n if (self.knowledge.source[curr_k_id]==ORIGIN.Answer):\n #Then we simply work on the station because we have an answer telling us that that's the station to work on.\n target_station = self.knowledge.station_order[curr_k_id]\n\n elif (self.knowledge.source[curr_k_id] == None):\n #which means we just finished a station in the last time-step. This calls for re-initalizing the inference_engine\n self.tracking_stations = self.get_remaining_stations(obs)\n self.inference_engine = inference_engine(self.tracking_agent,self.tracking_stations)\n target_station = np.random.choice(self.tracking_stations)\n\n elif (self.knowledge.source[curr_k_id]==ORIGIN.Inference):\n #Which means we have been working on a inference for a station.\n target_station = self.inference_engine.inference_step(self.p_obs,obs)\n self.knowledge.update_knowledge_from_inference(target_station)\n warnings.WarningMessage(\"Provision resetting inference_engine when a station is finished\")\n\n else:\n #it should never come to this.\n raise Exception(\"Some mistake around\")\n\n \"\"\"\n Okay, now that we know which station we should be headed to, we need to ensure the nitty-gritty details.\n Do we have a tool?\n If yes,\n if it matches our target station:\n destination: station\n else:\n destination: base\n else:\n destination: base\n \n Are we near our destination?\n Yes:\n Is it the base?\n Pick up the tool.\n else:\n execute work action.\n No:\n keep moving. \n \"\"\" \n\n if self.tool is not None:\n if self.tool == target_station:\n destination = obs.allPos[obs.stationIndices[target_station]]\n else:\n destination = global_defs.TOOL_BASE\n else:\n destination = global_defs.TOOL_BASE\n\n if utils.is_neighbor(self.pos,destination):\n if destination == global_defs.TOOL_BASE:\n #We are at the base to pick up a tool.\n desired_action = global_defs.Actions.NOOP\n self.tool = target_station\n else:\n #we are the station to work.\n desired_action = global_defs.Actions.WORK\n else:\n #Navigate to destination.\n desired_action = None\n\n obstacles = copy.deepcopy(obs.allPos).remove(self.pos)\n proposal = utils.generate_proposal(self.pos,destination,obstacles,desired_action)\n return proposal",
"def yes():\r\n global CurrentState\r\n global CurrentInput\r\n global RESPONSEOPTIONS\r\n state = RESPONSEOPTIONS[0]\r\n # print(\"state under update:\",state)\r\n state.updateStateIncoming(CurrentState.id)\r\n state.updateStateWords(CurrentInput)\r\n # print(\"writing state:\",state)\r\n writeState(state)\r\n CurrentState = getState(state.id)",
"def speech_callback(self, data):\n speech = data.data\n print \"RECEIVED SPEECH: \", speech\n if \"keyword detected\" in speech:\n if self.idling:\n self.control_pub.publish(\"ft go; idle stop; stt go\")\n self.behav_pub.publish(\"greet\")\n # self.behav_pub.publish(random.choice(categorized_behaviors['greeting']))\n elif \"play\" in speech:\n print \"STARTING GAME\"\n self.start_game = \"TTT\"\n elif \"bye\" in speech:\n self.control_pub.publish(\"idle go; stt go; stt_keyword go\")\n elif \"okay\" in speech:\n self.ok = True",
"def init_speech():\n program = True\n while program is True:\n print('Listening...')\n with sr.Microphone() as source:\n audio = r.listen(source)\n\n try:\n command = r.recognize_google(audio)\n print(command)\n except:\n continue\n\n if command in ['quit', 'exit', 'exits', 'exxat', 'bye', 'by' 'good-by', 'goodbye']:\n program = False\n play_audio('./audio/sentnc16.wav')\n break\n\n cmmd.discover(command)",
"def func(self):\n if not self.raw:\n self.msg(\"Say what?\")\n return\n options = {\"is_pose\": True}\n speech = self.raw.lstrip(\" \")\n # calling the speech hook on the location\n speech = self.caller.location.at_say(speech)\n # Feedback for the object doing the talking.\n langstring = \"\"\n current = self.caller.languages.current_language\n if current and current.lower() != \"arvani\":\n langstring = \" in %s\" % current.capitalize()\n options.update({\"language\": current, \"msg_content\": speech})\n self.msg(\n 'You say%s, \"%s{n\"' % (langstring, speech),\n from_obj=self.caller,\n options=options,\n )\n # Build the string to emit to neighbors.\n pre_name_emit_string = ' says%s, \"%s{n\"' % (langstring, speech)\n self.caller.location.msg_action(\n self.caller, pre_name_emit_string, exclude=[self.caller], options=options\n )\n self.caller.posecount += 1",
"def yes_intent(intent, session):\n if session.get('attributes', {}).get('add_address') and \\\n session['attributes']['next_step'] == 'store_address':\n return store_address(intent, session)\n elif session.get('attributes', {}).get('remove_address'):\n return remove_address(intent, session)\n else:\n return reply.build(\"Sorry, I don't know what you mean. Try again?\",\n persist=session.get('attributes', {}),\n is_end=False)",
"def get_speech(self, word):\n posses = ['verb', 'noun', 'adj', 'adv', 'as in', 'conjunction']\n speeches = []\n\n def get_all_synonyms(word1, speech1):\n for w in Word(word1).synonyms('all', partOfSpeech=speech1):\n if not w == []:\n return w\n return []\n\n def empty_tree(input_list):\n # print(input_list)\n if type(input_list) == type([]):\n for l in input_list:\n if not empty_tree(l):\n return False\n return True\n else:\n return False\n\n for poss in posses:\n if not empty_tree(get_all_synonyms(word, poss)):\n speeches.append(poss)\n return speeches",
"def _build_speechlet_response(self):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': self._speech_output\n },\n 'card': {\n 'type': 'Simple',\n 'title': self._card_title,\n 'content': self._card_output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': self._reprompt_text\n }\n },\n 'shouldEndSession': self._should_end_session\n }",
"def add_start_and_end_of_sentence_symbols(fst_1):\n\n # Create start of sentence FSA\n # 1 is start of sentence label\n start_of_sentence = fst.Transducer()\n start_of_sentence.add_arc(0, 1, 0, 1)\n start_of_sentence[1].final = True\n\n # Create end of sentence FSA\n # 2 is end of sentence label\n end_of_sentence = fst.Transducer()\n end_of_sentence.add_arc(0, 1, 0, 2)\n end_of_sentence[1].final = True\n\n # Modify start_of_sentence by concatenating fst_1\n start_of_sentence.concatenate(fst_1)\n\n # Modify joint start_of_sentence and fst_1 by concatenating end_of_sentence\n start_of_sentence.concatenate(end_of_sentence)\n\n return start_of_sentence",
"def build_speechlet_response(title, output, reprompt_text, should_end_session):\n\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': \"SessionSpeechlet - \" + title,\n 'content': \"SessionSpeechlet - \" + output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }",
"def build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': CardTitlePrefix + \" - \" + title,\n 'content': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }",
"def speech_response(output, endsession):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'shouldEndSession': endsession\n }",
"def speech_response(output, endsession):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'shouldEndSession': endsession\n }",
"def ask(self, question):\n\n\t\t# If you're just trying to test voice detection, you can uncomment\n\t\t# the following 5 lines. Bobby will guess \"yellow flashlight\" and will prompt\n\t\t# you to correct him by saying \"blue flashlight\"\n\n\t\t# fake_answers = [\"no\", \"yes\", \"yes\", \"yes\", \"no\", \"yes\", \"yes\"]\n\t\t# global count\n\t\t# count += 1\n\t\t# print question\n\t\t# return fake_answers[count - 1]\n\n\t\t# self.say(question)\n\t\t# #starts listening for an answer\n\t\t# self.asr.subscribe(\"TEST_ASR\")\n\t\t# data = (None, 0)\n\t\t# while not data[0]:\n\t\t# \tdata = self.mem.getData(\"WordRecognized\")\n\t\t# #stops listening after he hears yes or no\n\t\t# self.asr.unsubscribe(\"TEST_ASR\")\n\t\t#\n\t\t# print data\n\t\t#\n\t\t# for word in self.yes_no_vocab:\n\t\t# \tfor syn in self.yes_no_vocab[word]:\n\t\t# \t\tif data[0] == syn:\n\t\t# \t\t\treturn word",
"def observe_hypotheses(self, prompt, label, agents, hyp0, hyp1):\n def make_observation(agent, hyp_0, hyp_1):\n if 'task_data' in hyp_0:\n agent.observe({'id':'Prompt', 'text': prompt['text'] + \n '\\n<b>Label</b>: '+ label +\n '\\n\\n<b>'+'Your claim'+'</b>: '+hyp_0['text'] + \n '\\n<b>'+'Their claim'+'</b>: '+hyp_1['text'],\n 'task_data': hyp_0['task_data'] })\n else:\n agent.observe({'id':'Prompt', 'text':prompt['text'] + \n '\\n<b>Label</b>: '+ label +\n '\\n\\n<b>'+'Your claim'+'</b>: '+hyp_0['text'] + \n '\\n<b>'+'Their claim'+'</b>: '+hyp_1['text']})\n\n # To-do: remove unnecessary if continuing to not do a coin flip.\n # flip = random.randint(0,1)\n if len(agents) > 1:\n make_observation(agents[0], hyp0, hyp1)\n make_observation(agents[1], hyp1, hyp0)\n else:\n make_observation(agents[0], hyp0, hyp1)\n # flip = 0\n mappy = [self.noflip, self.noflip]\n # if flip == 0:\n # # mappy_0 = self.noflip\n # for i, agent in enumerate(agents):\n # make_observation(agent, hyp0, hyp1)\n # mappy[i] = self.noflip\n # make_observation(agents[0], hyp0, hyp1)\n # make_observation(agents[1], hyp1, hyp0)\n # else:\n # flip0 = copy.deepcopy(hyp0)\n # flip1 = copy.deepcopy(hyp1)\n # flip0['id'] = 'Claim 2'\n # flip1['id'] = 'Claim 1'\n # if len(agents) == 2:\n # make_observation(agents[0], hyp0, hyp1)\n # make_observation(agents[1], flip1, flip0)\n # mappy[0] = self.noflip\n # mappy[1] = self.yesflip\n # elif len(agents) == 1:\n # make_observation(agents[0], flip1, flip0)\n # mappy[0] = self.yesflip\n # else:\n # assert \"Do not currently support more than 2 agents in a single set.\"\n return mappy[0], mappy[1]",
"def buildTrainingSequences(voc, maxlen=50, step=3):\n \n text, sym_indices, _ = voc\n sentences = []\n next_syms = []\n \n syms = set(text) # unique symbols (chars or words)\n \n for i in range(0, len(text) - maxlen, step):\n sentences.append(text[i: i + maxlen])\n next_syms.append(text[i + maxlen])\n print('nb sequences:', len(sentences))\n \n X = np.zeros((len(sentences), maxlen), dtype=np.int)\n y = np.zeros((len(sentences), len(syms)), dtype=np.bool)\n\n for i, sentence in enumerate(sentences):\n for j, sym in enumerate(sentence):\n X[i,j] = sym_indices[sym] \n \n y[i, sym_indices[next_syms[i]]] = 1 # one-hot enconding\n\n return (X,y)",
"def onCurrentSentence(self, *_args):\n global instance\n log(str(_args))\n #if (instance.isSpeaking and len(_args[1])==0): instance.SpeakDone()\n return",
"def check_commute(intent, session):\n user_data = database.get_user_data(session['user']['userId'])\n if not user_data:\n return reply.build(\"I don't remember any of your addresses. \"\n \"You can ask me to \\\"save an address\\\" \"\n \"if you want me to be able to check \"\n \"on your daily commute.\",\n is_end=True)\n stations = location.get_stations(config.bikes_api)\n utter = ''\n card_text = ['Checked at %s' % _time_string()]\n first_phrase = True\n for which, av_func, av_name in \\\n [('origin', _get_bikes_available, 'bikes'),\n ('destination', _get_docks_available, 'docks')]:\n if user_data.get(which):\n lat = user_data[which]['latitude']\n lon = user_data[which]['longitude']\n nearest_st = geocoding.station_from_lat_lon(\n lat, lon, stations, n_nearest=2)\n\n n_thing = av_func(nearest_st[0])\n st_name = location.text_to_speech(nearest_st[0]['name'])\n av_slice = slice(0, (-1 if n_thing == 1 else None)) # singular?\n phrase = ('%d %s at the %s station' %\n (n_thing, av_name[av_slice], st_name))\n if first_phrase:\n verb = 'is' if n_thing == 1 else 'are'\n phrase = ('There %s ' % verb) + phrase\n else:\n phrase = ', and ' + phrase\n utter += phrase\n first_phrase = False\n card_text.append(\"%s: %d %s at %s\" %\n (which.capitalize(),\n n_thing,\n av_name[av_slice],\n nearest_st[0]['name']))\n\n if n_thing < 3:\n # If there's not many bikes/docks at the best station,\n # refer users to the next nearest station.\n n_thing = av_func(nearest_st[1])\n av_slice = slice(0, (-1 if n_thing == 1 else None)) # singular?\n st_name = location.text_to_speech(nearest_st[1]['name'])\n utter += (', and %d %s at the next nearest station, %s. ' %\n (n_thing, av_name[av_slice], st_name))\n first_phrase = True # Start a new sentence next time\n card_text.append(\"Next Best %s: %d %s at %s\" %\n (which.capitalize(),\n n_thing,\n av_name[av_slice],\n nearest_st[1]['name']))\n\n return reply.build(utter,\n card_title=(\"Your %s Commute Status\" %\n config.network_name),\n card_text='\\n'.join(card_text),\n is_end=True)",
"def speak(self):\n # Speaks randomly to another agent on the same cell\n anticipated_meaning = None\n cellmates = self.model.grid.get_cell_list_contents([self.pos])\n\n # If other agents on the same cell\n if len(cellmates) > 1:\n hearer = self.random.choice(cellmates)\n\n while (hearer == self): # agents should not talk to themselves\n hearer = self.random.choice(cellmates)\n\n meaning = self.random.choice(self.model.schedule.agents).unique_id\n\n # If the speaker is not acquainted with the meaning\n if meaning not in self.meanings:\n print(\"New meaning added to speaker\")\n self.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # If the hearer is not acquainted with the meaning\n if meaning not in hearer.meanings:\n print(\"New meaning added to hearer\")\n hearer.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # 50% chance of having an anticipated meaning default\n if self.random.random() <= self.model.antecipated_prob:\n print(\" \" + str(self.unique_id) +\n \" points at \" + str(meaning))\n anticipated_meaning = meaning\n\n # If the speaker has a word for the meaning\n if meaning in self.meaning2word:\n word = self.meaning2word[meaning]\n\n # If the hearer has a word for the meaning\n if word in hearer.word2meaning:\n # If the hearer has no anticipated meaning\n if anticipated_meaning == None:\n return Conversation(word=word, meaning=meaning, success=1.0)\n # If anticipated meaning different from hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning != hearer.word2meaning[word]):\n hearer.delete_link(word)\n hearer.create_link(word, anticipated_meaning)\n return None\n # If anticipated meaning same as hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning == hearer.word2meaning[word]):\n return Conversation(word=word, meaning=meaning, success=1.0)\n\n # If the hearer has no word for the meaning\n else:\n # If anticipated meaning same as speaker meaning\n if (anticipated_meaning != None\n and word not in hearer.word2meaning\n and anticipated_meaning not in hearer.meaning2word):\n hearer.create_link(word, anticipated_meaning)\n return Conversation(word=word, meaning=meaning, success=0.0)\n\n # If the speaker has no word for the meaning\n if meaning not in self.meaning2word:\n return Conversation(word=None, meaning=meaning, success=0.0)",
"def detect_intent_audio():\n audio_file_path = home+\"catkin_ws/src/robot_ears/speech_wavs/normalized.wav\"\n session_client = dialogflow.SessionsClient()\n\n # Note: hard coding audio_encoding and sample_rate_hertz for simplicity.\n audio_encoding = dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16\n sample_rate_hertz = 16000\n session = session_client.session_path(\"toibot-1549026967633\", \"gal1\")\n print('Session path: {}\\n'.format(session))\n\n with open(audio_file_path, 'rb') as audio_file:\n input_audio = audio_file.read()\n\n audio_config = dialogflow.types.InputAudioConfig(\n audio_encoding=audio_encoding, language_code=\"en\",\n sample_rate_hertz=sample_rate_hertz)\n\n query_input = dialogflow.types.QueryInput(audio_config=audio_config)\n response = session_client.detect_intent(\n session=session, query_input=query_input,\n input_audio=input_audio)\n\n print('=' * 20)\n # save query.txt \n write_to_file(home+\"catkin_ws/src/robot_ears/text_files/query.txt\", response.query_result.query_text)\n print(\"query: \" + response.query_result.query_text)\n # save intent.txt \n write_to_file(home+\"catkin_ws/src/robot_ears/text_files/intent.txt\", response.query_result.intent.display_name)\n print(\"response: \" + response.query_result.intent.display_name)\n # save response.txt \n write_to_file(home+\"catkin_ws/src/robot_ears/text_files/response.txt\", response.query_result.fulfillment_text)\n print(\"intent: \" + response.query_result.fulfillment_text)\n print('=' * 20)",
"def start_survey():\n title=satisfaction_survey.title\n instructions = satisfaction_survey.instructions\n # session['responses'] =[]\n return render_template(\"instructions.html\", survey_title=title, instructions=instructions)"
] | [
"0.5466116",
"0.53381044",
"0.5165462",
"0.5158772",
"0.5136469",
"0.5119758",
"0.51051253",
"0.50937116",
"0.50589454",
"0.50452626",
"0.49854103",
"0.4973108",
"0.4939487",
"0.48700067",
"0.48483983",
"0.48335534",
"0.48153678",
"0.48100355",
"0.47805727",
"0.47753873",
"0.4773796",
"0.4773796",
"0.47436327",
"0.4742052",
"0.47197607",
"0.47184893",
"0.47119913",
"0.47060877",
"0.4697693",
"0.46975628"
] | 0.541555 | 1 |
Main function of filtering events. Parse user input of URL and auth token and use them to create an example Archivist connection, then use the passed-in properties and attributes to filter all events matching the selected properties and attributes through the function get_matching_events. | def main():
with open(".auth_token", mode="r") as tokenfile:
authtoken = tokenfile.read().strip()
# Initialize connection to Archivist
aconn = Archivist(
"https://soak-0-avid.engineering-k8s-stage-2.dev.wild.jitsuin.io",
auth=authtoken,
)
# Get all events with the required properties and attributes
props = {"confirmation_status": "CONFIRMED"}
attrs = {"arc_display_type": "Traffic light"}
for event in aconn.events.list(asset_id="assets/-", props=props, attrs=attrs):
print("event", event)
# alternatively one could pull the list and cache locally...
events = aconn.events.list(asset_id="assets/-", props=props, attrs=attrs)
for event in events:
print("event", event) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_query_events(self):\n query_list = {\n 'q': 'test',\n 'type': 'show'\n }\n results = query_events(query_list)\n events = list(results['events'])\n showcase = list(results['showcase_events'])\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in showcase)\n self.assertFalse(self.event_film in events)",
"def collect_events(helper, ew):\n\n opt_start_time_start = helper.get_arg('start_time_start')\n opt_endpoints = helper.get_arg('endpoints')\n opt_interval = int(helper.get_arg('interval'))\n opt_live = False\n\n proxy = helper.get_proxy()\n if proxy:\n proxy_auth = \"{}:{}\".format(\n proxy['proxy_username'], proxy['proxy_password'])\n proxies = {\n \"https\": \"{protocol}://{auth}@{host}:{port}/\".format(protocol=proxy['proxy_type'], auth=proxy, host=proxy['proxy_url'], port=proxy['proxy_port']),\n \"http\": \"{protocol}://{auth}@{host}:{port}/\".format(protocol=proxy['proxy_type'], auth=proxy, host=proxy['proxy_url'], port=proxy['proxy_port'])\n }\n else:\n proxies = None\n\n helper.log_debug(\n \"[-] webex password_type: {}\".format(helper.get_global_setting(\"password_type\")))\n\n params = {\"opt_username\": helper.get_global_setting(\"username\"),\n \"opt_password\": helper.get_global_setting(\"password\"),\n \"opt_site_name\": helper.get_global_setting(\"site_name\"),\n \"limit\": 500,\n \"timezone\": \"20\",\n # \"password_type\": authentication_type[\"Password Authentication\"],\n # \"password_type\": authentication_type[\"OAuth\"],\n \"password_type\": authentication_type[helper.get_global_setting(\"password_type\")],\n \"client_id\": helper.get_global_setting(\"client_id\"),\n \"client_secret\": helper.get_global_setting(\"client_secret\"),\n \"refresh_token\": helper.get_global_setting(\"refresh_token\"),\n \"proxies\": proxies}\n\n # Historical Data\n helper.log_debug(\"Historical Data\")\n for opt_endpoint in opt_endpoints:\n helper.log_debug(\"[-] \\t At {}\".format(opt_endpoint))\n\n # endtime is midnight of GMT - 3days\n enddt = datetime.utcnow().date() - timedelta(3)\n end_time = datetime.combine(\n enddt, datetime.max.time()).strftime('%m/%d/%Y %H:%M:%S')\n\n # create checkpoint key for offest and timestamp\n timestamp_key = \"timestamp_{}_{}_processing\".format(\n helper.get_input_stanza_names(), opt_endpoint)\n\n start_time = helper.get_check_point(timestamp_key)\n if start_time is None:\n # if it's the 1st time, get the start_time from UI, and then save it in checkpoint\n start_time = opt_start_time_start\n helper.save_check_point(timestamp_key, start_time)\n else:\n # shift the start_time by 1 second\n start_time = (datetime.strptime(start_time, '%m/%d/%Y %H:%M:%S') +\n timedelta(seconds=1)).strftime('%m/%d/%Y %H:%M:%S')\n\n helper.log_debug(\"Start time: {}\".format(start_time))\n helper.log_debug(\"End time: {}\".format(end_time))\n\n # Update Parameters\n params.update({\"mode\": \"historical\"})\n params.update({\"opt_endpoint\": opt_endpoint})\n params.update({\"start_time\": start_time})\n params.update({\"end_time\": end_time})\n params.update({\"timestamp_key\": timestamp_key})\n\n records = params['limit']\n offset = 1\n while (records == params['limit']):\n helper.log_debug(\"current_offset: {}\".format(offset))\n params['offset'] = offset\n records = fetch_webex_logs(ew, helper, params)\n helper.log_debug(\"\\t Offet:{}\\tLimit: {}\\tRecords Returned: {}\".format(\n offset, params['limit'], records))\n if records:\n offset += records",
"def test_otoroshi_controllers_adminapi_analytics_controller_filterable_events(self):\n pass",
"async def filter(self, **kwargs):\n\n pass",
"def analyze(self, event):\n \n # TRIGGER\n if not self.trigger(event):\n return False\n ###print \"%s %s passed the trigger %s\"%('-'*20,event.event,'-'*40)\n \n # TRIGGER OBJECTS\n trigObjects = { }\n for trigobj in Collection(event,'TrigObj'):\n ###print trigobj, trigobj.filterBits\n if trigobj.id not in [11,13,15]: continue\n if trigobj.id not in trigObjects:\n trigObjects[trigobj.id] = { }\n trigObjects[trigobj.id][trigobj] = [ ]\n \n # PREPARE COUNTERS\n nMatches = { }\n nPairMatches = { }\n filterMatches = { f: [ ] for f in self.filters }\n for filter in self.unique_filters:\n nMatches[filter] = { }\n default = -2 # filter's trigger was not fired\n if filter.trigger.fired(event):\n trigObjExists = False\n if filter.id in trigObjects:\n for trigobj, filters in trigObjects[filter.id].iteritems():\n if filter.hasbits(trigobj.filterBits):\n filters.append(filter)\n trigObjExists = True\n if trigObjExists:\n default = 0 # event has trigger object for these filter bits\n else:\n default = -1 # event has no trigger object for these filter bits\n for wpbit, wp in self.objectIDWPs[filter.id]:\n nMatches[filter][wpbit] = default\n for pair in self.filterpairs:\n nPairMatches[pair] = { }\n default = -2 # filter's trigger was not fired\n if pair.trigger.fired(event):\n if nMatches[pair.filter1][0]<0 or nMatches[pair.filter2][0]<0:\n default = -1 # event has trigger object for these filter bits\n else:\n default = 0 # event has no trigger object for these filter bits\n for wpbit, wp in self.objectIDWPs[15]:\n nPairMatches[pair][wpbit] = default\n \n # MATCH ELECTRONS\n if 11 in trigObjects:\n electrons = Collection(event,'Electron')\n for electron in electrons:\n for trigobj, filters in trigObjects[11].iteritems():\n if electron.DeltaR(trigobj)>0.3: continue\n for filter in filters:\n #if electron.pt<filter.ptmin: continue\n nMatches[filter][0] += 1\n filterMatches[filter].append((trigobj,electron))\n \n # MATCH MUONS\n if 13 in trigObjects:\n muons = Collection(event,'Muon')\n for muon in muons:\n for trigobj, filters in trigObjects[13].iteritems():\n if muon.DeltaR(trigobj)>0.3: continue\n for filter in filters:\n #if muon.pt<filter.ptmin: continue\n nMatches[filter][0] += 1\n filterMatches[filter].append((trigobj,muon))\n \n # MATCH TAUS\n if 15 in trigObjects:\n taus = Collection(event,'Tau')\n for tau in taus:\n #dm = tau.decayMode\n #if dm not in [0,1,10]: continue\n for trigobj, filters in trigObjects[15].iteritems():\n if tau.DeltaR(trigobj)>0.3: continue\n for filter in filters:\n #if tau.pt<filter.ptmin: continue\n filterMatches[filter].append((trigobj,tau))\n for wpbit, wp in self.objectIDWPs[15]: # ascending order\n if tau.idMVAoldDM2017v2<wpbit: break\n nMatches[filter][wpbit] += 1\n \n # MATCH PAIRS\n for pair in self.filterpairs:\n if pair.filter1==pair.filter2: # for ditau\n for i, (trigobj1,recoobj1) in enumerate(filterMatches[pair.filter1]):\n for trigobj2, recoobj2 in filterMatches[pair.filter1][i+1:]:\n if trigobj1==trigobj2: continue\n if recoobj1==recoobj2: continue\n #if recoobj1.DeltaR(recoobj2)<0.4: continue\n for wpbit, wp in self.objectIDWPs[15]: # ascending order\n if recoobj1.idMVAoldDM2017v2<wpbit or recoobj2.idMVAoldDM2017v2<wpbit: break\n nPairMatches[pair][wpbit] += 1\n else: # for eletau and mutau\n for trigobj1, recoobj1 in filterMatches[pair.filter1]:\n for trigobj2, recoobj2 in filterMatches[pair.filter2]:\n if trigobj1.DeltaR(trigobj2)<0.3: continue\n if recoobj1.DeltaR(recoobj2)<0.3: continue\n for wpbit, wp in self.objectIDWPs[15]: # ascending order\n if 
recoobj2.idMVAoldDM2017v2<wpbit: break\n nPairMatches[pair][wpbit] += 1\n \n # FILL BRANCHES\n self.out.fillBranch(\"trigger_etau\", self.triggers['etau'].fired(event))\n self.out.fillBranch(\"trigger_mutau\", self.triggers['mutau'].fired(event))\n self.out.fillBranch(\"trigger_ditau\", self.triggers['ditau'].fired(event))\n self.out.fillBranch(\"trigger_SingleElectron\", self.triggers['SingleElectron'].fired(event))\n self.out.fillBranch(\"trigger_SingleMuon\", self.triggers['SingleMuon'].fired(event))\n for filter in self.unique_filters:\n for wpbit, wp in self.objectIDWPs[filter.id]:\n wptag = \"\" if wp=='all' else '_'+wp\n self.out.fillBranch(\"n%s_%s%s\"%(filter.collection,filter.name,wptag),nMatches[filter][wpbit])\n for pair in nPairMatches:\n for wpbit, wp in self.objectIDWPs[15]:\n wptag = \"\" if wp=='all' else '_'+wp\n self.out.fillBranch(\"nPair_%s%s\"%(pair.name,wptag\n ),nPairMatches[pair][wpbit])\n return True",
"def main():\n\n # parses arguments\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-s', action='store', dest='start_index', type=int,\n help='The starting index for events. Default is 0')\n\n parser.add_argument('-e', action='store', dest='end_index', type=int,\n help='The starting index for events. Default is 5,000')\n\n results = parser.parse_args()\n\n start_index = results.start_index or 0\n\n end_index = results.end_index or 5000\n\n scraper = Scraper()\n\n # these are the event column titles from the sample import csv given by localist\n event_column_titles = [\n 'Title','Description','Date From','Date To','Recurrence','Start Time','End Time',\n 'Location','Address','City','State','Event Website','Room','Keywords','Tags',\n 'Photo URL','Ticket URL','Cost','Hashtag','Facebook URL','Group','Department',\n 'Allow User Activity','Allow User Attendance','Visibility','Featured Tabs',\n 'Sponsored','Venue Page Only','Exclude From Trending','Event Types','Invited Audience', 'Original URL',\n 'Location Details'\n ]\n\n out_stream = open('event_import.csv', 'w')\n\n writer = Writer(event_column_titles, out_stream)\n\n writer.write_headers()\n\n # iterates through the specified event numbers and scrapes each one and writes\n # it to the output file\n for i in range(start_index, end_index + 1):\n current_url = 'http://test-ucscevents.pantheonsite.io/event/' + str(i)\n print(\"processing url: \" + current_url)\n r = requests.get(current_url)\n if r.status_code != requests.codes.ok:\n print(' 404')\n else:\n soup = get_soup_from_url(current_url)\n events = scraper.scrape_event(soup)\n for event in events:\n event['Original URL'] = current_url\n\n writer.write_object(event) # event written to output file here\n\n out_stream.close()",
"def get_some_events(cls, field, filter):\n try:\n events = list(events_coll.find({field: filter}))\n events_list = []\n if events is not None:\n for event in events:\n one_event = cls(**event)\n events_list.append(one_event)\n return events_list\n except Exception as e:\n print(e)",
"def test_filters_anonymous_filtering():\n event = {\"username\": \"john\"}\n anonymous_event = {\"username\": \"\"}\n assert filters.anonymous(event) == event\n assert filters.anonymous(anonymous_event) is None",
"def listings(request, category1, category2, category3, page = 1):\n \n # Creating URL for request\n base_url = \"https://www.eventbriteapi.com/v3/events/search/\"\n token_component = \"token=BKKRDKVUVRC5WG4HAVLT\" #I had this token in my mail link\n category_component = \"categories=\" + category1 + ',' + category2 + ',' + category3\n page_component = \"page=\" + str(page)\n url_without_page = base_url + \"?\" + token_component + \"&\" + category_component\n url_complete = url_without_page + \"&\" + page_component\n \n # GET events from Eventbrite\n f = urllib2.urlopen(url_complete) \n json_string = f.read() \n parsed_json = json.loads(json_string) \n\n # Parse through JSON\n events = parsed_json['events']\n eventsList = []\n \n for i in events:\n eventsList.append(event_container())\n \n # Parse further through JSON\n eventsList[-1].name = i['name']['text']\n eventsList[-1].id = i['id']\n eventsList[-1].url = i['url']\n try:\n eventsList[-1].description = i['description']['text']\n except:\n eventsList[-1].description = \"No description available\"\n eventsList[-1].resource_uri = i['resource_uri']\n \n \n listings_url_base = '/topthree/listings/'+ category1 + '/' + category2 + '/' + category3 + '/'\n \n # Pagination\n \n \"\"\"\n Performing manual pagination instead of Django pagination \n because GET request for events pulls in paginated data already\n \"\"\"\n \n next_page = int(page) + 1\n next_page_url = listings_url_base + str(next_page) \n \n if int(page)>1:\n prev_page = int(page) - 1\n prev_page_url = listings_url_base + str(prev_page) \n\n else:\n prev_page = 0\n prev_page_url = \"#\"\n \n \n # Sending values to template\n \n template = loader.get_template('listings.html')\n\n context = RequestContext(request, {\n 'eventsList': eventsList,\n 'prev_page_url':prev_page_url,\n 'next_page_url':next_page_url,\n 'prev_page':prev_page,\n 'page':page,\n 'category1':category1,\n 'category2':category2,\n 'category3':category3,\n })\n \n return HttpResponse(template.render(context))",
"def events_info(request):\n \n global input\n \n if request == 'event-based':\n client_neries = Client_neries()\n \n events = client_neries.getEvents(min_datetime=input['min_date'], \\\n max_datetime=input['max_date'], min_magnitude=input['min_mag'], \\\n max_magnitude=input['max_mag'], min_latitude=input['evlatmin'], \\\n max_latitude=input['evlatmax'], min_longitude=input['evlonmin'], \\\n max_longitude=input['evlonmax'], min_depth = input['min_depth'], \\\n max_depth=input['max_depth'], max_results=input['max_result'])\n \n for i in range(0, len(events)):\n events[i]['t1'] = events[i]['datetime'] - input['preset']\n events[i]['t2'] = events[i]['datetime'] + input['offset']\n \n elif request == 'continuous':\n m_date = UTCDateTime(input['min_date'])\n M_date = UTCDateTime(input['max_date'])\n \n t_cont = M_date - m_date\n \n events = []\n \n if t_cont > input['interval']:\n num_div = int(t_cont/input['interval'])\n t_res = t_cont - num_div*input['interval']\n \n for i in range(0, num_div):\n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + i*input['interval'], \\\n 't1': m_date + i*input['interval'],\\\n 't2': m_date + (i+1)*input['interval'] + 60.0,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n \n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i+1), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + (i+1)*input['interval'], \\\n 't1': m_date + (i+1)*input['interval'],\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n else:\n events.append({'author': 'NAN', 'event_id': 'continuous0', \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date, \\\n 't1': m_date,\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n\n return events",
"def collect_events(helper, ew): # pylint: disable=no-self-argument,invalid-name,too-many-statements,too-many-branches\n\n def clear_checkbox(session_key, stanza):\n \"\"\" Sets the 'reindex_data' value in the REST API to 0 to clear it. Splunk then automatically restarts the input.\"\"\"\n url = f'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/data/inputs/strava_api/{stanza}'\n headers = {'Authorization': f'Splunk {session_key}'}\n payload = 'reindex_data=0'\n helper.send_http_request(url, \"POST\", headers=headers, payload=payload, verify=False, use_proxy=False)\n\n def get_activities(ts_activity, access_token):\n \"\"\"Gets all activities, 30 per page as per Strava's default.\"\"\"\n params = {'after': ts_activity, 'access_token': access_token}\n url = \"https://www.strava.com/api/v3/activities\"\n response = return_json(url, \"GET\", parameters=params)\n return response\n\n def get_activity(activity, token):\n \"\"\"Gets specific activity.\"\"\"\n url = f'https://www.strava.com/api/v3/activities/{activity}?include_all_efforts=true'\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response\n\n def get_activity_stream(token, activity, types, series_type='time', resolution='high'):\n \"\"\"Gets the activity stream for given activity id.\"\"\"\n types = ','.join(types)\n params = {'access_token': token}\n url = f'https://www.strava.com/api/v3/activities/{activity}/streams/{types}&series_type={series_type}&resolution={resolution}&key_by_type='\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response\n\n def get_athlete(token):\n \"\"\"Gets details on currently logged in athlete.\"\"\"\n url = \"https://www.strava.com/api/v3/athlete\"\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response\n\n def get_epoch(timestamp):\n \"\"\"Converts Strava datetime to epoch timestamp\"\"\"\n timestamp_dt = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%SZ\")\n epoch = calendar.timegm(timestamp_dt.timetuple())\n return epoch\n\n def get_token(client_id, client_secret, token, renewal):\n \"\"\"Get or refresh access token from Strava API.\"\"\"\n url = \"https://www.strava.com/api/v3/oauth/token\"\n\n if renewal:\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'refresh_token': token,\n 'grant_type': 'refresh_token'}\n message = \"Successfully refreshed Strava token.\"\n else:\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'code': token,\n 'grant_type': 'authorization_code'}\n message = \"Successfully authenticated with Strava using access code.\"\n\n response = return_json(url, \"POST\", payload=payload)\n helper.log_info(message)\n return response\n\n def kvstore_save_athlete(session_key, athlete_id, firstname, lastname, weight, ftp): # pylint: disable=too-many-arguments\n \"\"\"Stores athlete's id, first name, last name, weight and ftp into strava_athlete KV Store collection.\"\"\"\n url = 'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/storage/collections/data/strava_athlete/batch_save'\n headers = {'Content-Type': 'application/json', 'Authorization': f'Splunk {session_key}'}\n payload = [{\"_key\": athlete_id, \"id\": athlete_id, \"firstname\": firstname, \"lastname\": lastname, \"fullname\": firstname + \" \" + lastname, \"weight\": weight, \"ftp\": ftp}]\n helper.send_http_request(url, \"POST\", headers=headers, payload=payload, verify=False, 
use_proxy=False)\n\n def parse_data(data, activity_id, activity_start_date):\n \"\"\"Gets raw JSON data, parses it into events and writes those to Splunk.\"\"\"\n data_dict = {}\n final_dict = {}\n for i in data:\n data_dict[i['type']] = i['data']\n\n counter = 1\n nrange = len(data_dict['time'])\n for item in range(1, nrange + 1):\n final_dict[item] = {}\n\n for key, value in data_dict.items():\n counter = 1\n for i in value:\n final_dict[counter][key] = i\n final_dict[counter]['activity_id'] = activity_id\n\n if 'time' in key:\n final_dict[counter]['time'] = final_dict[counter]['time'] + activity_start_date\n final_dict[counter]['time'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(final_dict[counter]['time']))\n\n if 'latlng' in key:\n final_dict[counter]['lat'] = final_dict[counter]['latlng'][0]\n final_dict[counter]['lon'] = final_dict[counter]['latlng'][1]\n final_dict[counter].pop('latlng')\n counter += 1\n\n result_list = [value for key, value in final_dict.items()]\n\n for event in result_list:\n write_to_splunk(index=helper.get_output_index(), sourcetype='strava:activities:stream', data=json.dumps(event))\n\n helper.log_info(f'Added activity stream {activity_id} for {athlete_id}.')\n return True\n\n def return_json(url, method, **kwargs):\n \"\"\"Gets JSON from URL and parses it for potential error messages.\"\"\"\n response = helper.send_http_request(url, method, use_proxy=False, **kwargs)\n\n try:\n response.raise_for_status()\n except requests.HTTPError as ex:\n # status code 429 means we hit Strava's API limit, wait till next 15 minute mark (+5 seconds) and try again\n if ex.response.status_code == 429:\n # Get the 15m/24h API limits for this user\n api_usage_15m = response.headers['X-RateLimit-Usage'].split(\",\")[0]\n api_usage_24h = response.headers['X-RateLimit-Usage'].split(\",\")[1]\n api_limit_15m = response.headers['X-RateLimit-Limit'].split(\",\")[0]\n api_limit_24h = response.headers['X-RateLimit-Limit'].split(\",\")[1]\n\n timestamp_now = int(time.time())\n modulus_time = timestamp_now % 900\n sleepy_time = 0 if modulus_time == 0 else (900 - modulus_time + 5)\n helper.log_warning(f'Strava API rate limit hit. Used {api_usage_15m}/15min (limit {api_limit_15m}), {api_usage_24h}/24h (limit {api_limit_24h}). Sleeping for {sleepy_time} seconds.')\n time.sleep(sleepy_time)\n response = return_json(url, method, **kwargs)\n helper.log_debug(f'429 detail: {response}')\n return response\n if ex.response.status_code in (400, 401):\n helper.log_error(f'{ex.response.status_code} Error: Strava API credentials invalid or session expired. Make sure Client ID & Client Secret have been added to the Configuration -> Add-On Parameters tab and your access code is valid.')\n sys.exit(1)\n if ex.response.status_code == 404:\n helper.log_warning(f'404 Error: no stream data for url {url}, can happen for manually added activities.')\n return False\n if ex.response.status_code == 500:\n helper.log_warning(f'500 Error: no data received from Strava API for url {url}, it might be corrupt or invalid. 
Skipping activity.')\n return False\n # In case there's any other error than the ones described above, log the error and exit.\n helper.log_error(f'Error: {ex}')\n sys.exit(1)\n\n # Must have been a 200 status code\n return response.json()\n\n def set_athlete(response):\n \"\"\"Creates dict with athlete details, including token expiry.\"\"\"\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_token': response['refresh_token'],\n 'expires_at': response['expires_at'],\n 'ts_activity': 0}\n return athlete\n\n def write_to_splunk(**kwargs):\n \"\"\"Writes activity to Splunk index.\"\"\"\n event = helper.new_event(**kwargs)\n ew.write_event(event)\n\n # get configuration arguments\n client_id = helper.get_global_setting('client_id')\n client_secret = helper.get_global_setting('client_secret')\n access_code = helper.get_arg('access_code')\n start_time = helper.get_arg('start_time') or 0\n types = ['time', 'distance', 'latlng', 'altitude', 'velocity_smooth', 'heartrate', 'cadence', 'watts', 'temp', 'moving', 'grade_smooth']\n\n # stanza is the name of the input. This is a unique name and will be used as a checkpoint key to save/retrieve details about an athlete\n stanza = list(helper.get_input_stanza())[0]\n athlete = helper.get_check_point(stanza)\n helper.log_debug(f'Athlete: {athlete}')\n\n # if reindex_data checkbox is set, update the start_time to be the one specified and clear the checkbox.\n if helper.get_arg('reindex_data'):\n if int(helper.get_arg('reindex_data')) == 1:\n athlete.update({'ts_activity': start_time})\n helper.save_check_point(stanza, athlete)\n # the clear_checkbox function will restart this input as soon as the change is made, so no further code required.\n clear_checkbox(helper.context_meta['session_key'], stanza)\n\n # if athlete is set, get details & tokens - otherwise fetch tokens with get_token()\n if athlete:\n athlete_id = athlete['id']\n athlete_name = athlete['name']\n expires_at = athlete['expires_at']\n refresh_token = athlete['refresh_token']\n else:\n expires_at = False\n refresh_token = False\n\n # Check if expires_at token is set and renew token if token expired. 
Otherwise fetch token with initial access code.\n if expires_at:\n if time.time() >= expires_at:\n response = get_token(client_id, client_secret, refresh_token, renewal=True)\n helper.log_debug(f\"Access token: {response['access_token']}, refresh token: {response['refresh_token']}\")\n athlete.update({'access_token': response['access_token'], 'refresh_token': response['refresh_token'], 'expires_at': response['expires_at']})\n else:\n response = get_token(client_id, client_secret, access_code, renewal=False)\n athlete = set_athlete(response)\n athlete_id = athlete['id']\n athlete_name = athlete['name']\n\n helper.save_check_point(stanza, athlete)\n\n access_token = athlete['access_token']\n athlete_detail = get_athlete(access_token)\n athlete_firstname = athlete_detail['firstname']\n athlete_lastname = athlete_detail['lastname']\n athlete_weight = ''\n athlete_ftp = ''\n if athlete_detail['resource_state'] == 3:\n athlete_weight = athlete_detail['weight']\n athlete_ftp = athlete_detail['ftp']\n\n helper.log_debug(\"Saving athlete's details to KV Store.\")\n kvstore_save_athlete(helper.context_meta['session_key'], str(athlete_id), athlete_firstname, athlete_lastname, str(athlete_weight), str(athlete_ftp))\n\n # For backwards compatibility with upgrades from pre-2.5.0, which uses athlete['ts_newest_activity']. If there, clean them up.\n if 'ts_newest_activity' in athlete:\n helper.log_info(f\"Found existing timestamp {athlete['ts_newest_activity']}! Will remove it now.\")\n ts_activity = athlete['ts_newest_activity']\n athlete.update({'ts_activity': ts_activity})\n athlete.pop('ts_newest_activity')\n athlete.pop('get_old_activities')\n athlete.pop('ts_oldest_activity')\n helper.save_check_point(stanza, athlete)\n else:\n ts_activity = athlete['ts_activity'] or start_time\n\n # webhook_updates contains updated activities that came in via webhook.\n webhook_updates = helper.get_check_point('webhook_updates') or {}\n\n if str(athlete_id) in webhook_updates:\n for activity in webhook_updates[str(athlete_id)][:]:\n helper.log_info(f'Received update via webhook for activity {activity} from athlete {athlete_id}')\n response = get_activity(activity, access_token)\n ts_activity = get_epoch(response['start_date'])\n\n # Store the event in Splunk\n write_to_splunk(index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=json.dumps(response))\n\n # Get stream data for this activity and write to Splunk\n stream_data = get_activity_stream(access_token, activity, types)\n if stream_data:\n parse_data(stream_data, activity, ts_activity)\n\n # Remove from dict and save dict\n webhook_updates[str(athlete_id)].remove(activity)\n helper.save_check_point('webhook_updates', webhook_updates)\n helper.log_info(f'Got all webhook events for athlete {athlete_id}')\n\n helper.log_info(f'Checking if there are new activities for {athlete_name} ({athlete_id})')\n\n while True:\n\n response_activities = get_activities(ts_activity, access_token)\n\n # if all activities retrieved, set get_old_activities, save checkpoint and end loop to finish\n if len(response_activities) == 0: # pylint: disable=no-else-break\n helper.log_info(f'All done, got all activities for {athlete_name} ({athlete_id})')\n break\n else:\n # Get more details from each activity\n for event in response_activities:\n activity_id = event['id']\n response = get_activity(activity_id, access_token)\n\n # response = False for a 500 Error, which is likely an invalid Strava API file. 
In that case skip the activity and continue.\n if response:\n data = json.dumps(response)\n\n # Get start_date (UTC) and convert to UTC timestamp\n ts_activity = get_epoch(event['start_date'])\n\n # Store the event in Splunk\n write_to_splunk(index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n helper.log_info(f'Added activity {activity_id} for {athlete_id}.')\n\n # Get stream data for this activity\n stream_data = get_activity_stream(access_token, activity_id, types)\n if stream_data:\n parse_data(stream_data, activity_id, ts_activity)\n\n # Save the timestamp of the last event to a checkpoint\n athlete.update({'ts_activity': ts_activity})\n helper.save_check_point(stanza, athlete)",
"def main():\n credentials = get_credentials()\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n max = 7\n events = getEvents(credentials, now, max)\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])\n #addEvent(credentials)",
"def eventList(filterStr=\"\"):\n\tfilterStr = filterStr.upper()\n\tevents = [i for i in dir(cv2) if 'EVENT' in i and filterStr in i]\n\treturn events",
"def filtered(config, time, utc, a, tk):\n if not config.store:\n try:\n cert = tk.read()\n token = _collect_token(config, cert)\n except:\n click.secho(\"No token\", fg='red')\n else:\n _check_options(config, \"events\", time, utc, a, token)\n else:\n try:\n token = config._set_token()\n except:\n click.secho(\"No token\", fg='red')\n else:\n _check_options(config, \"events\", time, utc, a, token)",
"def main():\r\n credentials = get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n usrservice = discovery.build('admin', 'directory_v1', http=http)\r\n calservice = discovery.build('calendar', 'v3', http=http)\r\n\r\n if not args.inuser:\r\n parser.parse_args(['-h'])\r\n else:\r\n source_user = args.inuser.strip()\r\n if '@bis.gov.uk' in source_user:\r\n print(source_user)\r\n else:\r\n parser.parse_args(['-h'])\r\n \r\n if args.action == 'show':\r\n show_events(usrservice,calservice)\r\n if args.action == 'delete':\r\n delete_events(usrservice,calservice) \r\n if args.action == 'move' \\\r\n and args.destuser:\r\n move_events(usrservice,calservice)\r\n else:\r\n parser.parse_args(['-h'])\r\n \r\n #print(args.action)\r",
"def search(request):\n template_var = base_template_vals(request)\n if request.method==\"GET\":\n event_id_list = []\n events_found = []\n events_found_advanced = []\n events_found_basic = []\n checkbox_session = []\n advanced_search = False\n tags = Tag.objects.all()\n template_var[\"tags\"] = tags\n\n query_tags = request.GET.getlist('query_tag')\n if len(query_tags) > 0: # at least one checkbox selected\n advanced_search = True\n query_tag_list = []\n \n #strip >>u''<< prefix\n for query_tag in query_tags:\n checkbox_session.append(str(query_tag))\n query_tag_list.append(str(query_tag))\n \n events_found_advanced = Event.objects.filter(is_approved=True\n ).filter(tags__name__in=query_tag_list)\n \n query = request.GET.get('query', '').strip('\\t\\n\\r')\n if query == '' : #first came in/accidentily type space/..\n if advanced_search:\n events_found_advanced = events_found_advanced #TODO: Bin this line does nothing.\n else:\n events_found_basic = Event.objects.filter(is_approved=True\n ).order_by(\"-created\") \n else:\n if advanced_search:\n events_found_advanced = events_found_advanced.filter(\n tags__name__in=[query])\n else:\n events_found_basic = Event.objects.filter(\n tags__name__in=[query])\n\n #finally\n if advanced_search:\n #take out duplicated result\n for event_found_advanced in events_found_advanced:\n if(event_found_advanced.id not in event_id_list):\n event_id_list.append(event_found_advanced.id)\n events_found.append(event_found_advanced)\n else:\n for event in events_found_basic:\n events_found.append(event)\n\n #sort result\n sorted_method = request.GET.get('sorted_method', 'desc') #default is desc\n if sorted_method == 'desc':\n events_found.sort(key=lambda event: event.event_time, reverse=True) \n elif sorted_method == 'asc':\n events_found.sort(key=lambda event: event.event_time,\n reverse=False) \n elif sorted_method == 'alphabet':\n events_found.sort(key=lambda event: event.title.lower(),\n reverse=False) \n \n template_var[\"events_found\"] = events_found\n\n request.session[\"sorted_method_session\"] = sorted_method\n request.session[\"checkbox_session\"] = checkbox_session\n request.session[\"query_session\"] = query\n return render_to_response(\"event/event_search_results.html\",\n template_var,\n context_instance=RequestContext(request))\n\n return render_to_response(\"event/event_search_results.html\", template_var,\n context_instance=RequestContext(request))",
"def get_Events(input, request):\n \n t_event_1 = datetime.now()\n \n global events\n \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n \n if os.path.exists(eventpath) == True:\n print '--------------------------------------------------------'\n \n if raw_input('Folder for requested Period:' + '\\n' + \\\n str(eventpath) + \\\n '\\n' + 'exists in your directory.' + '\\n\\n' + \\\n 'You could either:' + '\\n' + 'N: Close the program and try the ' + \\\n 'updating mode.' + '\\n' + \\\n 'Y: Remove the tree, continue the program ' + \\\n 'and download again.' + \\\n '\\n\\n' + 'Do you want to continue? (Y/N)' + '\\n') == 'Y':\n print '--------------------------------------------------------'\n shutil.rmtree(eventpath)\n os.makedirs(eventpath)\n \n else:\n print '--------------------------------------------------------'\n print 'So...you decided to update your folder...Ciao'\n print '--------------------------------------------------------'\n sys.exit()\n \n else:\n os.makedirs(eventpath)\n \n events = events_info(request)\n \n os.makedirs(os.path.join(eventpath, 'EVENT'))\n len_events = len(events)\n \n print 'Length of the events found based on the inputs: ' + \\\n str(len_events) + '\\n'\n \n for i in range(0, len_events):\n print \"Event No:\" + \" \" + str(i+1)\n print \"Date Time:\" + \" \" + str(events[i]['datetime'])\n print \"Depth:\" + \" \" + str(events[i]['depth'])\n print \"Event-ID:\" + \" \" + events[i]['event_id']\n try:\n print \"Flynn-Region:\" + \" \" + events[i]['flynn_region']\n except Exception, e:\n print \"Flynn-Region:\" + \" \" + \"NONE\"\n print \"Latitude:\" + \" \" + str(events[i]['latitude'])\n print \"Longitude:\" + \" \" + str(events[i]['longitude'])\n print \"Magnitude:\" + \" \" + str(events[i]['magnitude'])\n print \"-------------------------------------------------\"\n \n Event_cat = open(os.path.join(eventpath, 'EVENT', 'EVENT-CATALOG'), 'a+')\n Event_cat.writelines(str(Period) + '\\n')\n Event_cat.writelines('-------------------------------------' + '\\n')\n Event_cat.writelines('Information about the requested Events:' + '\\n\\n')\n Event_cat.writelines('Number of Events: ' + str(len_events) + '\\n')\n Event_cat.writelines('min datetime: ' + str(input['min_date']) + '\\n')\n Event_cat.writelines('max datetime: ' + str(input['max_date']) + '\\n')\n Event_cat.writelines('min magnitude: ' + str(input['min_mag']) + '\\n')\n Event_cat.writelines('max magnitude: ' + str(input['max_mag']) + '\\n')\n Event_cat.writelines('min latitude: ' + str(input['evlatmin']) + '\\n')\n Event_cat.writelines('max latitude: ' + str(input['evlatmax']) + '\\n')\n Event_cat.writelines('min longitude: ' + str(input['evlonmin']) + '\\n')\n Event_cat.writelines('max longitude: ' + str(input['evlonmax']) + '\\n')\n Event_cat.writelines('min depth: ' + str(input['min_depth']) + '\\n')\n Event_cat.writelines('max depth: ' + str(input['max_depth']) + '\\n')\n Event_cat.writelines('-------------------------------------' + '\\n\\n')\n Event_cat.close()\n \n \n for j in range(0, len_events):\n Event_cat = open(os.path.join(eventpath, 'EVENT', 'EVENT-CATALOG'), 'a')\n Event_cat.writelines(\"Event No: \" + str(j) + '\\n')\n Event_cat.writelines(\"Event-ID: \" + str(events[j]['event_id']) + '\\n')\n Event_cat.writelines(\"Date Time: \" + str(events[j]['datetime']) + '\\n')\n Event_cat.writelines(\"Magnitude: \" + 
str(events[j]['magnitude']) + '\\n')\n Event_cat.writelines(\"Depth: \" + str(events[j]['depth']) + '\\n')\n Event_cat.writelines(\"Latitude: \" + str(events[j]['latitude']) + '\\n')\n Event_cat.writelines(\"Longitude: \" + str(events[j]['longitude']) + '\\n')\n \n try:\n Event_cat.writelines(\"Flynn-Region: \" + \\\n str(events[j]['flynn_region']) + '\\n')\n \n except Exception, e:\n Event_cat.writelines(\"Flynn-Region: \" + 'None' + '\\n')\n \n Event_cat.writelines('-------------------------------------' + '\\n')\n Event_cat.close()\n \n Event_file = open(os.path.join(eventpath, 'EVENT', 'event_list'), 'a+')\n pickle.dump(events, Event_file)\n Event_file.close()\n \n print 'Events are saved!'\n \n print 'Length of events: ' + str(len_events) + '\\n'\n \n t_event_2 = datetime.now()\n t_event = t_event_2 - t_event_1\n \n print 'Time for getting and saving the events:'\n print t_event\n \n return events",
"def parse(self, response):\n json_response = loads(response.text)\n token = json_response[\"Token\"]\n # api_server = json_response[\"ApiServer\"]\n api_server = \"https://awsapieast1-prod2.schoolwires.com/REST/\"\n api_gateway = api_server + \"api/v4/\"\n api_function = \"CalendarEvents/GetEvents/1?\"\n start_date = \"2019-02-01\"\n today = datetime.today()\n\n e = today.replace(\n year=today.year + 10,\n month=1,\n day=1,\n hour=0,\n minute=0,\n second=1,\n microsecond=1,\n )\n # the end date will be ten years from the date that the script runs\n end_date = str(e.year) + \"-\" + str(e.month).zfill(2) + \"-\" + str(e.day).zfill(2)\n dates = \"StartDate={}&EndDate={}\".format(start_date, end_date)\n modules = \"&ModuleInstanceFilter=\"\n\n # this line is to filter just school board meetings.\n category_filters = (\n \"0-49-40-21-16-4-3-44-39-1-57-43-64-65-58-62-28-25-\"\n \"52-50-55-38-59-17-13-51-56-8-63-53-37-54-7-47-46-33-60-10-19-66-61-48-34-45-41-42-\"\n )\n\n category = \"&CategoryFilter={}\".format(category_filters)\n dbstream = \"&IsDBStreamAndShowAll=true\"\n url = api_gateway + api_function + dates + modules + category + dbstream\n headers = {\"Authorization\": \"Bearer \" + token, \"Accept\": \"application/json\"}\n req = Request(url, headers=headers, callback=self._parse_api)\n\n yield req",
"def test_get_events(self):\n\n request_params = {\n \"token\": EVENTBRITE_API_KEY,\n \"location.latitude\": \"37.4192008972\",\n \"location.longitude\": \"-122.057403564\",\n \"location.within\": \"20mi\",\n \"sort_by\": \"date\"\n }\n url_encoded_request_params = _update_urlencode_request_params(\"103,109\", 1, request_params)\n events_list, page_count = _get_events(url_encoded_request_params)\n self.assertTrue(type(events_list) is list)\n self.assertTrue(type(page_count) is int)",
"def searchForEvents(self, search_args, onProgress):\n print('[EventFinder]: Search For Events called. Checking how many pages to crawl...')\n pages = self.get_total_pages_to_search(search_args)\n urls = [self.assembleRequest(search_args, p) for p in range(1, pages + 1)]\n\n print('[EventFinder]: Crawling %d pages from the eventful api...' % pages)\n start_ms = time_ms()\n\n for u in urls:\n response = requests.get(u)\n events = self.parse_events(response)\n onProgress(events)\n\n print('[EventFinder]: Crawling took ' + str(time_ms() - start_ms) + ' ms')",
"def _get_events_and_planning(self, request, query, search_filter):\n # params = request.args or MultiDict()\n # query = construct_combined_search_query(params)\n page = request.page or 1\n max_results = self._get_page_size(request, search_filter)\n req = ParsedRequest()\n req.args = MultiDict()\n req.args[\"source\"] = json.dumps(\n {\n \"query\": query[\"query\"],\n \"sort\": query[\"sort\"] if query.get(\"sort\") else self._get_sort(),\n \"size\": int((5 * max_results) * math.ceil(page / 3)),\n }\n )\n req.args[\"projections\"] = json.dumps([\"_id\", \"type\", \"event_item\"])\n req.page = page\n req.max_results = max_results\n req.exec_on_fetched_resource = False # don't call on_fetched_resource\n return get_resource_service(\"planning_search\").get(req=req, lookup=None)",
"def lambda_handler(event, context):\n\n def get_query_arguments(event):\n query = event.get(\"queryStringParameters\")\n args = {}\n if query:\n if query.get(\"excludeIds\"):\n args[\"excludeIds\"] = query[\"excludeIds\"]\n else:\n args[\"excludeIds\"] = []\n args[\"UserId\"] = query.get(\"userId\")\n args[\"Course\"] = query.get(\"course\")\n return args\n\n ## not needed..\n # def get_topic_of_answer(answer):\n # \"\"\"\n # Given an answer_id, returns the topic as a dict\n # Arguments:\n # - answer (dict)\n # Returns:\n # - topic (dict)\n # \"\"\"\n # question = get_question_of_answer(answer)\n # topic = get_topic_of_question(question)\n # return topic\n\n def get_topic_of_question(question):\n \"\"\"\n Given an question, return the topic as dict\n Arguments:\n - question (dict)\n Returns:\n - question (dict)\n \"\"\"\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n topic_table = dynamodb.Table(\"Topics\")\n\n topic_id = question.get(\"TopicId\")\n # query topic_id of the question\n try:\n response = topic_table.get_item(Key={\"TopicId\": topic_id})\n topic = response[\"Item\"]\n except:\n print(\"No topic found, returning None..\")\n return None\n return topic\n\n def get_question_of_answer(answer):\n \"\"\"\n Given an answer, return the topic as dict\n Arguments:\n - answer (dict)\n Returns:\n - question (dict)\n \"\"\"\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n question_table = dynamodb.Table(\"Questions\")\n\n question_id = answer.get(\"QuestionId\")\n # query topic_id of the question\n try:\n response = question_table.get_item(Key={\"QuestionId\": question_id})\n question = response[\"Item\"]\n except:\n print(\"No question found, returning None..\")\n return None\n return question\n\n def get_user_answers(user_id):\n \"\"\"\n Returns the answers of a user.\n Arguments:\n - user_id (str)\n Returns:\n - answers (list of dicts)\n \"\"\"\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n answer_table = dynamodb.Table(\"Answers\")\n\n filterexpression = Attr(\"UserId\").eq(user_id)\n response = answer_table.scan(FilterExpression=filterexpression)\n answers = response.get(\"Items\")\n\n return answers\n\n def choose_questions_to_rehearse(answers, exclude_ids):\n\n \"\"\"\n Chooses questions that should be rehearsed\n Arguments:\n - answers: list of dicts\n - exclude_ids: list of question_ids to ignore\n Returns:\n - questionList: list of dicts \n \"\"\"\n\n def gt(dt_str):\n # converts datetime string to datetime object...\n # https://stackoverflow.com/a/28332149\n dt, _, us = dt_str.partition(\".\")\n dt = datetime.datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%S\")\n us = int(us.rstrip(\"Z\"), 10)\n return dt + datetime.timedelta(microseconds=us)\n\n # Choose questions to rehearse\n questionList = []\n for answer in answers:\n # ignore those in exclude_ids\n if answer[\"QuestionId\"] in exclude_ids:\n continue\n # check if its time to rehearse!\n if \"do_again\" in answer:\n now = datetime.datetime.utcnow()\n do_again_datetime = gt(answer[\"do_again\"])\n if do_again_datetime <= now:\n do_again = True\n else:\n do_again = False\n else:\n do_again = False # tyhmää debuggausta. 
jollain entryilla puuttuu, siks kai kusee\n\n # if time to rehearse\n # OR\n # KnowledgeList is [] meaning that question hasn't been checked yet\n if do_again or answer.get(\"KnowledgeList\") == []:\n question = get_question_of_answer(answer)\n questionList.append(question)\n return questionList\n\n def get_questions_of_topic(topic):\n \"\"\"\n Returns questions related to a topic\n\n Arguments:\n - topic, dict\n Returns:\n - questionList, list of dicts\n \"\"\"\n\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n question_table = dynamodb.Table(\"Questions\")\n\n fe = Attr(\"TopicId\").eq(topic.get(\"TopicId\"))\n response = question_table.scan(FilterExpression=fe)\n questions = response.get(\"Items\")\n return questions\n\n def determine_next_topic(max_topic_order, course):\n \"\"\"\n Determine the next topic\n Arguments:\n - max_topic_order: int of previous max topic order\n - course: str, coursename eg Kemia1\n Returns:\n - next topic_id\n \"\"\"\n\n # get all topics\n\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n topic_table = dynamodb.Table(\"Topics\")\n\n # max_topic_order can be None if no course started yet.\n # Return the topic with lowest topic order\n if not max_topic_order:\n response = topic_table.scan(FilterExpression=Attr(\"Course\").eq(course))\n all_topics = response.get(\"Items\")\n next_topic = min(all_topics, key=lambda d: d[\"TopicOrder\"])\n else:\n fe = Attr(\"TopicOrder\").gt(max_topic_order) & Attr(\"Course\").eq(course)\n response = topic_table.scan(FilterExpression=fe)\n topics = response.get(\"Items\")\n\n if topics:\n # https://stackoverflow.com/questions/30546889/get-max-value-index-for-a-list-of-dicts\n next_topic = min(topics, key=lambda d: d[\"TopicOrder\"])\n else:\n # If the current topic is already the last one, return a random one\n # TODO\n # Calculate topif for which the knowledge is worse. Give those problems.\n #\n response = topic_table.scan(FilterExpression=Attr(\"Course\").eq(course))\n all_topics = response.get(\"Items\")\n next_topic = random.sample(population=all_topics, k=1)\n\n return next_topic\n\n def put_new_questions_to_answers_table(questionList, user_id):\n \"\"\"\n Takes in a list of questions, the user_id, and populated answer_table\n with new questions for the user\n\n Arguments:\n - questionList: list of dicts, questions\n - user_id: str, user_id \n \"\"\"\n\n # Put questions to answers_table\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n answer_table = dynamodb.Table(\"Answers\")\n\n now = datetime.datetime.utcnow().isoformat()\n with answer_table.batch_writer() as batch:\n for question in questionList:\n answer_to_add = {\n \"UserId\": user_id,\n \"AnswerId\": \"{}_{}\".format(user_id, question[\"QuestionId\"]),\n \"QuestionId\": question[\"QuestionId\"],\n \"Time\": now,\n \"do_again\": datetime.datetime.utcnow().isoformat(),\n \"KnowledgeList\": [],\n }\n batch.put_item(Item=answer_to_add)\n\n args = get_query_arguments(event)\n if None in args.values() or not args:\n print(\"Values are missing from answer-dict! Aborting\")\n return {\n \"statusCode\": 400,\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \"Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token\",\n \"Access-Control-Allow-Credentials\": \"true\",\n \"Content-Type\": \"application/json\",\n },\n \"body\": json.dumps({\"message\": \"Invalid query! 
\" + str(args.items())}),\n }\n\n # Get users answers from answers_table\n old_answers_of_user = get_user_answers(args[\"UserId\"])\n questionList = choose_questions_to_rehearse(old_answers_of_user, args[\"excludeIds\"])\n\n if len(questionList) <= 5:\n topic_orders = [\n get_topic_of_question(question).get(\"TopicOrder\")\n for question in questionList\n ]\n # this should be done in a cleaner way xD\n if topic_orders:\n max_topic_order = max(topic_orders)\n else:\n max_topic_order = None # determine next topic deals with None!\n next_topic = determine_next_topic(max_topic_order, args[\"Course\"])\n new_questions = get_questions_of_topic(next_topic)\n if args[\"UserId\"] != \"unknown\":\n put_new_questions_to_answers_table(new_questions, args[\"UserId\"])\n\n # Add the new questions to question list\n questionList += new_questions\n\n # returns questions to recap\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \"Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token\",\n \"Access-Control-Allow-Credentials\": \"true\",\n \"Content-Type\": \"application/json\",\n },\n \"body\": json.dumps(questionList),\n }",
"def __call__(self, event):\n post_event(event, self.baseUrl, self.filterName)",
"def test_filter_user_and_room(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?user_id=%s&room_id=%s\" % (self.other_user, self.room_id1),\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 5)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertNotIn(\"next_token\", channel.json_body)\n self._check_fields(channel.json_body[\"event_reports\"])\n\n for report in channel.json_body[\"event_reports\"]:\n self.assertEqual(report[\"user_id\"], self.other_user)\n self.assertEqual(report[\"room_id\"], self.room_id1)",
"def __init__(self, events):\n self.events = events",
"def filter(self, filters):",
"def __init__(self, filters, event_file_path, device_name):\n super().__init__(device_name=device_name)\n self._filters_dict = {}\n self.event_file_path = event_file_path\n self.load_filters(filters)",
"def event_list(request):\n if request.method == 'GET':\n if request.GET.get('search'):\n request_terms = request.GET.get('search')\n search_terms_array = request_terms.split()\n\n initial_term = search_terms_array[0]\n event_list = Event.objects.annotate(\n num_participants=Count('participants', distinct=True),\n num_collaborators=Count('collaborators', distinct=True)).filter(\n Q(title__icontains=initial_term) |\n Q(description__icontains=initial_term))\n if len(search_terms_array) > 1:\n for term in range(1, len(search_terms_array)):\n event_list = event_list.filter(Q(title__icontains=search_terms_array[term]) |\n Q(description__icontains=search_terms_array[term]))\n else:\n event_list = Event.objects.annotate(\n num_participants=Count('participants', distinct=True),\n num_collaborators=Count('collaborators', distinct=True)).all()\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(event_list, request)\n serializer = EventSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def validate_and_prepare_query_for_event_list(args: Dict) -> str:\n args = validate_common_arguments(args)\n acknowledged = args.get(\"acknowledged\", \"\")\n event_type = argToList(args.get(\"event_type\", []), ',')\n node = argToList(args.get(\"node\", []), ',')\n event_ids = argToList(args.get('event_id', []), ',')\n event_ids = list(map(arg_to_number, event_ids))\n if None in event_ids:\n raise ValueError(ERR_MSG['ID_ERROR'].format('event_id'))\n args['event_id'] = event_ids\n page = args.get(\"page\", 0)\n limit = args.get(\"limit\", 50)\n sort_key = args.get(\"sort_key\", \"EventID\")\n sort_order = \"DESC\" if args.get(\"sort_order\", \"ascending\").lower() == \"descending\" else \"ASC\"\n query = QUERY_PARAM[\"GET_EVENTS\"]\n where_added = False\n if acknowledged and acknowledged.lower() not in ['true', 'false']:\n raise ValueError(ERR_MSG[\"ACKNOWLEDGED\"])\n elif acknowledged:\n query += f\" WHERE Acknowledged = {acknowledged}\"\n where_added = True\n if event_type:\n query += \" AND \" if where_added else \" WHERE \"\n query += \"( \" + \" OR \".join(\n [f\"EventTypeName = '{event_type_name}'\" for event_type_name in event_type]) + \" )\"\n where_added = True\n if node:\n query += \" AND \" if where_added else \" WHERE \"\n query += \"( \" + \" OR \".join(\n [f\"Node = '{node_name}'\" for node_name in node]) + \" )\"\n if event_ids:\n query += \" AND \" if where_added else \" WHERE \"\n query += \"( \" + \" OR \".join(\n [f\"EventID = {event_id}\" for event_id in event_ids]) + \" )\"\n\n query += f\" ORDER BY {sort_key} {sort_order} WITH ROWS {(page * limit) + 1} TO {(page + 1) * limit}\"\n return query",
"def events(self, p=_P, start=0, end=0, limit=0, skip=0, page=1,\n category='all', context='all'):\n\n self._valdiate_param(start=start, end=end,\n limit=limit, skip=skip, page=page)\n \n q = {\"itype\": \"event\", **dtresolve(start, end)}\n\n if category != \"all\":\n self._validate_param(category=category)\n q['category'] = category\n\n if context != \"all\":\n self._validate_param(context=context)\n switch = {'all': '_ref', 'benign': '_ben_ref',\n 'malicious': '_mal_ref', 'unknown': '_ref'}\n q[switch.get(context)] = self._hash\n if context == 'unknown':\n q['_ben_ref'] = {'$ne': self._hash}\n q['_mal_ref'] = {'$ne': self._hash}\n \n return self._backend.find(q, p, **limitskip(limit, skip, page))"
] | [
"0.53511065",
"0.51666886",
"0.5161581",
"0.5099042",
"0.5078286",
"0.50425583",
"0.5039122",
"0.4984391",
"0.49639994",
"0.49617496",
"0.49603945",
"0.49006483",
"0.48956084",
"0.48830333",
"0.48448077",
"0.48447883",
"0.48361924",
"0.4832409",
"0.48313853",
"0.47923324",
"0.47858644",
"0.47792953",
"0.477384",
"0.47614926",
"0.4760945",
"0.4760667",
"0.4754924",
"0.47520146",
"0.47497258",
"0.4748948"
] | 0.5174677 | 1 |
Calculate the upstream distance and downstream distance to the nearest DNA TE from the specified gene. | def calculate_distance(geneid, genes, tes):
    # Get which chromosome
    for c in genes:
        if geneid in genes[c]:
            chromosome = c
            break
    # Get the gene position
    genestart, geneend = genes[chromosome][geneid]
    # if the gene chromosome does not have any TEs, return NA
    if chromosome not in tes:
        return ('NA', 'NA', 'NA', 'NA')
    # Then get the TE that is closest downstream. We do this by iterating
    # forwards, then breaking when we find a TE that starts after the end of
    # the gene.
    for downstream in sorted(list(tes[chromosome].items()), key=lambda x: x[1][0]):
        if downstream[1][0] > geneend:
            break
    # And get the TE that is closest upstream. We do this by using the same
    # strategy as above, but in reverse
    for upstream in reversed(sorted(list(tes[chromosome].items()), key=lambda x: x[1][0])):
        if upstream[1][1] < genestart:
            break
    # Then calculate the distances
    # Gene start - TE end and TE start - gene end
    dist_upstream = genestart - upstream[1][1]
    dist_downstream = downstream[1][0] - geneend
    # then return it all
    return (upstream[0], downstream[0], dist_upstream, dist_downstream) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))",
"def get_distance_pandas(_gene, apeak):\n if _gene['4'] < apeak.left:\n return apeak.left - _gene['4']\n if apeak.right < _gene['3']:\n return apeak.right - _gene['3']\n return 0",
"def get_distance_pandas(_gene, apeak):\n if _gene['4'] < apeak.left:\n return apeak.left - _gene['4']\n if apeak.right < _gene['3']:\n return apeak.right - _gene['3']\n return 0",
"def calcDistance(self, left, right):\n\n return math.fabs(right-left)",
"def calculate_distance_edge(self):\n if self.mu > 0:\n # right interface is intersected next\n dx = self.cell_xr - self.x\n self.next_cell_index = self.cell_index + 1\n else:\n # left interface is intersected next\n dx = self.cell_xl - self.x\n self.next_cell_index = self.cell_index - 1\n\n return dx / self.mu",
"def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre",
"def compute_distance(df):\n pass",
"def _calculate_distance(self, passenger, driver):\n londriver, latdriver = driver['lon'], driver['lat']\n lonpassenger, latpassenger = passenger['lon'], passenger['lat']\n lon_p, lat_p, lon_d, lat_d = map(radians,\n [float(lonpassenger), float(latpassenger), float(londriver), float(latdriver)])\n lon_distance = lon_d - lon_p\n lat_distance = lat_d - lat_p\n a = sin(lat_distance / 2) ** 2 + cos(lat_p) * cos(lat_d) * sin(lon_distance / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km",
"def _calculate_distance(self, passenger, driver):\n londriver, latdriver = driver['lon'], driver['lat']\n lonpassenger, latpassenger = passenger['lon'], passenger['lat']\n lon_p, lat_p, lon_d, lat_d = map(radians,\n [float(lonpassenger), float(latpassenger), float(londriver), float(latdriver)])\n lon_distance = lon_d - lon_p\n lat_distance = lat_d - lat_p\n a = sin(lat_distance / 2) ** 2 + cos(lat_p) * cos(lat_d) * sin(lon_distance / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km",
"def find_min_gene_distance(sequence_record, starting_values=None):\n min_distance = len(sequence_record.seq) if starting_values is None else starting_values[0]\n min_gene1 = 'none' if starting_values is None else starting_values[1]\n min_gene2 = 'none' if starting_values is None else starting_values[2]\n all_gene_positions = []\n for gene in sequence_record.features:\n # BCBio uses 0-based and end-exclusive positions (first-third base is bases 0,1,2, i.e range 0-3), \n # so add 1 to start and keep end as is to convert to 1-based-end-inclusive\n all_gene_positions.append((gene.location.start.position, gene.location.end.position-1, gene.id))\n all_gene_positions.sort()\n for (_,gene1_end,gene1_name), (gene2_start,_,gene2_name) in itertools.izip(all_gene_positions,all_gene_positions[1:]):\n # subtract 1 from distance, so if gene1 is 1-4 and gene2 is 5-9 the distance is 0\n gene_distance = gene2_start - gene1_end - 1\n if gene_distance < min_distance:\n min_distance = gene_distance \n min_gene1, min_gene2 = gene1_name, gene2_name\n return min_distance, min_gene1, min_gene2",
"def _computeDistance(self, mote, neighbor):\n\n return 1000*math.sqrt((mote.x - neighbor.x)**2 +\n (mote.y - neighbor.y)**2)",
"def _computeDistance(self, mote, neighbor):\n\n return 1000*math.sqrt((mote.x - neighbor.x)**2 +\n (mote.y - neighbor.y)**2)",
"def cost_distance(e):\n # Make sure we have a proper edge with two vertices\n if len(e) != 2:\n raise ValueError\n\n a = V_coord[e[0]]\n b = V_coord[e[1]]\n\n # Return the distance between two points\n return distance(a, b)",
"def calc_distance_efficiency(edgenode1: NodeID, edgenode2: NodeID, endnode: NodeID):\n \n (edgenode1_x, edgenode1_y) = G.nodes[edgenode1]['x'], G.nodes[edgenode1]['y']\n (edgenode2_x, edgenode2_y) = G.nodes[edgenode2]['x'], G.nodes[edgenode2]['y']\n (endnode_x, endnode_y) = G.nodes[endnode]['x'], G.nodes[endnode]['y']\n \n distance_edgenode1_edgenode2 = np.sqrt(np.square(edgenode1_x - edgenode2_x) + np.square(edgenode1_y - edgenode2_y))\n distance_edgenode1_end = np.sqrt(np.square(edgenode1_x - endnode_x) + np.square(edgenode1_y - endnode_y))\n distance_edgenode2_end = np.sqrt(np.square(endnode_x - edgenode2_x) + np.square(endnode_y - edgenode2_y))\n\n progress_to_end = distance_edgenode1_end - distance_edgenode2_end\n \n efficiency_calculation = (progress_to_end/distance_edgenode1_edgenode2)\n return efficiency_calculation",
"def compute_distance(node1, node2):\n return np.linalg.norm(node1 - node2)",
"def dist(self, node_0, node_1):\n coord_0, coord_1 = self.coords[node_0], self.coords[node_1]\n return math.sqrt((coord_0[0] - coord_1[0]) ** 2 + (coord_0[1] - coord_1[1]) ** 2)",
"def getEdgeDistance():\n '''\n a\n ◿\n b c\n\n hypotenuse\n ◿ adjacent\n opposite\n\n tan(a) = opposite/adjacent\n adjacent * tan(a) = opposite\n '''\n\n # An estimated multiplier to take into account the larger infrared dot\n # observed when further away from as surface - think torch beam onto a\n # wall getting larger as it gets further away, but only the radius\n # (center downwards) being relevant.\n # TODO: Maybe move into infrared sensor code?\n MULTI = 1.2\n\n edgeDistance = BOT_HEIGHT * math.tan(math.radians(getEdgeAngle()))\n edgeDistance *= MULTI\n\n if DEBUG:\n print \"Distance to edge: \", int(round(edgeDistance))\n\n return edgeDistance",
"def gen_dist(genes):\n\n # First generate an NxNxB matrix that has False where\n # i and j individuals have the same kth gene and True\n # otherwise (XOR operation). Then sum along\n # the genome axis to get distance\n return np.sum(genes[:,None,:] ^ genes, axis=-1)",
"def calculate_distance(srcLong, srcLat, dstLong, dstLat):\n return math.sqrt( (srcLong-dstLong) ** 2 + (srcLat - dstLat) ** 2)",
"def get_distance(self, node):\n return np.sqrt(\n (self.x - node.x) ** 2 +\n (self.y - node.y) ** 2\n )",
"def calculate_distance_edge(self):\n mu_star = -np.sqrt(1. - (self.cell_xl / self.x)**2)\n\n if self.mu <= mu_star:\n\n l_edge = (-self.mu * self.x -\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xl**2))\n self.next_cell_index = self.cell_index - 1\n\n else:\n\n l_edge = (-self.mu * self.x +\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xr**2))\n self.next_cell_index = self.cell_index + 1\n\n return l_edge",
"def calculate_distance_electron(electron_density, tau_event):\n # add full_relativity here\n return tau_event / (electron_density * numba_config.SIGMA_THOMSON)",
"def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance",
"def node_distance(self, node1, node2):\n if node1 == node2:\n return 0.0\n for i, (n1, n2) in enumerate(zip(self.paths[node1], self.paths[node2])):\n if n1 != n2:\n break\n else:\n i = min(len(self.paths[node1]), len(self.paths[node2]))\n return sum(self.path_dists[node1][i:]) + sum(self.path_dists[node2][i:])",
"def dist(self, one, two):\n return np.sqrt((one[0] - two[0]) ** 2 + (one[1] - two[1]) ** 2)",
"def distance():\n return str(us.get_distance())",
"def _node_distance(self, first, second):\r\n\r\n name_1 = first.name.split(' ')[0]\r\n name_2 = second.name.split(' ')[0]\r\n\r\n seq1 = self.msa_by_name[name_1]\r\n seq2 = self.msa_by_name[name_2]\r\n\r\n distance = self._seq_distance(seq1, seq2)\r\n\r\n return distance",
"def distance(self, lat: float, long: float) -> float:\n # Initial euclidian formula below\n # diff_lat = self.lat - lat\n # diff_long = self.long - long\n # euclidian = math.sqrt((diff_lat ** 2 + diff_long ** 2 + self.altitude ** 2))\n\n return self._haversine(lat, long) + self.altitude / 1000",
"def mapping(ref, non_ref, probe):\r\n v1 = (ref[0]-non_ref[0], ref[1]-non_ref[1], ref[2]-non_ref[2])\r\n v2 = (ref[0]-probe[0], ref[1]-probe[1], ref[2]-probe[2])\r\n cosin = angle_between(v1,v2)\r\n dist = math.sqrt((probe[0]-ref[0])**2+(probe[1]-ref[1])**2+(probe[2]-ref[2])**2)*cosin\r\n return dist",
"def get_distance(latitude, longitude, del_latitude, del_longitude):\n coord = (latitude, longitude)\n del_coord = (del_latitude, del_longitude)\n return distance.geodesic(coord, del_coord).km"
] | [
"0.6199831",
"0.61004704",
"0.61004704",
"0.5746319",
"0.5437211",
"0.54337853",
"0.5426136",
"0.541698",
"0.541698",
"0.53653735",
"0.5341924",
"0.5341924",
"0.5321905",
"0.5310927",
"0.5270482",
"0.52228695",
"0.5202737",
"0.52025795",
"0.52016085",
"0.5196722",
"0.51792306",
"0.51691294",
"0.5168868",
"0.515106",
"0.5141902",
"0.51092786",
"0.5108411",
"0.50965434",
"0.50915456",
"0.50837284"
] | 0.7312722 | 0 |
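A minimal usage sketch for the `calculate_distance` document above, assuming `genes` and `tes` are nested dicts of the form `{chromosome: {feature_id: (start, end)}}` that the function expects, and using the Python 3 `dict.items()` form of the loops; the coordinates below are invented for illustration.

```python
# Hypothetical toy inputs; real values would come from parsed GFF/BED annotations.
genes = {"Chr1": {"gene1": (1000, 2000)}}
tes = {"Chr1": {"TE_a": (100, 400), "TE_b": (5000, 5600)}}

up_id, down_id, dist_up, dist_down = calculate_distance("gene1", genes, tes)
print(up_id, down_id, dist_up, dist_down)  # TE_a TE_b 600 3000
```

Note that when no TE lies strictly upstream or downstream of the gene, the corresponding loop falls through without breaking and the last TE visited is reported, so callers may want to check the sign of the returned distances.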
Downloads the data and saves it to a zip file. | def download_data(self):
    headers = {'User-Agent': 'Mozilla/5.0',}
    #Request for html data of url page
    r = requests.get(self.url, headers = headers, allow_redirects=True)
    soup = BeautifulSoup(r.text, "html.parser")
    #Checking if folder path exists, if not, creates it
    i=0
    while i<len(self.folder)-1:
        if self.folder[i] == '/':
            if not os.path.isdir(self.folder[:i]):
                os.mkdir(self.folder[:i])
        i+=1
        if i==len(self.folder)-1:
            if not os.path.isdir(self.folder):
                os.mkdir(self.folder)
    # if not os.path.isdir(self.folder):
    #     os.mkdir(self.folder)
    #Gets every href to zip file with data
    entries = []
    for link in soup.find_all('a'):
        if re.search("^data/.*.zip", link.get('href')):
            entries.append(link.get('href'))
    #Gets the newest dataset
    self.getCurrentData(entries)
    i=0
    #Saves each file in dataset
    for list in self.ListOfZipFiles:
        if not os.path.isfile(self.folder+list[4:]):
            r = requests.get(self.url+list)
            open(self.folder+list[4:], 'wb').write(r.content)
        #deletes prefix "data/"
        self.ListOfZipFiles[i] = list[4:]
        i+=1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()",
"def download_data():\n\n if not os.path.exists(zipfile_path):\n print(f'Downloading {config.download_url} to {zipfile_path}')\n urlretrieve(config.download_url, zipfile_path)\n print(f'Successfully downloaded {zipfile_path}')\n\n zip_ref = ZipFile(zipfile_path, 'r')\n zip_ref.extractall(config.raw_data_dir)\n zip_ref.close()\n\n os.rename(f\"{config.raw_data_dir}/cornell movie-dialogs corpus\", extracted_dir)",
"def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)",
"def _download(data_folder): # pragma: no cover\n\n logger.info(f\"Downloading {SOURCE_URL}.\")\n\n with urlopen(SOURCE_URL) as zipresp:\n with zipfile.ZipFile(io.BytesIO(zipresp.read())) as zfile:\n zfile.extractall(data_folder)",
"def download():\n response = requests.get(URL, stream=True)\n\n file = open(FILE_NAME, 'wb')\n file.write(response.content)\n\n with zipfile.ZipFile(FILE_NAME, 'r') as zip_ref:\n zip_ref.extractall()\n\n file.close()\n os.remove(FILE_NAME)",
"def download_data(url, dest, *a, **kw):\n pth = os.path.join(\n os.path.dirname(__file__),\n 'data',\n 'allCountries.zip'\n )\n\n open(dest, 'w').write(open(pth).read())",
"def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()",
"def zip_data(self) -> None:\n zipf = zipfile.ZipFile('output.zip', 'w', zipfile.ZIP_DEFLATED)\n self._zipdir(self.path, zipf)\n zipf.close()",
"def unzip() -> None:\n logger = logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # define the destination\n destination = project_dir / 'data' / 'raw'\n\n # extract zip\n zip_file = ZipFile(destination / \"original.zip\")\n zip_file.extractall(destination)",
"def extract_to_disk(self):\n archive_name, extension = os.path.splitext(os.path.basename(self.file.name))\n if not os.path.isdir(os.path.join(os.getcwd(), archive_name)):\n os.mkdir(archive_name)\n os.chdir(archive_name)\n for filename, data in self.extract().items():\n f = open(filename, 'wb')\n f.write(data or b'')\n f.close()",
"def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )",
"def _download_archive(self):\n _logger.debug('Downloading archive...')\n response = urlopen(self.url)\n\n with open(self._archive_full_path, 'wb') as archive_file:\n chunk_size = 1024 * 1024 # 1 MB\n chunk = response.read(chunk_size)\n\n while chunk:\n archive_file.write(chunk)\n chunk = response.read(chunk_size)\n\n _logger.debug('Archive {name} has been successfully downloaded.'.format(name=self.archive_name))",
"def download_archive(self):\n\n def time_convert(structure):\n \"\"\"\n :param structure: tuple representation of time\n :return: GitHub archive time\n \"\"\"\n \n \n join_number_to_zero = lambda number: (\"\" if number > 9 else \"0\") + str(number)\n\n return \"%s-%s-%s-%s\" % (\n structure.tm_year, join_number_to_zero(structure.tm_mon), join_number_to_zero(structure.tm_mday),\n structure.tm_hour)\n\n current_time = self.get_time()\n self.logger.debug(__name__ + \": \" + \"current time: \" + str(gmtime(current_time)))\n\n difference = -25200\n #timezone difference in seconds between GMT and west coast of USA\n\n downloading_time = int(timegm(self.config[\"last_connection_time\"])) + 3600\n self.logger.debug(__name__ + \": \" + \"downloading time: \" + str(gmtime(downloading_time)))\n\n if downloading_time > current_time - 7200:\n self.logger.info(__name__ + \": \" + \"unable to download file (time limiting).\")\n return\n\n downloading_time += difference\n\n json_file_name = self.download_file(time_convert(gmtime(downloading_time)))\n\n self.config[\"last_connection_time\"] = gmtime(downloading_time - difference)\n self.logger.debug(__name__ + \": \" + \"last_connection_time: \" + str(self.config[\"last_connection_time\"]))\n\n return json_file_name",
"def _maybe_download_and_extract(self, filename):\n if not os.path.exists(self.work_dir):\n os.mkdir(self.work_dir)\n filepath = os.path.join(self.work_dir, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n log.info('Extracting zip file ... ')\n f = zipfile.ZipFile(filepath)\n f.extractall(path=self.work_dir)\n log.info('Extraction finished ... ')",
"def download_data():\r\n print('Downloading cifar-10 data...')\r\n request.urlretrieve(dataurl)\r\n print('Done')\r\n print('Please unzip files. command is:')\r\n print('gzip -d cifar-10-python.tar.gz')\r\n print('tar -xf cifar-10-python.tar')\r\n exit()",
"def download_dataset():\n \n ID = \"1-3_oB5iSF-c_V65-uSdUlo024NzlgSYZ\"\n script1 = f\"\"\"\n wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id='{ID} -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=\"{ID} -O Data.zip && rm -rf /tmp/cookies.txt\n \"\"\"\n script2 = \"\"\"unzip Data.zip\"\"\"\n\n os.system(script1)\n os.system(script2)",
"def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()",
"def download_one_zip(data_url, data_dir):\r\n\r\n zipfile_path, unzip_dir = zip_file_name_from_url(data_url, data_dir)\r\n if not is_there_file(zipfile_path, unzip_dir):\r\n if not os.path.isdir(unzip_dir):\r\n os.makedirs(unzip_dir)\r\n r = requests.get(data_url, stream=True)\r\n with open(zipfile_path, \"wb\") as py_file:\r\n for chunk in r.iter_content(chunk_size=1024): # 1024 bytes\r\n if chunk:\r\n py_file.write(chunk)\r\n unzip_nested_zip(zipfile_path, unzip_dir), download_small_file",
"def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()",
"def _download_zip(self, zip_url, dest_dir):\n # TODO(jsirois): Wrap with workunits, progress meters, checksums.\n self.context.log.info('Downloading {}...'.format(zip_url))\n sess = requests.session()\n sess.mount('file://', self.LocalFileAdapter())\n res = sess.get(zip_url)\n if not res.status_code == requests.codes.ok:\n raise TaskError('Failed to download {} ({} error)'.format(zip_url, res.status_code))\n\n with open_zip(BytesIO(res.content)) as zfile:\n safe_mkdir(dest_dir)\n for info in zfile.infolist():\n if info.filename.endswith('/'):\n # Skip directories.\n continue\n # Strip zip directory name from files.\n filename = os.path.relpath(info.filename, get_basedir(info.filename))\n f = safe_open(os.path.join(dest_dir, filename), 'w')\n f.write(zfile.read(info))\n f.close()",
"def download_data():\n url = 'https://www.dropbox.com/s/8oehplrobcgi9cq/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()",
"def download_and_unzip(url, zip_path, csv_path, data_folder):\n\n download_from_url(url, zip_path)\n\n unzip(zip_path, csv_path, data_folder)\n\n print('Done.')",
"def save_downloaded_zip(self, dict_entry_to_download):\n\tif self.download_subtitle_zip(dict_entry_to_download) == True:\n\t try:\n\t\tzip_file = open(self.ZipFilePath,\"wb\")\n\t\tzip_file.write(self.zip_string)\n\t\tzip_file.close\t\t\n\t\tprint \"Zipfile: %s saved on hdd.\" % self.ZipFilePath\n\t\tdel self.zip_string\n\t\treturn True\n\t except:\n\t\tprint \"Problems with Zipfile: %s saveing on hdd.\" % self.ZipFilePath\n\t\treturn False",
"def download(uri: str) -> None:\n logger = logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # create destination dirs\n destination = project_dir / 'data' / 'raw'\n destination.mkdir(exist_ok=True, parents=True)\n\n # download the file\n urllib.request.urlretrieve(uri, destination / \"original.zip\")",
"def fetch(data_dir):\n file_path = os.path.join(data_dir, DESTINATION, ZIP_NAME)\n result_path = os.path.join(data_dir, DESTINATION, NAME)\n return utils.fetch(URL, file_path, result_path)",
"def download_data():\n url = 'https://www.dropbox.com/s/xk4glpk61q3qrg2/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()",
"def download_file(self, name):\n #TODO: handle exceptions\n archive_name = name + \".json.gz\"\n file_name = join(self.new_data_dir, name + \".json\")\n\n try:\n urlretrieve(\"http://data.githubarchive.org/\" + archive_name,\n filename=join(self.downloaded_data_dir, archive_name))\n except IOError:\n self.logger.error(__name__ + \": \" + \"unable to download file (error creating connection).\")\n\n try:\n archive = gz_open(join(self.downloaded_data_dir, archive_name))\n except IOError:\n self.logger.error(__name__ + \": \" + \"unable to open gzipped file (file not created).\")\n else:\n json_file = open(file_name, \"w\")\n json_file.write(archive.read())\n\n archive.close()\n json_file.close()\n\n remove(join(self.downloaded_data_dir, archive_name))\n\n return file_name",
"def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def save_data(self):\n # Command to get the download data\n pass",
"def _create_zip_file(self) -> BytesIO:\n zip_file_io = BytesIO()\n with ZipFile(zip_file_io, 'w') as zip_file:\n for image_scraper_model in self._url_model.image_scraper.all():\n image_absolute_path = self._get_image_absolute_path(image_scraper_model)\n zip_file_image_path = self._get_zip_file_image_path(image_absolute_path)\n zip_file.write(image_absolute_path, zip_file_image_path)\n zip_file.close()\n return zip_file_io"
] | [
"0.7695943",
"0.7471872",
"0.72585493",
"0.7096487",
"0.70846176",
"0.70634305",
"0.706217",
"0.70549625",
"0.7003874",
"0.67468005",
"0.67194486",
"0.66552013",
"0.66520566",
"0.66331756",
"0.66310894",
"0.662298",
"0.65793407",
"0.6567851",
"0.6541638",
"0.65338546",
"0.65108764",
"0.65095705",
"0.65093136",
"0.65052146",
"0.6493664",
"0.6492765",
"0.6470934",
"0.6445515",
"0.6429704",
"0.6427921"
] | 0.7915135 | 0 |
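The character-by-character directory loop in the `download_data` document above can be collapsed into a single call; a minimal sketch under the assumption that `self.folder` is an ordinary relative or absolute path string (the helper name is illustrative, not from the source):

```python
import os

def ensure_folder(folder: str) -> None:
    # Creates the folder and any missing parent directories; does nothing if it already exists.
    os.makedirs(folder, exist_ok=True)
```

`exist_ok` has been available since Python 3.2, so this covers the same cases as the manual scan over path separators.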
Returns the name of the CSV file for a region. | def getCsvFileByRegion(self, region):
    result = ""
    if region == "PHA":
        result = "00.csv"
    elif region == "STC":
        result = "01.csv"
    elif region == "JHC":
        result = "02.csv"
    elif region == "PLK":
        result = "03.csv"
    elif region == "ULK":
        result = "04.csv"
    elif region == "HKK":
        result = "05.csv"
    elif region == "JHM":
        result = "06.csv"
    elif region == "MSK":
        result = "07.csv"
    elif region == "OLK":
        result = "14.csv"
    elif region == "ZLK":
        result = "15.csv"
    elif region == "VYS":
        result = "16.csv"
    elif region == "PAK":
        result = "17.csv"
    elif region == "LBK":
        result = "18.csv"
    elif region == "KVK":
        result = "19.csv"
    else:
        return None
    return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get_region_filename(self, region):\r\n \r\n return {\r\n 'PHA': '00.csv',\r\n 'STC': '01.csv',\r\n 'JHC': '02.csv',\r\n 'PLK': '03.csv',\r\n 'KVK': '19.csv',\r\n 'ULK': '04.csv',\r\n 'LBK': '18.csv',\r\n 'HKK': '05.csv',\r\n 'PAK': '17.csv',\r\n 'OLK': '14.csv',\r\n 'MSK': '07.csv',\r\n 'JHM': '06.csv',\r\n 'ZLK': '15.csv',\r\n 'VYS': '16.csv',\r\n }[region]",
"def csvPathname(self, scenario, baseline=None, outputDir='.', type=RESULT_TYPE_SCENARIO):\n # Output files are stored in the output dir with same name as query file but with 'csv' extension.\n basename = os.path.basename(self.queryFile)\n mainPart, extension = os.path.splitext(basename)\n middle = scenario if type == RESULT_TYPE_SCENARIO else (\"%s-%s\" % (scenario, baseline))\n csvFile = \"%s-%s.csv\" % (mainPart, middle)\n csvPath = os.path.abspath(os.path.join(outputDir, csvFile))\n return csvPath",
"def csv_path(name):\n return \"./data/%s\" % name",
"def get_csv_file_name(output_dir, file_prefix, file_suffix):\n\tcsv_filename = \"\".join([file_prefix, '_', file_suffix, '.csv'])\n\treturn os.path.join(output_dir, csv_filename)",
"def _get_csv_path(name):\n return os.path.join(cwd, 'output/app_info', name)",
"def region_name(self):\n return self.random_element(self._regions)[1]",
"def tsv_name():\n\n if PAR['level'] == 1:\n \n return 'col.tsv'\n \n else:\n\n return 'myc.tsv'",
"def destination_region_name(self) -> str:\n return self._destination_region_name",
"def get_csv_path(url, destination):\n datafile_path = get_datafile_path(url, destination)\n return os.path.splitext(datafile_path)[0] + '-processed.csv'",
"def source_region_name(self) -> str:\n return self._source_region_name",
"def function_region(self) -> str:\n return pulumi.get(self, \"function_region\")",
"def get_field_mapping_filename(field_name: str, config_location: str) -> str:\n return os.path.join(config_location, field_name + \".csv\")",
"def get_loc_year_csv(csv_name):\n fname = (csv_name.split('.'))[0].split('-')\n return fname[0], fname[1]",
"def csv_dir(self):\n return op.join(self.root_dir, 'csv')",
"def load_aws_region_name() -> str:\n session = boto3.session.Session()\n region_name = (\n click.get_current_context().params.get(\"region\") or session.region_name\n )\n return region_name",
"def get_filename(self, exported_datetime=None):\n formatted_model = self.model_cls._meta.label_lower.replace(\".\", \"_\")\n formatted_date = exported_datetime.strftime('%Y%m%d%H%M%S')\n return f'{formatted_model}_{formatted_date}.csv'",
"def get_filename(name: str, fold: int, type: str):\n if type != TYPE_TRAIN and type != TYPE_TEST:\n raise \"Invalid dataset type: \" + type\n directory = os.path.dirname(os.path.realpath(__file__))\n if directory.endswith(os.sep):\n directory.rstrip(os.sep)\n return directory + os.sep + type + os.sep + name + str(fold) + \".csv\"",
"def GetFileName(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF3_GetFileName(self)",
"def region(self) -> str:\n return pulumi.get(self, \"region\")",
"def region(self) -> str:\n return pulumi.get(self, \"region\")",
"def region(self) -> str:\n return pulumi.get(self, \"region\")",
"def region(self) -> str:\n return pulumi.get(self, \"region\")",
"def region(self) -> str:\n return pulumi.get(self, \"region\")",
"def region(self) -> str:\n return pulumi.get(self, \"region\")",
"def region(self) -> str:\n return self.__region",
"def _get_partition(region_name: str) -> str:\n\n if region_name.startswith(\"us-gov\"):\n return \"aws-us-gov\"\n\n return \"aws\"",
"def GetFileName(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF2_GetFileName(self)",
"def get_filename(output_dir, accountname):\n f_name = 'twitter_data_' + accountname + str(datetime.datetime.utcnow()) + '.csv'# start_time + '_' + end_time\n full_path = output_dir + '/' + f_name\n\n return full_path",
"def filename(self):\n return os.path.basename(self._spatial_filename)",
"def GetFileName(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD3_GetFileName(self)"
] | [
"0.7729703",
"0.64638746",
"0.6337415",
"0.6190413",
"0.6148976",
"0.6008073",
"0.5954937",
"0.57771087",
"0.5749584",
"0.5748628",
"0.5714495",
"0.5634323",
"0.5593007",
"0.5579389",
"0.5565088",
"0.5473224",
"0.54680103",
"0.5429127",
"0.5419594",
"0.5419594",
"0.5419594",
"0.5419594",
"0.5419594",
"0.5419594",
"0.54169446",
"0.54152733",
"0.5380459",
"0.5370246",
"0.5365276",
"0.5364091"
] | 0.79758877 | 0 |
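The branch-per-region chain in the `getCsvFileByRegion` document above encodes a fixed lookup table; the first negative in its list shows the same mapping as a dict. A standalone sketch of that table-driven form, with the region-to-file pairs copied from the document (the function and constant names here are illustrative):

```python
REGION_FILES = {
    "PHA": "00.csv", "STC": "01.csv", "JHC": "02.csv", "PLK": "03.csv",
    "ULK": "04.csv", "HKK": "05.csv", "JHM": "06.csv", "MSK": "07.csv",
    "OLK": "14.csv", "ZLK": "15.csv", "VYS": "16.csv", "PAK": "17.csv",
    "LBK": "18.csv", "KVK": "19.csv",
}

def get_csv_file_by_region(region: str):
    # dict.get returns None for unknown region codes, matching the original's else branch.
    return REGION_FILES.get(region)
```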
Build a uniform triangular mesh on the unit square. | def square_mesh(N):
    xs,ys = np.meshgrid(np.linspace(0,1,N),np.linspace(0,1,N))
    xs = xs.flatten(1)
    ys = ys.flatten(1)
    _,_,t,_ = triang.delaunay(xs,ys)
    p = np.vstack((xs,ys)).T
    return Trimesh(p,t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_square_triangle_mesh():\n vertices = np.array(\n ((0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0.5, 0.5, 0)),\n dtype=np.float32)\n faces = np.array(\n ((0, 1, 4), (1, 3, 4), (3, 2, 4), (2, 0, 4)), dtype=np.int32)\n return vertices, faces",
"def create_single_triangle_mesh():\n vertices = np.array(\n ((0, 0, 0), (1, 0, 0), (0, 1, 0)), dtype=np.float32)\n faces = np.array(((0, 1, 2),), dtype=np.int32)\n return vertices, faces",
"def triangle(self):\n [r,c] = self.D\n m = min(r,c)\n S = self\n T = zeros(r,c)\n while m > 0:\n NoLigne = 0\n while S[NoLigne, 0] == 0 and (NoLigne < m - 1):\n NoLigne += 1\n S = S.swap(NoLigne,0)\n if S[0, 0] != 0:\n pivot = S[0,0]\n for k in range(1,m):\n if S[k,0] != 0:\n S = S.comb_lignes(pivot, -S[k,0],k,0)\n #print(\"pivot = \"+str(pivot))\n #print(\"S dans for :\")\n #print(S)\n T = T.remplace_ligned(r - m,S.F)\n #print(\"Évolution de T :\")\n #print(T)\n S = S.decoupe()\n m -= 1\n return T",
"def mesh_uniform(N_e, d, Omega):",
"def Controlled2(U):\n '''Generalized controlled unitary tensor construction\n Parameters:\n -----------\n U: input tensor which is assumed to be a square Matrix\n\n Returns:\n --------\n Controlled unitary\n\n '''\n shp = U.shape\n new_ten = scipy.linalg.block_diag(np.eye(*shp), U)\n return new_ten.reshape(2, shp[0], 2, shp[1], 2, shp[2])",
"def test_special_triangles_euclidean(self):\n import itertools\n\n s = space(curvature=0)\n\n # turning constants in radians\n t1_ref = 6.28318530717958647692528676655867\n t2_ref = t1_ref / 2\n t3_ref = t1_ref / 3\n t4_ref = t1_ref / 4\n t6_ref = t1_ref / 6\n t8_ref = t1_ref / 8\n t12_ref = t1_ref / 12\n # sqrt constants\n sqrt2_ref = 1.41421356237309504880168872420977\n sqrt3_ref = 1.73205080756887729352744634150584\n\n # test with each known triangle\n for a, C, b, A, c, B, m in (\n (1, t6_ref, 1, t6_ref, 1, t6_ref, sqrt3_ref/4), # 1 1 1 (equilateral)\n (1, t4_ref, 1, t8_ref, sqrt2_ref, t8_ref, 1/2), # 1 1 sqrt2 (right isoceles)\n (1, t4_ref, sqrt3_ref, t12_ref, 2, t6_ref, sqrt3_ref/2), # 1 sqrt3 2 (right)\n (1, t3_ref, 1, t12_ref, sqrt3_ref, t12_ref, sqrt3_ref/4), # 1 1 sqrt3 (obtuse isoceles)\n (sqrt2_ref, t8_ref + t6_ref, 2, t12_ref, 1 + sqrt3_ref, t8_ref, (1 + sqrt3_ref)/2) # sqrt2 2 1+sqrt3 (obtuse scalene)\n ):\n # try scaling them up and down too\n for scale in (1, 2, 1/3):\n a *= scale\n b *= scale\n c *= scale\n m *= scale**2\n # go through all vertex permutations\n for (a, A), (b, B), (c, C) in itertools.permutations([(a, A), (b, B), (c, C)], 3):\n self.assertTrue(isclose(\n s.cosine_law_side(a, b, C),\n c\n ))\n self.assertTrue(isclose(\n s.cosine_law_angle(a, b, c),\n C\n ))\n self.assertTrue(isclose(\n s.dual_cosine_law_angle(A, B, c),\n C\n ))\n # skip dual_cosine_law_side because it is not defined in K = 0\n self.assertTrue(isclose(\n s.sine_law_side(a, A, B),\n b\n ))\n self.assertTrue(isclose(\n s.sine_law_angle(a, A, b),\n B,\n rel_tol = 1e-5 # have to go easier on it since asin is really sensitive around 1\n ) or B > t4_ref and isclose( # SSA triangle solving strangeness\n s.sine_law_angle(a, A, b),\n t2_ref - B\n ))\n self.assertTrue(isclose(\n s.triangle_area_from_sides(a, b, c),\n m\n ))",
"def form_triu_matrix(arr):\n n = int(np.ceil((np.sqrt(1 + 8 * len(arr)) - 1) * 0.5))\n M = np.zeros((n, n))\n c = 0\n for i in range(n):\n for j in range(n):\n if j >= i:\n if c < len(arr):\n M[i, j] = arr[c]\n c += 1\n else:\n break\n return M",
"def uniform_mesh(n, x_0=0.0, x_1=1.0):\n\n assert n>0\n\n\n points = x_0 + (x_1 - x_0)*numpy.arange(n+1,dtype=numpy.float)/n\n boundary = {(0, 0): 'left', (n-1, 1): 'right'}\n\n return points, boundary",
"def addUVTri(self,t,uv):\n indexTri = [self.register(v) for v in t]\n indexUVTri = [self.uvregister(v) for v in uv]\n self.get('mesh.triangles').append(indexTri)\n self.get('mesh.uvtriangles').append(indexUVTri)\n return self",
"def triu(m, k=0):\n\n if not use_origin_backend(m):\n if not isinstance(m, dparray):\n pass\n else:\n return dpnp_triu(m, k)\n\n return call_origin(numpy.triu, m, k)",
"def Triangle(self, c1=(0.,0.), c2=(0.,1.), c3=(1.,0.), npoints=10, element_type=\"tri\", equally_spaced=True):\n\n if not isinstance(c1,tuple) or not isinstance(c2,tuple) or not isinstance(c3,tuple):\n raise ValueError(\"The coordinates c1, c2 and c3 should be given in tuples of two elements each (x,y)\")\n\n npoints = int(npoints)\n\n\n npoints = npoints - 1\n if npoints < 0:\n npoints = 0\n\n c1 = np.array(c1); c2 = np.array(c2); c3 = np.array(c3)\n opoints = np.vstack((c1,c2,c3))\n oelements = np.array([[0,1,2]])\n\n if element_type==\"tri\":\n mesh = self.TriangularProjection(points=opoints, npoints=npoints, equally_spaced=equally_spaced)\n self.__update__(mesh)\n\n\n elif element_type == \"quad\":\n\n # SPLIT THE TRIANGLE INTO 3 QUADS\n omesh = Mesh()\n omesh.element_type=\"tri\"\n omesh.elements = oelements\n omesh.nelem = omesh.elements.shape[0]\n omesh.points = opoints\n omesh.GetBoundaryEdges()\n\n sys.stdout = open(os.devnull, \"w\")\n omesh.ConvertTrisToQuads()\n sys.stdout = sys.__stdout__\n\n\n npoints = int(npoints/2) + 1\n mesh = self.QuadrilateralProjection(points=omesh.points[omesh.elements[0,:],:],\n npoints=npoints, equally_spaced=equally_spaced)\n for i in range(1,omesh.nelem):\n mesh += self.QuadrilateralProjection(points=omesh.points[omesh.elements[i,:],:],\n npoints=npoints, equally_spaced=equally_spaced)\n\n self.__update__(mesh)",
"def compute_mesh(nrow, ncol, nele):\n tri_index = np.zeros((nele, 3))\n for i in range(nrow-1):\n for j in range(NUM):\n if j == 0:\n tri_index[i*4*NUM+j*4, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4, 1] = (i+1)\n tri_index[i*4*NUM+j*4, 2] = (i+2)\n\n tri_index[i*4*NUM+j*4+1, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4+1, 1] = (i+2)\n tri_index[i*4*NUM+j*4+1, 2] = (i+2)+(2*j+1)*nrow\n else:\n tri_index[i*4*NUM+j*4, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4, 1] = (i+1)+(2*j-1)*nrow\n tri_index[i*4*NUM+j*4, 2] = (i+2)+(2*j-1)*nrow\n\n tri_index[i*4*NUM+j*4+1, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4+1, 1] = (i+2)+(2*j-1)*nrow\n tri_index[i*4*NUM+j*4+1, 2] = (i+2)+(2*j+1)*nrow\n \n tri_index[i*4*NUM+j*4+2, 0] = (i+1)+2*j*nrow\n tri_index[i*4*NUM+j*4+2, 1] = (i+1)+2*(j+1)*nrow\n tri_index[i*4*NUM+j*4+2, 2] = (i+2)+2*(j+1)*nrow\n\n tri_index[i*4*NUM+j*4+3, 0] = (i+1)+2*j*nrow\n tri_index[i*4*NUM+j*4+3, 1] = (i+2)+2*(j+1)*nrow\n tri_index[i*4*NUM+j*4+3, 2] = (i+2)+2*j*nrow\n return tri_index",
"def meshup(self, ind='ij'):\r\n xv, yv, zv = self.vec()\r\n x_reg, y_reg, z_reg = np.meshgrid(xv, yv, zv, indexing=ind)\r\n\r\n return x_reg, y_reg, z_reg",
"def CreateDummyUpperDimensionalMesh(self):\n\n\n sys.stdout = open(os.devnull, \"w\")\n p = self.InferPolynomialDegree()\n mesh = Mesh()\n if self.element_type == \"tri\":\n mesh.Parallelepiped(nx=1,ny=1,nz=1, element_type=\"tet\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"quad\":\n mesh.Parallelepiped(nx=1,ny=1,nz=1, element_type=\"hex\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"line\":\n mesh.Rectangle(nx=1,ny=1, element_type=\"quad\")\n mesh.GetHighOrderMesh(p=p)\n sys.stdout = sys.__stdout__\n\n return mesh",
"def unitQuad_Edge(lens, N=3):\n template = [ np.array([0,0]), np.array([lens[0], 0]), None, None ] #Template from which to generate other Quad Vertex Lists\n leftDegenerate = template.copy() #Left Limit of quad if you were to rotate edge 3 CCW about the origin until you no longer can\n rightDegenerate = template.copy() #Right Limit of quad if you were to rotate edge 2 CW about point 1 until you no longer can,\n # or alternatively, how far edge 3 can rotate CW until the quad is degenerate\n try:\n leftDegenerate[3] = np.array( circleIntersection(leftDegenerate[0], lens[3], leftDegenerate[1], lens[1]+lens[2]) )\n leftDegenerate[2] = ( lens[1] / (lens[2]+lens[1]) ) * (leftDegenerate[3]-leftDegenerate[1]) + leftDegenerate[1]\n except: \n leftDegenerate[3] = np.array([-lens[3],0])\n leftDegenerate[2] = np.array( circleIntersection(leftDegenerate[3], lens[2], leftDegenerate[1], lens[1]) )\n\n try:\n rightDegenerate[2] = np.array( circleIntersection(rightDegenerate[0], lens[2]+lens[3], rightDegenerate[1], lens[1]) )\n rightDegenerate[3] = ( lens[3] / (lens[3]+lens[2]) ) * rightDegenerate[2]\n except:\n rightDegenerate[2] = np.array([lens[0]+lens[1], 0])\n rightDegenerate[3] = np.array( circleIntersection(rightDegenerate[0], lens[3], rightDegenerate[2], lens[2]))\n \n rightOfOrigin = np.array([1,0]) #Theta = 0 on the Unit Circle\n thetaMin = angle_between(leftDegenerate[3], rightOfOrigin) #Angle of \n thetaMax = angle_between(rightDegenerate[3], rightOfOrigin)\n pitch = (thetaMax - thetaMin) / (N-1)\n\n result = []\n result.append(leftDegenerate) \n for i in range(1, N-1):\n result.append(template.copy())\n result[i][3] = lens[3]*unitCircPt(i*pitch+thetaMin)\n result[i][2] = np.array(circleIntersection( result[i][3], lens[2], result[i][1], lens[1]))\n result.append(rightDegenerate) \n\n return listify(result)",
"def build_mesh(self):\n vertices = []\n indices = []\n step = 10\n istep = (pi * 2) / float(step)\n for i in range(step):\n x = 350 + cos(istep * i) * 100\n y = 350 + sin(istep * i) * 100\n vertices.extend([x, y, 0, 0])\n indices.append(i)\n return Mesh(vertices=vertices, indices=indices)",
"def triangleFunction(self):\n \n w = np.zeros((self.N))\n l = self.l\n for i in range(self.r.shape[0]):\n r = np.abs(self.r[i])\n if r <= l:\n tf = lambda r,l : 1 - r/l\n w[i] = tf(r,l)\n else:\n w[i] = 0\n self.w = w",
"def tri(self):\n if self._tri is None:\n self._tri = mtri.Triangulation(self.meshx[:self.npoin2],\n self.meshy[:self.npoin2],\n self.ikle2)\n\n return self._tri",
"def MeshPyTri(points,facets,*args,**kwargs):\n info = triangle.MeshInfo()\n info.set_points(points)\n info.set_facets(facets)\n\n return triangle.build(info,*args,**kwargs)",
"def random_triangular_matrix(size: int, lower: bool = True) -> np.ndarray:\n\n a = np.random.uniform(0, 1, (size, size))\n if lower:\n ind = np.triu_indices(5, 1)\n else:\n ind = np.tril_indices(5, 1)\n a[ind] = 0\n\n return a",
"def create_quad(scale=(1.0,1.0), st=False, rgba=False, dtype='float32', type='triangles'):\n shape = [4, 3]\n rgba_offset = 3\n\n width, height = scale\n # half the dimensions\n width /= 2.0\n height /= 2.0\n\n vertices = np.array([\n # top right\n ( width, height, 0.0,),\n # top left\n (-width, height, 0.0,),\n # bottom left\n (-width,-height, 0.0,),\n # bottom right\n ( width,-height, 0.0,),\n ], dtype=dtype)\n\n st_values = None\n rgba_values = None\n\n if st:\n # default st values\n st_values = np.array([\n (1.0, 1.0,),\n (0.0, 1.0,),\n (0.0, 0.0,),\n (1.0, 0.0,),\n ], dtype=dtype)\n\n if isinstance(st, bool):\n pass\n elif isinstance(st, (int, float)):\n st_values *= st\n elif isinstance(st, (list, tuple, np.ndarray)):\n st = np.array(st, dtype=dtype)\n if st.shape == (2,2,):\n # min / max\n st_values *= st[1] - st[0]\n st_values += st[0]\n elif st.shape == (4,2,):\n # st values specified manually\n st_values[:] = st\n else:\n raise ValueError('Invalid shape for st')\n else:\n raise ValueError('Invalid value for st')\n\n shape[-1] += st_values.shape[-1]\n rgba_offset += st_values.shape[-1]\n\n if rgba:\n # default rgba values\n rgba_values = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=dtype), (4,1,))\n\n if isinstance(rgba, bool):\n pass\n elif isinstance(rgba, (int, float)):\n # int / float expands to RGBA with all values == value\n rgba_values *= rgba \n elif isinstance(rgba, (list, tuple, np.ndarray)):\n rgba = np.array(rgba, dtype=dtype)\n\n if rgba.shape == (3,):\n rgba_values = np.tile(rgba, (4,1,))\n elif rgba.shape == (4,):\n rgba_values[:] = rgba\n elif rgba.shape == (4,3,):\n rgba_values = rgba\n elif rgba.shape == (4,4,):\n rgba_values = rgba\n else:\n raise ValueError('Invalid shape for rgba')\n else:\n raise ValueError('Invalid value for rgba')\n\n shape[-1] += rgba_values.shape[-1]\n\n data = np.empty(shape, dtype=dtype)\n data[:,:3] = vertices\n if st_values is not None:\n data[:,3:5] = st_values\n if rgba_values is not None:\n data[:,rgba_offset:] = rgba_values\n\n if type == 'triangles':\n # counter clockwise\n # top right -> top left -> bottom left\n # top right -> bottom left -> bottom right\n indices = np.array([0, 1, 2, 0, 2, 3])\n elif type == 'triangle_strip':\n # verify\n indices = np.arange(len(data))\n elif type == 'triangle_fan':\n # verify\n indices = np.arange(len(data))\n elif type == 'quads':\n indices = np.arange(len(data))\n elif type == 'quad_strip':\n indices = np.arange(len(data))\n else:\n raise ValueError('Unknown type')\n\n return data, indices",
"def yield_equilateral_triangles(cls):\n for i in range(1, 201):\n yield i-.5, i-.5, i-.5\n yield i, i, i",
"def Controlled(U):\n shp = U.shape\n new_ten = scipy.linalg.block_diag(np.eye(*shp), U)\n return new_ten.reshape(2, shp[0], 2, shp[1])",
"def create_cube(scale=(1.0,1.0,1.0), st=False, rgba=False, dtype='float32', type='triangles'):\n\n shape = [24, 3]\n rgba_offset = 3\n\n width, height, depth = scale\n # half the dimensions\n width /= 2.0\n height /= 2.0\n depth /= 2.0\n\n vertices = np.array([\n # front\n # top right\n ( width, height, depth,),\n # top left\n (-width, height, depth,),\n # bottom left\n (-width,-height, depth,),\n # bottom right\n ( width,-height, depth,),\n\n # right\n # top right\n ( width, height,-depth),\n # top left\n ( width, height, depth),\n # bottom left\n ( width,-height, depth),\n # bottom right\n ( width,-height,-depth),\n\n # back\n # top right\n (-width, height,-depth),\n # top left\n ( width, height,-depth),\n # bottom left\n ( width,-height,-depth),\n # bottom right\n (-width,-height,-depth),\n\n # left\n # top right\n (-width, height, depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n (-width,-height, depth),\n\n # top\n # top right\n ( width, height,-depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width, height, depth),\n # bottom right\n ( width, height, depth),\n\n # bottom\n # top right\n ( width,-height, depth),\n # top left\n (-width,-height, depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n ( width,-height,-depth),\n ], dtype=dtype)\n\n st_values = None\n rgba_values = None\n\n if st:\n # default st values\n st_values = np.tile(\n np.array([\n (1.0, 1.0,),\n (0.0, 1.0,),\n (0.0, 0.0,),\n (1.0, 0.0,),\n ], dtype=dtype),\n (6,1,)\n )\n\n if isinstance(st, bool):\n pass\n elif isinstance(st, (int, float)):\n st_values *= st\n elif isinstance(st, (list, tuple, np.ndarray)):\n st = np.array(st, dtype=dtype)\n if st.shape == (2,2,):\n # min / max\n st_values *= st[1] - st[0]\n st_values += st[0]\n elif st.shape == (4,2,):\n # per face st values specified manually\n st_values[:] = np.tile(st, (6,1,))\n elif st.shape == (6,2,):\n # st values specified manually\n st_values[:] = st\n else:\n raise ValueError('Invalid shape for st')\n else:\n raise ValueError('Invalid value for st')\n\n shape[-1] += st_values.shape[-1]\n rgba_offset += st_values.shape[-1]\n\n if rgba:\n # default rgba values\n rgba_values = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=dtype), (24,1,))\n\n if isinstance(rgba, bool):\n pass\n elif isinstance(rgba, (int, float)):\n # int / float expands to RGBA with all values == value\n rgba_values *= rgba \n elif isinstance(rgba, (list, tuple, np.ndarray)):\n rgba = np.array(rgba, dtype=dtype)\n\n if rgba.shape == (3,):\n rgba_values = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,):\n rgba_values[:] = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,3,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (4,4,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (6,3,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (6,4,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (24,3,):\n rgba_values = rgba\n elif rgba.shape == (24,4,):\n rgba_values = rgba\n else:\n raise ValueError('Invalid shape for rgba')\n else:\n raise ValueError('Invalid value for rgba')\n\n shape[-1] += rgba_values.shape[-1]\n\n data = np.empty(shape, dtype=dtype)\n data[:,:3] = vertices\n if st_values is not None:\n data[:,3:5] = st_values\n if rgba_values is not None:\n data[:,rgba_offset:] = rgba_values\n\n if type == 'triangles':\n # counter clockwise\n # top right -> top left -> bottom left\n # top right -> bottom left -> bottom right\n indices = 
np.tile(np.array([0, 1, 2, 0, 2, 3], dtype='int'), (6,1))\n for face in range(6):\n indices[face] += (face * 4)\n indices.shape = (-1,)\n elif type == 'triangle_strip':\n raise NotImplementedError\n elif type == 'triangle_fan':\n raise NotImplementedError\n elif type == 'quads':\n raise NotImplementedError\n elif type == 'quad_strip':\n raise NotImplementedError\n else:\n raise ValueError('Unknown type')\n\n return data, indices",
"def triangulate(self):\n npts = self._vertices.shape[0]\n if np.any(self._vertices[0] != self._vertices[1]):\n # start != end, so edges must wrap around to beginning.\n edges = np.empty((npts, 2), dtype=np.uint32)\n edges[:, 0] = np.arange(npts)\n edges[:, 1] = edges[:, 0] + 1\n edges[-1, 1] = 0\n else:\n # start == end; no wrapping required.\n edges = np.empty((npts-1, 2), dtype=np.uint32)\n edges[:, 0] = np.arange(npts)\n edges[:, 1] = edges[:, 0] + 1\n\n tri = Triangulation(self._vertices, edges)\n tri.triangulate()\n return tri.pts, tri.tris",
"def u_bar(ntab, return_multi_indices_matrix=False, triangles_partitions=False):\n\n# observe that it is always possible to halve the next quantity, \n# since if `ntab` is odd then `ntab+1` is even, and if `ntab` \n# is even then `ntab+2` is even too, hence both are divisible by 2.\n multi_indeces = int((ntab+1)*(ntab+2)/2) \n\n U = np.empty((3, multi_indeces))\n tri = np.empty((ntab**2, 3))\n\n count = 0\n for kt in range(ntab + 1):\n _np = ntab - kt + 1\n U[:, count:count+_np] = np.array(\n [list(range(_np))[::-1],\n list(range(_np)),\n (kt * np.ones(_np)).tolist()])\n count += _np\n\n multi_indices_matrix = np.copy(U) # just have a copy of multi indices\n U /= ntab # make the matrix represent baricentric coordinates\n\n # the following dictionary saves triangles partitions\n partitioned_triangles = {\n 'upside':[],\n 'upside_down':[],\n 'on_left_inv_diagonal':[],\n 'on_right_diagonal':[],\n 'on_bottom_diagonal':[]\n }\n\n def update_tri_matrix(a, b, c):\n update_tri_matrix.count += 1\n tri[update_tri_matrix.count,:] = np.array([a, b, c])\n\n update_tri_matrix.count = -1\n\n for kt in range(ntab-1):\n\n nk = ntab+2-kt\n sm = sum(range(nk,ntab+2))\n end = sm + (ntab-kt-1)\n\n for i, ind in enumerate(range(sm, end)):\n\n upside_triangle = (ind, ind+1, ind+nk-1)\n upside_down_triangle = (ind+1, ind+nk-1, ind+nk)\n\n update_tri_matrix(*upside_triangle)\n update_tri_matrix(*upside_down_triangle)\n \n partitioned_triangles['upside'].append(upside_triangle) \n partitioned_triangles['upside_down'].append(upside_down_triangle) \n\n# using `i` from the enumeration allow us to look for the very first\n# triangle without comparing against `sm`, the start value of `range`\n if i is 0: partitioned_triangles['on_right_diagonal'].append(upside_triangle) \n\n last_triangle = (end, end+1, end+nk-1)\n update_tri_matrix(*last_triangle)\n partitioned_triangles['upside'].append(last_triangle) \n partitioned_triangles['on_bottom_diagonal'].append(last_triangle) \n\n rightmost_bottom_triangle = (multi_indeces-3, multi_indeces-2, multi_indeces-1)\n update_tri_matrix(*rightmost_bottom_triangle)\n partitioned_triangles['upside'].append(rightmost_bottom_triangle) \n partitioned_triangles['on_right_diagonal'].append(rightmost_bottom_triangle) \n partitioned_triangles['on_bottom_diagonal'].append(rightmost_bottom_triangle) \n\n partitioned_triangles['on_left_inv_diagonal'] = partitioned_triangles['upside'][:ntab]\n\n assert update_tri_matrix.count == (ntab**2 - 1)\n\n assert (len(partitioned_triangles['on_left_inv_diagonal']) ==\n len(partitioned_triangles['on_right_diagonal']) ==\n len(partitioned_triangles['on_bottom_diagonal']) == \n ntab)\n\n result = (tri, U)\n if return_multi_indices_matrix: result += (multi_indices_matrix,)\n if triangles_partitions: result += (partitioned_triangles,)\n \n return result",
"def get_triangles( self, N ):\n\n # store N as an instance variable\n self.N = N\n\n # initialize array to store locations of points for all triangles in the\n # tessellation sequence\n self.triangles = np.zeros( ( self.N, 3, 2 ) )\n\n # define points of the first triangle in the tessellation sequence\n point_c = np.array( [ 0, 0 ] )\n point_b = self.a * np.array( [ np.cos( self.C ), np.sin( self.C ) ] )\n point_a = np.array( [ self.b, 0 ] )\n\n # stack the points into a single array of shape (3, 2 )\n triangle = np.vstack( [ point_c, point_b, point_a ] )\n\n # loop over the number of triangles in the sequence\n for i in range( self.N ):\n\n # store the points of the i-th triangle in the array\n self.triangles[ i ] = triangle\n\n # compute the next triangle in the tessellation sequence\n triangle = self.next_triangle( triangle = triangle )\n\n # shift the next triangle in the tessellation sequence such that its\n # point C is in the same location as point B of the previous triangle\n triangle += ( self.triangles[ i - 1, 1 ] - self.triangles[ 0, 0 ] )",
"def _uniform_refine_triangles(mesh):\n\n # get current cells and edges\n cells = mesh.cells\n edges = mesh.edges\n\n assert np.size(cells, axis=1) == 3\n\n # get number of current nodes\n n_nodes = np.size(mesh.nodes, axis=0)\n \n # first we create additional nodes as midpoints of the current edges\n midpoints = 0.5 * mesh.nodes.take(edges - 1, axis=0).sum(axis=1)\n\n # add them to the existing mesh nodes\n new_nodes = np.vstack([mesh.nodes, midpoints])\n \n # then we generate the indices of the newly created nodes\n #\n # their indices start at the current number of nodes (`n_nodes`) + 1\n # and end after additional `n_edges` nodes\n n_edges = np.size(edges, axis=0)\n new_node_indices = np.arange(n_nodes + 1, n_nodes + n_edges + 1, dtype=int)\n\n # refine elements\n #\n # for every element we need the indices of the edges at which the corresponding\n # new nodes are created\n indices_2_1 = mesh.topology.get_connectivity(2, 1, return_indices=True) - 1\n \n # next we augment the indices that define each element as if\n # they were defined by 6 nodes (including those of the edge midpoints)\n cells_ = np.hstack([cells, new_node_indices.take(indices_2_1, axis=0)])\n \n # now we can generate the four new elements for each existing one\n new_cells_1 = np.vstack([cells_[:,0], cells_[:,3], cells_[:,4]]).T\n new_cells_2 = np.vstack([cells_[:,1], cells_[:,3], cells_[:,5]]).T\n new_cells_3 = np.vstack([cells_[:,2], cells_[:,4], cells_[:,5]]).T\n new_cells_4 = np.vstack([cells_[:,3], cells_[:,4], cells_[:,5]]).T\n \n new_cells = np.vstack([new_cells_1, new_cells_2, new_cells_3, new_cells_4])\n\n return new_nodes, new_cells",
"def create_uniform_mesh(a=0, b=100, n_elem=4):\n pts = arange(a, b, float(b-a)/(n_elem))\n pts = list(pts) + [b]\n assert len(pts) == n_elem + 1\n return array(pts)",
"def CreateDummyLowerDimensionalMesh(self):\n\n\n sys.stdout = open(os.devnull, \"w\")\n p = self.InferPolynomialDegree()\n mesh = Mesh()\n if self.element_type == \"tet\":\n mesh.Rectangle(nx=1,ny=1, element_type=\"tri\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"hex\":\n mesh.Rectangle(nx=1,ny=1, element_type=\"quad\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"tri\" or self.element_type == \"quad\":\n mesh.Line(n=1, p=p)\n elif self.element_type == \"line\":\n mesh.element_type = \"point\"\n mesh.nelem = 1\n mesh.nnode = 1\n mesh.degree = p\n mesh.elements = np.array([[0]])\n mesh.points = np.array([[0.,0.,0.]])\n sys.stdout = sys.__stdout__\n\n return mesh"
] | [
"0.6988665",
"0.61453784",
"0.6137518",
"0.6123684",
"0.6085948",
"0.604803",
"0.60441124",
"0.6016392",
"0.5986559",
"0.59620947",
"0.59336704",
"0.593075",
"0.58841795",
"0.58742106",
"0.580949",
"0.5796873",
"0.5764584",
"0.57548827",
"0.5731663",
"0.5716954",
"0.57152927",
"0.5701612",
"0.5644698",
"0.5636888",
"0.5628284",
"0.5620493",
"0.56066775",
"0.5597496",
"0.55536944",
"0.5546118"
] | 0.7298217 | 0 |
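A standalone sketch (added here for illustration, not taken from the record above) of the 4-way uniform refinement idea in _uniform_refine_triangles: one triangle is split into four through its edge midpoints; the node and cell layout below are my own assumptions.

import numpy as np

# single reference triangle and its three edges
nodes = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
edges = np.array([[0, 1], [1, 2], [2, 0]])

# edge midpoints become the new nodes 3, 4, 5
midpoints = 0.5 * nodes[edges].sum(axis=1)
new_nodes = np.vstack([nodes, midpoints])

# each corner keeps the two midpoints of its incident edges; the centre
# triangle is formed by the three midpoints themselves
new_cells = np.array([[0, 3, 5], [1, 3, 4], [2, 4, 5], [3, 4, 5]])
print(new_nodes)
print(new_cells)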
Load the input file (a single line of comma-separated numbers). | def load_input(input_name):
with open(input_name) as input_file:
input_list = list(map(int,input_file.readline().split(",")))
return input_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_input() -> List[List[int]]:\n filepath = os.path.join(os.getcwd(), os.path.dirname(__file__), INPUT_FILE)\n f = open(filepath, 'r')\n data = f.read()\n f.close()\n\n raw_input = data.strip().split('\\n')\n input = [list(ri) for ri in raw_input]\n return [[int(i) for i in line] for line in input]",
"def load_data(filename):\n\n with open(filename) as f_obj: # Open file to read & assign file object\n for line in f_obj: # Read each line as text\n print(int(line)) # Convert to int & display",
"def loadFile(targetFile):\n outputList = []\n file = open(targetFile, \"r\") # Loading file\n fileLines = file.readlines() # Splitting into lines\n for line in fileLines:\n line = line.strip(\"\\n\") # Removing any line separators\n line = line.split(\",\") # Dividing elements separated by commas\n listLength = len(line)\n outputList.append([int(line[index]) for index in range(listLength)]) # Appending line info to list\n return outputList # Returning final list",
"def read_line_as_numbers(filename):\n # As we expect numbers here and need them in order to do calculations lets map to int\n return list(map(int, read_lines(filename)))",
"def load_input(self, file_name):\n with open(file_name, \"r\") as in_file:\n self.all_lines = [line.rstrip('\\n') for line in in_file]",
"def read_data(filename):\n\n fp = open(filename)\n\n A = []\n\n one_line = fp.readline()\n while one_line != \"\":\n x = int(one_line)\n A.append(x)\n one_line = fp.readline()\n\n return A",
"def readInput(fileName):\n\n with open(fileName) as f:\n firstLine = f.readlines()[0].split(\",\")\n\n return [int(i) for i in firstLine]",
"def read(self, filename):\n f = open(filename, 'r')\n m = f.readline()\n n = f.readline()\n lst = []\n for line in f.readlines():\n lst.append(int(line))\n f.closed\n self.__init__(int(m), int(n), lst)",
"def load_day1_input(filename: str) -> List[int]:\n mass_lst = []\n with open(filename) as f:\n for line in f:\n mass_lst.append(int(line))\n return mass_lst",
"def read_input(fname=\"day11.in\"):\n with open(fname) as f:\n return [int(v.strip()) for v in next(f).split(\",\")]",
"def read_file_into_ints(filename):\n lines = read_file_into_list(filename)\n list_of_lists = []\n for line in lines:\n list_of_lists.append(read_lines_of_ints))\n return list_of_lists",
"def read_input(fname=\"day05.in\"):\n with open(fname) as f:\n return [int(v.strip()) for v in next(f).split(\",\")]",
"def create_from_file(self, file):\n self.value = []\n with open(file, \"r\") as f:\n fl = f.readlines()\n\n for l in fl:\n self.value.append([int(x) for x in l.split()])",
"def load_file(file, split_value=\"\\n\", cast_int=False):\n try:\n input_file = open(file, \"r\")\n output_list = input_file.read().split(split_value)\n input_file.close()\n\n if cast_int:\n return [int(i) for i in output_list]\n\n return output_list\n except IOError:\n sys.exit(\"ERROR: Cannot load file: %s\" % file)",
"def data_parser(filepath):\n tmp = open(filepath).read().split('\\n')\n return [int(x) for x in tmp]",
"def load_from_file(self, file_path):\n board_f = open(file_path, 'r')\n row = board_f.readline().strip('\\n')\n self.data = []\n while row != '':\n self.data.append(list(row.split()))\n row = board_f.readline().strip('\\n')\n board_f.close()",
"def load_from_file(self, file_path):\n board_f = open(file_path, 'r')\n row = board_f.readline().strip('\\n')\n self.data = []\n while row != '':\n self.data.append(list(row.split()))\n row = board_f.readline().strip('\\n')\n board_f.close()",
"def load_data():\n data = []\n with euler.Resource('triangle.txt') as datafile:\n for line in datafile.readlines():\n data.insert(0, map(int, line.strip().split()))\n return data",
"def myloadtxt(fname, skiprows = 0):\n fin = file(fname)\n for i in range(skiprows):\n fin.readline()\n ln = fin.readline()\n lns = []\n while (ln != \"\"):\n thisln = []\n ln = ln.strip().split()\n for s in ln:\n try:\n f = float(s)\n except:\n f = None\n thisln.append(f)\n lns.append(thisln)\n ln = fin.readline()\n return np.array(lns)",
"def loadData(arr):\n f = open(\"QuickSort.txt\",\"r\")\n for line in f:\n arr.append(int(line))\n\n f.close()",
"def read(path):\n lst = []\n f = open(path, \"r\")\n first_line = True\n for line in f:\n if first_line:\n first_line = False\n continue\n line = line.strip().split()\n num = int(line[1][1:-1])\n lst.append((line[0], num))\n f.close()\n return lst",
"def load_input(input_name):\n with open(input_name) as input_file:\n bingonumbers = list(map(int, input_file.readline().split(\",\")))\n boards = []\n board_num = -1\n board_row = -1\n for line in input_file.readlines():\n if line.strip() == \"\":\n board_num += 1\n board_row = -1\n boards.append([])\n else:\n boards[board_num].append(list(map(int, line.split())))\n return (bingonumbers, boards)",
"def read_input(fname=\"day16.in\"):\n with open(fname) as f:\n return line2ints(f.read())",
"def load(self, file_):\n\n program = []\n\n with open(f'ls8/examples/{file_}') as f:\n for line in f:\n line = line.split(\"#\")\n try:\n v = int(line[0], 2)\n program.append(v)\n except ValueError:\n continue\n\n for instruction in program:\n self.ram_write(instruction, self.address)\n self.address += 1\n\n self.address = 0",
"def read_numbers(fname='input.txt'):\n with open(fname) as fp:\n # we use a set: efficient for quick lookup, and we don't care about order or duplicates\n numbers = set(int(num) for num in fp.readlines())\n return numbers",
"def load(self, filename):\n\n with open(filename) as f:\n\n for val in f:\n\n val = val.strip().split(\"#\", 1)[0]\n\n if val == '':\n continue\n\n val = int(val, 2)\n self.ram[self.address] = val\n self.address += 1\n\n if len(sys.argv) != 2:\n print(\"Expected Usage: ls8.py [filename-to-run]\")\n sys.exit(1)\n\n if ValueError:\n pass",
"def data_parser(filepath):\n d = [int(line) for line in open(filepath)]\n return (int(s) for s in d)",
"def LoadListFile(file):\n\tlst = []\n\ttry:\n\t\twith open(file,'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.rstrip()\n\t\t\t\tlst.append(line)\n\texcept:\n\t\treturn []\n\treturn lst",
"def load_input(filepath: str) -> list:\n lines = []\n with open(filepath, \"r\", encoding=\"utf-8\") as file:\n for line in file.readlines():\n lines.append(line.strip())\n return lines",
"def loadData(self, file):\n self.data = batchImport(file, self.ps)"
] | [
"0.72228956",
"0.71041244",
"0.7010073",
"0.6906203",
"0.67904806",
"0.67290246",
"0.6650171",
"0.66440064",
"0.66392255",
"0.66032666",
"0.65971136",
"0.65716565",
"0.6549218",
"0.65480024",
"0.6500772",
"0.64960253",
"0.64960253",
"0.6484692",
"0.64831895",
"0.64763206",
"0.64515764",
"0.6451013",
"0.6428771",
"0.6406302",
"0.63112104",
"0.6284319",
"0.6269514",
"0.62673295",
"0.624114",
"0.62231016"
] | 0.73727155 | 0 |
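A minimal usage sketch for the load_input document above; the file name "day7.txt" and its contents are assumptions, not part of the source.

# Hypothetical driver: assumes the input file holds one line such as "16,1,2,0,4,2,7,1,2,14".
if __name__ == "__main__":
    numbers = load_input("day7.txt")  # hypothetical path
    print(len(numbers), "values, min:", min(numbers), "max:", max(numbers))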
Prints some additional node/edge sets: Set hide(node) (hidden nodes), Set novelGene(node) (unknown + hidden), Set hit(node) (hits, hidden if hiding hits), Set intNode(node) (interfaces, hidden if hiding interfaces), Set intEdge(edge) (interface edges in paths) | def print_node_edge_sets(labels, aside, paths, mode, outf):
#print_gams_set("hide(node)", "hidden nodes", aside)
#print ""
# genes without labels
novel=set.union(labels["unknown"], aside)
print_gams_set("novelGene(node)", "unlabeled or hidden genes", novel, out=outf)
outf.write("\n")
# interface nodes and edges - assume we've taken care of hiding
# them according to the mode by now
hits=set()
intNodes=set()
intEdges=set()
# { pathfinder : { pid : { "nodes":[], "edges":[] } } }
for pf in paths:
for pid in paths[pf]:
hits.add(paths[pf][pid]["nodes"][0])
intNodes.add(paths[pf][pid]["nodes"][-2])
intEdges.add(paths[pf][pid]["edges"][-1])
print_gams_set("hit(node)", "hits", hits, out=outf)
outf.write("\n")
print_gams_set("intNode(node)", "interface nodes", intNodes, out=outf)
outf.write("\n")
print_gams_set("intEdge(edge)", "interface edges", intEdges, out=outf)
outf.write("\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_edges(self):\n for element in self.graph:\n print(element, self.graph[element])",
"def output(self):\n\t\t# Sort graph nodes by id\n\t\tnodes = list(self.nodes.values())\n\t\tnodes.sort(key=lambda n:n.id)\n\n\t\tfor n in nodes:\n\t\t\t# Get all edges\n\t\t\tedges = []\n\t\t\tfor edge in n.neighbours:\n\t\t\t\tfor neighbour in n.get_neighbours(edge):\n\t\t\t\t\tedges.append((neighbour.id, edge))\n\t\t\tedges.sort()\n\n\t\t\t# Format edges\n\t\t\tformatted = []\n\t\t\tfor edge in edges:\n\t\t\t\tformatted.append(\"%s:%s\" % (edge[0], edge[1] or \"\"))\n\n\t\t\t# Print format\n\t\t\tprint(\"%s [%s]\" % (n, \", \".join(formatted)))",
"def graphviz_prettify(self, network):\n graph_settings = {\n 'rankdir': 'LR',\n 'dpi': 60,\n }\n network.graph.update(graph_settings)\n\n for n in network.nodes():\n if isinstance(n, Variable):\n network.nodes[n]['label'] = n.name\n elif isinstance(n, Equation):\n network.nodes[n]['shape'] = 'diamond'",
"def print_model_graph(self, name=None, agent=([], [], [])):\n dot = pygraphviz.AGraph(directed=\"True\")\n for outp in list(self.outputs.keys()):\n dot.add_node(outp, pos=(outp[1:] + \",10\"), color=\"red\", label=outp + \", \" + str(self.outputs[outp].taking.size) + \"-\" + self.outputs[outp].taking.type)\n for inp in list(self.inputs.keys()):\n dot.add_node(inp, pos=(inp[1:] + \",0\"), color=\"blue\", label=inp + \", \" + str(self.inputs[inp].producing.size) + \"-\" + self.inputs[inp].producing.type)\n for comp in list(self.networks.keys()):\n dot.add_node(comp, label=comp + \"-\" + str(type(self.networks[comp].descriptor).__name__)[:-14] + \":\" + str(self.networks[comp].taking.size) + \"-\" + str(self.networks[comp].producing.size))\n\n for c in self.connections:\n con = self.connections[c]\n if self.conn_in_agent(con, agent[0]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"blue\")\n elif self.conn_in_agent(con, agent[1]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"red\")\n elif self.conn_in_agent(con, agent[2]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"green\")\n else:\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"black\")\n dot.layout('dot')\n if not name:\n name = str(hash(self))\n dot.draw(name + '.pdf')",
"def print_graph() -> None:\n raise NotImplementedError",
"def plot_network(graph, chars = None, show_all = False, set_width = None, output='plot'):\n if chars is not None:\n graph = graph.subgraph(chars)\n\n scaled = scale_edge_weights(graph)\n pos = nx.spring_layout(graph, k =.75 , seed = 1)\n\n #Add edges\n edge_traces, edge_text_trace = make_edges(scaled, pos, graph, show_all, set_width)\n\n #Add nodes\n node_xs = [pos[node][0] for node in scaled.nodes()]\n node_ys = [pos[node][1] for node in scaled.nodes()]\n node_text = ['<b>'+node.capitalize() for node in scaled.nodes()]\n node_hovertext = []\n for node in graph.nodes():\n node_hovertext.append(node.capitalize() + ': '+ str(graph.nodes()[node]['size']) + ' appearances')\n node_trace = go.Scatter(x = node_xs,\n y = node_ys,\n text = node_text,\n textposition = \"bottom center\",\n textfont_size = 14,\n mode = 'markers+text',\n hovertext = node_hovertext,\n hoverinfo = 'text',\n marker = dict(color = 'black',#'#6959CD',\n size = 15,\n line = None))\n layout = go.Layout(paper_bgcolor='rgba(0,0,0,0)',plot_bgcolor='rgba(0,0,0,0)')\n fig = go.Figure(layout = layout)\n\n for trace in edge_traces:\n fig.add_trace(trace)\n fig.add_trace(node_trace)\n fig.add_trace(edge_text_trace)\n\n fig.update_layout(showlegend = False, width = 1000, height = 1200)\n fig.update_xaxes(showticklabels = False)\n fig.update_yaxes(showticklabels = False)\n\n if output == 'plot':\n fig.show()\n elif output == 'return':\n return fig\n elif output == 'save':\n fig.write_image('graph.png')\n else:\n fig.show()",
"def printGraph(self):\n print \"-----\"\n for feature in self.features:\n feature.printFeature()\n for constraint in self.constraints:\n constraint.printConstraint()\n print \"-----\"",
"def multi_edge():\n from networkx.readwrite import json_graph\n import networkx as nx\n import autonetkit\n # returns a house graph\n data = {'directed': False,\n 'graph': [],\n 'links': [{'_ports': {'r4': 2, 'r5': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 1},\n {'_ports': {'r2': 3, 'r4': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r2': 4, 'r4': 3},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r3': 3, 'r5': 2},\n 'raw_interfaces': {},\n 'source': 1,\n 'target': 4},\n {'_ports': {'r1': 1, 'r2': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 3, 'r2': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 2, 'r3': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 4, 'r3': 4},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 5, 'r3': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r2': 2, 'r3': 2},\n 'raw_interfaces': {},\n 'source': 3,\n 'target': 4}],\n 'multigraph': True,\n 'nodes': [{'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r4 to r5', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth2'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r4',\n 'label': 'r4',\n 'x': 675,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r5 to r4', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r5 to r3', 'id': 'eth1'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r5',\n 'label': 'r5',\n 'x': 675,\n 'y': 500},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r1',\n 'label': 'r1',\n 'x': 350,\n 'y': 400},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r2 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r2',\n 'label': 'r2',\n 'x': 500,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r3 to r2', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r3 to r5', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r3',\n 'label': 'r3',\n 'x': 500,\n 'y': 500}]}\n graph = json_graph.node_link_graph(data)\n anm = autonetkit.anm.NetworkModel()\n g_in = anm.add_overlay(\"input\")\n g_in._replace_graph(nx.MultiGraph(graph))\n # TODO: check if should build overlays here rather 
than clone in?\n g_phy = anm[\"phy\"]\n g_phy._replace_graph(graph)\n return anm",
"def dgInfo(*args, allNodes: bool=True, connections: bool=True, dirty: bool=True, nodes:\n bool=True, nonDeletable: bool=True, outputFile: AnyStr=\"\", propagation: bool=True,\n short: bool=True, size: bool=True, subgraph: bool=True, type: AnyStr=\"\",\n **kwargs)->None:\n pass",
"def render(self): # pragma: no cover\n from graphviz import Digraph\n dot = Digraph(name=\"top\")\n for block in self.blocks:\n if isinstance(block, Branch):\n label = \"if \" + astor.to_source(block.cond)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"invhouse\"})\n elif isinstance(block, Yield):\n label = astor.to_source(block.value)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"oval\"})\n elif isinstance(block, BasicBlock):\n label = \"\\n\".join(astor.to_source(stmt).rstrip() for stmt in block.statements)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"box\"})\n elif isinstance(block, HeadBlock):\n label = \"Initial\"\n dot.node(str(id(block)) + \"_start\", label.rstrip(), {\"shape\": \"doublecircle\"})\n label = \"\\n\".join(astor.to_source(stmt).rstrip() for stmt in block.initial_statements)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"box\"})\n dot.edge(str(id(block)) + \"_start\", str(id(block)))\n else:\n raise NotImplementedError(type(block))\n # for source, sink, label in self.edges:\n for sink, label in block.outgoing_edges:\n dot.edge(str(id(block)), str(id(sink)), label)\n\n\n file_name = tempfile.mktemp(\"gv\")\n dot.render(file_name, view=True)\n # with open(\"cfg.dot\", \"w\") as file:\n # file.write(dot.source)\n # exit()",
"def print(self):\n dot = \"digraph G {\\nrankdir = UD\\n\"\n\n for i in range(len(self.allNodes)):\n if self.allNodes[i].left is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].left.key) + \"\\n\"\n if self.allNodes[i].right is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].right.key) + \"\\n\"\n\n dot += \"}\"\n\n file = open(\"outputfiles/BinTree.dot\", \"w\")\n file.write(dot)\n file.close()\n\n os.system(\"dot outputfiles/BinTree.dot -Tpng -o outputfiles/BinTree.png\")",
"def PrintGraph(self):\n # print(\"Graph has {} nodes and {} edges.\".format(Node.count, Edge.count))\n # print(\"Unique connected nodes:\")\n # for (a, b) in self.connections:\n # print(\"{},{}\".format(a.index, b.index))\n\n # print(f\"\\nAll edges : {[e.index for e in self.edges]}\")\n\n # print(\"\\nDegree of nodes\")\n\n # for node in self.nodes:\n # print(f\"D of {node.index} = {len(node.neighbours)}\")\n\n for node in self.nodes:\n print(\"{}. ({}, {})\".format(node.index, node.x, node.y))",
"def dump_graph(self):\n # TODO\n return",
"def OutputDot(self, output):\n sorted_nodes = [n for n in self._graph.Nodes(sort=True)]\n self._global_start = min([n.StartTime() for n in sorted_nodes])\n visited_nodes = set([n for n in sorted_nodes])\n\n output.write(\"\"\"digraph dependencies {\n rankdir = LR;\n \"\"\")\n\n orphans = set()\n for n in sorted_nodes:\n for s in itertools.chain(n.Node().Successors(),\n n.Node().Predecessors()):\n if s in visited_nodes:\n break\n else:\n orphans.add(n)\n if orphans:\n output.write(\"\"\"subgraph cluster_orphans {\n color=black;\n label=\"Orphans\";\n \"\"\")\n for n in orphans:\n # Ignore synthetic nodes for orphan display.\n if not self._graph.NodeInfo(n).Request():\n continue\n output.write(self.DotNode(n))\n output.write('}\\n')\n\n output.write(\"\"\"subgraph cluster_nodes {\n color=invis;\n \"\"\")\n\n for n in sorted_nodes:\n if n in orphans:\n continue\n output.write(self.DotNode(n))\n\n for n in visited_nodes:\n for s in n.Node().Successors():\n if s not in visited_nodes:\n continue\n style = 'color = orange'\n label = '%.02f' % self._graph.EdgeCost(n, s)\n annotations = self._graph.EdgeAnnotations(n, s)\n edge_kind = annotations.get(\n loading_model.ResourceGraph.EDGE_KIND_KEY, None)\n assert ((edge_kind is None)\n or (edge_kind in loading_model.ResourceGraph.EDGE_KINDS))\n style = 'color = %s' % self._EDGE_KIND_TO_COLOR[edge_kind]\n if edge_kind == 'timing':\n style += '; style=dashed'\n if self._graph.EdgeCost(n, s) > self._LONG_EDGE_THRESHOLD_MS:\n style += '; penwidth=5; weight=2'\n\n label = '%.02f' % self._graph.EdgeCost(n, s)\n if 'activity' in annotations:\n activity = annotations['activity']\n separator = ' - '\n for activity_type, activity_label in self._ACTIVITY_TYPE_LABEL:\n label += '%s%s:%.02f ' % (\n separator, activity_label, activity[activity_type])\n separator = ' '\n arrow = '[%s; label=\"%s\"]' % (style, label)\n output.write('%d -> %d %s;\\n' % (n.Index(), s.Index(), arrow))\n output.write('}\\n')\n\n output.write('}\\n')",
"def show_graph(self):\n print(f'|V| = {self.V}, |E| = {self.E}')\n for n in range(1, self.V+1):\n print(f'[{n}] -> {self.adjacency_list[n]}')",
"def main(G): \n try:\n val_map = {'A': 1.0,\n 'D': 0.5714285714285714,\n 'H': 0.0}\n values = [val_map.get(node, 0.45) for node in G.nodes()]\n edge_colors = 'k'\n \n edge_labels=dict([((u,v,),d['weight'])\n for u,v,d in G.edges(data=True)])\n pos=nx.spring_layout(G) # positions for all nodes \n nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels)\n nx.draw(G,pos, node_color = values, node_size=15,edge_color=edge_colors,edge_cmap=plt.cm.Reds)\n pylab.show()\n\n for ite in range(len(G.nodes())):\n \n Iterations = ite \n SL = SIG.Single_linkage(G, Iterations)\n pos=nx.spring_layout(G) # positions for all nodes\n node_colors = ['b','g','r','y','c','k','m','w']\n for i in range(len(G)):\n node_colors.append('w')\n \n # nodes\n C_list = SL.fit_predict(G)[-1,:]\n for Clust in range(C_list.shape[1]):\n nx.draw_networkx_nodes(G,pos,\n nodelist = list(C_list[0,Clust]),\n node_color=node_colors[Clust],\n node_size=80,\n alpha=0.8)\n \n # edges\n nx.draw_networkx_edges(G,pos,width=1.0,alpha=0.5)\n nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels)\n \n plt.axis('off')\n plt.savefig(\"labels_and_colors.png\") # save as png\n plt.show() # display\n print \"in level :\",ite \n print SL.__str__()\n\n\n except SIG.Single_linkage_Error:\n \n print( \"Got an imput error, please change the input and try it again.\" )",
"def graph_cmd(args):\n\n # charge graphviz\n Digraph = load_graphviz()\n\n view = True\n for i in args:\n if i == \"noview\":\n view = False\n args.remove(i)\n break\n\n if len(args) > 0:\n if len(args) >= 2:\n r = requete('NeMo.Intf.%s:getMIBs' % args[0], { \"traverse\":args[1], \"mibs\":\"base\" })\n else:\n r = requete('NeMo.Intf.%s:getMIBs' % args[0], { \"mibs\":\"base\" })\n else:\n r = requete('NeMo.Intf.lo:getMIBs', { \"traverse\":\"all\", \"mibs\":\"base\" })\n if r is None: return\n if not 'status' in r or not 'base' in r['status']: return\n r = r['status']['base']\n\n dot = Digraph(name='NeMo.Intf', format='svg', engine='dot')\n\n dot.attr('node', fontname='Helvetica')\n #dot.attr('node', fontname='Times-Roman')\n\n for i, node in r.items():\n #dot.attr('node', tooltip=v['Flags'] if 'Flags' in v else '')\n if 'Enable' in node:\n if node['Enable'] == True:\n dot.node(i, shape='box')\n else:\n dot.node(i, shape='ellipse', color='lightgrey')\n else:\n dot.node(i, shape='box', color='lightgrey')\n\n for i, v in r.items():\n for j in v['LLIntf']:\n dot.edge(i, j)\n\n dot.render(filename=\"nemo_intf.gv\", view=view)",
"def print_out_unexplained(self):\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['unexplained_flow']\n print(\"({} {}) unexplained flow={}, edgeId={}\".format(s, t, w,\n arc))",
"def setDisplayWireframe():\n for node in nuke.allNodes():\n print node.name()\n goodGeo = [\"Group\", \"ReadGeo\",\"ReadGeo2\",\"Sphere\",\"Cube\",\"Cylinder\",\"Card\", \"Card2\"]\n if node.Class() in goodGeo:\n if node.Class() == \"Group\":\n node.begin()\n for child in nuke.allNodes():\n if child.Class() in goodGeo:\n child['display'].setValue(1)\n node.end()\n else:\n node['display'].setValue(1)",
"def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()",
"def visitEdges(self) -> None:\n\n for node in self.nodesMap_.values():\n for nodeInput in node.get_inputs():\n i = nodeInput[0]\n if i.get_name() not in self.nodesMap_:\n print(i.get_kind_name(), i.get_name())\n edgeStr = self.get_unique_vertex_name(i) + \":Outputs -> \"\n edgeStr += self.get_unique_vertex_name(node) + \":Inputs\"\n self.edges_.append(edgeStr)",
"def dump(self, mark='----'):\n print(mark)\n node = self.head\n while node:\n print(node, \" \", end='')\n node = node.next\n print()",
"def graphviz(A,B):\r\n N = len(A) \r\n sc = 1.0 / len(B)\r\n \r\n print \"digraph AdjacencyMatrix {\"\r\n print \"\\tgraph[label=\\\"Graph representing the weight of the edge and adjacent\\\",labelloc =t];\"\r\n for i in range(N):\r\n #p = colorsys.hsv_to_rgb(sc*i,1.0,1.0)\r\n p = colorsys.hsv_to_rgb(sc*B[i],1.0,1.0)\r\n print \"\\t%s [style = filled, color=\\\"#000000\\\" fillcolor = \\\"#%s%s%s\\\"];\" \\\r\n % (i+1, \"00\" if p[0] == 0.0 else hex(int(p[0]*255)).replace(\"0x\",\"\"), \\\r\n \"00\" if p[1] == 0.0 else hex(int(p[1]*255)).replace(\"0x\",\"\"), \\\r\n \"00\" if p[2] == 0.0 else hex(int(p[2]*255)).replace(\"0x\",\"\") )\r\n for j in range(N): \r\n if i != j and A[i,j] != 0.0:\r\n print \"\\t%s->%s\\t[label=\\\"%s\\\",color=\\\"%s\\\"];\" \\\r\n % (j+1, i+1, A[i,j], \"red\" if A[i,j] < 0.0 else \"blue\")\r\n print \"}\"",
"def print_nice(self):\n print(\"- \" + str(self.__node_a.name) + \" (\" + self.__node_a.get_value_string() +\n \") -> \" + str(self.__node_b.name) + \" (\" + self.__node_b.get_value_string() + \")\")",
"def visualize(self):\n dot = Graph()\n \n for k, v in self.vs.items():\n if v.observed:\n dot.node(v.word, style=\"filled\")\n else:\n dot.node(v.word)\n\n for i, (k, v) in enumerate(self.fs.items()):\n dot.node(str(i), shape=\"square\", style=\"bold\")\n s, t = k[1], k[3]\n dot.edge(s, str(i))\n dot.edge(t, str(i))\n \n print dot.source\n #src.render('test-output/holy-grenade.gv', view=True)",
"def print_out(self):\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n lb = self.arc_info[arc]['lower_bound']\n u = self.arc_info[arc]['upper_bound']\n print(\"{} {} {} {} flow={}, edgeId={}\".format(s, t, lb, u, w,\n arc))",
"def main():\n\n \"\"\"\n nodes, hd3 = erdos_rennie_like(100,8333,5)\n export('d3',hd3)\n\n nodes, hd5 = erdos_rennie_like(100,8333,6)\n export('d5',hd5)\n\n nodes, hd6 = erdos_rennie_like(100,8333,7)\n export('d6',hd6)\n \"\"\"\n\n \"\"\"\n nodes, sparse1 = erdos_rennie_like(600, 1200, 3)\n export('sparse_diag1', sparse1)\n\n nodes, sparse2 = erdos_rennie_like(600, 2400, 3)\n export('sparse_diag2',sparse2)\n\n nodes, sparse3 = erdos_rennie_like(600, 5800, 3)\n export('sparse_diag3',sparse3)\n\n nodes, sparse4 = erdos_rennie_like(600,11600, 3)\n export('sparse_diag4',sparse4)\n\n nodes, sparse5 = erdos_rennie_like(600,23200, 3)\n export('sparse_diag5',sparse5)\n \"\"\"\n\n nodes, size1 = erdos_rennie_like(100, 500, 3)\n nodes, size2 = erdos_rennie_like(200,1000,3)\n nodes,size3 = erdos_rennie_like(300,1500,3)\n nodes,size4 = erdos_rennie_like(400,2000,3)\n nodes,size5 = erdos_rennie_like(500,2500,3)\n\n export('size_diag1',size1)\n export('size_diag2',size2)\n export('size_diag3',size3)\n export('size_diag4',size4)\n export('size_diag5',size5)",
"def show_neighborhood(self, max_dist=3, detailed=True):\n dotstr = ''\n for node in self.neighbors(max_dist):\n if node is self:\n dotstr += node.dot(color='dodgerblue', detailed=detailed)\n else:\n dotstr += node.dot(detailed=detailed)\n dotstr = 'digraph hypergraph {\\nrankdir=BT\\n%s}\\n' % dotstr\n f = open('/tmp/dotty', 'w')\n f.write(dotstr)\n f.close()\n os.system('cat /tmp/dotty | dot -Tgif > /tmp/dotty.gif')\n os.system('eog /tmp/dotty.gif')",
"def print(self):\n for i, v in enumerate(self._adj):\n if v:\n print(\"vertex {0}\".format(i))\n for e in v:\n print(e)\n print()",
"def generateGraph(mids, chaptersField, labelsField):\n output = \"digraph G { \\n\"\n # On ne traite que les chapitres qui ont actives le graphe\n chapts = chapters.graphChapters()\n # le dico nodes contient une liste pour chaque chapitre. Chaque liste\n # contient tous les neuds (un par note) presents dans ce chapitre, et\n # representes par des tuples (noteId, label)\n nodes = {}\n for mid in mids:\n chapterField = chaptersField[mid]\n labelField = labelsField[mid]\n for id, flds in mw.col.db.execute(\"\"\"\n SELECT id, flds FROM notes WHERE mid=%d\n \"\"\" % mid):\n fields = splitFields(flds)\n chapter = fields[chapterField]\n if not chapter in chapts:\n continue\n label = fields[labelField]\n if(not chapter in nodes):\n nodes[chapter] = []\n nodes[chapter].append((id, label))\n # On genere les noeuds, dans des clusters (un par chapitre)\n notes = []\n for chap in nodes:\n output += \"\"\"subgraph cluster_%d {\n node [style=filled];\n label = \"%s\";\n color=blue;\n \"\"\" % (chapts[chap], chap)\n for n in nodes[chap]:\n output += \"\"\"n%d [label=\"%s\", URL=\"%d\"];\\n\"\"\" % (n[0], n[1], n[0])\n notes.append(n)\n output += \"\"\"\n }\\n\"\"\"\n # Puis on ajoute tous les liens ..\n for n in notes:\n for nid in mw.col.db.execute(\"\"\"SELECT N.noteId FROM `PATH.links` AS L\n JOIN `PATH.match` AS M ON M.id = L.matchId\n JOIN `PATH.nodes` AS N ON M.nodeId = N.id\n WHERE L.noteId = %d\"\"\" % (n[0])):\n output += \"\"\"n%d -> n%d;\\n\"\"\" % (nid[0], n[0])\n output += \"}\"\n generateGraphImage(output)"
] | [
"0.6214952",
"0.613745",
"0.60090965",
"0.59988785",
"0.581472",
"0.58066213",
"0.57610726",
"0.5760247",
"0.573816",
"0.5723644",
"0.5704321",
"0.56841236",
"0.5680305",
"0.5650115",
"0.5615147",
"0.5604487",
"0.56031686",
"0.5579508",
"0.5568939",
"0.5564694",
"0.55471396",
"0.55429965",
"0.5539352",
"0.5539163",
"0.5532734",
"0.5532356",
"0.55206543",
"0.55163765",
"0.5513975",
"0.54750425"
] | 0.767322 | 0 |
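The document above depends on a print_gams_set helper that is not shown in this record; the following is a minimal sketch of what such a helper might look like, and the exact GAMS set syntax it emits is an assumption.

import sys

def print_gams_set(set_decl, comment, members, out=None):
    # set_decl is e.g. "hit(node)"; members is an iterable of element names.
    # The emitted layout is a guess at a valid GAMS set declaration.
    out = out or sys.stdout
    out.write("* %s\n" % comment)
    out.write("Set %s /\n" % set_decl)
    out.write(",\n".join(sorted(members)))
    out.write("\n/;\n")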
Open the projects page if it isn't already open. | def _page_projects(self):
return self._open(self.app.page_projects) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def openproject():\n\n # POST\n if request.method == \"POST\":\n\n # Validate form submission\n if not request.form.get(\"projectname\"):\n return apology(\"missing project name\")\n elif not request.form.get(\"link\"):\n return apology(\"missing project link\")\n\n\n # Record project in the database\n db.execute(\"\"\"INSERT INTO projects (projectname, link)\n VALUES(:projectname, :link)\"\"\", projectname=request.form.get(\"projectname\"), link=request.form.get(\"link\"))\n\n # Display that the project has been opened\n flash(\"Opened!\")\n return redirect(\"/\")\n\n # GET\n else:\n return render_template(\"openproject.html\")",
"def __projectOpened(self):\n if self.__e5project.getProjectType() == \"Django\":\n projectAct = self.__ui.getMenuBarAction(\"project\")\n actions = self.__ui.menuBar().actions()\n insertAct = actions[actions.index(projectAct) + 1]\n self.__mainAct = self.__ui.menuBar().insertMenu(\n insertAct, self.__mainMenu)",
"def _open_project(project):\n api_segment = '/_apis/'\n pos = project.url.find(api_segment)\n if pos >= 0:\n url = project.url[:pos + 1] + uri_quote(project.name)\n logger.debug('Opening web page: %s', url)\n webbrowser.open_new(url=url)\n else:\n raise CLIError(\"Failed to open web browser, due to unrecognized url in response.\")",
"def project(request, proj_id=None, scenario_id=None):\n\n if proj_id:\n project = get_object_or_404(Project, id=proj_id)\n\n if project.user != request.user and project.is_private:\n raise Http404\n\n return render_to_response('home/home.html', get_context(request))",
"def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )",
"def open_project(self):\n self.logger.debug(\"Open project dialog\")\n directory = QFileDialog.getExistingDirectory(self, translate(\"MainWindow\", \"Open project\"),\n ConfigHandler.cfg.dev_dir, QFileDialog.ShowDirsOnly)\n\n # sometimes window disappears into background, force to front\n self.activateWindow()\n\n if not directory == \"\":\n self.logger.info(\"Chosen existing project directory: \" + directory)\n self._parent.load_project(directory)\n else:\n self.logger.debug(\"Dialog aborted.\")",
"def open_projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user, open=True)",
"def openProject(self):\n path, filter = QtGui.QFileDialog.getOpenFileNames(self, str(self.tr(\"Open a project\")), \"/home/cecilia/\", self.tr(\"EWP project (*.ewp)\"))\n savedPath = path[0].encode(\"utf-8\")\n self._app.loadSavedFile(savedPath)",
"def open_project(self, project_path):\n self.clear()\n self.open_directory(project_path)\n if self.show_libraries:\n self.add_libraries()",
"def __CB_ProjectOpen(self, *args):\r\n \r\n self.ConnectToProject()",
"def projectOpened(self):\n for editor in self.editors:\n editor.projectOpened()\n \n self.__editProjectPwlAct.setEnabled(True)\n self.__editProjectPelAct.setEnabled(True)",
"def view_project(self, pathString, window):\r\n\r\n kT.debug_log('This is the project path: ' + pathString)\r\n\r\n if self.newProj.osType == 'Windows':\r\n subprocess.call(['explorer', pathString])\r\n elif self.newProj.osType == 'Linux':\r\n subprocess.Popen(['xdg-open', pathString])\r\n elif self.newProj.osType == 'Darwin':\r\n subprocess.Popen(['open', pathString])\r\n\r\n window.destroy()\r\n\r\n return",
"def od_open_clicked(self, widget, data=None):\n filename = self.open_chooser.get_filename()\n self.communicator.load_project(filename)",
"def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())",
"def open_project_activated(self):\n\n action = self.sender()\n\n # check if current observation\n if self.observationId:\n if dialog.MessageDialog(programName, \"Existe uma observação atual. O que você quer fazer?\",\n [\"Fechar observação\", \"Continuar observação\"]) == \"Fechar observação\":\n self.close_observation()\n else:\n return\n\n if self.projectChanged:\n response = dialog.MessageDialog(programName, \"O que fazer com o projeto atual?\",\n [SAVE, DISCARD, CANCEL])\n\n if response == SAVE:\n if self.save_project_activated() == \"not saved\":\n return\n\n if response == CANCEL:\n return\n\n if action.text() == \"Abrir projeto\":\n fn = QFileDialog(self).getOpenFileName(self, \"Open project\", \"\", \"Project files (*.eMOC);;All files (*)\")\n fileName = fn[0] if type(fn) is tuple else fn\n\n else: # recent project\n fileName = action.text()\n\n if fileName:\n project_path, project_changed, pj, msg = project_functions.open_project_json(fileName)\n\n if \"error\" in pj:\n logging.debug(pj[\"error\"])\n QMessageBox.critical(self, programName, pj[\"error\"])\n else:\n if msg:\n QMessageBox.information(self, programName, msg)\n\n self.load_project(project_path, project_changed, pj)",
"def projects():\n \n if 'username' in session:\n current_user = mongo.db.user.find_one({'username': session['username']}) \n projects = mongo.db.projects.find().sort('date',pymongo.DESCENDING)\n return render_template('pages/projects.html', title='Projects', projects=projects, current_user=current_user)\n \n flash('Please login to view user projects.', 'warning')\n return redirect(url_for('login'))",
"def show_project():\n\n title = request.args.get('title')\n\n title, description, grade = hackbright.get_project_by_title(title)\n\n grade_list = hackbright.get_grades_by_title(title)\n\n html = render_template(\"project.html\", title=title,\n description=description, grade=grade,\n grade_list=grade_list)\n\n return html",
"def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. \"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)",
"def test_open(self):\n page, resources = self.ghost.open(base_url)\n self.assertEqual(page.url, base_url)\n \n self.ghost.click(\"#run\")",
"def open_project(self, rootdir=None, filename=None):\n if None in [rootdir, filename]:\n if rootdir is None:\n rootdir = '~'\n filepath = Ui.instance().select_file(startdir=rootdir,\n extensions='Weld project files (*%s)' % Project.file_extension,\n label='Select a weld project')\n if filepath is None:\n Ui.instance().show_status('project opening is aborted')\n return\n rootdir, filename = os.path.split(filepath)\n else:\n if not os.path.exists(rootdir):\n self.current_project_path = None\n s = 'invalid project path:', rootdir\n print >> sys.stderr, s\n Ui.instance().show_status(s)\n return\n if not os.path.exists(os.path.join(rootdir, filename + Project.file_extension)):\n self.current_project_path = None\n s = 'can\\'t locate project file \\'%s\\' inside \\'%s\\'' % (filename, rootdir)\n print >> sys.stderr, s\n Ui.instance().show_status(s)\n return\n\n print 'Weld.open_project \\'%(filename)s in %(rootdir)s' % locals()\n project = Project(rootdir)\n project.load()\n\n self.project = project\n self.current_project_path = rootdir\n Ui.instance().set_resources_draggable(True)\n Ui.instance().show_status('project %s opened' % (filename))",
"def test_project_view(self):\n response = self.client.get('/projects/')\n self.assertEqual(response.status_code, 200)",
"def test_project_view(self):\n with self.app.app_context():\n p = project(save=True)\n\n response = self.client.get('/project/%s' % p.slug)\n eq_(response.status_code, 200)\n\n response = self.client.get('/project/not-a-real-project')\n eq_(response.status_code, 404)",
"def apache_projects():\n display = Display(visible=0, size=(800, 800)) \n display.start()\n # path to where I have chrome driver installed\n path_to_chromedriver = '/usr/local/bin/chromedriver'\n # initialize the driver\n driver = webdriver.Chrome(executable_path=path_to_chromedriver)\n # go to the apache projects page\n driver.get('https://projects.apache.org/projects.html')\n # wait for the list of projects to load\n time.sleep(2)\n\n # get the HTML element with id list\n elem = driver.find_element_by_id('list')\n project_list = elem.text.split(\"\\n\")\n # initialize an instance of Projects\n projects = Projects()\n\n for i in range(1, len(project_list) + 1):\n # Get the url of each project\n project_xpath = '//*[@id=\"list\"]/ul/li[%d]/a' %i\n # Get the HTML element that for the current project\n project_link = driver.find_element_by_xpath(project_xpath)\n project_name = project_link.text\n\n # Open the project page\n driver.get(project_link.get_attribute(\"href\"))\n # Wait for project page to load\n time.sleep(0.5)\n\n inception = get_inception(driver)\n description = get_description(driver, project_name)\n\n # get the name without \"Apache\", make it lowercase, and add dashes\n stripped_name = \"-\".join(project_name.lower().split(\" \")[1:]).encode('utf-8')\n github_mirror = \"http://github.com/apache/\" + stripped_name\n\n # see if there's anything at the github url that was generated\n resp = httplib2.Http().request(github_mirror, 'HEAD')\n # this means the github repo with the parsed url doesn't exist\n if int(resp[0]['status']) >= 400:\n github_mirror = \"N/A\"\n\n # Add extra attributes to the JSON\n description[\"github\"] = github_mirror\n description[\"company\"] = \"Apache Software Foundation\"\n description[\"name\"] = project_name\n description[\"day\"] = inception[\"day\"]\n description[\"month\"] = inception[\"month\"]\n description[\"year\"] = inception[\"year\"]\n\n projects.add(project_name, description)\n\n # Reset the driver\n driver.get('https://projects.apache.org/projects.html')\n time.sleep(0.8)\n\n return projects",
"def on_projectButton_clicked(self):\n self.__enableFindButton()",
"def _on_click_browse_to_pt_project(self):\n pass",
"def index():\n return render_template('project.html')",
"def switch_project(self, project_name, check=True):\n with self.app.page_base.dropdown_menu_project as menu:\n\n if menu.label_project.value == project_name:\n self.app.current_project = project_name\n return\n\n menu.click()\n menu.item_project().click()\n self.app.current_project = project_name\n\n if check:\n self.close_notification('success')\n assert_that(menu.label_project.value, equal_to(project_name))",
"def open(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/open\"\n\n _response = self.connector.http_call(\"post\", _url)\n\n # Update object\n self._update(_response.json())",
"def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})",
"def projects(update_db=False):\n try:\n if not os.path.isfile(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/project_screen.html\"\n ):\n update(update_db=update_db)\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/project_screen.html\", \"r\"\n ) as f:\n whiteboard = f.read()\n\n return whiteboard\n\n except:\n return traceback.format_exc()"
] | [
"0.731947",
"0.70801455",
"0.70608085",
"0.66960037",
"0.6594605",
"0.65892035",
"0.6571664",
"0.65079004",
"0.6379645",
"0.6317886",
"0.6307796",
"0.63034767",
"0.6256844",
"0.61474514",
"0.6115373",
"0.61066955",
"0.60768175",
"0.60427994",
"0.6040594",
"0.603822",
"0.60297734",
"0.602138",
"0.5973245",
"0.596266",
"0.58912194",
"0.586031",
"0.5853774",
"0.58102787",
"0.5786718",
"0.5785013"
] | 0.78490776 | 0 |
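A hypothetical usage of the _page_projects step above inside a UI test; the fixture name and the assertion are assumptions, not from the source.

def test_open_projects_page(projects_steps):      # hypothetical pytest fixture providing the steps object
    page = projects_steps._page_projects()        # opens the projects page only if it is not already open
    assert page.table_projects.rows is not None   # table_projects appears in the sibling filter_projects step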
Step to filter projects. | def filter_projects(self, query, check=True):
page_projects = self._page_projects()
page_projects.field_filter_projects.value = query
page_projects.button_filter_projects.click()
if check:
def check_rows():
is_present = False
for row in page_projects.table_projects.rows:
if not (row.is_present and
query in row.link_project.value):
break
is_present = True
return waiter.expect_that(is_present, equal_to(True))
waiter.wait(check_rows,
timeout_seconds=10,
sleep_seconds=0.1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name",
"def _get_filtered_projects(filters):\n projects_itr = (projects_lib.get_filtered(f) for f in filters)\n return itertools.chain.from_iterable(projects_itr)",
"def filter_projects():\n with open('../results/01.crawling/01.project_ci_services.json', 'r') as infile:\n projects = json.load(infile)\n tr_projects = []\n for project, value in projects.items():\n if \"GitHub\" in value or \"Travis\" in value:\n tr_projects.append(project)\n return tr_projects",
"def test_get_project_list_with_page_filter(self):\n # Add test projects.\n projects = [\n add_project(title=str(i), description=str(i)) for i in range(10)\n ]\n pages = {\n 1: projects[5:],\n 2: projects[:5],\n }\n\n # Check first page results.\n result = get_project_list(page=1)\n first_page_results = result['projects'].object_list\n for first_page_project in pages[1]:\n self.assertTrue(first_page_project in first_page_results)\n self.assertFalse(\n any(project in first_page_results for project in pages[2]))\n\n # Check second page results.\n result = get_project_list(page=2)\n second_page_results = result['projects'].object_list\n self.assertFalse(\n any(project in second_page_results for project in pages[1]))\n for second_page_project in pages[2]:\n self.assertTrue(second_page_project in second_page_results)",
"def project_filter(filename):\n return 'projects' in filename",
"def filter_projects(project_services):\n return [project for project, services in project_services.items() if \"Travis\" in services or \"GitHub\" in services]",
"def filter(self, filters):",
"def project_view(request, project_id):\n\n # Retrieve the project to to be displayed. Raise an error if this project does not exist\n project = get_object_or_404(Projet, id=project_id)\n\n if request.method == 'GET':\n\n filters = Q()\n list_of_key = []\n query_string = request.META['QUERY_STRING']\n query_tab = query_string.split('&')\n filter_id_tab = []\n filter_dic = {}\n\n print(query_tab)\n\n if (query_tab != ['']):\n for query in query_tab:\n query_arg = query.split('=')\n id = query_arg[0]\n\n if not (id in filter_id_tab):\n filter_id_tab.append(id)\n try:\n filter_dic[id].append(query_arg[1])\n except KeyError:\n filter_dic[id] = [query_arg[1]]\n\n for key in request.GET:\n list_of_key.append(key)\n\n print(list_of_key)\n filters = creat_filters_rec(project, filter_dic, filter_id_tab)\n else:\n filters = Q()\n\n #\n # for key in filter_id_tab:\n #\n #\n # entry = filter_dic[key]\n #\n # if (len(entry) != 3):\n # continue\n #\n # filters = add_filter(filters, entry)\n\n tasks = project.task_set.filter(filters).order_by('-priority')\n else:\n # Retrieve all the task of the project and order them\n tasks = project.task_set.all().order_by('-priority')\n\n # Check if the logged in user is allowed to see this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n status = Status.objects.all()\n users = project.members.all()\n return render(request, 'project.html', locals())\n else:\n return redirect(\"projects\")",
"def step_filter(self, qs):\n return qs",
"def __doSearch(self):\n if (\n self.__replaceMode and\n not e5App().getObject(\"ViewManager\").checkAllDirty()\n ):\n return\n \n self.__cancelSearch = False\n \n if self.filterCheckBox.isChecked():\n fileFilter = self.filterEdit.text()\n fileFilterList = [\n \"^{0}$\".format(filter.replace(\".\", r\"\\.\").replace(\"*\", \".*\"))\n for filter in fileFilter.split(\";\")\n ]\n filterRe = re.compile(\"|\".join(fileFilterList))\n \n if self.projectButton.isChecked():\n if self.filterCheckBox.isChecked():\n files = [self.project.getRelativePath(file)\n for file in\n self.__getFileList(\n self.project.getProjectPath(), filterRe)]\n else:\n files = []\n if self.sourcesCheckBox.isChecked():\n files += self.project.pdata[\"SOURCES\"]\n if self.formsCheckBox.isChecked():\n files += self.project.pdata[\"FORMS\"]\n if self.interfacesCheckBox.isChecked():\n files += self.project.pdata[\"INTERFACES\"]\n if self.protocolsCheckBox.isChecked():\n files += self.project.pdata[\"PROTOCOLS\"]\n if self.resourcesCheckBox.isChecked():\n files += self.project.pdata[\"RESOURCES\"]\n elif self.dirButton.isChecked():\n if not self.filterCheckBox.isChecked():\n filters = []\n if self.sourcesCheckBox.isChecked():\n filters.extend(\n [\"^{0}$\".format(\n assoc.replace(\".\", r\"\\.\").replace(\"*\", \".*\"))\n for assoc in list(\n Preferences.getEditorLexerAssocs().keys())\n if assoc not in self.formsExt + self.interfacesExt +\n self.protocolsExt])\n if self.formsCheckBox.isChecked():\n filters.append(self.filterForms)\n if self.interfacesCheckBox.isChecked():\n filters.append(self.filterInterfaces)\n if self.protocolsCheckBox.isChecked():\n filters.append(self.filterProtocols)\n if self.resourcesCheckBox.isChecked():\n filters.append(self.filterResources)\n filterString = \"|\".join(filters)\n filterRe = re.compile(filterString)\n files = self.__getFileList(\n os.path.abspath(self.dirPicker.currentText()),\n filterRe)\n elif self.openFilesButton.isChecked():\n vm = e5App().getObject(\"ViewManager\")\n vm.checkAllDirty()\n files = vm.getOpenFilenames()\n \n self.findList.clear()\n QApplication.processEvents()\n QApplication.processEvents()\n self.findProgress.setMaximum(len(files))\n \n # retrieve the values\n reg = self.regexpCheckBox.isChecked()\n wo = self.wordCheckBox.isChecked()\n cs = self.caseCheckBox.isChecked()\n ct = self.findtextCombo.currentText()\n if reg:\n txt = ct\n else:\n txt = re.escape(ct)\n if wo:\n txt = \"\\\\b{0}\\\\b\".format(txt)\n flags = re.UNICODE\n if not cs:\n flags |= re.IGNORECASE\n try:\n search = re.compile(txt, flags)\n except re.error as why:\n E5MessageBox.critical(\n self,\n self.tr(\"Invalid search expression\"),\n self.tr(\"\"\"<p>The search expression is not valid.</p>\"\"\"\n \"\"\"<p>Error: {0}</p>\"\"\").format(str(why)))\n self.stopButton.setEnabled(False)\n self.findButton.setEnabled(True)\n self.findButton.setDefault(True)\n return\n # reset the findtextCombo\n if ct in self.searchHistory:\n self.searchHistory.remove(ct)\n self.searchHistory.insert(0, ct)\n self.findtextCombo.clear()\n self.findtextCombo.addItems(self.searchHistory)\n Preferences.Prefs.settings.setValue(\n \"FindFileDialog/SearchHistory\",\n self.searchHistory[:30])\n \n if self.__replaceMode:\n replTxt = self.replacetextCombo.currentText()\n if replTxt in self.replaceHistory:\n self.replaceHistory.remove(replTxt)\n self.replaceHistory.insert(0, replTxt)\n self.replacetextCombo.clear()\n self.replacetextCombo.addItems(self.replaceHistory)\n Preferences.Prefs.settings.setValue(\n 
\"FindFileDialog/ReplaceHistory\",\n self.replaceHistory[:30])\n \n if self.dirButton.isChecked():\n searchDir = self.dirPicker.currentText()\n if searchDir in self.dirHistory:\n self.dirHistory.remove(searchDir)\n self.dirHistory.insert(0, searchDir)\n self.dirPicker.clear()\n self.dirPicker.addItems(self.dirHistory)\n Preferences.Prefs.settings.setValue(\n \"FindFileDialog/DirectoryHistory\",\n self.dirHistory[:30])\n \n # set the button states\n self.stopButton.setEnabled(True)\n self.stopButton.setDefault(True)\n self.findButton.setEnabled(False)\n \n # now go through all the files\n self.__populating = True\n self.findList.setUpdatesEnabled(False)\n progress = 0\n breakSearch = False\n occurrences = 0\n fileOccurrences = 0\n for file in files:\n self.__lastFileItem = None\n found = False\n if self.__cancelSearch or breakSearch:\n break\n \n self.findProgressLabel.setPath(file)\n \n if self.projectButton.isChecked():\n fn = os.path.join(self.project.ppath, file)\n else:\n fn = file\n # read the file and split it into textlines\n try:\n text, encoding, hashStr = Utilities.readEncodedFileWithHash(fn)\n lines = text.splitlines(True)\n except (UnicodeError, IOError):\n progress += 1\n self.findProgress.setValue(progress)\n continue\n \n # now perform the search and display the lines found\n count = 0\n for line in lines:\n if self.__cancelSearch:\n break\n \n count += 1\n contains = search.search(line)\n if contains:\n occurrences += 1\n found = True\n start = contains.start()\n end = contains.end()\n if self.__replaceMode:\n rline = search.sub(replTxt, line)\n else:\n rline = \"\"\n line = self.__stripEol(line)\n if len(line) > 1024:\n line = \"{0} ...\".format(line[:1024])\n if self.__replaceMode:\n if len(rline) > 1024:\n rline = \"{0} ...\".format(line[:1024])\n line = \"- {0}\\n+ {1}\".format(\n line, self.__stripEol(rline))\n self.__createItem(file, count, line, start, end,\n rline, hashStr)\n \n if self.feelLikeCheckBox.isChecked():\n fn = os.path.join(self.project.ppath, file)\n self.sourceFile.emit(fn, count, \"\", start, end)\n QApplication.processEvents()\n breakSearch = True\n break\n \n QApplication.processEvents()\n \n if found:\n fileOccurrences += 1\n progress += 1\n self.findProgress.setValue(progress)\n \n if not files:\n self.findProgress.setMaximum(1)\n self.findProgress.setValue(1)\n \n resultFormat = self.tr(\"{0} / {1}\", \"occurrences / files\")\n self.findProgressLabel.setPath(resultFormat.format(\n self.tr(\"%n occurrence(s)\", \"\", occurrences),\n self.tr(\"%n file(s)\", \"\", fileOccurrences)))\n \n self.findList.setUpdatesEnabled(True)\n self.findList.sortItems(self.findList.sortColumn(),\n self.findList.header().sortIndicatorOrder())\n self.findList.resizeColumnToContents(1)\n if self.__replaceMode:\n self.findList.header().resizeSection(0, self.__section0Size + 30)\n self.findList.header().setStretchLastSection(True)\n self.__populating = False\n \n self.stopButton.setEnabled(False)\n self.findButton.setEnabled(True)\n self.findButton.setDefault(True)\n \n if breakSearch:\n self.close()",
"def query_projects(request):\n try:\n filters = request.data\n except AttributeError:\n filters = FILTER\n projects = _get_projects(filters)\n projects_as_json = serializers.serialize('json', projects)\n return HttpResponse(json.dumps(projects_as_json), content_type='json')",
"def stract_scans(self, projects):\n pass",
"def test_get_project_list_with_tag_filter(self):\n # Add test projects.\n tag = 'tag1'\n projects_with_tag = [\n add_project(title='1', description='1', tags=[tag]),\n add_project(title='2', description='2', tags=[tag]),\n ]\n project_without_tag = add_project(title='3', description='3', tags=[])\n\n result = get_project_list(tag=tag)\n result_projects = result['projects'].object_list\n\n # Make sure only projects with tag are retrieved.\n for project_with_tag in projects_with_tag:\n self.assertTrue(project_with_tag in result_projects)\n self.assertFalse(project_without_tag in result_projects)\n self.assertEqual(len(result_projects), len(projects_with_tag))\n self.assertTrue(result['filtered'])\n self.assertEqual(result['tag'], tag)",
"def on_projectButton_clicked(self):\n self.__enableFindButton()",
"def _get_projects(filters):\n # First order the objects, so separate that out\n orders_query = [o for o in filters if o['type']=='order']\n # Filter objects next, so separate those out\n filters_query = [f for f in filters if f['type']=='filter']\n\n projects = Project.objects.all()\n # We need a dictonary to pass to Django's filter function\n query_dict = {}\n # Order the projects based on the ordering queries\n for orders in orders_query:\n projects = projects.order_by(orders['property'])\n # create the dictonary based on the filtering queries\n for filters in filters_query:\n # First, if we want to filter by user, find the user\n if filters['property'] =='user':\n try:\n user_p = UserProfile.objects.get(email=filters['value'])\n query_dict[filters['property']] = user_p\n except UserProfile.DoesNotExist:\n raise Http404(\"User does not exist\")\n # Second, if the filter is by tags, change the query phrase\n # to 'tags__tag_name' - this is because tags is a ManyToManyField\n # and we want to search by the tag_name property of Tag objects\n elif filters['property'] == 'tags':\n filters['property'] = 'tags__tag_name'\n query_dict[filters['property']] = filters['value']\n else:\n # Make a dictionary, property: value, and you can pass it to filter fn\n query_dict[filters['property']] = filters['value']\n projects = projects.filter(**query_dict)\n return projects",
"def test_get_projects_expanded(self):\n pass",
"def scan(self):\n\n # Check for whether session is still alive\n if not self.__projects:\n return\n\n Console.info(\"Scanning projects...\")\n Console.indent()\n\n for project in self.__projects:\n project.scan()\n\n for postscan in self.__postscans:\n postscan()\n\n Console.outdent()",
"def select_approved_projects(self):\r\n print \"Selecting approved projects... \"\r\n global ANNUAL_BUDGET\r\n \r\n projects_citizens_sorted = sorted(self.projects_for_vote, key=lambda project:project.units, reverse=True)\r\n projects_reps_sorted = sorted(self.projects_for_vote, key=lambda project:project.p_units, reverse=True)\r\n budget_sum = 0\r\n \r\n for p in projects_citizens_sorted:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n budget_sum = 0\r\n for p in projects_reps_sorted:\r\n if p not in self.projects_approved:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n\r\n \r\n# raw_input(\"select_approved_projects - antes\")\r\n for p in projects_citizens_sorted:\r\n print p\r\n print \"\\nReps\\n\"\r\n for p in projects_reps_sorted:\r\n print p\r\n print \"\\nApproved\\n\"\r\n for p in self.projects_approved:\r\n print p\r\n\r\n raw_input(\"select_approved_projects - depois\")",
"def test_get_projects(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_list_projects(self):\n pass",
"def do_projects(self, arg):\n args = shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))",
"def _page_projects(self):\n return self._open(self.app.page_projects)",
"def test_list_project(self):\n pass",
"def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):\n ml = fc.get_models(project=test_project_name)\n assert all(m.project_name == test_project_name for m in ml)\n\n ml = fc.get_models(model=test_model[\"name\"])\n assert all(m.name == test_model[\"name\"] for m in ml)\n\n ml = fc.get_models(project=test_project_name, model=test_model[\"name\"])\n assert all(\n m.project_name == test_project_name and m.name == test_model[\"name\"] for m in ml\n )",
"def test_list_project_request(self):\n pass",
"def _execute(self):\n return self.warrior.filter_tasks(self.filter_obj)",
"def _on_click_browse_to_pt_project(self):\n pass",
"async def filter(self, **kwargs):\n\n pass",
"def filter(self, artifacts: ArtifactsList) -> ArtifactsList:\n print(self.my_param)\n return artifacts"
] | [
"0.6948827",
"0.65609",
"0.65237945",
"0.6309058",
"0.61236054",
"0.6022066",
"0.6016844",
"0.59866375",
"0.5947311",
"0.5868932",
"0.5776334",
"0.57750094",
"0.57653195",
"0.57625276",
"0.5737932",
"0.5719922",
"0.5698445",
"0.56475973",
"0.5637165",
"0.5617174",
"0.5617174",
"0.560564",
"0.5590766",
"0.5571748",
"0.5563497",
"0.55499285",
"0.5528862",
"0.5523427",
"0.55139655",
"0.5510359"
] | 0.65843356 | 1 |
Step for trying to disable current project. | def check_project_cant_disable_itself(self):
page_projects = self._page_projects()
project_name = self.app.current_project
with page_projects.table_projects.row(
name=project_name).dropdown_menu as menu:
menu.button_toggle.click()
menu.item_edit.click()
with page_projects.form_edit_project as form:
form.checkbox_enable.click()
form.submit()
assert_that(page_projects.table_projects.row(
name=project_name).is_enabled, equal_to(True)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disable(self):\n self.error_code = 'DISABLED'\n self.running = False",
"def StopProject (paser):\n\tModKeyProjectProtection = Project.status()\t\t#Status del proyecto\n\n\tif ModKeyProjectProtection == False:\n\t\trepositorio.stopChanges()",
"def on_disable(self) -> None:\n self._cancel_automation()",
"def _disable(self):\n self.enabled = False",
"def disable(self) -> None:",
"def Disable(self):\n handler = self.get_command_object(\"Disable\")\n handler()",
"def cmd_disable(self, app_name=None):\n rc = self.socket_command_with_project('disable', app_name)\n return rc",
"def disable(self):\n pass",
"async def admin_disable(self, ctx: commands.Context):\n if ctx.guild.id not in self.guilds:\n await ctx.send('Team management is already disabled in this guild.')\n return\n await self._disable_guild(guild=ctx.guild)\n await ctx.send('Team management disabled.')",
"def disable(self):",
"def disable(self, index):\n self._action(index, StateVariable.enable, missingok=False, value=False)",
"def disable(self):\n self.enabled = False",
"def disable():\n ret = _LIB.oled_click_disable()\n if ret < 0:\n raise Exception(\"oled click disable failed\")",
"def disableWorkspace(self):\n # disable tabs\n for tabId in xrange( self.tab.count() ):\n doc = self.tab.widget(tabId)\n if isinstance(doc, WelcomePage):\n continue\n doc.setEnabled(False)\n \n # disable findwidget if needed ?\n currentTab = self.tab.currentIndex()\n currentDoc = self.tab.widget(currentTab)\n if currentDoc is not None:\n if not isinstance(currentDoc, WelcomePage):\n if currentDoc.extension in [ TestUnit.TYPE, TestData.TYPE, TestSuite.TYPE,\n TestAdapter.TYPE, TestLibrary.TYPE, TestTxt.TYPE ]:\n self.findWidget.setEnabled(False)",
"def DisableByRunIf(self):\n self.run_if = 'False'",
"def disable(self):\n if not self.labExperiment:\n super().disable()\n else:\n self.zero()\n self.connection.query('close_dm')\n print(\"'BM1k' is now disbaled\")",
"def skip_project_message():\n\n return ' - Skip project'",
"def stop_modify_project(update, context):\n context.user_data[START_OVER] = True\n get_list_projects(update, context)\n\n return END",
"def disable(self):\n logging.debug(\"Disabling switch %s\" % self.name)\n self.disabled = True",
"def disable(self, is_top_level=True):\n self.enabled = False",
"def deactivate(self):\n e5App().unregisterPluginObject(\"ProjectDjango\")\n \n e5App().getObject(\"Project\").projectOpened.disconnect(\n self.__projectOpened)\n e5App().getObject(\"Project\").projectClosed.disconnect(\n self.__projectClosed)\n e5App().getObject(\"Project\").newProject.disconnect(\n self.__projectOpened)\n \n e5App().getObject(\"Project\").projectOpenedHooks.disconnect(\n self.__object.projectOpenedHooks)\n e5App().getObject(\"Project\").projectClosedHooks.disconnect(\n self.__object.projectClosedHooks)\n e5App().getObject(\"Project\").newProjectHooks.disconnect(\n self.__object.projectOpenedHooks)\n \n e5App().getObject(\"Project\").projectAboutToBeCreated.disconnect(\n self.__object.startProjectOrApplication)\n e5App().getObject(\"Project\").newProject.disconnect(\n self.__object.newProjectCreated)\n \n self.__e5project.unregisterProjectType(\"Django\")\n \n self.__object.projectClosedHooks()\n self.__projectClosed()\n \n self.__initialize()",
"def disable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/disable\")\n\t\treturn bool(response.json[\"success\"])",
"def __disable__(self) -> None:\n pass",
"async def cmd_galdisable(self, ctx):\n # ===== SET LOCAL COG VARIABLE\n self.cogset['enable']= False\n\n # ===== SAVE SETTINGS \n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== DELETE THE JOB IF IT EXISTS\n for job in self.jobstore.get_all_jobs():\n if [\"_delete_gallery_messages\"] == job.id.split(\" \"):\n self.scheduler.remove_job(job.id)\n\n await ctx.channel.send(content=\"Galleries are disabled.\")\n\n return",
"def disable(self):\n self._enabled = False",
"def disable():\n if _status_apf():\n return __apf_cmd(\"-f\")",
"def disable(self):\n return self.enable(False)",
"def __gitBisectSkip(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBisect(self.project.getProjectPath(), \"skip\") or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Bisect\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()",
"def turn_off(self, **kwargs):\n self.robot.pause_cleaning()\n time.sleep(1)\n self.robot.send_to_base()",
"def disable(self):\n raise NotImplementedError"
] | [
"0.65031326",
"0.62058604",
"0.6188016",
"0.6126871",
"0.61137885",
"0.60983574",
"0.60533607",
"0.6040284",
"0.60343134",
"0.601686",
"0.6012566",
"0.59643465",
"0.59616554",
"0.593261",
"0.5905234",
"0.58999574",
"0.58820313",
"0.5857903",
"0.5816048",
"0.5810383",
"0.58069295",
"0.5793135",
"0.5786011",
"0.5782531",
"0.5776677",
"0.57699794",
"0.57595485",
"0.5739254",
"0.57316214",
"0.5706413"
] | 0.7005946 | 0 |
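The step above, like the project-management steps that follow, ends with a PyHamcrest-style assertion (`assert_that(..., equal_to(...))`). A minimal sketch of that assertion style, assuming the PyHamcrest package is available; the `check_flag` helper is purely illustrative:

    from hamcrest import assert_that, equal_to

    def check_flag(is_enabled):
        # Raises AssertionError with a readable mismatch description when False.
        assert_that(is_enabled, equal_to(True))

    check_flag(True)  # passes silently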
Step to manage project members. | def manage_project_members(self, project_name, admin_project_resources,
check=True):
page_projects = self._page_projects()
with page_projects.table_projects.row(
name=project_name).dropdown_menu as menu:
menu.item_default.click()
with page_projects.form_available_members as form:
form.item_members(admin_project_resources["user"].name).click()
form.submit()
if check:
self.close_notification('success') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_project_member(self):\n pass",
"async def add(self, ctx, project_name: str,\n members: commands.Greedy[discord.Member]) -> None:\n project = project_name\n if not ctx.projects.find_project(project_name):\n await ctx.send(\"This project doesn't exist.\")\n return\n if str(ctx.author.id) != ctx.projects.find_project(project).get(\n \"owner\"):\n await ctx.send(\"You can't add members to this project.\")\n return\n members = members if len(members) > 0 else [ctx.author]\n count = len(members)\n channel = ctx.guild.get_channel(\n int(ctx.projects.find_project(project).get(\"channel\")))\n for member in members:\n await channel.set_permissions(member, read_messages=True,\n send_messages=False)\n ctx.projects.add_project_members(project, [x.id for x in members])\n if members == ctx.author:\n await ctx.send(f\"You're already a member.\")\n if count == 1:\n member = members[0]\n await ctx.send(f\"`{member}` is now a member.\")\n if count == 2:\n await ctx.send(f\"`{members[0]}` and `{members[1]} `\"\n \"are now members.\")\n else:\n last_member = members[count - 1]\n members = members.pop(count - 1)\n string = \"`\"\n members = string + \", \".join(str(x) for x in members) + string\n members = members + f\" and `{last_member}`\"\n await ctx.send(f\"{members} are now members of your project.\")",
"def turn_on_member_workspaces(context):\n portal = context.getSite()\n \n if \"Members\" in portal.objectIds():\n portal.manage_delObjects(ids=[\"Members\"])\n\n if \"membership\" not in portal.objectIds(): \n # create members container folder and set default properties\n pt = portal[\"portal_types\"]\n members = portal[portal.invokeFactory(\"Folder\",id=\"membership\")]\n members.setTitle(\"membership\")\n members.setDescription(\"Member workspaces container.\")\n members._getWorkflowTool().doActionFor(members, \"publish\" \"\")\n members.setExcludeFromNav(True) \n members.reindexObject() \n \n # set members folder\n pm = portal['portal_membership']\n pm.memberareaCreationFlag = 1\n pm.setMembersFolderById('membership') \n logger.info(\"Members container created.\")",
"def test_remove_project_member(self):\n pass",
"async def on_project_member_add(self, guild_id: int, project: dict,\n members: list) -> discord.Message:\n # print(project)\n guild = (await self.bot.fetch_guild(guild_id))\n channel = await self.bot.fetch_channel(int(project.get(\"channel\")))\n members = [(await guild.fetch_member(member)) for member in members]\n count = len(members)\n if count == 1:\n member = members[0]\n return await channel.send(f\"**> Member Update:** `{member}` was\"\n \" added to this project.\")\n if count == 2:\n return await channel.send(f\"**> Member Update:** `{members[0]} `\"\n f\"and `{members[1]}\"\n \" were added to this project.\"\n )\n else:\n last_member = members[count - 1]\n members = members.pop(count - 1)\n string = \"`\"\n members = string + \"`, \".join(str(x) for x in members) + string\n members = members + f\" and `{last_member}`\"\n return await channel.send(f\"**> Member Update:** {members} were \"\n \"added to this project.\")",
"def test_add_role_to_project_member(self):\n pass",
"def create_memberships_project():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/memberships\".format(STORED_ID['project_id']))\n body = {\"person_id\": CONFIG_DATA['member_id'], \"role\": 'member'}\n client.set_body(json.dumps(body))\n client.execute_request()",
"def handle(self, *args, **options):\r\n username = 'populate_creators_command'\r\n email = '[email protected]'\r\n try:\r\n admin = User.objects.create_user(username, email, 'foo')\r\n admin.is_staff = True\r\n admin.save()\r\n except IntegrityError:\r\n # If the script did not complete the last time it was run,\r\n # the admin user will already exist.\r\n admin = User.objects.get(username=username, email=email)\r\n\r\n for user in get_users_with_role(CourseInstructorRole.ROLE):\r\n add_user_with_status_granted(admin, user)\r\n\r\n # Some users will be both staff and instructors. Those folks have been\r\n # added with status granted above, and add_user_with_status_unrequested\r\n # will not try to add them again if they already exist in the course creator database.\r\n for user in get_users_with_role(CourseStaffRole.ROLE):\r\n add_user_with_status_unrequested(user)\r\n\r\n # There could be users who are not in either staff or instructor (they've\r\n # never actually done anything in Studio). I plan to add those as unrequested\r\n # when they first go to their dashboard.\r\n\r\n admin.delete()",
"def test_add_team_member(self):\n pass",
"def test_get_member(self):\n user_new = self.make_user('user_new')\n self.make_assignment(self.project, user_new, self.role_contributor)\n url = reverse('projectroles:api_project_list')\n response = self.request_knox(url, token=self.get_token(user_new))\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 1)\n self.assertEqual(\n response_data[0]['sodar_uuid'], str(self.project.sodar_uuid)\n )",
"def test_remove_role_from_project_member(self):\n pass",
"def setHgMembers(self, membersToAdd):\n self.members = membersToAdd",
"def test_registered_user_can_create_project(self):\n user = self._create_user({\"username\":\"user2\",\"email\":\"[email protected]\"})\n testproject = self._create_comicsite_in_admin(user,\"user1project\") \n testpage1 = create_page_in_admin(testproject,\"testpage1\")\n testpage2 = create_page_in_admin(testproject,\"testpage2\")\n \n self._test_page_can_be_viewed(user,testpage1)\n self._test_page_can_be_viewed(self.root,testpage1)",
"def test_form_with_an_authorised_project_member(self):\n accounts = [\n self.project_owner,\n # self.project_applicant,\n ]\n\n # Authorise a project user membership for the project applicant.\n ProjectUserMembership.objects.create(\n project=self.project,\n user=self.project_applicant,\n status=ProjectUserMembership.AUTHORISED,\n date_joined=datetime.datetime.now(),\n date_left=datetime.datetime.now() + datetime.timedelta(days=10),\n )\n\n for account in accounts:\n self.approve_project(self.project)\n # A request to create a project user membership should be rejected.\n form = ProjectUserMembershipCreationForm(\n initial={\n 'user': account,\n },\n data={\n 'project_code': self.project_code,\n },\n )\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['project_code'],\n ['You are currently a member of the project.'],\n )\n\n # Ensure the project user membership status is currently set authorised.\n membership = ProjectUserMembership.objects.get(user=account)\n self.assertTrue(membership.is_authorised())",
"async def vouch(ctx, *, member_name=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n server = ctx.message.server\n member_roles = ctx.message.author.roles\n member_admin = discord.utils.find(lambda r: r.name.lower() in admin_roles, member_roles)\n if member_admin is not None:\n member = discord.utils.find(lambda c: c.name.lower() == member_name.lower(), server.members)\n roles = member.roles\n new_role = discord.utils.find(lambda r: r.name.lower() == required_role, server.roles)\n roles.append(new_role)\n await amor_manager.replace_roles(member, *roles)\n await amor_manager.say('{0} granted citizenship'.format(member.name))",
"def test_get_member(self):\n test_resource = 'test_get_member'\n self.app.post(f'/v1/resource/{self.test_resource}/id/{test_resource}', headers=admin_headers)\n\n # make groups\n members = []\n for group in [f'rt_group{i}' for i in range(5)]:\n resp = self.app.post(\n f'/v1/group',\n data=json.dumps({'group_id': group}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n members.append({'member': group,\n 'member_type': 'group',\n 'access_level': 'read'})\n\n # make users\n for user in [f'rt_user{i}' for i in range(6)]:\n resp = self.app.post(\n f'/v1/user',\n data=json.dumps({'user_id': user}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n members.append({'member': user,\n 'member_type': 'user',\n 'access_level': 'read'})\n for m in members:\n resp = self.app.put(\n f'/v1/resource/{self.test_resource}/id/{test_resource}/members',\n data=json.dumps([m]),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n self._test_paging(f'/v1/resource/{self.test_resource}/id/{test_resource}/members', admin_headers, 10, 'members')",
"def setHgMembers(self, members):\n self.huntGroup.setHgMembers(members)",
"async def done(self, ctx, member: discord.Member):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n if member.id in lst:\r\n lst.remove(member.id)\r\n await self.config.guild(ctx.guild).neededlist.set(lst)\r\n await self.config.member(member).clear()\r\n await ctx.send(\"Removed member from pending list\")\r\n\r\n else:\r\n await ctx.send(\"Member not in the pending list\")\r\n\r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")",
"def follow_project(cls, user, project):\r\n pass",
"def process_members(self, members):\n seq = (list, tuple, set)\n assert isinstance(members, seq), (f\"The members argument must be one\"\n f\"of '{seq}', found '{members}'.\")\n assert all([isinstance(member, dict) for member in members]), (\n f\"The members object must be a list of dicts, found {members}\")\n assert all([field in self.PROCESS_MEMBERS_FIELDS\n for member in members for field in member.keys()]), (\n f\"Invalid fields in dict, must have these keys \"\n f\"{self.PROCESS_MEMBERS_FIELDS}, members {members}\"\n )\n wanted_user_pks = [item['user'].pk for item in members]\n current_user_pks = [inst.user.pk for inst in self.memberships.all()]\n # Delete unwanted Membership objects.\n rem_user_pks = list(set(current_user_pks) - set(wanted_user_pks))\n self.memberships.select_related('user').filter(\n user__pk__in=rem_user_pks).delete()\n # Add new members.\n add_user_pks = list(set(wanted_user_pks) - set(current_user_pks))\n common_pks = list(set(wanted_user_pks) & set(current_user_pks))\n\n for item in members:\n if item['user'].pk in add_user_pks:\n # Create any new members.\n kwargs = {}\n kwargs['project'] = self\n kwargs['user'] = item['user']\n kwargs['role_text'] = item['role_text']\n obj = Membership(**kwargs)\n obj.save()\n elif item['user'].pk in common_pks:\n # Update any comment members.\n role = Membership.ROLE_MAP_REV[item['role_text']]\n self.memberships.filter(user=item['user']).update(role=role)",
"def test_alert_create_for_site_members(self):\n pass",
"def run_manage(self, expanded, unexpanded) :\n\t\treturn self.manage_view_properties(expanded, unexpanded, \"/manage\", roles = \"Manager\")",
"def hello():\n # proj_service = build('cloudresourcemanager','v1beta1',credentials=oauth2.credentials)\n # projects_raw=proj_service.projects().list().execute()\n # if projects_raw:\n # session['projects'] = [i['projectId'] for i in projects_raw['projects']]\n # else:\n # session['projects']='None'\n get_proj(oauth2)\n # Change next line to determine the project whose membership is tested for access\n test_project = 'PROJECT TO TEST FOR VALIDATION'\n if test_project in session['projects']:\n session['validated'] = 1\n return render_template('index.html')\n else:\n [session.pop('validated') if session.get(\"validated\") else None]\n flask.abort(403)\n # This looks like it works swimmingly.",
"async def scan_members(self, ctx):\n if not ctx.guild.id:\n return\n\n if not await self.bot.guildservice.check_guild(ctx.guild.id):\n await self.bot.guildservice.add_guild(ctx.guild)\n\n async for member in ctx.guild.fetch_members():\n await self.register_member(member, ctx.guild.id)\n await asyncio.sleep(0)\n print(\"Usuários salvos com sucesso.\")\n await asyncio.sleep(1)\n await ctx.message.delete()",
"def updateProjects(request):\n\n updater = ProjectUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")",
"def main(request):\n users = get_channel_users(MEMBER_CHANNEL)\n human_users = filter_bots(users)\n buddy_groups = group_users(human_users)\n if buddy_groups:\n resp = post_message(buddy_groups, POST_CHANNEL)\n ret = str(resp)\n else:\n ret = 'Not enough members to run'\n return ret",
"def test_handle_assign_as_admin(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n else:\r\n calling_user = User(user)\r\n calling_user.permissions_level = Permissions.admin\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name\",\r\n user),\r\n (\"Project successfully assigned!\", 200))",
"def test_update_member(self):\r\n resource = 'member'\r\n cmd = member.UpdateMember(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'myname',\r\n '--tags', 'a', 'b'],\r\n {'name': 'myname', 'tags': ['a', 'b'], })",
"def test_delete_team_member(self):\n pass",
"def test_assign_managing_team(self):\n pass"
] | [
"0.6774335",
"0.6523073",
"0.6445679",
"0.6356567",
"0.6110086",
"0.6039119",
"0.5738103",
"0.5666908",
"0.56620085",
"0.56442386",
"0.563256",
"0.55870676",
"0.55030304",
"0.5493451",
"0.5483998",
"0.5423954",
"0.54072183",
"0.540391",
"0.5349906",
"0.53472126",
"0.53173715",
"0.5302406",
"0.52958876",
"0.52741605",
"0.52579856",
"0.5256869",
"0.525141",
"0.5233545",
"0.522911",
"0.52185565"
] | 0.71600276 | 0 |
Step to update project name. | def update_project_name(self, project_name, new_project_name, check=True):
page_projects = self._page_projects()
with page_projects.table_projects.row(
name=project_name).dropdown_menu as menu:
menu.button_toggle.click()
menu.item_edit.click()
with page_projects.form_edit_project as form:
form.field_name.value = new_project_name
form.submit()
if check:
self.close_notification('success')
page_projects.table_projects.row(
name=new_project_name).wait_for_presence() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_project_name(self, curr_proj, proj_new_name):\r\n for proj in self.__projects:\r\n if proj == curr_proj: # Find the project with the same current name\r\n proj.update_name(proj_new_name) # Update the project's name\r",
"def update_project(self, name):\n self._log.info(\"Updating project: {}\".format(name))\n if name in self.projects:\n pass\n else:\n self.add_project(name)",
"def update_name(self, project: str, new_name: str) -> dict:\n assert self.exists(project), f'Project {project} inesistente'\n\n return self.collection.find_one_and_update(\n {\n 'url': project\n },\n {\n '$set': {\n 'name': new_name,\n }\n }\n )",
"def rename_project(request):\n data = json.loads(request.body.decode('utf-8'))\n try:\n proj = models.Project.objects.get(pk=data['projid'])\n except models.Project.DoesNotExist:\n return JsonResponse({'error': f'Project with that ID does not exist in DB'}, status=404)\n # check if new project not already exist, and user have permission for all dsets\n proj_exist = models.Project.objects.filter(name=data['newname'])\n if proj_exist.count():\n if proj_exist.get().id == proj.id:\n return JsonResponse({'error': f'Cannot change name to existing name for project {proj.name}'}, status=403)\n else:\n return JsonResponse({'error': f'There is already a project by that name {data[\"newname\"]}'}, status=403)\n if is_invalid_proj_exp_runnames(data['newname']):\n return JsonResponse({'error': f'Project name cannot contain characters except {settings.ALLOWED_PROJEXPRUN_CHARS}'}, status=403)\n dsets = models.Dataset.objects.filter(runname__experiment__project=proj)\n if not all(check_ownership(request.user, ds) for ds in dsets):\n return JsonResponse({'error': f'You do not have the rights to change all datasets in this project'}, status=403)\n # queue jobs to rename project, update project name after that since it is needed in job for path\n create_job('rename_top_lvl_projectdir', newname=data['newname'], proj_id=data['projid'])\n proj.name = data['newname']\n proj.save()\n return JsonResponse({})",
"def getProjectName():",
"def project_name(self):\n pass",
"def project_name(self, project_name):\n\n self._project_name = project_name",
"def test_change_title(self):\n test_title = 'Some Title'\n support.create_project(self, 'igor')\n cd.project.title = test_title\n self.assertEqual(cd.project.title, test_title)",
"def _project_name(self):\n name = getattr(self._req.req, 'project_name', '')\n if name:\n return name\n raise ValueError('Requirement has no project_name.')",
"def update_project(arn=None, name=None, defaultJobTimeoutMinutes=None):\n pass",
"def switch_project(self, project_name, check=True):\n with self.app.page_base.dropdown_menu_project as menu:\n\n if menu.label_project.value == project_name:\n self.app.current_project = project_name\n return\n\n menu.click()\n menu.item_project().click()\n self.app.current_project = project_name\n\n if check:\n self.close_notification('success')\n assert_that(menu.label_project.value, equal_to(project_name))",
"def updateProjects(request):\n\n updater = ProjectUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")",
"def setup_project(project_name):\n\n project_arn = ''\n for project in device_farm.list_projects()['projects']:\n if project['name'] == project_name:\n print('{} project already exists'.format(project_name))\n project_arn = project['arn']\n else:\n print(\n '{} project is not available, creating new one'.format(\n project_name\n )\n )\n project_arn = create_project(project_name)\n\n return project_arn\n\n raise KeyError('Problem finding project %r' % project_name)",
"def test_update_project(self):\n pass",
"def test_update_project(self):\n pass",
"def project_refresh(project_name):\n if not db_find_project(project_name):\n abort(404)\n analyser.add_repos(current_user.username, [project_name])\n return redirect(url_for('main.admin_manage'))",
"def ChangeName(self, newName):\n if newName != \"\":\n newPath = self.format + os.sep + \"playlists\" + os.sep + newName + \".txt\"\n os.replace(self.path, newPath)\n self.path = newPath",
"def edit_task_name(entry):\n entry.task_name = get_task_name()\n entry.save()\n input(\"Edit successful. \")\n return entry",
"def _get_project_name(self, context, project_id):\n return project_id",
"def set_project(\n name\n):\n if not is_alive():\n err_msg = \"Cannot connect to getML engine. Make sure the engine is running and you are logged in.\"\n raise ConnectionRefusedError(err_msg)\n\n cmd = dict()\n cmd[\"type_\"] = \"set_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def remoteTestsDirRenamed(self, projectId, directoryPath, directoryName, newName):\n if len(directoryPath) > 0:\n complete_old = \"%s/%s\" % (directoryPath, directoryName)\n complete_new = \"%s/%s\" % (directoryPath, newName)\n else:\n complete_old = directoryName\n complete_new = newName\n\n for tabId in xrange( self.tab.count() ): \n doc = self.tab.widget(tabId)\n \n # bypass the welcome page\n if isinstance(doc, WelcomePage): \n continue\n # end of bypass\n \n \n if doc.isRemote == True and doc.getPathOnly().startswith(complete_old) and \\\n doc.project == int(projectId) and doc.repoDest==UCI.REPO_TESTS: \n to_keep = doc.getPathOnly().split(complete_old)\n if len(to_keep) > 1: to_keep = to_keep[1]\n else: to_keep = to_keep[0]\n\n full_new_path = \"%s%s\" % (complete_new, to_keep)\n\n self.tab.setCurrentIndex(tabId)\n \n msg = self.tr(\"The path of this file has been renamed.\\nDo you want to update the path ?\")\n buttons = QMessageBox.Yes | QMessageBox.No \n answer = QMessageBox.question(self, Settings.instance().readValue( key = 'Common/name' ), \n msg, buttons)\n if answer == QMessageBox.Yes:\n doc.updatePath( pathFilename=full_new_path )\n doc.setUnmodify()\n elif answer == QMessageBox.No:\n doc.unSaved()\n doc.setModify()",
"def get_project_name(self):\n return self.line_edit.text()",
"def get_project_name(name: str) -> str:\n if is_shortcut_name(name):\n return name.split(config.name_separator)[1]\n raise CHCShortCutNameError(name)",
"def change_name(change_account):\n change_data(change_account, changed_data='name')",
"def update_name(self, new_name):\r\n self.__name = new_name",
"def update_name(self, new_name):\r\n self.__name = new_name",
"def upgrade_project(ctx, path):\n with ctx.cd(path):\n ctx.run(\"newt upgrade\")",
"def test_rename_cmd_line(self):\n\n cmd = ['pydroid', 'rename', 'name:%s' % NEW_APP_NAME,\n 'domain:%s' % NEW_DOMAIN]\n\n subprocess.call(cmd)\n self.assertTrue(os.path.exists(RENAMED_PROJECT_DIR))",
"def test_replace_project(self):\n pass"
] | [
"0.71789324",
"0.70686406",
"0.691698",
"0.67916363",
"0.6765881",
"0.6631026",
"0.65020585",
"0.6319885",
"0.62859344",
"0.62705225",
"0.6221078",
"0.61977655",
"0.61945593",
"0.61657494",
"0.61657494",
"0.6118281",
"0.61064047",
"0.60236293",
"0.5981603",
"0.5951724",
"0.5920675",
"0.59183687",
"0.588797",
"0.58763516",
"0.5862025",
"0.58575475",
"0.58575475",
"0.5838975",
"0.58201945",
"0.58110666"
] | 0.717039 | 1 |
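The three project steps above (disable, manage members, rename) share one page-object pattern: locate the project's table row, open its dropdown menu, fill and submit a form, then optionally close the success notification and verify the result. A hedged sketch of that pattern; every name below (page_projects, table, menu, form_edit) is a hypothetical stand-in, not the actual framework API:

    def update_row_name(page_projects, old_name, new_name, check=True):
        row = page_projects.table.row(name=old_name)   # locate the table row
        row.menu.open()                                # expand its dropdown menu
        row.menu.item('Edit').click()                  # pick the edit action
        form = page_projects.form_edit
        form.field('Name').set(new_name)               # edit the field and submit
        form.submit()
        if check:                                      # optional verification step
            page_projects.notifications.close('success')
            assert page_projects.table.row(name=new_name).is_present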
Pad x with zeros to total digits. | def pad_zeros(x, total):
num_pad = total - len(x)
for idx in range(num_pad):
x = '0' + x
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pad_digits(x, width):\n if pd.notnull(x):\n return '{0:0{1}d}'.format(int(x), width)\n else:\n return x",
"def pad(number, width=0):\n return str(number).zfill(width)",
"def pad_with_border(x, n_pad):\n x_pad_list = [x[0:1]] * int(n_pad) + [x] + [x[-1:]] * int(n_pad)\n return np.concatenate(x_pad_list, axis=0)",
"def left_fill(s, n, x=\"0\"):\n sl = len(s)\n zn = n - sl\n if zn > 0:\n return zn*\"0\" + s\n else:\n return s",
"def pad(value, digits, to_right=False):\n len_val = len(value)\n assert len_val <= digits\n rem_digits = digits - len_val\n if to_right:\n return value + \"0\"*rem_digits\n else:\n return \"0\"*rem_digits + value",
"def padlen_64(x: int):\n return (64 - (x % 64)) % 64",
"def padding_zeroes(number, length_string):\n return str(number).zfill(length_string)",
"def pad_number(number, length):\n\n string_number = str(number)\n number_of_zeros = length - len(string_number)\n if number_of_zeros >= 0:\n return \"0\" * number_of_zeros + string_number\n else:\n return string_number",
"def pad_zeros(x):\n dim = tf.shape(x)[0]\n log2_dim = tf.math.log(tf.cast(dim, tf.float32)) / tf.math.log(2.0)\n pad_dim = tf.pow(2, tf.cast(tf.math.ceil(log2_dim), tf.int32))\n with tf.control_dependencies([tf.debugging.assert_rank(x, 1)]):\n return tf.pad(x, [[0, tf.maximum(0, pad_dim - dim)]])",
"def zeroPad(numberString, zeros, left = True):\n for i in range(zeros):\n if left:\n numberString = '0' + numberString\n else:\n numberString = numberString + '0'\n return numberString",
"def _pad(x, depth=4):\n divisor = np.power(2, depth)\n remainder = x.shape[0] % divisor\n\n # no padding because already of even shape\n if remainder == 0:\n return x\n # add zero rows after 1D feature\n elif len(x.shape) == 2:\n return np.pad(x, [(0, divisor - remainder), (0, 0)], \"constant\")\n # add zero columns and rows after 2D feature\n elif len(x.shape) == 3:\n return np.pad(x, [(0, divisor - remainder), (0, divisor - remainder),\n (0, 0)], \"constant\")",
"def zero_pad(X, padding_width, dims):\n dims = (dims) if isinstance(dims, int) else dims\n pad = [(0, 0) if idx not in dims else (padding_width, padding_width)\n for idx in range(len(X.shape))]\n X_padded = np.pad(X, pad, 'constant')\n return X_padded",
"def truncate(x, digits):\n if x==0:\n return '0'\n if digits==0:\n return str(int(round(x)))\n FORMAT = \"\"\".%sf\"\"\"\n format_x = FORMAT % str(int(digits))\n return format(x, format_x).rstrip('0').rstrip('.')",
"def pad_left(x, block_size=3, fill=0):\n if len(x) > block_size:\n return x\n else:\n right = np.array(list(str(x)))\n left = np.repeat(str(fill), block_size - right.size )\n return \"\".join(np.concatenate([left, right]))",
"def zero_pad(data):\n N = len(data)\n pow_2 = np.ceil(np.log2(N))\n return np.pad(data,(0,int((2**pow_2)-N)),'constant')",
"def conv_pad(x, ks, mode):\n\tpad = (int(np.floor((ks-1)/2)), int(np.ceil((ks-1)/2)))\n\treturn F.pad(x, (*pad, *pad), mode=mode)",
"def _pad_shorter(sequence: str) -> str:\n return sequence.ljust(3, \"X\")",
"def _pad1d(self, x: torch.Tensor, padding_left: int, padding_right: int, mode: str = \"zero\", value: float = 0.0):\n length = x.shape[-1]\n if mode == \"reflect\":\n max_pad = max(padding_left, padding_right)\n if length <= max_pad:\n x = F.pad(x, (0, max_pad - length + 1))\n return F.pad(x, (padding_left, padding_right), mode, value)",
"def pad(size, value):\n return (value + size - 1)/size*size",
"def trunc(x):\n return 0",
"def filter_pad(val: Union[int, str], width: int, fillchar: str = '0') -> str:\n return str(val).rjust(width, fillchar)",
"def zeros_padding_to_number_digits(mystring):\n\treturn ''.join([format(int(x), '05d') if x.isdigit() else x for x in re.split(r'(\\d+)', mystring)])",
"def add_padding(x, maxlen=500):\n \n # May want to increase maxlen from 500! Not sure the total dist of chomragram lengths.\n\n for i in range(len(x)):\n x[i] = x[i][:,:maxlen]\n q = maxlen - x[i].shape[1]\n p = q//2\n# if q % 2 == 0:\n# x[i] = np.pad(x[i], ((p,p), (0,0)), 'constant', constant_values=(0,0))\n# else:\n# x[i] = np.pad(x[i], ((p,p+1), (0,0)), 'constant', constant_values=(0,0))\n\n print\n if q % 2 == 0:\n x[i] = np.pad(x[i], ((0,0), (p,p)), 'constant', constant_values=(0,0))\n else:\n x[i] = np.pad(x[i], ((0,0), (p,p+1)), 'constant', constant_values=(0,0))\n \n return x",
"def _pad_with_zeros(self, X, margin):\n newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2 * margin, X.shape[2]))\n x_offset = margin\n y_offset = margin\n newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X\n return newX",
"def pad(x, padding, fill_value=0):\n input_shape = x.shape\n output_shape = []\n indices = []\n\n for dim, pad in enumerate(padding):\n try:\n left_pad, right_pad = pad\n except TypeError:\n left_pad = right_pad = pad\n output_shape.append(left_pad + input_shape[dim] + right_pad)\n indices.append(slice(left_pad, left_pad + input_shape[dim]))\n\n if fill_value:\n out = T.ones(output_shape) * fill_value\n else:\n out = T.zeros(output_shape)\n return T.set_subtensor(out[tuple(indices)], x)",
"def rjust(value, length):\n\n if value is None or value == '':\n value = '0'\n else:\n value = str(value)\n value = value.rjust(length, '0')\n return value",
"def int_padding(length, val, direction=\">\"):\n return '{0:0{direction}{fill}}'.format(val, direction=direction, fill=length)",
"def pad(x, pad_left=0, pad_right=0, to_complex=True):\n output = pad_1d(x, pad_left, pad_right, mode='reflect')\n if to_complex:\n output = torch.stack((output, torch.zeros_like(output)), dim=-1)\n return output",
"def _padleft(width, s):\n fmt = \"{0:>%ds}\" % width\n return fmt.format(s)",
"def pad_spatial(self, x):\n n, t, c, h, w = x.size()\n\n pad_h = (4 - h % 4) % 4\n pad_w = (4 - w % 4) % 4\n\n # padding\n x = x.view(-1, c, h, w)\n x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')\n\n return x.view(n, t, c, h + pad_h, w + pad_w)"
] | [
"0.7705322",
"0.6957708",
"0.67513067",
"0.67392343",
"0.6692656",
"0.6670947",
"0.66124976",
"0.6442183",
"0.6426501",
"0.64234763",
"0.6382758",
"0.63817555",
"0.6342493",
"0.62942547",
"0.62548393",
"0.62539077",
"0.6236442",
"0.62061024",
"0.61991197",
"0.6173912",
"0.61250323",
"0.6105705",
"0.60866743",
"0.6084245",
"0.60773355",
"0.6052809",
"0.6015161",
"0.5978191",
"0.5962209",
"0.5958778"
] | 0.8166357 | 0 |
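The `pad_zeros` document above builds its padding with an explicit loop; the standard library offers the same left-zero-padding via `str.zfill` or a format spec. A small sketch for comparison, using only builtins:

    def pad_zeros(x, total):
        # Equivalent to prepending '0' until len(x) == total.
        return str(x).zfill(total)

    assert pad_zeros('42', 5) == '00042'
    assert '{0:0{1}d}'.format(42, 5) == '00042'  # format-spec alternative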
Build shards in a loop. | def create_shards(
it_shards,
shard_dir,
key,
files,
labels,
targets,
im_size,
label_size,
preprocess,
store_z,
normalize_im):
all_files = files[key]
all_labels = labels[key]
total_data = len(all_files) / it_shards
mask = np.arange(it_shards).reshape(1, -1).repeat(total_data).reshape(-1)
all_files = all_files[:len(mask)]
all_labels = all_labels[:len(mask)]
total_shards = pad_zeros(str(it_shards), 5)
for idx in tqdm(
range(it_shards), total=it_shards, desc='Building %s' % key):
it_mask = mask == idx
shard_label = pad_zeros(str(idx), 5)
shard_name = os.path.join(
shard_dir,
'%s-%s-of-%s.tfrecords' % (key, shard_label, total_shards))
it_files = {key: all_files[it_mask]}
it_labels = {key: all_labels[it_mask]}
data_to_tfrecords(
files=it_files,
labels=it_labels,
targets=targets,
ds_name=shard_name,
im_size=im_size,
label_size=label_size,
preprocess=preprocess,
store_z=store_z,
it_ds_name=shard_name,
normalize_im=normalize_im) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_shard(dataset, num_shards):\n input_chips, label_chips = [], []\n for item in tqdm(dataset):\n # not using chip_id and chip_for_display fields\n input_chips.append(item['chip'])\n label_chips.append(item['chip_label'])\n\n # debugging\n # if len(input_chips) > 200:\n # break\n num_chips = len(input_chips)\n print(f'Created {num_chips} chips.')\n\n items_per_shards = math.ceil(num_chips / num_shards)\n shard_idx = []\n for i in range(num_shards):\n shard_idx.append(\n (i * items_per_shards, (1 + i) * items_per_shards)\n )\n # print(f'Debug - shard_end_idx is {shard_idx}')\n\n print('Stacking imagery and label chips into shards')\n input_chip_shards, label_chip_shards = [], []\n for begin_idx, end_idx in shard_idx:\n if begin_idx < num_chips:\n input_chip_shard = input_chips[begin_idx:end_idx]\n input_chip_shard = np.stack(input_chip_shard, axis=0)\n print(f'dim of input chip shard is {input_chip_shard.shape}, dtype is {input_chip_shard.dtype}')\n input_chip_shards.append(input_chip_shard)\n\n label_chip_shard = label_chips[begin_idx:end_idx]\n label_chip_shard = np.stack(label_chip_shard, axis=0)\n print(f'dim of label chip shard is {label_chip_shard.shape}, dtype is {label_chip_shard.dtype}')\n label_chip_shards.append(label_chip_shard)\n\n return (input_chip_shards, label_chip_shards)",
"def get_shards(data_dir, file_list, shard_size, istraining):\n file_idxs = np.arange(0, len(file_list))\n np.random.shuffle(file_idxs) # randomly extract data from files\n\n shard_num = len(file_list) // shard_size\n\n for shard_idx in range(shard_num):\n\n start_idx = shard_idx * shard_size\n end_idx = (shard_idx + 1) * shard_size\n shard_files_idxs = file_idxs[start_idx: end_idx]\n\n all_data, all_label, all_names, all_node_img = [], [], [], []\n for fn in shard_files_idxs:\n\n if not data_dir:\n raw_data = np.load(file_list[fn])\n else:\n raw_data = np.load(os.path.join(data_dir, file_list[fn]))\n\n current_data = raw_data['vgg_features']\n node_img_path = raw_data['img_path']\n # pid = raw_data['pid']\n # time = raw_data['time']\n if len(current_data) < MIN_NUM_POINT:\n # skip WSI of too few patches\n continue\n\n # if len(current_data) > MAX_NUM_POINT:\n # continue\n\n curr_path = file_list[fn]\n\n curr_type = curr_path.split('/')[-4]\n curr_filename = curr_path.split('/')[-1]\n\n if curr_type == 'LUAD':\n # LUAD -> class 0, LUSC -> class 1\n current_label = 0\n else:\n current_label = 1\n\n # if istraining:\n \"random select at most MAX_NUM_POINT nodes for WSI\"\n list_node_idx = np.arange(0, current_data.shape[0])\n np.random.shuffle(list_node_idx)\n sel_ids = list_node_idx[0: MAX_NUM_POINT]\n\n current_data = current_data[sel_ids]\n current_data = np.expand_dims(current_data, 0)\n node_img_path = node_img_path[sel_ids]\n\n all_data.append(current_data)\n all_label.append(current_label)\n all_names.append(curr_filename)\n all_node_img.append(node_img_path)\n\n \"\"\" create numpy for all data and label\"\"\"\n all_label = np.squeeze(np.hstack(all_label))\n\n yield all_data, all_label, all_names, all_node_img",
"def shard(self, dataset_iter):\n return dataset_iter",
"def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)",
"def setup_index_shards_per_node(cluster: str, index: str, number: int):\n\n elastic = sreElastic(host=cluster)\n elastic.set_number_shards_per_node(index=index, number=number)",
"def shards(self):\n shards_per_node = {}\n for node in self.nodes:\n num_shards = 0\n metrics = self.metrics(node)\n for family in metrics:\n for sample in family.samples:\n if sample.name == \"vectorized_reactor_utilization\":\n num_shards = max(num_shards,\n int(sample.labels[\"shard\"]))\n assert num_shards > 0\n shards_per_node[self.idx(node)] = num_shards\n return shards_per_node",
"def build(self):\n states = WOFRegion.query.filter(WOFRegion.country_iso=='US')\n\n logger.info('Indexing US states.')\n\n for row in tqdm(states):\n\n # Key -> id(s)\n for key in map(keyify, state_key_iter(row)):\n self.add_key(key, row.wof_id)\n\n # ID -> state\n self.add_location(row.wof_id, StateMatch(row))",
"def load_next_shards(cmp_id):\n while shards and shards[0].cmp_id <= cmp_id:\n shard = shards.pop(0)\n shard.start_fetching_events_async(start_id, end_id)\n shards_to_load.append(shard)\n while shards_to_load:\n shard = shards_to_load.pop(0)\n it = shard.iterator(start_id, end_id)\n try:\n event = it.next()\n heapq.heappush(event_heap, event)\n iterators[shard] = it\n except StopIteration:\n pass",
"def _project_loops(self):\n\n self._create_projection_datasets()\n self._get_sho_chunk_sizes(10)\n\n '''\n Loop over the FORCs\n '''\n for forc_chunk_index in range(self._num_forcs):\n pos_chunk_index = 0\n\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n dc_vec = self._get_dc_offset()\n '''\n Loop over positions\n '''\n while self._current_pos_slice.stop < self._end_pos:\n loops_2d, nd_mat_shape_dc_first, order_dc_offset_reverse = self._get_projection_data(pos_chunk_index)\n\n # step 8: perform loop unfolding\n projected_loops_2d, loop_metrics_1d = self._project_loop_batch(dc_vec, np.transpose(loops_2d))\n\n # test the reshapes back\n projected_loops_2d = self._reshape_projected_loops_for_h5(projected_loops_2d,\n order_dc_offset_reverse,\n nd_mat_shape_dc_first)\n self.h5_projected_loops[self._current_pos_slice, self._current_sho_spec_slice] = projected_loops_2d\n\n metrics_2d = self._reshape_results_for_h5(loop_metrics_1d, nd_mat_shape_dc_first)\n\n self.h5_loop_metrics[self._current_pos_slice, self._current_met_spec_slice] = metrics_2d\n\n # Reset the position slice\n self._current_pos_slice = slice(None)\n\n pass",
"def build_lhosts(self , sws , lhost_count):\n host_count = 0\n for sw in sws:\n for i in range(lhost_count):\n host_id = host_count + 1\n host = self.addHost('h%s' % host_id)\n self.addLink(sw, host)\n host_count += 1\n return host_count",
"def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()",
"def __build_iteration(self) -> None:\n trees = [t for t in self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]",
"def build_index():\n for site in get_sites():\n text = read_site(site)\n while text == False:\n text = read_site(site) # keep attempting to read until successful\n index_site(site, text)",
"def build_index():\n for site in get_sites():\n text = read_site(site)\n while text == False:\n text = read_site(site) # keep attempting to read until successful\n index_site(site, text)",
"def build_all(nodes=[]):\n\n if nodes:\n nodes = [n for n in mc.ls(nodes) if mc.objExists(n+'.tagSpaces')]\n else:\n nodes = mc.ls('*.tagSpaces')\n nodes = [n.replace('.tagSpaces','') for n in nodes if mc.getAttr(n)]\n\n for node in nodes:\n space_obj = Space(node)\n space_obj.build_space()",
"def prepare_shards_maintenance(self):\n errors = Queue.Queue()\n threads = []\n for shard in self.shards:\n t = threading.Thread(target=shard.prepare_maintenance, args=(errors,))\n threads.append(t)\n if self.config_server is not None:\n t = threading.Thread(target=self.config_server.prepare_maintenance, args=(errors,))\n threads.append(t)\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n if not errors.empty():\n # We don't really care for all errors, so just through the first one\n raise errors.get()",
"def init_rack(self):\n for i in range(7):\n self.rack.append(self.board.draw_random_tile())",
"def cluster_shards(self, target_nodes=None):\n return self.execute_command(\"CLUSTER SHARDS\", target_nodes=target_nodes)",
"def _build_runlist(self):\n\n if self.seed is not None:\n np.random.seed(self.seed)\n\n for i in moves.range(self.num_samples):\n sample = []\n for key, meta in iteritems(self.get_desvar_metadata()):\n nval = meta['size']\n values = []\n for k in range(nval):\n\n low = meta['lower']\n high = meta['upper']\n if isinstance(low, np.ndarray):\n low = low[k]\n if isinstance(high, np.ndarray):\n high = high[k]\n\n values.append(np.random.uniform(low, high))\n sample.append([key, np.array(values)])\n\n yield sample",
"def list_shards(name='shards'):\n embed = discord.Embed(title=\"Shards:\", colour=discord.Colour(14066432))\n embed.set_footer(text=\"Donations\")\n for idx, shard in enumerate(Shard):\n embed.add_field(name=str(idx + 1) + \": \", value=shard.value, inline=True)\n yield from bot.say(embed=embed)",
"def presplit(host, database, collection, shardkey, shardnumber=None,\n chunkspershard=1, verbose=False):\n con = Connection(host)\n namespace = '%s.%s' % (database, collection)\n\n # disable balancer\n con['config']['settings'].update_one({'_id': \"balancer\"},\n {'$set': {'stopped': True}}, upsert=True)\n\n # enable sharding on database if not yet enabled\n db_info = con['config']['databases'].find_one({'_id': database})\n if not db_info or db_info['partitioned'] is False:\n con['admin'].command(SON({'enableSharding': database}))\n\n # shard collection if not yet sharded\n coll_info = con['config']['collections'].find_one({'_id': namespace})\n if coll_info and not coll_info['dropped']:\n # if it is sharded already, quit. something is not right.\n if verbose:\n print(\"collection already sharded.\")\n return\n else:\n con[database][collection].create_indexes([shardkey])\n con['admin'].command(SON({'shardCollection': namespace,\n 'key': {shardkey: 1}}))\n\n # get shard number and names and calculate split points\n shards = list(con['config']['shards'].find())\n\n if len(shards) == 1:\n if verbose:\n print(\"only one shard found. no pre-splitting required.\")\n return\n\n # limit number of shards if shardnumber given\n if shardnumber and shardnumber <= len(shards):\n shards = shards[:shardnumber]\n\n shard_names = [s['_id'] for s in shards]\n splits_total = len(shards) * chunkspershard\n split_interval = 16**4 / splits_total\n split_points = [\"%0.4x\" % s for s in range(split_interval,\n splits_total * split_interval,\n split_interval)]\n\n # pre-splitting commands\n for s in split_points:\n con['admin'].command(SON([('split', namespace),\n ('middle', {shardkey: s})]))\n\n split_points = [MinKey()] + split_points\n\n # move chunks to shards (catch the one error where the chunk resides\n # on that shard already)\n for i, s in enumerate(split_points):\n try:\n if verbose:\n print('moving chunk %s in collection %s to shard %s.'\n % (s, namespace, shard_names[i % len(shards)]))\n res = con['admin'].command(SON([('moveChunk', namespace),\n ('find', {shardkey: s}),\n ('to',\n shard_names[i % len(shards)])]))\n except OperationFailure as e:\n if verbose:\n print(e)\n\n if verbose:\n print('chunk distribution:', end=' ')\n chunk_group = con['config']['chunks'].aggregate([\n { '$match': { 'ns': namespace }},\n { '$group': { \n '_id': '$shard',\n 'nChunks': { '$sum': 1 }}\n }\n ])\n print(', '.join([\"%s: %i\" % (ch['_id'], ch['nChunks'])\n for ch in chunk_group]))",
"def generate_scaffolds(self, dataset, log_every_n=1000):\n scaffolds = {}\n data_len = len(dataset)\n\n log(\"About to generate scaffolds\", self.verbose)\n for ind, smiles in enumerate(dataset.ids):\n if ind % log_every_n == 0:\n log(f\"Generating scaffold {ind} {data_len}\", self.verbose)\n scaffold = generate_scaffold(smiles)\n if scaffold not in scaffolds:\n scaffolds[scaffold] = [ind]\n else:\n scaffolds[scaffold].append(ind)\n\n # Sort from largest to smallest scaffold sets\n scaffolds = {key: sorted(value) for key, value in scaffolds.items()}\n scaffold_sets = [\n scaffold_set\n for (scaffold, scaffold_set) in sorted(scaffolds.items(),\n key=lambda x: (len(x[1]), x[1][0]),\n reverse=True)\n ]\n return scaffold_sets",
"def ShardList(list_to_shard, total_shards, shard_idx):\n length = len(list_to_shard)\n split_lists = []\n for i in range(total_shards):\n start_idx = i * length // total_shards\n end_idx = (i + 1) * length // total_shards\n split_lists.append(list_to_shard[start_idx: end_idx])\n\n return split_lists[shard_idx]",
"def sharding(nodes, terms, document=None):\n shards = defaultdict(lambda: defaultdict(list))\n document_hash = None\n\n if document:\n document_hash = get_document_hash(document)\n node = get_node_by_document_hash(nodes, document_hash)\n shards[node]['document'] = document\n shards[node]['document_hash'] = document_hash\n\n for term in terms:\n shards[get_node_by_term(nodes, term)]['terms'].append(term)\n shards[get_node_by_term(nodes, term)]['document_hash'] = document_hash\n\n return shards",
"def build_sites(self):\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n for k in range(self.shape[2]):\n for s,site in enumerate(self.cell.sites):\n newsite = copy.deepcopy(site)\n coordinate = self.cell.a1*i+\\\n self.cell.a2*j+\\\n self.cell.a3*k\n newsite.coordinate += coordinate\n self.sites[i,j,k,s] = newsite",
"def start_new_shard(self, bpf_chunk, gugid):\n with self.monitor:\n shrd = SpawnObject(self, bpf_chunk, gugid)\n shrd.start()\n self.shards.append(shrd)\n return shrd.tcp_port, shrd.udp_port",
"def parse_all_shards(shard_dir_path):\n files_to_process = utils.absolute_paths_of_files_in_dir(shard_dir_path)\n list_of_shard_results = [parse_shard(f) for f in files_to_process]\n return pd.DataFrame(\n list(itertools.chain(*list_of_shard_results)),\n columns=['sequence_name', 'predictions'])",
"def read_all_shards(partition, data_dir, bucket_name):\n\n shards = []\n gcsfs = GCSFS(bucket_name)\n for fn in gcsfs.listdir(os.path.join(data_dir, partition)):\n with gcsfs.open(os.path.join(data_dir, partition, fn)) as f:\n shards.append(pd.read_csv(f, index_col=None))\n \n return pd.concat(shards)",
"def build(self, block_size):",
"def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()"
] | [
"0.6430954",
"0.6155751",
"0.5515195",
"0.5419035",
"0.5399036",
"0.53467405",
"0.5314759",
"0.5262373",
"0.52089936",
"0.51875865",
"0.51793534",
"0.516469",
"0.5127667",
"0.5127667",
"0.50922453",
"0.50574",
"0.50531745",
"0.5042628",
"0.50401515",
"0.50333697",
"0.5030738",
"0.50270015",
"0.50261194",
"0.5020177",
"0.50140953",
"0.50070596",
"0.50054425",
"0.4999978",
"0.4999677",
"0.49761638"
] | 0.66212094 | 0 |
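`create_shards` above assigns files to shards by repeating shard indices into a mask and truncating the file list to the mask length. A sketch of the same even split done with `numpy.array_split`, which keeps every file; `file_list` and the file names are illustrative placeholders:

    import numpy as np

    def split_into_shards(file_list, n_shards):
        # array_split distributes any remainder across the first shards.
        indices = np.array_split(np.arange(len(file_list)), n_shards)
        return [[file_list[i] for i in idx] for idx in indices]

    shards = split_into_shards(['im_%d.png' % i for i in range(10)], 3)
    assert [len(s) for s in shards] == [4, 3, 3]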
Processes the event if it matches the aggregator. Returns "NEW" when the event has become the group_leader of a new group, "AGGR" when it has been added to an existing, active group or "CLEAR" if it is the clear event for a group | def process(self, event):
matchgroups = {}
try:
self.lock.acquire() # matchgroups are not thread safe, but we need to be reentrant here
if not self.matcher.matches(event):
return "PASS"
matchgroups = self.matcher.get_match_groups()
finally:
self.lock.release()
if self.autoclear:
event["group_autoclear"] = 1
else:
event["group_autoclear"] = 0
self.set_aggregation_group_id(event, matchgroups)
(group, lastmod) = self.datasource.get_group_leader(event["group_id"])
if group and time.time()-lastmod >= self.config["maxdelay"]:
logging.debug("Cleared group %s ", event["group_id"])
self.datasource.deactivate_group(event["group_id"])
group = None
if self.clear_matcher.matches(event):
group_id = event["group_id"]
event["clear_group_leader"] = group
event["clear_group_id"] = group_id
event["group_id"] = None
self.datasource.deactivate_group(group_id)
self.datasource.acknowledge_group(group_id, group)
if self.auto_acknowledge:
event["ack"] = 1
group = None
return "CLEAR"
if group:
event["group_leader"] = group
event["group_active"] = True
return "AGGR"
else:
msg = self.create_aggregation_message(event, matchgroups)
event["group_leader"] = -1
event["alternative_message"] = msg
event["group_active"] = True
return "NEW" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_groups_changed(event):\n permission_backend = event.request.registry.permission\n\n for change in event.impacted_objects:\n if \"old\" in change:\n existing_record_members = set(change[\"old\"].get(\"members\", []))\n else:\n existing_record_members = set()\n\n group = change[\"new\"]\n group_uri = f\"/buckets/{event.payload['bucket_id']}/groups/{group['id']}\"\n new_record_members = set(group.get(\"members\", []))\n new_members = new_record_members - existing_record_members\n removed_members = existing_record_members - new_record_members\n\n for member in new_members:\n # Add the group to the member principal.\n permission_backend.add_user_principal(member, group_uri)\n\n for member in removed_members:\n # Remove the group from the member principal.\n permission_backend.remove_user_principal(member, group_uri)",
"def process_event(self, event):\n\n key = apply(event.data.match.group, self.match_on)\n \n self._increment_count(key)\n\n if self._count[key] >= self.threshold:\n if self.reset:\n del self._count[key]\n return 1\n\n return 0",
"def process_event(self, event):\n\n key = apply(event.data.match.group, self.match_on)\n \n self._increment_count(key)\n\n if self._count[key] == 1:\n return 1\n elif self._count[key] == self.threshold:\n del self._count[key]\n\n return 0",
"def handlereply(op):\n cgreplies = op.records.getreplies(cgpart.id)\n assert len(cgreplies[b'changegroup']) == 1\n pushop.cgresult = cgreplies[b'changegroup'][0][b'return']",
"def __on_group_created(self, logger, *args):",
"def log_group_updated_added_event(sender, **kwargs):\n logger = logging.getLogger(__name__)\n\n group = kwargs['instance']\n if kwargs['created']:\n logger.info(\"Group added: %s. Group leader: %s (ID: %d)\",\n group.title,\n group.leader,\n group.id)\n else:\n logger.info(\"Group updated: %s. Group leader: %s (ID: %d)\",\n group.title,\n group.leader,\n group.id)",
"async def group(ctx, *, new_group=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n # Can't be group-less\n if new_group is None:\n new_group = random.choice(changeable_groups)\n new_group = new_group.lower()\n author = ctx.message.author\n member_roles = author.roles\n server_roles = ctx.message.server.roles\n\n member_allowed = discord.utils.find(lambda r: r.name.lower() == required_role, member_roles)\n\n if not member_allowed:\n need_citizen = \"You must be a member of the {0} role to join a color group\"\n await amor_manager.say(need_citizen.format(required_role.title()))\n return\n\n if new_group in changeable_groups:\n # Remove the old group the user was in\n new_roles = [r for r in member_roles if not r.name.lower() in changeable_groups]\n # Get the proper object for the user's new group\n role = discord.utils.find(lambda r: r.name.lower() == new_group, server_roles)\n if role is not None:\n new_roles.append(role)\n await(amor_manager.replace_roles(author, *new_roles))\n await amor_manager.say('{0} moved to group {1}'.format(author.name, new_group))\n else:\n suggest = random.choice(changeable_groups)\n cant_join = \"`{0}` is not a color group you're allowed to join. Why not try `{1}`\"\n await amor_manager.say(cant_join.format(new_group, suggest))",
"def handle_groupchat_message(self, msg):\n self.xmpp.event('groupchat_message', msg)\n self.xmpp.event(\"muc::%s::message\" % msg['from'].bare, msg)",
"def group_data_callback(self, action: EventType, group_id: str) -> None:\n self.process_item(group_id, {})",
"def visit_group(self, group):\n for obj in self.event_json['events']:\n event_id = obj['id']\n event = self.world.events[event_id]\n group.add(event)",
"def create_aggregation_message(self, event, matchgroups):\n msg = self.config[\"aggregatemessage\"]\n tokens = re.findall(\"[$#]\\w+\", msg)\n for token in tokens:\n if token[0] == '#':\n msg = msg.replace(token, str(event[token[1:]]))\n continue\n if token[0] == '$' and token[1:] in matchgroups: \n msg = msg.replace(token, str(matchgroups[token[1:]]))\n continue\n return msg",
"def EntryGroupNew(self):\n try:\n return self.server.EntryGroupNew()\n except dbus.DBusException:\n return None",
"def process_secgroup_commit(self, resource, event, trigger, **kwargs):\n LOG.debug(\"Received event %s notification for resource\"\n \" %s with kwargs %s\", event, resource, kwargs)\n context = kwargs['context']\n\n # Whatever we're working from should have a resource ID\n # in this form, if it exists at all. Alternatively, it may\n # be that there's no ID (because the row is freshly created).\n res = kwargs.get(resource)\n res_id = kwargs.get(\"%s_id\" % resource)\n if res_id is None:\n res_id = res.get('id')\n\n new_objects = context.session.new\n\n changed_sgids = []\n deleted_rules = []\n\n if resource == resources.SECURITY_GROUP:\n if event == DELETE_COMMIT_TIME:\n self.delete_secgroup_from_etcd(context.session,\n kwargs['security_group_id'])\n elif event == CREATE_COMMIT_TIME:\n # When Neutron creates a security group it also\n # attaches rules to it. We need to sync the rules.\n\n # Also, the SG passed to us is what comes in from the user.\n # We require what went into the DB (where we added a UUID\n # to it).\n\n if res_id is None:\n # New objects do not have their resource ID assigned\n changed_sgids = \\\n [sg.id for sg in new_objects\n if isinstance(sg, securitygroup.SecurityGroup)]\n else:\n changed_sgids = [res_id]\n\n elif resource == resources.SECURITY_GROUP_RULE:\n # We store security groups with a composite of all their\n # rules. So in this case we track down the affected\n # rule and update its entire data.\n # NB: rules are never updated.\n if event == events.BEFORE_DELETE:\n # This is a nasty little hack to add required information\n # so that the AFTER_DELETE trigger can have it\n # Fortunately the events described are all called from the\n # one DB function and we will see all of them in the one\n # process. We use a dict in case multiple threads are\n # working. Only one of them will get to the AFTER if they're\n # working on the one rule.\n # This is ugly. Liberty support is ugly.\n rule = self.get_secgroup_rule(res_id, context)\n self.deleted_rule_secgroup_id[res_id] = \\\n rule['security_group_id']\n\n if event == DELETE_COMMIT_TIME:\n\n if PRECOMMIT:\n # This works for PRECOMMIT triggers, where the rule\n # is in the DB still\n rule = self.get_secgroup_rule(res_id, context)\n changed_sgids = [rule['security_group_id']]\n else:\n # This works for AFTER_DELETE triggers (Liberty)\n # but only because we saved it in BEFORE_DELETE\n changed_sgids = [self.deleted_rule_secgroup_id[res_id]]\n # Clean up to keep the dict size down\n del self.deleted_rule_secgroup_id[res_id]\n\n deleted_rules.append(res_id)\n\n elif event == CREATE_COMMIT_TIME:\n # Groups don't have the same UUID problem - we're not\n # using their UUID, we're using their SG's, which must\n # be present.\n rule = kwargs['security_group_rule']\n changed_sgids = [rule['security_group_id']]\n\n if changed_sgids:\n self.send_sg_updates(context,\n changed_sgids,\n deleted_rules=deleted_rules)",
"def set_aggregation_group_id(self, event, matchgroups):\n id = str(self.id)\n for field in self.use_fields_for_id:\n field = field.strip()\n id = id + str(event[field])\n \n attributes = matchgroups\n for i in attributes:\n id = id + i + attributes[i]\n event[\"group_id\"] = self.hash(id)",
"def process_group_message(self, d):\n dpid = int(d.get(\"dpid\", 0))\n dp = self.dpset.get(dpid)\n if not dp:\n return \"Datapath does not exist!\"\n\n ofproto = dp.ofproto\n parser = dp.ofproto_parser\n\n command = {\n 'add': ofproto.OFPGC_ADD,\n 'mod': ofproto.OFPGC_MODIFY,\n 'del': ofproto.OFPGC_DELETE,\n }\n\n cmd = command.get(d[\"operation\"], ofproto.OFPGC_ADD)\n\n type_convert = {'ALL': dp.ofproto.OFPGT_ALL,\n 'SELECT': dp.ofproto.OFPGT_SELECT,\n 'INDIRECT': dp.ofproto.OFPGT_INDIRECT,\n 'FF': dp.ofproto.OFPGT_FF}\n\n gtype = type_convert.get(d[\"type\"])\n\n group_id = d[\"group_id\"]\n\n buckets = []\n for bucket in d[\"buckets\"]:\n weight = bucket.get('weight', 0)\n watch_port = bucket.get('watch_port', ofproto.OFPP_ANY)\n watch_group = bucket.get('watch_group', dp.ofproto.OFPG_ANY)\n actions = []\n if bucket['actions']:\n actions_list = []\n if type(bucket['actions'][0]) is str or \\\n (not PYTHON3 and type(bucket['actions'][0]) is unicode):\n # Ryu's format\n for i in bucket['actions']:\n x = i.split(':', 1)\n y = x[1].replace('{', '').replace(\n '}', '').strip() if len(x) > 1 else ''\n y = y.replace(\n ':', '=', 1) if x[0] == 'SET_FIELD' else y\n actions_list.append({x[0]: y})\n else: # FlowManager's format\n actions_list = bucket['actions']\n actions = self.get_actions(parser, actions_list)\n buckets.append(dp.ofproto_parser.OFPBucket(\n weight, watch_port, watch_group, actions))\n\n #print(dp, cmd, gtype, group_id, buckets)\n group_mod = parser.OFPGroupMod(\n dp, cmd, gtype, group_id, buckets)\n\n try:\n dp.send_msg(group_mod) # ryu/ryu/controller/controller.py\n except KeyError as e:\n return e.__repr__()\n except Exception as e:\n return e.__repr__()\n\n return \"Message sent successfully.\"",
"def group(self) -> str:\n return self._event_extras['group']",
"def on_group_members_change(self, new_members):\n log.info(\"Consumer group '%s' members changed.\", self.group_name)\n\n new_members = set(new_members)\n if new_members != self.members:\n self.members = new_members\n self.rebalance()\n\n self.members_collected.set()",
"def process_event(self, event):\r\n pass",
"def _add_group(self, group):\n\n if group.name not in self.groups:\n # it's brand new, add him!\n self.groups[group.name] = group\n if self.groups[group.name] != group:\n # different object, merge\n self._merge_groups(self.groups[group.name], group)",
"def test_chgid(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.assertTrue(self.run_function(\"group.chgid\", [self._group, self._new_gid]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertEqual(group_info[\"gid\"], self._new_gid)",
"def create_group(self, event):\n body = event['body']\n body = json.loads(body)\n\n # Required field in POST body\n if 'group_name' not in body:\n return self.get_bad_request('POST body missing group_name')\n\n group_name = body['group_name']\n user = self.mealShareUsers.get_user_cognito_data(event)\n user_id = user['user_id']\n \n # Add the creator to the group, as the initial member\n group_id = self.mealShareGroups.create_group(group_name)\n success = self.mealShareGroups.add_user_to_group(user_id, group_id)\n if success:\n return {\n 'statusCode': 200,\n 'statusMessage': 'Successfully created group {} with ID {}'.format(group_name, group_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }\n else:\n return {\n 'statusCode': 500,\n 'statusMessage': 'FAILED to create group {} by user {}'.format(group_name, user_id),\n 'group_id': group_id,\n 'group_name': group_name,\n 'user_id': user_id\n }",
"def _process_event(self, event: Dict[str, Any]) -> None:\n try:\n content = event[\"content\"]\n _LOGGER.debug(\"Received event: %s\", content)\n except KeyError:\n _LOGGER.warning(\"Received invalid event: %s\", event)\n return\n\n if content[\"deviceId\"] is not None:\n device_id = content[\"deviceId\"]\n self._update_device_attr(\n device_id, content[\"name\"], content[\"value\"], content[\"unit\"]\n )\n\n evt = Event(content)\n\n if device_id in self._listeners:\n for listener in self._listeners[device_id]:\n listener(evt)\n elif content[\"name\"] == \"mode\":\n name = content[\"value\"]\n mode_set = False\n for mode in self._modes:\n if mode.name == name:\n mode.active = True\n mode_set = True\n else:\n mode.active = False\n\n # If the mode wasn't set, this is a new mode. Add a placeholder\n # to the modes list, and reload the modes\n if not mode_set:\n self._modes.append(Mode({\"active\": True, \"name\": name}))\n _ = self._load_modes()\n\n evt = Event(content)\n\n for listener in self._listeners.get(ID_MODE, []):\n listener(evt)\n\n elif content[\"name\"] == \"hsmStatus\":\n self._hsm_status = content[\"value\"]\n evt = Event(content)\n for listener in self._listeners.get(ID_HSM_STATUS, []):\n listener(evt)",
"def process_secgroup_after(self, resource, event, trigger, **kwargs):\n # In Liberty, this is the only callback that's called.\n # We use our own event names, which will identify AFTER_*\n # events as the right time to commit, so in this case we\n # simply call the commit function ourselves.\n\n # This is not perfect - since we're not committing in one\n # transaction we can commit the secgroup change but fail to\n # propagate it to the journal and from there to etcd on a\n # crash. It's all we can do for Liberty as it doesn't support\n # in-transaction precommit events.\n if not PRECOMMIT:\n self.process_secgroup_commit(resource, event, trigger, **kwargs)\n\n # Whatever the object that caused this, we've put something\n # in the journal and now need to nudge the communicator\n self.kick()",
"def process_groups(groups, logs):\n events = list()\n \n for group in groups:\n tag = group[2]\n target = group[3]\n msg_type = group[-1].lower()\n if tag == ACTIVITY_TAG or tag == DIALOG_TAG or tag == VIEW_TAG:\n\n if group[0] == group[1]:\n if msg_type == 'touchevent':\n events.append(touch_processor.create_touch_event(msg_type, target, logs[group[0]], group[0], tag))\n elif msg_type == 'keyevent':\n events.append(key_processor.create_key_event(msg_type, target, logs[group[0]], group[0]))\n continue\n\n # Activity & Dialig\n if msg_type == 'touchevent':\n event_logs = clear_logs(logs[group[0]:group[1]+1], [ACTIVITY_TAG, DIALOG_TAG, VIEW_TAG])\n ev = touch_processor.parse_touch_event(msg_type, target, event_logs, group[0], tag)\n elif msg_type == 'keyevent':\n event_logs = clear_logs(logs[group[0]:group[1]+1], [ACTIVITY_TAG, DIALOG_TAG])\n ev = key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == POPUPWINDOW_TAG:\n # PopupWindow, process view onTouchEvent\n events.append(popup_window_processor.create_popup_window_event(msg_type, target, logs[group[0]], group[0]))\n view_groups = group[4]\n view_events = process_groups(view_groups, logs)\n if len(view_events) != 0:\n events += view_events\n events.append(popup_window_processor.create_popup_window_event(msg_type, target, logs[group[1]], group[1]))\n elif tag == EDITABLE_INPUT_CONNECTION_TAG:\n # Input Event\n nested_groups = group[4]\n # Process nested events\n nested_events = process_groups(nested_groups, logs)\n evs = input_processor.parse_input_event(msg_type, target, logs[group[0]:group[1]+1], nested_events, group[0])\n events += evs\n elif tag == TEXT_VIEW_KEY_TAG:\n # Keyboard event caught by TextView onKeyPreIme\n event_logs = clear_logs(logs[group[0]:group[1]+1], [TEXT_VIEW_KEY_TAG])\n ev = key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n ev.intent = event.KeyEvent.HIDE_KEYBOARD_INTENT\n events.append(ev)\n elif tag == WEBVIEW_KEY_EVENT_TAG:\n # WebView KeyBoard event\n event_logs = logs[group[0]:group[1]+1]\n ev = wv_key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == WEBVIEW_CLIENT_TAG:\n # WebView page loaded\n event_logs = logs[group[0]:group[1]+1]\n ev = wv_page_loaded_processor.parse_page_loaded(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == SENSOR_LISTENER_TAG:\n # Low level sensor\n event_logs = logs[group[0]:group[1]+1]\n ev = low_level_sensor_processor.parse_low_level_sensor_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == LOCATION_MANAGER_TAG or tag == LOCATION_LISTENER_TAG:\n event_logs = logs[group[0]:group[1]+1]\n ev = location_processor.parse_location_sensor_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n\n return events",
"async def handle_set_group(self, match: Match[str], payload: str) -> None:\n groupid = match.group(1)\n\n try:\n group = self._bridge.groups[groupid]\n state = GroupSetState(**json.loads(payload))\n LOGGER.info(f\"Updating group {group.name}\")\n await group.set_action(**state.dict())\n except IndexError:\n LOGGER.warning(f\"Unknown group id: {groupid}\")\n except json.JSONDecodeError:\n LOGGER.warning(f\"Bad JSON on light request: {payload}\")\n except TypeError:\n LOGGER.warning(f\"Expected dictionary, got: {payload}\")\n except ValidationError as e:\n LOGGER.warning(f\"Invalid light state: {e}\")",
"async def groups_service_handler(service: ServiceCall) -> None:\n object_id = service.data[ATTR_OBJECT_ID]\n entity_id = f\"{DOMAIN}.{object_id}\"\n group = component.get_entity(entity_id)\n\n # new group\n if service.service == SERVICE_SET and group is None:\n entity_ids = (\n service.data.get(ATTR_ENTITIES)\n or service.data.get(ATTR_ADD_ENTITIES)\n or None\n )\n\n extra_arg = {\n attr: service.data[attr]\n for attr in (ATTR_ICON,)\n if service.data.get(attr) is not None\n }\n\n await Group.async_create_group(\n hass,\n service.data.get(ATTR_NAME, object_id),\n object_id=object_id,\n entity_ids=entity_ids,\n user_defined=False,\n mode=service.data.get(ATTR_ALL),\n **extra_arg,\n )\n return\n\n if group is None:\n _LOGGER.warning(\"%s:Group '%s' doesn't exist!\", service.service, object_id)\n return\n\n # update group\n if service.service == SERVICE_SET:\n need_update = False\n\n if ATTR_ADD_ENTITIES in service.data:\n delta = service.data[ATTR_ADD_ENTITIES]\n entity_ids = set(group.tracking) | set(delta)\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_REMOVE_ENTITIES in service.data:\n delta = service.data[ATTR_REMOVE_ENTITIES]\n entity_ids = set(group.tracking) - set(delta)\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_ENTITIES in service.data:\n entity_ids = service.data[ATTR_ENTITIES]\n await group.async_update_tracked_entity_ids(entity_ids)\n\n if ATTR_NAME in service.data:\n group.name = service.data[ATTR_NAME]\n need_update = True\n\n if ATTR_ICON in service.data:\n group.icon = service.data[ATTR_ICON]\n need_update = True\n\n if ATTR_ALL in service.data:\n group.mode = all if service.data[ATTR_ALL] else any\n need_update = True\n\n if need_update:\n group.async_write_ha_state()\n\n return\n\n # remove group\n if service.service == SERVICE_REMOVE:\n await component.async_remove_entity(entity_id)",
"def map_event_code(event_code):\n event_code = int(event_code)\n\n # Honestly, these are just guessing based on the below event list.\n # It could be wrong, I have no idea.\n if 1100 <= event_code <= 1199:\n return ALARM_GROUP\n\n if 3100 <= event_code <= 3199:\n return ALARM_END_GROUP\n\n if 1300 <= event_code <= 1399:\n return PANEL_FAULT_GROUP\n\n if 3300 <= event_code <= 3399:\n return PANEL_RESTORE_GROUP\n\n if 1400 <= event_code <= 1499:\n return DISARM_GROUP\n\n if 3400 <= event_code <= 3799:\n return ARM_GROUP\n\n if 1600 <= event_code <= 1699:\n return TEST_GROUP\n\n if 5000 <= event_code <= 5099:\n return CAPTURE_GROUP\n\n if 5100 <= event_code <= 5199:\n return DEVICE_GROUP\n\n if 5200 <= event_code <= 5299:\n return AUTOMATION_GROUP\n\n if 6000 <= event_code <= 6100:\n return ARM_FAULT_GROUP\n\n return None",
"def test_group_of_one(self):\n self.testcases[0].group_id = 1\n self.testcases[0].put()\n self.testcases[1].key.delete()\n\n grouper.group_testcases()\n\n testcase = data_handler.get_testcase_by_id(self.testcases[0].key.id())\n self.assertEqual(testcase.group_id, 0)\n self.assertTrue(testcase.is_leader)",
"def _get_new_group_id():\n new_group = data_types.TestcaseGroup()\n new_group.put()\n return new_group.key.id()",
"def _processEvent(self):\n\t\ttry:\n\t\t\t# Run CUSUM+ to detect changes in level\n\t\t\tself.__FitEvent()\n\t\texcept:\n\t\t\traise"
] | [
"0.5848911",
"0.5695644",
"0.5646584",
"0.5636374",
"0.5563719",
"0.55539626",
"0.5505983",
"0.5474636",
"0.5439717",
"0.5439534",
"0.53875905",
"0.5384203",
"0.52943784",
"0.52490634",
"0.5216275",
"0.5209896",
"0.51932746",
"0.5185326",
"0.51836437",
"0.517594",
"0.51704055",
"0.51677305",
"0.5153258",
"0.5141197",
"0.5086968",
"0.50817186",
"0.50731605",
"0.50647694",
"0.50530106",
"0.5029424"
] | 0.7965338 | 0 |
returns the md5 hash of a string | def hash(self, string):
h = md5()
h.update(string)
return h.digest() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def md5hash(string):\n return hashlib.md5(string).hexdigest()",
"def calc_md5(string):\n\treturn md5(string).hexdigest()",
"def md5(string: str) -> str:\n\treturn str(hashlib.md5(string.encode()).hexdigest())",
"def digest(string):\n return md5(string.encode(\"utf-8\")).hexdigest()",
"def md5(input_string):\n return hashlib.md5(input_string.encode('utf-8')).hexdigest()",
"def compute_md5_for_string(string):\n return base64.b64encode(hashlib.md5(string).digest())",
"def get_md5(s):\n m = hashlib.md5()\n m.update(s.encode('utf8'))\n return m.hexdigest()",
"def md5_sum(string):\n m = hashlib.md5()\n m.update(string.encode(\"utf-8\"))\n return m.hexdigest()",
"def string_to_md5(content):\n return hashlib.md5(content).hexdigest()",
"def get_hash(s):\n hash_object = hashlib.md5(s.encode())\n return hash_object.hexdigest()",
"def calc_md5(s: Union[bytes, str]) -> str:\n h = hashlib.new(\"md5\")\n\n b = s.encode(\"utf-8\") if isinstance(s, str) else s\n\n h.update(b)\n return h.hexdigest()",
"def get_md5(text):\n return hashlib.md5(text).hexdigest()",
"def md5(s: str) -> str:\n return hashlib.md5(s.encode()).hexdigest()",
"def __hash_md5__(self, text):\n key = hashlib.md5()\n key.update(text.encode('utf-8'))\n return key.digest()",
"def get_md5(string):\r\n byte_string = string.encode(\"utf-8\")\r\n md5 = hashlib.md5()\r\n md5.update(byte_string)\r\n result = md5.hexdigest()\r\n return 'M'+result",
"def rss_md5(string):\r\n if not isinstance(string, basestring):\r\n try: string = string.decode('utf8','replace')\r\n except: pass\r\n md5 = hashlib.md5()\r\n md5.update(string.encode('utf8'))\r\n return md5.hexdigest()",
"def string_md5(unicode_string):\n return hashlib.md5(unicode_string.encode('utf-8')).hexdigest()",
"def _md5(input):\n m = hashlib.md5()\n m.update(input)\n return m.hexdigest()",
"def get_md5_from_str(src: str) -> str:\n res: str = \"\"\n if not isinstance(src, str) or str == \"\":\n return res\n m: hashlib._hashlib.HASH = hashlib.md5()\n m.update(src.encode('utf-8'))\n res = m.hexdigest()\n return res",
"def __md5_hash(txt) -> str:\n\n return md5_crypt.hash(txt)",
"def md5_value(strg):\n\tmd5 = hashlib.md5()\n\tmd5.update(strg.encode('UTF-8'))\n\treturn md5.hexdigest()",
"def get_md5(self, line):\n m = hashlib.md5()\n m.update(str(line).encode('utf-8'))\n return m.hexdigest()",
"def get_md5(byte_string=b''):\n try:\n return hashlib.md5(byte_string)\n except ValueError:\n # On Red Hat-based platforms, may catch a FIPS error.\n # \"usedforsecurity\" flag only available on Red Hat systems or Python 3.9+.\n # pylint:disable=unexpected-keyword-arg\n return hashlib.md5(byte_string, usedforsecurity=False)\n # pylint:enable=unexpected-keyword-arg",
"def md5(s1):\n s = str(s1)\n h1 = hashlib.md5()\n h1.update(s.encode(encoding='utf-8'))\n s = h1.hexdigest()\n return s",
"def strhash(s: str) -> int:\n h = hashlib.md5(s.encode('utf-8'))\n h = int(h.hexdigest(), base=16)\n return h",
"def hash_string(input_str):\n input_b = str.encode(input_str)\n input_hash = hashlib.md5(input_b.lower())\n input_hash_str = input_hash.hexdigest()\n\n return input_hash_str",
"def md_5_hash(i):\n h = hashlib.md5(i.encode('utf-8')).hexdigest()\n return h",
"def gen_hash(s: str) -> str:\n\n m = hashlib.md5()\n m.update(bytes(s, encoding = 'utf8'))\n hash_code = str(m.hexdigest())\n\n return hash_code",
"def apply_hash (self, s):\r\n m = md5()\r\n m.update (s)\r\n d = m.digest()\r\n # base64.encodestring tacks on an extra linefeed.\r\n return encodestring (d)[:-1]",
"def str_to_hash(self, param):\n param = param.encode('utf-8')\n my_hash = hashlib.md5(param)\n return my_hash.hexdigest()"
] | [
"0.9092539",
"0.88171506",
"0.87841374",
"0.8736102",
"0.8721026",
"0.86720395",
"0.86507165",
"0.85748506",
"0.8516772",
"0.8485681",
"0.8442403",
"0.84263587",
"0.8423831",
"0.82837284",
"0.82373786",
"0.8200525",
"0.8188318",
"0.8168509",
"0.80750155",
"0.8010676",
"0.7874055",
"0.78079087",
"0.77796376",
"0.7777433",
"0.768681",
"0.7670453",
"0.7669643",
"0.76317567",
"0.7626971",
"0.75631875"
] | 0.88464713 | 1 |
Creates the different matchers for this aggregator (General matcher, clear matcher) | def create_matcher(self):
self.matcher = None
if "matcher" in self.config:
self.matcher = matcher.Matcher(self.config["matcher"])
else:
self.matcher = matcher.TrueMatcher()
self.use_fields_for_id = []
if "matcherfield" in self.config:
self.use_fields_for_id = self.config["matcherfield"].split(",")
if "clear" in self.config:
self.clear_matcher = matcher.Matcher(self.config["clear"])
self.autoclear = self.auto_acknowledge
else:
self.clear_matcher = matcher.FalseMatcher()
self.autoclear = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init_matcher(self):\n\t\tmatcher = Matcher()\n\t\tmatcher.set_mentors(self.mentors.find())\n\t\tmatcher.set_migrants(self.migrants.find())\n\t\tprint(matcher.migrants)\n\t\t\n\t\treturn matcher",
"def __init__(self, matcher, generate):\n self.matcher = matcher\n self._generate = generate",
"def __init__(self, compound_matcher):\n self._matcher = compound_matcher",
"def __init__(self, matcher):\n valid_types = (\n type(None), list, dict, int, float, six.string_types, Matcher)\n\n assert isinstance(matcher, valid_types), (\n \"matcher must be one of '{}', got '{}'\".format(\n valid_types, type(matcher)))\n\n self.matcher = matcher",
"def __init__(self, *args):\n self.args = args\n self.matchers = []\n for a in args:\n if a is _:\n a = lambda k: True\n elif isinstance(a, basestring):\n a = a.__eq__\n elif isinstance(a, (list, tuple, set)):\n a = (lambda ary: (lambda k: k in ary))(a)\n elif hasattr(a, 'search'):\n a = a.search\n else:\n a = str(a).__eq__\n self.matchers.append(a)",
"def test_matcher(self):\n\n if self.config.xml_generator == \"gccxml\":\n return\n\n decls = parser.parse([self.header], self.config)\n global_ns = declarations.get_global_namespace(decls)\n criteria = declarations.declaration_matcher(name=\"myClass\")\n _ = declarations.matcher.find(criteria, global_ns)",
"def _create_matcher(self, matcher_config: dict) -> SubProfileMatcher:\n matcher_type: SubProfileMatcherType = matcher_config[\"type\"]\n if matcher_type == SubProfileMatcherType.ATTRIBUTE:\n return AttributeMatcher(matcher_config[\"attribute\"], matcher_config[\"map\"])\n if matcher_type == SubProfileMatcherType.ENTITY_STATE:\n return EntityStateMatcher(\n self._hass,\n self._source_entity,\n matcher_config[\"entity_id\"],\n matcher_config[\"map\"],\n )\n if matcher_type == SubProfileMatcherType.ENTITY_ID:\n return EntityIdMatcher(matcher_config[\"pattern\"], matcher_config[\"profile\"])\n if matcher_type == SubProfileMatcherType.INTEGRATION:\n return IntegrationMatcher(\n matcher_config[\"integration\"],\n matcher_config[\"profile\"],\n )\n raise PowercalcSetupError(f\"Unknown sub profile matcher type: {matcher_type}\")",
"def _add_matcher_specific_properties_to_json(self):\n return {\n 'unaryNumericMatcherData': {\n 'dataType': self._data_type,\n 'value': self._original_value,\n }\n }",
"def _add_matcher_specific_properties_to_json(self):\n return {\n 'unaryNumericMatcherData': {\n 'dataType': self._data_type,\n 'value': self._original_value,\n }\n }",
"def _add_matcher_specific_properties_to_json(self):\n return {\n 'unaryNumericMatcherData': {\n 'dataType': self._data_type,\n 'value': self._original_value,\n }\n }",
"def test_standard_regex_compile(self, mock_re_compile):\n self.finder.component_base_directories = ()\n for _ in self.finder.list(['normal regex.*with middle']): # generators gonna generate\n pass\n mock_re_compile.assert_called_with('normal regex.*with middle')",
"def get_match_criteria(self):\n #-- factory attributes ----\n print(\"\"\"\nWhat glidein/factory attributres are you using in the match expression?\nI have computed my best estimate for your match string,\nplease verify and correct if needed.\n\"\"\")\n default_factory_attributes = string.join(self.extract_factory_attrs(), ',')\n factory_attributes = raw_input(\"Factory attributes: [%s] \"%default_factory_attributes)\n if factory_attributes == \"\":\n factory_attributes = default_factory_attributes\n if factory_attributes == \"\":\n factory_attributes = []\n else:\n factory_attributes = string.split(factory_attributes, ',')\n\n #--- job_attributes --\n print(\"\"\"\nWhat job attributes are you using in the match expression?\nI have computed my best estimate for your match string,\nplease verify and correct if needed.\n\"\"\")\n default_job_attributes = string.join(self.extract_job_attrs(), ',')\n job_attributes = raw_input(\"Job attributes: [%s] \" % default_job_attributes)\n if job_attributes == \"\":\n job_attributes = default_job_attributes\n if job_attributes == \"\":\n job_attributes = []\n else:\n job_attributes = string.split(job_attributes, ',')\n\n #--- create xml ----\n data = \"\"\"\n%(indent2)s<group name=\"%(group_name)s\" enabled=\"True\">\n%(indent3)s<match match_expr=%(match_string)s start_expr=\"True\">\n%(factory_attributes)s\n%(job_attributes)s\n%(indent3)s</match>\n%(indent2)s</group>\n\"\"\" % \\\n{ \"indent2\": common.indent(2),\n \"indent3\": common.indent(3),\n \"indent4\": common.indent(4),\n \"group_name\": self.group_name(),\n \"match_string\": glideinwms.lib.xmlFormat.xml_quoteattr(self.match_string()),\n \"factory_attributes\": self.factory_data(factory_attributes),\n \"job_attributes\": self.job_data(job_attributes),\n}\n return data",
"def __init__(self, *regexes): #todo: maybe design a container for regexes (because of precedence)\n self._regexes: list = regexes\n for regex in self._regexes:\n try:\n assert type(regex) is rgx.RegEx\n except AssertionError as e:\n print(type(regex), e)\n self._ignored = set()",
"def testMatch(self):\n\n self.inv._literals_filter['fruit'] = ['pear', 'apple']\n self.inv._literals_filter['xfruit'] = None\n self.inv._compiled_filter['shape'] = None\n self.inv._compiled_filter['xshape'] = None\n self.assertTrue(self.inv._Match('fruit', 'apple'))\n\n self.inv._literals_filter['fruit'] = None\n self.inv._compiled_filter['fruit'] = [re.compile('^apple$')]\n self.assertTrue(self.inv._Match('fruit', 'apple'))",
"def by_regex(cls, *patterns):\n return cls(*(to_matcher(RegexMatcher, p) for p in patterns))",
"def at_cmdmatcher_creation(self):\n pass",
"def setup_matcher(triggers, matcher):\n spacy.tokens.Token.set_extension(\"clue\", default=None, force=True)\n for elm in triggers:\n variants = [elm, elm[0].upper() + elm[1:], elm.upper()]\n for var in variants:\n # support for discontinuous cues. allow a maximum of x tokens between the parts of the discontinuous cue\n x = 6\n if '#' in var:\n cues = var.split('#')\n pattern = []\n for i, cue in enumerate(cues):\n if i != 0:\n for _ in range(x):\n pattern.append({'OP': '?'})\n pattern.append({'ORTH': cue})\n matcher.add(var, [pattern], on_match=mark_discont_cue)\n else:\n pattern = [{'ORTH': tok} for tok in var.split()]\n matcher.add(var, [pattern], on_match=mark_cue)",
"def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)",
"def create_match(self, parser, fields):\n match = parser.OFPMatch()\n for (field, value) in fields.iteritems():\n match.append_field(field, value)\n return match",
"def matches(self):\n pass",
"def choose_algorithm(self):\n\n # if case insensitive lowercase patterns\n for i in range(len(self.patterns)):\n if self.case_insensitive:\n self.patterns[i] = self.patterns[i].lower()\n\n # naive matcher option\n if self.naive:\n matcher = NaiveStringMatcher(self.patterns)\n return matcher\n\n # AHC matcher by default\n matcher = State.create_automaton(self.patterns)\n return matcher",
"def unpickle_matcher(\n matcher: Type[TokenMatcher],\n vocab: Vocab,\n patterns: DefaultDict[str, List[List[Dict[str, Any]]]],\n callbacks: Dict[str, TokenCallback],\n defaults: Any,\n) -> Any:\n matcher_instance = matcher(vocab, **defaults)\n for key, specs in patterns.items():\n callback = callbacks.get(key)\n matcher_instance.add(key, specs, on_match=callback)\n return matcher_instance",
"def __init__(self, regex, groups, nestedPattern = None, ignored = dict()):\r\n self.regex = regex.format(*[x.group() for x in groups])\r\n self.groups = groups\r\n self.ignored = ignored\r\n self.nestedPattern = nestedPattern\r\n self.name = \"_\"\r\n while self.name in self.groups:\r\n self.name += \"_\"",
"def __init__(self, matcher):\n self.matcher = matcher\n\n self.dv = list(set(self.make_decision_variables()))\n\n self.objective = np.array([x.count(\", \") + 1 for x in self.dv])\n self.constraintix = self.constraint_matrix()\n self.upper_bound = np.repeat(1, len( self.constraintix ))\n self.match_dict = self.make_match_dict()\n self.out = self.output_graph()\n self.missed_requirments = self.missing_requirments()\n self.missed_courses = self.missing_courses()",
"def matcher_description(self):\n return None",
"def MatchAll():\n return {\"match_all\": {}}",
"def create_default_mock_expectation(self, method, path, response_code=200,\n response_headers=None, body_type='JSON',\n response_body=None):\n req = self.create_mock_request_matcher(method, path, exact=False)\n rsp = self.create_mock_response(response_code, response_headers, body_type, response_body)\n self.create_mock_expectation(req, rsp, unlimited=True)",
"def _create_regexes():\n space = r'(?:[^\\S\\n]| |&\\#0*160;|&\\#[Xx]0*[Aa]0;)'\n spaces = r'{space}+'.format(space=space)\n space_dash = r'(?:-|{space})'.format(space=space)\n tags = [\n 'gallery',\n 'math',\n 'nowiki',\n 'pre',\n 'score',\n 'source',\n 'syntaxhighlight',\n ]\n # Based on pywikibot.textlib.compileLinkR\n # and https://gist.github.com/gruber/249502\n url = r'''(?:[a-z][\\w-]+://[^\\]\\s<>\"]*[^\\]\\s\\.:;,<>\"\\|\\)`!{}'?«»“”‘’])'''\n _regexes.update(\n {\n 'bare_url': re.compile(r'\\b({})'.format(url), flags=re.I),\n 'bracket_url': re.compile(\n r'(\\[{}[^\\]]*\\])'.format(url), flags=re.I\n ),\n 'ISBN': re.compile(\n r'\\bISBN(?P<separator>{spaces})(?P<value>(?:97[89]{space_dash}'\n r'?)?(?:[0-9]{space_dash}?){{9}}[0-9Xx])\\b'.format(\n spaces=spaces, space_dash=space_dash\n )\n ),\n 'PMID': re.compile(\n r'\\bPMID(?P<separator>{spaces})(?P<value>[0-9]+)\\b'.format(\n spaces=spaces\n )\n ),\n 'RFC': re.compile(\n r'\\bRFC(?P<separator>{spaces})(?P<value>[0-9]+)\\b'.format(\n spaces=spaces\n )\n ),\n 'tags': re.compile(\n r'''(<\\/?\\w+(?:\\s+\\w+(?:\\s*=\\s*(?:(?:\"[^\"]*\")|(?:'[^']*')|'''\n r'''[^>\\s]+))?)*\\s*\\/?>)'''\n ),\n 'tags_content': re.compile(\n r'(<(?P<tag>{})\\b.*?</(?P=tag)>)'.format(r'|'.join(tags)),\n flags=re.I | re.M,\n ),\n }\n )",
"def test_match(self):\n\n # Test of the rematch case.\n regex = r\"([a-z]{1,})\\s([a-z]{1,})\\s\"\n expected = \"is\"\n actual = Regex(self.data, regex, rematch=True, group=1).match()\n\n self.assertEqual(expected, actual)\n\n # Test of the group case\n regex = \"e\"\n expected = \"e\"\n actual = Regex(self.data, regex, group=0).match()\n\n self.assertEqual(expected, actual)",
"def testMatchingMetrics(self):\n self.assertDictEqual(self.test_class._matching_metrics, {\n 'response': 1.0,\n 'spend': 0.0,\n })\n default_class = TrimmedMatchGeoXDesign(\n GeoXType.HEAVY_UP,\n pretest_data=self.test_data,\n time_window_for_design=self.design_window,\n time_window_for_eval=self.evaluation_window)\n self.assertDictEqual(default_class._matching_metrics, {\n 'response': 1.0,\n 'spend': 0.01,\n })"
] | [
"0.6749215",
"0.64610773",
"0.60556746",
"0.58584523",
"0.56751424",
"0.54838455",
"0.5467794",
"0.54370964",
"0.54370964",
"0.54370964",
"0.5433247",
"0.54226667",
"0.5415843",
"0.5362952",
"0.5348948",
"0.53339624",
"0.527907",
"0.523279",
"0.5200776",
"0.5183958",
"0.51480883",
"0.508487",
"0.5068616",
"0.5065737",
"0.5056428",
"0.50480837",
"0.50458956",
"0.5037641",
"0.5034248",
"0.49930486"
] | 0.70251894 | 0 |
if an alternative aggregation message is defined, it will be created here. FIELD is substituted with the event field FIELD; $VAL is substituted with the regular expression group VAL | def create_aggregation_message(self, event, matchgroups):
msg = self.config["aggregatemessage"]
tokens = re.findall("[$#]\w+", msg)
for token in tokens:
if token[0] == '#':
msg = msg.replace(token, str(event[token[1:]]))
continue
if token[0] == '$' and token[1:] in matchgroups:
msg = msg.replace(token, str(matchgroups[token[1:]]))
continue
return msg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process(self, event):\n matchgroups = {}\n try:\n self.lock.acquire() # matchgroups are not thread safe, but we need to be reentrant here\n if not self.matcher.matches(event):\n return \"PASS\"\n matchgroups = self.matcher.get_match_groups()\n finally:\n self.lock.release()\n\n\n if self.autoclear:\n event[\"group_autoclear\"] = 1\n else:\n event[\"group_autoclear\"] = 0\n \n self.set_aggregation_group_id(event, matchgroups)\n (group, lastmod) = self.datasource.get_group_leader(event[\"group_id\"])\n\n if group and time.time()-lastmod >= self.config[\"maxdelay\"]:\n logging.debug(\"Cleared group %s \", event[\"group_id\"])\n self.datasource.deactivate_group(event[\"group_id\"])\n group = None\n\n if self.clear_matcher.matches(event):\n group_id = event[\"group_id\"]\n event[\"clear_group_leader\"] = group\n event[\"clear_group_id\"] = group_id\n event[\"group_id\"] = None\n self.datasource.deactivate_group(group_id)\n self.datasource.acknowledge_group(group_id, group)\n if self.auto_acknowledge:\n event[\"ack\"] = 1\n group = None\n return \"CLEAR\"\n\n \n if group: \n event[\"group_leader\"] = group\n event[\"group_active\"] = True\n return \"AGGR\"\n else:\n msg = self.create_aggregation_message(event, matchgroups)\n event[\"group_leader\"] = -1\n event[\"alternative_message\"] = msg\n event[\"group_active\"] = True\n return \"NEW\"",
"def _callback(self, matcher):\n matched_field = matcher.group(self.field)\n replacement = self.lookup.get(matched_field)\n if not replacement:\n return matcher.group(0)\n\n fields = list(f or \"\" for f in matcher.groups())\n fields[self.field - 1] = replacement\n\n return \"\".join(fields)",
"def set_aggregation_group_id(self, event, matchgroups):\n id = str(self.id)\n for field in self.use_fields_for_id:\n field = field.strip()\n id = id + str(event[field])\n \n attributes = matchgroups\n for i in attributes:\n id = id + i + attributes[i]\n event[\"group_id\"] = self.hash(id)",
"def _aggregation_target(self):\n ...",
"def add_aggregation_data(self, payload):\n raise NotImplementedError()",
"def set_aggregate_data(self, event_name, value, key=None):\n \n raise NotImplementedError()",
"def test_forbid_aggregation(self):\n\n bad_examples = \"\"\"\nsum([score]) ->\nAggregations are not allowed in this field.\n\nsum([score])\n^\n===\nsum(score) ->\nAggregations are not allowed in this field.\n\nsum(score)\n^\n===\nsum(department) ->\nA str can not be aggregated using sum.\n\nsum(department)\n^\n===\n2.1235 + sum(department) ->\nA str can not be aggregated using sum.\n\n2.1235 + sum(department)\n ^\n===\nsum(score) + sum(department) ->\nAggregations are not allowed in this field.\n\nsum(score) + sum(department)\n^\nA str can not be aggregated using sum.\n\nsum(score) + sum(department)\n ^\n===\nsum(score) + sum(department) ->\nAggregations are not allowed in this field.\n\nsum(score) + sum(department)\n^\nA str can not be aggregated using sum.\n\nsum(score) + sum(department)\n ^\n\"\"\"\n\n for field, expected_error in self.bad_examples(bad_examples):\n with self.assertRaises(Exception) as e:\n self.builder.parse(field, forbid_aggregation=True, debug=True)\n if str(e.exception).strip() != expected_error.strip():\n print(\"===\" * 10)\n print(str(e.exception))\n print(\"vs\")\n print(expected_error)\n print(\"===\" * 10)\n self.assertEqual(str(e.exception).strip(), expected_error.strip())",
"def filter_datum(fields: List[str], redaction: str,\n message: str, separator: str) -> str:\n for field in fields:\n message = re.sub(fr'{field}=.+?{separator}',\n f'{field}={redaction}{separator}', message)\n return message",
"def transform_aggregation(self, aggregation):\n new_aggregation = self.transform_subjects_aggregation(aggregation)\n new_aggregation = self.transform_authors_aggregation(new_aggregation)\n return new_aggregation",
"def __init__(self, field, derived_field = None):\r\n super(TextSubstituteNode, self).__init__()\r\n\r\n self.field = field\r\n self.derived_field = derived_field\r\n self.substitutions = []",
"def format(self, record):\n\n scrubbed = record[\"message\"]\n # scrubs any messages that match the message pattern\n if isinstance(scrubbed, dict):\n scrubbed = json.dumps(scrubbed)\n for search, replace in self.scrub_patterns.items():\n scrubbed = re.sub(search, replace, scrubbed)\n record[\"extra\"][\"scrubbed\"] = scrubbed\n\n if not record[\"extra\"].get(\"device\") or record[\"extra\"].get(\"device\") is None:\n record[\"extra\"][\"device\"] = \"\"\n else:\n record[\"extra\"][\"device\"] = f\"{record['extra']['device']} - \"\n return self.fmt",
"def logstash_processor(_, __, event_dict):\n if 'message' in event_dict and 'full_message' not in event_dict:\n event_dict['full_message'] = event_dict['message']\n event_dict['message'] = event_dict.pop('event', '')\n for key, value in event_dict.items():\n if hasattr(value, 'isoformat') and callable(value.isoformat):\n event_dict[key] = value.isoformat() + 'Z'\n event_dict['@version'] = 1\n event_dict['_type'] = event_dict['type'] = 'feedhq'\n return event_dict",
"def test_syslog_ng_priofield_bug_2132(self):\n transformer = SplitTransformer()\n transformer.setup(\"test\", {\n \"dateformat\" : \"%Y-%m-%d %H:%M:%S\",\n \"group_order\" : \"HOST_NAME HOST_ADDRESS SYSLOG_PRI TIME DATE MESSAGE\"\n })\n teststring = \"test_host\\t42.2.53.52\\t191\\t11:00:24\\t2012-12-10\\tTestmessage\"\n event = transformer.transform(teststring)\n assert event != None\n assert event[\"host_name\"] == \"test_host\"\n assert event[\"host_address\"] == IPAddress(\"42.2.53.52\")\n assert event[\"priority\"] == 7\n assert event[\"facility\"] == 23\n assert event[\"message\"] == \"Testmessage\"",
"def test_enforce_aggregation(self):\n\n good_examples = \"\"\"\n [score] -> sum(datatypes.score)\n [ScORE] -> sum(datatypes.score)\n [ScORE] + [ScORE] -> sum(datatypes.score + datatypes.score)\n max([ScORE] + [ScORE]) -> max(datatypes.score + datatypes.score)\n max(score) - min(score) -> max(datatypes.score) - min(datatypes.score)\n max(scores.score) -> max(scores.score)\n max([score] - [scores.score]) -> max(datatypes.score - scores.score)\n \"\"\"\n\n for field, expected_sql in self.examples(good_examples):\n expr, _ = self.builder.parse(field, enforce_aggregation=True, debug=True)\n self.assertEqual(expr_to_str(expr), expected_sql)",
"def process_MESSAGE_TYPE_EMG(self, raw):\n\n pass",
"def __init__(self, aggr=\"add\", flow=\"target_to_source\", node_dim=0) -> None:\n super(MessagePassing, self).__init__()\n\n self.aggr = aggr\n assert self.aggr in [\"add\", \"mean\", \"max\"]\n\n self.flow = flow\n assert self.flow in [\"source_to_target\", \"target_to_source\"]\n\n self.node_dim = node_dim\n assert self.node_dim >= 0\n\n self.__msg_params__tmp = inspect.signature(self.message).parameters\n self.__msg_params__ = OrderedDict(self.__msg_params__tmp)\n\n self.__aggr_params__tmp = inspect.signature(self.aggregate).parameters\n self.__aggr_params__ = OrderedDict(self.__aggr_params__tmp)\n self.__aggr_params__.popitem(last=False)\n\n self.__update_params__tmp = inspect.signature(self.update).parameters\n self.__update_params__ = OrderedDict(self.__update_params__tmp)\n self.__update_params__.popitem(last=False)\n\n msg_args = set(self.__msg_params__.keys()) - msg_special_args\n aggr_args = set(self.__aggr_params__.keys()) - aggr_special_args\n update_args = set(self.__update_params__.keys()) - update_special_args\n\n self.__args__ = set().union(msg_args, aggr_args, update_args)",
"def _extend_pipeline_cfg(self, field, value):",
"def __push_aggregation_lowest_layer(self, aggregation_object, aggregation_name, table, id_name):\n id = 0\n aggregation_value = 0\n for aggregation in aggregation_object:\n id = aggregation[aggregation_name][0]\n aggregation_value = aggregation[aggregation_name][1]\n self.__postgre_db.update(table, \"aggregation=\" + str(aggregation_value), id_name + \"=\" + str(id))",
"def aggregate(self, aggregation):\n self._data = self._data.aggregate(**aggregation)",
"def __add_select_and_aggregate(self, select, groupby, where, window, tree):\r\n tuple_descriptor = TupleDescriptor()\r\n fields_to_verify = []\r\n all_fields = chain(select, where)\r\n if groupby != ['']:\r\n groupby = groupby[1:][0]\r\n all_fields = chain(all_fields, groupby)\r\n self.__remove_all(groupby, QueryTokens.EMPTY_STRING) \r\n for field in all_fields:\r\n (field_descriptors, verify) = self.__parse_field(field, self.twitter_td, True, False)\r\n fields_to_verify.extend(verify)\r\n tuple_descriptor.add_descriptor_list(field_descriptors)\r\n for field in fields_to_verify:\r\n self.__verify_and_fix_field(field, tuple_descriptor)\r\n \r\n # at this point, tuple_descriptor should contain a tuple descriptor\r\n # with fields/aliases that are correct (we would have gotten an\r\n # exception otherwise. built select_descriptor/group_descriptor\r\n # from it\r\n select_descriptor = TupleDescriptor()\r\n group_descriptor = TupleDescriptor()\r\n aggregates = []\r\n for field in select:\r\n (field_descriptors, verify) = self.__parse_field(field, tuple_descriptor, True, True)\r\n select_descriptor.add_descriptor_list(field_descriptors)\r\n if field_descriptors[0].field_type == FieldType.AGGREGATE:\r\n aggregates.append(field_descriptors[0])\r\n # add WHERE clause fields as invisible attributes\r\n for field in where:\r\n (field_descriptors, verify) = self.__parse_field(field, tuple_descriptor, True, False)\r\n select_descriptor.add_descriptor_list(field_descriptors)\r\n if len(aggregates) > 0:\r\n if window == None:\r\n raise QueryException(\"Aggregate expression provided with no WINDOW parameter\")\r\n for field in groupby:\r\n (field_descriptors, verify) = self.__parse_field(field, tuple_descriptor, True, True)\r\n group_descriptor.add_descriptor_list(field_descriptors)\r\n for alias in select_descriptor.aliases:\r\n select_field = select_descriptor.get_descriptor(alias)\r\n group_field = group_descriptor.get_descriptor(alias)\r\n if group_field == None and \\\r\n select_field.field_type != FieldType.AGGREGATE and \\\r\n select_field.visible:\r\n raise QueryException(\"'%s' appears in the SELECT but is is neither an aggregate nor a GROUP BY field\" % (alias))\r\n tree = operators.GroupBy(tree, group_descriptor, aggregates, window)\r\n tree.assign_descriptor(select_descriptor)\r\n return tree",
"def build_email_match_clause(field, string):\r\n answer = {}\r\n tmp = {}\r\n tmp[field] = dict()\r\n tmp[field]['query'] = string\r\n tmp[field]['boost'] = 30.0\r\n tmp[field]['operator'] = 'and'\r\n answer['match'] = tmp\r\n return answer",
"def _rewrite_aggregate(self, node: [saldag.Aggregate, saldag.IndexAggregate]):\n\n in_group_cols = node.group_cols\n out_group_cols = node.out_rel.columns[:-1]\n for i in range(len(out_group_cols)):\n out_group_cols[i].coll_sets |= copy.deepcopy(in_group_cols[i].coll_sets)\n in_agg_col = node.agg_col\n out_agg_col = node.out_rel.columns[-1]\n out_agg_col.coll_sets |= copy.deepcopy(in_agg_col.coll_sets)",
"def _ms_sub_and_md(self, fieldName):\n l = []\n # for each var\n for v in self.pattern[fieldName]['vars']:\n l.append(\"{ \" + v + \" }\") # roll a new list of vars, each deli\n ms_sub = self.pattern[fieldName]['text'] % tuple(l)\n return self._ms2md(ms_sub)",
"def makeNewEvent( desc, oldEvent, xtra, data=None ):\n newOd = dict()\n oldOd = None\n if oldEvent:\n oldOd = oldEvent.getPayload()\n \n # has incoming stuff got any % symbols in it, attempt substitution\n if oldOd != '':\n if desc.has_key(\"type\"):\n if '%' in desc[\"type\"]:\n desc[\"type\"] = desc[\"type\"] % oldEvent.getPayload()\n if desc.has_key(\"source\"):\n if '%' in desc[\"source\"]:\n desc[\"source\"] = desc[\"source\"] % oldEvent.getPayload() \n \n if desc.has_key(\"other_data\"):\n for v in desc[\"other_data\"]:\n if '%' in desc[\"other_data\"][v]:\n newOd[v] = desc[\"other_data\"][v] % oldEvent.getPayload()\n else:\n newOd[v] = desc[\"other_data\"][v] \n\n # empty.\n #attempt string substitution here too\n if desc.has_key(\"copy_other_data\"):\n cpList = desc[\"copy_other_data\"]\n for key in cpList:\n if '%' in cpList[key]:\n if xtra and xtra.has_key( cpList[key] ):\n newOd[key] = xtra[ cpList[key] % oldEvent.getPayload() ] \n elif oldOd and oldOd.has_key( cpList[key] ):\n newOd[key] = oldOd[ cpList[key] % oldEvent.getPayload() ] \n else:\n if xtra and xtra.has_key( cpList[key] ):\n newOd[key] = xtra[ cpList[key] ]\n elif oldOd and oldOd.has_key( cpList[key] ):\n newOd[key] = oldOd[ cpList[key] ]\n\n # append/update payload \n if data:\n newOd.update(data)\n \n # may be empty.\n if newOd and len(newOd) == 0:\n newOd = None\n\n return Event( desc[\"type\"], desc[\"source\"], newOd )",
"def formatEventMessage(self, message):\r\n\t\treturn format.formatEventMessage(message)",
"def _aggregate_log_values(self, source, dest):\n remove = []\n for key, item in source.items():\n if \"data\" not in item:\n # Assume it's a sub-group\n dest[key] = {}\n self._aggregate_log_values(item, dest[key])\n else:\n aggregator = self._get_aggregator_for_key(key, item['agg'])\n value = aggregator(item['data'])\n if item['precision'] is not None:\n value = round(value, item['precision'])\n dest[key] = value\n if item['scope'] == 'get':\n remove.append(key)\n for key in remove:\n del source[key]",
"def ParseAggregationSlot(self, G, node, relations, seen, results):\n slots = []\n templates = []\n\n # Get leading definition and aggregation relations\n outIsRelations = relations[gc.OutgoingRelations][Strings.is_]\n outPartRelations = relations[gc.OutgoingRelations][Strings.part]\n\n # Construct id pointing slots for aggregation relations\n for partRel in outPartRelations:\n slot = BLNlpClipsSlotMap()\n slot.IsIdSlot = True\n slot.Name = partRel[1] + gc.SlotIdSuffix\n slot.IdOf = partRel[1]\n slots.append(slot)\n\n # Construct deftemplate for aggregator if there is no one\n if not partRel[1] in seen:\n self.ParseObject(G, partRel[1], seen, results)\n seen.append(partRel[1])\n\n # Construct deftemplates for definition relations\n for isRel in outIsRelations:\n if not isRel[1] in seen:\n self.ParseObject(G, isRel[1], seen, results)\n seen.append(isRel[1])\n return slots",
"def add_substitution(self, pattern, repl):\r\n\r\n self.substitutions.append( (re.compile(pattern), repl) )",
"def register_aggregation(\n self,\n f: dd.Aggregation,\n name: str,\n parameters: List[Tuple[str, type]],\n return_type: type,\n replace: bool = False,\n ):\n self._register_callable(\n f,\n name,\n aggregation=True,\n parameters=parameters,\n return_type=return_type,\n replace=replace,\n )",
"def aggregate_all_occurences(self, field, match=None,\n project_post={\"count\": 1},\n group=None, unwind=None):\n groups = {'$group': {'_id': '${}'.format(field), 'count': {'$sum' : 1}}}\n if group is not None:\n for key, val in group.items():\n groups[\"$group\"][key] = val \n tmp = [groups,\n {\"$project\": project_post},\n {\"$sort\": {\"count\": -1 }}\n ]\n if unwind is not None:\n tmp.insert(0, unwind)\n if match is not None:\n tmp.insert(0, match)\n return tmp"
] | [
"0.57208526",
"0.55492425",
"0.53983456",
"0.5122011",
"0.50036806",
"0.49361655",
"0.48909393",
"0.4872973",
"0.4860018",
"0.48352015",
"0.4787186",
"0.47677344",
"0.47655874",
"0.4763589",
"0.47585917",
"0.47466496",
"0.47291303",
"0.47284135",
"0.47131282",
"0.4673214",
"0.46703207",
"0.46626544",
"0.4639122",
"0.4623241",
"0.4606463",
"0.45975804",
"0.4597397",
"0.45566928",
"0.45444486",
"0.45344442"
] | 0.75149137 | 0 |
Creates the aggregation group_id | def set_aggregation_group_id(self, event, matchgroups):
id = str(self.id)
for field in self.use_fields_for_id:
field = field.strip()
id = id + str(event[field])
attributes = matchgroups
for i in attributes:
id = id + i + attributes[i]
event["group_id"] = self.hash(id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_new_group_id():\n new_group = data_types.TestcaseGroup()\n new_group.put()\n return new_group.key.id()",
"def create_group(self, group):\n if self.dryrun:\n self.logger.info(\"Would create group %s\", group)\n return FakeGroupId()\n result = self.conn.usergroup.create(name=group)\n groupid = result['usrgrpids'][0]\n self.logger.info(\"Create group %s with id %s\", group, groupid)\n return groupid",
"def generate_group_id(self):\n if not hasattr(self.space, '_group_ctr'):\n self.space._group_ctr = 999\n self.space._group_ctr += 1\n return self.space._group_ctr",
"def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))",
"def group_id(self):\n return self._id",
"def assignGroupIDs(self):\n components = self.getComponents(graph_dictionary=self.graph_dict)\n self._gIDs = np.zeros(self.no_plants, dtype='object')\n for i in components.keys():\n self._gIDs[components[i]] = 'gID_' + str(i)",
"def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1",
"def group_id(self) -> str:\n return pulumi.get(self, \"group_id\")",
"def group_id(self) -> str:\n return pulumi.get(self, \"group_id\")",
"def allocate_group(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_AllocateNewGroupID(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_AllocateNewGroupID(key1, result_val)\n return result_val.i",
"def assignmentCreate(groupId):\n postData = request.json\n new_channel = db.channels.insert_one(\n {\n \"name\": postData.get(\"name\"),\n \"dis\": postData.get(\"dis\"),\n \"category\": \"assignments\",\n \"groupId\": groupId,\n }\n )\n insertAssignment = db.Assignment.insert_one(\n {\n \"name\": postData.get(\"name\"),\n \"dis\": postData.get(\"dis\"),\n \"maxGrade\": postData.get(\"maxGrade\"),\n \"dueDate\": postData.get(\"dueDate\"),\n \"startDate\": postData.get(\"startDate\"),\n \"url\": postData.get(\"url\"),\n \"channelId\": new_channel.inserted_id\n }\n )\n\n getId = insertAssignment.inserted_id\n assignment = db.Assignment.find_one({\"_id\": ObjectId(getId)})\n print(f\"Assignment {assignment}\")\n\n group = db.Group.find_one({\"_id\": ObjectId(groupId)})\n print(f\"Group from EOF: {group}\")\n group[\"assignmentIds\"].append(assignment[\"_id\"])\n print(f\"Group assignmentIds after append: {group['assignmentIds']}\")\n return jsonify({\"msg\": \"Your assignment has been created.\"}), 200",
"def group_id(self) -> int:\n return self._group_id",
"def group_id(self):\n return self._group_id",
"def group_id(self):\n return self._group_id",
"def group_id(self):\n return self._group_id",
"def create_group(group_id, group_name):\n\n kwargs = config.DEFAULT_REST_KWARGS\n kwargs[\"data\"] = {\"id\": group_id, \"name\": group_name}\n http_response = call_rest_api(\"/identities/groups/\", \"post\", **kwargs)\n if http_response.status_code != 201: # 201 = 'new group created'\n raise ValueError(http_response.text)\n logger.log(f\"New custom group, {group_name}, with ID: {group_id}, was created successfully.\")",
"def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )",
"def make_grp(self, name='grp', v=False):\n self.base[name] = self.get_group_array(v=v) #np.zeros(len(self.base), dtype=int)#self.get_group_array()",
"def create_group(self, identifier: str, group_name: str) -> Group:\n\n # APM-137701 - Namespace for custom device calculation should not be set\n group_id = get_group_id(\"\", identifier)\n if group_id in self._groups:\n raise ValueError(\"Group \" + group_name + \" already exist, id: \" + str(group_id))\n else:\n group = Group(group_id, group_name, self._technologies, self._results_builder)\n\n self._groups[group_id] = group\n return group",
"def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)",
"def EventContentMissionExcelAddGroupId(builder, GroupId):\n return AddGroupId(builder, GroupId)",
"def id(self):\n return self._group",
"def group_id(self):\n # type: () -> string_types\n return self._group_id",
"def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()",
"def make_grp(self):\n try:\n self.base['grp']\n except:\n self.base['grp'] = np.zeros(len(self.base),dtype='i')\n\n for halo in self._halos.values():\n halo[name][:] = halo._halo_id\n\n if config['verbose']: print \"writing %s\"%(self._base().filename+'.grp')\n self._base().write_array('grp',overwrite=True,binary=False)",
"def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200",
"def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)",
"def GroupId(self):\n\t\treturn self._get_attribute('groupId')",
"def create_new_group(self, group_id, poll_id, name):\n obj = self.table()\n obj.group_id = str(group_id)\n obj.poll_id = poll_id\n obj.name = name\n self.db.session.add(obj)\n self.db.session.commit()",
"def test_create_group(self):\n pass"
] | [
"0.6823393",
"0.6713226",
"0.6573045",
"0.6502546",
"0.6305557",
"0.6275136",
"0.6239701",
"0.6216121",
"0.6216121",
"0.61537755",
"0.61489487",
"0.6143351",
"0.6088015",
"0.6088015",
"0.6088015",
"0.60435104",
"0.60379666",
"0.5998496",
"0.598665",
"0.59851414",
"0.5975809",
"0.5964486",
"0.5944217",
"0.5940085",
"0.5920052",
"0.5899303",
"0.5899151",
"0.58887887",
"0.5838731",
"0.5834282"
] | 0.7299634 | 0 |
Calculate the volume of a sphere with radius r. | def sphere_volume(r):
return 4/3. * math.pi * r ** 3 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sphere_volume(r):\n return (4/3) * 3.14159 * r**3",
"def calcul_v_sphere(r):\n volume = 4/3 * math.pi * (r ** 3)\n return volume",
"def sphere_volume(radius : number) -> number:\n volume = 4/3*(pi*radius*radius*radius)\n return volume",
"def sphereVolume(radius):\n volume = (4 / 3) * math.pi * radius ** 3\n return volume",
"def sphere_volume(sphere_radius):\n return (4 / 3 * np.pi * sphere_radius**3)",
"def nsphere_volume(n, r):\n return math.pi ** (n / 2) * (r ** n) / gamma(n / 2 + 1)",
"def calc_hypersphere_volume(r: float, n: int) -> float:\n return (math.pi ** (n / 2) * r ** n) / gamma((n / 2) + 1)",
"def sphvol(r):\n return (4./3.)*np.pi*(r**3.)",
"def surfaceIntSphere(r: float) -> float:\n return 4.0 * np.pi * r * r",
"def volume(self) -> float:\n return 4 / 3 * np.pi * self.radius**3",
"def ellipsoid_volume(radius1: number, radius2: number, radius3: number) -> number:\n volume = 4/3*(pi*radius1*radius2*radius3)\n return volume",
"def calc_hypercube_volume(r: float, n: int) -> float:\n return (r * 2) ** n",
"def sphereArea(radius):\n area = 4 * math.pi * radius ** 2\n return area",
"def sphere_area(radius : number) -> number:\n area = 4*pi*radius*radius\n return area",
"def volume_from_rs(rs,Nel):\n a0 = 0.5291772 # Bohr radius (angstroms/bohr)\n volume = (4.0*pi/3.0)*Nel * (rs*a0)**3\n\n return volume",
"def cylinder_volume(radius: number, height: number) -> number:\n volume = pi*radius*radius*height\n return volume",
"def getSphereRadius(self):\n return 1.5",
"def surface_area_sphere(radius: float) -> float:\r\n if radius < 0:\r\n raise ValueError(\"surface_area_sphere() only accepts non-negative values\")\r\n return 4 * pi * radius**2",
"def _generate_boxcar_volume(x, radius, center):\n\n # Form cubic position array for x, y, z\n X_cube = x.copy()\n\n\n # Find all points inside boxcar inside the cube\n vol = np.sqrt((X_cube - center) ** 2 / radius ** 2)\n vol = vol <= 1\n\n return vol.astype(float)",
"def Radius(self, *args):\n return _Bnd.Bnd_Sphere_Radius(self, *args)",
"def tube_radius_from_volume(volume, length):\n a3 = 4.0 / 3.0 * np.pi\n a2 = np.pi * length\n a1 = 0\n a0 = -volume\n\n r = np.polynomial.polynomial.polyroots([a0, a1, a2, a3])\n\n radius = np.real(r[r > 0][0])\n # print \"geometry3d.pills_radius_from_volume \", radius\n return radius",
"def boringInterlude (radiusIn):\n\n\n import math\n volIn = (4/3) * math.pi * (radiusIn ** 3)\n vol = volIn/ 1728\n return vol",
"def sphrad(vol):\n return (3.*vol/(4.*np.pi))**(1./3.)",
"def volume(self):\n _alpha = np.radians(self.alpha)\n _beta = np.radians(self.beta)\n _gamma = np.radians(self.gamma)\n return self.coordx*self.coordy*self.coordz*np.sqrt(\n 2*np.cos(_alpha)*np.cos(_beta)*np.cos(_gamma) -\\\n np.cos(_alpha)**2 - np.cos(_beta)**2 - np.cos(_gamma)**2 + 1)",
"def surface_area_hemisphere(radius: float) -> float:\r\n if radius < 0:\r\n raise ValueError(\"surface_area_hemisphere() only accepts non-negative values\")\r\n return 3 * pi * radius**2",
"def sphere(indiv):\n return sum([ x ** 2 for x in indiv])",
"def calc_hypersphere_cap_volume(r: float, c: float, n: int):\n term = (math.pi ** (n / 2) * r ** n) / gamma((n / 2) + 1)\n if c < 0:\n return term - calc_hypersphere_cap_volume(r, -c, n)\n else:\n a = (n + 1) / 2\n x = 1 - ((c ** 2) / (r ** 2))\n return 0.5 * term * betainc(a, 0.5, x)",
"def sphere(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n return sum((x+0)**2)",
"def cone_volume(radius: number, height: number) -> number:\n return 1/3*(pi*radius*radius*height)",
"def volume (self):\n volume = self.sideLength**3\n return volume"
] | [
"0.89431643",
"0.8776244",
"0.86990744",
"0.8546791",
"0.85032845",
"0.82493275",
"0.8208103",
"0.7859043",
"0.74497455",
"0.7399191",
"0.73894525",
"0.7307988",
"0.7204269",
"0.71425915",
"0.7116606",
"0.6987146",
"0.68633825",
"0.67712337",
"0.67578685",
"0.6645747",
"0.6615624",
"0.6603443",
"0.65624666",
"0.6528788",
"0.6431447",
"0.641403",
"0.64053065",
"0.6379342",
"0.6368607",
"0.6273705"
] | 0.895751 | 0 |
Validates the user's email address, given the verification token | def post(self):
data = EmailAddressValidationSchema().load(request.json)
email_lowercase = data["email"].lower()
verification_token = data["verificationToken"]
UserRegistrationService.validate_email_address_verification_token(email_lowercase, verification_token) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_email(uid, token):\n return True",
"def validate_email(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n self.user_in_db = User.users_db.get(self.email)\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if 'secret_token' not in decoded_token or decoded_token['secret_token'] != self.user_in_db['secret_token']:\n return {'error': 'Token is invalid'}\n\n self.user_in_db['secret_token'] = ''\n self.user_in_db['verified'] = True\n\n User.users_db.put(self.user_in_db)\n\n return decoded_token",
"def validate_email(self, data):\n user = account_models.User.objects.filter(username__iexact=data, is_active=True)\n if user:\n return data\n raise serializers.ValidationError(\"Email address not verified for any user account\")",
"async def verify_account(\n token: str = Form(...)\n):\n email = await verify_register_token(token)\n if not email:\n raise HTTPException(status_code=400, detail=\"Invalid email verify token\")\n record = await crud.user.get_by_email(email)\n if not record:\n raise HTTPException(\n status_code=404,\n detail=\"The user with this email does not exist in the system.\"\n )\n user = DBUser(**record)\n if user.is_email_verified:\n raise HTTPException(\n status_code=HTTP_409_CONFLICT,\n detail=\"User already verified\",\n )\n await crud.user.update(user.id, {'is_email_verified': True})\n send_new_account_email(email=user.email, username=user.username, first_name=user.first_name)\n return {\"msg\": \"Account verified\"}",
"def verify_email_token(self, token):\n href = '/accounts/emailVerificationTokens/' + token\n data = self._store.create_resource(href, {})\n\n return self.resource_class(client=self._client, properties=data)",
"def test_verifyEmailToken(self, testUser):\n test_token = testUser._get_email_verification_token()\n resulting_user, error = User.verify_email_verification_token(test_token)\n assert resulting_user == testUser\n assert error is None",
"def validate_email(self, value):\n verifier = EmailVerifier(value)\n if not verifier.is_valid():\n if verifier.errors:\n raise serializers.ValidationError(verifier.errors)\n error_code = verifier.data.get('result', 'unknown_error')\n if verifier.status_code == status.HTTP_200_OK:\n raise serializers.ValidationError({error_code:\n VALIDATION_ERRORS[error_code]})\n else:\n # This errors are 'Payment required' or 'Rate limit' errors etc, they\n # logged in by the EmailVerifier and should not be exposed to a user.\n raise serializers.ValidationError({'unknown_error':\n VALIDATION_ERRORS['unknown_error']})\n return value",
"def verify_email_token(self, token):\n href = '/accounts/emailVerificationTokens/' + token\n data = self._store.create_resource(href, {})\n\n return self.resource_class(properties=data, client=self._client)",
"def test_already_validated_email(self):\n token = self.authenticate_user(self.auth_user_data).data[\"token\"]\n verification_url = reverse(\n 'authentication:verify_email', kwargs={'token': token})\n\n response = self.client.get(\n verification_url,\n HTTP_AUTHORIZATION=f'token {token}'\n )\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def validate_email(self, email):\n data = {\n \"address\": email\n }\n resp = self.get(_u.build_uri(\"address.validate\"), data)\n return utils.handle_response(resp)",
"def verify_email(request, uidb64, token):\n user = request.user\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n associated_email = AssociatedEmail.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, AssociatedEmail.DoesNotExist):\n associated_email = None\n\n if associated_email is not None and associated_email.user == user:\n # Test that the token is correct\n if associated_email.check_token(token):\n associated_email.verification_date = timezone.now()\n associated_email.is_verified = True\n associated_email.save()\n if not user.is_credentialed:\n check_legacy_credentials(user, associated_email.email)\n logger.info('User {0} verified another email {1}'.format(user.id, associated_email))\n messages.success(request, 'The email address {} has been verified.'.format(\n associated_email))\n return redirect('edit_emails')\n\n logger.warning('Invalid Verification Link')\n return render(request, 'user/verify_email.html',\n {'title':'Invalid Verification Link', 'isvalid':False})",
"def validate_token(self, token):\n try:\n self._verification = models.EmailVerification.objects.get(\n token=token,\n )\n except models.EmailVerification.DoesNotExist:\n raise serializers.ValidationError(\n code='invalid_token',\n detail=_('The provided token does not exist or has expired.'),\n )\n\n return token",
"async def verify(token: TextData, background_tasks: BackgroundTasks):\n token_data = token.data\n mail, subject, body = await AccountProcessor.confirm_email(token_data)\n background_tasks.add_task(Utility.validate_and_send_mail, email=mail, subject=subject, body=body)\n return {\"message\": \"Account Verified!\"}",
"def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user is not None:\n raise ValidationError('Please use a different email address.')",
"def request_verification(data):\n if 'email' in data:\n if user_exists(data['email']):\n return get_user_id(data['email'])\n else:\n return 401\n else:\n return 400",
"def validate_email(self, field):\n print(\"Validating email...\")\n if User.query.filter_by(email=field.data.lower()).first():\n print(\"Invalid\")\n raise ValidationError(\"Email already registered.\")\n print(\"VAlid email\")",
"def verify_email(entered_email):\n return EMAIL_RE.match(entered_email)",
"def validate_recipient_email(self, email):\n email_query = EmailAddress.objects.filter(\n email=email, is_verified=True\n )\n\n if not email_query.exists():\n raise serializers.ValidationError(\n ugettext(\"No Know Me user owns the provided email address.\")\n )\n\n self._recipient_email_inst = email_query.get()\n\n return email",
"def clean_email(self):\n e = self.cleaned_data['email']\n try:\n user = User.objects.get(email=e)\n if not user.is_active:\n msg = 'This user account has not been confirmed yet'\n raise forms.ValidationError(msg)\n except User.DoesNotExist:\n pass\n # msg = 'This email is not associated with an account'\n # raise forms.ValidationError(msg)\n return e",
"def verify_email(self, request, *args, **kwargs):\n verified_key_text = getattr(settings, \"VERIFIED_KEY_TEXT\", None)\n\n if not verified_key_text:\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n redirect_url = request.query_params.get(\"redirect_url\")\n verification_key = request.query_params.get(\"verification_key\")\n response_message = _(\"Missing or invalid verification key\")\n if verification_key:\n registration_profile = None\n try:\n registration_profile = RegistrationProfile.objects.select_related(\n \"user\", \"user__profile\"\n ).get(activation_key=verification_key)\n except RegistrationProfile.DoesNotExist:\n with use_master:\n try:\n registration_profile = (\n RegistrationProfile.objects.select_related(\n \"user\", \"user__profile\"\n ).get(activation_key=verification_key)\n )\n except RegistrationProfile.DoesNotExist:\n pass\n\n if registration_profile:\n registration_profile.activation_key = verified_key_text\n registration_profile.save()\n\n username = registration_profile.user.username\n set_is_email_verified(registration_profile.user.profile, True)\n # Clear profiles cache\n safe_delete(f\"{USER_PROFILE_PREFIX}{username}\")\n\n response_data = {\"username\": username, \"is_email_verified\": True}\n\n if redirect_url:\n query_params_string = urlencode(response_data)\n redirect_url = f\"{redirect_url}?{query_params_string}\"\n\n return HttpResponseRedirect(redirect_url)\n\n return Response(response_data)\n\n return HttpResponseBadRequest(response_message)",
"def __send_verification(self, email):\r\n user = User.getUser(email.lower())\r\n if user is None or user.verified:\r\n self.set_error(constants.STATUS_BAD_REQUEST, message=None, url=\"/\")\r\n return\r\n user.verificationCode = b64encode(CryptoUtil.get_verify_code(), \"*$\")\r\n template_values = {\r\n 'user_email': self.user_email,\r\n 'code': user.verificationCode,\r\n 'url': constants.VERIFICATION_URL\r\n }\r\n template = self.jinja2_env.get_template('verificationemail.jinja')\r\n message = mail.EmailMessage()\r\n message.sender = constants.SENDER_ADDRESS\r\n message.to = user.email\r\n message.subject = 'Please verify your address'\r\n message.body = template.render(template_values)\r\n message.send()\r\n user.put()",
"def validate_email(form, field):\n if not User.query.filter_by(email = field.data).first():\n raise ValidationError(\"Email is incorrect.\")",
"def validate_email(request):\r\n # get submitted email.\r\n email = request.GET.get('email', None)\r\n try:\r\n # check if an account with this email already exists, in case of editing user's profile.\r\n is_email_taken = User.objects.filter(email__iexact=email).exclude(email__iexact=request.user.email).exists()\r\n except: \r\n # check if an account with this email already exists, in case of registering new user.\r\n is_email_taken = User.objects.filter(email__iexact=email).exists()\r\n data = {'is_email_taken':is_email_taken}\r\n if data['is_email_taken']:\r\n data['error_message'] = 'An account with this Email already exists.'\r\n return JsonResponse(data)",
"def validate_email(request):\r\n # get submitted email.\r\n email = request.GET.get('email', None)\r\n try:\r\n # check if an account with this email already exists, in case of editing user's profile.\r\n is_email_taken = User.objects.filter(email__iexact=email).exclude(email__iexact=request.user.email).exists()\r\n except: \r\n # check if an account with this email already exists, in case of registering new user.\r\n is_email_taken = User.objects.filter(email__iexact=email).exists()\r\n data = {'is_email_taken':is_email_taken}\r\n if data['is_email_taken']:\r\n data['error_message'] = 'An account with this Email already exists.'\r\n return JsonResponse(data)",
"def verifyemail(request,id=None,key=None):\n logging.debug('')\n if settings.EMAIL_VALIDATION == True:\n user = User.objects.get(id=id)\n if user:\n if user.email_key == key:\n user.email_isvalid = True\n clear_email_validation_message(user)\n user.save()\n data = {'action_type': 'validation_complete'}\n return render_to_response(\n 'authenticator/changeemail.html',\n RequestContext(request, data)\n )\n else:\n logging.error('hmm, no user found for email validation message - foul play?')\n raise Http404",
"def verification_link(email, request, token):\n domain = request.get_host()\n url = reverse('auth:verify', kwargs={'token': token})\n link = f'{domain}{url}'\n subject = \"Activation for your account\"\n message = f'Please Activate your account below.\\n{link}'\n from_mail = default.DEFAULT_FROM_EMAIL\n to_mail = [email]\n send_mail(subject, message, from_mail, to_mail, fail_silently=False)\n response_data = {\n \"msg\": 'Please check your email to verify your account '\n 'verification has been sent to {}'.format(email)\n }\n return response_data",
"def validate_email(self, email):\n if email and email_address_exists(email):\n raise serializers.ValidationError(\n \"A user is already registered with this e-mail address.\")\n\n return email",
"async def validate_email(message, *args):\n\n (vhash,) = args\n user = database.get_student_by_discord(str(message.author.id))\n if user is None:\n await message.author.send(\n embed=utils.create_embed(\n \"You aren't in the database. Did you use `!verify` first?\"\n )\n )\n\n if database.column_name_index(\"validated\", MEMBERS_COLUMNS, user) == 1:\n await message.author.send(\n embed=utils.create_embed(\"You have already been verified!\")\n )\n member = await client.get_member(message.author.id)\n await member.add_roles(discord.utils.get(client.guild.roles, name=\"members\"))\n return\n\n if database.column_name_index(\"token\", MEMBERS_COLUMNS, user) == vhash:\n try:\n database.verify_student(str(message.author.id))\n except:\n await message.author.send(\n embed=utils.create_embed(\n \"Oops! Something went wrong. Please contact an officer.\"\n )\n )\n return\n\n member = await client.get_member(message.author.id)\n await member.add_roles(discord.utils.get(client.guild.roles, name=\"members\"))\n await message.author.send(\n embed=utils.create_embed(\"Verified. You should have the Members role now.\")\n )\n await client.update_channel.send(\n embed=utils.create_embed(\n \"Validated user {0}\".format(message.author.mention)\n )\n )\n logging.info(\"Validated user {0}\")\n else:\n await message.author.send(\n embed=utils.create_embed(\"Incorrect token, please try again.\")\n )\n\n return",
"def email_from_invitation_token(token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token.encode('utf-8'))\n except (BadSignature, SignatureExpired):\n return False\n user_email = data.get('user_email')\n if user_email is None:\n return False\n if User.query.filter_by(email=user_email).first() is not None:\n return False\n return user_email",
"def validate_email(email):\r\n\r\n\t\tstatus = 'valid'\r\n\r\n\t\ttry:\r\n\t\t\tvalidate_email(email)\r\n\t\t\tuser = AuthTools.get_user_by_email(email)\r\n\r\n\t\t\tif user is not None:\r\n\t\t\t\tstatus = 'taken'\r\n\r\n\t\texcept:\r\n\t\t\tstatus = 'invalid'\r\n\r\n\t\treturn status"
] | [
"0.76968575",
"0.7643206",
"0.7167939",
"0.7090168",
"0.7051217",
"0.7026804",
"0.70227796",
"0.7012395",
"0.7004058",
"0.6945708",
"0.692403",
"0.68979686",
"0.6817104",
"0.68054706",
"0.68007165",
"0.6797659",
"0.67840916",
"0.67610574",
"0.6718849",
"0.66913974",
"0.66908854",
"0.6687861",
"0.6646911",
"0.6646911",
"0.66457474",
"0.6644506",
"0.663366",
"0.6617676",
"0.6608264",
"0.6606363"
] | 0.78256726 | 0 |
Sends Record using a SerializingProducer & AvroSerializer | def send_record(args):
topic = args.topic.rstrip()
schema_registry_config = {
'url': args.schema_registry }
schema_registry_client = SchemaRegistryClient(schema_registry_config)
avro_serializer = AvroSerializer(
schema_registry_client,
DATA_SCHEMA,
data_to_dict)
producer_config = {
"bootstrap.servers": args.bootstrap_servers,
"key.serializer": StringSerializer('utf_8'),
"value.serializer": avro_serializer
}
producer = SerializingProducer(producer_config)
split_incoming_data = args.record_value.split(',')
if not len(split_incoming_data) == 7: # Data Format Check
print('** Error: Insufficient Incoming Data: ', split_incoming_data)
raise Exception
try: # Data Format Check
incoming_data = {
'envId': int(split_incoming_data[0]),
'whenCollected': str(split_incoming_data[1]),
'timeLightOnMins': int(split_incoming_data[2]),
'humidity': int(split_incoming_data[3]),
'soilMoisture': int(split_incoming_data[4]),
'temperature': int(split_incoming_data[5]),
'waterConsumption': int(split_incoming_data[6]) }
except Exception as error:
print('** Error Creating Dict of Data: ', error)
print(f'Producing data records to topic {topic}. ^C to exit.')
producer.poll(1)
try:
key = args.record_key if args.record_key else str(uuid4())
data_object = Data(incoming_data)
print('\t-Producing Avro record. . .')
producer.produce(topic = topic,
key = key,
value = data_object,
on_delivery = delivery_report)
except ValueError:
print('\t-Invalid input, discarding record. . .')
print('\nFlushing records. . .')
producer.flush() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send(topicname, records, schema):\n # Parse the schema file\n #schema_definition = fastavro.schema.load_schema(schemafile)\n\n # Write into an in-memory \"file\"\n\n import fastavro\n import confluent_kafka\n\n out = BytesIO()\n fastavro.writer(out, schema, records)\n out.seek(0) # go back to the beginning\n\n # Connect to the IPAC Kafka brokers\n producer = confluent_kafka.Producer({'bootstrap.servers': 'ztfalerts04.ipac.caltech.edu:9092,ztfalerts05.ipac.caltech.edu:9092,ztfalerts06.ipac.caltech.edu:9092'})\n\n # Send an avro alert\n producer.produce(topic=topicname, value=out.read())\n producer.flush()",
"def emit(self, record):\n try:\n topic, record.msg = record.msg.split(TOPIC_DELIM,1)\n except Exception:\n topic = \"\"\n try:\n bmsg = cast_bytes(self.format(record))\n except Exception:\n self.handleError(record)\n return\n \n if isinstance(topic, str):\n btopic = cast_bytes(topic)\n else:\n print(\"Exception: topic is not string:{topic}\".format(topic=topic))\n btopic = b'Debug' \n\n self.socket.send_multipart([btopic, bmsg])",
"def serialize_to_bytes(self, record):\n result = self._quickavro_encoder.write(record)\n return result",
"def do_POST(self):\n self.responder = Responder()\n call_request_reader = ipc.FramedReader(self.rfile)\n call_request = call_request_reader.read_framed_message()\n resp_body = self.responder.respond(call_request)\n self.send_response(200)\n self.send_header('Content-Type', 'avro/binary')\n self.end_headers()\n resp_writer = ipc.FramedWriter(self.wfile)\n resp_writer.write_framed_message(resp_body)",
"def do_POST(self):\n self.responder = Responder()\n call_request_reader = ipc.FramedReader(self.rfile)\n call_request = call_request_reader.read_framed_message()\n resp_body = self.responder.respond(call_request)\n self.send_response(200)\n self.send_header('Content-Type', 'avro/binary')\n self.end_headers()\n resp_writer = ipc.FramedWriter(self.wfile)\n resp_writer.write_framed_message(resp_body)",
"def do_POST(self):\n self.responder = Responder()\n call_request_reader = ipc.FramedReader(self.rfile)\n call_request = call_request_reader.read_framed_message()\n resp_body = self.responder.respond(call_request)\n self.send_response(200)\n self.send_header('Content-Type', 'avro/binary')\n self.end_headers()\n resp_writer = ipc.FramedWriter(self.wfile)\n resp_writer.write_framed_message(resp_body)",
"def emit(self, record):\r\n try:\r\n self.enqueue(self.prepare(record))\r\n except Exception:\r\n self.handleError(record)",
"def emit(self, record):\n try:\n msg = self.format(record)\n stream = self.stream\n stream.Write_shared((msg + self.terminator).encode(self.encoding))\n # self.flush()\n except Exception:\n self.handleError(record)",
"def write(self, record):\n # Make Splunk ready payload data and append it to self._buffers list.\n self._buffer.append({\n 'index': self._index,\n 'sourcetype': 'json',\n 'event': record\n })\n\n # If the records count in self._buffer is more than allowed by\n # self._buffer_size, send those records to Splunk.\n if len(self._buffer) >= self._buffer_size:\n self._flush()",
"def emit(self, record):\n self.buffer.append(record.__dict__)",
"def emit(self, record):\n self.buffer.append(record.__dict__)",
"def emit(self, record):\n pass",
"def emit(self, record):\n log_entry = self.format(record)\n try: \n requests.post(self.host+self.url, log_entry,headers={\"Content-type\": \"application/json\"}).content\n except Exception as e:\n if self.debug:\n print(e)",
"def emit(self, record):\n\n print(record.__dict__)",
"def emit(self, record):\n self.buffer.append(record)\n while len(self.buffer) != 0:\n nextRecord = self.buffer.popleft()\n\n super().emit(nextRecord)\n\n if self.sock is None: # If we failed to send the record\n self.buffer.appendleft(nextRecord)\n break",
"def emit(self, record: LogRecord):\n try:\n self.enqueue(self.prepare(record))\n except Exception:\n self.handleError(record)",
"async def _send_message(producer, event_data):\n batch = await producer.create_batch()\n batch.add(EventData(_serialize_event_data_as_json(event_data)))\n await producer.send_batch(batch)",
"def emit(self, record):\n # encode data\n # Panda logger is going to be migrated. Until this is completed we need to support the old and new logger\n # The new logger needs to be json encoded and use POST method\n try:\n if self.encoding == JSON:\n arr=[{\n \"headers\":{\"timestamp\" : int(time.time())*1000, \"host\" : \"%s:%s\"%(self.url, self.port)},\n \"body\": \"{0}\".format(json.dumps(self.mapLogRecord(record)))\n }]\n data = json.dumps(arr)\n else:\n data = urlencode(self.mapLogRecord(record))\n\n # try to lock Semaphore\n if self.mySemaphore.acquire(False):\n # start Emitter\n _Emitter(self.host, self.port, self.urlprefix, self.method, data, self.mySemaphore).start()\n except UnicodeDecodeError:\n #We lose the message\n pass",
"def _attach_record_as_json(mfg_event, record):\n attachment = mfg_event.attachment.add()\n attachment.name = TEST_RECORD_ATTACHMENT_NAME\n test_record_dict = htf_data.convert_to_base_types(record)\n attachment.value_binary = _convert_object_to_json(test_record_dict)\n attachment.type = test_runs_pb2.TEXT_UTF8",
"def add_record(self, record):\n pass",
"def emit(self, record):\n data = self.mapLogRecord(record)\n client = Client()\n if self.method == 'GET':\n response = client.get(self.url, data)\n else:\n response = client.post(self.url, data)\n self.testcase.assertEqual(response.status_code, 200)\n self.testcase.assertContains(response, 'message saved')",
"def write(self, record):\n if not record:\n return\n\n # Convert to a dict - inefficient, I know...\n if type(record) is DASRecord:\n record = json.loads(record.as_json())\n if type(record) is dict:\n # If our local queue is full, throw away the oldest entries\n while self.send_queue.full():\n try:\n logging.debug('CachedDataWriter queue full - dropping oldest...')\n self.send_queue.get_nowait()\n except asyncio.QueueEmpty:\n logging.warning('CachedDataWriter queue is both full and empty?!?')\n\n # Enqueue our latest record for send\n self.send_queue.put_nowait(record)\n else:\n logging.warning('CachedDataWriter got non-dict/DASRecord object of '\n 'type %s: %s', type(record), str(record))",
"def enqueue(self, record):\r\n self.queue.put_nowait(record)",
"def record(self):\n # TODO: record the data",
"def write_record(self, obj: dict) -> None:\n opts = JSONOptions(strict_number_long=False, datetime_representation=DatetimeRepresentation.ISO8601,\n json_mode=JSONMode.RELAXED)\n obj_str = json_util.dumps(obj)\n logger.info(extra=dict(Func='Record', Op='Tail',\n Attributes={'identifier': self.identifier,\n 'record': obj_str}), msg=obj_str)",
"def serialize(self):\n return self.record",
"def emit(self, record):\n try:\n if record.exc_info:\n _, exc, *_ = record.exc_info\n if hasattr(exc, \"__pretty_exc__\"):\n try:\n self.emit_pretty_exception(exc, verbose=_is_verbose())\n if not _is_verbose():\n return\n # pylint: disable-next=broad-except\n except Exception: # noqa: BLE001, S110 # nosec B110\n pass\n\n msg = self.format(record)\n Tqdm.write(msg, file=self.stream, end=getattr(self, \"terminator\", \"\\n\"))\n self.flush()\n except (BrokenPipeError, RecursionError):\n raise\n except Exception: # noqa: BLE001, pylint: disable=broad-except\n self.handleError(record)",
"def write_record(self, record, buffer_cap=10000):\n if record is not None:\n # We need to enforce the correct encoding for both versions of python\n if sys.version_info[0] < 3:\n if isinstance(record, list):\n self.__queue.append([str(s).decode('utf-8') if isinstance(s, unicode) is False else s for s in record])\n else:\n self.__queue.append([str(s).decode('utf-8') if isinstance(s, unicode) is False else s for s in record.values()])\n else:\n if isinstance(record, list):\n self.__queue.append([s.decode('utf-8') if isinstance(s, bytes) else str(s) for s in record])\n else:\n self.__queue.append(s.decode('utf-8') if isinstance(s, bytes) else str(s) for s in record.values())\n if len(self.__queue) > buffer_cap:\n self.flush_record()\n return \"\"",
"def put_record(self, obj):\r\n for output in self.outputs:\r\n output.put_record(obj)",
"def transform_record(self, pid, record, links_factory=None, **kwargs):\n context = kwargs.get(\"marshmallow_context\", {})\n context.setdefault(\"pid\", pid)\n context.setdefault(\"record\", record)\n return self.dump(\n self.preprocess_record(pid, record, links_factory=links_factory, **kwargs),\n context,\n )"
] | [
"0.6517671",
"0.6180423",
"0.61635876",
"0.5895322",
"0.5895322",
"0.5895322",
"0.585719",
"0.5777968",
"0.57018113",
"0.5643523",
"0.5643523",
"0.5637681",
"0.55201244",
"0.55000967",
"0.5456839",
"0.54533917",
"0.5436357",
"0.54286325",
"0.5375399",
"0.53668606",
"0.53546804",
"0.53539115",
"0.534088",
"0.5336114",
"0.5325852",
"0.5305389",
"0.5285302",
"0.5258533",
"0.52286834",
"0.52281666"
] | 0.7554016 | 0 |
Restore policy parameters from saved checkpoint. | def restore(self, checkpoint):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _restore(self, checkpoint):\n checkpoint_path = os.path.join(checkpoint, \"model_weights\")\n self.model.load_weights(checkpoint_path)",
"def _restore_checkpoint(self, checkpoint_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n pretrained_dict = checkpoint['state_dict'] # 预训练模型的state_dict\n model_dict = self.model.state_dict() # 当前用来训练的模型的state_dict\n \n if pretrained_dict.keys() != model_dict.keys(): # 需要进行参数的适配\n print('Parameters are inconsistant, adapting model parameters ...')\n # 在合并前(update),需要去除pretrained_dict一些不需要的参数\n # 只含有识别分支的预训练模型参数字典中键'0', '1'对应全模型参数字典中键'2', '3'\n pretrained_dict['2'] = transfer_state_dict(pretrained_dict['0'], model_dict['2'])\n pretrained_dict['3'] = transfer_state_dict(pretrained_dict['1'], model_dict['3'])\n del pretrained_dict['0'] # 把原本预训练模型中的键值对删掉,以免错误地更新当前模型中的键值对\n del pretrained_dict['1']\n model_dict.update(pretrained_dict) # 更新(合并)模型的参数\n self.model.load_state_dict(model_dict)\n else:\n print('Parameters are consistant, load state dict directly ...')\n self.model.load_state_dict(checkpoint['state_dict'])\n # self.optimizer.load_state_dict(checkpoint['optimizer'])\n # if self.with_cuda:\n # for state in self.optimizer.state.values():\n # for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n # state[k] = v.cuda(self.device)",
"def _check_restore_parameters(sess, saver):\r\n ckpt = tf.train.get_checkpoint_state(os.path.dirname( 'final_model/'))\r\n if ckpt and ckpt.model_checkpoint_path:\r\n print(\"Loading parameters for the Chatbot -> {}\".format(ckpt.model_checkpoint_path))\r\n saver.restore(sess, ckpt.model_checkpoint_path)\r\n\r\n else:\r\n print(\"Initializing fresh parameters for the Chatbot\")",
"def restore_checkpoint(restore_dir):\n restored_train_state = checkpoints.restore_checkpoint(restore_dir, None)\n variables = {'params': restored_train_state['optimizer']['target']}\n model_state = restored_train_state['model_state']\n variables.update(model_state)\n return variables",
"def _restore(self):\n self._logger = LOGGER\n self._param_store = pyro.get_param_store()\n self.set_state(self.best_params)\n self._alpha_guide_prior_params = dict(\n self._param_store.named_parameters()\n )",
"def restore(self):\n if os.path.isfile( \\\n os.path.join(self.network_path,'net_parameters.nnprm.index')):\n self.load_network_parameters(\n file_name='net_parameters', file_path=self.network_path)\n else:\n self.log(\"Could not load previous network parameters from:\\n{}\".format(\\\n os.path.join(self.network_path,'net_parameters.nnprm') ))\n self.log(\"Starting with untrained parameters\")",
"def restore(self, checkpoint_path: str):\r\n raise NotImplementedError",
"def restore_policy(self) -> Optional[pulumi.Input['RestorePolicyPropertiesArgs']]:\n return pulumi.get(self, \"restore_policy\")",
"def restore(self):\n\n self.brain.restore_checkpoint()",
"def restore(self):\n # For multi-worker training, it should not restore a model in certain\n # worker setting (e.g. non-chief worker in ParameterServerStrategy).\n # pylint: disable=protected-access\n if self._model._in_multi_worker_mode() and not multi_worker_util.should_load_checkpoint():\n return\n self.read_checkpoint_manager.restore_or_initialize()",
"def restore(self, sess: tf.Session) -> None:\n super().restore(sess)\n BaseModel._restore_checkpoint(self.pretrained_saver, sess, path=FLAGS.pretrained_checkpoint)",
"def _restore_variables(self, checkpoint):\n checkpoint_variables_map = list_variables(checkpoint)\n valid_variable = lambda name: name.startswith('model/encoder') or \\\n name.startswith('model/decoder')\n checkpoint_variable_names = [name for (name, _) in checkpoint_variables_map\n if valid_variable(name)]\n\n variables = get_variables_to_restore()\n variable_names = [v.name.split(':')[0] for v in variables]\n assignment_map = {}\n for var in checkpoint_variable_names:\n if var in variable_names:\n assignment_map[var] = var\n\n init_from_checkpoint(checkpoint, assignment_map)",
"def restore(self):\n if self._restored_model:\n return\n with self.eval_graph.graph.as_default():\n last_checkpoint = self._find_last_checkpoint()\n # TODO(rbharath): Is setting train=False right here?\n saver = tf.train.Saver()\n saver.restore(self._get_shared_session(train=False), last_checkpoint)\n self._restored_model = True",
"def restore_fn(flags):\n # if flags.tf_initial_checkpoint is None:\n # return None\n\n # Warn the user if a checkpoint exists in the train_dir. Then ignore.\n # if tf.train.latest_checkpoint(flags.train_dir):\n # tf.logging.info(\n # 'Ignoring --checkpoint_path because a checkpoint already exists in %s'\n # % flags.train_dir)\n # return None\n\n exclusions = []\n if flags.checkpoint_exclude_scopes:\n exclusions = [scope.strip()\n for scope in flags.checkpoint_exclude_scopes.split(',')]\n\n variables_to_restore = []\n for var in slim.get_model_variables():\n excluded = False\n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n # Change model scope if necessary.\n if flags.checkpoint_model_scope is not None:\n variables_to_restore = \\\n {var.op.name.replace(flags.model_name,\n flags.checkpoint_model_scope): var\n for var in variables_to_restore}\n\n tf.compat.v1.logging.info('++++++++++++++++++++')\n tf.compat.v1.logging.info('Fine-tuning from %s. Ignoring missing vars: %s' %\n (flags.pre_trained_checkpoint, flags.ignore_missing_vars))\n slim.assign_from_checkpoint_fn(flags.pre_trained_checkpoint,\n variables_to_restore,\n ignore_missing_vars=flags.ignore_missing_vars)",
"def restore_policy(self) -> pulumi.Output[Optional['outputs.RestorePolicyPropertiesResponse']]:\n return pulumi.get(self, \"restore_policy\")",
"def restore(self, sess, path=None, var_list=None):\n\n saver = tf.train.Saver(var_list)\n if path is None:\n path = tf.train.latest_checkpoint(os.path.dirname(self.config.CHECKPOINTS_PATH))\n saver.restore(sess, path)\n print(\"model restored from %s\" % path)",
"def _restore_training_state(self, restore_state):\n self.load_state_dict(restore_state[\"model\"])\n self.optimizer.load_state_dict(restore_state[\"optimizer\"])\n self.lr_scheduler.load_state_dict(restore_state[\"lr_scheduler\"])\n start_iteration = restore_state[\"iteration\"] + 1\n if self.config[\"verbose\"]:\n print(f\"Restored checkpoint to iteration {start_iteration}.\")\n\n if restore_state[\"best_model_found\"]:\n # Update checkpointer with appropriate information about best model\n # Note that the best model found so far may not be the model in the\n # checkpoint that is currently being loaded.\n self.checkpointer.best_model_found = True\n self.checkpointer.best_iteration = restore_state[\"best_iteration\"]\n self.checkpointer.best_score = restore_state[\"best_score\"]\n if self.config[\"verbose\"]:\n print(\n f\"Updated checkpointer: \"\n f\"best_score={self.checkpointer.best_score:.3f}, \"\n f\"best_iteration={self.checkpointer.best_iteration}\"\n )\n return start_iteration",
"def restore(self, checkpoint_frame=None):\n\n if checkpoint_frame:\n self.saver.restore(self.sess, self.path + '/tensorflow-model-%d' % checkpoint_frame)\n else:\n self.saver.restore(self.sess, self.saver.latest_checkpoint())",
"def resume(self, checkpoint):\n model_dict = paddle.load(checkpoint)\n self.model.set_state_dict(model_dict)",
"def restore(cls, env, experience_replay, checkpoint_folder):\n with open(\"_\".join([checkpoint_folder, \"params.json\"]), \"r\") as f:\n params = json.load(f)\n model = cls(\n env,\n experience_replay,\n tensorboard_log=None,\n checkpoint_folder=checkpoint_folder,\n **params\n )\n model.sess = get_tf_session()\n model.saver.restore(model.sess, checkpoint_folder)\n return model",
"def restore(self, checkpoint_path):\n start_time = time.time()\n latest_checkpoint = train_util.get_latest_chekpoint(checkpoint_path)\n if latest_checkpoint is not None:\n checkpoint = tf.train.Checkpoint(model=self)\n checkpoint.restore(latest_checkpoint).expect_partial()\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n logging.info('Loading model took %.1f seconds', time.time() - start_time)\n else:\n logging.info('Could not find checkpoint to load at %s, skipping.',\n checkpoint_path)",
"def restore_models_optimizers_from_save(head_model, tail_model, ctx_proc, head_optim, tail_optim):\n head_model.load_state_dict(torch.load('checkpoint/headmodel.pt'))\n tail_model.load_state_dict(torch.load('checkpoint/tailmodel.pt'))\n ctx_proc.load_state_dict(torch.load('checkpoint/ctxmodel.pt'))\n head_optim.load_state_dict(torch.load('checkpoint/head_optim.pt'))\n tail_optim.load_state_dict(torch.load('checkpoint/tail_optim.pt'))",
"def restore(self):\n pert_params = list(self.net.parameters())\n saved_params = list(self.saved_net.parameters())\n for perturbed, saved in zip(pert_params, saved_params):\n perturbed_shape = perturbed.shape\n saved_shape = saved.shape\n perturbed = perturbed.flatten()\n saved = saved.flatten()\n for i, _ in enumerate(perturbed.data):\n perturbed.data[i] = saved.data[i]\n perturbed = perturbed.view(perturbed_shape)\n saved = saved.view(saved_shape)",
"def restore_model(self, file_name: str, only_load_processor: bool = False):\n path = os.path.join(self.checkpoint_path, file_name)\n with open(path, 'rb') as f:\n restored_state = pickle.load(f)\n if only_load_processor:\n restored_params = _filter_processor(restored_state['params'])\n else:\n restored_params = restored_state['params']\n self.params = hk.data_structures.merge(self.params, restored_params)\n self.opt_state = restored_state['opt_state']",
"def restore(self, ds_crossval):\n t = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"[INFO] {} - Restoring from latest checkpoint ...\".format(t))\n self.ckpt_handle.restore(\n tf.train.latest_checkpoint(self.ckpt_dir)).expect_partial()\n self._eval_step(ds_crossval.data.values,\n ds_crossval.labels_onehot.values)\n self.best_accuracy = float(self.eval_accuracy.result())\n template = \"{} model restored after {} epoch(s) - \" \\\n \"Cross-vall accuracy = {:.9f}\"\n print(template.format(self.name, int(self.ckpt_handle.step),\n self.best_accuracy))",
"def restore(self,\n sess,\n ckpt_file,\n ckpt_type):\n if ckpt_file is None:\n raise FileNotFoundError(\"checkpoint file doesn't exist\")\n \n if ckpt_type == \"debug\":\n self.ckpt_debug_saver.restore(sess, ckpt_file)\n elif ckpt_type == \"epoch\":\n self.ckpt_epoch_saver.restore(sess, ckpt_file)\n else:\n raise ValueError(\"unsupported checkpoint type {0}\".format(ckpt_type))",
"def restore_args(self, model_settings: Dict[str, Any]) -> None:\n self.lrpatience = model_settings[\"config\"][\"lrpatience\"]\n self.lrdecay = model_settings[\"config\"][\"lrdecay\"]\n self.ntest = model_settings[\"config\"][\"ntest\"]\n self.ndiscard = model_settings[\"config\"][\"ndiscard\"]\n self.seed = model_settings[\"config\"][\"seed\"]\n self.timesteps = model_settings[\"config\"][\"timesteps\"]\n\n if self.verbose > 0:\n print(\"Restoring previous params: (\"\n f\"lrpatience: {self.lrpatience},\",\n f\"lrdecay: {self.lrdecay},\",\n f\"ntest: {self.ntest},\",\n f\"ndiscard: {self.ndiscard},\",\n f\"seed: {self.seed},\",\n f\"timesteps: {self.timesteps})\")",
"def _resume_checkpoint(self, resume_path, model, optimizer):\n if not resume_path:\n return model, optimizer\n\n self.logger.info(f'Loading checkpoint: {resume_path}')\n checkpoint = torch.load(resume_path)\n model.load_state_dict(checkpoint['state_dict'])\n\n # load optimizer state from checkpoint only when optimizer type is not changed.\n if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:\n self.logger.warning(\"Warning: Optimizer type given in config file is different from \"\n \"that of checkpoint. Optimizer parameters not being resumed.\")\n else:\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n self.logger.info(f'Checkpoint \"{resume_path}\" loaded')\n return model, optimizer",
"def restore_model_params(self, in_dict):\n\n self.trained_model_params = in_dict['model_params']",
"def reload_checkpoint(self):\n checkpoint_path = os.path.join(self.params.dump_path, 'checkpoint.pth')\n if not os.path.isfile(checkpoint_path):\n if self.params.reload_checkpoint == '':\n return\n else:\n checkpoint_path = self.params.reload_checkpoint\n assert os.path.isfile(checkpoint_path)\n logger.warning(\"Reloading checkpoint from %s ...\" % checkpoint_path)\n data = torch.load(checkpoint_path, map_location='cpu')\n\n # reload model parameters\n for name in self.MODEL_NAMES:\n getattr(self, name).load_state_dict(data[name])\n\n # reload optimizers\n for name in self.optimizers.keys():\n if False: # AMP checkpoint reloading is buggy, we cannot do that - TODO: fix - https://github.com/NVIDIA/apex/issues/250\n logger.warning(\"Reloading checkpoint optimizer %s ...\" % name)\n else: # instead, we only reload current iterations / learning rates\n logger.warning(\"Not reloading checkpoint optimizer %s.\" % name)\n for group_id, param_group in enumerate(self.optimizers[name].param_groups):\n if 'num_updates' not in param_group:\n logger.warning(\"No 'num_updates' for optimizer %s.\" % name)\n continue\n logger.warning(\"Reloading 'num_updates' and 'lr' for optimizer %s.\" % name)\n param_group['num_updates'] = data['%s_optimizer' % name]['param_groups'][group_id]['num_updates']\n param_group['lr'] = self.optimizers[name].get_lr_for_step(param_group['num_updates'])\n\n # reload main metrics\n self.epoch = data['epoch'] + 1\n self.n_total_iter = data['n_total_iter']\n self.best_metrics = data['best_metrics']\n self.best_stopping_criterion = data['best_stopping_criterion']\n logger.warning(\"Checkpoint reloaded. Resuming at epoch %i / iteration %i ...\" % (self.epoch, self.n_total_iter))"
] | [
"0.7186363",
"0.70417887",
"0.698167",
"0.6969412",
"0.6942512",
"0.68953955",
"0.6884264",
"0.67852926",
"0.6671625",
"0.6624348",
"0.6529385",
"0.6500647",
"0.64891875",
"0.6488359",
"0.6476572",
"0.6403476",
"0.6291075",
"0.6262366",
"0.62580115",
"0.62240016",
"0.6164733",
"0.61379343",
"0.61257774",
"0.6124804",
"0.61138195",
"0.6105918",
"0.6098807",
"0.6037821",
"0.6029292",
"0.6023606"
] | 0.7252105 | 0 |
Return Q function graph functor for building training/inference graph. | def get_q_func(self, is_training=False, reuse=False, scope='q_func'):
return functools.partial(self.q_func,
scope=scope,
reuse=reuse,
is_training=is_training) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_tf_graph(self):\n raise NotImplementedError",
"def _build_graph(self,\n question_word,\n question_word_mask,\n question_subword,\n question_subword_mask,\n question_char,\n question_char_mask,\n context_word,\n context_word_mask,\n context_subword,\n context_subword_mask,\n context_char,\n context_char_mask):\n with tf.variable_scope(\"graph\", reuse=tf.AUTO_REUSE):\n \"\"\"build representation layer for qanet model\"\"\"\n (question_feat, question_feat_mask, context_feat,\n context_feat_mask) = self._build_representation_layer(question_word, question_word_mask,\n question_subword, question_subword_mask, question_char, question_char_mask, context_word,\n context_word_mask, context_subword, context_subword_mask, context_char, context_char_mask)\n \n \"\"\"build understanding layer for qanet model\"\"\"\n (question_understanding, context_understanding, question_understanding_mask,\n context_understanding_mask) = self._build_understanding_layer(question_feat,\n context_feat, question_feat_mask, context_feat_mask)\n \n \"\"\"build interaction layer for qanet model\"\"\"\n answer_interaction, answer_interaction_mask = self._build_interaction_layer(question_understanding,\n context_understanding, question_understanding_mask, context_understanding_mask)\n \n \"\"\"build modeling layer for qanet model\"\"\"\n answer_modeling, answer_modeling_mask = self._build_modeling_layer(answer_interaction, answer_interaction_mask)\n \n \"\"\"build output layer for qanet model\"\"\"\n answer_output_list, answer_output_mask_list = self._build_output_layer(answer_modeling, answer_modeling_mask)\n answer_start_output = answer_output_list[0]\n answer_end_output = answer_output_list[1]\n answer_start_output_mask = answer_output_mask_list[0]\n answer_end_output_mask = answer_output_mask_list[1]\n \n return answer_start_output, answer_end_output, answer_start_output_mask, answer_end_output_mask",
"def _build_computation_graph(self):\n raise NotImplementedError",
"def build_inference_graph(self):\n self.build_train_graph()",
"def buildGraph(self):\r\n\r\n print 'Building graph...'\r\n\r\n self.buildQ()\r\n self.buildP()\r\n self.buildReconstructionTerm()\r\n self.buildConditionalPriorTerm()\r\n self.buildWPriorTerm()\r\n self.buildZPriorTerm()\r\n\r\n self.buildObjective()\r\n self.buildGrad()",
"def _build_algorithm(self):\n self.optimizer = tf.train.AdamOptimizer(self._lr, epsilon=1.5e-8)\n trainable_variables = tf.trainable_variables(\"main/qnet\")\n\n # Compute the state value.\n batch_size = tf.shape(self._observation)[0]\n action_index = tf.stack([tf.range(batch_size), self._action], axis=1)\n action_q = tf.gather_nd(self._qvals, action_index)\n assert_shape(action_q, [None])\n\n # Compute back up.\n ave_q = tf.add_n(self._target_qvals) / self._n_net\n assert_shape(tf.reduce_max(ave_q, axis=1), [None])\n q_backup = tf.stop_gradient(self._reward + self._discount * (1 - self._done) * tf.reduce_max(ave_q, axis=1))\n\n # Compute loss and optimize the object.\n loss = tf.reduce_mean(tf.squared_difference(q_backup, action_q)) # 损失值。\n self._train_op = self.optimizer.minimize(loss, var_list=trainable_variables)\n\n # Update target network.\n update_target_operation = []\n for i in reversed(range(1, self._n_net)): # i=0表示最近的模型。\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(f\"target_{i}/qnet\", f\"target_{i-1}/qnet\"))\n\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(\"target_0/qnet\", \"main/qnet\"))\n\n self.update_target_op = update_target_operation\n self._log_op = {\"loss\": loss}",
"def build(self, graph, name_scopes, training):\n raise NotImplementedError('Must be overridden by concrete subclass')",
"def build_graph(self):\n raise NotImplementedError",
"def _construct_graph(self):\n raise NotImplementedError",
"def build_model_fn(self):\n # Define the model_fn we want to return\n def model_fn(features, labels, mode):\n with tf.variable_scope(self.variable_scope):\n # 1. Define the input placeholder\n if len(self.input_shape) == 2: # Reshape if necessary\n new_shape = [-1] + list(self.input_shape) + [1]\n net_input = tf.reshape(\n tensor=features[\"x\"],\n shape=new_shape,\n name=\"L0_RESHAPE\"\n )\n else:\n net_input = features[\"x\"]\n\n # 2. Simply call the network\n self.tf_partial_network = sequence_to_net(\n sequence=self.encoded_network,\n input_tensor=net_input\n )\n\n # 3. Call here the functions for flops & density to avoid more\n # elements. The check is done because for some reason, the\n # number of FLOPS changes during training.\n if self.flops is None:\n self.flops = compute_network_flops(\n graph=tf.get_default_graph(),\n collection_name=self.variable_scope,\n logdir=self.log_path\n )\n\n if self.density is None:\n self.density = compute_network_density(\n graph=tf.get_default_graph(),\n collection_name=self.variable_scope\n )\n\n # 4. Build the fully-connected layer after the block\n with tf.name_scope(\"L_FC\"):\n # Flatten and connect to the Dense Layer\n ll_flat = tf.layers.flatten(\n inputs=self.tf_partial_network,\n name=\"Flatten\"\n )\n dense_layer = tf.layers.dense(\n inputs=ll_flat,\n units=1024,\n activation=tf.nn.relu,\n name=\"DENSE\"\n )\n dropout_layer = tf.layers.dropout(\n inputs=dense_layer,\n rate=0.4,\n # pylint: disable=no-member\n training=mode == tf.estimator.ModeKeys.TRAIN,\n name=\"DROPOUT\"\n )\n\n # 5. Build the prediction layer, based on a softmax\n with tf.name_scope(\"L_PRED\"):\n # Logits layer\n logits_layer = tf.layers.dense(\n inputs=dropout_layer,\n units=self.n_clases,\n name=\"PL_Logits\"\n )\n\n predictions = {\n \"classes\": tf.argmax(\n input=logits_layer,\n axis=1,\n name=\"PL_Classes\"\n ),\n \"probabilities\": tf.nn.softmax(\n logits=logits_layer,\n name=\"PL_Softmax\"\n )\n }\n\n # If we are asked for prediction only, we return the\n # prediction and stop adding nodes to the graph.\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions\n )\n\n # Build the training nodes\n with tf.name_scope(\"L_TRAIN\"):\n # Loss\n loss_layer = tf.losses.sparse_softmax_cross_entropy(\n labels=labels,\n logits=logits_layer\n )\n\n # Training Op\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.TRAIN:\n # The optimizer via Gradient Descent (we can change it)\n optimizer = tf.train.AdamOptimizer(\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=10e-08,\n name=\"OPT\"\n )\n # We say that we want to optimize the loss layer using\n # the optimizer.\n train_op = optimizer.minimize(\n loss=loss_layer,\n global_step=tf.train.get_global_step(),\n name=\"OPT_MIN\"\n )\n # And return\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n train_op=train_op\n )\n\n # Build the evaluation nodes (regular accuracy).\n with tf.name_scope(\"L_EVAL\"):\n # Evaluation metric is accuracy\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels,\n predictions=predictions[\"classes\"],\n name=\"ACC\"\n )\n }\n\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n eval_metric_ops=eval_metric_ops\n )\n\n # Return the model_fn function\n return model_fn",
"def classical_preprocessing(*args, **kwargs):\r\n qnode.construct(args, kwargs)\r\n return qml.math.stack(qnode.qtape.get_parameters())",
"def build_graph(self, data_dir, batch_size, mode):\r\n tensors = GraphReferences()\r\n assert batch_size > 0\r\n self.batch_size = batch_size\r\n \r\n if mode in (TRAIN, EVAL):\r\n trainData = TFDataLoaderUtil(data_dir, 'train2014')\r\n trainQuestions = [value['question'] for key, value in trainData.dataset.items()]\r\n self.MAX_QUES_PAD_LEN = max(list(map(lambda x: len(self.tokenizer.split_sentence(x)), trainQuestions)))\r\n self.tokenizer.generate_vocabulary(trainQuestions)\r\n logging.info('Size of Question Vectors: %d', self.MAX_QUES_PAD_LEN)\r\n \r\n self.trainTFDataset = trainData.genTFDatasetObject(self.tokenizer, \r\n self.MAX_QUES_PAD_LEN, \r\n self.batch_size, \r\n self.NUM_PARALLEL_CALLS, \r\n self.BUFFER_SIZE)\r\n tensors.quesVec = self.trainTFDataset.get_next()[0]\r\n tensors.posImg = self.trainTFDataset.get_next()[1]\r\n tensors.negImg = self.trainTFDataset.get_next()[2]\r\n \r\n if mode is EVAL:\r\n evalData = TFDataLoaderUtil(data_dir, 'val2014') \r\n self.evalTFDataset = evalData.genTFDatasetObject(self.tokenizer, \r\n self.MAX_QUES_PAD_LEN, \r\n self.batch_size, \r\n self.NUM_PARALLEL_CALLS, \r\n self.BUFFER_SIZE)\r\n \r\n tensors.quesVec = self.evalTFDataset.get_next()[0]\r\n tensors.posImg = self.evalTFDataset.get_next()[1]\r\n tensors.negImg = self.evalTFDataset.get_next()[2] \r\n \r\n siamGAN = SiamGan()\r\n quesEmbeds = QuestionEmbedding().stackedLSTMWordEmbedding(\r\n vocab_size=self.VOCAB_SIZE, \r\n embed_size=self.WORD_EMBED_SIZE, \r\n INP_SIZE=self.QUES_SIZE)\r\n \r\n tensors.posImgEmbeds = siamGAN.getDiscriminator(self.IMG_SHAPE)(tensors.posImage)\r\n \r\n tensors.negImgEmbeds = siamGAN.getDiscriminator(self.IMG_SHAPE)(tensors.negImg)\r\n\r\n tensors.genImgdata = siamGAN.getGenerator(self.QUES_EMBED_SIZE)(quesEmbeds(self.quesVec))\r\n\r\n tensors.genImgEmbeds = siamGAN.getDiscriminator(self.IMG_SHAPE)(tensors.genImgdata)\r\n \r\n \r\n if mode in (EVAL, TRAIN):\r\n\r\n tensors.discLoss, tensors.genLoss = siamGAN.tripletLoss(\r\n tensors.genImgEmbeds, \r\n tensors.posImgEmbeds, \r\n tensors.negImgEmbeds)\r\n #regularize\r\n \r\n tf.summary.scalar('cost_generator', tensors.genLoss)\r\n tf.summary.scalar('cost_discriminator', tensors.discLoss)\r\n tf.summary.tensor_summary('disc_pos', tensors.posImgEmbeds)\r\n tf.summary.tensor_summary('disc_neg', tensors.negImgEmbeds)\r\n tf.summary.scalar('mean_disc_pos', tf.reduce_mean(tensors.posImgEmbeds))\r\n tf.summary.scalar('mean_disc_neg', tf.reduce_mean(tensors.negImgEmbeds))\r\n \r\n # Cost of Decoder/Generator is VAE network cost and cost of generator\r\n # being detected by the discriminator.\r\n tensors.global_step = tf.Variable(0, name='global_step', trainable=False)\r\n t_vars = tf.trainable_variables()\r\n \r\n with tf.variable_scope(tf.get_variable_scope(), reuse=None):\r\n generator_vars = [var for var in t_vars if var.name.startswith('gen_')]\r\n discriminator_vars = [\r\n var for var in t_vars if var.name.startswith('disc_')\r\n ]\r\n \r\n tensors.discOptimizer = tf.train.GradientDescentOptimizer(\r\n self.DISC_LR).minimize(\r\n tensors.discLoss,\r\n var_list = discriminator_vars,\r\n global_step = tensors.global_step)\r\n \r\n tensors.genOptimizer = tf.train.AdamOptimizer(\r\n learning_rate = self.GEN_LR, \r\n beta1 = self.GEN_BETA1, \r\n beta2 = self.GEN_BETA2).minimize(\r\n tensors.genLoss,\r\n var_list = generator_vars,\r\n global_step = tensors.global_step)\r\n\r\n return tensors",
"def build_graph(self, name):\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n # change shape of input for when adding score\n self.input_positions = tf.placeholder(tf.float32, shape=(None, 1, 2,6), name='inputs')\n self.target_q = tf.placeholder(shape=[None], dtype=tf.float32, name='target')\n net = self.input_positions\n\n net = tf.layers.conv2d(inputs=net, filters=128, kernel_size=6,\n kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(),\n data_format=\"channels_last\", padding='SAME', activation=tf.nn.relu)\n net = tf.layers.conv2d(inputs=net, filters=128, kernel_size=6,\n kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(),\n data_format=\"channels_last\", padding='SAME', activation=tf.nn.relu)\n net = tf.layers.conv2d(inputs=net, filters=64, kernel_size=6,\n kernel_regularizer=tf.contrib.layers.l1_l2_regularizer(),\n data_format=\"channels_last\", padding='SAME', activation=tf.nn.relu)\n\n net = tf.layers.flatten(net)\n\n net = self.add_dense_layer(net, 12, tf.nn.relu)\n\n self.value = self.add_dense_layer(net, 1, name='state_q_value')\n self.advantage = self.add_dense_layer(net, 12, name='action_advantage')\n\n self.q_values = tf.add(self.value, tf.subtract(self.advantage,\n tf.reduce_mean(self.advantage, axis=1, keepdims=True)),\n name=\"action_q_values\")\n\n self.probabilities = tf.nn.softmax(self.q_values, name='probabilities')\n\n self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name='actions')\n self.actions_onehot = tf.one_hot(self.actions, 12, dtype=tf.float32)\n self.q = tf.reduce_sum(tf.multiply(self.q_values, self.actions_onehot), axis=1, name=\"selected_action_q\")\n\n tf.summary.histogram(\"Action_Q_values\", self.q)\n\n self.td_error = tf.square(self.target_q - self.q)\n self.loss = tf.reduce_mean(self.td_error, name=\"q_loss\")\n\n tf.summary.scalar(\"Q_Loss\", self.loss)\n self.reg_losses = tf.identity(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=name),\n name=\"reg_losses\")\n\n reg_loss = self.beta * tf.reduce_mean(self.reg_losses)\n tf.summary.scalar(\"Regularization_loss\", reg_loss)\n\n self.merge = tf.summary.merge_all()\n\n self.total_loss = tf.add(self.loss, reg_loss, name=\"total_loss\")\n self.train_step = tf.train.GradientDescentOptimizer(learning_rate=self.learningRate). \\\n minimize(self.total_loss, name='train')",
"def build_tf_graph(self, reuse=False):\n self._state_ph = tf.placeholder(tf.float32, self._state_shape)\n if self._include_timestep:\n self._step_ph = tf.placeholder(tf.int32, (1,))\n state = (self._state_ph, self._step_ph)\n else:\n state = self._state_ph\n self._qs, self._end_points_t = self.q_func(\n state, self._action_ph, scope='q_func', reuse=reuse, is_training=False)\n if self._build_target:\n self._target_qs, self._end_points_tp1 = self.q_func(\n state, self._action_ph, scope='target_q_func', reuse=reuse,\n is_training=False)",
"def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()",
"def gen_graph(self):",
"def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)",
"def build_graph(self):\n n_classes = self.n_classes\n\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n self.compute_rDeRF() # dummy\n\n # Classification\n (self.feed('conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool6')\n .reshape(shape=(-1, 7, 7, 512), name='pool6_reshape')\n .fc(4096, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(4096, name='fc7')\n .dropout(0.5, name='drop7')\n # .make_time(name='drop7_reduced')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n pass",
"def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.regularizer_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)",
"def build_model_fn(self):\n # Define the model_fn we want to return\n def model_fn(features, labels, mode):\n with tf.variable_scope(self.variable_scope):\n # 1. Define the input placeholder\n if len(self.input_shape) == 2:\n net_input = tf.reshape(\n tensor=features[\"x\"],\n shape=[-1] + list(self.input_shape) + [1],\n name=\"L0_RESHAPE\"\n )\n else:\n net_input = features[\"x\"]\n\n # 2. Simply call the network\n self.tf_partial_network = sequence_to_net(\n sequence=self.encoded_network,\n input_tensor=net_input\n )\n\n # 3. Build the Fully-Connected layers after block.\n with tf.name_scope(\"L_FC\"):\n # Flatten and connect to the Dense Layer\n ll_flat = tf.layers.flatten(\n inputs=self.tf_partial_network,\n name=\"Flatten\"\n )\n dense_layer = tf.layers.dense(\n inputs=ll_flat,\n units=1024,\n activation=tf.nn.relu,\n name=\"DENSE\"\n )\n dropout_layer = tf.layers.dropout(\n inputs=dense_layer,\n rate=0.4,\n # pylint: disable=no-member\n training=mode == tf.estimator.ModeKeys.TRAIN,\n name=\"DROPOUT\"\n )\n\n # 4. Build the Prediction Layer based on a Softmax\n with tf.name_scope(\"L_PRED\"):\n # Logits layer\n logits_layer = tf.layers.dense(\n inputs=dropout_layer,\n units=self.n_clases,\n name=\"PL_Logits\"\n )\n\n predictions = {\n \"classes\": tf.argmax(\n input=logits_layer,\n axis=1,\n name=\"PL_Classes\"\n ),\n \"probabilities\": tf.nn.softmax(\n logits=logits_layer,\n name=\"PL_Softmax\"\n )\n }\n\n # If we are asked for prediction only, we return the\n # prediction and stop adding nodes to the graph.\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions\n )\n\n # 4. Build the training nodes\n with tf.name_scope(\"L_TRAIN\"):\n # Loss\n loss_layer = tf.losses.sparse_softmax_cross_entropy(\n labels=labels,\n logits=logits_layer\n )\n\n # Training Op\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.TRAIN:\n # The optimizer via Gradient Descent (we can change it)\n optimizer = tf.train.AdamOptimizer(\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=10e-08,\n name=\"OPT\"\n )\n # We say that we want to optimize the loss layer using\n # the optimizer.\n train_op = optimizer.minimize(\n loss=loss_layer,\n global_step=tf.train.get_global_step(),\n name=\"OPT_MIN\"\n )\n # And return\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n train_op=train_op\n )\n\n # 5. Build the evaluation nodes.\n with tf.name_scope(\"L_EVAL\"):\n # Evaluation metric is accuracy\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels,\n predictions=predictions[\"classes\"],\n name=\"ACC\"\n )\n }\n\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n eval_metric_ops=eval_metric_ops\n )\n # End of tf.variable_scope()\n\n # Return the model_fn function\n return model_fn",
"def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.cluster_layer = Clustering(self.args)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.gamma*self.cluster_layer(self.factorization_layer)+self.regularizer_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)",
"def build_graph(self):\n pass",
"def _build_graph(self):\n pass",
"def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.cluster_layer = Clustering(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.gamma*self.cluster_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)",
"def build_graph(self):\n\n ##### Build Graph #####\n baseModel.build_graph(self)\n\n ##### Create Optimization #####\n with tf.variable_scope(\"optimize\"):\n self.add_loss()\n self.add_accuracy()\n self.initialize_learning_rate()\n self.initialize_optimization()\n\n ##### History and Checkpoints #####\n self.hasTrained = False\n self._lastSaved = collections.defaultdict(None)\n self.history = collections.defaultdict(list)\n self.saver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n self.bestLossSaver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n self.bestAccSaver = tf.train.Saver(\n tf.global_variables(), \n max_to_keep=self.FLAGS.keep)\n\n logging.basicConfig(level=logging.INFO)\n log_handler = logging.FileHandler(\"log.txt\")\n logging.getLogger().addHandler(log_handler)\n\n self.summaries = tf.summary.merge_all()",
"def build_graph(self):\n with vs.variable_scope(\"context\"):\n context_encoder = RNNEncoder(self.FLAGS.hidden_size, self.keep_prob)\n context_hiddens = context_encoder.build_graph(self.context_embs,\n self.context_mask) # (batch_size, context_len, hidden_size*2)\n\n with vs.variable_scope(\"question\"):\n question_encoder = RNNEncoder(self.FLAGS.hidden_size, self.keep_prob)\n question_hiddens = question_encoder.build_graph(self.qn_embs,\n self.qn_mask) # (batch_size, question_len, hidden_size*2)\n question_last_hidden = tf.reshape(question_hiddens[:, -1, :], (-1, 2 * self.FLAGS.hidden_size))\n question_last_hidden = tf.contrib.layers.fully_connected(question_last_hidden,\n num_outputs=self.FLAGS.hidden_size)\n # Use context hidden states to attend to question hidden states\n\n # attn_output is shape (batch_size, context_len, hidden_size*2)\n # The following is BiDAF attention\n if self.FLAGS.use_bidaf:\n attn_layer = BiDAF(self.keep_prob, self.FLAGS.hidden_size * 2, self.FLAGS.hidden_size * 2)\n attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens,\n self.context_mask) # (batch_size, context_len, hidden_size * 6)\n else: # otherwise, basic attention\n attn_layer = BasicAttn(self.keep_prob, self.FLAGS.hidden_size * 2, self.FLAGS.hidden_size * 2)\n _, attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens)\n # Concat attn_output to context_hiddens to get blended_reps\n blended_reps = tf.concat([context_hiddens, attn_output], axis=2) # (batch_size, context_len, hidden_size*4)\n\n blended_reps_final = tf.contrib.layers.fully_connected(blended_reps, num_outputs=self.FLAGS.hidden_size)\n\n decoder = RNNDecoder(self.FLAGS.batch_size, self.FLAGS.hidden_size, self.ans_vocab_size, self.FLAGS.answer_len,\n self.ans_embedding_matrix, self.keep_prob, sampling_prob=self.sampling_prob,\n schedule_embed=self.FLAGS.schedule_embed, pred_method=self.FLAGS.pred_method)\n (self.train_logits, self.train_translations, _), \\\n (self.dev_logits, self.dev_translations, self.attention_results) = decoder.build_graph(blended_reps_final, question_last_hidden,\n self.ans_embs, self.ans_mask, self.ans_ids,\n self.context_mask)",
"def build_tf_graph(self, reuse=False):\n self._state_ph = tf.placeholder(tf.float32, self._state_shape)\n if self._include_timestep:\n self._step_ph = tf.placeholder(tf.int32, (1,))\n state = (self._state_ph, self._step_ph)\n else:\n state = self._state_ph\n self._action, _ = self.a_func(\n state, self._action_size, scope='a_func', is_training=False)\n self._qs, self._end_points_t = self.q_func(\n state, self._action, scope='q_func', reuse=reuse, is_training=False)\n if self._build_target:\n self._target_qs, self._end_points_tp1 = self.q_func(\n state, self._action, scope='target_q_func', reuse=reuse,\n is_training=False)",
"def __init__(self, scope, target_network, env, flags):\n\n self.TF_FLAGS = flags\n self.env = env\n self.scope = 'dqn' + scope\n\n if 'target' in scope:\n with tf.variable_scope(scope):\n\n self.states = tf.placeholder(tf.float32, shape=(\n None, self.env.get_state_size()), name='states')\n\n self.q = self.create_network(scope='q_network_target')\n self.param = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope+'/q_network_target')\n\n elif 'local' in scope:\n\n with tf.variable_scope(scope):\n # Add the target network instance\n self.target_network = target_network\n\n # Create the placeholders for the inputs to the network\n self.states = tf.placeholder(\n tf.float32, shape=(None, self.env.get_state_size()),\n name='states')\n self.actions = tf.placeholder(\n tf.uint8, shape=(None, ), name='actions')\n self.q_targets = tf.placeholder(\n tf.float32, shape=(None,), name='q_targets')\n\n # Create the network with the goal of predicting the\n # action-value function\n self.q = self.create_network(scope='q_network')\n\n # The parameters of the network\n self.param = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/q_network')\n\n\n with tf.name_scope('q_network_loss'):\n # Difference between targets value and calculated ones by\n # the model\n actions_one_hot = tf.one_hot(self.actions, self.env.get_action_size(), 1.0, 0.0, name='action_one_hot')\n q = tf.reduce_sum(self.q * actions_one_hot, reduction_indices=1, name='q_acted')\n self.loss = tf.losses.mean_squared_error(\n q, self.q_targets)\n\n with tf.name_scope('train_q_network'):\n self.train_opt = tf.train.AdamOptimizer(\n self.TF_FLAGS.critic_learning_rate).minimize(self.loss)\n\n with tf.name_scope('update_q_target'):\n # Perform a soft update of the parameters: Critic network parameters = Local Parameters (LP) and Target network parameters (TP)\n # TP = tau * LP + (1-tau) * TP\n self.update_opt = [tp.assign(tf.multiply(self.TF_FLAGS.tau, lp) + tf.multiply(\n 1 - self.TF_FLAGS.tau, tp)) for tp, lp in zip(self.target_network.param, self.param)]\n\n with tf.name_scope('initialize_q_target_network'):\n # Set the parameters of the local network equal to the target one\n # LP = TP\n self.init_target_op = [tp.assign(lp) for tp, lp in zip(\n self.target_network.param, self.param)]\n\n with tf.name_scope('q_nexts'):\n self.q_nexts = tf.math.reduce_max(self.target_network.q, axis=1)",
"def _make_q_network(states, num_actions, params, scope_name):\n kernel_regularizer = tf.contrib.layers.l2_regularizer(params.l2_scale) if params.l2_scale else None\n with tf.variable_scope(scope_name) as scope:\n hidden0 = tf.layers.dense(states, params.hidden_units[0],\n activation=params.activation,\n kernel_initializer=params.initializer,\n kernel_regularizer=kernel_regularizer)\n hidden = tf.layers.dense(hidden0, params.hidden_units[1],\n activation=params.activation,\n kernel_initializer=params.initializer,\n kernel_regularizer=kernel_regularizer)\n outputs = tf.layers.dense(hidden, num_actions,\n kernel_initializer=params.initializer,\n kernel_regularizer=kernel_regularizer)\n\n trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope=scope.name)\n trainable_vars_by_name = {var.name[len(scope.name):]: var\n for var in trainable_vars}\n # get the regularization loss\n reg_loss = tf.losses.get_regularization_loss(scope_name)\n return outputs, trainable_vars_by_name, reg_loss",
"def model_fn_builder(config):\n def model_fn(features,labels,mode,params):\n \"\"\"The model_fn for Estimator\"\"\"\n input_q = features[\"input_q\"] # query feature vector\n input_K = features[\"input_K\"] # Key set Matrix\n input_v = features[\"input_v\"] # image visual feature vector\n input_labels = features[\"input_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.AMT(\n config = config,\n is_trainging = is_training, \n scope = \"AMT\",\n input_q = input_q,\n input_K = input_K,\n input_v = input_v\n )\n loss = model.loss\n q_doc_rank = model.get_predict()\n output_spec = None\n scaffold_fn = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer()\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n loss = loss,\n train_op = train_op,\n scaffold_fn = scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn():\n return 0\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n predictions = q_doc_rank,\n scaffold_fn = scaffold_fn)\n return output_spec\n return model_fn"
] | [
"0.6407784",
"0.6063661",
"0.5972204",
"0.58639354",
"0.5774932",
"0.57135445",
"0.56873643",
"0.56436694",
"0.5618003",
"0.5546803",
"0.55211556",
"0.55197084",
"0.5515311",
"0.5491966",
"0.54637355",
"0.5455583",
"0.5406591",
"0.54064405",
"0.5396603",
"0.5387212",
"0.53796375",
"0.5377908",
"0.537008",
"0.53655976",
"0.5355636",
"0.5349918",
"0.53488904",
"0.53376216",
"0.5335679",
"0.53347784"
] | 0.6294904 | 1 |
Tests the config key parser with both string and full email formats. | def test_config_keys() -> None:
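    # A config key containing periods is rejected.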
with pytest.raises(ConfigFileError):
f = StringIO("""
configs:
has.periods:
urls:
- json://localhost
""")
load_config(_logger, f)
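    # A malformed email address (missing domain) is also rejected.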
with pytest.raises(ConfigFileError):
f = StringIO("""
configs:
bademail@:
urls:
- json://localhost
""")
load_config(_logger, f)
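    # A full email address is accepted and becomes a sender key.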
f = StringIO("""
configs:
[email protected]:
urls:
- json://localhost
""")
mrise = load_config(_logger, f)
assert len(mrise.senders) == 1
key = Key(user='user', domain='example.com')
assert key in mrise.senders | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_email_address(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_address'\n )\n self.assertEqual(u'[email protected]', key)",
"def test_is_valid_email(self):\n self.assertTrue(is_valid_email('[email protected]'))",
"def test_invalid_email_when_logging_in(self):\n pass",
"def test_email_name(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_name'\n )\n self.assertEqual(u'Briefy CMS', key)",
"def test_email():\n assert is_email(None) is None\n assert is_email('[email protected]') is None\n assert is_email('other')",
"def test_clean_email(self):\n\n raw_email = 'from=<[email protected]>'\n result = clean_email(raw_email)\n self.assertEqual(result, '[email protected]')",
"def is_email_address(value):\n return _COMPILED[EMAIL].match(value) != None",
"def is_valid_email(email):\n return \"@\" in email and \".\" in email",
"def test_compose_email2_good(self):\n pass",
"def test_email(self, env: yaenv.Env):\n _email = {\n 'EMAIL_BACKEND': yaenv.email.SCHEMES['dummy'],\n 'EMAIL_HOST_USER': '',\n 'EMAIL_HOST_PASSWORD': '',\n 'EMAIL_HOST': 'localhost'\n }\n assert env.email('EMAIL_URL') == _email\n _email.update({\n 'EMAIL_BACKEND': yaenv.email.SCHEMES['console'],\n 'EMAIL_HOST': '127.0.0.1'\n })\n assert env.email('EMAIL_URL_MISSING', 'console://127.0.0.1') == _email\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.email('INVALID_URL', 'invalid')\n assert 'Invalid e-mail' in str(err.value)\n assert env.email('MISSING') is None",
"def __validate_email(self,mail):\n if re.match(r\"[\\w\\W]*@+[\\w\\W]*[.]+[\\w]{2,4}\",mail):\n return True\n return False",
"def test_is_valid():\n # Expected input.\n assert make_european.is_valid('12/31/2015:[email protected]')\n # Check 'at' typo.\n assert not make_european.is_valid('12/31/2015:john.a.grahamgmail.com')\n # Check colon typo.\n assert not make_european.is_valid('12/31/2015::[email protected]')\n # Check forward slash replacement.\n assert not make_european.is_valid('12.31.2015:[email protected]')\n # Check order.\n assert not make_european.is_valid('[email protected]:12/31/2015')",
"def test_compose_email_somebad(self):\n pass",
"def email_type(verifield, required):\n return verifield is None or parseaddr(verifield) != ('','')",
"def config_email_host(email_config: dict) -> dict:\n print(\"Email host not configured.\\nPlease enter host: \")\n email_config[\"host\"] = sys.stdin.readline().strip()\n print(\"Enter Port: \")\n email_config[\"port\"] = int(sys.stdin.readline().strip())\n\n if email_config[\"host\"] != \"localhost\":\n print(\"Enter User Name: \")\n email_config[\"username\"] = sys.stdin.readline().strip()\n\n print(\"Enter Password: \")\n crypt = Crypt()\n email_config[\"password\"] = crypt.encrypt(getpass.getpass())\n\n print(\"Does the Email service use SSL? (y/n): \")\n email_config[\"ssl\"] = sys.stdin.readline().strip().lower() in [\n \"true\",\n \"y\",\n \"yes\",\n ]\n\n return email_config",
"def email_spec(\n tag: Tag = \"email\", conformer: Optional[Conformer] = None, **kwargs\n) -> Spec:\n\n @pred_to_validator(f\"Value '{{value}}' is not type 'str'\", complement=True)\n def is_str(x: Any) -> bool:\n return isinstance(x, str)\n\n child_validators = []\n for email_attr in _EMAIL_RESULT_FIELDS:\n in_attr = kwargs.pop(f\"{email_attr}_in\", _IGNORE_OBJ_PARAM)\n regex_attr = kwargs.pop(f\"{email_attr}_regex\", _IGNORE_OBJ_PARAM)\n exact_attr = kwargs.pop(f\"{email_attr}\", _IGNORE_OBJ_PARAM)\n\n if (\n sum(\n int(v is not _IGNORE_OBJ_PARAM)\n for v in [in_attr, regex_attr, exact_attr]\n )\n > 1\n ):\n raise ValueError(\n f\"Email specs may only specify one of {email_attr}, \"\n f\"{email_attr}_in, and {email_attr}_regex for any Email attribute\"\n )\n\n attr_validator = _obj_attr_validator(\n \"Email\", email_attr, exact_attr, regex_attr, in_attr\n )\n if attr_validator is not None:\n child_validators.append(attr_validator)\n\n if kwargs:\n raise ValueError(f\"Unused keyword arguments: {kwargs}\")\n\n def validate_email(p: EmailAddress) -> Iterator[ErrorDetails]:\n for validate in child_validators:\n yield from validate(p)\n\n def str_contains_email(s: str) -> Iterator[ErrorDetails]:\n try:\n addr = EmailAddress(addr_spec=s)\n except (TypeError, ValueError) as e:\n yield ErrorDetails(\n message=f\"String '{s}' does not contain a valid email address: {e}\",\n pred=str_contains_email,\n value=s,\n )\n else:\n yield from validate_email(addr)\n\n return ValidatorSpec.from_validators(\n tag, is_str, str_contains_email, conformer=conformer\n )",
"def test_is_invalid_email(self):\n self.assertFalse(is_valid_email('helloworld'))",
"def _validate(self, s: str):\n if not re.match(r'[^@]+@[^@]+\\.[^@]+', s):\n raise ValueError(('Invalid Django superuser email address \"{}\": '\n 'the format should be like '\n '\"[email protected]\"').format(s))",
"def test_get_email_address(self):\n email_addr = 'test_get_email_addr' + '@' + self.email_dom\n org = 'o=%s' % (self.org_name)\n people = '%s=%s' % (self.container_attr, self.user_container)\n uid = '%s=%s' % (self.user_key, self.user_id)\n dn = '%s,%s,%s,%s' % (uid, people, org, self.base_dn)\n dn_info = {self.smtp_address: [email_addr]}\n expected_result = [(dn, dn_info)] \n addr = SpokeEmailAddress(self.org_name, self.user_id)\n addr.create(email_addr)\n result = addr.get(email_addr)['data']\n self.assertEqual(result, expected_result)",
"def _parse_from_email(self, from_email):\n if isinstance(from_email, str):\n return self._generate_email(from_email)\n elif isinstance(from_email, dict):\n return self._generate_email(**from_email)\n else:\n raise ValueError('Invalid from email adress')",
"def test_invalid_email_address_input(self):\n email_addr = '*@domain.loc'\n addr = SpokeEmailAddress(self.org_name, self.user_id)\n self.assertRaises(error.InputError, addr.get, email_addr)",
"def validate_email(val):\n match = re.match(email_pattern, val)\n if not match:\n raise argparse.ArgumentTypeError(\"Invalid email address\")\n return val",
"def test_good_email():\n good_email = \"[email protected]\"\n m = CannedRe.EMAIL.match(good_email)\n # print getmembers(m)\n assert m is not None, \"Canned RegEx email test failed for %s\" % good_email\n assert m.string == good_email",
"def is_valid_email(form, value):\n if '@' not in value or len(value) > 200:\n raise forms.ValidationError(_('Invalid email address'))",
"def _assertEmailParsedCorrectly(self, email, data):\n self.assertIsInstance(email, EmailMultiAlternatives)\n self.assertEqual(email.to, data.get('recipient', '').split(','))\n self.assertEqual(email.from_email, data.get('sender', ''))\n self.assertEqual(email.subject, data.get('subject', ''))\n self.assertEqual(email.body, \"%s\\n\\n%s\" % (\n data.get('stripped-text', ''),\n data.get('stripped-signature', '')\n ))\n self.assertEqual(email.cc, data.get('cc', '').split(','))\n self.assertEqual(email.bcc, data.get('bcc', '').split(','))\n if 'html' in data:\n self.assertEqual(len(email.alternatives), 1)\n self.assertEqual(email.alternatives[0][0], data.get('stripped-html', ''))",
"def test_parse_email(self):\n self.email.open_path = './original_email.txt'\n self.email.open_email()\n self.email.parse_email()\n sample_email = open('./parsed_email.txt', 'r')\n parsed_email = sample_email.read()\n sample_email.close()\n self.assertMultiLineEqual(parsed_email, self.email.parsed_email)",
"def test_email_multiple_domains(create_user):\n emails = [\"[email protected]\"]\n patterns = [\"*bar.com\", \"*.bar.com\", \".bar.com\"]\n assert create_user.preprocess_pattern(emails, patterns) == True\n emails = [\"[email protected]\"]\n assert create_user.preprocess_pattern(emails, patterns) == False",
"def config_email_address() -> str:\n print(\"Email address not configured.\\nPlease enter your email: \")\n email = sys.stdin.readline().strip()\n\n return email",
"def test_get_user_by_emailuser_email_get(self):\n pass",
"def validate_element(self, value):\n if super(EmailField, self).validate_element(value):\n valid_uname, valid_domain = validation_util.valid_email(value)\n if not (valid_uname and valid_domain):\n if isinstance(valid_domain, int):\n val_error = ValidationError(\n 'Field encountered improperly formatted email address: %s' % value)\n else:\n if '@' not in value:\n val_error = ValidationError(\n 'Field encountered email address with missing @ '\n 'character: %s' % value)\n else:\n val_error = ValidationError(\n 'Field encountered email address with illegal '\n 'characters: %s' % value)\n\n raise val_error\n else:\n return value"
] | [
"0.6463005",
"0.60613185",
"0.59752065",
"0.5969907",
"0.5942804",
"0.5908352",
"0.58468086",
"0.58437914",
"0.58130974",
"0.5798833",
"0.5770068",
"0.57455426",
"0.5729695",
"0.57266825",
"0.57260585",
"0.5720321",
"0.572013",
"0.56889856",
"0.5688259",
"0.56671464",
"0.5617326",
"0.5613961",
"0.55987763",
"0.55960506",
"0.55820036",
"0.5569572",
"0.555834",
"0.55491436",
"0.55452037",
"0.55300623"
] | 0.6221637 | 1 |
List all app routes | def list_routes():
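    # Build a readable "endpoint  methods  url" line for every registered URL rule, then print them sorted.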
    import urllib.parse
output = []
for rule in app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
methods = ','.join(rule.methods)
url = url_for(rule.endpoint, **options)
line = urllib.parse.unquote(
"{:35s} {:35s} {}".format(
rule.endpoint,
methods,
url
)
)
output.append(line)
for line in sorted(output):
print(line) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_routes(app):\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, rule))\n output.append(line)\n\n return sorted(output)",
"def getRoutes(self):\n pass",
"def list_routes():\n for rule in sorted(application.url_map.iter_rules(), key=lambda r: r.rule):\n print(\"{:10} {}\".format(\", \".join(rule.methods - set(['OPTIONS', 'HEAD'])), rule.rule))",
"def routes():\n routeList = []\n for profile in globalProfile():\n routeList.append(profile.route)\n return routeList",
"def get_routes():\n return sum([load_module(m).ROUTES for m in settings.INSTALLED_HANDLERS], []) + ROUTES",
"def routes(methods_filter, route_filter):\n from utils import list_routes\n\n app_routes = list_routes(app, methods_filter, route_filter)\n if app_routes:\n for line in sorted(app_routes):\n print(\"{:8s} {:{width}s} {}\".format(line['method'], line['route'], line['endpoint'],\n width=70 + line['route_expanded_length']))\n else:\n print(\"No route !\")",
"def routes_available():\n return json.dumps(\n [\"%s\" % rule for rule in app.url_map.iter_rules()],\n indent=4,\n separators=(\",\", \": \"),\n )",
"def generate_app_routes(conf: T.Dict[T.Text, T.Any]) -> T.List[web.RouteDef]:\n app_routes = [\n web.get(\"/api/verify\", rest.verify),\n web.get(\"/api/interpolate\", rest.interpolate),\n ]\n if conf.get(\"graphiql\"):\n app_routes.append(graphql.get_view(graphiql=True))\n else:\n app_routes.append(graphql.get_view(graphiql=False))\n return app_routes",
"def routes(self):\n return self._routes",
"def routes():\n import urllib.request, urllib.parse, urllib.error\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, url))\n output.append(line)\n for line in sorted(output):\n print (line)",
"def print_routes() -> None:\n mbta = MBTA(config.CT_MBTA_API_KEY)\n routes = mbta.get_routes()\n title_text = f\"List of Routes on MBTA\"\n print(f\"{title_text:=^80}\")\n for route in routes:\n print(\n f\"ID: {route['id']}, NAME: {route['attributes']['long_name']}\"\n )\n return",
"def test_list_route_for_all_namespaces(self):\n pass",
"def show_routes(self):\n routelist= [(handler.regex.pattern, handler.handler_class) for handler in self.handlers[0][1]]\n print(55*\"-\")\n print(\" Routing table (order matters) :\")\n print(55*\"-\")\n for elem in routelist:\n print('{0:<20} {1:<30} '.format(elem[0], str(elem[1])))",
"def available_routes():\r\n return (\r\n f\"Available Routes:<br/>\"\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"/api/v1.0/start<br/>\"\r\n f\"/api/v1.0/start/end\"\r\n )",
"def get_routes():\n output = [f'{\"S. No.\":6}\\t{\"Endpoint\":50}\\t{\"Method\":8}\\n']\n\n for index, rule in enumerate(app.url_map.iter_rules()):\n for i, method in enumerate(rule.methods):\n output.append(f'{index + 1 if i == 0 else \"\":<6}\\t{rule.rule:50}\\t{method:10}')\n\n try:\n output.append(f'\\n{eval(rule.endpoint).__doc__}\\n')\n except NameError:\n output.append('\\n')\n\n return Response('\\n'.join(output), 200, mimetype='text/plain')",
"def getRoutes(request):\n routes = {\n 'Item list': '/api/v1/items/',\n 'Item details': '/api/v1/item/<int:pk>/',\n\n 'JWT': '/api/v1/users/login/',\n }\n\n return Response(routes)",
"def list(ctx):\n # pylint: disable=redefined-builtin\n _list_apps(ctx.obj['config'], ctx.obj['client'])",
"def add_routes(app: web.Application):\n ActionsView.register_view(app)\n PingView.register_view(app)\n CoreShutdownView.register_view(app)\n CoreRestartView.register_view(app)\n ReloadConfigView.register_view(app)\n ListItemsView.register_view(app)\n GetItemView.register_view(app)\n ItemStatesView.register_view(app)\n ItemStateView.register_view(app)\n ActionsView.register_view(app)\n ExecuteActionView.register_view(app)\n ListModulesView.register_view(app)",
"def get_routes(iface):\n # 1: get the static_routes entry.\n res = __salt__['freebsd_common.sysrc'](\"static_routes\")\n output = res[\"stdout\"]\n if res[\"retcode\"] or not output.startswith(\"static_routes:\"):\n _raise_error(\n \"Invalid sysrc output %r\" % (res,))\n routekeys = output[len(\"static_routes:\"):].strip().split()\n if not routekeys or routekeys == ['NO']:\n return []\n result = ['static_routes+=\"']\n for key in routekeys:\n route_iface, line = _get_static_route(key)\n if route_iface != iface:\n continue\n result.append(line)\n result[0] += \" \" + key\n result[0] += '\"'\n return result",
"def routers():\n routers = []\n\n for app_controller in __app_controllers__:\n routers.append(app_controller.router())\n\n return routers",
"def static_routes(self):\n return self._static_routes",
"def show_all_routes(self):\n try:\n routes = self.admin_repository.show_all_routes()\n if routes:\n for route in routes:\n print(\"Route Id: {}\".format(route[0]))\n print(\"Route : {}\".format(route[1]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False",
"def check_all_routes(event: ApplicationCreated) -> None:\n\n def remove_prefixes(path: str) -> str:\n path = f\"/{path}\" if not path.startswith(\"/\") else path\n for prefix in prefixes:\n if path.startswith(prefix):\n prefix_length = len(prefix)\n return path[prefix_length:]\n return path\n\n app = event.app\n settings = app.registry.settings\n apinames = settings.get(\"pyramid_openapi3_apinames\")\n if not apinames:\n # pyramid_openapi3 not configured?\n logger.warning(\n \"pyramid_openapi3 settings not found. \"\n \"Did you forget to call config.pyramid_openapi3_spec?\"\n )\n return\n\n for name in apinames: # pragma: no branch\n openapi_settings = settings[name]\n\n if not settings.get(\"pyramid_openapi3.enable_endpoint_validation\", True):\n logger.info(\"Endpoint validation against specification is disabled\")\n return\n\n prefixes = _get_server_prefixes(openapi_settings[\"spec\"])\n\n paths = list(openapi_settings[\"spec\"][\"paths\"].keys())\n routes = [\n remove_prefixes(route.path) for route in app.routes_mapper.routes.values()\n ]\n\n missing = [r for r in paths if r not in routes]\n if missing:\n raise MissingEndpointsError(missing)\n\n settings.setdefault(\"pyramid_openapi3\", {})\n settings[\"pyramid_openapi3\"].setdefault(\"routes\", {})\n\n # It is possible to have multiple `add_route` for a single path\n # (due to request_method predicates). So loop through each route\n # to create a lookup of route_name -> api_name\n for route_name, route in app.routes_mapper.routes.items():\n if remove_prefixes(route.path) in paths:\n settings[\"pyramid_openapi3\"][\"routes\"][route_name] = name",
"def get_routes(self):\n return [\n (route, handler.handler_class, handler.init_kwargs)\n for route, handler in self._routes.iteritems()\n ]",
"def print_routes(self):\n\n for route in self.app.router.routes():\n route_info = route.get_info()\n if \"formatter\" in route_info:\n url = route_info[\"formatter\"]\n elif \"path\" in route_info:\n url = route_info[\"path\"]\n elif \"prefix\" in route_info:\n url = route_info[\"prefix\"]\n else:\n url = \"Unknown type of route %s\" % route_info\n\n self.logger.info(\"Route has been setup %s at %s\", route.method, url)",
"def initialize_routes(app):\n # Authentification \n app.add_resource(auth.LoginApi, '/auth/login')\n app.add_resource(auth.SignupApi, '/auth/SignupApi')\n # Intialisation et activation d'un parking\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/add')\n app.add_resource(parkingInit.ActivateParking, '/administrate/activate')\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/getall', endpoint='getall')\n # Gestion de Clients\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/get')\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/getById/<int:idUser>', endpoint='get_by_id')\n # statistiques financéres\n app.add_resource(stats.Money, '/administrate/finance/monthly', endpoint='monthly')\n app.add_resource(stats.Money, '/administrate/finance/yearly', endpoint='yearly')",
"def show_routes(request):\n\n routes = get_route_list_db('sf-muni')\n return HttpResponse(json.dumps(routes), content_type='application/json')",
"def build_routes(app):\n app.register_blueprint(workflow_plans_blueprint)\n app.register_blueprint(cache_blueprint)\n app.register_blueprint(config_blueprint)\n app.register_blueprint(dataset_blueprint)\n app.register_blueprint(graph_blueprint)\n app.register_blueprint(jobs_blueprint)\n app.register_blueprint(project_blueprint)\n app.register_blueprint(templates_blueprint)\n app.register_blueprint(version_blueprint)\n app.register_blueprint(apispec_blueprint)\n app.register_blueprint(versions_list_blueprint)",
"def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GatewayApiRouteArgs']]]]:\n return pulumi.get(self, \"routes\")",
"def routes_info():\n routes = []\n for rule in app.url_map.iter_rules():\n try:\n if rule.endpoint != 'static':\n if hasattr(app.view_functions[rule.endpoint], 'import_name'):\n import_name = app.view_functions[rule.endpoint].import_name\n obj = import_string(import_name)\n routes.append({rule.rule: \"%s\\n%s\" % (\",\".join(list(rule.methods)), obj.__doc__)})\n else:\n routes.append({rule.rule: app.view_functions[rule.endpoint].__doc__})\n except Exception as exc:\n routes.append({rule.rule: \n \"(%s) INVALID ROUTE DEFINITION!!!\" % rule.endpoint})\n route_info = \"%s => %s\" % (rule.rule, rule.endpoint)\n app.logger.error(\"Invalid route: %s\" % route_info, exc_info=True)\n # func_list[rule.rule] = obj.__doc__\n\n return jsonify(code=200, data=routes)"
] | [
"0.78200454",
"0.73298335",
"0.7275701",
"0.7271532",
"0.7183193",
"0.71806",
"0.71694124",
"0.6954345",
"0.6920193",
"0.6845532",
"0.6817044",
"0.6804565",
"0.67709076",
"0.6767728",
"0.67519045",
"0.67482877",
"0.67421955",
"0.66663814",
"0.65690017",
"0.6515906",
"0.64969194",
"0.6460918",
"0.64577824",
"0.64141524",
"0.639885",
"0.637426",
"0.6372943",
"0.6356939",
"0.6349163",
"0.6340636"
] | 0.76650923 | 1 |
Returns tuple of the mouse's x and y position. | def getMousePosition(self):
return (self.mouseData.x, self.mouseData.y) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mousePos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]",
"def mouse_coords(desktop=False):\n x, y = c_int(0), c_int(0)\n if desktop:\n mouse.SDL_GetGlobalMouseState(byref(x), byref(y))\n else:\n mouse.SDL_GetMouseState(byref(x), byref(y))\n return (int(x.value), int(y.value))",
"def mouse_position(self):\r\n # TODO: add: Now deprecated in favor of pi3d.events\r\n if self.mouse:\r\n return self.mouse.position()\r\n elif self.tkwin:\r\n return self.tkwin.winfo_pointerxy()\r\n else:\r\n return -1, -1",
"def get_mouse_coordinate(self):\n pos = pygame.mouse.get_pos()\n mov = pygame.mouse.get_rel()\n row = pos[0] // (self.CELL_WIDTH + self.MARGIN)\n col = (pos[1] - self.PANEL_HEIGHT) // (self.CELL_WIDTH + self.MARGIN)\n if mov != (0, 0) and not self.env.not_in_grid(row, col):\n return (row, col)\n return self.markerPos",
"def pos(self):\n x = (self.ec._win._mouse_x -\n self.ec._win.width / 2.) / (self.ec._win.width / 2.)\n y = (self.ec._win._mouse_y -\n self.ec._win.height / 2.) / (self.ec._win.height / 2.)\n return np.array([x, y])",
"def get_mouse_pos(self):\n return self.mouse_pos",
"def get_pos(self):\n return (self.x, self.y)",
"def get_mouse_position(self):\n raise NotImplementedError",
"def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y",
"def get_position(self):\n return (self.x_pos, self.y_pos)",
"def position(self):\n return self.x, self.y",
"def mouse_position(pos):\n x, y = pos\n m = x // SQUARE_SIZE\n n = y // SQUARE_SIZE\n return n, m",
"def read_current_mouse_position():\n import pyautogui\n pyautogui.FAILSAFE = False\n return pyautogui.position()",
"def xy(self) -> Tuple[int, int]:\n return self._x, self._y",
"def get_point(self):\n return self._x, self._y",
"def position(self):\n return self._x, self._y",
"def getPosition(self):\n\tleft = self.getLeft()\n\ttop = self.getTop()\n\treturn (left,top)",
"def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y",
"def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y",
"def getMouse():\n return pygame.mouse.get_pos()",
"def get_pick_position(self):\n x0 = int(self.GetPickX1())\n x1 = int(self.GetPickX2())\n y0 = int(self.GetPickY1())\n y1 = int(self.GetPickY2())\n return x0, y0, x1, y1",
"def getXY(self):\n return (self.X,self.Y)",
"def xy(self) -> Tuple[float, float]:\n return (self.x, self.y)",
"def mousePosition(self):",
"def get_mouse():\n if CONST.render == 'sfml':\n mouse_pos = Vector2(sfml.Mouse.get_position())/engine.screen_diff_ratio+engine.get_origin_pos()\n return mouse_pos,\\\n [sfml.Mouse.is_button_pressed(sfml.Mouse.LEFT),\n sfml.Mouse.is_button_pressed(sfml.Mouse.RIGHT),\n sfml.Mouse.is_button_pressed(sfml.Mouse.MIDDLE)]\n elif CONST.render == 'pookoo':\n return Vector2(pookoo.input.mouse.position()), [\n False,False,False\n ]\n elif CONST.render == 'kivy':\n return Vector2(), [False,False,False]",
"def mousePositionRaw(self):",
"def mousePositionRaw(self):",
"def mouse_delta():\n x, y = c_int(0), c_int(0)\n mouse.SDL_GetRelativeMouseState(byref(x), byref(y))\n return (int(x.value), int(y.value))",
"def getPos(self):\n return self.Xpos,self.Ypos",
"def get(self):\n return (self.x,self.y);"
] | [
"0.8341588",
"0.8308028",
"0.8275377",
"0.79267716",
"0.7811369",
"0.78063816",
"0.77965844",
"0.7750217",
"0.7621062",
"0.7612835",
"0.7541511",
"0.75267994",
"0.752542",
"0.74809784",
"0.7469829",
"0.7467763",
"0.744493",
"0.7417471",
"0.7417471",
"0.73869586",
"0.73104537",
"0.7308316",
"0.73030263",
"0.73022074",
"0.7297739",
"0.7275263",
"0.7275263",
"0.7263286",
"0.7243086",
"0.72269976"
] | 0.8774161 | 0 |
Will only return true once per left mouse button press. | def getMouseLeftDown(self):
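        # Consume the newly-active flag so repeated polling returns True only once per press.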
if self.mouseData.leftNewlyActive:
self.mouseData.leftNewlyActive = False
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_left_click(event):\n return (event.type == pygame.MOUSEBUTTONDOWN\n and event.button == MOUSE_LEFT)",
"def leftButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.LEFT_BUTTON)",
"def mouse_left_down(self):\n pass",
"def left(event: EventType, widget: WidgetType) -> bool:\n return event.key == KEY_LEFT",
"def mouse_left_up(self):\n pass",
"def check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n position = pygame.mouse.get_pos()\n self.left_click(position)",
"def LeftClick(self):\n self._PressLeftButton()\n self._ReleaseAllButtons()",
"def on_left_mouse_click(self, event: Event) -> None:\n\t\tself.mouse_state.set_click(event.x, event.y)",
"def leftButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.LEFT_BUTTON)",
"def is_pressed(self) -> bool:\n return True",
"def left_mouse_down_handler(self, event):\r\n\r\n self.is_left_mouse_down = True\r\n if not self.is_game_over:\r\n self.update_reset_button()\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n if tile is not None:\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)",
"def is_pressed(self) -> bool:",
"def left_mouse_up_handler(self, event):\r\n\r\n self.is_left_mouse_down = False\r\n\r\n if self.reset_button.contains_event(event.pos):\r\n self.start_new_game()\r\n elif self.is_right_mouse_down:\r\n self.shortcut_click(event)\r\n else:\r\n tile = self.board.get_event_tile(event.pos)\r\n if tile is not None and not self.is_game_over:\r\n self.update_reset_button()\r\n if self.is_new_game:\r\n self.first_move(tile)\r\n tile_reveal_result = self.board.left_click_up(tile)\r\n self.process_tile_reveal(tile_reveal_result)\r\n if not self.is_game_over:\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)",
"def _PressLeftButton(self):\n self._kit.MousePressButtons({PeripheralKit.MOUSE_BUTTON_LEFT})\n time.sleep(self.send_delay)",
"def was_pressed(self) -> bool:",
"def was_pressed(self) -> bool:\n return True",
"def mouseDragged():\n if mousePressed:\n mousePressed()",
"def can_move_left(self):\r\n return self._position > 0",
"def get_pressed(self):\n\n self.update()\n\n if self.pressed:\n self.pressed = False\n return True\n\n return False",
"def moveLeft(self):\n if self._position.x != 0:\n self._position.x -=1\n return True\n return False",
"def getMouseRightDown(self):\n if self.mouseData.rightNewlyActive:\n self.mouseData.rightNewlyActive = False\n return True\n else:\n return False",
"def rightButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.RIGHT_BUTTON)",
"def can_move_left(self):\n return self._position > 0",
"def getMouseLeft(self):\n return self.mouseData.leftActive",
"def check_left_side():\n maze.turn_right()\n #print 'checked left side'\n if maze.go():\n maze.turn_right()\n maze.turn_right()\n maze.go()\n maze.turn_right()\n # print 'i can go left'\n return True\n else:\n #print \"i can't go left\"\n maze.turn_left()\n return False",
"def mouse_on_button(self, mouse) -> bool:\n return self.x + self.w > mouse[0] > self.x and self.y + self.h > mouse[1] > self.y",
"def __check_if_got_pressed(self):\n mouse_x_pos,mouse_y_pos = pg.mouse.get_pos()\n\n if utilitiez.on_object(self.rect.x, self.rect.y, self.rect.width, self.rect.height, mouse_x_pos, mouse_y_pos,\n MOUSE_WIDTH, MOUSE_HEIGHT):\n self.__on_click()",
"def keyLeft(self):\n if pyxel.btnp(pyxel.KEY_LEFT, 10, 1) and not mapCheck(self, theFallen, -1, 0):\n mapDel(self, theFallen)\n self.x = max(-self.left, self.x - 1)\n mapAdd(self, theFallen)",
"def handle_left_shift_click(self, event):\n #placeholder to prevent handle_left_click being called\n return",
"def hits_left_or_right(self):\n if self.x >= self.scene.screen.get_width() - self.image.get_width() or self.x <= 0:\n return True\n else:\n return False"
] | [
"0.8006754",
"0.76775587",
"0.7675541",
"0.74690914",
"0.7183292",
"0.7112333",
"0.7002191",
"0.6991126",
"0.6943883",
"0.69427043",
"0.6892363",
"0.6866597",
"0.68091565",
"0.67808914",
"0.6759235",
"0.67316145",
"0.67145634",
"0.6712375",
"0.6689281",
"0.66657394",
"0.66433495",
"0.6590748",
"0.65881544",
"0.65702707",
"0.65306014",
"0.6519735",
"0.6513284",
"0.6508399",
"0.6494861",
"0.648407"
] | 0.8029612 | 0 |
Will only return true once per right mouse button press. | def getMouseRightDown(self):
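        # Consume the newly-active flag so repeated polling returns True only once per press.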
if self.mouseData.rightNewlyActive:
self.mouseData.rightNewlyActive = False
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rightButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.RIGHT_BUTTON)",
"def mouse_right_down(self):\n pass",
"def right(event: EventType, widget: WidgetType) -> bool:\n return event.key == KEY_RIGHT",
"def RightClick(self):\n self._PressRightButton()\n self._ReleaseAllButtons()",
"def rightButtonUp(self):\n\t\tautopy.mouse.toggle(False,autopy.mouse.RIGHT_BUTTON)",
"def mouse_right_up(self):\n pass",
"def _PressRightButton(self):\n self._kit.MousePressButtons({PeripheralKit.MOUSE_BUTTON_RIGHT})\n time.sleep(self.send_delay)",
"def right_mouse_down_handler(self, event):\r\n\r\n self.is_right_mouse_down = True\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n if not self.is_new_game and not self.is_game_over and tile is not None:\r\n if not self.is_left_mouse_down:\r\n change_in_unflagged_mines = tile.toggle_flag()\r\n self.mine_counter.update(change_in_unflagged_mines)\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)",
"def right_click(self, event):\n if self['text'] == '*' and self.text == '*': # player right-clicks again\n self.auto_expose()\n self.parentGrid.update_number(True)\n else: # first right-click\n self['text'] = '*'\n self['fg'] = 'black'\n self.parentGrid.update_number()",
"def joy_right(event: EventType, widget: WidgetType) -> bool:\n return event.value == JOY_RIGHT",
"def onRightDown(self, event):\n\n pass",
"def handle_right_click(self):\n if not self.game_in_progress:\n return\n if self.first_click:\n self.first_click = False\n self.timer.start(1000)\n sender = self.sender()\n row = 0\n col = 0\n for row in range(self.rows):\n for col in range(self.cols):\n if self.button_array[row][col] == sender:\n break\n else:\n continue\n break\n # print 'Received right click:', row, ',', col\n status = self.board.getcellstatus(row, col)\n if status == CellStatus.Opened:\n return\n elif status == CellStatus.Closed:\n self.remainingminecount = self.remainingminecount - 1\n self.mines_lcd.display(str(self.remainingminecount))\n self.board.setcellstatus(row, col, CellStatus.MarkedAsMine)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/Flag.png\"))\n elif status == CellStatus.MarkedAsMine:\n self.remainingminecount = self.remainingminecount + 1\n self.mines_lcd.display(str(self.remainingminecount))\n self.board.setcellstatus(row, col, CellStatus.MarkedAsSuspectedMine)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/questionmark.png\"))\n elif status == CellStatus.MarkedAsSuspectedMine:\n self.board.setcellstatus(row, col, CellStatus.Closed)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/unopenedsquare.png\"))",
"def leftButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.LEFT_BUTTON)",
"def is_left_click(event):\n return (event.type == pygame.MOUSEBUTTONDOWN\n and event.button == MOUSE_LEFT)",
"def is_pressed(self) -> bool:",
"def getMouseLeftDown(self):\n if self.mouseData.leftNewlyActive:\n self.mouseData.leftNewlyActive = False\n return True\n else:\n return False",
"def right_click(self, *args):\n return _ida_hexrays.Hexrays_Hooks_right_click(self, *args)",
"def was_pressed(self) -> bool:",
"def _right_click(self, event):\n if self.disabled is False:\n self.menu.tk_popup(event.x_root, event.y_root)",
"def is_pressed(self) -> bool:\n return True",
"def right_mouse_up_handler(self, event):\r\n\r\n self.is_right_mouse_down = False\r\n\r\n if self.is_left_mouse_down:\r\n self.shortcut_click(event)\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n if not self.is_game_over and tile is not None:\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)",
"def right_click(coords=(0, 0)):\n _perform_click_input(button='right', coords=coords)",
"def was_pressed(self) -> bool:\n return True",
"def get_pressed(self):\n\n self.update()\n\n if self.pressed:\n self.pressed = False\n return True\n\n return False",
"def mouse_left_down(self):\n pass",
"def moveRight(self):\n if self._position.x != 14:\n self._position.x +=1\n return True\n return False",
"def has_right(self):\n return self.r is not None",
"def right_is_clear(): #py:right_is_clear\n return RUR._right_is_clear_()",
"def button_press_cb(self, source, event):\n\n if event.button == MOUSE_BUTTON_RIGHT:\n pass\n return True\n elif event.button == MOUSE_BUTTON_MIDDLE:\n self.emit('begin-move')\n return True",
"def rightClick(self):\n cmdId = self.executeCommand(Command.CLICK, {'button': 2})\n return cmdId"
] | [
"0.7968415",
"0.7648867",
"0.75920916",
"0.7204233",
"0.7191432",
"0.71218544",
"0.6907627",
"0.67827153",
"0.6778556",
"0.6776632",
"0.6702281",
"0.6695338",
"0.6671457",
"0.6664631",
"0.6655712",
"0.6652684",
"0.6624108",
"0.66104114",
"0.6571914",
"0.654676",
"0.6544498",
"0.6477724",
"0.6391627",
"0.63733536",
"0.63718706",
"0.6360863",
"0.63495797",
"0.63212514",
"0.63137466",
"0.62965333"
] | 0.8019463 | 0 |
return a list of TODOs owned by current user | def get_own_todos(current_user: models.User = Depends(get_current_user),
db: Session = Depends(get_db)):
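    # Fetch only the TODO items owned by the authenticated user.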
todos = blogcrud.get_user_todos(db, current_user.id)
return todos | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self):\n current_user = fjwte.get_current_user()\n return Todo.get_items_by_user_id(current_user.id)",
"def get_queryset(self):\n user = self.request.user\n return Task.objects.filter(author=user)",
"def getResponsibleUsers():",
"def todo_list_view(request):\n\n context = {}\n queryset = Todo.objects.filter(user=request.user)\n context['lists'] = queryset\n return render(request,'todos/index.html', context)",
"async def todo(self, ctx):\n\n cursor = await db.execute(\"Select Thing from Todo where MemberID = ?\", (ctx.author.id,))\n result = await cursor.fetchall()\n\n if not result:\n return await send_embed(ctx, \"You do not have anything on your todo list.\", negative=True)\n\n result = [i[0] for i in result]\n\n embeds = []\n description = []\n\n for index, string in enumerate(result, start=1):\n\n description.append(f\"{index}. {string}\")\n\n if index % 10 == 0 or index == len(result):\n embed = discord.Embed(\n colour=discord.Colour.blue(),\n description=\"\\n\".join(description)\n )\n embed.set_author(name=str(ctx.author), icon_url=str(ctx.author.avatar_url))\n embeds.append(embed)\n description = []\n\n await self.bot.paginate(ctx, embeds)",
"def getInterestedUsers():",
"def get_todos(self):\n if self.is_new:\n # if its a new project then create the todo items from the yml\n # templates\n return self.get_yml_todos()\n else:\n # check for existing todos\n return self.get_db_todos()",
"def note_list(request):\n user = request.user\n notes = Note.objects.filter(author=user)\n serializer = NoteSerializer(notes, many=True)\n return Response(serializer.data)",
"def profile(request):\n tasklist_all = Todo.objects.filter(creator=request.user)\n tasklist_completed = Todo.objects.filter(creator=request.user, mark_done=False)\n print(tasklist_all)\n context={ \n 'tasklist_all':tasklist_all,\n 'tasklist_completed':tasklist_completed,\n }\n\n\n return render(request, 'lists/profile.html',context )",
"def get_todo_lists():\n todo_list: TodoList = TodoList.query.filter_by(user_id=g.current_user.id)\n return jsonify(success_result(data=[list.to_json() for list in todo_list]))",
"def todos(self):\r\n return Todos(self)",
"def get_todos(user_id):\n full_url = base_url + 'get-to-dos?userId=' + user_id + '&key=' + key\n response = requests.get(full_url)\n if response.status_code != 200:\n raise RequestException('Get To Dos failed with status code: {}'.format(response.status_code))\n return json.loads(response.text)",
"def assigned_todos(self):\r\n return AssignedTodos(self)",
"async def get_todos(user: str, journal_id: int) -> GetTodoItemsResponse:\n query = f\"\"\"\n select *\n from todo\n where user = '{user}' and ((completed is null) or (journal_id = {journal_id}))\n order by completed, created\n \"\"\"\n results = sql.run_query(query, fetch=True)\n return GetTodoItemsResponse(items=results)",
"def get_todo_list():\n\n # assume that a \"h264\" encoded file is complete\n return models.LibraryItem.objects.filter(h264=False)",
"def taskList(request):\n try:\n # if request.user.username == \"root\":\n # pass\n\n title = request.data.get(\"title\", None)\n desc = request.data.get(\"desc\", None)\n stat = request.data.get(\"status\", None)\n taskDueDate = request.data.get(\"taskDueDate\", None)\n sortby = request.data.get(\"sortby\", None)\n qs = Task.objects.filter(userID=request.user)\n if sortby:\n qs = qs.order_by(sortby)\n\n if title:\n qs = qs.filter(Q(title__exact=title))\n\n if desc:\n qs = qs.filter(Q(desc__exact=desc))\n\n if stat:\n qs = qs.filter(Q(status__exact=stat))\n\n if taskDueDate:\n qs = qs.filter(Q(taskDueDate__exact=taskDueDate))\n\n serializer = TaskSerializer(qs, many=True)\n if len(serializer.data) != 0:\n for i in range(len(serializer.data)):\n serializer.data[i]['userID'] = request.user.username\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n except Exception as e:\n return Response(e.args[0], status.HTTP_400_BAD_REQUEST)",
"def get_queryset(self):\n queryset = Snippet.objects.all()\n username = self.request.query_params.get('username', None)\n userqueryset = User.objects.all()\n users = userqueryset.filter(username=username)\n if users.count() != 0 and username is not None:\n queryset = queryset.filter(owner=users[0])\n return queryset\n return []",
"def list_todo_table(self):\n if self.is_todo_table_empty():\n print(\"nothing to do!\")\n return []\n else:\n return self.make_list_from_task()",
"def test_given_a_user_when_I_add_a_todo_Then_I_can_access_it_from_user_todo_collection(self):\n from .models import Tag\n from .models import TodoUser\n from .models import TodoItem\n\n user = TodoUser(\n email=u'[email protected]',\n first_name=u'Arthur',\n last_name=u'Pendragon',\n )\n self.session.add(user)\n\n tags = [u'quest', u'ni', u'knight']\n\n todo = TodoItem(user.email,\n u'Find a shrubbery', \n [u'quest', u'ni', u'knight'] \n ) \n self.session.add(todo)\n \n user_todo = user.todo_list.one()\n self.assertTrue(todo is user_todo)",
"def list(self, user, limit = 0, offset = 0, sort = None):\n userId = user['_id'] if user else None\n cursor = self.find({'ownerId': userId}, sort = sort)\n\n for r in self.filterResultsByPermission(cursor = cursor, user = user,\n level = AccessType.READ, limit = limit, offset = offset):\n yield r",
"def _add_todo_items(self):\n\n todo_list = ToDoList(day=self.day, user=self.user.user.rolllistuser)\n todo_list.save()\n\n items = [\n 'feed the cats',\n 'drive to work',\n 'read a book',\n 'eat some food',\n ]\n todo_items = []\n for item in items:\n new_item = ToDoItem(\n title=item,\n to_do_list=todo_list,\n priority=1\n )\n new_item.save()\n todo_items.append(new_item)\n return todo_items",
"def todolists(self):\r\n return tdl.GlobalTodolists(self)",
"def incomplete_tasks_user(user):\n return Task.objects.filter(user=user, done=False).exclude(folder='trash')",
"def get_queryset(self):\n user_requested = self.kwargs['user']\n self.check_object_permissions(self.request, user_requested)\n return Poll.objects.filter(created_by__username=user_requested)",
"def todo_list():\n if 'logged_in' not in session:\n return render_template('login.html')\n else:\n #conncetion to the database\n conn = sqlite3.connect(\"todo.db\")\n c = conn.cursor()\n\n #select query to get all values of task\n c.execute(\"SELECT Task_id, Description, Due_date, Modified_date FROM task WHERE status LIKE '1'\")\n result = c.fetchall()\n c.close()\n return render_template(\"task_list.html\", rows=result)",
"def get_queryset(self):\n user = self.request.user\n return Recipe.objects.filter(created_by=user)",
"def list(self, user_ids: Optional[List[UserId]]) -> List[U]:\n ...",
"def list_users(self, user=None):\n from expfactory.database.models import Participant\n\n participants = Participant.query.all()\n users = []\n for user in participants:\n users.append(self.print_user(user))\n return users",
"def list_users(self):\n raise NotImplementedError",
"def user_has_access(self, user):\n if not user: return False\n query = db.Query(TaskListMember)\n query.filter('task_list =', self)\n query.filter('user =', user)\n return query.get()"
] | [
"0.7174186",
"0.67827654",
"0.6410615",
"0.63974357",
"0.6377105",
"0.63201666",
"0.6319978",
"0.6306578",
"0.62478435",
"0.6237057",
"0.6183957",
"0.59976774",
"0.59974366",
"0.5940442",
"0.5880697",
"0.5879238",
"0.5871809",
"0.5840541",
"0.5830627",
"0.5805847",
"0.5798051",
"0.5783814",
"0.5769946",
"0.57450914",
"0.57428586",
"0.57416",
"0.5727517",
"0.56949097",
"0.56881666",
"0.56812894"
] | 0.77829075 | 0 |
Create a triangle in XZ coordinates, with Y being the thickness | def create_triangle( xsize, ysize, zsize, place=(0,0,0), rotate=(1,0,0,0) ):
b1 = create_box( (xsize,ysize,zsize), place=(0,0,0 ) )
ms = max( xsize, zsize )
angle = math.atan( zsize / xsize ) * 180 / math.pi
b2 = create_box( (2*ms, 2*ysize, 2*ms), place=(0,-EPS,0), rotate=(0,1,0,-angle) )
tr = create_cut( b1, b2 )
return relocate( tr, place, rotate ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_triangle(tup):\n x, y, z = tup[0], tup[1], tup[2]\n t_draw = turtle.Turtle()\n for index in range(3):\n t_draw.forward()",
"def triangle_shape(height):\n mot = str()\n if height == 0:\n return str()\n else :\n for i in range (height):\n esp = height-1-i\n mot = mot+ esp*\" \" + (2*i+1)*\"x\" +esp*\" \"\n if i!=height-1:\n mot = mot+ \"\\n\"\n return(mot)",
"def draw_triangle():\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)",
"def drawTriangle(t, color, x, y):\n ## t.color(color)\n ## t.begin_fill()\n for i in range(3):\n t.forward(x)\n t.right(y)",
"def triangle(self):\n \n R = Householder.triangle_operation(self)[0] \n \n return(R.round(10))",
"def triangle(halfSideLength = 15, robotHeight = -90):\n# ^ \n# / \\ \n# / \\ \n# / \\ \n# /_______\\\n# \n# | a | \n# a = halfSideLength\n\n hHalf = (halfSideLength * m.sqrt(3)/2)/2\n\n posTriangle = [\n [-hHalf,halfSideLength,robotHeight,0,0,0,'mov'],\n [-hHalf,-halfSideLength,robotHeight,0,0,0,'lin'],\n [hHalf,0,robotHeight,0,0,0,'lin'],\n [-hHalf,halfSideLength,robotHeight,0,0,0,'lin'],\n [0,0,-127,0,0,0,'mov']\n ]\n\n return posTriangle",
"def draw_triangle_outline(x1, y1,\n x2, y2,\n x3, y3, color, border_width=1):\n first_point = [x1, y1]\n second_point = [x2, y2]\n third_point = [x3, y3]\n point_list = (first_point, second_point, third_point)\n draw_polygon_outline(point_list, color, border_width)",
"def triangle(length=40.0, r=3.175 / 2):\n\t# equilateral triangle:\n\ta = np.array([0, 0])\n\tb = np.array([length, 0])\n\tc = np.array([length / 2, length * math.sin(math.pi / 3)])\n\ttri_pts = PolyLine([a, b, c, a])\n\toffs_pts = addOffset(tri_pts, r)\n\ttri_pts = centerObjects(offs_pts, tri_pts)\n\treturn tri_pts, offs_pts",
"def Triangle(points=None):\n if points is None:\n points = [[0, 0, 0], [1, 0, 0], [0.5, 0.5**0.5, 0]]\n\n if len(points) != 3:\n raise TypeError('Points must be given as length 3 np.ndarray or list')\n\n check_valid_vector(points[0], 'points[0]')\n check_valid_vector(points[1], 'points[1]')\n check_valid_vector(points[2], 'points[2]')\n\n cells = np.array([[3, 0, 1, 2]])\n return wrap(pyvista.PolyData(points, cells))",
"def draw_triangle(x, y, length=10):\n radius = length/math.sqrt(3)\n my_turtle.penup()\n my_turtle.goto(x, y+radius)\n my_turtle.pendown()\n my_turtle.right(60)\n for i in range(3):\n my_turtle.forward(length)\n my_turtle.right(120)\n\n my_turtle.left(60)\n my_turtle.penup()",
"def create_two_init_triangles(points):\n return [(points[0], points[1], points[2]),\n (points[0], points[2], points[3])]",
"def triangle_shape(n, fillchar=\"x\", spacechar=\" \"):\n width = 2 * n - 1\n return \"\\n\".join(\n (fillchar * (2 * i + 1)).center(width, spacechar) for i in range(n)\n )",
"def cross_pts_triangle(p1, p2, p3):\n return (p1[:, 0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (p1[:, 1] - p3[1])",
"def get_ztf_footprint_corners():\n x = 6.86 / 2\n return [-x, +x, +x, -x] * u.deg, [-x, -x, +x, +x] * u.deg",
"def create_inner_tri(point, v1, v2, v3):\n return [(point, v1, v2), (point, v1, v3), (point, v2, v3)]",
"def triangle(t):\n if int(t) % 2 == 0:\n y = t - int(t)\n else:\n y = 2 - (t - int(t) + 1)\n return abs(y)",
"def triangle(p1, p2, p3, width, height):\r\n v1 = vec2(round(p1.x), round(p1.y))\r\n v2 = vec2(round(p2.x), round(p2.y))\r\n v3 = vec2(round(p3.x), round(p3.y))\r\n if (v1.y > v2.y):\r\n temp = v1\r\n v1 = v2\r\n v2 = temp\r\n if (v1.y > v3.y):\r\n temp = v1\r\n v1 = v3\r\n v3 = temp\r\n if (v2.y > v3.y):\r\n temp = v2\r\n v2 = v3\r\n v3 = temp\r\n if (v1.y != v2.y): k_12 = (v2.x - v1.x)/(v2.y - v1.y)\r\n if (v1.y != v3.y): k_13 = (v3.x - v1.x)/(v3.y - v1.y)\r\n if (v2.y != v3.y): k_23 = (v3.x - v2.x)/(v3.y - v2.y)\r\n if (v1.y == v2.y):\r\n if (v1.x < v2.x):\r\n xl, xu = v1.x, v2.x\r\n left = False\r\n else:\r\n xl, xu = v2.x, v1.x\r\n left = True\r\n if (v1.y >= 0 and v1.y < height):\r\n xl = max(xl, 0)\r\n xu = min(xu, width - 1)\r\n for x in range(xl, xu + 1):\r\n yield vec2(x, v1.y)\r\n else:\r\n left = v2.x < k_13*(v2.y - v1.y) + v1.x\r\n if (left):\r\n k1, k2 = k_12, k_13\r\n else:\r\n k1, k2 = k_13, k_12\r\n yl = max(v1.y, 0)\r\n yu = min(v2.y, height)\r\n for y in range(yl, yu):\r\n xl = max(math.floor(k1*(y - v1.y) + v1.x + 0.5), 0)\r\n xu = min(math.floor(k2*(y - v1.y) + v1.x + 0.5), width - 1)\r\n for x in range(xl, xu + 1):\r\n yield vec2(x, y)\r\n if (v2.y == v3.y):\r\n if (v2.x < v3.x):\r\n xl, xu = v2.x, v3.x\r\n else:\r\n xl, xu = v3.x, v2.x\r\n if (v2.y >= 0 and v2.y < height):\r\n xl = max(xl, 0)\r\n xu = min(xu, width - 1)\r\n for x in range(xl, xu + 1):\r\n yield vec2(x, v2.y)\r\n else:\r\n if (left):\r\n k1, k2 = k_23, k_13\r\n t1, t2 = v2, v1\r\n else:\r\n k1, k2 = k_13, k_23\r\n t1, t2 = v1, v2\r\n yl = max(v2.y, 0)\r\n yu = min(v3.y + 1, height)\r\n for y in range(yl, yu):\r\n xl = max(math.floor(k1*(y - t1.y) + t1.x + 0.5), 0)\r\n xu = min(math.floor(k2*(y - t2.y) + t2.x + 0.5), width - 1)\r\n for x in range(xl, xu + 1):\r\n yield vec2(x, y)",
"def triangle(self):\n [r,c] = self.D\n m = min(r,c)\n S = self\n T = zeros(r,c)\n while m > 0:\n NoLigne = 0\n while S[NoLigne, 0] == 0 and (NoLigne < m - 1):\n NoLigne += 1\n S = S.swap(NoLigne,0)\n if S[0, 0] != 0:\n pivot = S[0,0]\n for k in range(1,m):\n if S[k,0] != 0:\n S = S.comb_lignes(pivot, -S[k,0],k,0)\n #print(\"pivot = \"+str(pivot))\n #print(\"S dans for :\")\n #print(S)\n T = T.remplace_ligned(r - m,S.F)\n #print(\"Évolution de T :\")\n #print(T)\n S = S.decoupe()\n m -= 1\n return T",
"def draw_triangle(alpha, x0, width, orientation, base=10,\n **kwargs):\n x0, y0 = x0\n x1 = x0 + width\n y1 = y0 + alpha*(x1 - x0)\n plt.plot([x0, x1], [y0, y1], 'k')\n if (alpha >= 0 and orientation == 'up') \\\n or (alpha < 0 and orientation == 'down'):\n plt.plot([x0, x1], [y1, y1], 'k')\n plt.plot([x0, x0], [y0, y1], 'k')\n # plt.plot lines have nice rounded caps\n # plt.hlines(y1, x0, x1, **kwargs)\n # plt.vlines(x0, y0, y1, **kwargs)\n corner = [x0, y1]\n elif (alpha >= 0 and orientation == 'down') \\\n or (alpha < 0 and orientation == 'up'):\n plt.plot([x0, x1], [y0, y0], 'k')\n plt.plot([x1, x1], [y0, y1], 'k')\n # plt.hlines(y0, x0, x1, **kwargs)\n # plt.vlines(x1, y0, y1, **kwargs)\n corner = [x1, y0]\n else:\n raise ValueError(r\"Need $\\alpha\\in\\mathbb{R} and orientation\\in{'up', 'down'}\")\n return corner",
"def createThreePoints(cls, x1, y1, z1, x2, y2, z2, x3, y3, z3):\n d = np.array([x2 - x1, y2 - y1, z2 - z1])\n p0 = np.array([x1, y1, z1])\n return cls(p0, d)",
"def _triangle(self, c, sigma):\n triangle_x = (-abs(c[0] - self._neigx)) + sigma\n triangle_y = (-abs(c[1] - self._neigy)) + sigma\n triangle_x[triangle_x < 0] = 0.\n triangle_y[triangle_y < 0] = 0.\n return outer(triangle_x, triangle_y)",
"def tri(x, y, colour, invert=0, a=100):\n x *= cell_size\n y *= cell_size\n\n noStroke()\n fill(colour[0], colour[1], colour[2], a)\n if invert == 1: # top right\n triangle(x, y, x + cell_size, y, x + cell_size, y + cell_size)\n cx, cy = cell_size + x - in_x, y + in_y\n elif invert == 2: # bottom right\n triangle(x + cell_size, y, x + cell_size, y + cell_size, x, y + cell_size)\n cx, cy = cell_size + x - in_x, cell_size + y - in_y\n elif invert == 3: # bottom left\n triangle(x, y, x + cell_size, y + cell_size, x, y + cell_size)\n cx, cy = x + in_x, cell_size + y - in_y\n else: # top left\n triangle(x, y, x + cell_size, y, x, y + cell_size)\n cx, cy = x + in_x, y + in_y\n \n # randomly draw a circle inside some triangles?\n # calculate x/y distance from corner\n # maybe randomly draw the incircle in the space not filled by the triangle\n \n if randint(0, 100) > 97 and incircles:\n fill(random(360), 50, 90, 100)\n circle(cx, cy, int(in_x*2))",
"def get_triangles( self, N ):\n\n # store N as an instance variable\n self.N = N\n\n # initialize array to store locations of points for all triangles in the\n # tessellation sequence\n self.triangles = np.zeros( ( self.N, 3, 2 ) )\n\n # define points of the first triangle in the tessellation sequence\n point_c = np.array( [ 0, 0 ] )\n point_b = self.a * np.array( [ np.cos( self.C ), np.sin( self.C ) ] )\n point_a = np.array( [ self.b, 0 ] )\n\n # stack the points into a single array of shape (3, 2 )\n triangle = np.vstack( [ point_c, point_b, point_a ] )\n\n # loop over the number of triangles in the sequence\n for i in range( self.N ):\n\n # store the points of the i-th triangle in the array\n self.triangles[ i ] = triangle\n\n # compute the next triangle in the tessellation sequence\n triangle = self.next_triangle( triangle = triangle )\n\n # shift the next triangle in the tessellation sequence such that its\n # point C is in the same location as point B of the previous triangle\n triangle += ( self.triangles[ i - 1, 1 ] - self.triangles[ 0, 0 ] )",
"def triangle(height):\n for row in range(height):\n for column in range(1,row+2):\n print(CHAR, end = '')\n print()",
"def generate(self):\n inside = self.crystal.is_inside(self.x,self.y,self.z)\n X = np.vstack((self.x[inside],self.y[inside],self.z[inside]))\n return self.rot.rotate(X)",
"def xzplane(draw, r, y, shift = np.array([1000, 1000, 0, 0]), scale = 300):\n extent = 2.8\n pln = np.array(\n [\n [-extent,y,0],\n [extent,y,0],\n [extent,y,extent*2],\n [-extent,y,extent*2]\n ]\n )\n pln = np.dot(pln, np.transpose(r))\n pln = pln * scale + shift[:3]\n draw.polygon([(pln[0][0],pln[0][1]),(pln[1][0],pln[1][1]),(pln[2][0],pln[2][1]),(pln[3][0],pln[3][1])], (0,102,255,70))",
"def draw_triangle_filled(x1, y1,\n x2, y2,\n x3, y3, color):\n\n first_point = [x1, y1]\n second_point = [x2, y2]\n third_point = [x3, y3]\n point_list = (first_point, second_point, third_point)\n draw_polygon_filled(point_list, color)",
"def get_triangle(radius, xcenter, ycenter, numberOfVertices):\n\n\n angles = [0.0, (2. / 3.0) * math.pi, (4.0 / 3.0) * math.pi]\n\n vertices = []\n\n for angle in range(0, len(angles)):\n x = radius * math.cos(angles[angle]) + xcenter\n y = radius * math.sin(angles[angle]) + ycenter\n vertices.append(x) # append the x value to the vertex list\n vertices.append(y) # append the y value to the vertex list\n\n # convert the vertices list to pyGlet vertices format\n vertexList = pyglet.graphics.vertex_list(numberOfVertices, ('v2f', vertices))\n\n return vertexList",
"def _plot_triangle(self, image, x, y, rotation, side_length, color):\n\n ## Precalculate the 'unit' triangle's centered coords, facing east\n coords = [(-0.283, -0.5), (-0.283, 0.5), (0.567, 0)]\n ## Calculate the triangle's side length scaling factor\n scale_factor = side_length / 1\n\n ## Scale, rotate, and translate the coords about the origin\n for index, coord in enumerate(coords):\n _x, _y = self._rotate_coordinate(*coord, -rotation)\n _x, _y = self._scale_coordinate(_x, _y, scale_factor)\n coords[index] = self._translate_coordinate(_x, _y, x, y)\n\n draw = ImageDraw.Draw(image, \"RGBA\")\n draw.polygon(coords, fill=color, outline=color)\n del draw\n\n return image",
"def _generate_equilateral_triangle_around_point(\n center_point: Coordinate, altitude: Decimal = TRIANGLE_ALTITUDE\n) -> Tuple[Coordinate, Coordinate, Coordinate]:\n # This is how you math I guess?\n side_length = 2 * altitude / Decimal(sqrt(3))\n\n ay = center_point.y + altitude / 2\n ax = center_point.x\n a = Coordinate(ay, ax)\n\n by = center_point.y - altitude / 2\n bx = center_point.x + side_length / 2\n b = Coordinate(by, bx)\n\n cy = center_point.y - altitude / 2\n cx = center_point.x - side_length / 2\n c = Coordinate(cy, cx)\n\n return (a, b, c)"
] | [
"0.67842567",
"0.66823167",
"0.6456786",
"0.6393206",
"0.6376432",
"0.6367165",
"0.6350288",
"0.6309972",
"0.62916946",
"0.6245929",
"0.61663145",
"0.61014986",
"0.6092199",
"0.60713196",
"0.6067632",
"0.60645217",
"0.6037411",
"0.60368097",
"0.6018011",
"0.5999398",
"0.5979797",
"0.5960023",
"0.59274673",
"0.59111106",
"0.5900213",
"0.5894846",
"0.589384",
"0.5885615",
"0.5877432",
"0.5867424"
] | 0.7213213 | 0 |
Return a directory for tag on branch or raise BuildNotFound | def find_build(branch, tag, build_path=None, old_build_path=None):
for directory_format in [build_path, old_build_path]:
if directory_format is None:
continue
loc = directory_format % (branch, tag)
if isdir(loc):
return loc
raise BuildNotFound(branch, tag) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def b2d(homedir, branch):\n return os.path.join(homedir, 'integ', branch, 'integ')",
"def tag(referencefile):\n dirpath = path.abspath(referencefile)\n\n if path.isdir(dirpath):\n dircontents = listdir(dirpath)\n else:\n dirpath = path.split(dirpath)[0]\n dircontents = listdir(dirpath)\n\n while not 'tag' in dircontents:\n dirpath = path.split(dirpath)[0]\n dircontents = listdir(dirpath)\n if len(dircontents) == 0 or path.split(dirpath)[1] == 'chemistry':\n print(\"tag file not found\")\n return None\n\n return path.join(dirpath, 'tag')",
"def _branchPath(self, path):\n assert self.branch_dir is not None\n return os.path.join(self.branch_dir, path)",
"def bzr_branch(uri, branch):\n try:\n repo = bzr_repo(uri)\n for name, branch_uri in bzr_branches(repo):\n if name == branch:\n return name, branch_uri\n except Exception as e:\n log.error(e)\n raise\n raise Exception('branch %s cannot be found on repository %s' %(branch, uri))",
"def _get_tag(self, current_path, commit_sha):\n command = [\"git\", \"describe\", \"--tags\", commit_sha]\n p = subprocess.Popen(\n command,\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n return output.decode(\"utf-8\").strip()\n elif \"fatal: no tags can describe '{}'.\".format(commit_sha) in error.decode(\n \"utf-8\"\n ).lower():\n return None\n elif \"fatal: no names found\" in error.decode(\"utf-8\").lower():\n return None\n else:\n raise Exception(\n \"Error [{}] occurred while executing [{}] command to get nearest tag associated with branch.\".format(\n error.decode(\"utf-8\"), \" \".join(command)\n )\n )",
"def find_git_repository(self, path):\n while path is not None:\n git_path = os.path.join(path,'.git')\n if os.path.exists(git_path) and os.path.isdir(git_path):\n return path\n path = os.path.dirname(path)\n return None",
"def find_build_dir(hw, r):\n os.chdir(hw)\n find_cache(hw, r);\n os.chdir(\"..\")",
"def find_git_dir(directory):\n directory = os.path.abspath(directory)\n if not os.path.exists(directory):\n return \"\"\n\n for _ in range(10):\n path = os.path.join(directory, \".git\")\n if os.path.exists(path):\n return directory\n\n if directory == \"/\":\n return \"\"\n\n directory = os.path.abspath(os.path.join(directory, os.pardir))\n\n return \"\"",
"def build_path(cls, relpath):\r\n if os.path.basename(relpath).startswith('BUILD'):\r\n return relpath\r\n else:\r\n return os.path.join(relpath, 'BUILD')",
"def get_previous_path(cls,tag) :\n if re.search('./',tag) :\n a,tag = os.path.split(tag)\n l = cls.Variants[tag]\n if len(l) == 2 :\n return os.path.join(l[0],l[1])\n else :\n return l[0]",
"def get_git_dir(tree):\n\n return os.path.join(tree, \".git\")",
"def build_directory(self) -> Optional[str]:\n return self.configuration.build_directory",
"def check_build_exists(self):\n path = self.base_dir + \"/\" + self.app_name + \"/\" + \"build\"\n from django_swagger_utils.core.utils.check_path_exists import check_path_exists\n build_dir = check_path_exists(path)\n if build_dir:\n raise Exception(\"Build Directory Already Exist, please run update_specs_build\")",
"def test_should_raise_if_git_repo_not_exists(self): # pylint: disable=invalid-name\n with self.assertRaises(BuildFailedException) as context:\n set_version_from_git_tag(self.project, self.logger)\n err_msg = str(context.exception)\n self.assertTrue(\n \"Directory `basedir` isn't git repository root.\" in err_msg)",
"def get_branch(project_root: str) -> str:\n if os.path.isfile(os.path.join(os.path.abspath(project_root), os.pardir, os.pardir) + '/VERSION'):\n with open(os.path.join(os.path.abspath(project_root), os.pardir, os.pardir) + '/VERSION') as f:\n return f.read().replace('\\n', '')\n\n child = subprocess.Popen('cd {0} && git rev-parse --abbrev-ref HEAD'.format(project_root),\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL)\n exit_code = child.wait()\n branch = child.stdout.read().decode()\n if len(branch) != 0:\n branch = branch.replace('\\n', '')\n else:\n return 'unknown'\n if exit_code == 0 and branch != 'HEAD':\n return branch\n else:\n return 'unknown'",
"def collect_single(self, path, req_tag=True):\n\n #print('? %s' % path)\n\n # For local files, strip download path.\n # Also ignore any parent directories.\n if path.startswith(self.dlpath):\n folder = os.path.basename(os.path.dirname(path[len(self.dlpath):]))\n else:\n folder = os.path.basename(os.path.dirname(path))\n\n # The folder contains the tokens needed to perform\n # matching of project, gitref, etc.\n rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder)\n if rinfo is None or len(rinfo) == 0:\n print('Incorrect folder/file name format for %s' % folder)\n return None\n\n info = dict(rinfo)\n\n # Ignore AppVeyor Debug builds\n if info.get('bldtype', '').lower() == 'debug':\n print('Ignoring debug artifact %s' % folder)\n return None\n\n tag = info.get('tag', None)\n if tag is not None and (len(tag) == 0 or tag.startswith('$(')):\n # AppVeyor doesn't substite $(APPVEYOR_REPO_TAG_NAME)\n # with an empty value when not set, it leaves that token\n # in the string - so translate that to no tag.\n del info['tag']\n\n # Perform matching\n unmatched = list()\n for m,v in self.match.items():\n if m not in info or info[m] != v:\n unmatched.append(m)\n\n # Make sure all matches were satisfied, unless this is a\n # common artifact.\n if info.get('p', '') != 'common' and len(unmatched) > 0:\n # print('%s: %s did not match %s' % (info.get('p', None), folder, unmatched))\n return None\n\n return Artifact(self, path, info)",
"def tree_lookup(self, target_path, commit):\n segments = target_path.split(\"/\")\n tree_or_blob = commit.tree\n path = ''\n while segments:\n dirent = segments.pop(0)\n if isinstance(tree_or_blob, pygit2.Tree):\n if dirent in tree_or_blob:\n tree_or_blob = self.repo[tree_or_blob[dirent].oid]\n # self.logger.debug('%s in %s' % (dirent, path))\n if path:\n path += '/'\n path += dirent\n else:\n # This is probably because we were called on a\n # commit whose parent added a new directory.\n self.logger.debug(' %s not in %s in %s' %\n (dirent, path, commit.hex[:8]))\n return None\n else:\n self.logger.debug(' %s not a tree in %s' %\n (tree_or_blob, commit.hex[:8]))\n return None\n return tree_or_blob",
"def git_branch(self, app, branch):\n if app == self.PROJECT_NAME:\n app_path = self.PROJECT_DIR\n else:\n raise ValueError('Unknown app')\n\n with lcd(app_path):\n self.local('git pull && git checkout %s' % branch)\n\n self.display('%s has been successfully switched to tag/branch %s.' % (app, branch), color='green')",
"def checkGit(directory):",
"def git_dir_checkout_branch(c, org_name, repo_name, remote, branch):\n print('Fetching updates from Git repository')\n c.run('git remote add {remote} [email protected]:{org_name}/{repo_name}.git'.format(remote=remote, org_name=org_name, repo_name=repo_name),\n warn=True)\n c.run('git fetch --all')\n\n print('Checking out {}/{}'.format(remote, branch))\n try:\n c.run('git checkout {}/{}'.format(remote, branch))\n except Failure:\n # probably branch is tag name\n print('Checking out failed. Assuming this is a tag, attempting to checkout without stating remote')\n c.run('git checkout {}'.format(branch))",
"def get_repository_dir():\n expected = os.path.abspath(__file__).rsplit('/', 2)[0]\n\n # get_path verifies the existance of these directories\n get_path(expected, 'data')\n get_path(expected, 'latex')\n\n return expected",
"def _find_repo() -> str:\n\tstart = os.path.abspath(os.getcwd())\n\tcurrent = start\n\twhile current != \"/\":\n\t\trepo = os.path.join(current, \".repo\")\n\t\tif os.path.exists(repo):\n\t\t\tLOGGER.debug(\"Found .repo at %s\", repo)\n\t\t\treturn repo\n\t\tcurrent = os.path.dirname(current)\n\traise RepoNotFoundError(\"Not .repo found in any directory along {}\".format(start))",
"def next_deploy_tag(location):\n ensure_dir(location)\n with utils.cd(location):\n timestamp = datetime.utcnow()\n date = timestamp.strftime('%F')\n cmd = ['/usr/bin/git', 'tag', '--list']\n tag_fmt = os.path.join(TAG_PREFIX, '{}', '*')\n cmd.append(tag_fmt.format(date))\n seq = len(subprocess.check_output(cmd).splitlines()) + 1\n tag_fmt = os.path.join(TAG_PREFIX, '{0}', '{1:04d}')\n return tag_fmt.format(date, seq)",
"def repo_dir(repo, *path, mkdir=False):\n path = repo_path(repo, *path)\n if os.path.exists(path):\n if (os.path.isdir(path)):\n return path\n else:\n raise Exception(\"Not a directory %s\" % path)\n\n if mkdir:\n os.makedirs(path)\n return path\n else:\n return None",
"def test_find_builder_dir_bad_version_dir_name(mock_fs: testing.MockFs):\n mock_fs.add_file('path/to/ds0/9.9./features.json')\n mock_fs.add_file('path/to/ds0/1.0.o/features.json')\n mock_fs.add_file('path/to/ds0/other/features.json')\n assert _find_builder_dir('ds0') is None\n\n mock_fs.add_file('path/to/ds0/1.1.0/features.json')\n assert _find_builder_dir('ds0') == 'path/to/ds0/1.1.0'",
"def repo_tag(self):\n return '%s/gcloud/%s' % (constants.APPENGINE_REGISTRY, self._tag)",
"def repo_dir(repo, *path, mkdir=False):\n\n path = repo_path(repo, *path)\n\n if os.path.exists(path):\n if (os.path.isdir(path)):\n return path\n else:\n raise Exception(\"Not a directory %s\" % path)\n\n if mkdir:\n os.makedirs(path)\n return path\n else:\n return None",
"def _tag_to_sha1(self):\n def get_sha1(url):\n # Ceph (and other projects) uses annotated tags for releases. This\n # has the side-effect of making git ls-remote return the sha1 for\n # the annotated tag object and not the last \"real\" commit in that\n # tag. By contrast, when a person (or a build system) issues a\n # \"git checkout <tag>\" command, HEAD will be the last \"real\" commit\n # and not the tag.\n # Below we have to append \"^{}\" to the tag value to work around\n # this in order to query for the sha1 that the build system uses.\n return repo_utils.ls_remote(url, \"%s^{}\" % self.tag)\n\n git_url = repo_utils.build_git_url(self.project)\n result = get_sha1(git_url)\n # For upgrade tests that are otherwise using ceph-ci.git, we need to\n # also look in ceph.git to lookup released tags.\n if result is None and 'ceph-ci' in git_url:\n alt_git_url = git_url.replace('ceph-ci', 'ceph')\n log.info(\n \"Tag '%s' not found in %s; will also look in %s\",\n self.tag,\n git_url,\n alt_git_url,\n )\n result = get_sha1(alt_git_url)\n\n if result is None:\n raise CommitNotFoundError(self.tag, git_url)\n return result",
"def getPath(self, uri):\n if os.path.isdir(uri):\n return uri\n else:\n raise RepoException(\"The repo path does not exist: %s\" % uri)",
"def tag(self):\n if self.method == 'buildArch':\n # Note: buildArch tag will be an int here.\n return self.params[1]\n if self.method in ('createdistrepo', 'distRepo', 'newRepo', 'runroot',\n 'tagBuild', 'waitrepo'):\n return self.params[0]\n if self.method == 'tagNotification':\n return self.params[2]\n if self.method == 'buildMaven':\n return self.params[1]['name']"
] | [
"0.5977417",
"0.5876411",
"0.5687731",
"0.567183",
"0.560582",
"0.54256225",
"0.5397359",
"0.5381051",
"0.52119887",
"0.51686037",
"0.5160606",
"0.5136722",
"0.51248574",
"0.51212746",
"0.5051708",
"0.5051698",
"0.5049382",
"0.50226355",
"0.5020252",
"0.50200105",
"0.5010866",
"0.4992363",
"0.49773398",
"0.4969287",
"0.4957146",
"0.49526972",
"0.49526122",
"0.49511105",
"0.49495047",
"0.49474692"
] | 0.7497679 | 0 |
Part of the public interface for a MetricWatcher. Should take a rule config (from soaconfigs) plus some additional metadata (e.g., auth information) and return a fully-formed MetricWatcher subclass | def from_config(
cls: Type[MetricWatcherT],
config: BaseRule,
on_failure_callback: Callable[['MetricWatcher'], None],
auth_callback: Optional[Callable[[], Any]] = None,
) -> MetricWatcherT:
raise NotImplementedError() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_metric(self) -> EvalMetric:\n pass",
"def new(ruletype, **kwargs):\n try:\n ruleclass = TYPE_MAP[ruletype]\n except KeyError:\n raise error.InvalidRule('Unrecognized rule type: %s' % ruletype)\n\n try:\n return ruleclass(**kwargs)\n except TypeError:\n log.error('BADNESS. ruletype: %s, data: %s', ruletype, kwargs)\n raise\n #raise error.InvalidRule(\n # '%s does not work that way.\\nDetails: %s.\\nData: %s' % (\n # ruletype, err, kwargs))",
"def __init__(self, *args, **kwargs):\n self.metric_type = kwargs['acc_metric'] \n\n if self.metric_type == 'Accuracy':\n self.metric_object = Accuracy(*args, **kwargs) \n elif self.metric_type == 'AveragePrecision':\n self.metric_object = AveragePrecision(*args, **kwargs)\n elif self.metric_type == 'mAP':\n self.metric_object = MAP(*args, **kwargs)\n elif self.metric_type == 'SSD_AP':\n self.metric_object = SSD_AP(*args, **kwargs)\n else:\n self.metric_type = None",
"def __init__(self, config, label, items, **kwargs):\n\n self.config = config\n self.label = label\n self._app_cache = None\n self._metric_cache = None\n self._app_helper = kwargs.get('app_helper')\n\n # Other important configuration values\n required = set(['module', 'method', 'metric'])\n attrs = set(['notifier', 'app']) | required\n for attr in attrs:\n setattr(self, '_' + attr, None)\n self.additional = {}\n\n # Process configuration\n for option, value in items:\n if option in attrs:\n setattr(self, '_' + option, value)\n required.discard(option)\n else:\n self.additional[option] = value\n\n # Add app to required if necessary\n if option == 'app' and not self._app_helper:\n required.add('app_helper')\n\n # Make sure we got the essential configuration\n if required:\n raise Exception(\"Missing configuration options for %s: %s\" %\n (label, ', '.join(required)))\n\n # Grab the method we're operating on\n method_cls = utils.import_class_or_module(self._module)\n if inspect.ismodule(method_cls):\n method = raw_method = getattr(method_cls, self._method)\n kind = 'function'\n else:\n method, raw_method, kind = _get_method(method_cls, self._method)\n self._method_cache = method\n\n # We need to wrap the replacement if its a static or class\n # method\n if kind == 'static method':\n meth_wrap = staticmethod\n elif kind == 'class method':\n meth_wrap = classmethod\n else:\n meth_wrap = lambda f: f\n\n # Wrap the method to perform statistics collection\n @functools.wraps(method)\n def wrapper(*args, **kwargs):\n # Deal with class method calling conventions\n if kind == 'class method':\n args = args[1:]\n\n # Handle app translation\n label = None\n if self._app:\n args, kwargs, label = self.app(*args, **kwargs)\n\n # Run the method, bracketing with statistics collection\n # and notification\n value = self.metric.start()\n result = method(*args, **kwargs)\n self.notifier(self.metric(value), self.metric.vtype,\n label or self.label)\n\n return result\n # Save some introspecting data\n wrapper.tach_descriptor = self\n wrapper.tach_function = method\n\n # Save what we need\n self._method_cls = method_cls\n self._method_wrapper = meth_wrap(wrapper)\n self._method_orig = raw_method\n\n setattr(self._method_cls, self._method, self._method_wrapper)",
"def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],\n metric_time: datetime.datetime, *args, **kwargs):\n\n raise NotImplementedError",
"def _get_metric_config(self, config):\n metric_config = dict()\n metric_config['include_metrics'] = config.get('include_metrics', {})\n metric_config['exclude_metrics'] = config.get('exclude_metrics', {})\n return metric_config",
"def make_metric(self, name, metadata=None, **kwargs):\n return make_metric(name, metadata=metadata, accessor=self.accessor, **kwargs)",
"def make_metric(self, name, metadata=None, **kwargs):\n return make_metric(name, metadata=metadata, accessor=self.accessor, **kwargs)",
"def __init__(self):\n super().__init__()\n self.metric = 'FALLOUT'",
"def __init__(self, rule):\n Rule.__init__(self)\n self.__rule = rule",
"def __init__(self, rule):\n Rule.__init__(self)\n self.__rule = rule",
"def __init__(self, rule):\n Rule.__init__(self)\n self.__rule = rule",
"def __init__(self, rule):\n Rule.__init__(self)\n self.__rule = rule",
"def __init__(self, rule):\n Rule.__init__(self)\n self.__rule = rule",
"def create_metric(self) -> 'LossMetric':\n raise NotImplementedError()",
"def to_profiler_rule_config_dict(self):\n profiler_rule_config_request = {\n \"RuleConfigurationName\": self.name,\n \"RuleEvaluatorImage\": self.image_uri,\n }\n\n profiler_rule_config_request.update(build_dict(\"InstanceType\", self.instance_type))\n profiler_rule_config_request.update(build_dict(\"VolumeSizeInGB\", self.volume_size_in_gb))\n profiler_rule_config_request.update(\n build_dict(\"LocalPath\", self.container_local_output_path)\n )\n profiler_rule_config_request.update(build_dict(\"S3OutputPath\", self.s3_output_path))\n\n if self.rule_parameters:\n profiler_rule_config_request[\"RuleParameters\"] = self.rule_parameters\n for k, v in profiler_rule_config_request[\"RuleParameters\"].items():\n profiler_rule_config_request[\"RuleParameters\"][k] = str(v)\n\n return profiler_rule_config_request",
"def __init__(\n self, decorator_metric_name: str, deprecation_warning: str | None = None\n ):\n\n # Parameterize the decorator metric name.\n # (Ignore spurious mypy complaints - https://github.com/python/mypy/issues/2427)\n self._decorator = gather_metrics(decorator_metric_name, self._decorator) # type: ignore\n self._deprecation_warning = deprecation_warning",
"def add_metric_filter(self, id: str, *, filter_pattern: \"IFilterPattern\", metric_name: str, metric_namespace: str, default_value: typing.Optional[jsii.Number]=None, metric_value: typing.Optional[str]=None) -> \"MetricFilter\":\n ...",
"def __init__(self, **kwargs):\n\n super(NUFlowstatisticsaggregationrule, self).__init__()\n\n # Read/Write Attributes\n \n self._name = None\n self._matching_criteria = None\n self._description = None\n self._aggregation_criteria = None\n self._associated_traffic_type_id = None\n \n self.expose_attribute(local_name=\"name\", remote_name=\"name\", attribute_type=str, is_required=True, is_unique=True)\n self.expose_attribute(local_name=\"matching_criteria\", remote_name=\"matchingCriteria\", attribute_type=str, is_required=True, is_unique=False, choices=[u'L4_SERVICE', u'L4_SERVICE_GROUP'])\n self.expose_attribute(local_name=\"description\", remote_name=\"description\", attribute_type=str, is_required=False, is_unique=False)\n self.expose_attribute(local_name=\"aggregation_criteria\", remote_name=\"aggregationCriteria\", attribute_type=str, is_required=True, is_unique=False, choices=[u'FORWARD_AND_REVERSE_TRAFFIC_PORT_AGG'])\n self.expose_attribute(local_name=\"associated_traffic_type_id\", remote_name=\"associatedTrafficTypeID\", attribute_type=str, is_required=True, is_unique=False)\n \n\n self._compute_args(**kwargs)",
"def __call__(self, config: Mapping[Sensor, Sequence[Action]], tracks: Set[Track],\n metric_time: datetime.datetime, *args, **kwargs):\n\n # Reward value\n config_metric = 0\n\n predicted_sensors = list()\n memo = {}\n # For each sensor in the configuration\n for sensor, actions in config.items():\n predicted_sensor = copy.deepcopy(sensor, memo)\n predicted_sensor.add_actions(actions)\n predicted_sensor.act(metric_time)\n if isinstance(sensor, Sensor):\n predicted_sensors.append(predicted_sensor) # checks if its a sensor\n\n # Create dictionary of predictions for the tracks in the configuration\n predicted_tracks = set()\n for track in tracks:\n predicted_track = copy.copy(track)\n predicted_track.append(self.predictor.predict(predicted_track, timestamp=metric_time))\n predicted_tracks.add(predicted_track)\n\n for sensor in predicted_sensors:\n\n # Assumes one detection per track\n detections = {detection.groundtruth_path: detection\n for detection in sensor.measure(predicted_tracks, noise=False)\n if isinstance(detection, TrueDetection)}\n\n for predicted_track, detection in detections.items():\n # Generate hypothesis based on prediction/previous update and detection\n hypothesis = SingleHypothesis(predicted_track.state, detection)\n\n # Do the update based on this hypothesis and store covariance matrix\n update = self.updater.update(hypothesis)\n\n previous_cov_norm = np.linalg.norm(predicted_track.covar)\n update_cov_norm = np.linalg.norm(update.covar)\n\n # Replace prediction with update\n predicted_track.append(update)\n\n # Calculate metric for the track observation and add to the metric\n # for the configuration\n metric = previous_cov_norm - update_cov_norm\n config_metric += metric\n\n if self.method_sum is False and len(detections) != 0:\n\n config_metric /= len(detections)\n\n # Return value of configuration metric\n return config_metric",
"def __init__(self, hass, *, config_entry, controller):\n self.controller = controller\n self.config_entry = config_entry\n\n update_interval = timedelta(seconds=MIN_SCAN_INTERVAL)\n\n super().__init__(\n hass,\n _LOGGER,\n name=DOMAIN,\n update_interval=update_interval,\n )",
"def __new__(cls, conf):\n # Call is already for a subclass, so pass it through\n RunnerClass = cls\n return super(Runner, cls).__new__(RunnerClass)",
"def __init__(self, metricName, timeResolutions = (86400,)):\n self.metric = metricName\n self.timeResolutions = timeResolutions",
"def metric(self, metric_id):\r\n return Metric(self, metric_id)",
"def __init__(self):\n super().__init__()\n self.metric = 'FMEASR'",
"def test_net_hook_alias(self):\n\n class Node:\n my_metric = Metric(Int64)\n\n @my_metric.net_hook\n def update_my_metric(self, value):\n return abs(value)\n\n node = Node()\n\n exp_value = 42\n node.my_metric = exp_value\n\n self.assertEqual(node.my_metric, exp_value)\n\n # Just make sure the method is callable. In a real Device or\n # Node this would likely have some desirable side-effect.\n self.assertEqual(node.update_my_metric(-exp_value), exp_value)\n\n my_metric = get_metric_object(node, 'my_metric')\n\n # Make sure the net_hook is still called in the correct way\n exp_value = 55\n my_metric.set_network(node, Int64(-exp_value))\n self.assertEqual(node.my_metric, exp_value)",
"def config(cls) -> HandlerConfig:\n MyType.clear_interning_cache()\n MyOtherType.clear_interning_cache()\n\n # Create the function to wrap.\n mock_function = mock.Mock()\n\n # Create fake unit classes.\n mock_from_unit = mock.MagicMock(spec=MyType.decorate(MyUnit))\n mock_from_unit.__name__ = \"MockFromUnit\"\n mock_to_unit = mock.MagicMock(spec=MyOtherType.decorate(MyUnit))\n mock_to_unit.__name__ = \"MockToUnit\"\n\n # Make it look like the two units are not compatible.\n mock_to_unit.is_compatible.return_value = False\n mock_from_unit.is_compatible.return_value = False\n\n # Create the wrapper instance.\n wrapper_class = CastHandler(mock_from_unit, mock_to_unit)\n # Wrap the function.\n wrapped_handler = wrapper_class(mock_function)\n\n return cls.HandlerConfig(handler=wrapped_handler,\n mock_function=mock_function,\n wrapper_class=wrapper_class,\n mock_from_unit=mock_from_unit,\n mock_to_unit=mock_to_unit)",
"def build_monitoring_addon_profile(self) -> ManagedClusterAddonProfile:\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID = addon_consts.get(\n \"CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID\"\n )\n CONST_MONITORING_USING_AAD_MSI_AUTH = addon_consts.get(\n \"CONST_MONITORING_USING_AAD_MSI_AUTH\"\n )\n\n # TODO: can we help the user find a workspace resource ID?\n monitoring_addon_profile = self.models.ManagedClusterAddonProfile(\n enabled=True,\n config={\n CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: self.context.get_workspace_resource_id(),\n CONST_MONITORING_USING_AAD_MSI_AUTH: \"true\"\n if self.context.get_enable_msi_auth_for_monitoring()\n else \"false\",\n },\n )\n # post-process, create a deployment\n self.context.external_functions.ensure_container_insights_for_monitoring(\n self.cmd, monitoring_addon_profile,\n self.context.get_subscription_id(),\n self.context.get_resource_group_name(),\n self.context.get_name(),\n self.context.get_location(),\n remove_monitoring=False,\n aad_route=self.context.get_enable_msi_auth_for_monitoring(),\n create_dcr=True,\n create_dcra=False,\n enable_syslog=self.context.get_enable_syslog(),\n data_collection_settings=self.context.get_data_collection_settings()\n )\n # set intermediate\n self.context.set_intermediate(\"monitoring_addon_enabled\", True, overwrite_exists=True)\n return monitoring_addon_profile",
"def _create_kube_apiserver_metrics_instance(self, instance):\n kube_apiserver_metrics_instance = deepcopy(instance)\n endpoint = instance.get('prometheus_url')\n prometheus_url = endpoint\n\n # Allow using a proper URL without introducing a breaking change since\n # the scheme option is deprecated.\n if not match('^https?://.*$', endpoint):\n scheme = instance.get('scheme', self.DEFAULT_SCHEME)\n prometheus_url = \"{0}://{1}\".format(scheme, endpoint)\n\n kube_apiserver_metrics_instance['prometheus_url'] = prometheus_url\n\n # Most set ups are using self signed certificates as the APIServer can be used as a CA.\n ssl_verify = instance.get('ssl_verify', self.DEFAULT_SSL_VERIFY)\n kube_apiserver_metrics_instance['ssl_verify'] = ssl_verify\n\n # We should default to supporting environments using RBAC to access the APIServer.\n bearer_token_auth = instance.get('bearer_token_auth', self.DEFAULT_BEARER_TOKEN_AUTH)\n kube_apiserver_metrics_instance['bearer_token_auth'] = bearer_token_auth\n\n return kube_apiserver_metrics_instance",
"def config(conf, ctx, pattern=None, desc=None, cast=None):\n\n def decorator(func):\n fninfo = _fn_get_info(func)\n fninfo.configs.append((conf, ctx, pattern, desc, cast))\n setattr(func, FNINFO_ATTR, fninfo)\n return func\n\n return decorator"
] | [
"0.52101",
"0.5050569",
"0.50056726",
"0.49772125",
"0.4909664",
"0.48076355",
"0.4777214",
"0.4777214",
"0.47341272",
"0.47207183",
"0.47207183",
"0.47207183",
"0.47207183",
"0.47207183",
"0.47016814",
"0.4695809",
"0.46777844",
"0.46469286",
"0.4639769",
"0.4592974",
"0.45886287",
"0.45648804",
"0.45327932",
"0.45076394",
"0.4449839",
"0.4447313",
"0.4436329",
"0.44361925",
"0.44307846",
"0.44293907"
] | 0.74009496 | 0 |
Create a new instance of the UpdateTrigger Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. | def __init__(self, temboo_session):
super(UpdateTrigger, self).__init__(temboo_session, '/Library/Xively/Triggers/UpdateTrigger') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_update_trigger(self):\n self.execute(self.commands.update_function(\n self.name,\n self._equals(\n self.intersection.dest_columns,\n 'NEW',\n self.intersection.origin_columns\n ),\n self.primary_key_column\n ))\n\n self.execute(self.commands.update_trigger(\n self.triggers['UPDATE'],\n self.source.name,\n self.name\n ))",
"def create(\n self,\n name,\n tags,\n targets,\n warn_value=None,\n error_value=None,\n desc='',\n ttl=600,\n ttl_state=STATE_NODATA,\n sched=None,\n expression='',\n trigger_type=None,\n is_remote=False,\n mute_new_metrics=False,\n **kwargs\n ):\n return Trigger(\n self._client,\n name,\n tags,\n targets,\n warn_value,\n error_value,\n desc,\n ttl,\n ttl_state,\n sched,\n expression,\n trigger_type,\n is_remote,\n mute_new_metrics,\n **kwargs\n )",
"def createTrigger(self):\n return _libsbml.Event_createTrigger(self)",
"def createTrigger(self):\n return _libsbml.Model_createTrigger(self)",
"def create_trigger(self, trigger, conditions=[], dampenings=[]):\n full_trigger = {'trigger': trigger, 'conditions': conditions, 'dampenings': dampenings}\n self._post(path='triggers/trigger', data=full_trigger)",
"def setTrigger(self, *args):\n return _libsbml.Event_setTrigger(self, *args)",
"def create_triggers(self):\n triggers = self.get_source_triggers()\n if not triggers:\n self.create_insert_trigger()\n self.create_update_trigger()\n self.create_delete_trigger()\n self.commit()",
"def trigger(self, sid):\r\n return Trigger(self, sid)",
"def set_TriggerID(self, value):\n super(UpdateTriggerInputSet, self)._set_input('TriggerID', value)",
"def update(self, id, **kwargs):\n body = {}\n for k, v in kwargs.items():\n if v is not None:\n body.update({k: v})\n\n return self._update('/v1/webhooks/%s' % id, kwargs)",
"def update(cls, webhook_endpoint_id, url=None, events=None, status=None):\n data = {}\n if url:\n data['url'] = url\n if events:\n data['events'] = events\n if status:\n data['status'] = status\n return WebhookEndpoint(Requester.patch(cls.endpoint + '/' + webhook_endpoint_id, data=data))",
"def getTrigger(self, *args):\n return _libsbml.Event_getTrigger(self, *args)",
"def set(self):\n if not os.path.isfile(self._trigger_file):\n with open(self._trigger_file, \"w\"):\n pass\n logger.debug(\"Set preview update trigger: %s\", self._trigger_file)",
"def test_update(self, updateRecords=None):\n fake_dns_instance = FakeDnsInstance()\n t = template_format.parse(domain_only_template)\n instance = self._setup_test_cloud_dns_instance('dnsinstance_update', t)\n instance.resource_id = 4\n update_args = self.update_domain_only_args\n self._stubout_update(\n instance,\n fake_dns_instance,\n updateRecords,\n **update_args)\n\n uprops = dict(instance.properties)\n uprops.update({\n 'emailAddress': '[email protected]',\n 'ttl': 5555,\n 'comment': 'updated comment',\n })\n if updateRecords:\n uprops['records'] = updateRecords\n ut = rsrc_defn.ResourceDefinition(instance.name,\n instance.type(),\n uprops)\n instance.state_set(instance.CREATE, instance.COMPLETE)\n scheduler.TaskRunner(instance.update, ut)()\n self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)\n self.m.VerifyAll()",
"def update(self, refresh=False, parameter=None, **kwargs):\n if refresh:\n self.__init__(path=self._path, service=self.service)\n\n default_asset = {\n \"maxTimerLengthSeconds\": self._maxTimerLengthSeconds,\n \"totalTimeMinMilliseconds\": self._totalTimeMinMilliseconds,\n \"uniqueTriggerId\": self._uniqueTriggerId,\n \"verticalScrollPercentageList\": self._verticalScrollPercentageList,\n \"horizontalScrollPercentageList\": self._horizontalScrollPercentageList,\n \"containerId\": self._containerId,\n \"waitForTagsTimeout\": self._waitForTagsTimeout,\n \"accountId\": self._accountId,\n \"waitForTags\": self._waitForTags,\n \"intervalSeconds\": self._intervalSeconds,\n \"eventName\": self._eventName,\n \"visibilitySelector\": self._visibilitySelector,\n \"workspaceId\": self._workspaceId,\n \"customEventFilter\": self._customEventFilter,\n \"parentFolderId\": self._parentFolderId,\n \"continuousTimeMinMilliseconds\": self._continuousTimeMinMilliseconds,\n \"selector\": self._selector,\n \"triggerId\": self._triggerId,\n \"tagManagerUrl\": self._tagManagerUrl,\n \"fingerprint\": self._fingerprint,\n \"visiblePercentageMax\": self._visiblePercentageMax,\n \"path\": self._path,\n \"name\": self._name,\n \"visiblePercentageMin\": self._visiblePercentageMin,\n \"type\": self._type,\n \"notes\": self._notes,\n \"interval\": self._interval,\n \"filter\": self._filter,\n \"autoEventFilter\": self._autoEventFilter,\n \"limit\": self._limit,\n \"checkValidation\": self._checkValidation,\n }\n update_asset = {**default_asset, **kwargs}\n\n if parameter:\n parameter_dict = {**param_dict(self._parameter), **param_dict(parameter)}\n parameter = list(parameter_dict.values())\n else:\n parameter = self._parameter\n\n update_asset[\"parameter\"] = [x.to_obj() for x in parameter]\n\n update_asset = {k: v for k, v in update_asset.items() if v is not None}\n\n request = self.triggers_service.update(path=self.path, body=update_asset)\n response = request.execute()\n self.__init__(trigger=response, service=self.service)",
"def update_targets(self, target, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.update_targets_with_http_info(target, **kwargs)\n else:\n (data) = self.update_targets_with_http_info(target, **kwargs)\n return data",
"def triggerCondition(cls, state):\n x1, x2 = state\n y = cls.X2toY(x1, x2)\n norm = np.math.sqrt(x1**2 + y**2)\n\n # calculate next trigger step\n dt = (29 * x1 + norm**2) / (5.36 * norm * x1**2 + norm**2) * cls.tau_trigger\n return dt",
"def update_webhook_schedule():\n session = create_session()\n settings = db.settings.get(session, \"main\")\n settings = SiteSettings(**settings)\n time = cron_parser(settings.webhooks.webhookTime)\n job = JOB_STORE.get(\"webhooks\")\n\n scheduler.reschedule_job(\n job.scheduled_task.id,\n trigger=\"cron\",\n hour=time.hours,\n minute=time.minutes,\n )\n\n session.close()\n logger.info(scheduler.print_jobs())",
"async def test_webhook_endpoint_generates_telegram_callback_event(\n hass: HomeAssistant,\n webhook_platform,\n hass_client: ClientSessionGenerator,\n update_callback_query,\n) -> None:\n client = await hass_client()\n events = async_capture_events(hass, \"telegram_callback\")\n\n response = await client.post(TELEGRAM_WEBHOOK_URL, json=update_callback_query)\n assert response.status == 200\n assert (await response.read()).decode(\"utf-8\") == \"\"\n\n # Make sure event has fired\n await hass.async_block_till_done()\n\n assert len(events) == 1\n assert events[0].data[\"data\"] == update_callback_query[\"callback_query\"][\"data\"]",
"def isSetTrigger(self):\n return _libsbml.Event_isSetTrigger(self)",
"def _toggleTrigger(self, message: IRCMessage) -> IRCResponse:\n triggerName = message.parameterList[1]\n if triggerName in self.storage:\n self.storage[triggerName][\"enabled\"] = not self.storage[triggerName][\"enabled\"]\n currentStatus = \"enabled\" if self.storage[triggerName][\"enabled\"] else \"disabled\"\n return IRCResponse(f\"Trigger {triggerName} is now {currentStatus}\", message.replyTo)\n else:\n return IRCResponse(f\"No trigger named {triggerName} exists.\", message.replyTo)",
"def send_update_notification(item, target, name):\n\n # Check to see if anything actually changed. A row could be updated with the same values.\n changes = get_changes(target)\n\n # If no changes are found, then we do not need to create a notification.\n # Therefore, we check to see if there are changes before continuing.\n if changes:\n\n # Get the name of the administrator who made the change.\n administrator = \"{} {}\".format(current_user.first_name, current_user.last_name)\n\n # Format the title for the notification.\n title = \"Updated {}\".format(item)\n\n # Format the title for the notification.\n message = \"{} {} was updated by {}\".format(item, name, administrator)\n\n # Create the new notification and add to the database.\n new_notification = Notifications(title=title, message=message)\n db.session.add(new_notification)",
"async def async_attach_trigger(\n hass: HomeAssistant,\n config: ConfigType,\n action: TriggerActionType,\n trigger_info: TriggerInfo,\n) -> CALLBACK_TYPE:\n return await toggle_entity.async_attach_trigger(hass, config, action, trigger_info)",
"def from_dict(cls, _dict: Dict) -> 'GatewayChangeRequestGatewayClientGatewayUpdateAttributes':\n args = {}\n if 'type' in _dict:\n args['type'] = _dict.get('type')\n else:\n raise ValueError('Required property \\'type\\' not present in GatewayChangeRequestGatewayClientGatewayUpdateAttributes JSON')\n if 'updates' in _dict:\n args['updates'] = _dict.get('updates')\n else:\n raise ValueError('Required property \\'updates\\' not present in GatewayChangeRequestGatewayClientGatewayUpdateAttributes JSON')\n return cls(**args)",
"def trigger_properties(self) -> Optional[pulumi.Input['FlowTriggerConfigTriggerPropertiesArgs']]:\n return pulumi.get(self, \"trigger_properties\")",
"def post(self):\n send_slack_log('Entered /slack/update_msg')\n send_slack_log('Request info:')\n send_slack_log(str(request.form))\n trigger_id = request.form['trigger_id']\n channel_id = request.form['channel_id']\n response = open_form(channel_id,\n trigger_id,\n config['slack_update_form_path'])\n send_slack_log('Response info:')\n send_slack_log(str(response))\n return 'Please enter the updated msg information in the form'",
"async def update_async(\n self,\n friendly_name: Union[str, object] = values.unset,\n assignment_callback_url: Union[str, object] = values.unset,\n fallback_assignment_callback_url: Union[str, object] = values.unset,\n configuration: Union[str, object] = values.unset,\n task_reservation_timeout: Union[int, object] = values.unset,\n re_evaluate_tasks: Union[str, object] = values.unset,\n ) -> WorkflowInstance:\n data = values.of(\n {\n \"FriendlyName\": friendly_name,\n \"AssignmentCallbackUrl\": assignment_callback_url,\n \"FallbackAssignmentCallbackUrl\": fallback_assignment_callback_url,\n \"Configuration\": configuration,\n \"TaskReservationTimeout\": task_reservation_timeout,\n \"ReEvaluateTasks\": re_evaluate_tasks,\n }\n )\n\n payload = await self._version.update_async(\n method=\"POST\",\n uri=self._uri,\n data=data,\n )\n\n return WorkflowInstance(\n self._version,\n payload,\n workspace_sid=self._solution[\"workspace_sid\"],\n sid=self._solution[\"sid\"],\n )",
"async def async_attach_trigger(\n hass, config, action, automation_info, *, platform_type=\"event\"\n):\n event_type = config.get(CONF_EVENT_TYPE)\n event_data_schema = None\n if config.get(CONF_EVENT_DATA):\n event_data_schema = vol.Schema(\n {\n vol.Required(key): value\n for key, value in config.get(CONF_EVENT_DATA).items()\n },\n extra=vol.ALLOW_EXTRA,\n )\n\n @callback\n def handle_event(event):\n \"\"\"Listen for events and calls the action when data matches.\"\"\"\n if event_data_schema:\n # Check that the event data matches the configured\n # schema if one was provided\n try:\n event_data_schema(event.data)\n except vol.Invalid:\n # If event data doesn't match requested schema, skip event\n return\n\n hass.async_run_job(\n action,\n {\n \"trigger\": {\n \"platform\": platform_type,\n \"event\": event,\n \"description\": f\"event '{event.event_type}'\",\n }\n },\n event.context,\n )\n\n return hass.bus.async_listen(event_type, handle_event)",
"def unsetTrigger(self):\n return _libsbml.Event_unsetTrigger(self)",
"def get_trigger(stokes, min_window_size, delay_end):\n\n utc_start = Time(\"2019-01-01 12:00:00\")\n time = 38.249\n\n # trigger\n dm = 56.791\n trigger = {'dm': dm, 'snr': 15.2, 'width': 2, 'beam': 22, 'time': time,\n 'utc_start': utc_start, 'stokes': stokes}\n\n # event parameters\n # dm delay is less than the minimum trigger duration, so start/end time do not need to take DM into account\n shift = 2.048\n event_start_full = utc_start + TimeDelta(time, format='sec') - TimeDelta(shift, format='sec')\n event_end_full = event_start_full + TimeDelta(min_window_size + delay_end + shift, format='sec')\n\n event_start, event_start_frac = event_start_full.iso.split('.')\n event_end, event_end_frac = event_end_full.iso.split('.')\n\n event_info = trigger.copy()\n event_info['utc_start'] = trigger['utc_start'].iso.replace(' ', '-')\n event_info['event_start'] = event_start.replace(' ', '-')\n event_info['event_start_frac'] = event_start_frac\n event_info['event_end'] = event_end.replace(' ', '-')\n event_info['event_end_frac'] = event_end_frac\n\n event = dedent(\"\"\"\\\n N_EVENTS 1\n {utc_start}\n {event_start} {event_start_frac} {event_end} {event_end_frac} {dm} {snr} {width} {beam}\n \"\"\".format(**event_info))\n return trigger, event"
] | [
"0.55399466",
"0.4846183",
"0.4789044",
"0.47353917",
"0.46725985",
"0.44396847",
"0.39952904",
"0.3906584",
"0.390634",
"0.388709",
"0.3831259",
"0.37764362",
"0.37753603",
"0.3763408",
"0.37489897",
"0.37427178",
"0.37355223",
"0.3697208",
"0.3646685",
"0.3630165",
"0.36292082",
"0.36160606",
"0.36159304",
"0.3615815",
"0.35957187",
"0.35838336",
"0.35789388",
"0.35558182",
"0.35536835",
"0.35387817"
] | 0.49308053 | 1 |
Set the value of the ThresholdValue input for this Choreo. ((optional, string) Threshold that will cause the trigger to activate. Include input only if changing Threshold Value.) | def set_ThresholdValue(self, value):
super(UpdateTriggerInputSet, self)._set_input('ThresholdValue', value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setThreshold(self, value):\n return self._set(threshold=value)",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def threshold(self,thresholdValue):\n # TO DO\n pass",
"def set_threshold(self, threshold):\n self._threshold = check_value_positive('threshold', threshold)",
"def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass",
"def set_value(self, value):\n\n self._progress.setValue(value)",
"def setFrequencyThreshold(self, value):\n return self._set(frequencyThreshold=value)",
"def setThresholdLevel(self, *args):\n return _libsbml.Input_setThresholdLevel(self, *args)",
"def trigger_level(self, value):\n self.lib.SetTriggerLevel(ct.c_float(value))",
"def set_value(\n self,\n value: float,\n ) -> None:\n self._data_provider.set_value(value)",
"def set_value(self, value, uncertainty):\n self.value = value\n self.uncertainty = uncertainty",
"def set_value(self, value):\n self.value = value",
"def set_value(self, value):\n self.value = value",
"def set_value(self, value):\n self.value = value",
"def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0",
"def value(self, value: float):\n if value is None:\n raise ValueError(\"Invalid value for `value`, must not be `None`\") # noqa: E501\n\n self._value = value",
"def set_brightness(self, value):\n self.parent.backlight.set_brightness(value)",
"def setThresholdLabel(self, p):\n return self._set(thresholdLabel=p)",
"def setThresholdLabel(self, p):\n return self._set(thresholdLabel=p)",
"def setThresholdLabel(self, p):\n return self._set(thresholdLabel=p)",
"def value(self, value: float):\n\n self._value = value",
"def value(self, value: float):\n\n self._value = value",
"def set_value(self, value: float):\n self.points[0, 0] = value\n return self",
"def set_value(self, value):\n self.value = value\n return self",
"def set_tau_threshold(self, tau_threshold):\n\n if tau_threshold < 0:\n raise ValueError(\"The tau threshold cannot be smaller than 0.\")\n\n core.xc_func_set_tau_threshold(self.xc_func, ctypes.c_double(tau_threshold))",
"def threshold_amount(self, threshold_amount):\n if self.local_vars_configuration.client_side_validation and threshold_amount is None: # noqa: E501\n raise ValueError(\"Invalid value for `threshold_amount`, must not be `None`\") # noqa: E501\n\n self._threshold_amount = threshold_amount"
] | [
"0.7035364",
"0.5995165",
"0.5995165",
"0.5995165",
"0.5995165",
"0.5995165",
"0.5991856",
"0.586901",
"0.5700665",
"0.53603107",
"0.52978474",
"0.5290875",
"0.52556384",
"0.52308434",
"0.5228519",
"0.51740694",
"0.51740694",
"0.51740694",
"0.5158728",
"0.5143797",
"0.5082036",
"0.49998158",
"0.49998158",
"0.49998158",
"0.4998485",
"0.4998485",
"0.49444303",
"0.49391875",
"0.4938926",
"0.4925848"
] | 0.75535905 | 0 |
Set the value of the TriggerID input for this Choreo. ((required, integer) TriggerID for the trigger that you wish to update.) | def set_TriggerID(self, value):
super(UpdateTriggerInputSet, self)._set_input('TriggerID', value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trigger_id(self, trigger_id):\n\n self._trigger_id = trigger_id",
"def delete_trigger(self, trigger_id):\n self._delete(path=\"triggers/{}\".format(trigger_id))",
"def get_trigger_by_id(self, trigger_id):\n return self.triggers[trigger_id]",
"def trigger_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"trigger_id\")",
"def set_interrupt_trigger(ft_handle: FtHandle, trigger: GpioTrigger) -> None:\n result: Ft4222Status = _set_interrupt_trigger(\n ft_handle, trigger.value)\n\n if result != Ft4222Status.OK:\n raise Ft4222Exception(result)",
"def delete(self, trigger_id):\n try:\n self._client.delete(self._full_path(trigger_id))\n return False\n except InvalidJSONError:\n return True",
"def setTrigger(self, *args):\n return _libsbml.Event_setTrigger(self, *args)",
"def trigger_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"trigger_id\")",
"def fetch_by_id(self, trigger_id):\n result = self._client.get(self._full_path(trigger_id + '/state'))\n if 'state' in result:\n trigger = self._client.get(self._full_path(trigger_id))\n return Trigger(self._client, **trigger)\n elif not 'trigger_id' in result:\n raise ResponseStructureError(\"invalid api response\", result)",
"def trigger_name(self, trigger_name: \"str\"):\n self._attrs[\"triggerName\"] = trigger_name",
"def bucket_id(self, bucket_id):\n if bucket_id is None:\n raise ValueError(\"Invalid value for `bucket_id`, must not be `None`\")\n\n self._bucket_id = bucket_id",
"def setID(self, id):\n self._id = id\n return self.callRemote('setID', id)",
"def isSetTrigger(self):\n return _libsbml.Event_isSetTrigger(self)",
"def set_reference_id(self, reference_id):\n self.reference_id = reference_id",
"def setJobId(self, jobid):\n self._ShREEKConfig.setJobId(jobid)",
"def set_id(self, id):\n\n\t\tif id is not None and not isinstance(id, int):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: id EXPECTED TYPE: int', None, None)\n\t\t\n\t\tself.__id = id\n\t\tself.__key_modified['id'] = 1",
"def frequency_trigger(self, frequency_trigger):\n\n self._frequency_trigger = frequency_trigger",
"def get_callback_trigger(callback_context):\n if not callback_context.triggered:\n trigger_id = None\n else:\n trigger_id = callback_context.triggered[0][\"prop_id\"].split(\".\")[0]\n\n return trigger_id",
"def set_bpq_id(self, bpq_id):\n if not (bpq_id and (len(bpq_id) > 0)):\n raise ValueError\n \n self.bpq_id = bpq_id\n self.bpq_id_len = len(bpq_id)\n \n return",
"def reset_throttling(self, trigger_id):\n try:\n self._client.delete(self._full_path(trigger_id + '/throttling'))\n return True\n except InvalidJSONError:\n return False",
"def attachment_id(self, attachment_id):\n\n self._attachment_id = attachment_id",
"def dataset_id(self, dataset_id):\n if dataset_id is None:\n raise ValueError(\"Invalid value for `dataset_id`, must not be `None`\") # noqa: E501\n\n self._dataset_id = dataset_id",
"def portal_id(self, portal_id):\n if (\n self.local_vars_configuration.client_side_validation and portal_id is None\n ): # noqa: E501\n raise ValueError(\n \"Invalid value for `portal_id`, must not be `None`\"\n ) # noqa: E501\n\n self._portal_id = portal_id",
"def trigger_condition(self, trigger_condition: \"str\"):\n self._attrs[\"triggerCondition\"] = trigger_condition",
"def SetId(self, new_id):\r\n\r\n self.id = new_id",
"async def set_trigger(self, tag, message):\n if tag in self.triggers:\n try:\n msg = await self.get_message(self.channel, self.triggers[tag])\n except discord.NotFound:\n pass\n else:\n # Remove reactions on the previous trigger (from this tag)\n for reaction in msg.reactions:\n if reaction.me and reaction.emoji in emoji.TRIGGERS[tag]:\n await self.remove_reaction(msg, reaction.emoji, self.me)\n self.triggers.pop(tag)\n if message is not None:\n self.triggers[tag] = message.id",
"def thread_trigger_id(self) -> str | None:\n trigger_id = None\n if hasattr(threading.current_thread(), 'trigger_id'):\n trigger_id = threading.current_thread().trigger_id # type: ignore\n return trigger_id",
"def attachment_upload_id(self, attachment_upload_id):\n\n self._attachment_upload_id = attachment_upload_id",
"def plan_id(self, plan_id: str):\n if plan_id is None:\n raise ValueError(\"Invalid value for `plan_id`, must not be `None`\") # noqa: E501\n\n self._plan_id = plan_id",
"def project_id(self, project_id):\n if project_id is None:\n raise ValueError(\"Invalid value for `project_id`, must not be `None`\") # noqa: E501\n\n self._project_id = project_id"
] | [
"0.6884074",
"0.5484074",
"0.51305044",
"0.49737403",
"0.4899946",
"0.48826936",
"0.46575096",
"0.44376594",
"0.4368194",
"0.43429962",
"0.42614675",
"0.4221972",
"0.4156011",
"0.415287",
"0.4144462",
"0.41385454",
"0.4132832",
"0.41177928",
"0.41103038",
"0.40706295",
"0.4051034",
"0.4036671",
"0.4009284",
"0.39915863",
"0.3976677",
"0.39372298",
"0.39355618",
"0.3933311",
"0.39231905",
"0.39227676"
] | 0.6217905 | 1 |
Retrieve the value for the "ResponseStatusCode" output from this Choreo execution. ((integer) The response status code returned from Xively. For a successful trigger update, the code should be 200.) | def get_ResponseStatusCode(self):
return self._output.get('ResponseStatusCode', None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def response_code(self):\n return self._response_code",
"def response_code(self):\n return self._response_code",
"def get_status_code(self, response):\n if hasattr(response, 'status_int'):\n return response.status_int\n return response.status",
"def get_status_code(self, response):\r\n if hasattr(response, 'status_int'):\r\n return response.status_int\r\n else:\r\n return response.status_code",
"def get_status_code(self, response):\r\n if hasattr(response, 'status_int'):\r\n return response.status_int\r\n else:\r\n return response.status_code",
"def response_code(self):\r\n return self._response_code",
"def get_status_code(self):\n return self.__response.status_code",
"def response_status(self):\n return self.__response_status",
"def status_code(self) -> int:\n return pulumi.get(self, \"status_code\")",
"def status_code(self):\n return self._status_code",
"def custom_block_response_status_code(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"custom_block_response_status_code\")",
"def _get_status_code(response: Response) -> int:\n status_code = response.status_code\n if isinstance(status_code, HTTPStatus):\n return status_code.value\n else:\n return status_code",
"def status_code(self) -> Optional[int]:\n if self.response is not None:\n return self.response.status_code\n return None",
"def response_status(self):\n if \"responseStatus\" in self._prop_dict:\n if isinstance(self._prop_dict[\"responseStatus\"], OneDriveObjectBase):\n return self._prop_dict[\"responseStatus\"]\n else :\n self._prop_dict[\"responseStatus\"] = ResponseStatus(self._prop_dict[\"responseStatus\"])\n return self._prop_dict[\"responseStatus\"]\n\n return None",
"def custom_block_response_status_code(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"custom_block_response_status_code\")",
"def custom_block_response_status_code(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"custom_block_response_status_code\")",
"def code(self):\n\t\treturn self.status_code",
"def status_code(self):\n return int(self._status[:3])",
"def get_response_status(response_code):\n if is_success(response_code):\n return 'success'\n return 'error'",
"def status_code(self):\n return int(self.status.split()[1])",
"def status_code(self):\r\n return int(self._status[:3])",
"def status(self, value):\n if isinstance(value, (long, int)):\n if 100 <= value <= 900:\n status = _RESPONSE_STATUSES.get(value, '')\n if status:\n self._status = '%d %s' % (value, status)\n else:\n self._status = str(value)\n else:\n raise ValueError('Bad response code: %d' % value)\n elif isinstance(value, basestring):\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n if _RE_RESPONSE_STATUS.match(value):\n self._status = value\n else:\n raise ValueError('Bad response code: %d' % value)\n else:\n raise TypeError('Bad type of response code.')",
"def getResponseCode(self) -> int:\n ...",
"def status_code(self) -> int:\n raise NotImplementedError # pragma: no cover",
"def status(self, value):\r\n if isinstance(value, (int, long)):\r\n if 100 <= value <= 999:\r\n st = _RESPONSE_STATUSES.get(value, '')\r\n if st:\r\n self._status = '%d %s' % (value, st)\r\n else:\r\n self._status = str(value)\r\n else:\r\n raise ValueError('Bad response code: %d' % value)\r\n elif isinstance(value, basestring):\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n if _RE_RESPONSE_STATUS.match(value):\r\n self._status = value\r\n else:\r\n raise ValueError('Bad response code: %s' % value)\r\n else:\r\n raise TypeError('Bad type of response code.')",
"def getReturnCode(self):\n retcode = self.sendCmd(\"echo $?\")\n try:\n return int(retcode)\n except:\n return retcode",
"def get_retcode(self):\n return self._retcode",
"def return_code(self):\n return self._failures",
"def result_code(self):\n return self._result_code",
"def error_code(self):\n return self.json['response'].get('error_code')"
] | [
"0.67395395",
"0.67395395",
"0.66692615",
"0.6604862",
"0.6604862",
"0.65936613",
"0.6561299",
"0.64286965",
"0.6367107",
"0.636263",
"0.634679",
"0.6177823",
"0.61737144",
"0.6155878",
"0.6047667",
"0.6047667",
"0.5972856",
"0.5878245",
"0.58305305",
"0.5826923",
"0.57805556",
"0.5644335",
"0.56151223",
"0.548574",
"0.54815286",
"0.54757684",
"0.5470735",
"0.54277635",
"0.5401828",
"0.5399895"
] | 0.8141129 | 0 |
Fetches packs' data from context and formats it into an installable object. | def get_packs_data_from_context():
instance_context = demisto.context()
context_packs_data = instance_context.get('ContentData')
if isinstance(context_packs_data, list):
context_entries = [
{
'packid': pack['packID'],
'packversion': 'latest',
}
for pack in context_packs_data
]
else:
context_entries = [
{
'packid': context_packs_data['packID'],
'packversion': 'latest',
}
]
return_results(
CommandResults(
outputs_prefix='ConfigurationSetup',
outputs={'MarketplacePacks': context_entries},
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def package_data(self, data):\n pass",
"def get_data(self, context):\n # Things to do\n return context",
"def getPackageInfo(self, pid):\n if pid == self.ROOT_PACKAGE:\n pack = RootPackage(self, OWNER).toInfoData()\n elif pid in self.packages:\n pack = self.packages[pid].toInfoData()\n pack.stats = self.db.getStatsForPackage(pid)\n else:\n pack = self.db.getPackageInfo(pid)\n\n if not pack: return None\n\n # todo: what does this todo mean?!\n #todo: fill child packs and files\n packs = self.db.getAllPackages(root=pid)\n if pid in packs: del packs[pid]\n pack.pids = packs.keys()\n\n files = self.db.getAllFiles(package=pid)\n pack.fids = files.keys()\n\n return pack",
"def _pack(self):\n pass",
"def get_data(self, context):\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return context",
"def get_data(self, context):\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return context",
"def get_data(self, context):\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return context",
"def get_data(self, context):\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return context",
"def _store_package_metadata(self):\n\n context = self._config.context\n log.debug('processing chef_json file {0} for package metadata'.format(self._get_chef_json_full_path()))\n with open(self._get_chef_json_full_path()) as chef_json_file:\n chef_json = json.load(chef_json_file)\n log.debug(chef_json.dump)\n\n context.package.attributes = {}\n for x in self._config.pkg_attributes:\n context.package.attributes[x] = chef_json.get(x, None)",
"def prepare_data(self):",
"def get_data(self, **context):\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return context",
"def pack():\n PackCommandExecutor().pack()",
"def pack_goods(self, by=None):",
"def pack_goods(self, by=None):",
"def _pack_as_cdt_ctx(self, obj: CDTContext):\n n = len(obj) * 2\n self.pack_array_header(n)\n\n for item in obj:\n self._pack_as_cdt_item(item)\n\n return",
"def _fetch_data(self):\n pass",
"def _load(self):\n with qisys.sh.TempDir() as work_dir:\n pkg = portage.xpak.tbz2(self.path)\n pkg.decompose(work_dir, cleanup=0)\n arch, arch_variant = _get_pkg_arch(work_dir)\n with open(os.path.join(work_dir, 'PF'), 'r') as fpf:\n pf = fpf.readline().strip()\n name, version, revision = portage.versions.pkgsplit(pf)\n dependency = dict()\n for dep, dep_filename in _DEPENDENCY.items():\n dep_path = os.path.join(work_dir, dep_filename)\n if not os.path.exists(dep_path):\n dependency[dep] = list()\n continue\n with open(dep_path, 'r') as fdep:\n dependency[dep] = fdep.read().strip().split()\n dependency['all'] = list()\n for dep_list in _DEPENDENCY:\n dependency['all'].extend(dependency[dep_list])\n for dep, dep_list in dependency.items():\n dependency[dep] = list(set(dep_list))\n metadata = {\n 'name': name,\n 'version': version,\n 'revision': revision,\n 'arch': arch,\n 'arch_variant': arch_variant,\n 'dependencies': dependency,\n }\n self.metadata = metadata",
"def _load(self):\n graph = self.context.parent.graph.get_context(self.context.identifier)\n data = {}\n for (_, p, o) in graph.triples((self.context.identifier, None, None)):\n if not p.startswith(META):\n continue\n name = p[len(META):]\n data[name] = o.toPython()\n return data",
"def get_product_pack(self, ):\n\n # Parse response dict for product data\n\n result = {}\n\n # Iterate over incoming raw data and pick values corresponding to \"Product\" db table\n for item in self.raw_data['searchResult']['item']:\n for field in item.keys():\n if format_ebay_col_name(field) in product_table_columns:\n if type(item[field]) == str:\n if '\"' in item[field]: item[field] = item[field].replace('\"', '\\\\\"')\n result[format_ebay_col_name(field)] = item[field]\n\n # Handle several nested values\n while Switch(format_ebay_col_name(field)):\n if case('PRODUCT_ID'):\n result['PRODUCT_ID'] = item[field]['value']\n if case('PRIMARY_CATEGORY'):\n result['CATEGORY_ID'] = item[field]['categoryId']\n result['CATEGORY_NAME'] = item[field]['categoryName']\n if case('CONDITION'):\n result['CONDITION_DISPLAY_NAME'] = item[field]['conditionDisplayName']\n result['CONDITION_ID'] = item[field]['conditionId']\n break\n\n # Fill missing values with \"NULL\"s\n for table_filed in product_table_columns:\n if table_filed not in result.keys(): result[table_filed] = 'NULL'\n\n return result",
"def get_data():\n pass",
"def _store_package_metadata(self):",
"def get_objects_data(self):\n pass",
"def _pack_data( self, data ) : \r\n \r\n # hints = self._translation_table.get( type(data), None )\r\n hints = self._get_hints( data ) \r\n \r\n if hints is None : \r\n \r\n ## #debug: \r\n ## print \"_pack_data(): no hints for data type %s (data repr: %s)\" % (type(data), repr(data))\r\n \r\n # \"one-level recursion\" : \r\n # return self._pack_data( repr(data) )\r\n return self._pack_data( str(data) )\r\n \r\n ## # our special case ( grep 'bugfix' to see why we want a zero block ) \r\n ## if data is None: data = 0\r\n \r\n # else ... \r\n \r\n # 'DescType' + 'length' + 'data'\r\n desctype = hints[0]\r\n if desctype == 'TEXT' : \r\n length = len(data)\r\n data_str = data \r\n else :\r\n length = struct.calcsize( hints[1] )\r\n data_str = struct.pack( hints[1], data )\r\n \r\n length_str = struct.pack('=H', length)\r\n \r\n \r\n return _cat(desctype, length_str, data_str)",
"def _load(self):\n\n # This can happen when the object is not loaded yet\n # Usually when __init__ calls super().__init__()\n # and OrderSource starts initializing the instance attributes\n if not hasattr(self, \"_data\"):\n return\n\n if self._data is None:\n try:\n self._data = self.storage.load(basket=self)\n except BasketCompatibilityError as error:\n msg = _(\"Basket loading failed: Incompatible basket (%s).\")\n messages.error(self.request, msg % error)\n self.storage.delete(basket=self)\n self._data = self.storage.load(basket=self)\n self.dirty = False\n self.uncache()\n return self._data",
"def load_data(self):",
"def get_context_data(self, request, **kwargs):\n for piece_name in self.pieces.keys():\n piece = getattr(self, piece_name)\n self.context = piece.get_context_data(self.context, **kwargs)\n return self.context",
"def prepare(self, context):\n raise NotImplementedError",
"def load_data(self) -> None:",
"def get_data(self):\n pass",
"def get_data(self):\n pass"
] | [
"0.5854237",
"0.57914305",
"0.5674762",
"0.56493485",
"0.5541139",
"0.5541139",
"0.5541139",
"0.5541139",
"0.5501793",
"0.5384755",
"0.53700507",
"0.53397727",
"0.5255681",
"0.5255681",
"0.5247963",
"0.5226133",
"0.5220658",
"0.5211134",
"0.51883596",
"0.5187239",
"0.51723456",
"0.5144685",
"0.51361835",
"0.51275104",
"0.508175",
"0.5028677",
"0.502168",
"0.5000318",
"0.49918708",
"0.49918708"
] | 0.73318 | 0 |
Redetect faces on images. | def detectFaces():
faceEngine = VLFaceEngine()
detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
imageWithOneFace = VLImage.load(filename=EXAMPLE_O)
pprint.pprint(detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False).asDict())
detection = detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False)
pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection))
pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection.boundingBox.rect))
imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
severalFaces = detector.detect([imageWithSeveralFaces], detect5Landmarks=False, detect68Landmarks=False)
pprint.pprint(
detector.redetect(
images=[
ImageForRedetection(imageWithSeveralFaces, [face.boundingBox.rect for face in severalFaces[0]]),
ImageForRedetection(imageWithOneFace, [detection.boundingBox.rect]),
ImageForRedetection(imageWithOneFace, [Rect(0, 0, 1, 1)]),
]
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self,image):\r\n \r\n self._faces=[]\r\n \r\n if util.isgray(image):\r\n image=cv2.equalizeHist(image)\r\n \r\n else:\r\n \r\n image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\n cv2.equalizeHist(image,image)\r\n \r\n minsize=util.widthheightdividedby(image,8)\r\n\r\n \r\n\r\n \r\n facerect=self._faceclassifier.detectMultiScale(image,\r\n self.scalefactor,\r\n self.minneighbors,\r\n self.flags,\r\n minsize)\r\n \r\n \"\"\"if facerects is not None:\r\n \r\n for facerect in facerects:\r\n face=face()\r\n \r\n face.facerect=facerect\r\n \r\n \r\n x,y,w,h=facerect\r\n \r\n # Seek an eye in the upper-left part of the face. \r\n searchRect = (x+w/7, y, w*2/7, h/2) \r\n face.leftEyeRect = self._detectOneObject( \r\n self._eyeClassifier, image, searchRect, 64) \r\n \r\n \r\n \r\n # Seek an eye in the upper-right part of the face. \r\n searchRect = (x+w*4/7, y, w*2/7, h/2) \r\n face.rightEyeRect = self._detectOneObject( \r\n self._eyeClassifier, image, searchRect, 64) \r\n \r\n \r\n \r\n # Seek a nose in the middle part of the face. \r\n searchRect = (x+w/4, y+h/4, w/2, h/2) \r\n face.noseRect = self._detectOneObject( \r\n self._noseClassifier, image, searchRect, 32) \r\n \r\n # Seek a mouth in the lower-middle part of the face. \r\n searchRect = (x+w/6, y+h*2/3, w*2/3, h/3) \r\n face.mouthRect = self._detectOneObject( \r\n self._mouthClassifier, image, searchRect, 16) \r\n \r\n \r\n \r\n self._faces.append(face)\r\n\r\n \r\n \r\n def _detectoneobject(self,\r\n classifier,\r\n image,\r\n rect,\r\n imagesizetominsizeratio):\r\n \r\n x ,y ,w ,h=rect\r\n \r\n minsize=util.widthheightdividedby(image,\r\n imagesizetominsizeratio)\r\n \r\n subimage=image[y:y+h,x:x+w]\r\n \r\n subrect=classifier.dectectMultiScale(subimage,\r\n self.scalefactor,\r\n self.minneighbors,\r\n self.flags,\r\n minsize)\r\n \r\n if len(subrect)==0:\r\n return None\r\n \r\n subx,suby,subw,subh=subrects[0]\r\n \r\n return (x+subx,y+suby,w+subw,h+subh)\r\n \r\n \"\"\"",
"def detect_faces(self, img):\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=.7)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = detect_face.create_mtcnn(sess, None)\n\n minsize = 20 # minimum size of face\n threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold\n factor = 0.709 # scale factor\n\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n\n nrof_faces = bounding_boxes.shape[0]\n img_size = np.asarray(img.shape)[0:2]\n\n faces = []\n faces_rects = []\n\n for i in range(nrof_faces):\n det = bounding_boxes[i,0:4]\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0]-5/2, 0)\n bb[1] = np.maximum(det[1]-5/2, 0)\n bb[2] = np.minimum(det[2]+5/2, img_size[1])\n bb[3] = np.minimum(det[3]+5/2, img_size[0])\n faces.append(img[bb[1]:bb[3], bb[0]:bb[2], :])\n faces_rects.append({'name': 'none', 'x': bb[0], 'y': bb[1], 'w': bb[2]-bb[0], 'h': bb[3]-bb[1]})\n\n return [img, faces, faces_rects]",
"async def asyncRedetectFaces():\n faceEngine = VLFaceEngine()\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n\n imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)\n severalFaces = detector.detect([imageWithSeveralFaces], detect5Landmarks=False, detect68Landmarks=False)\n\n detections = await detector.redetect(\n images=[\n ImageForRedetection(imageWithSeveralFaces, [face.boundingBox.rect for face in severalFaces[0]]),\n ],\n asyncEstimate=True,\n )\n pprint.pprint(detections)\n task1 = detector.redetect(\n images=[\n ImageForRedetection(imageWithSeveralFaces, [severalFaces[0][0].boundingBox.rect]),\n ],\n asyncEstimate=True,\n )\n task2 = detector.redetect(\n images=[\n ImageForRedetection(imageWithSeveralFaces, [severalFaces[0][1].boundingBox.rect]),\n ],\n asyncEstimate=True,\n )\n for task in (task1, task2):\n pprint.pprint(task.get())",
"def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()",
"def face_detector(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # faceCascade imports in the previously made classifier\n faceCascade = cv2.CascadeClassifier('src/face_detection/haarcascade_frontalface_default.xml')\n faces = faceCascade.detectMultiScale(\n gray, \n scaleFactor=1.2,\n minNeighbors=1, \n minSize=(100, 100)\n )\n\n return faces",
"def detect_faces(self, image):\n return self.face_detector(image, 1)",
"def __detect_face(self, img):\n gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)\n return self.detector(gray, 1)",
"def detectFaces_allFiles(directory):\n files_list = glob.glob(directory)\n for file in files_list:\n print(file)\n img = cv2.imread(file)\n if img is not None:\n height, width, channel = img.shape\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n age = find_age(file)\n\n if age >= 0:\n if faces != ():\n for (x, y, w, h) in faces:\n # On decale y vers le haut pour mieux centrer le visage\n if y - int(0.1*h) >= 0:\n y -= int(0.1*h)\n h *= 1.2\n else:\n h += y + int(0.1*h)\n y = 0\n if h > width:\n h = width\n # A partir de l'origine du visage (point en haut a gauche), on definit\n # notre carre, de cote le nouveau h\n if x + 0.8*h > width:\n x_right = width\n x_left = width - int(h)\n elif x - 0.2*h < 0:\n x_left = 0\n x_right = int(h)\n else:\n x_right = min(int(x) + int(0.8*h), int(width))\n x_left = int(x_right) - int(h)\n y_top = int(y)\n y_bottom = int(y) + int(h)\n roi_color = img[y_top:y_bottom, x_left:x_right]\n cv2.imwrite(\"./FacePhoto/{}.jpg\".format(extract_filename(file)), resize_image(roi_color, 227))\n else:\n files_list.remove(file)\n else:\n files_list.remove(file)\n cv2.destroyAllWindows()\n return files_list",
"def recogniseFace(self, imagefilenames, selectedFileName, selectedDirectory, numOfEigenfaces, thresholdVal):\r\n print 'recogniseFace()::'\r\n self.facet.checkCache(selectedDirectory, imagefilenames, numOfEigenfaces)\r\n mindist, matchfile = self.facet.findMatchingImage(selectedFileName, numOfEigenfaces, thresholdVal)\r\n self.processMatchResult(matchfile, mindist, numOfEigenfaces)",
"def findFaces(self):\n\t\trects = self.detectAll()\n\t\tif len(rects)==0:\n\t\t\trects = []\n\t\telse:\n\t\t\trects[:, 2:] += rects[:, :2]\n\t\tself.analyzeFrame(rects)",
"def classify_face(im):\r\n faces = get_encoded_faces()\r\n faces_encoded = list(faces.values())\r\n known_face_names = list(faces.keys())\r\n\r\n img = cv2.imread(im, 1)\r\n \"\"\"\r\n Resize optinal \r\n \"\"\"\r\n #img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\r\n #img = img[:,:,::-1]\r\n face_locations = face_recognition.face_locations(img)\r\n unknown_face_encodings = face_recognition.face_encodings(img, face_locations)\r\n\r\n face_names = []\r\n for face_encoding in unknown_face_encodings:\r\n # See if the face is a match for the known face(s)\r\n matches = face_recognition.compare_faces(faces_encoded, face_encoding)\r\n name = \"Unknown\"\r\n\r\n # use the known face with the smallest distance to the new face\r\n face_distances = face_recognition.face_distance(faces_encoded, face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n name = known_face_names[best_match_index]\r\n\r\n face_names.append(name)\r\n\r\n \"\"\"\r\n All the photo lables in the faces foler end with (number) so a simiple .find(\"(\") command takes the () away from\r\n the label leaving us with the full name of the person\r\n\r\n \"\"\"\r\n\r\n result = name.find('(') \r\n fullname = (name[:result])\r\n \"\"\"\r\n If face_recogntion module recognizes a face but that face is not in the faces module then \r\n it will print unknown and we print 12345678 to use it on the start attednace program \r\n\r\n \"\"\"\r\n if (name == \"Unknown\"):\r\n print(\"12345678\")\r\n else:\r\n \"\"\"\r\n f'{len(face_locayion)}-people - will return the number of people in photo taken by Nao'\r\n \"\"\"\r\n print (f'{len(face_locations)}-people')\r\n print (fullname)\r\n print(courseid)\r\n print (lateornot)\r\n c34 = fullname.find(' ')\r\n firstname = (fullname[:c34])\r\n lastname = (fullname[c34:])\r\n \"\"\"\r\n We get all the data courseid , fristname , lastname, datetime1,and late or not and submited on the website \r\n \r\n\r\n \"\"\"\r\n login_data = {\r\n\t 'Course': courseid,\r\n\t 'FirstName': firstname,\r\n\t 'LastName': lastname,\r\n\t 'Date': datetime2,\r\n\t 'Attendance': 'on',\r\n\t 'Late': latev,\r\n\t 'submitbutton': 'Submit'\r\n }\r\n if(fullname == \"Unknow\"):\r\n \tprint(\"I-dont-know-you\")\r\n else:\r\n \r\n with requests.Session() as s:\r\n \turl = \"https://rbattendance.000webhostapp.com/update.php\"\r\n \tr = s.get(url)\r\n \tsoup = BeautifulSoup(r.content, 'html5lib')\r\n \tr = s.post(url, data = login_data)\r\n \t#print(r.content)\r\n \r\n \r\n\r\n\r\n\r\n\r\n \"\"\"\r\n This for loop is reponsible for drawing on the image \r\n \"\"\"\r\n\r\n for (top, right, bottom, left), name in zip(face_locations, face_names):\r\n # Draw a box around the face\r\n cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)\r\n\r\n # Draw a label with a name below the face\r\n cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)\r\n font = cv2.FONT_HERSHEY_DUPLEX\r\n cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)\r\n\r\n\r\n # Display the resulting image\r\n \r\n \r\n while True:\r\n #cv2.imshow('Video', img)\r\n #if cv2.waitKey(1) & 0xFF == ord('q'):\r\n return face_names",
"def detection():\n faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.3,\n minNeighbors=3,\n minSize=(30, 30)\n )\t#Haar-cascade: A Face detection algorithm\n\n area = faces[:,2] * faces[:,3]\n faces = np.c_[faces,area]\t#concatenates area values to last column of 'face' array.\n\n print('All detected faces\\n',faces)\n i,j = unravel_index(faces.argmax(), faces.shape)\t# gets the position of maximum value from 'face' array.\n print(i,j)\n print(\"Found %d Face%s!\" %(len(faces),\"s\"[len(faces)==1:]))\n\n X = faces[i,0]\n Y = faces[i,1]\n W = faces[i,2]\n H = faces[i,3]\n \n cv2.rectangle(image, (X, Y), (X + W, Y + H), (0, 255, 0), 2)\n roi_color = image[Y:Y + H, X:X + W] \n print(\"Face(largest) Extracted.\")\n cv2.imwrite('Extracted_face.jpg', roi_color)\t#Image Extraction.\n status = cv2.imwrite('Output.jpg', image)\n print(\"Image Output.jpg written to filesystem: \", status)",
"def find_all_faces_in_one_img(img_path, detector, img_size, dst_path):\r\n\r\n img_path = img_path.replace('\\\\', '/')\r\n img_name = img_path.split(sep='/')[-1].split(sep='.')[0]\r\n\r\n assert img_path.split(sep='/')[-1].split(sep='.')[1] in [\r\n 'png', 'jpg'], 'files should be images with a \".jpg\" or \".png\" extension !'\r\n\r\n img_extension = '.jpg'\r\n\r\n all_detected_faces, detection_status = faces_detector(\r\n img_path, detector, img_size, threshold_confidence=0.90)\r\n\r\n if detection_status == 'success':\r\n for faces in all_detected_faces:\r\n cv.imwrite(os.path.join(dst_path, img_name + img_extension), faces)\r\n elif detection_status == 'failure':\r\n os.remove(img_path)\r\n\r\n return detection_status",
"def detect_faces(path):\n from google.cloud import vision\n from PIL import Image, ImageDraw\n import io\n\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.Image(content=content)\n response = client.face_detection(image=image)\n faces = response.face_annotations\n face_distance = [10000000] * len(faces)\n face_area = []\n face_vertices = []\n\n counter = 0\n for face in faces:\n face_vertices.append((face.bounding_poly.vertices[0].x, face.bounding_poly.vertices[0].y))\n face_area.append(area((face.bounding_poly.vertices[0].x, face.bounding_poly.vertices[0].y), \n (face.bounding_poly.vertices[1].x, face.bounding_poly.vertices[1].y),\n (face.bounding_poly.vertices[2].x, face.bounding_poly.vertices[2].y)))\n im = Image.open(path)\n cropped = im.crop((face.bounding_poly.vertices[0].x, face.bounding_poly.vertices[0].y, face.bounding_poly.vertices[2].x, face.bounding_poly.vertices[2].y))\n #cropped.show()\n cropped.save(\"./media/images/\" + str(counter) + \".jpg\")\n counter += 1\n \n for i in range(len(faces)):\n min_dist = 0\n for j in range(len(faces)):\n distance = dist(face_vertices[i], face_vertices[j])\n if distance > 0 and (face_area[i] + face_area[j]) / distance < face_distance[i]: \n face_distance[i] = (face_area[i] + face_area[j]) / distance\n \n \n with Image.open(path) as im:\n counter = 0\n \n draw = ImageDraw.Draw(im)\n for face in faces:\n draw.rectangle([face.bounding_poly.vertices[counter].x, face.bounding_poly.vertices[counter].y,\n face.bounding_poly.vertices[counter + 2].x, face.bounding_poly.vertices[counter + 2].y], None, \"#0000ff\", 3)\n for i in range(len(faces)):\n if face_distance[i] < 30 or len(faces) == 1: colour = \"#00ff00\"\n else: colour = \"#ff0000\"\n draw.rectangle([faces[i].bounding_poly.vertices[0].x, faces[i].bounding_poly.vertices[0].y,\n faces[i].bounding_poly.vertices[2].x, faces[i].bounding_poly.vertices[2].y], None, colour, 3)\n draw.text((faces[i].bounding_poly.vertices[0].x - 10, faces[i].bounding_poly.vertices[0].y - 10), str(i+1), \"#ff0000\",font=None, anchor=None, spacing=4, align='left', direction=None, features=None, language=None, stroke_width=1, stroke_fill=None, embedded_color=False)\n\n im.save(\"./media/images/upload.jpg\")\n return len(faces)\n if response.error.message:\n raise Exception('Error')",
"def detect_face(self, img):\n #convert the test image to gray image as opencv face detector expects gray images\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n #let's detect multiscale (some images may be closer to camera than others) images\n #result is a list of faces\n faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5);\n\n #if no faces are detected then return None\n if (len(faces) == 0):\n return None, None\n\n #under the assumption that there will be only one face,\n #extract the face area\n (x, y, w, h) = faces[0]\n\n #return only the face part of the image\n return gray[y:y+w, x:x+h], faces[0]",
"def get_check_folder():\r\n filelist = [file for file in os.listdir('temp') if file.endswith('.png')]\r\n image_count = len(filelist)\r\n if image_count == 0:\r\n print\"No faces detected in image.\"\r\n exit()\r\n print \"Detected \"+str(image_count)+\" faces in the image.\"\r\n if filelist:\r\n for image_path in filelist:\r\n target = cv2.imread(\"temp/\" + image_path)\r\n cv2.imshow(\"detected face\", target)\r\n k = cv2.waitKey(1) & 0xFF\r\n img_to_del = Image.open(\"temp/\" + image_path)\r\n for folder in get_immediate_subdirectories():\r\n count = 0\r\n val = 0\r\n folder_filelist = [file for file in os.listdir(\"detected_faces/\" + folder) if\r\n file.endswith('.png')]\r\n for file in folder_filelist:\r\n img_to_compare = Image.open(\"detected_faces/\" + folder + \"/\" + file)\r\n if img_to_del.size > img_to_compare.size:\r\n temp_image_resized = img_to_del.resize(img_to_compare.size, Image.ANTIALIAS)\r\n index = get_ssim(temp_image_resized, img_to_compare)\r\n elif img_to_del.size < img_to_compare.size:\r\n img_to_compare = img_to_compare.resize(img_to_del.size, Image.ANTIALIAS)\r\n index = get_ssim(img_to_del, img_to_compare)\r\n else:\r\n index = get_ssim(img_to_del, img_to_compare)\r\n val += index\r\n count += 1\r\n if count > 0:\r\n index = val/count\r\n if index > min_ssim_index_val:\r\n print \" Detected a face in DB folder \"+ folder\r\n if os.path.isfile(os.path.join(temp_path, image_path)):\r\n os.remove(os.path.join(temp_path, image_path))",
"def detect_faces(image):\n\n face_locations = face_recognition.face_locations(image)\n return face_locations",
"def faces(self, image):\n\n response = self._send_request(\"faces\", files=dict(image=image))\n return response['objectdetection']",
"def face_detect(sess, net, image_name):\n\n\t# Load the demo image\n\tim_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n\tim = cv2.imread(im_file)\n\n\t# Detect all object classes and regress object bounds\n\ttimer = Timer()\n\ttimer.tic()\n\t# scores, boxes = im_detect(sess, net, im)\n\tscores, boxes, eyes, smiles = im_detect_ori(sess, net, im)\n\ttimer.toc()\n\tprint ('Detection took {:.3f}s for '\n\t\t\t'{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n\t# Visualize detections for each class\n\t# im = im[:, :, (2, 1, 0)]\n\t# fig, ax = plt.subplots(figsize=(8, 8))\n\t# ax.imshow(im, aspect='equal')\n\n\tCONF_THRESH = 0.9\n\tNMS_THRESH = 0.3\n\tfor cls_ind, cls in enumerate(CLASSES[20:]):\n\t\tcls_ind += 20 # because we skipped everything except face\n\t\tcls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n\t\tcls_scores = scores[:, cls_ind]\n\t\tdets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)\n\t\tkeep = nms(dets, NMS_THRESH)\n\t\tdets = dets[keep, :]\n\t\teye = eyes[keep, :]\n\t\tsmile= smiles[keep, :]\n\n\tinds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n\tface_num = len(inds)\n\tprint '{} faces detected!'.format(face_num)\n\tdets = dets[inds, :]\n\teye = eye[inds, 1]\n\tsmile = smile[inds, 1]\n\n\treturn dets, eye, smile",
"def recognize_faces(image_file_path):\n image_pil = Image.open(image_file_path)\n draw = ImageDraw.Draw(image_pil)\n\n known_face_encodings_dict = get_known_face_encodings_dict()\n known_names = list(known_face_encodings_dict.keys())\n known_face_encodings = list(known_face_encodings_dict.values())\n\n del known_face_encodings_dict\n\n for face_location in face_detection.get_face_locations(image_file_path):\n face_encoding = get_face_encodings(\n image_file_path, known_face_locations=[face_location]\n )[0]\n\n recognition_flags = face_recognition.compare_faces(\n known_face_encodings, face_encoding\n )\n\n for flag, name in zip(recognition_flags, known_names):\n if not flag:\n continue\n\n top, right, bottom, left = face_location\n draw.rectangle((left, top, right, bottom), outline=\"#FF1493\")\n text_width, text_height = draw.textsize(name)\n draw.rectangle(\n (left, bottom, right, bottom + text_height + 10),\n fill=\"#FF1493\",\n outline=\"#FF1493\",\n )\n draw.text((left + 6, bottom + 5), name, fill=\"white\")\n\n del draw # conserve resources\n image_pil.show()",
"def detect_face(gray):\r\n face_cascade = cv2.CascadeClassifier(classifier_file_name)\r\n faces = face_cascade.detectMultiScale(gray, scaleFactor=scale_factor,minNeighbors=min_neighbors,minSize=min_size,flags=flags)\r\n return faces",
"def get_faces(self, image):\n\t\t\n\t\t# Convert the image to grayscale and normalise\n\t\tcv.CvtColor(image, self.gray, cv.CV_BGR2GRAY)\n\t\tcv.EqualizeHist(self.gray, self.gray)\n\t\t\n\t\t# Detect faces\n\t\treturn cv.HaarDetectObjects(self.gray, self.cascade, self.storage,\n\t\t scale_factor = 1.3,\n\t\t min_neighbors = 2,\n\t\t flags = cv.CV_HAAR_DO_CANNY_PRUNING,\n\t\t min_size = (40,40))",
"def make_face_recognition(update: Update, _: CallbackContext) -> None:\n # message.photo is a list of PhotoSize objects,\n # which represent different sizes of the same photo\n\n # print(\"Enter to make_face_recognition\")\n img_from_user = update.message.photo[-1].get_file()\n img_file = io.BytesIO()\n img_from_user.download(out=img_file)\n img_array = face_recognition.load_image_file(img_file)\n # Find all the faces in the image\n face_locations = face_recognition.face_locations(img_array)\n # print(face_locations)\n img_with_rects = _make_rects(img_array, face_locations)\n out_file = 'tmp.jpg'\n Image.fromarray(img_with_rects, 'RGB').save(out_file, format=\"JPEG\")\n update.message.bot.send_photo(\n update.message.chat_id,\n photo=open(out_file, 'rb'))",
"def _box_faces(image):\n for face in image.faces:\n _box_face(image, face)\n return image",
"def face_detection(image, xml_path):\n\n face_cascade = cv2.CascadeClassifier(xml_path)\n faces = face_cascade.detectMultiScale(image, 1.3, 5)\n\n images = []\n\n for face in faces:\n x_beginning, y_beginning, face_width, face_height = face\n roi_img = image[y_beginning:y_beginning + face_height, x_beginning:x_beginning + face_width]\n\n images.append(roi_img)\n\n return faces, images",
"def face_detection(img, faceCascade=faceCascade):\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tfaces = faceCascade.detectMultiScale(\n\t\tgray,\n\t\tscaleFactor=1.2,\n\t\tminNeighbors=5,\n\t\tminSize=(32, 32))\n\n\t# If no face detected\n\tif len(faces) == 0:\n\t\tw = min(img.shape[0], img.shape[1])\n\t\treturn img[(img.shape[0]-w)//2:(img.shape[0]+w)//2, (img.shape[1]-w)//2:(img.shape[1]+w)//2, :]\n\n\t# If faces detected, choose the face with the max size\n\tmax_h, index = 0, 0\n\tfor i, (x, y, w, h) in enumerate(faces):\n\t\tif max_h < h:\n\t\t\tmax_h, index = h, i\n\n\t(x, y, w, h) = faces[index]\n\n\tif img.shape[0]>img.shape[1]:\n\t\tif x + w/2 < img.shape[0]/2:\n\t\t\treturn img[:img.shape[1],:,:]\n\n\t\telse:\n\t\t\treturn img[-img.shape[1]:,:,:]\n\n\telse:\n\t\tif y + h/2 < img.shape[1]/2:\n\t\t\treturn img[:,:img.shape[0],:]\n\n\t\telse:\n\t\t\treturn img[:,-img.shape[0]:,:]",
"def detect_face(image):\n cascadePath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(cascadePath)\n faces = faceCascade.detectMultiScale(image)\n if len(faces)>=1:#Should be == , not >=\n return True\n return False",
"def detect_face_task(img):\n\n # paramter for detect\n # image_size = 160\n # margin = 44\n minsize = 20 # minimum size of face\n threshold = [0.6, 0.7, 0.7] # three steps's threshold\n factor = 0.709 # scale factor\n\n # caffe model\n pnet = caffe_model.get_pnet()\n rnet = caffe_model.get_rnet()\n onet = caffe_model.get_onet()\n\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n print('detect bounding: ', bounding_boxes)\n print('Find faces: ', bounding_boxes.shape[0])\n\n # all_faces is faces information list, include face bytes, face position\n all_faces = []\n for face_position in bounding_boxes:\n face_position = face_position.astype(int)\n print('face position: ', face_position)\n\n # each face information, include position, face image\n head_rect = face_position[:4].tolist() # numpy array to python list\n head_img = misc.toimage(img).crop(head_rect)\n head_img_io = StringIO.StringIO()\n head_img.save(head_img_io, format='JPEG')\n head_img_b64 = base64.b64encode(head_img_io.getvalue())\n\n # construct response\n face_info = {}\n face_info['rect'] = head_rect\n face_info['image'] = head_img_b64\n\n all_faces.append(face_info)\n\n return all_faces",
"def get_faces(image):\n return (image.crop(face) for face in image.faces)",
"def crop_faces(cv_imgs, only_one=True, using_bundled_library=False):\n faces_coords = []\n\n if using_bundled_library:\n for image in cv_imgs:\n coords = face_recognition.face_locations(image)\n coords = [(y2, x1, y1 - y2, x2 - x1) for x1, y1, x2, y2 in coords]\n faces_coords.append(coords)\n return faces_coords\n\n img_with_several_faces = 0\n img_with_no_face = 0\n img_with_one_face = 0\n for index, image in enumerate(cv_imgs):\n faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n faces = faceCascade.detectMultiScale(\n image,\n scaleFactor=1.01,\n minNeighbors=5,\n minSize=(200, 200)\n )\n\n if only_one:\n if len(faces) == 1:\n img_with_one_face += 1\n (x, y, w, h) = faces[0]\n faces_coords.append((x, y, w, h))\n else:\n faces_coords.append(None)\n if len(faces) == 0:\n img_with_no_face += 1\n else:\n img_with_several_faces += 1\n else:\n faces_coords.append(faces)\n\n return faces_coords"
] | [
"0.7132961",
"0.705328",
"0.6980958",
"0.68435824",
"0.6793622",
"0.6746617",
"0.6708263",
"0.6608204",
"0.65846044",
"0.65693116",
"0.6552808",
"0.655144",
"0.6540004",
"0.6527223",
"0.6510737",
"0.6488948",
"0.644064",
"0.6420578",
"0.64109945",
"0.6405241",
"0.64046896",
"0.63688356",
"0.63619804",
"0.6345916",
"0.6340749",
"0.6335363",
"0.63205445",
"0.6320281",
"0.6291996",
"0.62828225"
] | 0.7460728 | 0 |
Asynchronously redetect faces on images. | async def asyncRedetectFaces():
faceEngine = VLFaceEngine()
detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
severalFaces = detector.detect([imageWithSeveralFaces], detect5Landmarks=False, detect68Landmarks=False)
detections = await detector.redetect(
images=[
ImageForRedetection(imageWithSeveralFaces, [face.boundingBox.rect for face in severalFaces[0]]),
],
asyncEstimate=True,
)
pprint.pprint(detections)
task1 = detector.redetect(
images=[
ImageForRedetection(imageWithSeveralFaces, [severalFaces[0][0].boundingBox.rect]),
],
asyncEstimate=True,
)
task2 = detector.redetect(
images=[
ImageForRedetection(imageWithSeveralFaces, [severalFaces[0][1].boundingBox.rect]),
],
asyncEstimate=True,
)
for task in (task1, task2):
pprint.pprint(task.get()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def detect_face_task(img):\n\n # paramter for detect\n # image_size = 160\n # margin = 44\n minsize = 20 # minimum size of face\n threshold = [0.6, 0.7, 0.7] # three steps's threshold\n factor = 0.709 # scale factor\n\n # caffe model\n pnet = caffe_model.get_pnet()\n rnet = caffe_model.get_rnet()\n onet = caffe_model.get_onet()\n\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n print('detect bounding: ', bounding_boxes)\n print('Find faces: ', bounding_boxes.shape[0])\n\n # all_faces is faces information list, include face bytes, face position\n all_faces = []\n for face_position in bounding_boxes:\n face_position = face_position.astype(int)\n print('face position: ', face_position)\n\n # each face information, include position, face image\n head_rect = face_position[:4].tolist() # numpy array to python list\n head_img = misc.toimage(img).crop(head_rect)\n head_img_io = StringIO.StringIO()\n head_img.save(head_img_io, format='JPEG')\n head_img_b64 = base64.b64encode(head_img_io.getvalue())\n\n # construct response\n face_info = {}\n face_info['rect'] = head_rect\n face_info['image'] = head_img_b64\n\n all_faces.append(face_info)\n\n return all_faces",
"def detect_faces(self, img):\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=.7)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = detect_face.create_mtcnn(sess, None)\n\n minsize = 20 # minimum size of face\n threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold\n factor = 0.709 # scale factor\n\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n\n nrof_faces = bounding_boxes.shape[0]\n img_size = np.asarray(img.shape)[0:2]\n\n faces = []\n faces_rects = []\n\n for i in range(nrof_faces):\n det = bounding_boxes[i,0:4]\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0]-5/2, 0)\n bb[1] = np.maximum(det[1]-5/2, 0)\n bb[2] = np.minimum(det[2]+5/2, img_size[1])\n bb[3] = np.minimum(det[3]+5/2, img_size[0])\n faces.append(img[bb[1]:bb[3], bb[0]:bb[2], :])\n faces_rects.append({'name': 'none', 'x': bb[0], 'y': bb[1], 'w': bb[2]-bb[0], 'h': bb[3]-bb[1]})\n\n return [img, faces, faces_rects]",
"async def detect_face(face_file, max_results=4):\n image_content = face_file.read()\n batch_request = [{\n 'image': {\n 'content': base64.b64encode(image_content).decode('utf-8')\n },\n 'features': [{\n 'type': 'FACE_DETECTION',\n 'maxResults': max_results,\n }]\n }]\n\n service = get_vision_service()\n request = service.images().annotate(body={\n 'requests': batch_request,\n })\n loop = asyncio.get_event_loop()\n response = await loop.run_in_executor(None, request.execute)\n\n return response['responses'][0]['faceAnnotations'] if 'faceAnnotations' in response['responses'][0] else None",
"def detectFaces():\n faceEngine = VLFaceEngine()\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n\n imageWithOneFace = VLImage.load(filename=EXAMPLE_O)\n pprint.pprint(detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False).asDict())\n detection = detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False)\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection))\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection.boundingBox.rect))\n\n imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)\n severalFaces = detector.detect([imageWithSeveralFaces], detect5Landmarks=False, detect68Landmarks=False)\n\n pprint.pprint(\n detector.redetect(\n images=[\n ImageForRedetection(imageWithSeveralFaces, [face.boundingBox.rect for face in severalFaces[0]]),\n ImageForRedetection(imageWithOneFace, [detection.boundingBox.rect]),\n ImageForRedetection(imageWithOneFace, [Rect(0, 0, 1, 1)]),\n ]\n )\n )",
"def update(self,image):\r\n \r\n self._faces=[]\r\n \r\n if util.isgray(image):\r\n image=cv2.equalizeHist(image)\r\n \r\n else:\r\n \r\n image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\n cv2.equalizeHist(image,image)\r\n \r\n minsize=util.widthheightdividedby(image,8)\r\n\r\n \r\n\r\n \r\n facerect=self._faceclassifier.detectMultiScale(image,\r\n self.scalefactor,\r\n self.minneighbors,\r\n self.flags,\r\n minsize)\r\n \r\n \"\"\"if facerects is not None:\r\n \r\n for facerect in facerects:\r\n face=face()\r\n \r\n face.facerect=facerect\r\n \r\n \r\n x,y,w,h=facerect\r\n \r\n # Seek an eye in the upper-left part of the face. \r\n searchRect = (x+w/7, y, w*2/7, h/2) \r\n face.leftEyeRect = self._detectOneObject( \r\n self._eyeClassifier, image, searchRect, 64) \r\n \r\n \r\n \r\n # Seek an eye in the upper-right part of the face. \r\n searchRect = (x+w*4/7, y, w*2/7, h/2) \r\n face.rightEyeRect = self._detectOneObject( \r\n self._eyeClassifier, image, searchRect, 64) \r\n \r\n \r\n \r\n # Seek a nose in the middle part of the face. \r\n searchRect = (x+w/4, y+h/4, w/2, h/2) \r\n face.noseRect = self._detectOneObject( \r\n self._noseClassifier, image, searchRect, 32) \r\n \r\n # Seek a mouth in the lower-middle part of the face. \r\n searchRect = (x+w/6, y+h*2/3, w*2/3, h/3) \r\n face.mouthRect = self._detectOneObject( \r\n self._mouthClassifier, image, searchRect, 16) \r\n \r\n \r\n \r\n self._faces.append(face)\r\n\r\n \r\n \r\n def _detectoneobject(self,\r\n classifier,\r\n image,\r\n rect,\r\n imagesizetominsizeratio):\r\n \r\n x ,y ,w ,h=rect\r\n \r\n minsize=util.widthheightdividedby(image,\r\n imagesizetominsizeratio)\r\n \r\n subimage=image[y:y+h,x:x+w]\r\n \r\n subrect=classifier.dectectMultiScale(subimage,\r\n self.scalefactor,\r\n self.minneighbors,\r\n self.flags,\r\n minsize)\r\n \r\n if len(subrect)==0:\r\n return None\r\n \r\n subx,suby,subw,subh=subrects[0]\r\n \r\n return (x+subx,y+suby,w+subw,h+subh)\r\n \r\n \"\"\"",
"def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()",
"def get_data(self):\n global CAM\n count = 0\n while CAM.isOpened():\n count += 1\n print('COUNT' + str(count))\n _, frame = CAM.read()\n\n # cropped face\n cropped_face, bbox_coordinate, anchor_coordinate = detect_faces(frame)\n if cropped_face is None:\n print(\"NONE FACE DETECTED\")\n sleep(1)\n continue\n\n # get fake face\n fake_face, profile_feature_vector = generate_frontal_face(cropped_face)\n\n cropped_face = cv2.cvtColor(cropped_face, cv2.COLOR_BGR2RGB)\n fake_face = cv2.cvtColor(fake_face, cv2.COLOR_BGR2RGB)\n\n # face matching\n face_matcher = FaceMatcher()\n matched_face, matched_name, matched_front_fake_face, matched_diff = \\\n face_matcher.match(cropped_face, fake_face, profile_feature_vector)\n\n matched_face = cv2.cvtColor(matched_face, cv2.COLOR_BGR2RGB)\n matched_front_fake_face = cv2.cvtColor(matched_front_fake_face, cv2.COLOR_BGR2RGB)\n\n _, cropped_face_jpeg = cv2.imencode('.jpg', cropped_face)\n _, fake_face_jpeg = cv2.imencode('.jpg', fake_face)\n _, matched_face_jpeg = cv2.imencode('.jpg', matched_face)\n _, matched_front_fake_face_jpeg = cv2.imencode('.jpg', matched_front_fake_face)\n\n encoded_cropped_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(cropped_face_jpeg.tobytes()).decode())\n encoded_fake_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(fake_face_jpeg.tobytes()).decode())\n\n encoded_matched_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(matched_face_jpeg.tobytes()).decode())\n encoded_matched_front_fake_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(matched_front_fake_face_jpeg.tobytes()).decode())\n\n # get detection model return here and send to face frontalization model\n SIO.emit('detection', {'cropped_face': encoded_cropped_face,\n 'fake_face': encoded_fake_face,\n 'matched_face': encoded_matched_face,\n 'matched_name': matched_name,\n 'matched_front_fake_face': encoded_matched_front_fake_face,\n 'id': uuid.uuid4().hex},\n namespace='/detections')\n sleep(self.delay)",
"def detect_faces(path):\n from google.cloud import vision\n from PIL import Image, ImageDraw\n import io\n\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.Image(content=content)\n response = client.face_detection(image=image)\n faces = response.face_annotations\n face_distance = [10000000] * len(faces)\n face_area = []\n face_vertices = []\n\n counter = 0\n for face in faces:\n face_vertices.append((face.bounding_poly.vertices[0].x, face.bounding_poly.vertices[0].y))\n face_area.append(area((face.bounding_poly.vertices[0].x, face.bounding_poly.vertices[0].y), \n (face.bounding_poly.vertices[1].x, face.bounding_poly.vertices[1].y),\n (face.bounding_poly.vertices[2].x, face.bounding_poly.vertices[2].y)))\n im = Image.open(path)\n cropped = im.crop((face.bounding_poly.vertices[0].x, face.bounding_poly.vertices[0].y, face.bounding_poly.vertices[2].x, face.bounding_poly.vertices[2].y))\n #cropped.show()\n cropped.save(\"./media/images/\" + str(counter) + \".jpg\")\n counter += 1\n \n for i in range(len(faces)):\n min_dist = 0\n for j in range(len(faces)):\n distance = dist(face_vertices[i], face_vertices[j])\n if distance > 0 and (face_area[i] + face_area[j]) / distance < face_distance[i]: \n face_distance[i] = (face_area[i] + face_area[j]) / distance\n \n \n with Image.open(path) as im:\n counter = 0\n \n draw = ImageDraw.Draw(im)\n for face in faces:\n draw.rectangle([face.bounding_poly.vertices[counter].x, face.bounding_poly.vertices[counter].y,\n face.bounding_poly.vertices[counter + 2].x, face.bounding_poly.vertices[counter + 2].y], None, \"#0000ff\", 3)\n for i in range(len(faces)):\n if face_distance[i] < 30 or len(faces) == 1: colour = \"#00ff00\"\n else: colour = \"#ff0000\"\n draw.rectangle([faces[i].bounding_poly.vertices[0].x, faces[i].bounding_poly.vertices[0].y,\n faces[i].bounding_poly.vertices[2].x, faces[i].bounding_poly.vertices[2].y], None, colour, 3)\n draw.text((faces[i].bounding_poly.vertices[0].x - 10, faces[i].bounding_poly.vertices[0].y - 10), str(i+1), \"#ff0000\",font=None, anchor=None, spacing=4, align='left', direction=None, features=None, language=None, stroke_width=1, stroke_fill=None, embedded_color=False)\n\n im.save(\"./media/images/upload.jpg\")\n return len(faces)\n if response.error.message:\n raise Exception('Error')",
"def camera_operation(self):\r\n ret, self.frame = self.cap.read() #get frame/ read from camera\r\n\r\n #try finding faces\r\n try:\r\n gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\r\n faces = FACE_CASCADE.detectMultiScale(gray, scaleFactor = 1.5, minNeighbors = 5)\r\n #print(faces)\r\n for(x, y, w, h) in faces:\r\n #print(x, y, w, h) \r\n self.roi_gray = gray[y: y+h, x: x+w] #region of interest is face\r\n #Drawing Rectangle\r\n color = (255, 0, 0)\r\n stroke = 2\r\n end_cord_x = x+w\r\n end_cord_y = y+h\r\n cv2.rectangle(self.frame, (x,y), (end_cord_x, end_cord_y), color, stroke)\r\n self.FACE_FOUND = True\r\n\r\n \"\"\"While training if more than one face detected\"\"\"\r\n if (self.TRAIN_FLAG == True) and (len(faces) > 1):\r\n self.pop_window(title=\"Warning\", msg=\"Training takes only one face. \\nMultiple face detected.\")\r\n self.FACE_FOUND = False\r\n\r\n \"\"\"recognize faces, show with name\"\"\"\r\n if self.RECOGNIZE_FLAG == True:\r\n Id, confidence = RECOGNIZER.predict(self.roi_gray)\r\n print(confidence)\r\n \r\n name = self.names[Id-1] #get corresponding name\r\n\r\n \"\"\"if id not found, lock the screen\"\"\"\r\n if (confidence > CONFIDENCE_THRESHOLD) and (self.RECOGNIZE_FLAG == True):\r\n subprocess.call(LOCK_CODE)\r\n print(\"Unknown\")\r\n\r\n \"\"\"put name with face bouding box\"\"\"\r\n #if confidence value less than threshold value,\r\n #the smalller the value the better the accuracy\r\n if (name in self.names) and (confidence < CONFIDENCE_THRESHOLD) and (self.TRAIN_FLAG == False):\r\n cv2.putText(self.frame, name, (x, y+w+20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (250, 250, 250))\r\n print(Id)\r\n\r\n\r\n\r\n \r\n except:\r\n #self.FACE_FOUND = False\r\n pass #run anyway\r\n \r\n\r\n #_______________________Check record flag____________________________________\r\n #print(self.RECORD_FLAG)\r\n if self.RECORD_FLAG == True:\r\n print(\"Recording man!\")\r\n self.video_writer.write(self.frame)\r\n #notify on image about recording\r\n cv2.putText(self.frame, \"Recording..\", (5, 380), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)\r\n\r\n #_______________________Train model with new face____________________________\r\n #print(self.TRAIN_FLAG)\r\n if self.TRAIN_FLAG == True:\r\n #print(\"Training Mode\")\r\n #notify about Training\r\n cv2.putText(self.frame, \"Training Mode\", (5, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)\r\n #put sample number on screen\r\n cv2.putText(self.frame, str(self.sample_num), (10, 300), cv2.FONT_HERSHEY_COMPLEX, 4, (255, 255, 255), 2, cv2.LINE_AA)\r\n \r\n self.counter += 1 #start counter\r\n #print(self.counter)\r\n \r\n if self.sample_num == MAX_SAMPLE_COLLECTION_NUM: #reached max sample number\r\n cv2.putText(self.frame, \"Training, wait!\", (10, 350), cv2.FONT_HERSHEY_COMPLEX, 2, (255, 255, 255), 1, cv2.LINE_AA)\r\n self.update_img_label(self.frame)\r\n self.sample_num = 0 #set sample number to zero\r\n self.TRAIN_FLAG = False #stop saving\r\n self.pop_window(title=\"INFO\", msg=\"Sample images collected, Train?\")\r\n\r\n self.train()\r\n\r\n\r\n elif (self.counter == 12) and (self.FACE_FOUND == True): #after 1 sec and if face found\r\n print(\"saving roi\")\r\n self.sample_num += 1 #increment sample number\r\n cv2.imwrite(f\"{PARENT_PATH}\\\\{DATASET_DIR}\\\\user.{self.id}.{self.sample_num}.jpg\", self.roi_gray)\r\n \r\n self.counter = 0 #make it zero\r\n self.FACE_FOUND = False #False, wait for next face confirmation\r\n\r\n elif self.counter == 12:\r\n print(\"Waiting for face\")\r\n 
self.counter = 0\r\n \r\n\r\n \r\n #_______________set current frame in QLabel___________________\r\n self.update_img_label(self.frame)",
"def recogniseFace(self, imagefilenames, selectedFileName, selectedDirectory, numOfEigenfaces, thresholdVal):\r\n print 'recogniseFace()::'\r\n self.facet.checkCache(selectedDirectory, imagefilenames, numOfEigenfaces)\r\n mindist, matchfile = self.facet.findMatchingImage(selectedFileName, numOfEigenfaces, thresholdVal)\r\n self.processMatchResult(matchfile, mindist, numOfEigenfaces)",
"def run(self):\n cap = cv2.VideoCapture(0)\n\n while True:\n ret, frame = cap.read()\n try:\n name = self.recog(frame)\n boxes, probs, landmarks = mtcnn.detect(frame, landmarks=True)\n if self.last_box is not None:\n # print('last_box: ', self.last_box)\n cx_0, cy_0 = (self.last_box[0][0] + self.last_box[0][2]) // 2, (self.last_box[0][1] + self.last_box[0][3]) // 2\n cx_1, cy_1 = (boxes[0][0] + boxes[0][2]) // 2, (boxes[0][1] + boxes[0][3]) // 2\n w_0, h_0 = self.last_box[0][2] - self.last_box[0][0], self.last_box[0][3] - self.last_box[0][1]\n w_1, h_1 = boxes[0][2] - boxes[0][0], boxes[0][3] - boxes[0][1]\n\n factor_center = 0.3\n new_cx = cx_0 + factor_center * (cx_1 - cx_0)\n new_cy = cy_0 + factor_center * (cy_1 - cy_0)\n\n factor_hw = 0.3\n new_w = w_0 + factor_hw * (w_1 - w_0)\n new_h = h_0 + factor_hw * (h_1 - h_0)\n\n boxes = [[int(new_cx - new_w // 2), int(new_cy - new_h // 2),\n int(new_cx + new_w // 2), int(new_cy + new_h // 2)]]\n\n self.last_box = boxes\n\n # draw on frame\n self._draw(frame, boxes, probs, landmarks, name)\n print(name)\n # draw on frame\n\n except:\n pass\n\n # Show the frame\n cv2.imshow('Face Detection', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()",
"def face_detector_process(self, frame):\n frame = self.frame_pre_process(frame)\n\n # Clear Face detector from previous frame\n self.face_detector.clear()\n\n # When we use async IE use buffer by using Queue\n self.face_detector.start_async(frame)\n\n # Predict and return ROI\n rois = self.face_detector.get_roi_proposals(frame)\n\n if self.QUEUE_SIZE_NUM < len(rois):\n log.warning(\"Too many faces for processing.\" \\\n \" Will be processed only %s of %s.\" % \\\n (self.QUEUE_SIZE_NUM, len(rois)))\n rois = rois[:self.QUEUE_SIZE_NUM]\n \n self.rois = rois\n \n return (rois)",
"def face_detector(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # faceCascade imports in the previously made classifier\n faceCascade = cv2.CascadeClassifier('src/face_detection/haarcascade_frontalface_default.xml')\n faces = faceCascade.detectMultiScale(\n gray, \n scaleFactor=1.2,\n minNeighbors=1, \n minSize=(100, 100)\n )\n\n return faces",
"def track_faces_in_video(self):\r\n\r\n logger.debug('Executing face tracking')\r\n\r\n track_loaded = False\r\n\r\n # Try to load YAML file with tracking results\r\n if os.path.exists(self.track_file_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n track_faces = utils.load_YAML_file(self.track_file_path)\r\n\r\n if track_faces:\r\n self.tracked_faces = track_faces\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n track_loaded = True\r\n\r\n if not track_loaded:\r\n\r\n # Check existence of detection results\r\n\r\n if len(self.detected_faces) == 0:\r\n\r\n # Try to load YAML file\r\n if os.path.exists(self.det_file_path):\r\n\r\n print 'Loading YAML file with detection results'\r\n logger.debug('Loading YAML file with detection results')\r\n\r\n with open(self.det_file_path) as f:\r\n\r\n self.detected_faces = yaml.load(f)\r\n\r\n print 'YAML file with detection results loaded'\r\n logger.debug('YAML file with detection results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! No detection results found!'\r\n logger.warning('No detection results found!')\r\n\r\n return\r\n\r\n # Get shot cuts\r\n self.calc_hist_diff()\r\n\r\n print '\\n\\n### Face tracking ###\\n'\r\n logger.debug('\\n\\n### Face tracking ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n self.tracked_faces = []\r\n\r\n self.disc_tracked_faces = []\r\n\r\n # Counter for frames with detected faces\r\n frame_counter = 0\r\n\r\n # If a reduced frame rate is used, frames are less\r\n use_or_fps = c.USE_ORIGINAL_FPS\r\n used_fps = c.USED_FPS\r\n min_segment_duration = c.MIN_SEGMENT_DURATION\r\n tracking_min_int_area = c.TRACKING_MIN_INT_AREA\r\n min_size_width = c.FACE_DETECTION_MIN_SIZE_WIDTH\r\n min_size_height = c.FACE_DETECTION_MIN_SIZE_HEIGHT\r\n max_fr_with_miss_det = c.MAX_FR_WITH_MISSED_DET\r\n use_aligned_face = c.USE_ALIGNED_FACE_IN_TRACKING\r\n\r\n if self.params is not None:\r\n if c.USE_ORIGINAL_FPS_KEY in self.params:\r\n use_or_fps = self.params[c.USE_ORIGINAL_FPS_KEY]\r\n if c.USED_FPS_KEY in self.params:\r\n used_fps = self.params[c.USED_FPS_KEY]\r\n if c.MIN_SEGMENT_DURATION_KEY in self.params:\r\n min_segment_duration = self.params[\r\n c.MIN_SEGMENT_DURATION_KEY]\r\n if c.TRACKING_MIN_INT_AREA_KEY in self.params:\r\n tracking_min_int_area = self.params[\r\n c.TRACKING_MIN_INT_AREA_KEY]\r\n if c.MIN_SIZE_WIDTH_KEY in self.params:\r\n min_size_width = self.params[c.MIN_SIZE_WIDTH_KEY]\r\n if c.MIN_SIZE_HEIGHT_KEY in self.params:\r\n min_size_height = self.params[c.MIN_SIZE_HEIGHT_KEY]\r\n if c.MAX_FR_WITH_MISSED_DET_KEY in self.params:\r\n max_fr_with_miss_det = self.params[\r\n c.MAX_FR_WITH_MISSED_DET_KEY]\r\n if c.USE_ALIGNED_FACE_IN_TRACKING_KEY in self.params:\r\n use_aligned_face = self.params[\r\n c.USE_ALIGNED_FACE_IN_TRACKING_KEY]\r\n\r\n # Minimum duration of a segment in frames\r\n min_segment_frames = int(\r\n math.ceil(self.fps * min_segment_duration))\r\n\r\n if not use_or_fps:\r\n min_segment_frames = int(\r\n math.ceil((used_fps + 1) * min_segment_duration))\r\n\r\n # Make copy of detected faces\r\n detection_list = list(self.detected_faces)\r\n\r\n # Iterate through frames in detected_faces\r\n for detection_dict in detection_list:\r\n\r\n self.progress = 100 * (frame_counter / self.saved_frames)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n elapsed_s = 
detection_dict[c.ELAPSED_VIDEO_TIME_KEY]\r\n\r\n frame_name = detection_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n faces = detection_dict[c.FACES_KEY]\r\n\r\n face_counter = 0\r\n\r\n # Iterate though faces in frame\r\n for face_dict in faces:\r\n\r\n track_window = face_dict[c.BBOX_KEY]\r\n\r\n left_eye_pos = face_dict[c.LEFT_EYE_POS_KEY]\r\n\r\n right_eye_pos = face_dict[c.RIGHT_EYE_POS_KEY]\r\n\r\n nose_pos = face_dict[c.NOSE_POSITION_KEY]\r\n\r\n file_name = face_dict[c.ALIGNED_FACE_FILE_NAME_KEY]\r\n\r\n # Counter for faces in segment\r\n segment_face_counter = 1\r\n\r\n segment_frame_list = []\r\n\r\n # Start new segment\r\n segment_frame_dict = {c.FRAME_COUNTER_KEY: frame_counter,\r\n c.ELAPSED_VIDEO_TIME_KEY: elapsed_s,\r\n c.DETECTION_BBOX_KEY: track_window,\r\n c.TRACKING_BBOX_KEY: track_window,\r\n c.LEFT_EYE_POS_KEY: left_eye_pos,\r\n c.RIGHT_EYE_POS_KEY: right_eye_pos,\r\n c.NOSE_POSITION_KEY: nose_pos,\r\n c.ALIGNED_FACE_FILE_NAME_KEY: file_name,\r\n c.DETECTED_KEY: True,\r\n c.SAVED_FRAME_NAME_KEY: frame_name}\r\n\r\n segment_frame_list.append(segment_frame_dict)\r\n\r\n aligned_file_path = None\r\n rgb_roi = None\r\n if use_aligned_face:\r\n # Use the aligned face as the\r\n # Region of Interest for tracking\r\n complete_file_name = file_name + '.png'\r\n aligned_file_path = os.path.join(\r\n self.align_path, complete_file_name)\r\n\r\n rgb_roi = cv2.imread(\r\n aligned_file_path, cv2.IMREAD_COLOR)\r\n\r\n else:\r\n # Use detected face as the\r\n # Region of Interest for tracking\r\n x0 = track_window[0]\r\n y0 = track_window[1]\r\n w = track_window[2]\r\n h = track_window[3]\r\n x1 = x0 + w\r\n y1 = y0 + h\r\n\r\n frame_path = os.path.join(\r\n self.frames_path, frame_name)\r\n\r\n # Whole frame\r\n rgb = cv2.imread(frame_path, cv2.IMREAD_COLOR)\r\n\r\n # Face\r\n rgb_roi = rgb[y0:y1, x0:x1]\r\n\r\n if rgb_roi is None:\r\n print('Warning! Face to be tracked is None')\r\n\r\n if use_aligned_face:\r\n logger.warning(\r\n 'Face ' + aligned_file_path + ' is None')\r\n else:\r\n logger.warning(\r\n 'Face from frame ' + frame_name + ' is None')\r\n\r\n face_counter += 1\r\n\r\n continue\r\n\r\n # Convert image to hsv\r\n hsv_roi = cv2.cvtColor(rgb_roi, cv2.COLOR_BGR2HSV)\r\n\r\n mask_roi = cv2.inRange(\r\n hsv_roi, np.array((0., 60., 32.)),\r\n np.array((180., 255., 255.)))\r\n\r\n hist = cv2.calcHist(\r\n [hsv_roi], [0], mask_roi, [16], [0, 180])\r\n\r\n cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)\r\n hist = hist.reshape(-1)\r\n\r\n # Face should not be considered anymore\r\n del (detection_list[frame_counter]\r\n [c.FACES_KEY][face_counter])\r\n\r\n sub_frame_counter = frame_counter + 1\r\n\r\n missed_det_counter = 0\r\n\r\n # Iterate through subsequent frames\r\n for sub_det_dict in detection_list[sub_frame_counter:]:\r\n\r\n # Check if a new shot begins\r\n if sub_frame_counter in self.cut_idxs:\r\n break\r\n\r\n sub_frame_name = sub_det_dict[c.SAVED_FRAME_NAME_KEY]\r\n\r\n sub_frame_path = os.path.join(\r\n self.frames_path, sub_frame_name)\r\n\r\n # Read image from given path\r\n sub_image = cv2.imread(\r\n sub_frame_path, cv2.IMREAD_COLOR)\r\n\r\n if sub_image is None:\r\n print('Warning! 
Image is None')\r\n logger.warning(\r\n 'Image ' + sub_frame_path + ' is None')\r\n\r\n continue\r\n\r\n # Convert image to hsv\r\n sub_hsv = cv2.cvtColor(sub_image, cv2.COLOR_BGR2HSV)\r\n\r\n sub_mask = cv2.inRange(sub_hsv,\r\n np.array((0., 60., 32.)),\r\n np.array((180., 255., 255.)))\r\n\r\n # Apply meanshift to get the new location\r\n prob = cv2.calcBackProject(\r\n [sub_hsv], [0], hist, [0, 180], 1)\r\n prob &= sub_mask\r\n term_crit = (cv2.TERM_CRITERIA_EPS\r\n | cv2.TERM_CRITERIA_COUNT, 10, 1)\r\n\r\n track_box, track_window = cv2.CamShift(\r\n prob, track_window, term_crit)\r\n\r\n track_x0 = track_window[0]\r\n track_y0 = track_window[1]\r\n track_w = track_window[2]\r\n track_h = track_window[3]\r\n\r\n # Check size of track window\r\n if ((track_w <= min_size_width)\r\n or (track_h <= min_size_height)):\r\n\r\n break\r\n\r\n segment_frame_dict = {}\r\n\r\n track_list = (\r\n int(track_x0), int(track_y0), int(track_w),\r\n int(track_h))\r\n\r\n segment_frame_dict[c.TRACKING_BBOX_KEY] = track_list\r\n\r\n sub_faces = sub_det_dict[c.FACES_KEY]\r\n\r\n sub_face_counter = 0\r\n\r\n sim = False\r\n\r\n det_bbox = None\r\n\r\n for sub_face_dict in sub_faces:\r\n\r\n det_bbox = sub_face_dict[c.BBOX_KEY]\r\n\r\n # If track window corresponds to\r\n # a detected face,\r\n # delete detection from list\r\n\r\n (sim, int_area, int_area_pct) = utils.is_rect_similar(\r\n track_window, det_bbox, tracking_min_int_area)\r\n\r\n if sim:\r\n # det_face_counter = det_face_counter + 1\r\n\r\n track_window = det_bbox\r\n\r\n break\r\n\r\n sub_face_counter += 1\r\n\r\n t_x0 = track_window[0]\r\n t_y0 = track_window[1]\r\n t_w = track_window[2]\r\n t_h = track_window[3]\r\n\r\n segment_frame_dict[c.DETECTION_BBOX_KEY] = det_bbox\r\n\r\n # If a detected face corresponds to track window\r\n # delete detected face from detection list\r\n\r\n if sim:\r\n\r\n missed_det_counter = 0\r\n\r\n segment_frame_dict[c.DETECTED_KEY] = True\r\n\r\n segment_frame_dict[c.LEFT_EYE_POS_KEY] = (\r\n sub_face_dict[c.LEFT_EYE_POS_KEY])\r\n segment_frame_dict[c.RIGHT_EYE_POS_KEY] = (\r\n sub_face_dict[c.RIGHT_EYE_POS_KEY])\r\n\r\n segment_frame_dict[c.NOSE_POSITION_KEY] = (\r\n sub_face_dict[c.NOSE_POSITION_KEY])\r\n\r\n segment_frame_dict[c.ALIGNED_FACE_FILE_NAME_KEY] = (\r\n sub_face_dict[c.ALIGNED_FACE_FILE_NAME_KEY])\r\n\r\n del (detection_list[sub_frame_counter]\r\n [c.FACES_KEY][sub_face_counter])\r\n\r\n else:\r\n\r\n # Check if distance from last detection\r\n # is too big\r\n missed_det_counter += 1\r\n\r\n if missed_det_counter > max_fr_with_miss_det:\r\n\r\n # Remove last frames and\r\n # interrupt tracking\r\n for i in range(0, max_fr_with_miss_det):\r\n segment_frame_list.pop()\r\n\r\n segment_face_counter = (\r\n segment_face_counter - max_fr_with_miss_det)\r\n\r\n break\r\n\r\n segment_frame_dict[c.DETECTED_KEY] = False\r\n\r\n elapsed_ms = sub_det_dict[c.ELAPSED_VIDEO_TIME_KEY]\r\n\r\n # Update list of frames for segment\r\n segment_frame_dict[\r\n c.FRAME_COUNTER_KEY] = sub_frame_counter\r\n segment_frame_dict[\r\n c.ELAPSED_VIDEO_TIME_KEY] = elapsed_ms\r\n\r\n track_list = (\r\n int(t_x0), int(t_y0), int(t_w), int(t_h))\r\n\r\n segment_frame_dict[c.TRACKING_BBOX_KEY] = track_list\r\n segment_frame_dict[\r\n c.SAVED_FRAME_NAME_KEY] = sub_frame_name\r\n\r\n segment_frame_list.append(segment_frame_dict)\r\n\r\n del sub_image\r\n\r\n sub_frame_counter += 1\r\n\r\n segment_face_counter += 1\r\n\r\n # Segment must be considered only if its number\r\n # of frames is greater or equals than a minimum\r\n if 
segment_face_counter >= min_segment_frames:\r\n\r\n segments = self.divide_segment_by_face(\r\n segment_frame_list)\r\n\r\n if len(segments) > 0:\r\n self.tracked_faces.extend(segments)\r\n\r\n else:\r\n\r\n segment_dict = {c.FRAMES_KEY: segment_frame_list}\r\n\r\n self.disc_tracked_faces.append(segment_dict)\r\n\r\n # Check histograms of detected faces and\r\n # divide segment accordingly\r\n\r\n face_counter += 1\r\n\r\n frame_counter += 1\r\n\r\n # Create directory for this video\r\n\r\n if not (os.path.exists(self.track_path)):\r\n os.makedirs(self.track_path)\r\n\r\n # Save tracking result in YAML file\r\n utils.save_YAML_file(self.track_file_path, self.tracked_faces)\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for face tracking:', time_in_seconds, 's\\n'\r\n logger.debug('Time for face tracking:', time_in_seconds, 's\\n')\r\n\r\n self.anal_times[c.FACE_TRACKING_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)",
"def run(self):\n rate = rospy.Rate(50)\n\n while not rospy.is_shutdown():\n self.detect_face()\n rate.sleep()\n # clean up while shutdown\n self.cap.release()\n rospy.loginfo(\"[FACE] all done!\")",
"def __detect_face(self, img):\n gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)\n return self.detector(gray, 1)",
"def make_face_recognition(update: Update, _: CallbackContext) -> None:\n # message.photo is a list of PhotoSize objects,\n # which represent different sizes of the same photo\n\n # print(\"Enter to make_face_recognition\")\n img_from_user = update.message.photo[-1].get_file()\n img_file = io.BytesIO()\n img_from_user.download(out=img_file)\n img_array = face_recognition.load_image_file(img_file)\n # Find all the faces in the image\n face_locations = face_recognition.face_locations(img_array)\n # print(face_locations)\n img_with_rects = _make_rects(img_array, face_locations)\n out_file = 'tmp.jpg'\n Image.fromarray(img_with_rects, 'RGB').save(out_file, format=\"JPEG\")\n update.message.bot.send_photo(\n update.message.chat_id,\n photo=open(out_file, 'rb'))",
"def get_check_folder():\r\n filelist = [file for file in os.listdir('temp') if file.endswith('.png')]\r\n image_count = len(filelist)\r\n if image_count == 0:\r\n print\"No faces detected in image.\"\r\n exit()\r\n print \"Detected \"+str(image_count)+\" faces in the image.\"\r\n if filelist:\r\n for image_path in filelist:\r\n target = cv2.imread(\"temp/\" + image_path)\r\n cv2.imshow(\"detected face\", target)\r\n k = cv2.waitKey(1) & 0xFF\r\n img_to_del = Image.open(\"temp/\" + image_path)\r\n for folder in get_immediate_subdirectories():\r\n count = 0\r\n val = 0\r\n folder_filelist = [file for file in os.listdir(\"detected_faces/\" + folder) if\r\n file.endswith('.png')]\r\n for file in folder_filelist:\r\n img_to_compare = Image.open(\"detected_faces/\" + folder + \"/\" + file)\r\n if img_to_del.size > img_to_compare.size:\r\n temp_image_resized = img_to_del.resize(img_to_compare.size, Image.ANTIALIAS)\r\n index = get_ssim(temp_image_resized, img_to_compare)\r\n elif img_to_del.size < img_to_compare.size:\r\n img_to_compare = img_to_compare.resize(img_to_del.size, Image.ANTIALIAS)\r\n index = get_ssim(img_to_del, img_to_compare)\r\n else:\r\n index = get_ssim(img_to_del, img_to_compare)\r\n val += index\r\n count += 1\r\n if count > 0:\r\n index = val/count\r\n if index > min_ssim_index_val:\r\n print \" Detected a face in DB folder \"+ folder\r\n if os.path.isfile(os.path.join(temp_path, image_path)):\r\n os.remove(os.path.join(temp_path, image_path))",
"def face_detect(sess, net, image_name):\n\n\t# Load the demo image\n\tim_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n\tim = cv2.imread(im_file)\n\n\t# Detect all object classes and regress object bounds\n\ttimer = Timer()\n\ttimer.tic()\n\t# scores, boxes = im_detect(sess, net, im)\n\tscores, boxes, eyes, smiles = im_detect_ori(sess, net, im)\n\ttimer.toc()\n\tprint ('Detection took {:.3f}s for '\n\t\t\t'{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n\t# Visualize detections for each class\n\t# im = im[:, :, (2, 1, 0)]\n\t# fig, ax = plt.subplots(figsize=(8, 8))\n\t# ax.imshow(im, aspect='equal')\n\n\tCONF_THRESH = 0.9\n\tNMS_THRESH = 0.3\n\tfor cls_ind, cls in enumerate(CLASSES[20:]):\n\t\tcls_ind += 20 # because we skipped everything except face\n\t\tcls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n\t\tcls_scores = scores[:, cls_ind]\n\t\tdets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)\n\t\tkeep = nms(dets, NMS_THRESH)\n\t\tdets = dets[keep, :]\n\t\teye = eyes[keep, :]\n\t\tsmile= smiles[keep, :]\n\n\tinds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n\tface_num = len(inds)\n\tprint '{} faces detected!'.format(face_num)\n\tdets = dets[inds, :]\n\teye = eye[inds, 1]\n\tsmile = smile[inds, 1]\n\n\treturn dets, eye, smile",
"def __detect_objs(self):\n while True:\n # Wait for input images\n if (not self.__predict_start) or \\\n (self.__img is None):\n continue\n\n # Client for detection\n client = vision.ImageAnnotatorClient()\n\n # Encode image to binary\n _, img_buffer = cv2.imencode(\".jpg\", self.__img)\n img_bytes = img_buffer.tobytes()\n\n # Change to vision Image type\n image = vision.Image(content=img_bytes)\n # Detect Person\n self.__detect_info = client.object_localization(image=image,\n max_results=self.__max_results\n ).localized_object_annotations\n cv2.waitKey(30)",
"def run(self, frame, dict_results):\n run_result = {repr(self): False}\n try:\n # flip the image in order to represent a true self of the person not mirror of it\n # and convert its colors.\n image = cv2.cvtColor(cv2.flip(frame, 1), cv2.COLOR_BGR2RGB)\n # make it read only image in order to improve the performance\n image.flags.writeable = False\n # process it by face mesh model\n results = self.face_mesh.process(image)\n\n if results.multi_face_landmarks:\n # face has been detected\n run_result[repr(self)] = True\n # show face net on image\n if config.DEBUG:\n self.draw_annotations(image, results)\n # sleep(config.TIMEOUT)\n except Exception as e:\n self.face_mesh.close()\n # write error to log file\n loggerService.get_logger().error(str(e))\n finally:\n dict_results.update(run_result)",
"def detectFaces_allFiles(directory):\n files_list = glob.glob(directory)\n for file in files_list:\n print(file)\n img = cv2.imread(file)\n if img is not None:\n height, width, channel = img.shape\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n age = find_age(file)\n\n if age >= 0:\n if faces != ():\n for (x, y, w, h) in faces:\n # On decale y vers le haut pour mieux centrer le visage\n if y - int(0.1*h) >= 0:\n y -= int(0.1*h)\n h *= 1.2\n else:\n h += y + int(0.1*h)\n y = 0\n if h > width:\n h = width\n # A partir de l'origine du visage (point en haut a gauche), on definit\n # notre carre, de cote le nouveau h\n if x + 0.8*h > width:\n x_right = width\n x_left = width - int(h)\n elif x - 0.2*h < 0:\n x_left = 0\n x_right = int(h)\n else:\n x_right = min(int(x) + int(0.8*h), int(width))\n x_left = int(x_right) - int(h)\n y_top = int(y)\n y_bottom = int(y) + int(h)\n roi_color = img[y_top:y_bottom, x_left:x_right]\n cv2.imwrite(\"./FacePhoto/{}.jpg\".format(extract_filename(file)), resize_image(roi_color, 227))\n else:\n files_list.remove(file)\n else:\n files_list.remove(file)\n cv2.destroyAllWindows()\n return files_list",
"def detect(self, frame): \n return self.__detect_faces(frame)",
"def get_face(detector, img_queue, box_queue):\n while True:\n image = img_queue.get()\n box = detector.extract_cnn_facebox(image)\n box_queue.put(box)",
"def update(self):\n # Get frame from video source:\n ret, frame = self.vid.read()\n\n if ret:\n # Convert the captured frame into grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.equalizeHist(gray)\n\n # Get all faces from the video frame\n faces = self.faceCascade.detectMultiScale(gray, 1.2,5)\n\n # For each face in faces\n for (x, y, w, h) in faces:\n # Create rectangle around the face\n cv2.rectangle(frame, (x-20,y-20), (x+w+20,y+h+20), (0,255,0), 4)\n\n # Recognize the face belongs to which ID\n Id = self.recognizer.predict(gray[y:y+h,x:x+w])\n\n ### IDENTIFICATION & SOCKET CODE GOES HERE\n if Id[0] == self.user_id:\n # If the target face is found 10 times then access is granted\n self.identification_count += 1\n if self.identification_count > 10:\n self.master.switch_frame(AccessGranted)\n\n name_to_put = self.user_name\n else:\n name_to_put = \"Unknown - Access Denied\"\n\n # Put text describe who is in the picture\n cv2.rectangle(frame, (x-22,y-90), (x+w+22, y-22), (0,255,0), -1)\n cv2.putText(frame, str(name_to_put), (x,y-40), self.font, 2, (255,255,255), 3)\n\n self.after(50, self.update)",
"def detector(videoframe, facedetection, maskdetection):\n (h, w) = videoframe.shape[:2]\n blobimage = cv2.dnn.blobFromImage(videoframe, 1.0, (224, 224), (104.0, 177.0, 123.0))\n\n facedetection.setInput(blobimage)\n ffinding = facedetection.forward()\n\n face_list = []\n locations = []\n predictions = []\n\n for i in range(0, ffinding.shape[2]):\n credence = ffinding[0, 0, i, 2]\n if credence > 0.6:\n case = ffinding[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x_start, y_start, x_end, y_end) = case.astype(\"int\")\n (x_start, y_start) = (max(0, x_start), max(0, y_start))\n (x_end, y_end) = (min(w - 1, x_end), min(h - 1, y_end))\n\n image = videoframe[y_start:y_end, x_start:x_end]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (224, 224))\n image = img_to_array(image)\n image = preprocess_input(image)\n face_list.append(image)\n locations.append((x_start, y_start, x_end, y_end))\n\n if len(face_list) > 0:\n face_list = np.array(face_list, dtype=\"float32\")\n predictions = maskdetection.predict(face_list, batch_size=32)\n return (locations, predictions)",
"def classify_face(im):\r\n faces = get_encoded_faces()\r\n faces_encoded = list(faces.values())\r\n known_face_names = list(faces.keys())\r\n\r\n img = cv2.imread(im, 1)\r\n \"\"\"\r\n Resize optinal \r\n \"\"\"\r\n #img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\r\n #img = img[:,:,::-1]\r\n face_locations = face_recognition.face_locations(img)\r\n unknown_face_encodings = face_recognition.face_encodings(img, face_locations)\r\n\r\n face_names = []\r\n for face_encoding in unknown_face_encodings:\r\n # See if the face is a match for the known face(s)\r\n matches = face_recognition.compare_faces(faces_encoded, face_encoding)\r\n name = \"Unknown\"\r\n\r\n # use the known face with the smallest distance to the new face\r\n face_distances = face_recognition.face_distance(faces_encoded, face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n name = known_face_names[best_match_index]\r\n\r\n face_names.append(name)\r\n\r\n \"\"\"\r\n All the photo lables in the faces foler end with (number) so a simiple .find(\"(\") command takes the () away from\r\n the label leaving us with the full name of the person\r\n\r\n \"\"\"\r\n\r\n result = name.find('(') \r\n fullname = (name[:result])\r\n \"\"\"\r\n If face_recogntion module recognizes a face but that face is not in the faces module then \r\n it will print unknown and we print 12345678 to use it on the start attednace program \r\n\r\n \"\"\"\r\n if (name == \"Unknown\"):\r\n print(\"12345678\")\r\n else:\r\n \"\"\"\r\n f'{len(face_locayion)}-people - will return the number of people in photo taken by Nao'\r\n \"\"\"\r\n print (f'{len(face_locations)}-people')\r\n print (fullname)\r\n print(courseid)\r\n print (lateornot)\r\n c34 = fullname.find(' ')\r\n firstname = (fullname[:c34])\r\n lastname = (fullname[c34:])\r\n \"\"\"\r\n We get all the data courseid , fristname , lastname, datetime1,and late or not and submited on the website \r\n \r\n\r\n \"\"\"\r\n login_data = {\r\n\t 'Course': courseid,\r\n\t 'FirstName': firstname,\r\n\t 'LastName': lastname,\r\n\t 'Date': datetime2,\r\n\t 'Attendance': 'on',\r\n\t 'Late': latev,\r\n\t 'submitbutton': 'Submit'\r\n }\r\n if(fullname == \"Unknow\"):\r\n \tprint(\"I-dont-know-you\")\r\n else:\r\n \r\n with requests.Session() as s:\r\n \turl = \"https://rbattendance.000webhostapp.com/update.php\"\r\n \tr = s.get(url)\r\n \tsoup = BeautifulSoup(r.content, 'html5lib')\r\n \tr = s.post(url, data = login_data)\r\n \t#print(r.content)\r\n \r\n \r\n\r\n\r\n\r\n\r\n \"\"\"\r\n This for loop is reponsible for drawing on the image \r\n \"\"\"\r\n\r\n for (top, right, bottom, left), name in zip(face_locations, face_names):\r\n # Draw a box around the face\r\n cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)\r\n\r\n # Draw a label with a name below the face\r\n cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)\r\n font = cv2.FONT_HERSHEY_DUPLEX\r\n cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)\r\n\r\n\r\n # Display the resulting image\r\n \r\n \r\n while True:\r\n #cv2.imshow('Video', img)\r\n #if cv2.waitKey(1) & 0xFF == ord('q'):\r\n return face_names",
"def detection():\n faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.3,\n minNeighbors=3,\n minSize=(30, 30)\n )\t#Haar-cascade: A Face detection algorithm\n\n area = faces[:,2] * faces[:,3]\n faces = np.c_[faces,area]\t#concatenates area values to last column of 'face' array.\n\n print('All detected faces\\n',faces)\n i,j = unravel_index(faces.argmax(), faces.shape)\t# gets the position of maximum value from 'face' array.\n print(i,j)\n print(\"Found %d Face%s!\" %(len(faces),\"s\"[len(faces)==1:]))\n\n X = faces[i,0]\n Y = faces[i,1]\n W = faces[i,2]\n H = faces[i,3]\n \n cv2.rectangle(image, (X, Y), (X + W, Y + H), (0, 255, 0), 2)\n roi_color = image[Y:Y + H, X:X + W] \n print(\"Face(largest) Extracted.\")\n cv2.imwrite('Extracted_face.jpg', roi_color)\t#Image Extraction.\n status = cv2.imwrite('Output.jpg', image)\n print(\"Image Output.jpg written to filesystem: \", status)",
"def extract_faces(image_path: str, pk: int):\n image = Image.open(image_path)\n image = np.array(image)\n\n if image.shape[0] <= 0 or image.shape[1] <= 0:\n return None\n\n import mtcnn\n\n # detect faces from image\n face_detector = mtcnn.MTCNN()\n detections = face_detector.detect_faces(image)\n\n if len(detections) < 1:\n return None\n\n from deepface.basemodels.Facenet import InceptionResNetV2\n\n # load InceptionResNet model provided by deepface\n facenet_model = InceptionResNetV2()\n facenet_model.load_weights(get_weights(\"facenet\"))\n\n # normalize faces and get embeddings\n faces = [normalize_face(image, face) for face in detections]\n embeddings = facenet_model.predict(np.vstack(faces), batch_size=len(faces))\n\n for i in range(len(faces)):\n person_id = recognize_person(embeddings[i])\n print(person_id, flush=True)\n face_obj = models.Face.objects.create(\n confidence=detections[i]['confidence'],\n left=detections[i]['box'][0],\n top=detections[i]['box'][1],\n width=detections[i]['box'][2],\n height=detections[i]['box'][3],\n photo_id=pk,\n person_id=person_id\n )\n\n save_embeddings(embeddings[i], face_obj.id, person_id)",
"def detect_face(gray):\r\n face_cascade = cv2.CascadeClassifier(classifier_file_name)\r\n faces = face_cascade.detectMultiScale(gray, scaleFactor=scale_factor,minNeighbors=min_neighbors,minSize=min_size,flags=flags)\r\n return faces"
] | [
"0.64969593",
"0.6364005",
"0.6304755",
"0.6286546",
"0.6172801",
"0.61590046",
"0.61203325",
"0.60647845",
"0.6005734",
"0.5988241",
"0.5965875",
"0.5962146",
"0.5954071",
"0.59476674",
"0.5939201",
"0.5933306",
"0.5929238",
"0.59187454",
"0.591381",
"0.5893143",
"0.58397377",
"0.583273",
"0.5821033",
"0.580599",
"0.5785871",
"0.5755994",
"0.57514435",
"0.5747699",
"0.57032156",
"0.5696099"
] | 0.86137897 | 0 |
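Many of the negative examples in the row above share one OpenCV pattern: convert the frame to grayscale, run a Haar cascade with detectMultiScale, then draw rectangles around the detected boxes. The following is a minimal sketch of that shared pattern only, not code taken from any entry above; the function name, the placeholder image path and the output filename are assumptions for illustration.

import cv2

def detect_and_draw(image_path, output_path='faces_out.jpg'):
    # Load the frontal-face Haar cascade that ships with opencv-python.
    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    img = cv2.imread(image_path)
    if img is None:
        raise FileNotFoundError(image_path)
    # Detection runs on a grayscale copy of the image.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.3,
                                     minNeighbors=5, minSize=(30, 30))
    # Draw one bounding box per detection and save the annotated image.
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imwrite(output_path, img)
    return len(faces)

# Usage (hypothetical path): n_faces = detect_and_draw('group_photo.jpg')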
Get status of EC2 spot request | def get_status(ec2,spot_request_id):
current = ec2.describe_spot_instance_requests(SpotInstanceRequestIds=[spot_request_id,])
instance_id = current[u'SpotInstanceRequests'][0][u'InstanceId'] if u'InstanceId' in current[u'SpotInstanceRequests'][0] else None
return instance_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def access_spot_instance_state() -> Response:\n retry_session = requests_retry_session()\n response = retry_session.get(INSTANCE_ACTION_URL)\n return response",
"def remote_status():",
"async def get_status():",
"def status():\n (code, message) = rest_api.status(request)\n if (code == 200):\n return 'Running'\n else:\n abort(code)",
"def getStatus():\n return json.dumps({'camera': Camera.status(), 'rover': rover.status()}), 200",
"def status(self):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_STATUS]))\n return r.json()",
"def __get_status_api(self):\r\n try:\r\n return Call_shelly_api(url=self.__api_address + \"/status\")\r\n except ShellyException as err:\r\n _LOGGER.warning(err)",
"def get_status(self):\n r = requests.get(self.base_url + '/status')\n return r.json()",
"def status():\n _request('worklog/status/')",
"def status(self, request):\n return (200, 'OK')",
"def getStatus():",
"def _get_status(self):\n if self._state in [\"processed\", \"error\"]:\n return self._state\n \n get_resp = requests.get(self.location, cookies={\"session\": self.session})\n\n self._state = get_resp.json()[\"status\"]\n self.slice_time = get_resp.json()[\"slice_time\"]\n \n return self._state",
"def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)",
"def sipserver_status(self) -> str:",
"def check_status():\n logger.debug(\"Starting the check_status() routine.\")\n\n url = \"https://www.toggl.com/api/v8/time_entries/current\"\n token = os.environ[\"TOGGL_API_TOKEN\"]\n auth_token = base64.b64encode(f\"{token}:api_token\".encode()).decode()\n resp = requests.get(url, headers={\"Authorization\": \"Basic \" + auth_token})\n\n cols = \"id\", \"duration\", \"description\"\n status = {k: v for k, v in (resp.json()[\"data\"] or {}).items() if k in cols}\n logger.debug(f\"{'Something' if 'id' in status else 'Something'} is being tracked.\")\n\n return status",
"def test_get_status(self):\n response = self.client.open(\n '/v1/status',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def status(self):\n return self._get(path='status')",
"def get_status():\n data = {\n 'status': 'up',\n }\n jsn = json.dumps(data)\n\n resp = Response(jsn, status=200, mimetype='application/json')\n\n return resp",
"def getStatus(self, request, context):\n \n statusDrone = str(self.vehicle.system_status).rpartition(':')[2]\n\t \n return droneconnect_pb2.Status(status = statusDrone)",
"def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)",
"def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-able\n self.vm.wait_ready()\n status = self.vm.run_command(\"ctool status\", indent=0, prefix='')\n return status.strip()",
"def ec2_status(resource, metadata, return_count=False):\n\n instances = resource.instances.filter(\n Filters=[{'Name': 'tag:Name', 'Values': [metadata['fqdn']]},\n {'Name': 'instance-state-name', 'Values': ['pending', 'running']}, ])\n\n # get a count of the instances and then either return count or print results\n count = sum(1 for _ in instances)\n if return_count:\n # return count for conditional consumption in other functions\n return count\n else:\n # print for human consumption\n if count == 0:\n print(\"No instances running\")\n else:\n print(count, \"instances running\")\n print('{:20} {:15} {:22} {:18} {}'.format(\n 'instance_id', 'state', 'instance_name', 'public_ip_address', 'instance_role'))\n for instance in instances:\n # tags order does not deterministically stay from run to run and stored as list of dicts\n # tags = {instance.tags[0]['Key']: instance.tags[0]['Value'],\n # instance.tags[1]['Key']: instance.tags[1]['Value']}\n # probably there is a much better way to map this but let's make it a dict of tags\n tags = {}\n for tag in instance.tags:\n tags[tag['Key']] = tag['Value']\n\n print('{:20} {:15} {:22} {:18} {}'.format(\n instance.id, instance.state['Name'], tags['Name'],\n instance.public_ip_address, tags['Role']))",
"def status():\n return 'OK'",
"async def _get_spot_feed(self):\n self._logger.debug(\"Polling Spot API\")\n async with aiohttp.ClientSession() as session:\n response = await session.request(\n method=\"GET\",\n url=self.spot_url,\n params=self.params\n )\n json_resp = await response.json()\n _response = json_resp.get(\"response\")\n\n if \"errors\" in _response:\n self._logger.error(\"Error from Spot API: '%s'\", _response)\n else:\n await self.handle_response(_response)",
"def get_qnet_status(self, request, suffix=''):\n abs_path = self.qnet_domain + self.qnet_status\n # check absolute path and used element\n if abs_path != '' and self.qnet_element != '':\n # try to request\n try:\n url = self._format_api_url(abs_path)\n response = self._request_get(url)\n except Exception as e:\n return HTTPServerError(body = \"GET Qnet status error: %s\" % str(e))\n\n # return result\n return HTTPOk(headers={'Content-Type': 'application/json'},\n body=json.dumps(response['wstatus']))\n\n else:\n return HTTPServerError(body=\"Bad request to the Qnet platform\")",
"def handle_status(self, request):\n \"\"\"\n @api {get} /status Get node status\n @apiName GetNodeStatus\n @apiGroup Node\n @apiVersion 1.1.0\n\n @apiSuccess {Boolean} execution_enabled Task execution is enabled on the node.\n @apiSuccess {Boolean} leader Node is the leader.\n @apiSuccess {String} name Node name.\n @apiSuccess {Boolean} scheduler_running The scheduler is running on the node.\n @apiSuccess {String} address Node IP address.\n @apiSuccess {String[]} pools Pools in which the node is registered.\n @apiSuccess {Object} running_processes Processes running on the host.\n @apiSuccess {Object} running_processes.process Process.\n @apiSuccess {String} running_processes.process.start_time Time the process started, ISO 8601 formatted.\n @apiSuccess {String} running_processes.process.task ID of the task.\n @apiSuccess {Boolean} cluster_joined Node has joined the cluster.\n @apiSuccess {Boolean} contending_for_lead Node is contending for lead.\n @apiSuccess {Boolean} pools_joined Node has joined its pools.\n\n @apiSuccessExample {json} Example response:\n {\n \"execution_enabled\": true,\n \"leader\": false,\n \"name\": \"node2\",\n \"scheduler_running\": false,\n \"address\": \"127.0.0.1:32002\",\n \"pools\": [\"pool1\", \"pool2\"],\n \"running_processes\": {\n \"b26e5cc2ef3f11e4817b0026b951c045\": {\n \"start_time\": \"2015-04-30T13:49:18.351494+00:00\",\n \"task\": \"508b4b72e44611e49e76c81f66cd0cca\"\n }\n },\n \"cluster_joined\": true,\n \"contending_for_lead\": true,\n \"pools_joined\": true\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n status = {\n 'name': self.cluster.nodename,\n 'address': self.cluster.addr,\n 'pools': self.cluster.mypools,\n 'leader': self.cluster.is_leader,\n 'cluster_joined': self.cluster.cluster_joined,\n 'pools_joined': self.cluster.pools_joined,\n 'contending_for_lead': self.cluster.contending_for_lead,\n\n 'execution_enabled': self.manager.enabled,\n 'running_processes': dict([ (execid, { 'task': details['task'], 'start_time': details['start_time'].isoformat() }) for (execid, details) in self.manager.running_processes.items() ]),\n\n 'scheduler_running': self.cluster.scheduler.running\n }\n\n return HTTPReply(body = json.dumps(status), headers = headers)",
"def test_get_status(self):\n pass",
"def test_get_status(self):\n pass",
"def status(self):\n self.scion_sh('status')",
"def status_check():\n return {\"status\": \"OK\"}"
] | [
"0.6296138",
"0.6140248",
"0.60616577",
"0.588999",
"0.5862307",
"0.5762618",
"0.5736567",
"0.5726555",
"0.57067794",
"0.56971884",
"0.5692415",
"0.56664914",
"0.5663505",
"0.5642255",
"0.5640854",
"0.56370384",
"0.5602836",
"0.5599503",
"0.5592363",
"0.5583365",
"0.5575961",
"0.5565143",
"0.55148506",
"0.54785705",
"0.54774314",
"0.5476058",
"0.5466051",
"0.5466051",
"0.5463782",
"0.5461781"
] | 0.7878497 | 0 |
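The document field of the row above reads the InstanceId back from describe_spot_instance_requests and returns None until the request is fulfilled, so callers poll it in a loop. As a hedged sketch of the same check, assuming AWS credentials and a default region are already configured, boto3's built-in spot_instance_request_fulfilled waiter (also used by the launch script in the next row) can do the polling before the field is read; the function name and the example request id below are placeholders.

import boto3

def wait_for_spot_instance(spot_request_id):
    # Block until the spot request is fulfilled, then return its InstanceId.
    ec2 = boto3.client('ec2')
    waiter = ec2.get_waiter('spot_instance_request_fulfilled')
    waiter.wait(SpotInstanceRequestIds=[spot_request_id])
    described = ec2.describe_spot_instance_requests(
        SpotInstanceRequestIds=[spot_request_id])
    request = described['SpotInstanceRequests'][0]
    # 'InstanceId' only appears once capacity has actually been assigned.
    return request.get('InstanceId')

# Usage (placeholder id): instance_id = wait_for_spot_instance('sir-0123456789example')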
A helper script to launch a spot P2 instance running Deep Video Analytics. To use this, please change the keyname, security group and IAM roles at the top | def launch_spot():
ec2 = boto3.client('ec2')
ec2r = boto3.resource('ec2')
ec2spec = dict(ImageId=AMI,
KeyName = KeyName,
SecurityGroupIds = [SecurityGroupId, ],
InstanceType = "p2.xlarge",
Monitoring = {'Enabled': True,},
IamInstanceProfile = IAM_ROLE)
output = ec2.request_spot_instances(DryRun=False,
SpotPrice="0.4",
InstanceCount=1,
LaunchSpecification = ec2spec)
spot_request_id = output[u'SpotInstanceRequests'][0][u'SpotInstanceRequestId']
logging.info("instance requested")
time.sleep(30)
waiter = ec2.get_waiter('spot_instance_request_fulfilled')
waiter.wait(SpotInstanceRequestIds=[spot_request_id,])
instance_id = get_status(ec2, spot_request_id)
while instance_id is None:
time.sleep(30)
instance_id = get_status(ec2,spot_request_id)
instance = ec2r.Instance(instance_id)
with open("host",'w') as out:
out.write(instance.public_ip_address)
logging.info("instance allocated")
time.sleep(10) # wait while the instance starts
env.hosts = [instance.public_ip_address,]
fh = open("connect.sh", 'w')
fh.write("#!/bin/bash\n" + "ssh -i " + env.key_filename + " " + env.user + "@" + env.hosts[0] + "\n")
fh.close()
local("fab deploy_ec2") # this forces fab to set new env.hosts correctly | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': '[email protected]'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='[email protected]',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id",
"def deploy_ec2():\n import webbrowser\n run('cd deepvideoanalytics && git pull && cd docker_GPU && ./rebuild.sh && nvidia-docker-compose up -d')\n # webbrowser.open('{}:8000'.format(env.hosts[0]))",
"def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance",
"def launch_instance(cloud):\n js = _get_jetstream_conn()\n\n sgs = ['CloudLaunchDefault']\n kp_name = \"cloudman_key_pair\"\n inst_size = 'm1.small'\n network_id = '86a1c3e8-b1fb-41f3-bcaf-8334567fe989'\n lc = js.compute.instances.create_launch_config()\n lc.add_network_interface(network_id)\n\n img_id = '2cf07e4a-62a8-41c2-9282-f3c53962f296' # Gxy Standalone 161021b01\n name = 'ea-galaxy-{0}'.format(strftime(\"%m-%d-%H-%M\", localtime()))\n\n i = js.compute.instances.create(\n name, img_id, inst_size, security_groups=sgs, launch_config=lc,\n key_pair=kp_name)\n return i",
"def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance",
"def launch(\n *,\n key_name: Optional[str],\n instance_type: str,\n ami: str,\n ami_user: str,\n tags: Dict[str, str],\n display_name: Optional[str] = None,\n size_gb: int,\n security_group_name: str,\n instance_profile: Optional[str],\n nonce: str,\n delete_after: datetime.datetime,\n) -> Instance:\n\n if display_name:\n tags[\"Name\"] = display_name\n tags[\"scratch-delete-after\"] = str(delete_after.timestamp())\n tags[\"nonce\"] = nonce\n tags[\"git_ref\"] = git.describe()\n tags[\"ami-user\"] = ami_user\n\n ec2 = boto3.client(\"ec2\")\n groups = ec2.describe_security_groups()\n security_group_id = None\n for group in groups[\"SecurityGroups\"]:\n if group[\"GroupName\"] == security_group_name:\n security_group_id = group[\"GroupId\"]\n break\n\n if security_group_id is None:\n vpcs = ec2.describe_vpcs()\n vpc_id = None\n for vpc in vpcs[\"Vpcs\"]:\n if vpc[\"IsDefault\"] == True:\n vpc_id = vpc[\"VpcId\"]\n break\n if vpc_id is None:\n default_vpc = ec2.create_default_vpc()\n vpc_id = default_vpc[\"Vpc\"][\"VpcId\"]\n securitygroup = ec2.create_security_group(\n GroupName=security_group_name,\n Description=\"Allows all.\",\n VpcId=vpc_id,\n )\n security_group_id = securitygroup[\"GroupId\"]\n ec2.authorize_security_group_ingress(\n GroupId=security_group_id,\n CidrIp=\"0.0.0.0/0\",\n IpProtocol=\"tcp\",\n FromPort=22,\n ToPort=22,\n )\n\n network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {\n \"AssociatePublicIpAddress\": True,\n \"DeviceIndex\": 0,\n \"Groups\": [security_group_id],\n }\n\n say(f\"launching instance {display_name or '(unnamed)'}\")\n with open(ROOT / \"misc\" / \"scratch\" / \"provision.bash\") as f:\n provisioning_script = f.read()\n kwargs: RunInstancesRequestRequestTypeDef = {\n \"MinCount\": 1,\n \"MaxCount\": 1,\n \"ImageId\": ami,\n \"InstanceType\": cast(InstanceTypeType, instance_type),\n \"UserData\": provisioning_script,\n \"TagSpecifications\": [\n {\n \"ResourceType\": \"instance\",\n \"Tags\": [{\"Key\": k, \"Value\": v} for (k, v) in tags.items()],\n }\n ],\n \"NetworkInterfaces\": [network_interface],\n \"BlockDeviceMappings\": [\n {\n \"DeviceName\": \"/dev/sda1\",\n \"Ebs\": {\n \"VolumeSize\": size_gb,\n \"VolumeType\": \"gp3\",\n },\n }\n ],\n \"MetadataOptions\": {\n # Allow Docker containers to access IMDSv2.\n \"HttpPutResponseHopLimit\": 2,\n },\n }\n if key_name:\n kwargs[\"KeyName\"] = key_name\n if instance_profile:\n kwargs[\"IamInstanceProfile\"] = {\"Name\": instance_profile}\n i = boto3.resource(\"ec2\").create_instances(**kwargs)[0]\n\n return i",
"def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance",
"def create_instance(self, image='ami-660c3023', key_name='linuxonEC2', instance_type='t1.micro', security_groups=['default']):\n return self.conn.run_instances(image,\n key_name=key_name,\n instance_type=instance_type,\n security_groups=security_groups).instances[0]",
"def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):\n\n reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)\n instance = reservation.instances[0]\n return instance",
"def quickie():\n #info = { \"instance_type\": { default = \"t2.micro\", all = [ \"t2.micro\" ] }, \"image_id\" : { default = \"\", all = [] }, \"security_groups\" : { default = [], all = [] }, \"key_name\": { default = \"\", all = [] }}\n client = boto3.client(\"EC2\")\n data = client.describe_images()\n info[\"image_id\"][\"all\"]\n args = {}\n for attr in info:\n print(\"Available values for \"+attr+\":\\n\"+\" \".join(info[attr]))\n default = info[attr][0]\n var = raw_input(\"Choose \"+attr+\"[\"+default+\"]:\")\n if var == \"\":\n var = default\n if re.match(\"^.+\\s\", attr):\n args[attr] = [var]\n else:\n args[attr] = args\n reservation = client.run_instances(**args)",
"def launch_example_ec2_cmd(*args, **kwargs):\n return launch_example_ec2(*args, **kwargs)",
"async def daily_pvp(self, ctx):\n embed = await self.daily_embed([\"pvp\"], ctx=ctx)\n try:\n embed.set_thumbnail(\n url=\"https://render.guildwars2.com/file/\"\n \"FE01AF14D91F52A1EF2B22FE0A552B9EE2E4C3F6/511340.png\")\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"Need permission to embed links\")",
"def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst",
"def main():\n utils.vip_main(actuator_agent, identity='platform.d.actuator')",
"def openConsole():\n # Create an ARN out of the information provided by the user.\n role_arn = \"arn:aws:iam::\" + accountId() + \":role/\" + getArgs().role_name\n\n # Connect to AWS STS and then call AssumeRole.\n # Returns temporary security credentials.\n sts_connection = STSConnection()\n assumed_role_object = sts_connection.assume_role(\n role_arn=role_arn,\n role_session_name=\"AssumeRoleSession\"\n )\n\n # Format resulting credentials into a JSON block.\n tmp_creds = {\n \"sessionId\": assumed_role_object.credentials.access_key,\n \"sessionKey\": assumed_role_object.credentials.secret_key,\n \"sessionToken\": assumed_role_object.credentials.session_token,\n }\n json_temp_credentials = json.dumps(tmp_creds)\n\n # Make a request to the AWS federation endpoint to get a sign-in\n # token, passing parameters in the query string.\n params = {\n \"Action\": \"getSigninToken\",\n \"Session\": json_temp_credentials,\n }\n request_url = \"https://signin.aws.amazon.com/federation\"\n r = requests.get(request_url, params=params)\n\n # The return value from the federation endpoint, the token.\n sign_in_token = json.loads(r.text)[\"SigninToken\"]\n # Token is good for 15 minutes.\n\n # Create the URL to the console with token.\n params = {\n \"Action\": \"login\",\n \"Issuer\": \"\",\n \"Destination\": \"https://console.aws.amazon.com/\",\n \"SigninToken\": sign_in_token,\n }\n request_url = \"https://signin.aws.amazon.com/federation?\"\n request_url += urlencode(params)\n\n # Use the default browser to sign in to the console using the\n # generated URL.\n browser = webbrowser.get()\n if getArgs().incognito:\n webbrowser.Chromium.raise_opts = [\"\", \"--incognito\"]\n webbrowser.Chrome.raise_opts = [\"\", \"--incognito\"]\n webbrowser.Mozilla.remote_args = ['--private-window', '%s']\n browser.open(request_url, new=1)",
"def launch_cvd(instance_name, zone, sig_server_addr, sig_server_port, use_user_disk=True):\n\n cuttlefish_dir = '/usr/local/share/cuttlefish'\n user_data_dir = '/mnt/user_data'\n\n launch_command = f'gcloud compute ssh --zone={zone} {instance_name} -- '\n\n if use_user_disk:\n launch_command += f'HOME={user_data_dir} \\\n ANDROID_HOST_OUT={cuttlefish_dir} \\\n ANDROID_PRODUCT_OUT={cuttlefish_dir} '\n else:\n launch_command += f'HOME={cuttlefish_dir} '\n\n launch_command += f'{cuttlefish_dir}/bin/launch_cvd \\\n --start_webrtc --daemon \\\n --webrtc_sig_server_addr={sig_server_addr} \\\n --webrtc_sig_server_port={sig_server_port} \\\n --start_webrtc_sig_server=false \\\n --webrtc_device_id={instance_name} \\\n --report_anonymous_usage_stats=y'\n\n os.system(launch_command)\n\n print(f'Launched cuttlefish on {instance_name} at {sig_server_addr}:{sig_server_port}')",
"def main():\n utils.vip_main(peakShaverAgent, \n version=__version__)",
"def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)",
"def main():\n\n parser = cli.Parser()\n parser.add_required_arguments(cli.Argument.VM_NAME)\n args = parser.get_args()\n si = service_instance.connect(args)\n\n content = si.RetrieveContent()\n\n vm = get_vm(content, args.vm_name)\n vm_moid = vm._moId\n\n vcenter_data = content.setting\n vcenter_settings = vcenter_data.setting\n console_port = '7331'\n\n for item in vcenter_settings:\n key = getattr(item, 'key')\n if key == 'VirtualCenter.FQDN':\n vcenter_fqdn = getattr(item, 'value')\n\n session_manager = content.sessionManager\n session = session_manager.AcquireCloneTicket()\n\n vc_cert = ssl.get_server_certificate((args.host, int(args.port)))\n vc_pem = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,\n vc_cert)\n vc_fingerprint = vc_pem.digest('sha1')\n\n print(\"Open the following URL in your browser to access the \"\n \"Remote Console.\\n\"\n \"You have 60 seconds to open the URL, or the session\"\n \"will be terminated.\\n\")\n print(\"http://\" + args.host + \":\" + console_port + \"/console/?vmId=\"\n + str(vm_moid) + \"&vmName=\" + args.vm_name + \"&host=\" + vcenter_fqdn\n + \"&sessionTicket=\" + session + \"&thumbprint=\" + str(vc_fingerprint))\n print(\"Waiting for 60 seconds, then exit\")\n time.sleep(60)",
"def ssh_to_ec2(instance):\n subprocess.Popen(['ssh', instance.dns_name])",
"def start_instance(InstanceId=None):\n pass",
"def ec2_start(resource, metadata):\n\n # do minimal provisioning of machine through cloud-init\n # this installs git and bootstraps puppet to provision the rest\n # requires recent ubuntu (14.04/16.04) or RHEL/CentOS 7\n userdata = \"\"\"#cloud-config\npackage_update: true\nhostname: {hostname}\nfqdn: {fqdn}\nmanage_etc_hosts: true\npackages:\n - git\nwrite_files:\n - path: /etc/facter/facts.d/hostgroup.txt\n content: hostgroup=aws\n - path: /etc/facter/facts.d/role.txt\n content: role={role}\nruncmd:\n - git clone {repo} /etc/puppet\n - /etc/puppet/support_scripts/bootstrap-puppet.sh\"\"\".format(\n hostname=metadata['hostname'], fqdn=metadata['fqdn'],\n role=metadata['role'], repo=metadata['repo'])\n\n instances = resource.create_instances(\n ImageId=metadata['ami'],\n MinCount=1,\n MaxCount=1,\n InstanceType=metadata['type'],\n SubnetId=metadata['subnet'],\n SecurityGroupIds=[metadata['secgroup']],\n KeyName=metadata['keypair'],\n UserData=userdata,\n BlockDeviceMappings=[\n {\n 'DeviceName': '/dev/sda1', # root so far, sometimes /dev/xvdh ?\n 'Ebs': {\n 'VolumeSize': 20,\n 'DeleteOnTermination': True,\n 'VolumeType': 'gp2'\n },\n },\n ]\n )\n\n # not sure if we really need to sleep before tagging but\n # we wait until running anyway which takes much longer than 1 second\n time.sleep(1)\n for instance in instances:\n # first set tags, Name and Role\n instance.create_tags(\n Resources=[instance.id],\n Tags=[\n {\n 'Key': 'Role',\n 'Value': metadata['role']\n },\n {\n 'Key': 'Name',\n 'Value': metadata['fqdn']\n },\n ]\n )\n\n # ensure system is running before we print address to connect to\n instance.wait_until_running()\n # instance.load()\n ec2_status(resource, metadata)",
"def launch_instance(tag, key_name, group_name, inst_type, ami_name, user_data,\n wait=True, returninfo=None):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n failures = 0\n max_failures = 10\n while True:\n try:\n reservation = ec2.run_instances(ami_name,\n key_name=key_name,\n security_groups=[group_name],\n instance_type=inst_type,\n user_data=None)\n break\n except Exception, err:\n # Failed to get instance; wait 15 seconds and then try again (up to\n # 10 total times)\n errortext = str(err)\n if errortext.find(\"Not authorized for images\") >= 0:\n print \"**************************************\"\n print \"* Error from AWS suggests that the AMI code in\"\n print \"* CloudSetup.py is deprecated. Please go to\"\n print \"* https://aws.amazon.com/marketplace/ and search for\"\n print \"* \\\"Ubuntu server lts hvm\\\", selecting the most recent\"\n print \"* version. Click \\\"Continue\\\", \\\"Manual Launch\\\",\"\n print \"* and then copy the AMI ID for the US East region.\"\n print \"* Copy that to the AMI_NAME value in CloudSetup.py\"\n print \"* and re-run.\"\n print \"***************************************\"\n print \"* (Full text of error):\"\n print errortext\n print \"***************************************\"\n return None\n elif errortext.find(\"accept terms and subscribe\") >= 0:\n print \"**************************************\"\n print \"* Error from AWS suggests that you have never used this\"\n print \"* AMI before and need to accept its terms and\"\n print \"* subscribe to it. Please follow the link in the below\"\n print \"* error text. Click \\\"Continue\\\", \\\"Manual Launch\\\",\"\n print \"* and \\\"Accept Terms\\\". After receiving email\"\n print \"* confirmation, you can re-run the code.\"\n print \"**************************************\"\n print \"* (Full text of error):\"\n print errortext\n print \"**************************************\"\n return None\n failures += 1\n if failures == max_failures:\n print \"**************************************\"\n print \"* Maximum number of instance launch failures reached.\"\n print \"* (Full text of error):\"\n print errortext\n print \"**************************************\"\n return None\n print \" ** ec2.run_instances failed for tag\", tag, \"; waiting 15\"\n print \" ** seconds and then trying again...\"\n time.sleep(15)\n\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n instance = reservation.instances[0]\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n instance.add_tag(\"tag\", tag)\n time.sleep(5) # Slow things down -- they're never running super fast anyway\n\n if wait:\n print \" Instance requested, waiting for 'running' for tag\", tag\n while instance.state != \"running\":\n print \" %s ...\" % tag\n time.sleep(5)\n try:\n instance.update()\n except boto.exception.EC2ResponseError as e:\n print \"******************\"\n print \"Error caught in instance.update():\"\n print e.strerror\n print \"******************\"\n print \" %s done!\" % tag\n if returninfo:\n returninfo.put(tag)\n return instance",
"def new_sddc_l2vpn(**kwargs):\n proxy = kwargs['proxy']\n session_token = kwargs['sessiontoken']\n display_name = kwargs['display_name']\n remote_addr = kwargs['remote_address']\n endpoint = kwargs['endpoint']\n\n match endpoint:\n case \"Public-IP\":\n endpoint = \"Public-IP1\"\n case \"Private-IP\":\n endpoint = \"Private-IP1\"\n\n ike_profile_json = {\n \"dh_groups\": [\n \"GROUP14\"\n ],\n \"digest_algorithms\": [\n \"SHA2_256\"\n ],\n \"encryption_algorithms\": [\n \"AES_128\"\n ],\n \"ike_version\": \"IKE_V2\",\n \"display_name\": \"__l2vpn__internal__\",\n \"id\": \"__l2vpn__internal__\",\n \"resource_type\": \"IPSecVpnIkeProfile\"\n }\n\n ipsec_tun_json = {\n \"dh_groups\": [\n \"GROUP14\"\n ],\n \"digest_algorithms\": [],\n \"encryption_algorithms\": [\n \"AES_GCM_128\"\n ],\n \"enable_perfect_forward_secrecy\": True,\n \"display_name\": \"__l2vpn__internal__\",\n \"id\": \"__l2vpn__internal__\",\n \"resource_type\": \"IPSecVpnTunnelProfile\"\n }\n\n dpd_json = {\n \"dpd_probe_interval\": \"60\",\n \"dpd_probe_mode\": \"PERIODIC\",\n \"display_name\": \"__l2vpn__internal__\",\n \"id\": \"__l2vpn__internal__\",\n \"resource_type\": \"IPSecVpnDpdProfile\"\n }\n\n #Create IPSec VPN tunnel\n ipsec_json = {\n \"resource_type\": \"RouteBasedIPSecVpnSession\",\n \"id\": \"__l2vpn__internal__\",\n \"display_name\": \"L2VPN\",\n \"tunnel_interfaces\": [\n {\n \"ip_subnets\": [\n {\n \"ip_addresses\": [\n \"169.254.31.253\"\n ],\n \"prefix_length\": 30\n }\n ],\n \"resource_type\": \"IPSecVpnTunnelInterface\",\n \"id\": \"default-tunnel-interface\",\n \"display_name\": \"default-tunnel-interface\"\n }\n ],\n \"local_endpoint_path\": f\"/infra/tier-0s/vmc/ipsec-vpn-services/default/local-endpoints/{endpoint}\",\n \"ike_profile_path\": \"/infra/ipsec-vpn-ike-profiles/__l2vpn__internal__\",\n \"tunnel_profile_path\": \"/infra/ipsec-vpn-tunnel-profiles/__l2vpn__internal__\",\n \"dpd_profile_path\": \"/infra/ipsec-vpn-dpd-profiles/__l2vpn__internal__\",\n \"tcp_mss_clamping\": {\n \"direction\": \"NONE\"\n },\n \"authentication_mode\": \"PSK\",\n \"psk\": \"None\",\n \"peer_address\": remote_addr,\n \"peer_id\": remote_addr\n }\n # print(json.dumps(ipsec_json, indent=2))\n dpd_response_code = new_ipsec_vpn_dpd_profile_json(proxy, session_token, dpd_json, \"__l2vpn__internal__\")\n ike_response_code = new_ipsec_vpn_ike_profile_json(proxy, session_token, \"__l2vpn__internal__\", ike_profile_json)\n tun_response_code = new_ipsec_vpn_profile_json(proxy, session_token, \"__l2vpn__internal__\", ipsec_tun_json)\n if dpd_response_code == 200 and ike_response_code == 200 and tun_response_code == 200:\n json_respon_status_code = new_sddc_ipsec_session_json(proxy, session_token, ipsec_json, \"__l2vpn__internal__\")\n if json_respon_status_code == 200:\n l2vpn_json = {\n \"transport_tunnels\": [\n \"/infra/tier-0s/vmc/ipsec-vpn-services/default/sessions/__l2vpn__internal__\"\n ],\n \"tcp_mss_clamping\": {\n \"direction\": \"BOTH\"\n },\n \"resource_type\": \"L2VPNSession\",\n \"id\": \"__l2vpn__internal__\",\n \"display_name\": display_name\n }\n json_response_status_code = new_l2vpn_json(proxy, session_token, \"__l2vpn__internal__\", l2vpn_json)\n if json_response_status_code == 200:\n sys.exit(f\"SDDC L2VPN {display_name} has been created successfully\")\n else:\n print(f\"There was an error creating {display_name} L2VPN\")\n sys.exit(1)\n else:\n print(f\"There was an error creating the IPSec tunnel for {display_name} L2VPN\")\n sys.exit(1)\n else:\n print(\"There was an error creating one of the tunnel 
encryption profiles\")\n        sys.exit(1)",
"def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)",
"def __call__(self):\n LOGGER.info('frequency_id: {0}'.format(self._frequency_id))\n ec2_helper = EC2Helper()\n zone = ec2_helper.get_cheapest_spot_price(self._instance_type, self._spot_price)\n\n if zone is not None:\n user_data_mime = self.get_mime_encoded_user_data()\n LOGGER.info('{0}'.format(user_data_mime))\n\n ec2_helper.run_spot_instance(\n self._ami_id,\n self._spot_price,\n user_data_mime,\n self._instance_type, None,\n self._created_by,\n '{0}-{1}'.format(self._frequency_id, self._name),\n instance_details=self._instance_details,\n zone=zone,\n ephemeral=True)\n else:\n LOGGER.error('Cannot get a spot instance of {0} for ${1}'.format(self._instance_type, self._spot_price))",
"def request_spot_instances(self, price, image_id, count=1, type='one-time',\r\n valid_from=None, valid_until=None,\r\n launch_group=None, availability_zone_group=None,\r\n key_name=None, security_groups=None,\r\n user_data=None, addressing_type=None,\r\n instance_type='m1.small', placement=None,\r\n kernel_id=None, ramdisk_id=None,\r\n monitoring_enabled=False, subnet_id=None,\r\n block_device_map=None):\r\n params = {'LaunchSpecification.ImageId':image_id,\r\n 'Type' : type,\r\n 'SpotPrice' : price}\r\n if count:\r\n params['InstanceCount'] = count\r\n if valid_from:\r\n params['ValidFrom'] = valid_from\r\n if valid_until:\r\n params['ValidUntil'] = valid_until\r\n if launch_group:\r\n params['LaunchGroup'] = launch_group\r\n if availability_zone_group:\r\n params['AvailabilityZoneGroup'] = availability_zone_group\r\n if key_name:\r\n params['LaunchSpecification.KeyName'] = key_name\r\n if security_groups:\r\n l = []\r\n for group in security_groups:\r\n if isinstance(group, SecurityGroup):\r\n l.append(group.name)\r\n else:\r\n l.append(group)\r\n self.build_list_params(params, l,\r\n 'LaunchSpecification.SecurityGroup')\r\n if user_data:\r\n params['LaunchSpecification.UserData'] = base64.b64encode(user_data)\r\n if addressing_type:\r\n params['LaunchSpecification.AddressingType'] = addressing_type\r\n if instance_type:\r\n params['LaunchSpecification.InstanceType'] = instance_type\r\n if placement:\r\n params['LaunchSpecification.Placement.AvailabilityZone'] = placement\r\n if kernel_id:\r\n params['LaunchSpecification.KernelId'] = kernel_id\r\n if ramdisk_id:\r\n params['LaunchSpecification.RamdiskId'] = ramdisk_id\r\n if monitoring_enabled:\r\n params['LaunchSpecification.Monitoring.Enabled'] = 'true'\r\n if subnet_id:\r\n params['LaunchSpecification.SubnetId'] = subnet_id\r\n if block_device_map:\r\n block_device_map.build_list_params(params, 'LaunchSpecification.')\r\n return self.get_list('RequestSpotInstances', params,\r\n [('item', SpotInstanceRequest)],\r\n verb='POST')",
"def connect_dev_endpoint(self):\n\n done = False\n\n while not done:\n\n endpoint = self.glue_engine.get_dev_endpoint(EndpointName=self.dev_endpoint_name)\n\n status = endpoint[\"DevEndpoint\"][\"Status\"]\n\n done = status == \"READY\"\n\n if status == \"PROVISIONING\":\n print(\"Still provisionning...\")\n time.sleep(30)\n elif status == \"READY\":\n print(\"Done\")\n done = True\n else:\n print(\"There was an error\")\n print(status)\n\n public_ip = endpoint[\"DevEndpoint\"][\"PublicAddress\"]\n\n os.system(\n \"ssh -i {} glue@{} -t gluepyspark\".format(self.dev_endpoint_private_rsa, public_ip))",
"def startami(image, instancetype, accesskey, secretkey, pkname):\n if not is_valid_instance_type(image, instancetype):\n raise ValueError(\"Invalid instance type: '%s'\" % instancetype)\n\n conn = EC2Connection(accesskey, secretkey)\n image = conn.get_image(get_image_id(image))\n reservation = image.run(instance_type=instancetype, key_name=pkname)\n instance = reservation.instances[0]\n\n waitForInstanceToRun(instance)\n\n # [AN] call script instanceStartup.py\n return str(instance.dns_name)",
"async def start_ec2_instance(self, env):\n instanceDef= {\n 'AWS_AMI_ID': os.getenv(\"AWS_AMI_ID\"),\n 'AWS_KEYNAME': os.getenv(\"AWS_KEYNAME\"),\n 'AWS_SECURITY_GROUP': os.getenv('AWS_SECURITY_GROUP'),\n 'AWS_SUBNET': os.getenv(\"AWS_SUBNET\"),\n 'DryRun':False,\n 'AWS_INSTANCE_NAME': 'Jupyter',\n 'AWS_IAM_ARN': os.getenv('AWS_IAM_ARN')\n }\n \n self.log.debug('building instance')\n ip = await self.buildInstance(instanceDef, env)\n return ip"
] | [
"0.5938847",
"0.58754605",
"0.5437055",
"0.5432101",
"0.54282796",
"0.53524303",
"0.53459686",
"0.5296672",
"0.52392614",
"0.51798517",
"0.50719494",
"0.5044687",
"0.5031047",
"0.5015609",
"0.4999175",
"0.4978533",
"0.49736512",
"0.49699563",
"0.49691266",
"0.49682745",
"0.49540618",
"0.49441853",
"0.49262887",
"0.49219102",
"0.48889962",
"0.4865705",
"0.48621994",
"0.48283392",
"0.48268723",
"0.48170942"
] | 0.6438622 | 0 |
Translates edges of histogram bins to bin centres. | def bin_edges_to_centres(edges):
if edges.ndim == 1:
steps = (edges[1:] - edges[:-1]) / 2
return edges[:-1] + steps
else:
steps = (edges[1:, 1:] - edges[:-1, :-1]) / 2
centres = edges[:-1, :-1] + steps
return centres | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_centers_from_bins(bins):\n return 0.5 * (bins[:-1] + bins[1:])",
"def bin_edges_to_center(edges):\n df = np.diff(edges)\n if isinstance(df[0], datetime.timedelta) and sys.version_info[0:2]<=(3,2):\n return edges[:-1] + df//2\n else:\n return edges[:-1] + df/2",
"def find_bin_edges(bin_centres):\n\n if not isinstance(bin_centres, np.ndarray):\n bin_centres = np.asarray(bin_centres)\n\n edges = bin_centres[:-1] + 0.5 * (bin_centres[1:] - bin_centres[:-1])\n bins = np.concatenate(([2 * bin_centres[0] - edges[0]], edges,\n [2 * bin_centres[-1] - edges[-1]]))\n\n return bins",
"def bin_center_to_edges(centers):\n edges = bin_edges_to_center(centers)\n edges = np.append(centers[0]-(edges[0]-centers[0]), edges)\n edges = np.append(edges, centers[-1]+(centers[-1]-edges[-1]))\n return edges",
"def bin_centres_to_edges(centres, sort=True):\r\n if sort:\r\n centres = np.sort(centres.flatten())\r\n internal_means = (centres[1:] + centres[:-1]) / 2.0\r\n before, after = (\r\n centres[0] - (internal_means[0] - centres[0]),\r\n centres[-1] + (centres[-1] - internal_means[-1]),\r\n )\r\n return np.hstack([before, internal_means, after])",
"def from_bins(bins):\n return 0.5*(bins[1:] + bins[:-1])",
"def bins (self):\n return self._bins",
"def bins (self):\n return self._bins",
"def bins(self):\n return self._bins",
"def bin_centers(radial_bins):\n\n outer = radial_bins[1:]\n inner = radial_bins[:-1]\n return 0.5 * (outer + inner)",
"def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins",
"def normalize(histogram):\n nbins = histogram.GetNbinsX()\n integral = histogram.Integral(1,nbins)\n newhist = histogram.Clone()\n newhist.Reset()\n for bin in range(1,nbins+1):\n ibinY = histogram.GetBinContent(bin)\n newhist.SetBinContent(bin,ibinY/integral)\n return newhist",
"def transform_bins(self, X):\n binner = self.binner_\n if hasattr(binner, \"tree_\"):\n dec_path = self.binner_.decision_path(X)\n association = numpy.zeros((X.shape[0],))\n association[:] = -1\n for j in self.leaves_:\n ind = dec_path[:, j] == 1\n ind = numpy.asarray(ind.todense()).flatten()\n if not numpy.any(ind):\n # No training example for this bucket.\n continue\n association[ind] = self.mapping_.get(j, -1)\n\n elif hasattr(binner, \"transform\"):\n association = numpy.zeros((X.shape[0],))\n association[:] = -1\n tr = binner.transform(X)\n for i, x in enumerate(tr):\n d = tuple(numpy.asarray(\n x.todense()).ravel().astype(numpy.int32))\n association[i] = self.mapping_.get(d, -1)\n else:\n raise NotImplementedError( # pragma: no cover\n \"binner is not a decision tree or a transform\")\n return association",
"def _determine_histogram_bins(self, ma_maps):\n if isinstance(ma_maps, list):\n ma_values = self.masker.transform(ma_maps)\n elif isinstance(ma_maps, np.ndarray):\n ma_values = ma_maps.copy()\n else:\n raise ValueError(f\"Unsupported data type '{type(ma_maps)}'\")\n\n # Determine bins for null distribution histogram\n # Remember that numpy histogram bins are bin edges, not centers\n # Assuming values of 0, .001, .002, etc., bins are -.0005-.0005, .0005-.0015, etc.\n INV_STEP_SIZE = 100000\n step_size = 1 / INV_STEP_SIZE\n max_ma_values = np.max(ma_values, axis=1)\n # round up based on resolution\n max_ma_values = np.ceil(max_ma_values * INV_STEP_SIZE) / INV_STEP_SIZE\n max_poss_ale = self.compute_summarystat(max_ma_values)\n # create bin centers\n hist_bins = np.round(np.arange(0, max_poss_ale + (1.5 * step_size), step_size), 5)\n self.null_distributions_[\"histogram_bins\"] = hist_bins",
"def grid_to_bins(grid, start_bin_val, end_bin_val):\n bin_centers = (grid[1:] + grid[:-1])/2.0\n bins = np.concatenate([[start_bin_val], bin_centers, [end_bin_val]])\n return bins",
"def _make_bins(start, stop, step):\n bin_edges = np.arange(start, stop + step, step)\n\n return bin_edges",
"def test_centrally(self):\n import numpy as np\n import histogrammar\n\n h = histogrammar.CentrallyBin([0, 10, 20, 40, 100])\n h.fillnumpy([-5, 5, 5, 50, 10, 100, 1000, 50, 50])\n\n np.testing.assert_array_equal(h.bin_entries(), [1., 3., 0., 3., 2.])\n np.testing.assert_array_equal(h.bin_edges(), [float('-inf'), 5., 15., 30., 70., float('inf')])\n np.testing.assert_array_equal(h.bin_centers(), [0., 10., 20., 40., 100.])\n assert h.num_bins() == 5\n assert h.n_bins == 5\n np.testing.assert_almost_equal(h.mpv, 10.)\n\n np.testing.assert_array_equal(h.bin_entries(10, 40), [3., 0., 3.])\n np.testing.assert_array_equal(h.bin_edges(10, 40), [5., 15., 30., 70.])\n np.testing.assert_array_equal(h.bin_centers(10, 40), [10., 20., 40.])\n assert h.num_bins(10, 40) == 3\n\n np.testing.assert_array_equal(h.bin_entries(5, 70), [3., 0., 3.])\n np.testing.assert_array_equal(h.bin_edges(5, 70), [5., 15., 30., 70.])\n np.testing.assert_array_equal(h.bin_centers(5, 70), [10., 20., 40.])\n assert h.num_bins(5, 70) == 3\n\n np.testing.assert_array_equal(h.bin_entries(5, 110), [3., 0., 3., 2.])\n np.testing.assert_array_equal(h.bin_edges(5, 110), [5., 15., 30., 70., float('inf')])\n np.testing.assert_array_equal(h.bin_centers(5, 110), [10., 20., 40., 100.])\n assert h.num_bins(5, 110) == 4",
"def bins(self):\n\n if self.hist_x_min is None or self.hist_x_max is None or self.hist_n_bin is None:\n return None\n\n if self.x_log:\n return np.logspace(np.log10(self.hist_x_min),\n np.log10(self.hist_x_max),\n self.hist_n_bin + 1)\n else:\n return np.linspace(self.hist_x_min, self.hist_x_max,\n self.hist_n_bin + 1)",
"def get_histogram(self):\n\n for bin in range(self.bins.size):\n bin_inf = self.bins[bin]\n try: bin_sup = self.bins[bin + 1]\n except IndexError: bin_sup = self.vmax\n self.hist[bin] = np.sum(\n (self.values >= bin_inf)*(self.values < bin_sup))\n\n binned_values = np.sum(self.hist)\n if binned_values == 0: return self.hist # no binned value\n else: self.hist /= np.sum(self.hist)\n return self.hist",
"def bins(self):\n\n if self.hist_x_min is None or self.hist_x_max is None or self.hist_n_bin is None:\n return None\n\n if self.x_log:\n return np.logspace(np.log10(self.hist_x_min),\n np.log10(self.hist_x_max),\n self.hist_n_bin + 1)\n elif isinstance(self.hist_x_min, np.datetime64):\n x_min = self.hist_x_min.astype(int)\n x_max = self.hist_x_max.astype(self.hist_x_min.dtype).astype(int)\n return np.linspace(x_min, x_max, self.hist_n_bin + 1).astype(self.hist_x_min.dtype)\n else:\n return np.linspace(self.hist_x_min, self.hist_x_max,\n self.hist_n_bin + 1)",
"def _getNormalBinWidths(self, style):\n \n bin_size = style.get(\"normalize_bin_size\", \"auto\")\n if bin_size == \"auto\":\n # Use the size of the first bins.\n x0, x1 = self.histogram.axes[0].getBinRange(0)\n y0, y1 = self.histogram.axes[1].getBinRange(0)\n return x1 - x0, y1 - y0\n elif bin_size is None:\n return None\n else:\n size_x, size_y = bin_size\n return size_x, size_y",
"def binning_axis(self) -> int:\r\n return 0",
"def bin_binarise(self):\n pass",
"def _get_bin_edges(a, bins, range):\n # parse the overloaded bins argument\n n_equal_bins = None\n bin_edges = None\n\n if isinstance(bins, str):\n raise NotImplementedError(\n 'only integer and array bins are implemented')\n elif isinstance(bins, cupy.ndarray) or numpy.ndim(bins) == 1:\n # TODO(okuta): After #3060 is merged, `if cupy.ndim(bins) == 1:`.\n if isinstance(bins, cupy.ndarray):\n bin_edges = bins\n else:\n bin_edges = numpy.asarray(bins)\n\n if (bin_edges[:-1] > bin_edges[1:]).any(): # synchronize! when CuPy\n raise ValueError(\n '`bins` must increase monotonically, when an array')\n if isinstance(bin_edges, numpy.ndarray):\n bin_edges = cupy.asarray(bin_edges)\n elif numpy.ndim(bins) == 0:\n try:\n n_equal_bins = operator.index(bins)\n except TypeError:\n raise TypeError(\n '`bins` must be an integer, a string, or an array')\n if n_equal_bins < 1:\n raise ValueError('`bins` must be positive, when an integer')\n\n first_edge, last_edge = _get_outer_edges(a, range)\n else:\n raise ValueError('`bins` must be 1d, when an array')\n\n if n_equal_bins is not None:\n # numpy's gh-10322 means that type resolution rules are dependent on\n # array shapes. To avoid this causing problems, we pick a type now and\n # stick with it throughout.\n bin_type = cupy.result_type(first_edge, last_edge, a)\n if cupy.issubdtype(bin_type, cupy.integer):\n bin_type = cupy.result_type(bin_type, float)\n\n # bin edges must be computed\n bin_edges = cupy.linspace(\n first_edge, last_edge, n_equal_bins + 1,\n endpoint=True, dtype=bin_type)\n return bin_edges",
"def __init__(self, bins):\n self.bins = bins",
"def get_bins_labels(self, is_outer_map=False):\n if is_outer_map:\n return np.concatenate(([np.NINF], self.original_bins, [np.PINF]))\n else:\n return self.original_bins",
"def label_values_to_bins(array_to_bin:object, bin_count:int):\n\t\t# Make 1D for qcut.\n\t\tarray_to_bin = array_to_bin.flatten()\n\t\t# For really unbalanced labels, I ran into errors where bin boundaries would be duplicates all the way down to 2 bins.\n\t\t# Setting `duplicates='drop'` to address this.\n\t\tbin_numbers = pd.qcut(x=array_to_bin, q=bin_count, labels=False, duplicates='drop')\n\t\t# Convert 1D array back to 2D for the rest of the program.\n\t\tbin_numbers = np.reshape(bin_numbers, (-1, 1))\n\t\treturn bin_numbers",
"def correlation_bins(shred):\n return 0",
"def calculateMetallicityBinEdges(self):\n\n if self.binInLogSpace:\n logMetallicities = np.log10(self.metallicityGrid)\n b= logMetallicities[:-1] + (logMetallicities[1:] - logMetallicities[:-1])/2.\n b = 10.**b #the boundaries for integration are not in log space so\n #convert to \"normal\" numbers.\n else:\n b= (self.metallicityGrid[1:] - self.metallicityGrid[:-1])/2. \\\n + self.metallicityGrid[:-1] \n\n self.metallicityBinEdges = np.zeros(len(b)+2)\n\n #the lowest/highest metallicity bin edge are set in options\n #the calculated b edges are all in between\n\n self.metallicityBinEdges[0] = self.metallicityLowerLimit\n self.metallicityBinEdges[-1] = self.metallicityUpperLimit\n self.metallicityBinEdges[1:-1] = b",
"def calculate_bin_edges(n_bins, geo):\n #Gefittete offsets: x,y,factor: factor*(x+x_off)\n #[6.19, 0.064, 1.0128]\n \n #print \"Reading detector geometry in order to calculate the detector dimensions from file \" + fname_geo_limits\n #geo = np.loadtxt(fname_geo_limits)\n\n # derive maximum and minimum x,y,z coordinates of the geometry input [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]\n geo_limits = np.nanmin(geo, axis = 0), np.nanmax(geo, axis = 0)\n #print ('Detector dimensions [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]: ' + str(geo_limits))\n\n x_bin_edges = np.linspace(geo_limits[0][1] - 9.95, geo_limits[1][1] + 9.95, num=n_bins[0] + 1) #try to get the lines in the bin center 9.95*2 = average x-separation of two lines\n y_bin_edges = np.linspace(geo_limits[0][2] - 9.75, geo_limits[1][2] + 9.75, num=n_bins[1] + 1) # Delta y = 19.483\n z_bin_edges = np.linspace(geo_limits[0][3] - 4.665, geo_limits[1][3] + 4.665, num=n_bins[2] + 1) # Delta z = 9.329\n\n #offset_x, offset_y, scale = [6.19, 0.064, 1.0128]\n #x_bin_edges = (x_bin_edges + offset_x )*scale\n #y_bin_edges = (y_bin_edges + offset_y )*scale\n\n #calculate_bin_edges_test(geo, y_bin_edges, z_bin_edges) # test disabled by default. Activate it, if you change the offsets in x/y/z-bin-edges\n\n return x_bin_edges, y_bin_edges, z_bin_edges"
] | [
"0.7115142",
"0.6772283",
"0.6753914",
"0.6714895",
"0.64245945",
"0.63743883",
"0.6348091",
"0.6348091",
"0.63018626",
"0.6241863",
"0.62182707",
"0.61705595",
"0.6123319",
"0.6078565",
"0.5994055",
"0.597821",
"0.59385145",
"0.5935507",
"0.5913347",
"0.59129584",
"0.5912303",
"0.58190596",
"0.58115387",
"0.5783075",
"0.57709223",
"0.57661515",
"0.5740256",
"0.5732798",
"0.5731689",
"0.572617"
] | 0.7626651 | 0 |
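An illustrative usage sketch of the edge-to-centre conversion from the row above, assuming numpy; the random sample and bin count are arbitrary placeholders.

import numpy as np

# numpy.histogram returns n counts and n+1 bin edges; the midpoints give one centre per bin.
counts, edges = np.histogram(np.random.default_rng(0).normal(size=1000), bins=10)
centres = edges[:-1] + (edges[1:] - edges[:-1]) / 2
assert centres.shape == counts.shape  # one centre per bin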
Construct a graphical linearly-spaced grid within a ternary space. | def ternary_grid(
data=None, nbins=10, margin=0.001, force_margin=False, yscale=1.0, tfm=lambda x: x
):
if data is not None:
data = close(data)
if not force_margin:
margin = min([margin, np.nanmin(data[data > 0])])
# let's construct a bounding triangle
bounds = np.array( # three points defining the edges of what will be rendered
[
[margin, margin, 1.0 - 2 * margin],
[margin, 1.0 - 2 * margin, margin],
[1.0 - 2 * margin, margin, margin],
]
)
xbounds, ybounds = ABC_to_xy(bounds, yscale=yscale).T # in the cartesian xy space
xbounds = np.hstack((xbounds, [xbounds[0]]))
ybounds = np.hstack((ybounds, [ybounds[0]]))
tck, u = scipy.interpolate.splprep([xbounds, ybounds], per=True, s=0, k=1)
# interpolated outer boundary
xi, yi = scipy.interpolate.splev(np.linspace(0, 1.0, 10000), tck)
A, B, C = xy_to_ABC(np.vstack([xi, yi]).T, yscale=yscale).T
abcbounds = np.vstack([A, B, C])
abounds = tfm(abcbounds.T)
ndim = abounds.shape[1]
# bins for evaluation
bins = [
np.linspace(np.nanmin(abounds[:, dim]), np.nanmax(abounds[:, dim]), nbins)
for dim in range(ndim)
]
binedges = [bin_centres_to_edges(b) for b in bins]
centregrid = np.meshgrid(*bins)
edgegrid = np.meshgrid(*binedges)
assert len(bins) == ndim
return bins, binedges, centregrid, edgegrid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_grid(self):\n for k in range(0, NUM + 1):\n self.create_line(k * UNIT, 0, k * UNIT, SIZE, width=THICKNESS)\n self.create_line(0, k * UNIT, SIZE, k * UNIT, width=THICKNESS)",
"def make_grid(N):\n\n x = np.linspace(-2. , 2 , N)\n y = np.linspace(-2. , 2 , N)\n # two evenly spaced grids from -2 to 2\n\n return x, y",
"def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])",
"def linear_grid(D, n = 100, min_max = (-100, 100)):\r\n\r\n g = np.linspace(min_max[0], min_max[1], n)\r\n G = np.ones((n, D))\r\n\r\n return G*g[:,None]",
"def SimpleMeasuredGrid(min_x,min_y,max_x,max_y,x_spacing,y_spacing,\n color=(0.5,1.0,0.5,1.0),xoff=-0.14,yoff=1.04,\n label_type=None,shapes_name=\"Grid\"):\n\n shps=gview.GvShapes(name=shapes_name)\n gview.undo_register( shps )\n shps.add_field('position','string',20)\n\n if os.name == 'nt':\n font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n else:\n #font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n #font=\"-urw-helvetica-medium-r-normal-*-9-*-*-*-p-*-iso8859-2\"\n font=\"-adobe-helvetica-medium-r-normal-*-8-*-*-*-p-*-iso10646-1\"\n #font=\"-misc-fixed-medium-r-*-*-9-*-*-*-*-*-*-*\"\n\n\n # Round to nearest integer space\n max_x=min_x+numpy.floor((max_x-min_x)/x_spacing)*x_spacing\n max_y=min_y+numpy.floor((max_y-min_y)/y_spacing)*y_spacing\n\n lxoff=(max_x-min_x)*xoff # horizontal label placement\n lyoff=(max_y-min_y)*yoff # vertical label placement\n\n for hval in numpy.arange(min_x,\n max_x+x_spacing/100.0,\n x_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(hval,max_y,0,0)\n nshp.set_node(hval,min_y,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(hval,min_y+lyoff)\n pshp.set_property('position',\"%d\" % int(hval+0.5))\n shps.append(pshp)\n\n for vval in numpy.arange(min_y,\n max_y+y_spacing/100.0,\n y_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(min_x,vval,0,0)\n nshp.set_node(max_x,vval,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(min_x+lxoff,vval)\n pshp.set_property('position',\"%d\" % int(vval+0.5))\n shps.append(pshp)\n\n cstr=gvogrfs.gv_to_ogr_color(color)\n if len(cstr) < 9:\n cstr=cstr+\"FF\"\n clstr=str(color[0])+' '+str(color[1])+' '+str(color[2])+' '+str(color[3])\n\n layer=gview.GvShapesLayer(shps)\n layer.set_property('_line_color',clstr)\n layer.set_property('_point_color',clstr)\n # Set antialias property so that lines look nice\n # when rotated.\n layer.set_property('_gl_antialias','1')\n layer.set_property('_gv_ogrfs_point',\n 'LABEL(t:{position},f:\"'+font+'\",c:'+cstr+')')\n layer.set_read_only(True) \n\n return layer",
"def linear_grid(D, n = 100, min_max = (-100, 100)):\n\n g = np.linspace(min_max[0], min_max[1], n)\n G = np.ones((n, D))\n\n return G*g[:,None]",
"def create_grid(self):\n\n # If called when a grid already exists create a new grid\n if self.grid:\n self.grid = []\n\n grid_pen = QPen(QColor(215, 215, 215), 1)\n w = 10000\n h = 10000\n self.addLine(-10000, 0, 10000, 0, QPen(QColor(0, 0, 0), 2))\n self.addLine(0, -10000, 0, 10000, QPen(QColor(0, 0, 0), 2))\n\n w = int(w / self.grid_spacing) * self.grid_spacing\n h = int(h / self.grid_spacing) * self.grid_spacing\n for i in range(-w, w, self.grid_spacing):\n if i == 0:\n pass\n else:\n line = self.addLine(-w, i, w, i, grid_pen)\n line.setZValue(-1)\n self.grid.append(line)\n for i in range(-h, h, self.grid_spacing):\n if i == 0:\n pass\n else:\n line = self.addLine(i, -h, i, h, grid_pen)\n line.setZValue(-1)\n self.grid.append(line)\n\n self.grid_built = True",
"def makeGrid(self):\n self.h = self.step_x\n self.k = self.step_t\n self.t, self.x = np.meshgrid(np.arange(self.min_t, self.max_t, self.step_t), np.arange(self.min_x, self.max_x\n , self.step_x))",
"def make_grid(n, gl=False):\n\n if gl:\n low = 0.0 # Lower Range\n high = 1.0\n p = 0.5\n\n # The method here uses 2*n points so halve it\n n, r, wt = GridGenerator.gaussp(low, high, n//2)\n r = np.concatenate((r, np.zeros((n))))\n wt = np.concatenate((wt, np.zeros((n))))\n for i in range(n):\n r[2*n-(i+1)] = (1.0/r[i])**2\n wt[2*n-(i+1)] = (wt[i]/p)*r[2*n - (i+1)]**1.5\n else:\n n, r, wt = GridGenerator.radial_chebyshev(n)\n\n return n, r, wt",
"def create_grid(tree):\n\t\n\twp = tree['misc']['working precision'] \n\tndim = tree['eqns']['ndim']\n\tgrid = tree['grid']['size']\n\tgeom = tree['grid']['geom']\n\n\tnxgb, nygb, nzgb = grid['nxgb'], grid['nygb'], grid['nzgb'] \n\tLx , Ly , Lz = geom['Lx'] , geom['Ly'] , geom['Lz'] \n\n\tdmpi = tree['mpi']['dMpi']\n\tibeg,jbeg,kbeg = dmpi.ibeg,dmpi.jbeg,dmpi.kbeg\n\tiend,jend,kend = dmpi.iend,dmpi.jend,dmpi.kend\n\n\thlo = tree['num']['hlo'] \n\n\t# create domain\n\t# pt 1 2 n-1 n period\n\t# 0 L |\n\t# full domain is [0,L] but careful: |--o--|--o--| ... |--o--|--o--|--V\n\t# \\___________________________/ \n\t\n\tbc = tree['bc']['allbc']\n\n\tif ('i1' in bc) or ('imax' in bc):\n\t\tdx = Lx/cst(nxgb+2*hlo-1)\n\t\tx = np.linspace(cst(0.0),Lx,nxgb+2*hlo,dtype=wp)\n\telse:\t\n\t\tdx = Lx/cst(nxgb)\n\t\tx = np.arange(dx/cst(2.),Lx,dx,dtype=wp)\n\n\tif nygb > 1:\n\t\tif ('j1' in bc) or ('jmax' in bc):\n\t\t\tdy = Ly/cst(nygb+2*hlo-1)\n\t\t\ty = np.linspace(cst(0.0),Ly,nygb+2*hlo,dtype=wp)\n\t\telse:\t\t\n\t\t\tdy = Ly/cst(nygb)\n\t\t\ty = np.arange(dy/cst(2.),Ly,dy,dtype=wp)\n\telse:\n\t\tLy = cst(0.); y = []; dy = cst(0.)\n\t\t\n\tif nzgb > 1:\n\t\tif ('k1' in bc) or ('kmax' in bc):\n\t\t\tdz = Lz/cst(nzgb+2*hlo-1)\n\t\t\tz = np.linspace(cst(0.0),Lz,nzgb+2*hlo,dtype=wp)\n\t\telse:\t\t\t\n\t\t\tdz = Lz/cst(nzgb)\n\t\t\tz = np.arange(dz/cst(2.),Lz,dz,dtype=wp)\n\telse:\n\t\tLz = cst(0.); z = []; dz = cst(0.)\n\n\tgeom['dx'], geom['dy'], geom['dz'] = dx, dy, dz\n\tgeom['x'] , geom['y'] , geom['z'] = x , y , z\n\n\t# global iend ! ibeg iend ! ibeg\n\t# pt# n-1 n ! 1 2 n-1 n ! 1 2\n\t# ... |--o--|--o--|!|--o--|--o--| ... |--o--|--o--|!|--o--|--o--| ...\n\t# w/ hlo [<------------------------------------------------------->]\n\t# loc py ind 0 1 hlo hlo+n-1 n+2*hlo-1\n\t# glo py ind ibeg+hlo-1 iend+hlo-1\n\t\n\tif ndim == 3:\n\t\txloc = x[ibeg-1:iend] # without halos\n\t\tyloc = y[jbeg-1:jend]\n\t\tzloc = z[kbeg-1:kend]\n\t\txx,yy,zz = np.meshgrid(xloc,yloc,zloc,sparse=False,indexing='ij')\n\t\tgeom['xloc'],geom['yloc'], geom['zloc'] = xloc,yloc,zloc\n\telif ndim == 2:\n\t\txloc = x[ibeg-1:iend] # without halos\n\t\tyloc = y[jbeg-1:jend]\n\t\txx,yy = np.meshgrid(xloc,yloc,sparse=False,indexing='ij')\n\t\tgeom['xloc'],geom['yloc'] = xloc,yloc\n\telse:\n\t\txloc = x[ibeg-1:iend] # without halos\n\t\tgeom['xloc'] = xloc\n\treturn tree",
"def grid_maker(width, height):\n grid = [['.' for i in range(width)] for j in range(height)]\n return grid",
"def nicegrid():\n pl.grid(b=True, which='major', color='black', linestyle='-')\n\n pl.grid(b=True, which='minor', color='silver', linestyle=':')",
"def SimpleLatLongGrid(min_x,min_y,max_x,max_y,hdeg,hmin,hsec,vdeg,vmin,vsec,\n color=(0.5,1.0,0.5,1.0),xoff=-0.18,yoff=1.04,\n label_type=None,shapes_name=\"Grid\"):\n\n shps=gview.GvShapes(name=shapes_name)\n gview.undo_register( shps )\n shps.add_field('position','string',20)\n\n if os.name == 'nt':\n font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n else:\n #font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n #font=\"-urw-helvetica-medium-r-normal-*-9-*-*-*-p-*-iso8859-2\"\n font=\"-adobe-helvetica-medium-r-normal-*-8-*-*-*-p-*-iso10646-1\"\n #font=\"-misc-fixed-medium-r-*-*-9-*-*-*-*-*-*-*\"\n\n x_spacing=float(hdeg)+(float(hmin)+(float(hsec)/60.0))/60.0\n y_spacing=float(vdeg)+(float(vmin)+(float(vsec)/60.0))/60.0\n\n\n # Round to nearest integer space\n max_x=min_x+numpy.floor((max_x-min_x)/x_spacing)*x_spacing\n max_y=min_y+numpy.floor((max_y-min_y)/y_spacing)*y_spacing\n\n lxoff=(max_x-min_x)*xoff # horizontal label placement\n lyoff=(max_y-min_y)*yoff # vertical label placement\n\n for hval in numpy.arange(min_x,\n max_x+x_spacing/100.0,\n x_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(hval,max_y,0,0)\n nshp.set_node(hval,min_y,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(hval,min_y+lyoff)\n hstr=GetLatLongString(hval,'longitude')\n pshp.set_property('position',hstr)\n shps.append(pshp)\n\n for vval in numpy.arange(min_y,\n max_y+y_spacing/100.0,\n y_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(min_x,vval,0,0)\n nshp.set_node(max_x,vval,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(min_x+lxoff,vval)\n vstr=GetLatLongString(vval,'latitude')\n pshp.set_property('position',vstr)\n shps.append(pshp)\n\n cstr=gvogrfs.gv_to_ogr_color(color)\n if len(cstr) < 9:\n cstr=cstr+\"FF\"\n clstr=str(color[0])+' '+str(color[1])+' '+str(color[2])+' '+str(color[3])\n\n layer=gview.GvShapesLayer(shps)\n layer.set_property('_line_color',clstr)\n layer.set_property('_point_color',clstr)\n # Set antialias property so that lines look nice\n # when rotated.\n layer.set_property('_gl_antialias','1')\n layer.set_property('_gv_ogrfs_point',\n 'LABEL(t:{position},f:\"'+font+'\",c:'+cstr+')')\n layer.set_read_only(True)\n\n return layer",
"def create_grids(self):\n \n par = self.par\n\n # a. retirement\n \n # pre-decision states\n par.grid_m_ret = nonlinspace(par.eps,par.m_max_ret,par.Nm_ret,par.phi_m)\n par.Nmcon_ret = par.Nm_ret - par.Na_ret\n \n # post-decision states\n par.grid_a_ret = nonlinspace(0,par.a_max_ret,par.Na_ret,par.phi_m)\n \n # b. working: state space (m,n,k) \n par.grid_m = nonlinspace(par.eps,par.m_max,par.Nm,par.phi_m)\n\n par.Nn = par.Nm\n par.n_max = par.m_max + par.n_add\n par.grid_n = nonlinspace(0,par.n_max,par.Nn,par.phi_n)\n\n par.grid_n_nd, par.grid_m_nd = np.meshgrid(par.grid_n,par.grid_m,indexing='ij')\n\n # c. working: w interpolant (and wa and wb and wq)\n par.Na_pd = np.int_(np.floor(par.pd_fac*par.Nm))\n par.a_max = par.m_max + par.a_add\n par.grid_a_pd = nonlinspace(0,par.a_max,par.Na_pd,par.phi_m)\n \n par.Nb_pd = np.int_(np.floor(par.pd_fac*par.Nn))\n par.b_max = par.n_max + par.b_add\n par.grid_b_pd = nonlinspace(0,par.b_max,par.Nb_pd,par.phi_n)\n \n par.grid_b_pd_nd, par.grid_a_pd_nd = np.meshgrid(par.grid_b_pd,par.grid_a_pd,indexing='ij')\n \n # d. working: egm (seperate grids for each segment)\n \n if par.solmethod == 'G2EGM':\n\n # i. dcon\n par.d_dcon = np.zeros((par.Na_pd,par.Nb_pd),dtype=np.float_,order='C')\n \n # ii. acon\n par.Nc_acon = np.int_(np.floor(par.Na_pd*par.acon_fac))\n par.Nb_acon = np.int_(np.floor(par.Nb_pd*par.acon_fac))\n par.grid_b_acon = nonlinspace(0,par.b_max,par.Nb_acon,par.phi_n)\n par.a_acon = np.zeros(par.grid_b_acon.shape)\n par.b_acon = par.grid_b_acon\n\n # iii. con\n par.Nc_con = np.int_(np.floor(par.Na_pd*par.con_fac))\n par.Nb_con = np.int_(np.floor(par.Nb_pd*par.con_fac))\n \n par.grid_c_con = nonlinspace(par.eps,par.m_max,par.Nc_con,par.phi_m)\n par.grid_b_con = nonlinspace(0,par.b_max,par.Nb_con,par.phi_n)\n\n par.b_con,par.c_con = np.meshgrid(par.grid_b_con,par.grid_c_con,indexing='ij')\n par.a_con = np.zeros(par.c_con.shape)\n par.d_con = np.zeros(par.c_con.shape)\n \n elif par.solmethod == 'NEGM':\n\n par.grid_l = par.grid_m\n\n # e. shocks\n assert (par.Neta == 1 and par.var_eta == 0) or (par.Neta > 1 and par.var_eta > 0)\n\n if par.Neta > 1:\n par.eta,par.w_eta = log_normal_gauss_hermite(np.sqrt(par.var_eta), par.Neta)\n else:\n par.eta = np.ones(1)\n par.w_eta = np.ones(1)\n\n # f. timings\n par.time_work = np.zeros(par.T)\n par.time_w = np.zeros(par.T)\n par.time_egm = np.zeros(par.T)\n par.time_vfi = np.zeros(par.T)",
"def grid_04():\n plot = {\"Walls\": [\"N\", \"S\", \"W\"], \"TARDIS\": False, \"Transmat\": False,\n \"Plot\": f'\\nEerie blue lights lit the cold corridors. To the NORTH, SOUTH, and WEST are solid metal walls.\\n'}\n return plot",
"def make_grid(tensors, nrow=2, padding=2, isNormalized=True):\n grid = tv.utils.make_grid(tensor=tensors.detach().cpu(),\n nrow=nrow,\n padding=padding,\n normalize=(not isNormalized))\n if isNormalized:\n ndgrid = grid.mul(255).add_(0.5).clamp_(0, 255).permute(\n 1, 2, 0).numpy().astype(np.uint16)\n else:\n ndgrid = grid.clamp_(0, 255).permute(1, 2, 0).numpy().astype(np.uint16)\n return ndgrid",
"def make_coordinate_grid(spatial_size, type):\n h, w = spatial_size\n x = torch.arange(w).type(type)\n y = torch.arange(h).type(type)\n x = 2 * (x / (w - 1)) - 1\n y = 2 * (y / (h - 1)) - 1\n yy = y.view(-1, 1).repeat(1, w)\n xx = x.view(1, -1).repeat(h, 1)\n meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)\n return meshed",
"def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n -49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, -4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, -7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right",
"def make_grid(self, nx, ny):\n nx_vec = np.arange(nx)\n ny_vec = np.arange(ny)\n yv, xv = np.meshgrid(ny_vec, nx_vec)\n grid = np.stack((yv, xv), axis=2)\n grid = grid.reshape(1, 1, ny, nx, 2)\n return grid",
"def test_linear_grid(self):\n L, M, N, NFP, axis, endpoint = 8, 5, 3, 2, True, False\n g = LinearGrid(L, M, N, NFP, sym=False, axis=axis, endpoint=endpoint)\n\n np.testing.assert_equal(g.num_rho, L + 1)\n np.testing.assert_equal(g.num_theta, 2 * M + 1)\n np.testing.assert_equal(g.num_zeta, 2 * N + 1)\n assert g.endpoint == endpoint\n\n nodes = np.stack(\n [\n np.tile(\n np.repeat(np.linspace(1, 0, g.num_rho, axis)[::-1], g.num_theta),\n g.num_zeta,\n ),\n np.tile(\n np.linspace(0, 2 * np.pi, g.num_theta, endpoint),\n g.num_rho * g.num_zeta,\n ),\n np.repeat(\n np.linspace(0, 2 * np.pi / NFP, g.num_zeta, endpoint),\n g.num_rho * g.num_theta,\n ),\n ]\n ).T\n\n np.testing.assert_allclose(g.nodes, nodes)\n np.testing.assert_allclose(g.weights.sum(), (2 * np.pi) ** 2)\n # spacing.prod != weights for grid with duplicates\n if not endpoint:\n np.testing.assert_allclose(g.spacing.prod(axis=1), g.weights)",
"def drawGrid(self):\n for div in range(NBCELL):\n sec = SSIZE*div\n self.can.create_line(0, sec, GSIZE, sec, width=3, fill=GFILL)\n self.can.create_line(sec, 0, sec, GSIZE, width=3, fill=GFILL)",
"def _build_grid(self):\n n = self.params['n']\n\n x_min, x_max = min(self.node[:, 0]), max(self.node[:, 0])\n y_min, y_max = min(self.node[:, 1]), max(self.node[:, 1])\n xv = np.linspace(x_min, x_max, num=n, endpoint=True)\n yv = np.linspace(y_min, y_max, num=n, endpoint=True)\n xg, yg = np.meshgrid(xv, yv, sparse=False, indexing='xy')\n\n return xg, yg",
"def create_grid(grid):\r\n inner = [0]*4\r\n for i in range(4):\r\n grid.append(inner[:])",
"def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0]*4)\r\n return grid",
"def make_grid(data=None, xmin=-5, xmax=5, ymin=-5, ymax=5, n_points = 400):\n if data is not None:\n xmin, ymin = np.min(data, axis = 0)\n xmax, ymax = np.max(data, axis = 0)\n\n plt.ylim(ymin, ymax)\n plt.xlim(xmin, xmax)\n\n x, y = np.meshgrid(np.linspace(xmin, xmax, n_points), np.linspace(ymin, ymax, n_points))\n grid = np.c_[x.ravel(), y.ravel()] # grid has n_points ^2 row and 2 columns\n return x, y, grid",
"def create_mine_grid(rows, cols, dim, space, anchors, spherical, gaussian, scale=1.):\n lerpv = get_interpfn(spherical, gaussian)\n\n u_list = np.zeros((rows, cols, dim))\n # compute anchors\n cur_anchor = 0\n for y in range(rows):\n for x in range(cols):\n if y%space == 0 and x%space == 0:\n if anchors is not None and cur_anchor < len(anchors):\n u_list[y,x,:] = anchors[cur_anchor]\n cur_anchor = cur_anchor + 1\n else:\n u_list[y,x,:] = np.random.normal(0,scale, (1, dim))\n # interpolate horizontally\n for y in range(rows):\n for x in range(cols):\n if y%space == 0 and x%space != 0:\n lastX = space * (x // space)\n nextX = lastX + space\n fracX = (x - lastX) / float(space)\n# print(\"{} - {} - {}\".format(lastX, nextX, fracX))\n u_list[y,x,:] = lerpv(fracX, u_list[y, lastX, :], u_list[y, nextX, :])\n # interpolate vertically\n for y in range(rows):\n for x in range(cols):\n if y%space != 0:\n lastY = space * (y // space)\n nextY = lastY + space\n fracY = (y - lastY) / float(space)\n u_list[y,x,:] = lerpv(fracY, u_list[lastY, x, :], u_list[nextY, x, :])\n\n u_grid = u_list.reshape(rows * cols, dim)\n\n return u_grid",
"def make_grid(self):\n length = self.size / 8\n # draw horizontal lines\n for y in range(0, self.size, length):\n self.window.create_line(0, y, self.size, y, fill = \"blue\")\n \n # draw vertical lines\n for x in range(0, self.size, length):\n self.window.create_line(x, 0, x, self.size, fill = \"blue\")\n\n # draw the axes red\n self.window.create_line(\n 0,\n self.size / 2,\n self.size, \n self.size / 2, \n fill = \"red\"\n )\n self.window.create_line(\n self.size / 2, 0,\n self.size / 2, \n self.size, \n fill = \"red\"\n )\n print(\"Grid Made.\")",
"def create_grid(spl,\n var_to_plot,\n xmin=None,\n xmax=None,\n ymin=None,\n ymax=None):\n\n # Find the lowest and the highest heights to calibrate the ylims\n\n max_heights, min_heights, dates = [], [], []\n for df in spl:\n max_heights.append(df['height [> 0: top, < 0: bottom of elem.] (cm)'].iloc[-1])\n min_heights.append(df['height [> 0: top, < 0: bottom of elem.] (cm)'].iloc[0])\n dates.append(df['dates'].iloc[0])\n\n if ymin and ymax:\n max_height = ymax\n min_height = ymin\n else:\n max_height = np.max(max_heights)\n min_height = np.min(min_heights)\n max_height = round(max_height ,5 ,'up')\n min_height = round(min_height ,5 ,'down')\n\n grid_resolution = 100\n vertical_grid = np.linspace(min_height ,max_height ,grid_resolution)\n\n # Trim spl to fit specified xmin, xmax\n\n if xmin and xmax:\n\n spl = [sp for (date,sp) in zip(dates,spl) if (xmin < date < xmax)]\n dates = [date for date in dates if (xmin < date < xmax)]\n\n grid = np.full((grid_resolution, len(spl)), np.nan)\n\n for count, df in enumerate(spl):\n\n heights = np.array(df['height [> 0: top, < 0: bottom of elem.] (cm)'])\n variables = np.array(df[var_to_plot])\n\n # regular_variables = np.interp(vertical_grid, heights, variables, left = np.nan, right = np.nan)\n\n kind = 'nearest'\n\n my_interp = interpolate.interp1d(heights,\n variables,\n kind=kind,\n bounds_error = False,\n fill_value = (np.nan ,np.nan))\n\n regular_variables = my_interp(vertical_grid)\n\n grid[: ,count] = np.flip(regular_variables, axis=0)\n\n return_dict = {'grid':grid,\n 'max_height':max_height,\n 'min_height':min_height,\n 'dates':dates}\n\n return(return_dict)",
"def make_grid(dataset):\n top_left_lat = dataset[\"a\"][0]\n top_left_lng = dataset[\"a\"][1]\n top_right_lng = dataset[\"c\"][1]\n bot_left_lat = dataset[\"b\"][0]\n\n lng_row = []\n lat_col = []\n i = top_left_lng\n while i < top_right_lng:\n lng_row.append(round(i, 5))\n i += step\n j = bot_left_lat\n while j < top_left_lat:\n lat_col.append(round(j, 5))\n j += step\n out_grid = []\n for i in lat_col:\n row = []\n for j in lng_row:\n row.append(\"{0}:{1}:0\".format(i, j))\n out_grid.append(row)\n return out_grid",
"def grid_11():\n plot = {\"Walls\": [\"N\", \"W\"], \"TARDIS\": False, \"Transmat\": False,\n \"Plot\": \"\\nThe corridor continues to the EAST and SOUTH. The walls to the WEST and NORTH\\n\"\n \"sounds hollow, maybe there is something behind them?\\n\"}\n return plot"
] | [
"0.6353607",
"0.6309061",
"0.61757565",
"0.6122297",
"0.6120471",
"0.6088273",
"0.6082796",
"0.6081821",
"0.60757345",
"0.6066482",
"0.60520643",
"0.6011054",
"0.5992132",
"0.5968607",
"0.59675235",
"0.59593165",
"0.59503216",
"0.59470177",
"0.59250623",
"0.5922438",
"0.592061",
"0.59133303",
"0.5904318",
"0.59030175",
"0.58903784",
"0.5858953",
"0.5843884",
"0.5840657",
"0.5838138",
"0.58343667"
] | 0.63151515 | 1 |
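The ternary_grid row above depends on a bin_centres_to_edges helper; a minimal sketch of that helper, mirroring the variant quoted among this section's negatives, is given below with arbitrary example values.

import numpy as np

def bin_centres_to_edges(centres):
    # Midpoints between adjacent centres, extrapolated symmetrically at both ends.
    centres = np.sort(np.asarray(centres).flatten())
    mids = (centres[1:] + centres[:-1]) / 2.0
    first = centres[0] - (mids[0] - centres[0])
    last = centres[-1] + (centres[-1] - mids[-1])
    return np.hstack([first, mids, last])

print(bin_centres_to_edges([0.1, 0.2, 0.3]))  # [0.05 0.15 0.25 0.35]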
Limit speed in range [-1000, 1000] | def limit_speed(speed):
if speed > 1000:
speed = 1000
elif speed < -1000:
speed = -1000
return speed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def limit_speed(speed):\n if speed > 900:\n speed = 900\n elif speed < -900:\n speed = -900\n return -speed",
"def speed(self, value: int, /) -> None:",
"def set_speed(self,value):\n if (value>self.get_max_speed()):\n print \"asked to set the speed to %f but the max speed is %f\\n\" % (value,self.get_max_speed())\n else:\n return self.put_par(\"slew_speed\",value)",
"def servo_set_speed_limit(ch, speed):\n\n # Check to make sure speed is in range\n speed = max(speed, speed_limit_min)\n speed = min(speed, speed_limit_max)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_speed, ch, speed)",
"def speed(self, speed: int, time: int = 0, /) -> None:",
"def speed_limit(comp):\n return max(min(comp, SPEED_LIMIT), -1 * SPEED_LIMIT)",
"def adjustSpeed(self, speed):\n\t\tif self.timeout <= 0:\n\t\t\tself.speed = max(self.minimumSpeed, min(self.maximumSpeed, self.speed + speed))",
"def speed(n):\n turtleTmp.speed(max(1, min(n, 10)))",
"def speed(self, s=0):",
"def set_speed():\n pass",
"def set_speed(speed):\n if speed >255:\n speed =255\n elif speed <0:\n speed =0\n set_left_speed(speed)\n #time.sleep(.1)\n set_right_speed(speed)",
"def changeSpeed(self, speed, accel):\n\t\t\n max_speed = 1000\n min_speed = 0\n \n # limit max speed\n if speed >= max_speed:\n speed = max_speed\n \n # limit min speed\n if speed <= min_speed:\n speed = min_speed\n \n command = struct.pack(\"<BHHB\", 0x24, speed, accel, 0x01)\n self.sendCommand(command)",
"def servo_set_speed_limit(ch, accel):\n\n # Check to make sure speed is in range\n speed = max(accel, accel_limit_min)\n speed = min(accel, accel_limit_max)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_accel, ch, accel)",
"def speed(self) -> int:",
"def speed(self) -> int:",
"def set_speed(self, speed=0):\n speed = clamp(speed)\n self._state.speed = speed\n self.send_command(Command.SET_SPEED, [int(speed)])",
"def set_speed(self,speed):\n self.speed = speed",
"def use_max_speed(self):\n command = _build_robovac_command(RobovacModes.SET_SPEED, RobovacCommands.FAST_SPEED)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)",
"def init_max_speed(self, speed):\n self.max_speed = speed",
"def set_speed(self, speed):\r\n speed = float(speed)\r\n speed = int(round(speed * 27.7778))\r\n return self.send_command('speed %s' % speed)",
"def pwm_limit(self, value):\n self._write(MX_PWM_LIMIT, value)",
"def max_speed(self) -> float:\n return 2",
"def velocity_limit(self, value):\n self._write(MX_VELOCITY_LIMIT, value)",
"def set_speed(self, speed):\n self.speed = speed",
"def increase_speed(self, character):\n character.speed = min(character.max_steps/4, character.speed * 1.25)",
"def setSpeedEngine1(speed: int):\n pass",
"def speed(self, value: float):\n self._speed = value",
"def change_speed(self, action):\r\n if action == \"faster\":\r\n self.speed += 1\r\n else:\r\n if self.speed > 1:\r\n self.speed -= 1",
"def max_front_wheel_speed():",
"def setSpeedEngine2(speed: int):\n pass"
] | [
"0.8183303",
"0.71591496",
"0.6988229",
"0.698505",
"0.6964697",
"0.6929783",
"0.69163376",
"0.68221915",
"0.67862874",
"0.6731271",
"0.67008835",
"0.6679034",
"0.663012",
"0.6583044",
"0.6583044",
"0.65218097",
"0.65210927",
"0.648919",
"0.6475178",
"0.64562607",
"0.644576",
"0.6437253",
"0.64315915",
"0.64126736",
"0.63832897",
"0.6361149",
"0.63318604",
"0.6289678",
"0.62807405",
"0.62530065"
] | 0.8521427 | 0 |
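A quick check of the clamping behaviour from the limit_speed row above, written as the equivalent max/min one-liner; purely illustrative.

def limit_speed(speed):
    # Clamp to the closed interval [-1000, 1000], matching the row above.
    return max(-1000, min(1000, speed))

assert limit_speed(1500) == 1000
assert limit_speed(-2500) == -1000
assert limit_speed(42) == 42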
Performs Biopython based motif matching and writes the results to a dictionary indexed by chromosome. | def biopythonMM(pwmFileName,genomeDict,mpbsDict,scoringMethod,tempLocation,pseudocounts=0.1,bitscore=12.0,fpr=0.01,precision=10**4,highCutoff=0.7,functionalDepth=0.9):
# Reading PWM
pwm = readPwmFile(pwmFileName,tempLocation,pseudocounts)
pwmName = pwmFileName.split("/")[-1].split(".")[0]
pwmLen = len(pwm)
# Evaluating threshold
pwmThreshold = 0.0
if(scoringMethod == "bitscore"):
pwmThreshold = bitscore
elif(scoringMethod == "fpr"):
sd = Motif.ScoreDistribution(pwm,precision=precision)
pwmThreshold = sd.threshold_fpr(fpr)
elif(scoringMethod == "boyle"):
maxScore = pwm.max_score()
minScore = 0.0 # TODO Boyle's rule is not suited for negative values.
pwmThreshold = min(highCutoff*maxScore,functionalDepth*(maxScore-minScore))
else:
sys.stderr.write("Choose a valid scoring method.\n")
sys.exit(0)
# Creating aditional parameters
    chrList = constants.getChromList(reference=[mpbsDict])
    tempMpbsDict = dict([(e,[]) for e in chrList])
    maxValue = -99.0
    # Iterating on chromosomes
    for chrName in chrList:
        # Reading genome
        sequence = genomeDict[chrName]
        # Performing biopython's motif matching
        for pos, score in pwm.search_pwm(sequence,threshold=pwmThreshold):
            if(score > maxValue): maxValue = score
            if(pos >= 0): tempMpbsDict[chrName].append([pos,pos+pwmLen,pwmName,score,"+"])
            else: tempMpbsDict[chrName].append([-pos,-pos+pwmLen,pwmName,score,"-"])
    # Update scores - new scores are within [0,1000]
    for chrName in chrList:
        for e in tempMpbsDict[chrName]:
            mpbsDict[chrName].append([e[0],e[1],e[2],int(1000*(e[3]-pwmThreshold)/(maxValue-pwmThreshold)),e[4]])
    return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def map_RE(self, index):\n if index is None:\n self.logger.error(\"The bowtie genome index must be specified to \"\n \"map restriction enzyme sites\")\n return None\n self.logger.info(\"Mapping restriction enyzme recognition sites\")\n # Start bowtie as a subprocess\n mapping = subprocess.Popen(\n self.arguments + [index, '-'], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # Send the raw sequence of the DpnII recognition site\n mapping.stdin.write(b'GATC')\n mapping.stdin.close()\n bed = {}\n total = 0\n # Retrieve the alignments from bowtie\n with mapping.stdout as f:\n for line in f:\n line = line.decode('UTF-8').split('\\t')\n chrom, start = line[2], int(line[3])\n stop = start + 4\n if chrom not in bed:\n bed[chrom] = []\n bed[chrom].append((start, stop))\n total += 1\n # Log mapping results\n with mapping.stderr as f:\n for line in f:\n if line[0] == '#':\n continue\n self.logger.debug(line.decode('UTF-8').rstrip('\\n'))\n # Sort chromosome list by name/number\n chroms = numpy.array(list(bed))\n chrints = []\n for i in range(chroms.shape[0]):\n try:\n chrints.append((\n str(int(chroms[i].lstrip('chr'))).rjust(2, '0'),\n chroms[i]))\n except ValueError:\n chrints.append((chroms[i], chroms[i]))\n chrints.sort()\n chroms = []\n for i in range(len(chrints)):\n chroms.append(chrints[i][1])\n self.chroms = numpy.array(chroms)\n self.chr_indices = numpy.zeros(self.chroms.shape[0] + 1,\n dtype=numpy.int32)\n if self.focus is None:\n self.logger.info(\"Defaulting to a fragment-focused analysis\")\n self.focus = 'fragments'\n if self.focus == 'fragments':\n N = total - self.chroms.shape[0]\n else:\n N = total\n # Arrange data into single array with indexed chromosomes\n self.data = numpy.zeros(N, dtype=numpy.dtype([\n ('chr', numpy.int32), ('coords', numpy.int32, (2,)),\n ('treatment', numpy.int32), ('control', numpy.int32),\n ('score', numpy.float64), ('alignable', numpy.bool)]))\n self.data['alignable'].fill(True)\n for i in range(self.chroms.shape[0]):\n chrom = self.chroms[i]\n bed[chrom] = numpy.array(bed[chrom])\n bed[chrom] = bed[chrom][numpy.argsort(bed[chrom][:, 0]), :]\n start = self.chr_indices[i]\n if self.focus == 'fragments':\n self.chr_indices[i + 1] = start + bed[chrom].shape[0] - 1\n stop = self.chr_indices[i + 1]\n self.data['coords'][start:stop, 0] = bed[chrom][:-1, 1]\n self.data['coords'][start:stop, 1] = bed[chrom][1:, 0]\n else:\n self.chr_indices[i + 1] = start + bed[chrom].shape[0]\n stop = self.chr_indices[i + 1]\n self.data['coords'][start:stop, :] = bed[chrom]\n self.data['chr'][start:stop] = i",
"def run_matching(self):\n paradic = self.cfg['param']['paradic']\n print 'in run_matching() n_bins = ' +str(paradic['n_bins'])\n\n f = open(self.work_dir+'matches.txt','w')\n matching = self.run_proc(['match_cli', 'keys_im0.txt',\n 'keys_im1.txt',\n str(paradic['flag_match']),\n str(paradic['C_match']),\n str(paradic['n_hist']),\n str(paradic['n_ori']),\n str(paradic['n_bins'])],\n stdout=f)\n self.wait_proc(matching, timeout=self.timeout)\n return 1",
"def handle_seq(seq, barcode_map, result_dict):\n for i in range(len(seq)):\n for barcode in barcode_map.keys():\n possible_match = seq[i: i + len(barcode)]\n if possible_match == barcode:\n result_dict[barcode][i] += 1",
"def motif_report(chrom, cluster):\n\tsmallest_ref_pos = min([x[0] for x in cluster])\n\tupper_bound_ref = max([x[0] + len(x[1]) - 1 for x in cluster])\n\toriginal_region = get_region_from_das(assembly, chrom, smallest_ref_pos, upper_bound_ref).upper()\n\trefseq = original_region\n\tif not refseq or len(refseq)==0:\n\t\tprint \" problem obtaining region on the reference sequence:\"\n\t\tprint assembly, chrom, smallest_ref_pos, upper_bound_ref\n\t\treturn None, None\n\n\tseqinfo = []\n\tmotifs = []\n\talignment = []\n\t# I don't really need the alignment here, but since I have it implemented ...\n\talt = [smallest_ref_pos, refseq[0], refseq[0], 1, 1, 1, None]\n\talignment.append(refseq)\n\tseqinfo.append(alt)\n\tfor v in cluster:\n\t\t[pos, ref, alts, var_counts, total_count, max_reach] = v\n\t\tif alts=='' or var_counts=='': continue\n\t\trelative_pos = pos - smallest_ref_pos\n\t\tcounts = var_counts.split(\",\")\n\t\talternatives = alts.split(\",\")\n\t\ta = -1 # index of the alternative sequence fo this position\n\t\tfor sequence in alternatives:\n\t\t\ta += 1\n\t\t\tmotif = find_motif_in_pair(sequence, ref)\n\t\t\tif motif and not motif in motifs: motifs.append(motif)\n\t\t\talt = [pos, ref, sequence, int(counts[a]), total_count, len(ref), motif]\n\t\t\tseqinfo.append(alt)\n\t\t\tadd_modified_seq(alignment, sequence, ref, relative_pos)\n\traw_seqs = [original_region]\n\tfreq = {}\n\tfreq[original_region] = \"1:1\"\n\tfor s in range(1,len(alignment)):\n\t\tmodified_seq = alignment[s]\n\t\t[pos, ref, sequence, count, total_count, max_reach, motif] = seqinfo[s]\n\t\traw_seq = modified_seq.replace('-','')\n\t\t# it seemed to me at one point that eac might have duplicates, but they seem\n\t\t# to have caught them and assigned them a frequency of 0\n\t\tif count==0: continue\n\t\tif raw_seq in raw_seqs:\n\t\t\tpass\n\t\telse:\n\t\t\traw_seqs.append(raw_seq)\n\t\tfreq[raw_seq] = \"%d:%d\" % (count,total_count)\n\n\t########### REPORT/STORE TO DB ##########################\n\t# never mind the clustering - I am not sure any more that it helps\n\t# motif counting though should protect me from counting as diffent\n\t# variants when a motif indel is assigned to a different place in the repeat expansion\n\t# as different a variant\n\t#for cluster in find_comparable_patterns(raw_seqs, motifs):\n\treport_items= []\n\tfor seq in raw_seqs:\n\t\tpattern = decomposition(seq, motifs)\n\t\treport_items.append(prettyprint(pattern) + \",\" + freq[to_string(pattern)])\n\n\n\treturn \",\".join(motifs), \";\".join(report_items)",
"def get_matches(self, file_map) -> dict:\r\n get_file_dict = {}\r\n match_dict = {\r\n 'GIF': re.findall(b'(?s)(\\x47\\x49\\x46\\x38\\x39\\x61.{80})', file_map),\r\n 'RTF': re.findall(b'(?s)(.{20}\\x35\\x30\\x34\\x65\\x34\\x37\\x30.{80}|.{20}\\x66\\x66\\x64\\x38\\x66\\x66.{80}|'\r\n b'.{20}\\x66\\x66\\x64\\x38\\x66\\x66\\x65\\x30\\x30\\x30\\x31\\x30.{80})', file_map),\r\n }\r\n if self.jpgsos:\r\n match_dict['JPG_SOS'] = jpg_sos(file_map)\r\n elif self.sof2sos:\r\n match_dict['JPG_SOF2SOS'] = jpg_sof2sos(file_map)\r\n elif self.jump:\r\n match_dict['JPG_JUMP'] = jpg_jump(file_map)\r\n else:\r\n match_dict['JPG'] = re.findall(b'(?s)(\\xff\\xd8\\xff\\xe0\\x00\\x10.{80})', file_map)\r\n match_dict['JPG2'] = re.findall(b'(?s)(\\xff\\xd8\\xff.{80})', file_map)\r\n if self.idat:\r\n match_dict['PNG_IDAT'] = idat(file_map)\r\n else:\r\n match_dict['PNG'] = re.findall(b'(?s)(\\x89\\x50\\x4e\\x47.{82})', file_map)\r\n m = re.match(br'^(?P<magic_beans>\\x49\\x49\\x2a\\x00[^\\x00\\x00]{2}.{80})',file_map,re.S)\r\n if m:\r\n match_dict['TIF'] = [m.group('magic_beans')]\r\n for file_type, regex_match in match_dict.items():\r\n if len(regex_match) > 0:\r\n get_file_dict[file_type] = regex_match\r\n return get_file_dict",
"def parse_match(self, read_id, alignment_position, length, read_sequence, ref_sequence, qualities):\n start = alignment_position\n stop = start + length\n for i in range(start, stop):\n\n self.coverage[i] += 1\n allele = read_sequence[i-alignment_position]\n ref = ref_sequence[i-alignment_position]\n self.base_dictionary[read_id][i] = (allele, qualities[i-alignment_position])\n # self._update_base_dictionary(read_id, i, allele, qualities[i-alignment_position])\n if allele != ref:\n self.mismatch_count[i] += 1\n self._update_read_allele_dictionary(read_id, i, allele, MISMATCH_ALLELE, qualities[i-alignment_position])\n else:\n self.match_count[i] += 1\n # this slows things down a lot. Don't add reference allele to the dictionary if we don't use them\n # self._update_read_allele_dictionary(i, allele, MATCH_ALLELE)",
"def scan_and_score(regions, motifs_obj, args, log_q, qs):\r\n\t\r\n\tlogger = TobiasLogger(\"\", args.verbosity, log_q)\t#sending all logger calls to log_q\r\n\r\n\tlogger.debug(\"Setting up scanner/bigwigs/fasta\")\r\n\tmotifs_obj.setup_moods_scanner()\t#MotifList object\r\n\r\n\tpybw = {condition: pyBigWig.open(args.signals[i], \"rb\") for i, condition in enumerate(args.cond_names)}\r\n\tfasta_obj = pysam.FastaFile(args.genome)\r\n\tchrom_boundaries = dict(zip(fasta_obj.references, fasta_obj.lengths))\r\n\r\n\trand_window = 200\r\n\r\n\tbackground_signal = {\"gc\":[], \"signal\":{condition:[] for condition in args.cond_names}}\r\n\r\n\t######## Scan for motifs in each region ######\r\n\tlogger.debug(\"Scanning for motif occurrences\")\r\n\tall_TFBS = {motif.prefix: RegionList() for motif in motifs_obj} \t# Dict for saving sites before writing\r\n\tfor i, region in enumerate(regions):\r\n\t\tlogger.spam(\"Processing region: {0}\".format(region.tup()))\r\n\t\r\n\t\textra_columns = region\r\n\t\t\r\n\t\t#Check whether region is within boundaries\r\n\t\tif region.end > chrom_boundaries[region.chrom]:\r\n\t\t\tlogger.error(\"Input region {0} is beyond chromosome boundaries ({1}: {2})\".format(region, region.chrom, chrom_boundaries[region.chrom]))\r\n\t\t\traise Exception \r\n\r\n\t\t#Random positions for sampling\r\n\t\treglen = region.get_length()\r\n\t\trandom.seed(reglen)\t\t#Each region is processed identifically regardless of order in file\r\n\t\trand_positions = random.sample(range(reglen), max(1,int(reglen/rand_window)))\t\t#theoretically one in every 500 bp\r\n\t\tlogger.spam(\"Random indices: {0} for region length {1}\".format(rand_positions, reglen))\r\n\r\n\t\t#Read footprints in region\r\n\t\tfootprints = {}\r\n\t\tfor condition in args.cond_names:\r\n\t\t\tfootprints[condition] = region.get_signal(pybw[condition], logger=logger, key=condition)\r\n\t\t\t\t\r\n\t\t\tif len(footprints[condition]) == 0:\r\n\t\t\t\tlogger.error(\"ERROR IN REGION: {0}\".format(region))\r\n\t\t\t\traise Exception\r\n\r\n\t\t\t#Read random positions for background\r\n\t\t\tfor pos in rand_positions:\r\n\t\t\t\tbackground_signal[\"signal\"][condition].append(footprints[condition][pos])\r\n\r\n\t\t#Scan for motifs across sequence from fasta\r\n\t\tseq = fasta_obj.fetch(region.chrom, region.start, region.end)\r\n\t\tregion_TFBS = motifs_obj.scan_sequence(seq, region)\t\t#RegionList of TFBS, automatically scanned on both plus/minus strands\r\n\r\n\t\t#Extend all TFBS with extra columns from peaks and bigwigs \r\n\t\textra_columns = region\r\n\t\tfor TFBS in region_TFBS:\r\n\t\t\tmotif_length = TFBS.end - TFBS.start \r\n\t\t\tpos = TFBS.start - region.start + int(motif_length/2.0) #middle of TFBS\r\n\t\t\t\r\n\t\t\tTFBS.extend(extra_columns)\r\n\r\n\t\t\t#Assign scores from bigwig\r\n\t\t\tfor bigwig in args.cond_names:\r\n\t\t\t\tbigwig_score = footprints[bigwig][pos]\r\n\t\t\t\tTFBS.append(\"{0:.5f}\".format(bigwig_score))\r\n\r\n\t\t#Split regions to single TFs\r\n\t\tfor TFBS in region_TFBS:\r\n\t\t\tall_TFBS[TFBS.name].append(TFBS)\t#TFBS.name is the prefix of motif\r\n\r\n\t####### All input regions have been scanned #######\r\n\tglobal_TFBS = RegionList()\t#across all TFs\r\n\r\n\t#Sent sites to writer\r\n\tfor name in all_TFBS:\t\r\n\t\tall_TFBS[name] = all_TFBS[name].resolve_overlaps()\r\n\t\tno_sites = len(all_TFBS[name])\r\n\r\n\t\tlogger.spam(\"Sending {0} sites from {1} to bed-writer queue\".format(no_sites, name))\r\n\t\tbed_content = all_TFBS[name].as_bed()\t#string \r\n\t\tqs[name].put((name, 
bed_content))\r\n\r\n\t\tglobal_TFBS.extend(all_TFBS[name])\r\n\t\tall_TFBS[name] = []\r\n\r\n\toverlap = global_TFBS.count_overlaps()\r\n\r\n\t#Close down open file handles\r\n\tfasta_obj.close()\r\n\tfor bigwig_f in pybw:\r\n\t\tpybw[bigwig_f].close()\r\n\t\r\n\tlogger.stop()\r\n\tlogger.debug(\"Done: 'scan_and_score' finished for this chunk of regions (time elapsed: {0})\".format(logger.total_time))\r\n\r\n\treturn(background_signal, overlap)",
"def run_genomemap(args):\n genome_map(args)",
"def blastresults(br_naam1, br_naam2, br_pos1, br_pos2):\n singles = {}\n twogdeellijst = openfile(br_naam1 + br_naam2)\n print(br_pos1, \"pos1\", br_pos2, \"pos2\")\n for zin in twogdeellijst:\n singles[(zin.split()[br_pos1] + \" \" + zin.split()[br_pos2])] = 0\n return singles.keys()",
"def motifs(self, thre, align):\n\n if self._parse is None:\n print \"No previous parsing\"\n print \"Parsing file...\"\n seqs = self.parse()\n self._parse = seqs\n print \"Done\"\n else:\n seqs = self._parse\n\n seqs[0].weight(self._seqfile, self._predfile) # weight first sequence\n sleep(1)\n known = seqs[0].motifs(thre, align) # extract motifs from first seq\n\n mot = {} # known motifs dictionary\n\n for i, k in enumerate(known):\n name = \"motif\"+str(i+1) # enumerate motifs\n\n mot[name] = {}\n mot[name][\"size\"] = k[2] - k[1] # size of the motif\n\n # start position of motif real position\n mot[name][\"start\"] = k[1]+1\n mot[name][\"stop\"] = k[2] # end position of motif\n mot[name][\"score\"] = k[3] # average score real position\n\n mot[name][\"align\"] = k[4] # average alignment score of sequence\n\n for j, s in enumerate(seqs):\n mot[name][s.name()] = {}\n # extract motif from each sequence\n mot[name][s.name()][\"seq\"] = s[k[1]+1:k[2]]\n mot[name][s.name()][\"start\"] = s.get_real_pos(k[1])\n if j == 0:\n # real position\n mot[name][\"start\"] = mot[name][s.name()][\"start\"]\n\n mot[\"threshold\"] = thre # general threshold used\n mot[\"align\"] = align # used alignment score\n self._motifs = mot\n\n return mot",
"def find_match(line,dic):\n seqid = line[0:seqid_len]\n sequence = line[(seqid_len + f_primer_len):(len(line) - r_primer_len)]\n if seqid in dic:\n increment(dic[seqid],sequence,1)\n else:\n dic[seqid] = {sequence:1}",
"def overlay_resources_score_motifs(motif_sites_input_file,\n motifs_overlapping_tracks_output_dir,\n chromatin_tracks_dir_path,\n chromatin_tracks_files):\n\n # for motif_sites_input_file in motif_sites_input_files:\n with open(motif_sites_input_file) as f:\n chr_n_file = f.readline().strip().split('\\t')[0].strip() + '.bed'\n # it is assumed for every motif file name there exists a matching file name in the chromatin_tracks_input_dir\n if chr_n_file in chromatin_tracks_files:\n motifs_overlapping_tracks_file = motifs_overlapping_tracks_output_dir + '/' + '.'.join(\n motif_sites_input_file.split('/')[-1].split('.')[0:-1]) + '_overlapping_tracks' + '.bed7'\n motifs_overlapping_tracks_file_tmp = motifs_overlapping_tracks_file + '_tmp'\n # create or overwrite output files\n if not os.path.exists(motifs_overlapping_tracks_file):\n \n motif_sites_input_file_sorted = motif_sites_input_file + '_sorted'\n chromatin_tracks_input_file = chromatin_tracks_dir_path +'/'+ chr_n_file\n chromatin_tracks_input_file_sorted = chromatin_tracks_input_file + '_sorted'\n \n print(\"intersecting: \" + motif_sites_input_file + ' and ' + chromatin_tracks_input_file)\n \n os.system(\"\"\"sort -k1,1 -k2,2n -k3,3n {} > {}\"\"\".format(motif_sites_input_file, motif_sites_input_file_sorted))\n os.system(\"\"\"sort -k1,1 -k2,2n -k3,3n {} > {}\"\"\".format(chromatin_tracks_input_file, chromatin_tracks_input_file_sorted))\n \n\n motif_sites_file_obj = BedTool(motif_sites_input_file_sorted)\n motif_sites_file_obj.map(BedTool(chromatin_tracks_input_file_sorted), c=4, o=['collapse']).saveas(motifs_overlapping_tracks_file_tmp)\n \n with open(motifs_overlapping_tracks_file_tmp, 'r') as infile, open(motifs_overlapping_tracks_file,\n 'w') as outfile:\n line = infile.readline()\n while line:\n\n sline = line.split('\\t')\n if len(sline) > 6:\n if sline[7] != '.' 
and sline[7] != \".\\n\":\n my_list = sline[7].split(',')\n cell_assay_values_dict_ChromHMM = {}\n cell_assay_values_dict_cCRE = {}\n cell_assay_values_dict_IndexDHS = {}\n cell_assay_values_dict_RegElem = {}\n cell_assay_values_dict_DNaseq = {}\n elem_list = []\n for elem in my_list:\n # TODO: check if statement below\n if elem.__contains__('#'):\n cell_value = elem.split('#')[0]\n assay_value = elem.split('#')[1]\n if len(elem.split('#')) > 2:\n state_value = elem.split('#')[2].rstrip(\"\\n\")\n\n if assay_value == \"ChromHMM\":\n if cell_value not in list(cell_assay_values_dict_ChromHMM.keys()):\n cell_assay_values_dict_ChromHMM[cell_value] = []\n cell_assay_values_dict_ChromHMM[cell_value].append(state_value)\n\n elif assay_value == \"cCRE\":\n if cell_value not in list(cell_assay_values_dict_cCRE.keys()):\n cell_assay_values_dict_cCRE[cell_value] = []\n cell_assay_values_dict_cCRE[cell_value].append(state_value)\n\n elif assay_value == \"IndexDHS\":\n if cell_value not in list(cell_assay_values_dict_IndexDHS.keys()):\n cell_assay_values_dict_IndexDHS[cell_value] = []\n cell_assay_values_dict_IndexDHS[cell_value].append(state_value)\n\n elif assay_value == \"RegElem\":\n if cell_value not in list(cell_assay_values_dict_RegElem.keys()):\n cell_assay_values_dict_RegElem[cell_value] = []\n cell_assay_values_dict_RegElem[cell_value].append(state_value)\n\n elif assay_value == \"DNase-seq\":\n if cell_value not in list(cell_assay_values_dict_DNaseq.keys()):\n cell_assay_values_dict_DNaseq[cell_value] = []\n cell_assay_values_dict_DNaseq[cell_value].append(float(state_value))\n\n else:\n elem_list.append(elem.rstrip(\"\\n\"))\n\n for cell in cell_assay_values_dict_ChromHMM:\n elem_list.append(cell + \"#ChromHMM#\" +\n Counter(cell_assay_values_dict_ChromHMM[cell]).most_common(1)[0][\n 0])\n\n for cell in list(cell_assay_values_dict_cCRE.keys()):\n elem_list.append(\n cell + \"#cCRE#\" + Counter(cell_assay_values_dict_cCRE[cell]).most_common(1)[0][\n 0])\n\n for cell in list(cell_assay_values_dict_IndexDHS.keys()):\n elem_list.append(cell + \"#IndexDHS#\" +\n Counter(cell_assay_values_dict_IndexDHS[cell]).most_common(1)[0][\n 0])\n\n for cell in list(cell_assay_values_dict_RegElem.keys()):\n elem_list.append(cell + \"#RegElem#\" +\n Counter(cell_assay_values_dict_RegElem[cell]).most_common(1)[0][0])\n\n for cell in list(cell_assay_values_dict_DNaseq.keys()):\n elem_list.append(\n cell + \"#DNase-seq#\" + str(max(cell_assay_values_dict_DNaseq[cell])))\n\n outfile.write('\\t'.join(sline[0:7]) + '\\t' + ','.join(elem_list) + '\\n')\n\n line = infile.readline()\n\n os.remove(motifs_overlapping_tracks_file_tmp)\n os.remove(motif_sites_input_file_sorted)\n os.remove(chromatin_tracks_input_file_sorted)\n\n print(\"Finished intersecting: \" + motif_sites_input_file + ' and ' + chromatin_tracks_input_file)\n else:\n print(\"Use existing data files in \" + motifs_overlapping_tracks_file)\n else:\n print(\"Specified chromatin track file \" + chr_n_file + \" cannot be found and will be ignored.\")\n return None\n cleanup()\n return motifs_overlapping_tracks_file",
"def target_intersection(self, runid):\n\n def targeting(shuffledict, seg_copy_array, cell_name):\n bedstring = \"\"\n seg_counts_dict = defaultdict(int)\n breakpoint_counts = 0\n sum_counts = 0\n for cell in shuffledict:\n with suppress(IndexError):\n i = len(cell_name)\n cell_label = cell[:i]\n\n if not cell_name == cell_label:\n continue\n\n shuffled_list = shuffledict[cell]\n scipy.random.shuffle(shuffled_list)\n sum_counts += sum(shuffled_list)\n\n for i in range(len(shuffled_list)):\n if shuffled_list[i] == 0:\n continue\n\n breakpoint_counts += 1\n segment_index = i\n if i == 0:\n segment_index = 1\n\n chrm = seg_copy_array[seg_copy_array[:, 0] == segment_index][0, 1].decode()\n chrom_slice = seg_copy_array[seg_copy_array[:, 1] == chrm.encode()]\n chrom_seg_count = chrom_slice.shape[0]\n start_seg = segment_index\n stop_seg = segment_index+1\n\n # Prevents us from running past the end of the chromosome\n if segment_index+1 > chrom_seg_count:\n stop_seg = segment_index\n start_seg = segment_index-1\n\n coord_start = int(seg_copy_array[seg_copy_array[:, 0] == start_seg][0, 2])\n coord_stop = int(seg_copy_array[seg_copy_array[:, 0] == stop_seg][0, 3])\n\n segkey = \"{}.{}\".format(chrm, coord_start)\n seg_counts_dict[segkey] += 1\n bedstring += \"{0} {1} {2} {3} {0}|{1}|{2}|{3}\\n\".format(chrm, coord_start, coord_stop, \"x\")\n\n if eval(self.args.PairedBreakpoints):\n segment_index = shuffled_list[i]+i\n\n # Since segments are paired we can run past the end of the list.\n if segment_index > len(shuffled_list):\n segment_index = len(shuffled_list)-1\n\n # If the shuffle results in a segment overlap, skip it.\n if not shuffled_list[segment_index] == 0:\n continue\n\n start_seg = segment_index\n stop_seg = segment_index+1\n\n # Prevents us from running past the end of the chromosome by flipping direction of region\n if segment_index + 1 > chrom_seg_count:\n start_seg = shuffled_list[i] - i\n stop_seg = start_seg-1\n\n coor_start = int(seg_copy_array[seg_copy_array[:, 0] == start_seg][0, 2])\n coor_stop = int(seg_copy_array[seg_copy_array[:, 0] == stop_seg][0, 3])\n breakpoint_counts += 1\n segkey = \"{}.{}\".format(chrm, coord_start)\n seg_counts_dict[segkey] += 1\n bedstring += \"{0} {1} {2} {3} {0}|{1}|{2}|{3}\\n\".format(chrm, coor_start, coor_stop, \"x\")\n\n return bedstring, seg_counts_dict, breakpoint_counts\n\n encoded_cell_name = self.args.Cell_Name\n shuffle_dict = self.shuffle_dict_unpaired\n if eval(self.args.PairedBreakpoints):\n shuffle_dict = self.shuffle_dict_pairs\n output_data_dict = defaultdict(lambda: defaultdict(str))\n\n iteration_limit = int(self.args.Iteration_Count)/int(self.args.Spawn)\n iteration_count = 0\n while iteration_count < iteration_limit:\n if iteration_count % int(self.args.Prog_Check) == 0:\n self.log.info(\"Iteration: {} of {} for job {}\".format(iteration_count, iteration_limit, runid))\n\n bed_string, segment_count_dict, total_breakpoints = \\\n targeting(shuffle_dict, self.seg_analyzer.seg_copy_array, encoded_cell_name)\n\n # Bedtool Section.\n breakpoint_bedtool = pybedtools.BedTool(bed_string, from_string=True)\n target_bedtool = pybedtools.BedTool(self.args.Target_File, from_string=False)\n\n # Find target intersects for printing.\n breakpoint_target_intersect = breakpoint_bedtool.intersect(target_bedtool, wb=True, stream=True)\n\n \"\"\"\n The breakpoint target intersect pybedtools object is expected to have this structure;\n l[0] = Breakpoint chrom; l[1] = Breakpoint start coord; l[2] = Breakpoint end coord; \n l[3] = aberration copy type; 
l[4] = segment ID for internal tracking. The next items are from the target BED \n file. Make sure column 5 in that file is the target name.\n \"\"\"\n\n # Processing Breakpoint Intersects.\n intersect_dict = defaultdict(list)\n total_targeted_breakpoints = 0\n unique_targeted_breakpoints = 0\n\n for l in breakpoint_target_intersect:\n chrom = l[4].split(\"|\")[0]\n start = l[4].split(\"|\")[1]\n segment_key = \"{}.{}\".format(chrom, start)\n intersect_dict[segment_key].append(l[9])\n\n for k in intersect_dict:\n total_targeted_breakpoints += segment_count_dict[k]\n if segment_count_dict[k] > 0:\n unique_targeted_breakpoints += 1\n\n output_data_dict[iteration_count] = \"{}\\t{}\\t{}\\t{}\\n\"\\\n .format(total_breakpoints, total_targeted_breakpoints, len(segment_count_dict), len(intersect_dict))\n\n iteration_count += 1\n\n # Process data for output and write file.\n outstring = \"\"\n\n for k in output_data_dict:\n outstring += output_data_dict[k]\n\n permuted_shuffle_file_name = \\\n \"{}{}{}{}\".format(self.args.Working_Folder, self.args.Cell_Name, self.args.Job_Name, runid)\n permuted_shuffle_file = open(permuted_shuffle_file_name, 'w')\n permuted_shuffle_file.write(outstring)\n permuted_shuffle_file.close()\n\n return",
"def mapper(self, key, value):\n \n overall = t = time.time()\n \n index1, index2 = key \n didx1, didx2, em_iters = value\n\n t = time.time()\n# X = tools.binary_read('self_X')\n# d1 = tools.get_data_from_indices(X, didx1)\n# d2 = tools.get_data_from_indices(X, didx2)\n# sys.stderr.write(\"get_data_from_indices: {0}\\n\".format(time.time()-t))\n d1 = tools.get_data_from_file_from_indices('self_X', didx1)\n d2 = tools.get_data_from_file_from_indices('self_X', didx2)\n sys.stderr.write(\"get_data_from_file_from_indices: {0}\\n\".format(time.time()-t))\n data = np.concatenate((d1, d2))\n \n t = time.time()\n util.unarchive('gmm.tgz', 'gmm')\n g1 = pickle.load(open('gmm/'+str(index1), 'r'))\n g2 = pickle.load(open('gmm/'+str(index2), 'r'))\n sys.stderr.write(\"read iter_gmm_list: {0}\\n\".format(time.time()-t))\n new_gmm = g1\n score = 0\n t = time.time()\n try:\n new_gmm, score = compute_distance_BIC(g1, g2, data, em_iters)\n except:\n raise\n #data_to_yield = (score, new_gmm, g1, g2, index1, index2)\n data_to_yield = (score, index1, index2)\n sys.stderr.write(\"compute_distance_BIC: {0}\\n\".format(time.time()-t))\n sys.stderr.write(\"total BIC time: {0}\\n\".format(time.time()-overall))\n yield 1, data_to_yield",
"def __init__(self, reads, fasta_handler, chromosome_name, region_start_position, region_end_position):\n self.region_start_position = region_start_position\n self.region_end_position = region_end_position\n self.chromosome_name = chromosome_name\n self.fasta_handler = fasta_handler\n self.reads = reads\n\n # the store which reads are creating candidates in that position\n self.coverage = defaultdict(int)\n self.rms_mq = defaultdict(int)\n self.mismatch_count = defaultdict(int)\n self.match_count = defaultdict(int)\n\n # the base and the insert dictionary for finding alleles\n self.positional_allele_dictionary = {}\n self.read_allele_dictionary = {}\n self.reference_dictionary = {}\n\n # few new dictionaries for image creation\n self.base_dictionary = defaultdict(lambda: defaultdict(tuple))\n self.insert_dictionary = defaultdict(lambda: defaultdict(tuple))\n self.delete_dictionary = defaultdict(lambda: defaultdict(tuple))\n self.read_info = defaultdict(tuple)\n self.insert_length_info = defaultdict(int)\n self.delete_length_info = defaultdict(int)\n self.positional_read_info = defaultdict(list)\n\n # for image generation\n self.image_row_for_reads = defaultdict(tuple)\n self.image_row_for_ref = defaultdict(list)\n self.positional_info_index_to_position = defaultdict(tuple)\n self.positional_info_position_to_index = defaultdict(tuple)\n self.allele_dictionary = defaultdict(lambda: defaultdict(list))\n self.read_id_by_position = defaultdict(list)",
"def parse_mappings(species, infile, outfile):\n mappings = dict()\n # if species doesn't have prepared mapping file the script should exit with status 0 and return BigWig file\n # with output name and warining\n if species not in MAPPINGS_FILES:\n msg = 'Chromosome mappings for Species \"{}\" are not supported.'.format(species)\n send_message(warning(msg))\n os.rename(infile, outfile)\n sys.exit(0)\n\n for basename in MAPPINGS_FILES[species]:\n filename = os.path.join(MAPPINGS_DIR, basename)\n mappings.update(parse_mapping_file(filename))\n return mappings",
"def target_mapping(self):\n\n map_list = []\n self.bin_tracking_array = self.seg_analyzer.bin_tracking_array\n self.log.info(\"Spawning {0} jobs to begin building Target_Bed_Map_Array for permutation analysis.\"\n .format(self.args.Spawn))\n\n p = pathos.multiprocessing.Pool(int(self.args.Spawn))\n for lst in p.starmap(self.sub_target_mapping,\n zip(itertools.repeat(self.bin_tracking_array), itertools.repeat(self.target_bed_array),\n itertools.repeat(self.args), self.seg_analyzer.chrom_list)):\n\n map_list.extend(lst)\n\n map_list.sort(key=lambda x: x[0])\n\n if eval(self.args.Map_File):\n self.log.info(\"Writing Map File\")\n file_data = \"\"\n map_file = open(\"{0}{1}_{2}_mapfile.txt\"\n .format(self.args.Working_Folder, self.args.Job_Name, self.args.Cell_Name), 'w')\n map_file.write(\"Chrom\\tstart\\tstop\\trefBinID\\ttargetBinID\\ttargetCount\\n\")\n\n for row in sorted(map_list, key=itemgetter(0)):\n\n coord_start = int(self.bin_tracking_array[self.bin_tracking_array[:, 0] == row[0]][0, 2])\n coord_stop = int(self.bin_tracking_array[self.bin_tracking_array[:, 0] == row[0]][0, 3])\n chrom = self.bin_tracking_array[self.bin_tracking_array[:, 0] == row[0]][0, 1].decode()\n r_count = len(row[1])\n file_data += (\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\n\"\n .format(chrom, coord_start, coord_stop, row[0], row[1], r_count))\n\n map_file.write(file_data)\n map_file.close()\n self.log.info(\"Map File Written\")\n\n self.log.info(\"Target_Bed_Map_Array built.\")\n return numpy.array(map_list, dtype='object')",
"def main():\n\tdb, cursor = connect()\n\t#chroms = ['1','22']\n\t#chroms = ['2','21']\n\t#chroms = ['3','20']\n\t#chroms = ['4','19']\n\t#chroms = ['5','18']\n\t#chroms = ['6','17']\n\t#chroms = ['7','16']\n\t#chroms = ['8','15']\n\t#chroms = ['9','14']\n\t#chroms = ['10','13']\n\tchroms = ['11','12']\n\t#chroms = [str(i) for i in range(10,23)]\n\t#chroms = ['X','Y']\n\tchroms.reverse()\n\tfor chrom in chroms:\n\t\tt0 = time()\n\t\ttable = \"gnomad_freqs_chr_\" + chrom\n\t\tprint\n\t\tprint \"*\"*20\n\t\tprint table\n\t\tprint \"number of variants:\", search_db(cursor, \"select count(1) from %s\" % table)[0][0]\n\t\tqry = \"select count(1) from %s \" % table\n\t\tqry += \"where char_length(reference)=1 and char_length(variant)=1\"\n\t\tprint \"simple SNPs\", search_db(cursor, qry)[0][0]\n\n\t\tcandidates, long_vars_ct = find_complex_variants(cursor, table)\n\t\tprint\n\t\tprint \"Complex variants with reference<30:\", len(candidates),\n\t\tprint \" long variants: \", long_vars_ct\n\n\t\tclusters = find_clusters_of_candidates(candidates)\n\t\tprint\n\t\tprint \"Done clustering. Max pos:\", max([cluster[0][0] for cluster in clusters])\n\t\tprint \"Number of hotspot regions:\", len(clusters)\n\n\n\t\tnumber_of_vars_in_clusters = 0\n\t\tnumber_of_clusters_with_periodic_motifs = 0\n\t\tfor cluster in clusters:\n\t\t\t# no varaints: cluster is just the number of positions here, not the number of\n\t\t\t# vars repoted for each\n\t\t\t[start,end, number_of_variants] = characterize_region(cluster)\n\t\t\tif number_of_variants<2: continue\n\t\t\tnumber_of_vars_in_clusters += number_of_variants\n\t\t\tfixed_fields = {'chrom':chrom, 'start':start, 'end':end}\n\t\t\tstore_without_checking(cursor, 'gnomad_hotspots', fixed_fields)\n\t\tprint\n\t\tprint \"Number of variants with clusters:\", number_of_vars_in_clusters\n\t\tprint \"Number of clusters with periodic motifs:\", number_of_clusters_with_periodic_motifs\n\t\tprint\n\t\tprint \"time taken %.2f min\" % ((time() - t0) / 60.0)\n\t\tprint\n\tcursor.close()\n\tdb.close()\n\n\treturn",
"def _play_matches(self, chunk, build_results=True):\n interactions = defaultdict(list)\n index_pair, match_params, repetitions, seed = chunk\n p1_index, p2_index = index_pair\n player1 = self.players[p1_index].clone()\n player2 = self.players[p2_index].clone()\n match_params[\"players\"] = (player1, player2)\n match_params[\"seed\"] = seed\n match = Match(**match_params)\n for _ in range(repetitions):\n match.play()\n\n if build_results:\n results = self._calculate_results(match.result)\n else:\n results = None\n\n interactions[index_pair].append([match.result, results])\n return interactions",
"def map_to_mgi(adata, copy = False):\n from pybiomart import Server\n # connest to the biomart server\n server = Server(host='http://www.ensembl.org')\n\n # retrieve the mouse data set we need\n dataset = (server.marts['ENSEMBL_MART_ENSEMBL']\n .datasets['mmusculus_gene_ensembl'])\n\n # recieve the mapping from ensembl to MGI\n conv_table = dataset.query(attributes=['ensembl_gene_id', 'external_gene_name'])\n\n # we first drop duplicates in the first column\n conv_table = conv_table.drop_duplicates(conv_table.columns.values[0])\n\n # convert the gene names from the adata object to a data frame\n adata_table = pd.DataFrame(adata.var_names)\n\n # give the first column a name\n adata_table.columns = ['Gene stable ID']\n\n # change the gene table so that the ensembl names are now the index\n conv_table = conv_table.set_index('Gene stable ID')\n\n # project the names from the conversion table on the corr. names in the\n # adata var names table\n mapping = adata_table.join(conv_table, on='Gene stable ID')\n\n # how many could we not map\n not_found_mgi = sum(pd.isnull(mapping).iloc[:,1])\n\n # how many ensg symbols did we map several times?\n rep_ensg = len(mapping.iloc[:, 0]) - len(set(mapping.iloc[:, 0]))\n\n # how many mgi symbols did we map several times?\n rep_mgi = len(mapping.iloc[:, 1]) - len(set(mapping.iloc[:, 1]))\n\n # print this information\n print('Genes where no MGI annotations where found: {}\\nENSG repetition: {}\\nMGI repetition: {}'.\\\n format(not_found_mgi, rep_ensg, rep_mgi))\n\n # fill nans in mgi column with corresponding ensembl annotations\n mapping['Gene name'].fillna(mapping['Gene stable ID'], inplace = True)\n\n # add the new gene names to the adata object\n adata.var['mgi_symbols'] = mapping['Gene name'].tolist()",
"def read(results_file):\n results = {}\n sequences = []\n current_method = 0\n with open(results_file) as results_handle: \n for line in results_handle:\n # Find all floating point numbers in this line\n line_floats_res = re.findall(\"-*\\d+\\.\\d+\", line)\n line_floats = [float(val) for val in line_floats_res]\n # The results file is organized by method\n if \"(A) Nei-Gojobori (1986) method\" in line:\n current_method = 1\n continue\n elif \"(B) Yang & Nielsen (2000) method\" in line:\n current_method = 2\n continue\n elif \"(C) LWL85, LPB93 & LWLm methods\" in line:\n current_method = 3\n continue\n if current_method == 1:\n # Nei_Gojobori results are organized in a lower \n # triangular mattrix, with the sequence names labeling\n # the rows and statistics in the format:\n # w (dN dS) per column\n # Example row (2 columns):\n # 0.0000 (0.0000 0.0207) 0.0000 (0.0000 0.0421)\n matrix_row_res = re.match(\"(.+)\\s{5,15}\",line)\n if matrix_row_res is not None:\n seq_name = matrix_row_res.group(1).strip()\n sequences.append(seq_name)\n results[seq_name] = {}\n for i in range(0, len(line_floats), 3):\n NG86 = {}\n NG86[\"omega\"] = line_floats[i]\n NG86[\"dN\"] = line_floats[i+1]\n NG86[\"dS\"] = line_floats[i+2]\n results[seq_name][sequences[i/3]] = {\"NG86\":NG86}\n results[sequences[i/3]][seq_name] = {\"NG86\":NG86}\n elif current_method == 2:\n # Yang & Nielsen results are organized in a table with\n # each row comprising one pairwise species comparison.\n # Rows are labeled by spequence number rather than by\n # sequence name.\n # Example (header row and first table row):\n # seq. seq. S N t kappa omega dN +- SE dS +- SE\n # 2 1 67.3 154.7 0.0136 3.6564 0.0000 -0.0000 +- 0.0000 0.0150 +- 0.0151\n row_res = re.match(\"\\s+(\\d+)\\s+(\\d+)\", line)\n if row_res is not None:\n seq1 = int(row_res.group(1))\n seq2 = int(row_res.group(2))\n seq_name1 = sequences[seq1-1]\n seq_name2 = sequences[seq2-1]\n YN00 = {}\n YN00[\"S\"] = line_floats[0]\n YN00[\"N\"] = line_floats[1]\n YN00[\"t\"] = line_floats[2]\n YN00[\"kappa\"] = line_floats[3]\n YN00[\"omega\"] = line_floats[4]\n YN00[\"dN\"] = line_floats[5]\n YN00[\"dN SE\"] = line_floats[6]\n YN00[\"dS\"] = line_floats[7]\n YN00[\"dS SE\"] = line_floats[8]\n results[seq_name1][seq_name2][\"YN00\"] = YN00\n results[seq_name2][seq_name1][\"YN00\"] = YN00\n seq_name1 = None\n seq_name2 = None\n elif current_method == 3:\n # The remaining methods are grouped together. Statistics\n # for all three are listed for each of the pairwise \n # species comparisons, with each method's results on its\n # own line.\n # The stats in this section must be handled differently\n # due to the possible presence of NaN values, which won't\n # get caught by my typical \"line_floats\" method used above.\n # Example:\n # 2 (Pan_troglo) vs. 1 (Homo_sapie)\n\n # L(i): 143.0 51.0 28.0 sum= 222.0\n # Ns(i): 0.0000 1.0000 0.0000 sum= 1.0000\n # Nv(i): 0.0000 0.0000 0.0000 sum= 0.0000\n # A(i): 0.0000 0.0200 0.0000\n # B(i): -0.0000 -0.0000 -0.0000\n # LWL85: dS = 0.0227 dN = 0.0000 w = 0.0000 S = 45.0 N = 177.0\n # LWL85m: dS = -nan dN = -nan w = -nan S = -nan N = -nan (rho = -nan)\n # LPB93: dS = 0.0129 dN = 0.0000 w = 0.0000\n comp_res = re.match(\"\\d+ \\((.+)\\) vs. 
\\d+ \\((.+)\\)\", line)\n if comp_res is not None:\n seq_name1 = comp_res.group(1)\n seq_name2 = comp_res.group(2)\n elif seq_name1 is not None and seq_name2 is not None:\n if \"dS =\" in line:\n stats = {}\n line_stats = line.split(\":\")[1].strip()\n stats_split = line_stats.split()\n for i in range(0, len(stats_split), 3):\n stat = stats_split[i].strip(\"()\")\n value = stats_split[i+2].strip(\"()\")\n try:\n stats[stat] = float(value)\n except:\n stats[stat] = None\n if \"LWL85:\" in line:\n results[seq_name1][seq_name2][\"LWL85\"] = stats\n results[seq_name2][seq_name1][\"LWL85\"] = stats\n elif \"LWL85m\" in line:\n results[seq_name1][seq_name2][\"LWL85m\"] = stats\n results[seq_name2][seq_name1][\"LWL85m\"] = stats\n elif \"LPB93\" in line:\n results[seq_name1][seq_name2][\"LPB93\"] = stats\n results[seq_name2][seq_name1][\"LPB93\"] = stats\n if len(results) == 0:\n raise ValueError, \"Invalid results file\"\n return results",
"def find_matchable_chips(ibs):\n from . import match_chips3 as mc3\n from . import matching_functions as mf\n qreq = ibs.qreq\n qaids = ibs.get_valid_aids()\n qreq = mc3.prep_query_request(qreq=qreq, qaids=qaids, daids=qaids)\n mc3.pre_exec_checks(ibs, qreq)\n qaid2_nns = mf.nearest_neighbors(ibs, qaids, qreq)\n mf.rrr()\n qaid2_nnfilt = mf.identity_filter(qaid2_nns, qreq)\n qaid2_chipmatch_FILT = mf.build_chipmatches(qaid2_nns, qaid2_nnfilt, qreq)\n qaid2_ranked_list = {}\n qaid2_ranked_scores = {}\n for qaid, chipmatch in six.iteritems(qaid2_chipmatch_FILT):\n (aid2_fm, aid2_fs, aid2_fk) = chipmatch\n #aid2_nMatches = {aid: fs.sum() for (aid, fs) in six.iteritems(aid2_fs)}\n aid2_nMatches = {aid: len(fm) for (aid, fm) in six.iteritems(aid2_fs)}\n nMatches_list = np.array(aid2_nMatches.values())\n aid_list = np.array(aid2_nMatches.keys())\n sortx = nMatches_list.argsort()[::-1]\n qaid2_ranked_list[qaid] = aid_list[sortx]\n qaid2_ranked_scores[qaid] = nMatches_list[sortx]\n\n scores_list = []\n strings_list = []\n for qaid in qaids:\n aid = qaid2_ranked_list[qaid][0]\n score = qaid2_ranked_scores[qaid][0]\n strings_list.append('qaid=%r, aid=%r, score=%r' % (qaid, aid, score))\n scores_list.append(score)\n sorted_scorestr = np.array(strings_list)[np.array(scores_list).argsort()]\n print('\\n'.join(sorted_scorestr))",
"def check_map(infile, disable_primer_check, barcode_type=\"golay_12\",\r\n added_demultiplex_field=None, has_barcodes=True):\r\n\r\n if barcode_type == \"variable_length\":\r\n var_len_barcodes = True\r\n else:\r\n var_len_barcodes = False\r\n\r\n if barcode_type == \"0\":\r\n has_barcodes = False\r\n\r\n # hds, id_map, dsp, run_description, errors, warnings\r\n hds, mapping_data, run_description, errors, warnings = \\\r\n process_id_map(infile, has_barcodes=has_barcodes,\r\n disable_primer_check=disable_primer_check,\r\n added_demultiplex_field=added_demultiplex_field,\r\n variable_len_barcodes=var_len_barcodes)\r\n\r\n if errors:\r\n raise ValueError('Errors were found with mapping file, ' +\r\n 'please run validate_mapping_file.py to ' +\r\n 'identify problems.')\r\n\r\n id_map = {}\r\n\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]] = {}\r\n\r\n for header in range(len(hds)):\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]][hds[header]] = curr_data[header]\r\n\r\n barcode_to_sample_id = {}\r\n\r\n primer_seqs_lens = {}\r\n all_primers = {}\r\n\r\n for sample_id, sample in id_map.items():\r\n if added_demultiplex_field:\r\n barcode_to_sample_id[sample['BarcodeSequence'].upper() + \",\" +\r\n sample[added_demultiplex_field]] = sample_id\r\n else:\r\n barcode_to_sample_id[sample['BarcodeSequence'].upper()] = sample_id\r\n if not disable_primer_check:\r\n raw_primers = sample['LinkerPrimerSequence'].upper().split(',')\r\n\r\n if len(raw_primers[0].strip()) == 0:\r\n raise ValueError('No primers detected, please use the ' +\r\n '-p parameter to disable primer detection.')\r\n expanded_primers = expand_degeneracies(raw_primers)\r\n curr_bc_primers = {}\r\n for primer in expanded_primers:\r\n curr_bc_primers[primer] = len(primer)\r\n all_primers[primer] = len(primer)\r\n primer_seqs_lens[sample['BarcodeSequence']] = curr_bc_primers\r\n\r\n return hds, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers",
"def mapping(reads_list, k, h, index, genome):\n snps_dict = {}\n # Map the read on the genome and store the snps found\n for read in reads_list:\n reversed_read = reverse_read(read)\n reverse = False\n list_mapping = seed_and_extend(read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = False\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on straight strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n list_mapping = seed_and_extend(reversed_read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = True\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on reverse strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n reverse = False\n if VERBOSE:\n print(\"No mapping found for read number :\", reads_list.index(read) + 1)\n if list_mapping[0] < len(genome):\n for mismatch in list_mapping[2]:\n if reverse == False:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [read[mismatch - list_mapping[0]]]\n else:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(reversed_read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [reversed_read[mismatch - list_mapping[0]]]\n\n return snps_dict",
"def readMappedData(options,phase):\n whole_mapped_data={}\n mapped_data_per_size_per_register={}\n alignment_filename=options.output_directory+\"/\"+options.input_filename+\"_bowtie1.bwt\"\n fhr=open(alignment_filename,\"r\")\n for line in fhr:\n try:\n read_id, strand, chromosome, coordinate, sequence, quality, mapped_times = line.strip().split()\n except ValueError:\n print(line)\n continue\n try:\n coordinate=int(coordinate)\n mapped_times=int(mapped_times)+1\n length=len(sequence)\n except ValueError:\n print(line)\n continue\n if strand==\"-\":\n coordinate+=2\n if chromosome not in whole_mapped_data:\n whole_mapped_data[chromosome]={}\n if coordinate not in whole_mapped_data[chromosome]: \n whole_mapped_data[chromosome][coordinate]=0\n whole_mapped_data[chromosome][coordinate]+=1\n \n if phase!=length:\n continue\n if chromosome not in mapped_data_per_size_per_register:\n mapped_data_per_size_per_register[chromosome]={}\n register=coordinate % length\n if register not in mapped_data_per_size_per_register[chromosome]:\n mapped_data_per_size_per_register[chromosome][register]={}\n if coordinate not in mapped_data_per_size_per_register[chromosome][register]:\n mapped_data_per_size_per_register[chromosome][register][coordinate]=0\n mapped_data_per_size_per_register[chromosome][register][coordinate]+=1\n if mapped_data_per_size_per_register[chromosome][register][coordinate]>2:\n print(\"Trouble with alignments\",length,chromosome,register,coordinate)\n \n return whole_mapped_data,mapped_data_per_size_per_register",
"def create_barcodes_search_dict(barcodes_dict, args):\t\n\tsearch_dict = {}\n\t\n\t#get dictionary with names:seq for barcodes\n\tforward_barcs = barcodes_dict['barcodes']\n\t\n\t# get expected start and stop for barcodes\n\tsearch_dict['start'] = barcodes_dict['start']\n\tsearch_dict['stop'] = search_dict['start'] + len(list(forward_barcs.values())[0])\n\t\n\t# get number of mismatches\n\tif 'mismatches' in barcodes_dict:\n\t\tmismatches = barcodes_dict['mismatches']\n\telse:\n\t\tmismatches = 0\n\t\n\t# construct dictionary for regexes/exact matches\n\tif mismatches == 0:\n\t\t# if not allowing mismatches, just use sequences from yaml file\n\t\tsearch_dict['type'] = 'constant_exact'\n\t\tsearch_dict['forward_search'] = {f\"{key} ({value})\":value for key, value in forward_barcs.items()}\n\tif mismatches > 0:\n\t\t# if allowing mismatches, use regexes\n\t\tsearch_dict['type'] = 'constant_regex'\n\t\tsearch_dict['forward_search'] = {f\"{key} ({value})\":create_mismatches_regex([value], mismatches) for key, value in forward_barcs.items()}\n\t\t\n\treturn search_dict",
"def search_motif(sequences):\n motif = re.compile(r'(?=(N[^P](S|T)[^P]))') #N{P}[ST]{P}\n motif_index = {}\n\n for key,value in sequences.items():\n match_motif = re.finditer(motif, value)\n motif_start_list = []\n\n for i in match_motif:\n motif_start_list.append(str(i.start()+1))\n motif_index[key] = ' '.join(motif_start_list)\n return motif_index",
"def step040():\n logger.logMessage('Begin: matching work files')\n sKey = ''\n mKey = ''\n def readFile(f):\n line = f.readline().rstrip()\n if line == '':\n key = 'ZZZZZZZZZZZZZZZZZZZZZZZZZ'\n return None,key\n else:\n sp = line.split(';')\n key = '{0:25s}'.format(sp[1])[0:19]\n return sp,key\n\n m = open(dbDumpFile,'r')\n s = open(sortedCandidatesFile,'r')\n numrecs = 0\n with open(matchFile,'w') as match:\n mFields,mKey = readFile(m)\n sFields,sKey = readFile(s)\n while mFields != None or sFields != None:\n if sKey == mKey:\n match.write('{0:014d};{1:25s};{2:32s};{3:31s}\\n'.format(int(mFields[0]),mKey,sFields[2],sFields[3]))\n numrecs += 1\n if numrecs % 1000 == 0:\n logger.logMessage(level='DEBUG',message=\"{0:9d} records matched\".format(numrecs))\n sFields,sKey = readFile(s)\n mFields,mKey = readFile(m)\n elif sKey < mKey:\n sFields,sKey = readFile(s)\n else:\n logger.logMessage(level='WARNING',message='Record not matched: {0}'.format(mFields))\n mFields,mKey = readFile(m)\n logger.logMessage(\"Total matched: {0:d}\".format(numrecs))\n\n m.close()\n s.close()\n logger.logMessage('End : matching work files')",
"def mappings(input_report, **kwargs):\n ben = BoomerEngine()\n ben.load(input_report, prefix_map=global_prefix_map)\n writer = StreamingSssomWriter()\n for m in ben.mappings(**kwargs):\n writer.emit(m)\n writer.finish()",
"def _read_pyMatch(fn, precursors):\n with open(fn) as handle:\n reads = defaultdict(realign)\n for line in handle:\n query_name, seq, chrom, reference_start, end, mism, add = line.split()\n reference_start = int(reference_start)\n # chrom = handle.getrname(cols[1])\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n if query_name not in reads:\n reads[query_name].sequence = seq\n iso = isomir()\n iso.align = line\n iso.start = reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start)\n logger.debug(\"%s %s %s %s %s\" % (query_name, reference_start, chrom, iso.subs, iso.add))\n if len(iso.subs) > 1:\n continue\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads"
] | [
"0.59078485",
"0.57317793",
"0.56751376",
"0.5561408",
"0.54683334",
"0.5306675",
"0.53015906",
"0.52467316",
"0.52461946",
"0.52367425",
"0.520722",
"0.5146601",
"0.51458216",
"0.5112829",
"0.5058424",
"0.50535494",
"0.50508714",
"0.5004539",
"0.49987587",
"0.4996127",
"0.49955532",
"0.4977503",
"0.49770632",
"0.49750924",
"0.49705994",
"0.49626094",
"0.49625456",
"0.49562058",
"0.49508756",
"0.49506974"
] | 0.62142134 | 0 |
Reads a PWM file in Jaspar format and returns a Biopython PWM object. | def readPwmFile(pwmFileName, outputLocation, pseudocounts=0.0):
    # Adding pseudocounts
    pwmFile = open(pwmFileName,"r");
    tempFileName = outputLocation+pwmFileName.split("/")[-1]+"temp"
    pwmFileT = open(tempFileName,"w")
    for line in pwmFile: pwmFileT.write(" ".join([str(float(e)+pseudocounts) for e in line.strip().split(" ")])+"\n")
    pwmFile.close()
    pwmFileT.close()
    # Creating PWM from pseudocounted input
    pwmFile = open(tempFileName,"r")
    pwm = Motif.read(pwmFile,"jaspar-pfm")
    pwmFile.close()
    os.system("rm "+tempFileName)
    return pwm | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def goal_pwm(self):\n return self._read(MX_GOAL_PWM)",
"def read_pfm(filename):\n\n\twith open(filename, \"r\") as handle:\n\t\tmotif = motifs.read(handle, \"pfm\")\n\tmotif.pseudocounts = .25\n\tmotif.background = {'A':0.3,'C':0.2,'G':0.2,'T':0.3}\n\n\treturn motif",
"def read(self):\n\n # Load .pico file in memory\n self.file_buffer = PyPico.PicoReader()\n self.file_buffer.open(self.file_path)\n\n # Get .pico file header\n self.header = self.file_buffer.get_header()\n\n # Get .pico file last timecode jam\n self.base_timecode = [str(self.file_buffer.get_frame(0).jamsync_timecode.hours),\n str(self.file_buffer.get_frame(0).jamsync_timecode.minutes),\n str(self.file_buffer.get_frame(0).jamsync_timecode.seconds),\n str(self.file_buffer.get_frame(0).jamsync_timecode.frames)]\n\n # Get .pico file properties\n properties = self.file_buffer.get_properties()\n\n # Get .pico file active channels\n self.channels = []\n channel = 0\n while channel < 4:\n if properties['channels.{0}.enabled'.format(channel)] == 'True':\n self.channels.append(channel)\n channel += 1\n else:\n channel += 1\n\n # Get measured framerate\n self.raw_fps = float(properties['channels.{0}.framerate_measured'.format(self.channels[0])])\n\n # Timecode operations\n self.render_fps = int(round(self.raw_fps / 2))\n\n jam_timecode_str = ':'.join(self.base_timecode)\n self.jam_timecode = Timecode(self.render_fps, jam_timecode_str)\n\n # Set .pico file render first and last frame, can be full or by tc inputs\n if self.render_length == 'Slice':\n self.timecode_in = Timecode(self.render_fps, str(self.timecode_in))\n self.timecode_out = Timecode(self.render_fps, str(self.timecode_out))\n self.frame_in = (self.timecode_in.frames - self.jam_timecode.frames) * 2\n self.frame_out = (self.timecode_out.frames - self.jam_timecode.frames) * 2\n\n else:\n self.frame_in = int(self.header.start_capture_frame_number)\n self.frame_out = int(self.header.stop_capture_frame_number)\n self.timecode_in = Timecode(self.render_fps, frames=int(self.header.start_capture_frame_number))\n self.timecode_out = Timecode(self.render_fps, frames=int(self.header.stop_capture_frame_number))\n\n # Reference Timecode\n self.ref_timecode = Timecode(self.render_fps, frames=(self.jam_timecode.frames + (self.frame_in / 2)))\n\n # Get .pico file \"zero\" frame from the burn in\n self.frame_zero = int(self.file_buffer.read_burn_in(0))\n\n # .pico file frame operations\n self.frame_offset = self.frame_in - self.frame_zero\n self.frame_padding = len(str(self.frame_out - self.frame_in))\n self.total_frames = self.frame_out - self.frame_in\n\n # No need to mess around with the start frame, it should come from the GUI\n\n # if self.render_length == 'Slice':\n # self.frame_start = self.start_frame\n # else:\n # self.frame_start = self.frame_in - self.frame_offset\n\n # Set output names\n if self.override is not None:\n self.output_name = self.override\n else:\n self.output_name = self.file_path",
"def get_file_bpm(path, params = {}):\n try:\n win_s = params['win_s']\n samplerate = params['samplerate']\n hop_s = params['hop_s']\n except:\n \"\"\"\n # super fast\n samplerate, win_s, hop_s = 4000, 128, 64 \n # fast\n samplerate, win_s, hop_s = 8000, 512, 128\n \"\"\"\n # default:\n samplerate, win_s, hop_s = 44100, 1024, 512\n\n s = source(path, samplerate, hop_s)\n samplerate = s.samplerate\n o = tempo(\"specdiff\", win_s, hop_s, samplerate)\n # List of beats, in samples\n beats = []\n # Total number of frames read\n total_frames = 0\n\n while True:\n samples, read = s()\n is_beat = o(samples)\n if is_beat:\n this_beat = o.get_last_s()\n beats.append(this_beat)\n #if o.get_confidence() > .2 and len(beats) > 2.:\n # break\n total_frames += read\n if read < hop_s:\n break\n\n # Convert to periods and to bpm \n bpms = 60./diff(beats)\n b = median(bpms)\n return b",
"def load_sim(filename):\n return pybamm.load(filename)",
"def readMpcorb(path2mpcorb, compression='gzip', filternan=True):\n mpcorb_col_numbers=[(0,7),(8,13),(14,19),(20,25),(26,35),(37,46),\n (48,57),(59,68),(70,79),(80,91),(92,103),(106,116),\n (117,122),(123,126),(127,136),(137,141),\n (142, 145),(146,149),(150,160),(166,194),(194,202)]\n col_names=['ObjID','H','G','epoch','M','argperi','node','i',\n 'e','n','a','reference',\n 'N_Obs', 'N_Opp', 'yr_1st&last_Obs', 'r.m.s',\n 'coarsePerts', 'precisePerts', 'computer',\n 'readableName', 'lastObs']\n skiprows=43\n\n dtp=[str,float,float,str,float,float,float,float,float,float,float]\n dtypes=dict(zip(col_names,dtp))\n\n mpcorb=pd.read_fwf(path2mpcorb,skiprows=skiprows,colspecs=mpcorb_col_numbers,\n names=col_names,dytpe=dtypes,index_col=False, compression=compression)\n\n if (filternan):\n mpcorb.dropna(subset=['a', 'e','i','node','argperi','M','epoch', 'H', 'r.m.s'],inplace=True)\n\n return mpcorb",
"def biopythonMM(pwmFileName,genomeDict,mpbsDict,scoringMethod,tempLocation,pseudocounts=0.1,bitscore=12.0,fpr=0.01,precision=10**4,highCutoff=0.7,functionalDepth=0.9):\n \n # Reading PWM\n pwm = readPwmFile(pwmFileName,tempLocation,pseudocounts)\n pwmName = pwmFileName.split(\"/\")[-1].split(\".\")[0]\n pwmLen = len(pwm)\n\n # Evaluating threshold\n pwmThreshold = 0.0\n if(scoringMethod == \"bitscore\"):\n pwmThreshold = bitscore\n elif(scoringMethod == \"fpr\"):\n sd = Motif.ScoreDistribution(pwm,precision=precision)\n pwmThreshold = sd.threshold_fpr(fpr)\n elif(scoringMethod == \"boyle\"):\n maxScore = pwm.max_score()\n minScore = 0.0 # TODO Boyle's rule is not suited for negative values.\n pwmThreshold = min(highCutoff*maxScore,functionalDepth*(maxScore-minScore))\n else:\n sys.stderr.write(\"Choose a valid scoring method.\\n\")\n sys.exit(0)\n\n # Creating aditional parameters\n chrList = constants.getChromList(reference=[mpbsDict])\n tempMpbsDict = dict([(e,[]) for e in chrList])\n maxValue = -99.0\n\n # Iterating on chromosomes\n for chrName in chrList:\n\n # Reading genome\n sequence = genomeDict[chrName]\n\n # Performing biopython's motif matching\n for pos, score in pwm.search_pwm(sequence,threshold=pwmThreshold):\n if(score > maxValue): maxValue = score\n if(pos >= 0): tempMpbsDict[chrName].append([pos,pos+pwmLen,pwmName,score,\"+\"])\n else: tempMpbsDict[chrName].append([-pos,-pos+pwmLen,pwmName,score,\"-\"])\n\n # Update scores - new scores are within [0,1000]\n for chrName in chrList:\n for e in tempMpbsDict[chrName]:\n mpbsDict[chrName].append([e[0],e[1],e[2],int(1000*(e[3]-pwmThreshold)/(maxValue-pwmThreshold)),e[4]])\n \n return 0",
"def pwm(self):\n return self._pwm",
"def read_jack(run, bin_scheme):\n fname=get_jack_file(run, bin_scheme)\n print(\"reading:\",fname)\n return fitsio.read(fname)",
"def __init__(self, pwm_pin, dir_pin_1, dir_pin_2, pwm_freq):\n\t\tself._pwm_pin = pwm_pin # PWM input pin.\n\t\tself._dir_pin_1 = dir_pin_1 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._dir_pin_2 = dir_pin_2 # GPIO number to control the direction of rotation of the wheel.\n\t\tself._pwm_freq = pwm_freq # PWM cycle.\n\n\t\tself._last_dir = 's' # Last rotation direction of this wheel. 's' indicates stop.\n\t\tself._last_dc_val = 0 # Last duty cycle value.\n\t\tself._current_dc_val = 0 # Current duty cycle value.\n\n\t\tGPIO.setmode(GPIO.BOARD)\n\n\t\t# Set the direction control GPIO output mode.\n\t\tGPIO.setup(self._pwm_pin, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_1, GPIO.OUT)\n\t\tGPIO.setup(self._dir_pin_2, GPIO.OUT)\n\n\t\t# Inits PWM pin.\n\t\tself._motor_pwm = GPIO.PWM(self._pwm_pin, self._pwm_freq) # pwm_freq: Hz\n\t\tself._motor_pwm.start(0) # Set duty cycle to 0.",
"def present_pwm(self):\n return self._read(MX_PRESENT_PWM)",
"def LoadTroikaRefFile(ref_fl):\n refdata = sp.io.loadmat(ref_fl)['BPM0']\n return refdata[2:]",
"def read_motifs(infile=None, fmt=\"pwm\", as_dict=False):\n if infile is None or isinstance(infile, six.string_types): \n infile = pwmfile_location(infile)\n with open(infile) as f:\n motifs = _read_motifs_from_filehandle(f, fmt)\n else:\n motifs = _read_motifs_from_filehandle(infile, fmt)\n\n if as_dict:\n motifs = {m.id:m for m in motifs}\n\n return motifs",
"def read_ppm(ppm_file):\n\n p6, width, height, depth = ppm_file.readline().split()\n\n assert p6 == b'P6'\n assert depth == b'255', \"Only 8-bit PPM files are supported\"\n\n width, height = int(width), int(height)\n\n data = np.fromfile(ppm_file, dtype=np.uint8, count=width * height * 3)\n\n return data.reshape(height, width, 3)",
"def read_pccp(fname, seq):\n pccp = phy7(fname)\n return np.array([pccp[i] for i in seq])",
"def _readBTS(self,fname):\n with BinaryFile(fname) as f:\n #\n # read header info\n #\n if self.verbose: print('Reading header information from',fname)\n\n ID = f.read_int2()\n assert( ID==7 or ID==8 )\n if ID==7: filetype = 'non-periodic'\n elif ID==8: filetype = 'periodic'\n else: filetype = 'UNKNOWN'\n if self.verbose:\n print(' id= {:d} ({:s})'.format(ID,filetype))\n\n # - read resolution settings\n self.NZ = f.read_int4()\n self.NY = f.read_int4()\n self.Ntower = f.read_int4()\n if self.verbose:\n print(' NumGrid_Z,_Y=',self.NZ,self.NY)\n print(' ntower=',self.Ntower)\n self.N = f.read_int4()\n self.dz = f.read_float(dtype=self.realtype)\n self.dy = f.read_float(dtype=self.realtype)\n self.dt = f.read_float(dtype=self.realtype)\n self.period = self.realtype(self.N * self.dt)\n self.Nsize = 3*self.NY*self.NZ*self.N\n if self.verbose:\n print(' nt=',self.N)\n print(' (problem size: {:d} points)'.format(self.Nsize))\n print(' dz,dy=',self.dz,self.dy)\n print(' TimeStep=',self.dt)\n print(' Period=',self.period)\n\n # - read reference values\n self.uhub = f.read_float(dtype=self.realtype)\n self.zhub = f.read_float(dtype=self.realtype) # NOT USED\n self.zbot = f.read_float(dtype=self.realtype)\n if self.Umean is None:\n self.Umean = self.uhub\n if self.verbose:\n print(' Umean = uhub =',self.Umean,\n '(for calculating fluctuations)')\n else: # user-specified Umean\n if self.verbose:\n print(' Umean =',self.Umean,\n '(for calculating fluctuations)')\n print(' uhub=',self.uhub,' (NOT USED)')\n if self.verbose:\n print(' HubHt=',self.zhub,' (NOT USED)')\n print(' Zbottom=',self.zbot)\n\n # - read scaling factors\n self.Vslope = np.zeros(3,dtype=self.realtype)\n self.Vintercept = np.zeros(3,dtype=self.realtype)\n for i in range(3):\n self.Vslope[i] = f.read_float(dtype=self.realtype)\n self.Vintercept[i] = f.read_float(dtype=self.realtype)\n if self.verbose:\n # output is float64 precision by default...\n print(' Vslope=',self.Vslope)\n print(' Vintercept=',self.Vintercept)\n\n # - read turbsim info string\n nchar = f.read_int4()\n version = f.read(N=nchar)\n if self.verbose: print(version)\n\n #\n # read normalized data\n #\n # note: need to specify Fortran-order to properly read data using np.nditer\n t0 = time.process_time()\n if self.verbose: print('Reading normalized grid data')\n\n self.U = np.zeros((3,self.NY,self.NZ,self.N),order='F',dtype=self.realtype)\n self.T = np.zeros((self.N,self.NY,self.NZ))\n if self.verbose:\n print(' U size :',self.U.nbytes/1024.**2,'MB')\n\n for val in np.nditer(self.U, op_flags=['writeonly']):\n val[...] = f.read_int2()\n self.U = self.U.swapaxes(3,2).swapaxes(2,1) # new shape: (3,self.N,self.NY,self.NZ)\n\n if self.Ntower > 0:\n if self.verbose:\n print('Reading normalized tower data')\n self.Utow = np.zeros((3,self.Ntower,self.N),\n order='F',dtype=self.realtype)\n if self.verbose:\n print(' Utow size :',self.Utow.nbytes/1024.**2,'MB')\n for val in np.nditer(self.Utow, op_flags=['writeonly']):\n val[...] 
= f.read_int2()\n\n if self.verbose:\n print(' Read velocitiy fields in',time.process_time()-t0,'s')\n \n #\n # calculate dimensional velocity\n #\n if self.verbose:\n print('Calculating velocities from normalized data')\n for i in range(3):\n self.U[i,:,:,:] -= self.Vintercept[i]\n self.U[i,:,:,:] /= self.Vslope[i]\n if self.Ntower > 0:\n self.Utow[i,:,:] -= self.Vintercept[i]\n self.Utow[i,:,:] /= self.Vslope[i]\n self.U[0,:,:,:] -= self.Umean # uniform inflow w/ no shear assumed\n\n print(' u min/max [',np.min(self.U[0,:,:,:]),\n np.max(self.U[0,:,:,:]),']')\n print(' v min/max [',np.min(self.U[1,:,:,:]),\n np.max(self.U[1,:,:,:]),']')\n print(' w min/max [',np.min(self.U[2,:,:,:]),\n np.max(self.U[2,:,:,:]),']')\n\n self.scaling = np.ones((3,self.NZ))\n\n #\n # calculate coordinates\n #\n if self.verbose:\n print('Calculating coordinates')\n #self.y = -0.5*(self.NY-1)*self.dy + np.arange(self.NY,dtype=self.realtype)*self.dy\n self.y = np.arange(self.NY,dtype=self.realtype)*self.dy\n self.z = self.zbot + np.arange(self.NZ,dtype=self.realtype)*self.dz\n #self.ztow = self.zbot - np.arange(self.NZ,dtype=self.realtype)*self.dz #--NOT USED\n\n self.t = np.arange(self.N,dtype=self.realtype)*self.dt\n if self.verbose:\n print('Read times [',self.t[0],self.t[1],'...',self.t[-1],']')",
"def readLBOMDIN(self):\n logger = logging.getLogger(__name__)\n\n if os.path.exists(\"lbomd.IN\"):\n f = open(\"lbomd.IN\")\n\n try:\n f.readline()\n f.readline()\n f.readline()\n f.readline()\n\n line = f.readline().strip()\n array = line.split()\n try:\n PBC = [0] * 3\n PBC[0] = int(array[0])\n PBC[1] = int(array[1])\n PBC[2] = int(array[2])\n\n except IndexError:\n logger.warning(\"Index error 2 (check lbomd.IN format)\")\n\n except Exception:\n err = \"Read lbomd.IN failed with error:\\n\\n%s\" % \"\".join(traceback.format_exception(*sys.exc_info()))\n self.displayError(err)\n\n finally:\n f.close()",
"def read_corr_jack(lens_run, rand_run, bin_scheme):\n\n fname=get_corr_jack_file(lens_run, rand_run, bin_scheme)\n print(\"reading:\",fname)\n return fitsio.read(fname)",
"def get_fan_pwm(self, pwm_val=None):\n self.assertNotEqual(pwm_val, None, \"Expected PWM value needs to be set\")\n\n data = run_shell_cmd(\"/usr/local/bin/get_fan_speed.sh\")\n data = data.split(\"\\n\")\n for line in data:\n if len(line) == 0:\n continue\n line = line.split(\"(\")\n line = line[1].split(\"%\")\n if abs(int(line[0]) - int(pwm_val)) < 2:\n continue\n else:\n return [False, data]\n return [True, None]",
"def bin_to_bpm(self, bin):\n\t\t\n\t\treturn (60.0 * bin * self.fps) / float(len(self.buf))",
"def readPFM(file):\n file = open(file, 'rb')\n\n color = None\n width = None\n height = None\n scale = None\n endian = None\n\n header = file.readline().rstrip()\n if header == b'PF':\n color = True\n elif header == b'Pf':\n color = False\n else:\n raise Exception('Not a PFM file.')\n\n dims = file.readline()\n try:\n width, height = list(map(int, dims.split()))\n except:\n raise Exception('Malformed PFM header.')\n\n scale = float(file.readline().rstrip())\n if scale < 0: # little-endian\n endian = '<'\n scale = -scale\n else:\n endian = '>' # big-endian\n\n data = np.fromfile(file, endian + 'f')\n shape = (height, width, 3) if color else (height, width, 1)\n\n data = np.reshape(data, shape)\n data = np.flipud(data)\n return data, scale",
"def DAC(fp):\n length = unpack('>H', fp.read(2))[0]\n _remaining = length - 2\n\n _tc, _tb, _cs = [], [], []\n while _remaining:\n tc, tb = _split_byte(fp.read(1))\n _cs.append(unpack('>B', fp.read(1))[0])\n _remaining -= 2\n _tc.append(tc)\n _tb.append(tb)\n\n info = {\n 'La' : length,\n 'Tc' : _tc,\n 'Tb' : _tb,\n 'Cs' : _cs\n }\n\n return info",
"def pfm_to_pwm(self, pfm, pseudo=0.001):\n return [[(x + pseudo)/(float(np.sum(row)) + pseudo * 4) for x in row] for row in pfm]",
"def loadPulseData(filename, suffix = ''):\n data = np.genfromtxt(filename+'.txt', skip_header=3, names=True,\n dtype='i8,f8,S5,f8,f8,f8,f8,f8,f8')\n print \"Importing...\\n\"\n for key in data.dtype.fields.keys():\n name = key + suffix\n print name\n globals()[name] = data[key]",
"def getMpcorb(url='https://minorplanetcenter.net/iau/MPCORB/MPCORB.DAT.gz', fname='MPCORB.DAT.gz', verbose=True):\n\n #filename = wget.download(url)\n try:\n r = requests.get(url, allow_redirects=True)\n open(fname, 'wb').write(r.content)\n if (verbose):\n print('Download complete:', url)\n except:\n print(\"Error in getMpcorb: could not download \", fname, \" at \", url)\n raise\n return",
"def compute_chroma_bpm(filepath):\n\t\n\ty, sr = librosa.load(filepath)\n\t# Compute chroma features from the harmonic signal\n\tchromagram = librosa.feature.chroma_stft(y=y,sr=sr)\n\tchromaDF = pd.DataFrame(chromagram)\n\n\t# Filter intensity values less than 1\n\tchromaDF[chromaDF < 1] = 0\n\tchroma_f = chromaDF.sum(axis = 1)\n\n\t# Calculate chroma distribution\n\tchroma_p = [i / sum(chroma_f) for i in chroma_f]\n\t\n\t# Beat track on the percussive signal\n\ttempo, beat_frames = librosa.beat.beat_track(y=y,sr=sr)\n\t\n\tresults = [tempo]\n\tresults.append(chroma_p)\n\t\n\treturn results",
"def fetch_baja_bathymetry():\n data_file = POOCH.fetch(\"baja-bathymetry.csv.xz\")\n data = pd.read_csv(data_file, compression=\"xz\")\n return data",
"def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])",
"def _get_bpm_from_soundstretch(output):\n \n output = output.split(\"\\n\")\n for line in output:\n if 'Detected BPM rate ' in line:\n bpm = line[18:]\n return float(bpm)\n return None # Could not parse output",
"def read_pgm(filename, byteorder='>'):\n with open(filename, 'rb') as f:\n buffer = f.read()\n try:\n header, width, height, maxval = re.search(\n b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\n except AttributeError:\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n return numpy.frombuffer(buffer,\n dtype='u1' if int(maxval) < 256 else byteorder+'u2',\n count=int(width)*int(height),\n offset=58\n ).reshape((int(height), int(width)))"
] | [
"0.56788427",
"0.5495728",
"0.5376343",
"0.53577346",
"0.5160215",
"0.5143681",
"0.50724196",
"0.502228",
"0.49792627",
"0.49575832",
"0.4956543",
"0.48406678",
"0.47284013",
"0.47217077",
"0.4717854",
"0.4705762",
"0.46920034",
"0.46860862",
"0.46798915",
"0.4671482",
"0.4652474",
"0.46484253",
"0.46448103",
"0.4644738",
"0.46357822",
"0.46308404",
"0.46026155",
"0.45968744",
"0.45961052",
"0.4591402"
] | 0.6233586 | 0 |
Test various retrieval utilities on a single-element list of Artifacts. | def testGetFromSingleList(self):
artifacts = [standard_artifacts.Examples()]
artifacts[0].uri = '/tmp/evaluri'
artifacts[0].split_names = '["eval"]'
self.assertEqual(artifacts[0],
artifact_utils.get_single_instance(artifacts))
self.assertEqual('/tmp/evaluri', artifact_utils.get_single_uri(artifacts))
self.assertEqual('/tmp/evaluri/eval',
artifact_utils.get_split_uri(artifacts, 'eval'))
with self.assertRaises(ValueError):
artifact_utils.get_split_uri(artifacts, 'train') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_read_artifact(self):\n pass",
"def get_single_instance(artifact_list: List[Artifact]) -> Artifact:\n if len(artifact_list) != 1:\n raise ValueError('expected list length of one but got {}'.format(\n len(artifact_list)))\n return artifact_list[0]",
"def test_get_experiment_artifact(self):\n query_string = [('id', 'id_example'),\n ('path', 'path_example')]\n headers = { \n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n }\n response = self.client.open(\n '/api/v1/experiment-artifacts/download',\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_get_list(self):\n pass",
"def test_list_artifacts_for_job(fake_client):\n artifacts = Artifacts(fake_client, \"base\")\n artifacts.list_artifacts_for_job(\"org_slug\", \"pipe_slug\", \"build_no\", 123)\n url = \"base/organizations/org_slug/pipelines/pipe_slug/builds/build_no/jobs/123/artifacts/\"\n fake_client.get.assert_called_with(\n url, query_params={\"page\": 0}, with_pagination=False\n )",
"def test_list_artifacts_for_build(fake_client):\n artifacts = Artifacts(fake_client, \"base\")\n artifacts.list_artifacts_for_build(\"org_slug\", \"pipe_slug\", \"build_no\")\n url = \"base/organizations/org_slug/pipelines/pipe_slug/builds/build_no/artifacts/\"\n fake_client.get.assert_called_with(\n url, query_params={\"page\": 0}, with_pagination=False\n )",
"def get_artifacts_by_alert_command(\n client: Client, args: Dict[str, Any]\n) -> Union[str, Dict[str, Any]]:\n uuid = args.get('uuid', '')\n uuid = uuid.lower()\n\n # Preparing header\n headers = {\n 'Content-Type': CONTENT_TYPE_ZIP,\n 'X-FeApi-Token': client.get_api_token(),\n }\n\n # Call get artifacts data api\n artifacts_resp = client.http_request(\n 'GET',\n url_suffix=URL_SUFFIX['GET_ARTIFACTS'].format(uuid),\n headers=headers,\n )\n\n # Create file from Content\n if int(artifacts_resp.headers.get('Content-Length', '0')) > 0:\n file_name = f'{uuid}.zip'\n file_entry = fileResult(\n filename=file_name, data=artifacts_resp.content\n )\n return file_entry\n else:\n return MESSAGES['NO_RECORDS_FOUND'].format('artifacts data')",
"def test_aqua_function_for_multiple_ddos(aquarius_instance):\n assert aquarius_instance.list_assets()\n assert aquarius_instance.list_assets_ddo()",
"def test_installments_get(self):\n pass",
"def test_multiple_gets(uris):\n\n for uri in uris:\n print('='*10 + ' Try uri : {uri} '.format(uri=uri) + '='*10)\n resp = get_api_url(uri)\n print(resp)\n try:\n pprint(resp.json())\n except Exception as e:\n print(resp.text)",
"def test_listtem_using_get(self):\n pass",
"def test_get_art_info(self):\n pass",
"def get_artefacts(self, leverable):\n\n if self.url == 'test':\n artefactlist = ['fk-' + leverable + '_wlsapp', 'fk-' + leverable + '_tuxapp']\n else:\n artefactlist = []\n try:\n response = urlopen(\n 'http://' + self.url + '/nexus/service/local/lucene/search?repositoryId=rpm-dev&g=fk.rpm.'\n + leverable)\n except (HTTPError, URLError) as e:\n logger.error(e)\n return ['Error getting artefacts!!!']\n\n metadata_root = elementTree.parse(response)\n for data in metadata_root.iter('artifact'):\n artefact = data.find('artifactId').text\n if artefact not in artefactlist:\n artefactlist.append(artefact)\n\n return artefactlist",
"def get_single_uri(artifact_list: List[Artifact]) -> Text:\n return get_single_instance(artifact_list).uri",
"def test_retrieve(self):\n iam_resources = _create_list_of_mock_iam_resources()\n mock_data_access = mock.MagicMock()\n mock_data_access.scanner_iter.return_value = iam_resources\n mock_service_config = mock.MagicMock()\n mock_service_config.model_manager = mock.MagicMock()\n mock_service_config.model_manager.get.return_value = (\n mock.MagicMock(), mock_data_access)\n self.scanner.service_config = mock_service_config\n\n audit_logging_data = self.scanner._retrieve()\n\n expected_projects = [\n 'organization/234/project/proj-1/',\n 'organization/234/folder/56/project/proj-2/',\n 'organization/234/project/proj-3/'\n ]\n expected_audit_configs = [\n {\n 'allServices': {\n 'ADMIN_READ': set(),\n }\n },\n {\n 'allServices': {\n 'ADMIN_READ': set([\n 'user:[email protected]',\n 'user:[email protected]',\n 'user:[email protected]'\n ]),\n },\n 'cloudsql.googleapis.com': {\n 'DATA_READ': set(),\n 'DATA_WRITE': set(),\n },\n 'compute.googleapis.com': {\n 'DATA_READ': set(),\n 'DATA_WRITE': set(),\n }\n },\n {\n 'allServices': {\n 'ADMIN_READ': set(),\n 'DATA_WRITE': set(),\n },\n 'cloudsql.googleapis.com': {\n 'ADMIN_READ': set(['user:[email protected]']),\n }\n },\n ]\n\n # _retrieve only returns projects.\n self.assertEqual(3, len(audit_logging_data))\n\n for i in range(3):\n actual_project, actual_audit_configs = audit_logging_data[i]\n self.assertEqual(expected_projects[i], actual_project.full_name)\n self.assertEqual(expected_audit_configs[i],\n actual_audit_configs.service_configs)",
"def test_get_foods(self):\n pass",
"def test():\n test = [{'key': 'val1'}, ['key']]\n assert fetch_data_by_keys(*test).unwrap() == 'val1'",
"def test_get_foods_list(self):\n pass",
"def test_list_dependent_assets1(self):\n pass",
"def list_artifacts(arn=None, type=None, nextToken=None):\n pass",
"def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)",
"def test_get_list8(self):\n pass",
"def add_artifacts_from_result(args, result):\n for art in result.get_artifacts():\n add_artifact(args, art)",
"def test_products_get_pattern_multiple_match(data, mocker):\n mocker.patch(\"sps.request.fetch\", autospec=True)\n request.fetch.return_value = data\n assert products.get(\"1\", \"fake-file-name\", False, False) == data[\"data\"]\n assert products.get(\"SUSE\", \"fake-file-name\", False, False) == data[\"data\"]\n assert products.get(\"x86\", \"fake-file-name\", False, False) == data[\"data\"]",
"def test_get_recipe_information_bulk(self):\n pass",
"def test_get_work_type_list(self):\n # Login as simple user\n self.authenticate(self.user)\n\n # Get work type list\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 2)\n self.assertEqual(len(response.data[\"results\"]), 2)\n\n # Artists are sorted by name\n self.check_work_type_json(response.data[\"results\"][0], self.wt1)\n self.check_work_type_json(response.data[\"results\"][1], self.wt2)",
"def _run_test_fn(self, artifacts, test_fn):\n for artifact in artifacts:\n self._context.set_any(artifact.type.value, artifact)\n test_fn(self._context)",
"def test_get_results(self):\n pass",
"def test_get_details7(self):\n pass",
"def test_get_all(client: FlaskClient):\n response1 = util.upload_file(\n client, DEFAULT_USER, get_example_file(ExampleFileType.Txt)\n )\n response2 = util.upload_file(\n client, DEFAULT_USER, get_example_file(ExampleFileType.Jpg)\n )\n response3 = util.upload_file(\n client, DEFAULT_USER, get_example_file(ExampleFileType.Png)\n )\n\n # Now retrieve them\n response_get = util.get_all_files(client, DEFAULT_USER)\n assert response_get.status == \"200 OK\"\n assert len(response_get.json) == 3\n assert response1.json in response_get.json\n assert response2.json in response_get.json\n assert response3.json in response_get.json"
] | [
"0.662036",
"0.62965786",
"0.62030226",
"0.60663354",
"0.593072",
"0.5918591",
"0.58616775",
"0.57673883",
"0.5732222",
"0.57250345",
"0.57193136",
"0.57103896",
"0.56704307",
"0.56461483",
"0.5623771",
"0.56043285",
"0.558357",
"0.5563329",
"0.55472404",
"0.5546737",
"0.55381167",
"0.5527053",
"0.55205965",
"0.5517234",
"0.55137837",
"0.5508391",
"0.55018526",
"0.5479462",
"0.5473847",
"0.54669225"
] | 0.6868066 | 0 |
Test various retrieval utilities on a list containing a split Artifact. | def testGetFromSplits(self):
artifacts = [standard_artifacts.Examples()]
artifacts[0].uri = '/tmp'
artifacts[0].split_names = artifact_utils.encode_split_names(
['train', 'eval'])
self.assertEqual(artifacts[0].split_names, '["train", "eval"]')
self.assertIs(artifact_utils.get_single_instance(artifacts), artifacts[0])
self.assertEqual('/tmp', artifact_utils.get_single_uri(artifacts))
self.assertEqual('/tmp/train',
artifact_utils.get_split_uri(artifacts, 'train'))
self.assertEqual('/tmp/eval',
artifact_utils.get_split_uri(artifacts, 'eval')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testGetFromSingleList(self):\n artifacts = [standard_artifacts.Examples()]\n artifacts[0].uri = '/tmp/evaluri'\n artifacts[0].split_names = '[\"eval\"]'\n self.assertEqual(artifacts[0],\n artifact_utils.get_single_instance(artifacts))\n self.assertEqual('/tmp/evaluri', artifact_utils.get_single_uri(artifacts))\n self.assertEqual('/tmp/evaluri/eval',\n artifact_utils.get_split_uri(artifacts, 'eval'))\n with self.assertRaises(ValueError):\n artifact_utils.get_split_uri(artifacts, 'train')",
"def test_get_parts(self):\n pass",
"def test_get_list(self):\n pass",
"def _get_split_instance(artifact_list: List[Artifact], split: Text) -> Artifact:\n matched = [x for x in artifact_list if x.split == split]\n if len(matched) != 1:\n raise ValueError('{} elements matches split {}'.format(len(matched), split))\n return matched[0]",
"def test_read_artifact(self):\n pass",
"def test_aqua_function_for_multiple_ddos(aquarius_instance):\n assert aquarius_instance.list_assets()\n assert aquarius_instance.list_assets_ddo()",
"def test_get_list8(self):\n pass",
"def test_list(self):\n pass",
"def test_list(self):\n pass",
"def get_artefacts(self, leverable):\n\n if self.url == 'test':\n artefactlist = ['fk-' + leverable + '_wlsapp', 'fk-' + leverable + '_tuxapp']\n else:\n artefactlist = []\n try:\n response = urlopen(\n 'http://' + self.url + '/nexus/service/local/lucene/search?repositoryId=rpm-dev&g=fk.rpm.'\n + leverable)\n except (HTTPError, URLError) as e:\n logger.error(e)\n return ['Error getting artefacts!!!']\n\n metadata_root = elementTree.parse(response)\n for data in metadata_root.iter('artifact'):\n artefact = data.find('artifactId').text\n if artefact not in artefactlist:\n artefactlist.append(artefact)\n\n return artefactlist",
"def test_products_get_pattern_multiple_match(data, mocker):\n mocker.patch(\"sps.request.fetch\", autospec=True)\n request.fetch.return_value = data\n assert products.get(\"1\", \"fake-file-name\", False, False) == data[\"data\"]\n assert products.get(\"SUSE\", \"fake-file-name\", False, False) == data[\"data\"]\n assert products.get(\"x86\", \"fake-file-name\", False, False) == data[\"data\"]",
"def test_get_file_executors(self):\n pass",
"def test_listtem_using_get(self):\n pass",
"def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])",
"def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])",
"def test_scrape_results(self):\n self.assertIsInstance(self.scrapes, EntityList)\n self.assertEqual(len(self.scrapes), 3)\n self.assertEqual([s.title for s in self.scrapes[1:]], ['Second article', 'Third article'])",
"def test_scrape_results(self):\n self.assertIsInstance(self.scrapes, EntityList)\n self.assertEqual(len(self.scrapes), 3)\n self.assertEqual([s.title for s in self.scrapes[1:]], ['Second article', 'Third article'])",
"def test_list_dependent_assets1(self):\n pass",
"def test_scrape_multiple(self):\n self.assertEqual(self.blogs[0].title, 'First article')\n self.assertEqual(self.blogs[0].content, ['First para', 'Second para'])\n self.assertEqual(self.blogs[1].title, 'Second article')\n self.assertEqual(self.blogs[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.blogs[2].title, 'Third article')\n self.assertEqual(self.blogs[2].content, ['Thing one', 'Thing two'])",
"def test_multiples(self):\n\n checkit=subprocess.run([\"python\", \"../../taxonomy/src_files/validate_match_batch.py\", \"-i\", \"../resource_files/validate_folder2\", \"-m\", \"../resource_files/testing_good_mapfile.csv\"], capture_output=True, text=True)\n spl_folder=checkit.stdout.strip().split(\"/\")[-2]\n spl_output=\"{}/{}\".format(spl_folder, checkit.stdout.strip().split(\"/\")[-1])\n \n with open(\"../processed_files/{}\".format(spl_output), 'r') as f:\n get_lines=f.readlines()\n self.assertEqual(get_lines[0].strip(),\"id,query,blca,confidence,match\")\n self.assertEqual(get_lines[1].strip(),\"FC000001.01.02,Pretendbacterium bacterium,Pretendbacterium bacterium,16.3265306122,1\") # regular match\n self.assertEqual(get_lines[2].strip(),\"FC000002.01.02,Pretendbacterium bacterium2,Pretendbacterium bacterium2,16.3265306122,1\") # species only\n self.assertEqual(get_lines[3].strip(),\"FC000003.01.02,Pretendbacterium bacterium3,Pretendbacterium bacterium3,16.3265306122,1\") # number after genus, species only\n self.assertEqual(get_lines[4].strip(),\"FC000004.01.02,Pretendbacterium bacterium4 SK52 = DSM 20,Pretendbacterium bacterium4,16.3265306122,1\") # strain number in reference\n self.assertEqual(get_lines[5].strip(),\"FC000005.01.02,Pretendbacterium bacterium5 SK52 = DSM 20,Pretendbacterium bacterium5,16.3265306122,1\") # strain number in reference, species only\n \n print(\"removing ../processed_files/{}\".format(spl_folder))\n shutil.rmtree(\"../processed_files/{}\".format(spl_folder))",
"def test():\n test = [{'key': 'val1'}, ['key']]\n assert fetch_data_by_keys(*test).unwrap() == 'val1'",
"def test_get_art_info(self):\n pass",
"def test_scrape_results(self):\n self.assertIsInstance(self.blogs, EntityList)\n self.assertEqual(len(self.blogs), 3)\n self.assertEqual([s.title for s in self.blogs[1:]], ['Second article', 'Third article'])",
"def test_get_multiple(multiple_bucket): # pylint: disable=redefined-outer-name\n for idx in range(2):\n element_number = idx + 1\n assert multiple_bucket.get(f\"key {element_number}\") == f\"value {element_number}\"",
"def test_list_dependent_assets2(self):\n pass",
"def test_match_items(self):\n # Returns a tuple\n matches = self.site.match(r\"(.*)\")\n self.assertIsInstance(matches[0], tuple)\n\n # Returns match data and the item itself\n for match, item in matches:\n self.assertEqual(match.group(1), item.filename)",
"def test_splitlist():\n lst = [4, 2, 3, 1, 6, 7]\n lt, pi, gt = splitlist(lst)\n if lt == [2, 3, 1] and pi == 4 and gt == [6, 7]:\n print(\"test splitlist OK!\")\n else:\n print(\"test splitlist Failed!\")",
"def test_list(self, array: dict) -> None:\r\n item = read_items(array)\r\n if read_type(item) == 'object':\r\n logger.debug('list -> dict')\r\n self.test_dict(obj=item)\r\n elif read_type(item) == 'array':\r\n logger.debug('list -> list')\r\n self.test_list(array=item)",
"def test_handle_list(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n team2 = Team(\"OTEAM\", \"other team\", \"android\")\n self.db.query.return_value = [team, team2]\n attach = team.get_basic_attachment()\n attach2 = team2.get_basic_attachment()\n attachment = [attach, attach2]\n with self.app.app_context():\n resp, code = self.testcommand.handle(\"team list\", user)\n expect = {'attachments': attachment}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.query.assert_called_once_with(Team)",
"def test_list_dependent_assets3(self):\n pass"
] | [
"0.7153729",
"0.6059467",
"0.602834",
"0.6018038",
"0.5820629",
"0.55848837",
"0.55712974",
"0.5540031",
"0.5540031",
"0.5531399",
"0.5525339",
"0.5524753",
"0.54710245",
"0.54397416",
"0.54397416",
"0.54349387",
"0.54349387",
"0.54316485",
"0.54262596",
"0.541444",
"0.54034984",
"0.5400897",
"0.53994536",
"0.53891313",
"0.5388949",
"0.53867865",
"0.53783774",
"0.5376365",
"0.5366581",
"0.53614885"
] | 0.694926 | 1 |
Tests that the current nmrplot.add_signals output agrees with an accepted dataset. The original dataset was generated before the line-broadening scaling_factor was added to nmrplot.lorentz, so Y is scaled to half to make that data consistent with the new lorentz function. | def test_add_signals():
x = np.linspace(390, 410, 200)
doublet = [(399, 1), (401, 1)]
y = add_signals(x, doublet, 1)
X = np.array([x for x, _ in ADD_SIGNALS_DATASET])
Y = np.array([y / 2 for _, y in ADD_SIGNALS_DATASET]) # scale to match
print(y)
print(Y)
assert np.array_equal(x, X)
assert np.array_equal(y, Y) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_spreads():\n fig, (ax_prior, ax2_post) = plt.subplots(\n nrows=2, figsize=figsize(aspect=1.2))\n\n # train margin-dependent Elo model\n melo = Melo(lines=np.arange(-49.5, 50.5), commutes=False, k=1e-4)\n melo.fit(league.times, league.labels1, league.labels2, league.spreads)\n\n # exact prior distribution\n outcomes = melo.training_data.value[:, np.newaxis] > melo.lines\n sf = np.mean(outcomes, axis=0)\n ax_prior.plot(melo.lines, sf, color='k')\n\n # label names\n label1 = str(league.lambda1)\n label2_list = [str(lambda2) for lambda2 in league.lambda2_list]\n\n plot_args = [\n (ax_prior, melo.first_update, 'prior'),\n (ax2_post, melo.last_update, 'posterior'),\n ]\n\n for ax, time, title in plot_args:\n for n, label2 in enumerate(label2_list):\n\n lines, sf = melo._predict(time, label1, label2)\n label = r'$\\lambda_2={}$'.format(label2)\n\n if ax.is_first_row():\n ax.plot(lines[n::6], sf[n::6], 'o', zorder=2, label=label)\n\n if ax.is_last_row():\n ax.plot(lines, sf, 'o', zorder=2, label=label)\n\n sf = skellam.sf(melo.lines, int(label1), int(label2))\n ax.plot(melo.lines, sf, color='k')\n\n leg = ax.legend(title=r'$\\lambda_1 = {}$'.format(label1),\n handletextpad=.2, loc=1)\n leg._legend_box.align = 'right'\n\n lines = np.floor(lines)\n ax.set_xticks(lines[::10])\n ax.set_xlim(lines.min(), lines.max())\n\n if ax.is_last_row():\n ax.set_xlabel('line $=$ scored $-$ allowed')\n\n ax.set_ylabel('probability to cover line')\n\n ax.annotate(title, xy=(.05, .05),\n xycoords='axes fraction', fontsize=24)\n\n set_tight(h_pad=1)",
"def createSignalModelLinear(data):\n print \"Creating model\"\n switchpoint = DiscreteUniform('switchpoint', lower=0, upper=len(data))\n \n noise_sigma = HalfNormal('noise_sigma', tau=sigToTau(.01))\n exp_sigma = HalfNormal('exp_sigma', tau=sigToTau(.05))\n \n lin_scale = Uniform('lin_scale', lower=0, upper=.01)\n \n timestamp = np.arange(0, len(data), dtype=np.float)\n \n @deterministic(plot=False, name=\"test\")\n def uncertainty_model(s=switchpoint, n=noise_sigma, e=exp_sigma):\n ''' Concatenate Poisson means '''\n out = np.empty(len(data))\n out[:s] = n\n out[s:] = e\n return out\n \n @deterministic\n def tau(eps=uncertainty_model):\n return np.power(eps, -2)\n \n\n @deterministic(plot=False)\n def baseline_model(s=switchpoint, scale=lin_scale):\n out = np.zeros(len(data))\n out[s:] = scale * (timestamp[s:] - s)\n \n# plt.figure(fig.number)\n# plt.clf()\n# plt.plot(out ,color=\"blue\" )\n# plt.plot(data ,color=\"red\" )\n# value = raw_input(' --> Press q to quit, any other key to continue\\n')\n\n return out\n\n\n baseline_observed = Normal(\"baseline_observed\", mu=baseline_model, tau=tau, value=data, observed= True )\n return locals()",
"def test_data_is_scaled():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"SGD\")\n assert sum(atom.sgd.predict(X_bin)) > 0 # Always 0 if not scaled",
"def add_signal_to_noise(self):\n\n # noise\n noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n noise.data.data = self.td_noise.data\n\n # signal\n signal = lal.CreateREAL8TimeSeries('blah',\n self.ext_params.geocent_peak_time, 0, self.td_signal.delta_t,\n lal.StrainUnit, int(self.td_signal.duration /\n self.td_signal.delta_t))\n signal.data.data = self.td_signal.data\n\n win = lal.CreateTukeyREAL8Window(len(signal.data.data),0.1)\n win.data.data[len(signal.data.data):] = 1.0\n #signal.data.data *= win.data.data\n\n # --- Scale to a target snr\n print '---'\n if self.target_snr is not None:\n\n tmp_sig = pycbc.types.TimeSeries(signal.data.data,\n delta_t=self.td_signal.delta_t)\n\n current_snr = pycbc.filter.sigma(tmp_sig, psd=self.psd,\n low_frequency_cutoff=self.f_low,\n high_frequency_cutoff=0.5/self.delta_t)\n\n signal.data.data *= self.target_snr / current_snr\n # ----\n\n # sum\n noise_plus_signal = lal.AddREAL8TimeSeries(noise, signal)\n\n self.td_response = \\\n pycbc.types.timeseries.TimeSeries(\\\n initial_array=np.copy(noise_plus_signal.data.data),\n delta_t=noise_plus_signal.deltaT,\n epoch=noise_plus_signal.epoch)\n\n # Finally, zero-pad the signal vector to have the same length as the actual data\n # vector\n no_noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n\n no_noise.data.data = np.zeros(\\\n int(self.td_noise.duration / self.td_noise.delta_t))\n\n signal = lal.AddREAL8TimeSeries(no_noise, signal)\n\n self.td_signal = \\\n pycbc.types.timeseries.TimeSeries(initial_array=np.copy(signal.data.data),\n delta_t=signal.deltaT, epoch=noise_plus_signal.epoch)\n\n del noise, signal, noise_plus_signal",
"def fitSingleLorentzian(data, expectedQ, lookahead, delta, backgroundData = None, svgWindowLength = 11, svgOrder= 5):\n f=data[:,0]\n t=data[:,3]\n\n if backgroundData == None:\n r=data[:,2]\n elif len(backgroundData) == len(r=data[:,2]):\n r=data[:,2]-backgroundData\n else:\n print \"Length of Background Data is not equal Length of r\"\n return\n\n\n\n #apply SVG filter to make peak detection easier\n fr=signal.savgol_filter(r,svgWindowLength,svgOrder)\n _max, _min = pd.peakdetect(fr, None, lookahead, delta)\n\n #create lists of locations of max/min, values of xmax/xmin and ymax/ymin\n fmi = [p[0] for p in _max]\n fni= [p[0] for p in _min]\n fm = [f[p[0]] for p in _max]\n rm = [p[1] for p in _max]\n fn = [f[p[0]] for p in _min]\n rn = [p[1] for p in _min]\n\n limitToBigPeak=True\n #change Q to fwhm based on first detected peak location\n\n fwhm=fm[0]/expectedQ\n paramList=[]\n\n #if limitToBigPeak is true, will only try to fit maximum peak. Good if only\n #looking at single resonance peak. Problematic if there is a larger BG peak\n #which gets fit.\n if limitToBigPeak == True:\n max_value = max(rm)\n max_index = rm.index(max_value)\n paramList=[fm[max_index], rm[max_index]*fwhm, fwhm]\n else:\n for i in xrange(len(fmi)):\n paramList+=[fm[i], rm[i]*fwhm/2*math.pi, fwhm]\n\n #fits data to a single lorentzian with a linear background. Parameters are limited\n #in how much they vary so initial choice is vital to a good fit\n\n out = lmLorFit(f, r, paramList, ctr_range = 1.2, amp_range = 2 , sig_range= 2)\n fittedfwhm = out.params['lo0_fwhm'].value\n fittedAmp = out.params['lo0_amplitude'].value\n fittedCenter = out.params['lo0_center'].value\n fittedQ=fittedCenter/fittedfwhm\n\n \"\"\"Returns output fit as will as list of important fitting parameters\"\"\"\n return out, [fittedCenter, fittedAmp, fittedfwhm, fittedQ]",
"def validate_dataset(self):\n if np.all(self.L_bpe == self.bpe_l):\n pass\n\n super(StandardDataset, self).validate_dataset()",
"def validate_totals():\n fig, (ax_prior, ax2_post) = plt.subplots(\n nrows=2, figsize=figsize(aspect=1.2))\n\n # train margin-dependent Elo model\n melo = Melo(lines=np.arange(149.5, 250.5), commutes=True, k=1e-4)\n melo.fit(league.times, league.labels1, league.labels2, league.totals)\n\n # exact prior distribution\n outcomes = melo.training_data.value[:, np.newaxis] > melo.lines\n sf = np.mean(outcomes, axis=0)\n ax_prior.plot(melo.lines, sf, color='k')\n\n # label names\n label1 = str(league.lambda1)\n label2_list = [str(lambda2) for lambda2 in league.lambda2_list]\n\n plot_args = [\n (ax_prior, melo.first_update, 'prior'),\n (ax2_post, melo.last_update, 'posterior'),\n ]\n\n for ax, time, title in plot_args:\n for n, label2 in enumerate(label2_list):\n\n lines, sf = melo._predict(time, label1, label2)\n label = r'$\\lambda_2={}$'.format(label2)\n\n if ax.is_first_row():\n ax.plot(lines[n::6], sf[n::6], 'o', zorder=2, label=label)\n\n if ax.is_last_row():\n ax.plot(lines, sf, 'o', zorder=2, label=label)\n\n sf = poisson.sf(melo.lines, int(label1) + int(label2))\n ax.plot(melo.lines, sf, color='k')\n\n leg = ax.legend(title=r'$\\lambda_1 = {}$'.format(label1),\n handletextpad=.2, loc=1)\n leg._legend_box.align = 'right'\n\n lines = np.floor(lines)\n ax.set_xticks(lines[::10])\n ax.set_xlim(lines.min(), lines.max())\n\n if ax.is_last_row():\n ax.set_xlabel('line $=$ scored $+$ allowed')\n\n ax.set_ylabel('probability to cover line')\n\n ax.annotate(title, xy=(.05, .05),\n xycoords='axes fraction', fontsize=24)\n\n set_tight(h_pad=1)",
"def test_exercise_2():\n dirname = os.path.dirname(os.path.realpath(__file__))\n df = pd.read_pickle(f\"{dirname}/material/data-consumption-function.pkl\")\n\n def construct_predicted_values(income, alpha, beta, gamma):\n return alpha + beta * income ** gamma\n\n mock_rslt = [-91.1933, 0.5691, 1.0204]\n income = df[\"realgdp\"].values\n df[\"realcons_pred\"] = construct_predicted_values(income, *mock_rslt)\n\n x = df.index.get_level_values(\"Year\")\n fig, ax = plt.subplots()\n ax.plot(x, df[\"realcons_pred\"], label=\"Predicted\")\n ax.plot(x, df[\"realcons\"], label=\"Observed\")",
"def testNegativeInput(self):\n nb.rescale_length(2.0)\n nb.rescale_length(-1.0)\n self.assertEqual(2.0, nb.rscale)",
"def run_negative():\n\tdef cy1(x, y): return y[:,0] - 2.5 # y < 2.5\n\tdef cy2(x, y): return 3 - y[:,0] # y > 3\n\tdef cx1(x, y): return -0.3 - x[:,0] # x > -0.3\n\tdef cx2(x, y): return x[:,0] - 0.3 # x < 0.3\n\n\tdef addons():\n\t\tdom = np.arange(-0.3, 0.3, 0.05)\n\t\tplt.fill_between(dom, 3.0, plt.ylim()[1], facecolor='#E41A1C', alpha=0.5, zorder=101)\n\t\tplt.fill_between(dom, plt.ylim()[0], 2.5, facecolor='#E41A1C', alpha=0.5, zorder=101)\n\n\tbnn = BNNSVGDRegressor(uid=\"bnn-negative-eg\", configfile=\"configs/bnn-negative-eg.json\")\n\tbnn.load(**toy1())\n\tbnn.add_negative_constraint((-5.0, 5.0), [cy1, cx1, cx2])\n\tbnn.add_negative_constraint((-5.0, 5.0), [cy2, cx1, cx2])\n\tbnn.infer()\n\tbnn.plot_pp(plot_title=\"Predictive Posterior Plot\", domain=np.arange(-5, 5, 0.05), ylims=(-9, 7), addons=addons)",
"def testNonLinearity():\n vis = VISinformation()\n data = np.linspace(1, vis['fullwellcapacity'], 10000)\n nonlin = CCDnonLinearityModel(data.copy())\n\n txt = '%s' % datetime.datetime.isoformat(datetime.datetime.now())\n\n fig = plt.figure(frameon=False)\n\n left, width = 0.1, 0.8\n rect1 = [left, 0.3, width, 0.65]\n rect2 = [left, 0.1, width, 0.2]\n\n ax1 = fig.add_axes(rect1, title='VIS Non-linearity Model')\n ax2 = fig.add_axes(rect2) #left, bottom, width, height\n\n ax1.axhline(y=0, c='k', ls='--')\n ax1.plot(data, (nonlin/data - 1.)*100, 'r-', label='Model')\n\n ax2.axhline(y=0, c='k', ls='--')\n ax2.plot(data, (nonlin - data)/vis['gain'], 'g-')\n\n ax1.axvline(x=97, c='k', ls='--')\n ax2.axvline(x=97, c='k', ls='--')\n\n ax1.set_xticklabels([])\n ax2.set_xlabel('Real Charge [electrons]')\n ax1.set_ylabel('(Output / Real - 1)*100')\n ax2.set_ylabel('O - R [ADUs]')\n\n ax1.set_xlim(0, vis['fullwellcapacity'])\n ax2.set_xlim(0, vis['fullwellcapacity'])\n ax1.set_ylim(-.15, .2)\n\n ax1.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax1.transAxes, alpha=0.2)\n ax1.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.0)\n plt.savefig('NonlinearityModel.pdf')\n\n ax1.set_ylim(-.1, 8)\n ax2.set_ylim(0, 2)\n ax1.set_xlim(50, 800)\n ax2.set_xlim(50, 800)\n plt.savefig('NonlinearityModel2.pdf')\n\n plt.close()",
"def _add_scaled_signal(self, signal):\n if type(signal) is np.ndarray:\n signal = signal.tolist()\n assert type(signal[0]) == float, \"scaled signal must be a float\"\n self.scaled_signal = signal",
"def calculate_signal(self):\n y = self.data.get_bar_values(self.pair[0], \"adj_close\", N=self.ols_window)\n x = self.data.get_bar_values(self.pair[1], \"adj_close\", N=self.ols_window)\n\n if y is not None and x is not None:\n if len(y) >= self.ols_window and len(x) >= self.ols_window:\n # get hedge ratio\n self.hedge_ratio = sm.OLS(y, x).fit().params[0]\n\n # get z score of residuals\n spread = y - self.hedge_ratio * x\n zscore_last = ((spread - spread.mean()) / spread.std())[-1]\n\n # calculate signals and add to events queue\n y_signal, x_signal = self.calculate_xy_signal(zscore_last)\n if y_signal is not None and x_signal is not None:\n self.events.put(y_signal)\n self.events.put(x_signal)",
"def test_uncertainties(self):\n new_wave = np.linspace(0.9, 2.1, 200)\n\n # Without uncertainties\n binned = u.spectres(new_wave, self.wave, self.flux)\n self.assertEqual(len(binned), 2)\n\n # With uncertainties\n binned = u.spectres(new_wave, self.wave, self.flux, self.flux/100.)\n self.assertEqual(len(binned), 3)",
"def test_axrline_transform(self):\n fig = plt.figure()\n ax = fig.add_subplot(projection=\"ternary\")\n with pytest.raises(ValueError):\n ax.axrline(0.5, transform=ax.transAxes)",
"def test_default_signal_nxdata(self, nexus_base):\n assert isinstance(nexus_base.default_signal, np.ndarray)",
"def dataset_handling_with_standardisation(init_data):\n #\n ##Maximum number of points = 72 , keep around 80 values for even number\n max_len = 80\n ##Fluxes, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'fluxes_0', u'fluxes_1', u'fluxes_2', u'fluxes_3', u'fluxes_4', u'fluxes_5']].values\n zp_array_flux = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux.append(n_data)\n zp_array_flux = np.array(zp_array_flux)\n print(zp_array_flux.shape)\n\n ##Fluxerrors, Standardisation is done over 1 type of feature\n data = init_data.loc[:,\n [u'fluxerrs_0', u'fluxerrs_1', u'fluxerrs_2', u'fluxerrs_3', u'fluxerrs_4', u'fluxerrs_5']].values\n zp_array_flux_error = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux_error.append(n_data)\n zp_array_flux_error = np.array(zp_array_flux_error)\n print(zp_array_flux_error.shape)\n\n ##Time, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'mjds_0', u'mjds_1', u'mjds_2', u'mjds_3', u'mjds_4', u'mjds_5']].values\n zp_array_mjds = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_mjds.append(n_data)\n zp_array_mjds = np.array(zp_array_mjds)\n print(zp_array_mjds.shape)\n\n ##Concatenating everything\n zp_data = np.c_[zp_array_flux, zp_array_flux_error, zp_array_mjds]\n\n ##Adding redshift info// Gal pos info might be necessary to remove\n zp_data = np.c_[\n zp_data, init_data.loc[:, [u'gal_b', u'gal_l', u'hostgal_photoz', u'hostgal_photoz_err', u'hostgal_specz', u'mwebv']].values]\n print(zp_data.shape)\n\n ##Load labels and convert to integer\n labels = init_data.loc[:, [u'target']].values\n labels = labels.flatten()\n labels_name = np.array([6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99])\n [np.place(labels, labels == labels_name[i], [i]) for i in range(len(labels_name))]\n\n return [zp_data, labels]",
"def train_weak_signals(data, weak_signal_data, num_weak_signal):\n\n train_data, train_labels = data['training_data']\n val_data, val_labels = data['validation_data']\n test_data, test_labels = data['test_data']\n\n n, d = train_data.shape\n\n weak_signal_train_data = weak_signal_data[0]\n weak_signal_val_data = weak_signal_data[1]\n weak_signal_test_data = weak_signal_data[2]\n\n weak_signals = []\n stats = np.zeros(num_weak_signal)\n w_sig_probabilities = []\n w_sig_test_accuracies = []\n weak_val_accuracy = []\n\n\n for i in range(num_weak_signal):\n # fit model\n model = LogisticRegression(solver = \"lbfgs\", max_iter= 1000)\n model.fit(weak_signal_train_data[i], train_labels)\n weak_signals.append(model)\n\n # evaluate probability of P(X=1)\n probability = model.predict_proba(weak_signal_val_data[i])[:, 1]\n score = val_labels * (1 - probability) + (1 - val_labels) * probability\n stats[i] = np.sum(score) / score.size\n w_sig_probabilities.append(probability)\n\n # evaluate accuracy for validation data\n weak_val_accuracy.append(accuracy_score(val_labels, np.round(probability)))\n\n # evaluate accuracy for test data\n test_predictions = model.predict(weak_signal_test_data[i])\n w_sig_test_accuracies.append(accuracy_score(test_labels, test_predictions))\n\n\n model = {}\n model['models'] = weak_signals\n model['probabilities'] = np.array(w_sig_probabilities)\n model['error_bounds'] = stats\n model['validation_accuracy'] = weak_val_accuracy\n model['test_accuracy'] = w_sig_test_accuracies\n\n return model",
"def ridges(\n x,\n scale_frequencies,\n dt=1,\n ridge_kind='amplitude',\n morse_wavelet=None,\n min_wavelet_lengths_in_ridge=None,\n trim_wavelet_lengths=None,\n frequency_max=None,\n frequency_min=None,\n mask=None,\n alpha=1/4,\n scale_axis=-2,\n time_axis=-1,\n variable_axis=None):\n original_shape = x.shape\n if variable_axis is not None:\n x = np.moveaxis(x, (variable_axis, scale_axis, time_axis), (-3, -2, -1))\n post_axis_move_shape = x.shape\n x = np.reshape(x, (-1,) + x.shape[-3:])\n if mask is not None:\n mask_scale_axis = scale_axis if scale_axis < variable_axis else scale_axis - 1\n mask_time_axis = time_axis if time_axis < variable_axis else time_axis - 1\n mask = np.moveaxis(mask, (mask_scale_axis, mask_time_axis), (-2, -1))\n mask = np.reshape(mask, (-1,) + mask.shape[-2:])\n else:\n x = np.moveaxis(x, (scale_axis, time_axis), (-2, -1))\n post_axis_move_shape = x.shape\n x = np.reshape(x, (-1,) + x.shape[-2:])\n if mask is not None:\n mask = np.moveaxis(mask, (scale_axis, time_axis), (-2, -1))\n mask = np.reshape(mask, (-1,) + mask.shape[-2:])\n\n indicator_ridge, ridge_quantity, instantaneous_frequencies = _indicator_ridge(\n x, scale_frequencies, ridge_kind, 0, frequency_min, frequency_max, mask)\n\n ridge_ids = _assign_ridge_ids(indicator_ridge, ridge_quantity, instantaneous_frequencies, alpha)\n\n # not sure why this is necessary...points not in the mask are already eliminated earlier,\n # so I'm not sure how a disallowed point can get into a ridge, but this is in the original\n # code and breaks up ridges that span disallowed points into multiple ridges with different\n # ids\n if mask is not None:\n _mask_ridges(ridge_ids, mask)\n\n min_periods = None\n if min_wavelet_lengths_in_ridge is not None:\n if morse_wavelet is None:\n raise ValueError('If min_wavelet_lengths_in_ridge is given, morse_wavelet must be specified')\n min_periods = min_wavelet_lengths_in_ridge * 2 * morse_wavelet.time_domain_width() / np.pi\n\n trim_periods = None\n if trim_wavelet_lengths is not None:\n if morse_wavelet is None:\n raise ValueError('If trim_wavelet_lengths is given, morse_wavelet must be specified')\n trim_periods = trim_wavelet_lengths * morse_wavelet.time_domain_width() / np.pi\n\n # now clean up the ridge ids according to the parameters, interpolate, and compute bias parameters\n unique_ridge_ids, ridge_id_count = np.unique(ridge_ids, return_counts=True)\n compressed_id = np.max(unique_ridge_ids) + 1\n for ridge_id, ridge_count in zip(unique_ridge_ids, ridge_id_count):\n if ridge_id < 0:\n continue\n\n indicator_ridge_id = ridge_ids == ridge_id\n # remove singleton ridge ids\n if ridge_count < 2:\n ridge_ids[indicator_ridge_id] = -1\n continue\n\n if min_periods is not None:\n ridge_batch_indices, ridge_scale_indices, ridge_time_indices = np.nonzero(indicator_ridge_id)\n ridge_len = np.sum(scale_frequencies[ridge_scale_indices] / (2 * np.pi) * dt)\n if ridge_len < min_periods:\n ridge_ids[indicator_ridge_id] = -1\n\n if trim_wavelet_lengths is not None:\n ridge_batch_indices, ridge_scale_indices, ridge_time_indices = np.nonzero(indicator_ridge_id)\n time_sort = np.argsort(ridge_time_indices)\n age = np.cumsum(scale_frequencies[ridge_scale_indices[time_sort]] / (2 * np.pi) * dt)\n age = age[np.argsort(time_sort)]\n indicator_trim = np.logical_or(age <= trim_periods, age >= np.max(age) - trim_periods)\n trim_batch_indices = ridge_batch_indices[indicator_trim]\n trim_scale_indices = ridge_scale_indices[indicator_trim]\n trim_time_indices = ridge_time_indices[indicator_trim]\n 
ridge_ids[(trim_batch_indices, trim_scale_indices, trim_time_indices)] = -1\n indicator_ridge_id = ridge_ids == ridge_id\n\n # reassign ids so that they will be contiguous (more convenient for caller)\n ridge_ids[indicator_ridge_id] = compressed_id\n compressed_id += 1\n\n # shift the ids to start at 0\n ridge_ids[ridge_ids >= 0] = ridge_ids[ridge_ids >= 0] - (np.max(unique_ridge_ids) + 1)\n\n instantaneous_frequencies = instantaneous_frequencies / dt\n x1 = np.gradient(x, axis=-1) / dt\n x2 = np.gradient(x1, axis=-1) / dt\n x2[..., 0] = x2[..., 1]\n x2[..., -1] = x2[..., -2]\n\n if len(x.shape) == 4:\n # put variable axis last for interpolation\n x = np.moveaxis(x, 1, -1)\n x1 = np.moveaxis(x1, 1, -1)\n x2 = np.moveaxis(x2, 1, -1)\n\n ridge_indices = np.nonzero(ridge_ids >= 0)\n\n x, x1, x2, instantaneous_frequencies = _ridge_interpolate(\n [x, x1, x2, instantaneous_frequencies], ridge_indices, ridge_quantity)\n\n if len(x.shape) == 2:\n instantaneous_frequencies = np.expand_dims(instantaneous_frequencies, 1)\n l2 = np.sqrt(np.sum(np.square(np.abs(x)), axis=1, keepdims=True))\n else:\n l2 = np.sqrt(np.square(np.abs(x)))\n\n # deviation vectors as in\n # Lilly and Olhede (2012), Analysis of Modulated Multivariate\n # Oscillations. IEEE Trans. Sig. Proc., 60 (2), 600--612., equations (17), (18)\n # note that instantaneous_frequencies has 1 value per ridge point, but bandwidth and curvature\n # are multivariate (if x is multivariate)\n bandwidth = (x1 - 1j * instantaneous_frequencies * x) / l2\n curvature = (x2 - 2 * 1j * instantaneous_frequencies * x1 - instantaneous_frequencies ** 2 * x) / l2\n\n result = [ridge_ids, x, instantaneous_frequencies, bandwidth, curvature]\n\n if morse_wavelet is not None:\n curvature_l2 = np.sqrt(np.sum(np.square(np.abs(curvature)), axis=1, keepdims=True)) \\\n if len(curvature.shape) == 2 else np.sqrt(np.square(np.abs(curvature)))\n total_err = (1 / 2 * np.square(np.abs(morse_wavelet.time_domain_width() / instantaneous_frequencies))\n * curvature_l2)\n result.append(total_err)\n\n expanded_shape = ridge_ids.shape if len(x.shape) == 1 else ridge_ids.shape + (x.shape[1],)\n expanded_ridge_indices = None\n for idx in range(len(result)):\n if idx == 0:\n # special case for ridge_ids\n expanded = ridge_ids\n if len(expanded_shape) == 4:\n expanded = np.tile(np.expand_dims(expanded, 3), (1, 1, 1, expanded_shape[3]))\n else:\n expanded = np.full(expanded_shape, np.nan, dtype=result[idx].dtype)\n expanded[ridge_indices] = result[idx]\n if len(expanded_shape) == 4:\n # move the variable axis back to axis=1\n expanded = np.moveaxis(expanded, -1, 1)\n # reshape back to input shape\n expanded = np.reshape(expanded, post_axis_move_shape)\n # restore axes\n expanded = np.moveaxis(expanded, (-3, -2, -1), (variable_axis, scale_axis, time_axis))\n # move variable axis to the end again: this means in our sparse representation,\n # we will get multivariate points\n expanded = np.moveaxis(expanded, variable_axis, -1)\n else:\n # reshape back to input shape\n expanded = np.reshape(expanded, post_axis_move_shape)\n # restore axes\n expanded = np.moveaxis(expanded, (-2, -1), (scale_axis, time_axis))\n if idx == 0: # ridge_ids\n if len(expanded_shape) == 4:\n expanded = expanded[..., :1] # make the ridge_ids themselves broadcast\n expanded_ridge_indices = np.nonzero(expanded[..., 0] >= 0) # ignore the variable axis for indices\n else:\n expanded_ridge_indices = np.nonzero(expanded >= 0)\n else:\n if len(expanded_shape) == 4:\n assert(len(result[idx].shape) == 2)\n if 
result[idx].shape[1] == 1:\n # this was originally broadcasting, so restore the broadcasting semantics\n expanded = expanded[..., :1]\n result[idx] = expanded[expanded_ridge_indices]\n\n return RidgeResult(\n original_shape=original_shape,\n ridge_values=result[1],\n indices=expanded_ridge_indices,\n ridge_ids=result[0],\n instantaneous_frequency=result[2],\n instantaneous_bandwidth=result[3],\n instantaneous_curvature=result[4],\n total_error=result[5] if len(result) > 5 else None,\n variable_axis=variable_axis,\n scale_axis=scale_axis)",
"def test_coefs_and_intercept__no_noise_regularization(coefs, intercept):\n X, y = _create_dataset(coefs, intercept)\n\n lads = [LADRegression(alpha=alpha, l1_ratio=0.).fit(X, y) for alpha in range(3)]\n coef_size = np.array([np.sum(lad.coef_ ** 2) for lad in lads])\n\n for i in range(2):\n assert coef_size[i] >= coef_size[i + 1]",
"def testDefaultDataScalingNotPersistant(self):\n self.chart.auto_scale.buffer = 0 # Buffer just makes the math tricky here.\n # This data should scale to the simple encoding's min/middle/max values\n # (A, f, 9).\n self.AddToChart(self.chart, [1, 2, 3])\n self.assertEqual(self.Param('chd'), 's:Af9')\n # Different data that maintains the same relative spacing *should* scale\n # to the same min/middle/max.\n self.chart.data[0].data = [10, 20, 30]\n self.assertEqual(self.Param('chd'), 's:Af9')",
"def plot_NHI_model(lls_dict, ax, lsz=12., touch=False, scl=1., csz=10.):\n from linetools.spectra.plotting import get_flux_plotrange\n\n spec, xspec, gdp, NHI, tau0 = setup_lls_fit_analy(lls_dict['spec_fil'], lls_dict['z'], lls_dict['windows'], lls_dict['NHI_mnx'])\n # Scale\n xspec.data['flux'] *= scl\n # Limits\n xmnx = [lls_dict['windows'][0][0], 940.*(1+lls_dict['z'])]\n if lls_dict['cdict']['type'] == 'Gaussian':\n ymnx = [-1*lls_dict['cdict']['sig'], lls_dict['cdict']['best']+4*lls_dict['cdict']['sig']]\n elif lls_dict['cdict']['type'] == 'Fixed':\n ymnx = [-0.1*lls_dict['cdict']['value'], 1.5*lls_dict['cdict']['value']]\n elif lls_dict['cdict']['type'] == 'Fit_const':\n ymnx = [-1*(lls_dict['cdict']['fit_val'][0]-lls_dict['cdict']['fit_val'][1]),\n 3*(lls_dict['cdict']['fit_val'][2]-lls_dict['cdict']['fit_val'][0])+\n lls_dict['cdict']['fit_val'][0]]\n elif lls_dict['cdict']['type'] == 'Fit_line':\n if gdp is None:\n gdp = (xspec.wavelength>xmnx[0]*u.AA) & (xspec.wavelength<xmnx[1]*u.AA)\n conti = lls_dict['cdict']['best'][0] + lls_dict['cdict']['best'][1]*(\n xspec.wavelength[gdp].value-lls_dict['cdict']['slope_pivot']*(1+lls_dict['z']))\n mx = np.max(conti)\n ymnx = [-0.1*mx, mx*1.3]\n else:\n raise ValueError(\"Need to setup this continuum model\")\n # Extend xmnx\n if lls_dict['cdict']['type'] in ['Fit_line', 'Fit_const']:\n xmx = 0.\n for rng in lls_dict['cdict']['analy']:\n xmx = max(xmx, rng[1])\n xmnx[1] = xmx+3.\n # Scale\n ymnx = np.array(ymnx)*scl\n # Finally\n #idx = (xspec.wavelength > xmnx[0]*u.AA) & (xspec.wavelength < xmnx[1]*u.AA)\n idx = gdp\n f_ymnx = get_flux_plotrange(xspec.flux[idx].value)\n ymnx[1] = max(ymnx[1],f_ymnx[1])\n\n\n # Axes\n #ax.xaxis.set_minor_locator(plt.MultipleLocator(0.5))\n ax.xaxis.set_major_locator(plt.MultipleLocator(20.))\n #ax.yaxis.set_minor_locator(plt.MultipleLocator(0.1))\n #ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))\n ax.set_xlim(xmnx)\n ax.set_ylim(ymnx)\n if scl == 1.:\n ax.set_ylabel(r'$f_\\lambda$ (cgs)', size=lsz)\n else:\n ax.set_ylabel(r'$f_\\lambda$ ($10^{-15}$ cgs)', size=lsz)\n\n\n # Plot data\n ax.plot(xspec.wavelength, xspec.flux, color='black', drawstyle='steps-mid',\n zorder=2)\n try:\n ax.plot(xspec.wavelength, scl*xspec.sig, ':', color='red', zorder=1)\n except ValueError:\n pdb.set_trace()\n\n # Binned\n if False:\n binsz = 5.\n binwv = np.arange(1040., 1200., binsz)*u.AA\n binspec = xspec.rebin(binwv)\n gdp = binspec.wavelength.value < 910.*(1+lls_dict['z'])\n ax.scatter(binspec.wavelength.value[gdp]+binsz/2., binspec.flux[gdp],\n color='yellow', zorder=300)\n #edgecolor='none')#, alpha=0.5)\n\n # Best continuum\n if lls_dict['cdict']['type'] == 'Gaussian':\n conti = lls_dict['cdict']['best']*np.ones_like(xspec.flux.value)\n elif lls_dict['cdict']['type'] == 'Fixed':\n conti = lls_dict['cdict']['value']*np.ones_like(xspec.flux.value)\n elif lls_dict['cdict']['type'] == 'Fit_const':\n conti = lls_dict['cdict']['fit_val'][0]*np.ones_like(xspec.flux.value)\n elif lls_dict['cdict']['type'] == 'Fit_line':\n conti = lls_dict['cdict']['best'][0] + lls_dict['cdict']['best'][1]*(\n xspec.wavelength.value-lls_dict['cdict']['slope_pivot']*(1+lls_dict['z']))\n ax.plot(xspec.wavelength, conti*scl, '--', color='green', zorder=3)\n ax.minorticks_on()\n if touch is True:\n pass\n #ax.get_xaxis().set_ticks([])\n else:\n ax.set_xlabel('Wavelength (Ang)', size=lsz)\n\n # Best Model\n mclr = 'lightblue'\n wv_rest = xspec.wavelength / (lls_dict['z']+1)\n energy = wv_rest.to(u.eV, equivalencies=u.spectral())\n tau0 = 
(10.**lls_dict['fit_NHI'][0] / u.cm**2) * ltaa.photo_cross(1, 1, energy)\n if lls_dict['analy_type'] in ['Fit_Conti', 'Vary_Conti']:\n if lls_dict['fit_NHI'][0] != lls_dict['fit_NHI'][2]:\n best_model = scl*conti * np.exp(-1*tau0)\n abs = tau0 > 0.\n ax.plot(xspec.wavelength[abs], best_model[abs], color=mclr, zorder=100)\n\n # Continuum Error\n clr_ce = 'lightgreen'\n alpha_ce = 0.4\n if lls_dict['cdict']['type'] == 'Gaussian':\n cwv = tau0 == 0.\n npix = np.sum(cwv)\n ax.fill_between(xspec.wavelength.value[cwv],\n [scl*lls_dict['cdict']['best']+lls_dict['cdict']['sig']]*npix,\n [scl*lls_dict['cdict']['best']-lls_dict['cdict']['sig']]*npix,\n color=clr_ce, alpha=alpha_ce, zorder=50)\n elif lls_dict['cdict']['type'] == 'Fit_const':\n for rng in lls_dict['cdict']['analy']:\n idx = ((xspec.wavelength > rng[0]*u.AA) &\n (xspec.wavelength < rng[1]*u.AA) &\n (xspec.sig > 0))\n gdC = np.where(idx)[0]\n ax.fill_between(xspec.wavelength.value[gdC],\n [scl*lls_dict['cdict']['fit_val'][1]]*gdC.size,\n [scl*lls_dict['cdict']['fit_val'][2]]*gdC.size,\n color=clr_ce, alpha=alpha_ce, zorder=50)\n elif lls_dict['cdict']['type'] == 'Fit_line':\n #xdb.set_trace()\n if 'fit_val' in lls_dict['cdict']:\n for rng in lls_dict['cdict']['analy']:\n idx = ((xspec.wavelength > rng[0]*u.AA) &\n (xspec.wavelength < rng[1]*u.AA) &\n (xspec.sig > 0))\n gdC = np.where(idx)[0]\n #\n sig0 = (lls_dict['cdict']['fit_val'][0][2]-lls_dict['cdict']['fit_val'][0][1])/2.\n sig1 = (lls_dict['cdict']['fit_val'][1][2]-lls_dict['cdict']['fit_val'][1][1])/2.\n sigl = np.sqrt(sig0**2 +\n sig1**2*(lls_dict['cdict']['slope_pivot']*(1+lls_dict['z'])-\n xspec.wavelength.value[gdC])**2)\n ax.fill_between(xspec.wavelength.value[gdC],\n scl*(conti[gdC] + sigl),\n scl*(conti[gdC] - sigl),\n color=clr_ce, alpha=alpha_ce, zorder=50)\n\n # Model with error (limits too)\n taulow = (10.**lls_dict['fit_NHI'][1] / u.cm**2) * ltaa.photo_cross(1, 1, energy)\n try:\n low_model = scl*conti * np.exp(-1*taulow)\n except ValueError:\n pdb.set_trace()\n tauhi = (10.**lls_dict['fit_NHI'][2] / u.cm**2) * ltaa.photo_cross(1, 1, energy)\n hi_model = scl*conti * np.exp(-1*tauhi)\n mwv = tau0 > 0.\n ax.fill_between(xspec.wavelength.value[mwv], low_model[mwv],\n hi_model[mwv], color=mclr, alpha=0.3, zorder=100)\n\n # Finish\n ax.plot(xmnx, [0.,0.], '--', color='gray')",
"def survey_detection_effieciency():\n baseline = 365 #days\n # TODO: Rerun with 1024, add praesepe line\n max_num_observations = 1024\n min_num_observations = 16\n num_clumps = 4\n num_iterations = 10000\n \n if not os.path.exists(\"data/aas_survey_detection_efficiency.pickle\"):\n data_dict = {\"clumpy\" : {1. : [], 10. : [], 100 : []}, \"uniform\" : {1. : [], 10. : [], 100 : []}}\n \n for timescale in [1., 10., 100.]:\n for sampling in [\"clumpy\", \"uniform\"]: #, \"random\"]:\n if sampling == \"random\":\n mjd = np.random.random(max_num_observations)*baseline\n elif sampling == \"clumpy\":\n sparse_samples = np.random.random(max_num_observations/2)*baseline\n \n clumps = []\n days = []\n sum = 0.\n pts_per_clump = max_num_observations / 2 / num_clumps\n for ii in range(num_clumps):\n day = np.random.randint(365)\n if day in days: continue\n \n days.append(day)\n clumpy_samples = np.linspace(day+0.1, day+0.6, pts_per_clump)\n clumps.append(clumpy_samples)\n \n clumps.append(sparse_samples)\n mjd = np.concatenate(tuple(clumps))\n \n plt.plot(mjd, [1.]*len(mjd), 'ro', alpha=0.4)\n plt.show()\n \n elif sampling == \"uniform\":\n mjd = np.linspace(0., baseline, max_num_observations)\n \n for jj in range(num_iterations):\n lc = random_praesepe_light_curve()\n if len(lc.mag) < 100: continue\n \n dupe_mags = np.array(lc.mag*15)\n dupe_err = np.array(list(lc.error)*15)\n \n shuffled_idx = np.arange(0, len(dupe_mags))\n np.random.shuffle(shuffled_idx)\n \n mags = dupe_mags[shuffled_idx]\n err = dupe_err[shuffled_idx]\n \n sim_light_curve = simu.SimulatedLightCurve(mjd=mjd, mag=mags[:len(mjd)], error=err[:len(mjd)])\n sim_light_curve.addMicrolensingEvent(tE=timescale)\n #sim_light_curve.plot()\n \n delta_chi_squareds = []\n sim_mjd = sim_light_curve.mjd\n sim_mag = sim_light_curve.mag\n sim_err = sim_light_curve.error\n while True:\n if len(sim_mjd) < min_num_observations: break\n \n dcs = simu.compute_delta_chi_squared((sim_mjd, sim_mag, sim_err), force_fit=True)\n delta_chi_squareds.append(dcs)\n \n prune = np.arange(len(sim_mjd))\n np.random.shuffle(prune)\n prune = prune[::2]\n sim_mjd = sim_mjd[prune]\n sim_mag = sim_mag[prune]\n sim_err = sim_err[prune]\n \n data_dict[sampling][timescale].append(delta_chi_squareds)\n \n f = open(\"data/aas_survey_detection_efficiency.pickle\", \"w\")\n pickle.dump(data_dict, f)\n f.close()\n \n f = open(\"data/aas_survey_detection_efficiency.pickle\", \"r\")\n data_dict = pickle.load(f)\n \n # Plotting stuff\n plt.figure(figsize=(15,15))\n dcs_cutoff = 300.\n num_observations = [2**x for x in range(int(np.log2(max_num_observations)), int(np.log2(min_num_observations))-1, -1)]\n linestyles = {\"uniform\" : \"--\", \"clumpy\" : \"-\"}\n linecolors = {1. : \"k\", 10. : \"r\", 100. 
: \"c\"}\n for sampling in data_dict.keys():\n for timescale in data_dict[sampling].keys():\n data = np.array(data_dict[sampling][timescale])\n \n efficiencies = []\n for col,num_obs in enumerate(num_observations):\n efficiencies.append(np.sum(data[:,col] > dcs_cutoff) / len(data[:,col]))\n \n plt.plot(np.log2(num_observations), efficiencies, ls=linestyles[sampling], color=linecolors[timescale], label=r\"$t_E={}$ day, {} sampling\".format(int(timescale), sampling), lw=3)\n \n #plt.axvline(np.log2(625.), c=\"g\", ls=\"--\", lw=2, label=\"PTF Praesepe fields\")\n \n plt.xlabel(\"Number of Observations / 1 year\", size=label_font_size)\n plt.ylabel(r\"Detection Efficiency $\\mathcal{E}(t_E)$\", size=label_font_size)\n plt.title(\"Simulated Detection Efficiency for\\nDifferent Sampling Patterns\", size=title_font_size)\n \n # Change tick label size\n ax = plt.gca()\n for label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontsize(tick_font_size)\n \n ax.set_xticklabels(num_observations[::-1])\n \n legend = plt.legend(loc=\"upper left\", shadow=True, fancybox=True)\n legendtext = legend.get_texts()\n plt.setp(legendtext, fontsize=tick_font_size) # the legend text fontsize\n plt.tight_layout()\n #plt.show()\n plt.savefig(\"plots/aas_survey_detection_efficiency.png\")",
"def add_signal(signal_array, json_file, indent_level, scale):\n\n logger.debug('+ Raw signal:{0}'.format(signal_array))\n\n initial_val = signal_array[1]\n # If no intial condition is defined give it an X, saves headache later. \n # issue a warning.\n if ( not(re.search('^[01xX]', signal_array[1])) ):\n signal_array[1] = str(scale) +'X'\n logger.warning(\n '+ Initial condition not defined for {0}. Force invalid \\'x\\''\n .format(signal_array[0])) \n for i,time_step in enumerate(signal_array[1:]):\n\n logger.debug('|---:{0} {1}'.format(i, time_step))\n\n if (re.search('X|x',time_step)):\n signal_array[i+1] = str(scale) + 'X'\n # FIXME: New not in documentation.\n # This is added to represent glitchiness or uncertanity.\n elif (re.search('G',time_step)):\n signal_array[i+1] = str(scale*.03) + 'T' + str(scale*.97) + 'T'\n # FIXME: New not in documentation\n # this is a simple encoding. 0.x will indicate an undef to 1 transition\n # which is not full cycle, and -0.x will show a undef to 0 transition\n # can potenitally be expanded to use x to decide proportion.\n # The combo indication is fixed to 0.25\n elif (re.search(r'0.\\d',time_step)):\n if (re.search(r'-0.\\d',time_step)):\n signal_array[i+1] = str(0.25*scale) + 'U' + str(0.75*scale) + 'L'\n else:\n signal_array[i+1] = str(0.25*scale) + 'U' + str(0.75*scale) + 'H'\n elif (re.search('0',time_step)):\n signal_array[i+1] = str(scale) + 'L'\n elif (re.search('1',time_step)):\n signal_array[i+1] = str(scale)+'H'\n elif (re.search('\\|', time_step)):\n signal_array[i+1] = 'S'\n temp = re.sub(r'\\d+([UDXLHC]).*',r'\\1',signal_array[i])\n signal_array[i+1] = ';[dotted]2' + temp + ';'\n else:\n # allow us to deal with a value change format by searching\n # backwards to find the last change from the current time step. The\n # search is to be performed on the waveform rendered so far.\n signal_array[i+1] = restore_after_spacer(signal_array[i],signal_array[i-1]) \n\n return signal_array",
"def test_plot_lin(self):\n pname = os.path.join(self.datadir, 'monol_testA_E3-50_pds_fit') + \\\n HEN_FILE_EXTENSION\n cname = os.path.join(self.datadir, 'monol_test_E3-50_cpds_fit') + \\\n HEN_FILE_EXTENSION\n lname = os.path.join(self.datadir, 'monol_testA_E3-50_lc') + \\\n HEN_FILE_EXTENSION\n hen.plot.main([pname, cname, lname, '--noplot', '--xlin', '--ylin',\n '-o', 'dummy.qdp'])\n hen.plot.main([lname, '--noplot',\n '--axes', 'time', 'counts', '--xlin', '--ylin',\n '-o', 'dummy.qdp'])",
"def createSignalModelExponential(data):\n print \"Creating model\"\n switchpoint = DiscreteUniform('switchpoint', lower=0, upper=len(data))\n \n noise_sigma = HalfNormal('noise_sigma', tau=sigToTau(.01))\n exp_sigma = HalfNormal('exp_sigma', tau=sigToTau(.05))\n \n #Modeling these parameters this way is why wf needs to be normalized\n exp_rate = Uniform('exp_rate', lower=0, upper=.1)\n exp_scale = Uniform('exp_scale', lower=0, upper=.1)\n \n timestamp = np.arange(0, len(data), dtype=np.float)\n \n @deterministic(plot=False, name=\"test\")\n def uncertainty_model(s=switchpoint, n=noise_sigma, e=exp_sigma):\n ''' Concatenate Poisson means '''\n out = np.empty(len(data))\n out[:s] = n\n out[s:] = e\n return out\n \n @deterministic\n def tau(eps=uncertainty_model):\n return np.power(eps, -2)\n \n## @deterministic(plot=False, name=\"test2\")\n## def adjusted_scale(s=switchpoint, s1=exp_scale):\n## out = np.empty(len(data))\n## out[:s] = s1\n## out[s:] = s1\n## return out\n#\n# scale_param = adjusted_scale(switchpoint, exp_scale)\n\n @deterministic(plot=False)\n def baseline_model(s=switchpoint, r=exp_rate, scale=exp_scale):\n out = np.zeros(len(data))\n out[s:] = scale * ( np.exp(r * (timestamp[s:] - s)) - 1.)\n \n# plt.figure(fig.number)\n# plt.clf()\n# plt.plot(out ,color=\"blue\" )\n# plt.plot(data ,color=\"red\" )\n# value = raw_input(' --> Press q to quit, any other key to continue\\n')\n\n return out\n\n baseline_observed = Normal(\"baseline_observed\", mu=baseline_model, tau=tau, value=data, observed= True )\n return locals()",
"def test_inverse_transform_many_segments(example_tsds: TSDataset) -> None:\n trend_transform = _TrendTransform(\n in_column=\"target\",\n change_point_model=Binseg(),\n detrend_model=LinearRegression(),\n n_bkps=5,\n out_column=\"test\",\n )\n example_tsds.fit_transform([trend_transform])\n original_df = example_tsds.df.copy()\n example_tsds.inverse_transform()\n assert (original_df == example_tsds.df).all().all()",
"def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in self.data.keys():\n self.data[key] /= np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of data\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"",
"def test_real(n_epochs=1000):\n n_hidden = 10\n n_in = 5\n n_out = 3\n n_steps = 10\n n_seq = 10 # per batch\n n_batches = 10\n\n np.random.seed(0)\n # simple lag test\n seq = np.random.randn(n_steps, n_seq * n_batches, n_in)\n targets = np.zeros((n_steps, n_seq * n_batches, n_out))\n\n targets[1:, :, 0] = seq[:-1, :, 3] # delayed 1\n targets[1:, :, 1] = seq[:-1, :, 2] # delayed 1\n targets[2:, :, 2] = seq[:-2, :, 0] # delayed 2\n\n targets += 0.01 * np.random.standard_normal(targets.shape)\n\n model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,\n learning_rate=0.01, learning_rate_decay=0.999,\n n_epochs=n_epochs, batch_size=n_seq, activation='tanh',\n L2_reg=1e-3)\n\n model.fit(seq, targets, validate_every=100, optimizer='bfgs')\n\n plt.close('all')\n fig = plt.figure()\n ax1 = plt.subplot(211)\n plt.plot(seq[:, 0, :])\n ax1.set_title('input')\n ax2 = plt.subplot(212)\n true_targets = plt.plot(targets[:, 0, :])\n\n guess = model.predict(seq[:, 0, :][:, np.newaxis, :])\n\n guessed_targets = plt.plot(guess.squeeze(), linestyle='--')\n for i, x in enumerate(guessed_targets):\n x.set_color(true_targets[i].get_color())\n ax2.set_title('solid: true output, dashed: model output')",
"def test_get_reg_pred_prices(self):\n regressor = RegressionPrediction.PredictionTrainer()\n self.assertTrue(isinstance(regressor.get_reg_pred_prices(), Figure))"
] | [
"0.5300914",
"0.5199778",
"0.50814444",
"0.50585854",
"0.5034965",
"0.50297797",
"0.5011559",
"0.48441267",
"0.4837573",
"0.48363823",
"0.48164424",
"0.4816253",
"0.4811083",
"0.47844577",
"0.4780375",
"0.47778818",
"0.47764048",
"0.47515702",
"0.47412965",
"0.473127",
"0.46982455",
"0.46504527",
"0.46502888",
"0.46370402",
"0.4624908",
"0.46243915",
"0.4620983",
"0.4612597",
"0.46081036",
"0.46043852"
] | 0.623664 | 0 |
function to read gcode as raw text | def read_gcode(filename):
##TODO: parse/read file line by line for memory considerations
with open(filename, 'r') as fh_in:
gcode_raw = fh_in.readlines()
gcode_raw = [gcode.rstrip(';\n') for gcode in gcode_raw] # stripping off trailing semicolon and newlines
return gcode_raw | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gcode_interpreter(file_gcode):\n\tgcode_raw = read_gcode(file_gcode)\n\n\tdim = (11, 11)\n\tarray = convert_gcode_to_array(gcode_raw, dim)\n\n\tprint array",
"def gcode_text(self):\n return os.linesep.join(map(str, self.gcode))",
"def _read_grammar(filename):\r\n with open(filename, 'r') as file:\r\n data = file.read()\r\n\r\n return data",
"def load_input(self, path):\n f = codecs.open(path, 'r', 'utf-8')\n raw_text = f.read()\n return raw_text",
"def parse(source_code):\n tokens = tokenize(source_code)\n return read(tokens)",
"def code() -> str:\n return \"\"\"\n G91 G17\n G0 Y10 X-10\n G0 Y0 X-5\n G0 Y5 X0\n G0 Y0 X5\n G0 Y0 X-5\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n G0 Y5 X0\n G3 Y5 X-5 J0 I-5\n G0 Y-5 X0\n G0 Y-10 X10\n G0 Y0 X-5\n G0 Y-15 X-15\n G0 Y0 X5\n G0 Y5 X0\n G0 Y0 X-5\n G0 Y-5 X0\n G0 Y5 X0\n G2 Y5 X5 J0 I5\n G0 Y0 X5\n G0 Y-5 X0\n G2 Y-5 X-5 J0 I-5\n G0 Y5 X0\n G0 Y10 X10\n G0 Y0 X-30\n G3 Y0 X-10 J0 I-5\n G3 Y0 X10 J0 I5\n\n G0 Y0 X5\n G3 Y5 X5 J5 I0\n G3 Y10 X-10 J0 I-10\n G3 Y-5 X-5 J-5 I0\n G0 Y-5 X0\n\n G0 Y5 X0\n G3 Y5 X-5 J0 I-5\n G3 Y-10 X-10 J-10 I0\n G3 Y-5 X5 J0 I5\n G0 Y0 X5\n\n G0 Y0 X-5\n G3 Y-5 X-5 J-5 I0\n G3 Y-10 X10 J0 I10\n G3 Y5 X5 J5 I0\n G0 Y5 X0\n\n G0 Y-5 X0\n G3 Y-5 X5 J0 I5\n G3 Y10 X10 J10 I0\n G3 Y5 X-5 J0 I-5\n G0 Y0 X-5\n \"\"\"",
"def read_raw(file_path):\n file = open(file_path, 'rb')\n content = file.read()\n file.close()\n return content",
"def read_codes(self, filename=\"static/codes.txt\"):\n with open(filename, \"r\") as f:\n contents = f.read().splitlines()\n code = contents[0]\n \n return code",
"def read_text(self, encoding):\n with self.open(\"r\", encoding=encoding) as f:\n return f.read()",
"async def decode(ctx, code: Option(str, \"Brainfuck code to decode into text\")):\n decoded = bot.brainfuck.decode(code)\n await send_code(ctx, decoded.text, lang=\"txt\", filename=\"text.txt\")",
"def get_text(data_path):\n\tp = get_full_path(data_path)\n\tf = open(p, 'r')\n\tcontent = f.read()\n\tf.close()\n\treturn content",
"def getRawText(self):\n return self.graph.get(\"__rawTxt\", '')",
"def read_code(filename):\n f = open('files/%s.code' % filename)\n string = f.read()\n tokens = scan(string)\n ret = parse_code(tokens)\n return ret",
"def read_file(filename):\n f = open(filename)\n code = f.read()\n f.close()\n return code",
"def read_gcode(gcode_string, tolerance, optimize=False):\n reader = GcodeReader()\n job = reader.parse(gcode_string)\n if optimize:\n pass\n return job",
"def getraw_encoded(self):\n # update data model\n self.dataModel.setTestData( testData=self.srcEditor.text() )\n\n # return raw file\n return self.dataModel.getRaw()",
"def read():\n # TODO",
"def read(self):\r\n self.set_generator() # rearm\r\n total = self.width * self.height * 3\r\n if total < 32:\r\n raise Exception(\"Text not found.\")\r\n size = chunk = string = str()\r\n i = 0 # for(i=0; true; ++i)\r\n while True:\r\n (wp, hp, ch) = self.generator.next() # i byte\r\n values = self.im.getpixel((wp, hp))\r\n tmp = self.binary(values[ch], 1)\r\n if i < 32: # it's lame but I prefer string/bitset\r\n size += tmp[7]\r\n if i == 31:\r\n size = int(size, 2)\r\n if size < 1 or (size + 32) > total:\r\n raise Exception(\"Text not found.\")\r\n elif i < size + 32:\r\n chunk += tmp[7]\r\n if len(chunk) == 8:\r\n string += chr(int(chunk, 2))\r\n chunk = str()\r\n else:\r\n break\r\n i += 1\r\n if self.useAES and self.aes:\r\n if len(string) % 16 != 0:\r\n raise Exception(\"Text not encrypted.\")\r\n string = self.aes.decrypt(string).rstrip(chr(0))\r\n string.decode() # rise an exception if invalid\r\n return string",
"def read_data() -> str:\n with open('input.txt') as input_file:\n return input_file.read()",
"def get_text_hook(raw):\n soup = bs4.BeautifulSoup(quopri.decodestring(raw), features=\"lxml\")\n return soup.text",
"def raw_body(self):\n return file_ops.read_utf8(self.raw_file_name)",
"def read_source (filename):\n source = io.BytesIO ()\n encoding = 'utf-8'\n encoding_pattern = re.compile (b'coding[:=]\\s*([-\\w.]+)') # PEP: 0263\n\n with io.open (filename, 'rb') as stream:\n for line in stream:\n if line.startswith (b'#'):\n match = encoding_pattern.search (line)\n if match:\n encoding = match.group (1).decode ()\n source.write (b'\\n')\n continue\n source.write (line)\n\n if PY2:\n # unicode misbehave when creating traceback\n return source.getvalue ()\n else:\n return source.getvalue ().decode (encoding)",
"def load_text(path):\n\n with open(path) as f:\n read_text = f.read().splitlines()\n \n return np.array(read_text)",
"def read_raw_from_file(fname):\n with open(fname) as fh:\n content = fh.read()\n return parse_raw_string(content)",
"def raw_text(self):\n return self._raw_text",
"def read_file(input_file):\n\n\ttext = open(input_file)\n\traw = text.read()\n#\tdecoded = raw.decode('utf8').encode('ascii', 'replace')\n\tdecoded = raw.decode('utf8')\n\n\t#moves this through the html cleaner\n\ttext = plaintext(decoded)\n\n\treturn text",
"def striptext(self, rawt):\n ret = ''\n iscomm = False\n it = iter(rawt)\n for char in it:\n if char in self.control.escape and not iscomm:\n ret += char + next(it)\n if ret[-2:] == '\\\\\\n': ret = ret[:-2]\n print(ret)\n elif char in self.control.comment:\n iscomm = True\n # iscomm = not iscomm\n elif char in self.control.linebreak:\n if char in self.control.delims['endline'][0] and ret and \\\n ret[-1] not in self.control.delims['endline'][0]:\n ret += self.control.delims['endline'][0][0]\n iscomm = False\n else:\n if not iscomm:\n ret += char\n if '@eof' in ret:\n ret = ret[0:ret.find('@eof')]\n if not ret or ret[-1] not in self.control.delims['endline'][0]:\n ret += self.control.delims['endline'][0][0]\n return ret",
"def readfile(filename):\n with open(filename, encoding=\"utf-8\") as file:\n raw = file.read()\n return raw",
"def read_text(self, name: str) -> str:\n raise NotImplementedError()",
"def get_parsed_text(blob):\n return blob.parse()"
] | [
"0.654129",
"0.64190435",
"0.63276243",
"0.6285701",
"0.6201393",
"0.59650147",
"0.58513474",
"0.58321035",
"0.5811857",
"0.5757731",
"0.57542074",
"0.5736658",
"0.5722496",
"0.5673059",
"0.5661055",
"0.5611945",
"0.55907106",
"0.55634713",
"0.5559533",
"0.5539514",
"0.5537415",
"0.54927444",
"0.5434962",
"0.5425809",
"0.5398855",
"0.5397864",
"0.53944504",
"0.5390463",
"0.5383458",
"0.5345631"
] | 0.68218833 | 0 |
extract X and Y position from gcode "G" letter address | def get_xy_position(gcode_command):
pattern = re.compile(r"^G\dX(\d+(?:\.\d*)?|\.\d+)Y(\d+(?:\.\d*)?|\.\d+)F(\d+)$") # using non-capturing groups
m = pattern.match(gcode_command) # no need for re.search() because we have a complete pattern
(x, y) = m.group(1,2)
try:
x = float(x)
y = float(y)
except:
raise Exception("Could not convert X and Y to floats. X={}, Y={}".format(x,y))
x_pixel = round(x/X_STEP) # round to nearest int
y_pixel = round(y/Y_STEP*12)
return (x_pixel, y_pixel) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def coordinates(self):\n logging.debug('Get coordinates from text')\n result = []\n blocks = self.del_comm(blocks=True)\n coor = re.compile('[FXYZ][+-]?[0-9]+(\\.[0-9]+)?')\n for line in blocks:\n coord_line = False\n comm = line.split()\n temp = []\n for c in comm:\n if c == 'G1':\n coord_line = True\n if coord_line and coor.match(c):\n temp.append(c)\n if temp:\n result.append(temp)\n return result",
"def Write_GCode(self, PostPro):\n return PostPro.rap_pos_xy(self)",
"def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0",
"def text_to_position(string, is_black):\r\n # print(\"string: \", string)\r\n y = int(string[1]) - 1\r\n x = ord(string[0]) - ord('a')\r\n if is_black: #under perspective of black player, the position is flipped\r\n x = 7 - x\r\n y = 7 - y\r\n # print(\"x,y: \", (x, y))\r\n return (x, y)",
"def _get_gpos ( self ):\n bpos = mgrid[self.x_min:self.x_max:self.nxsteps*1j, \\\n self.y_min:self.y_max:self.nysteps*1j, \\\n self.z:self.z+0.1]\n bpos.resize((3, self.size))\n return bpos",
"def _get_gpos ( self ):\n bpos = mgrid[self.x_min:self.x_max:self.nxsteps*1j, \\\n self.y_min:self.y_max:self.nysteps*1j, \\\n self.z_min:self.z_max:self.nzsteps*1j]\n bpos.resize((3, self.size))\n return bpos",
"def handle_position(data: bytes) -> Tuple[bytes, str]:\n x, y, z = struct.unpack('fff', data[0:3 * 4])\n return data[20:], f'Current Position (x,y,z): {x} {y} {z}'",
"def get_DNApos_fromcoords(self,x,y):\n\n # Are we close to the DNA sequence?\n if abs(y-self.seq_row)>10:\n return None\n\n # ok, DNA it is\n pos=int(float(x-self.seq_xstart+4.0)/self.base_scale.get())\n return pos",
"def get_position():\n\n return character['Position']",
"def __getxy(x1, y1, x2, y2):\n\t\treturn x1*27+y1*9+x2*3+y2",
"def _get_charindex(self, x, y):\r\n verts = self.shapes[0].buf[0].vertices\r\n x = x - self.x + verts[2][0]\r\n y = y - self.y + verts[0][1]\r\n nv = len(verts)\r\n for i in range(0, nv, 4):\r\n vtr = verts[i] # top right\r\n vbl = verts[i + 2] # bottom left\r\n if x >= vbl[0] and x < vtr[0] and y >= vbl[1] and y < vtr[1]:\r\n i = int(i / 4)\r\n c_i = self.c_lookup[i]\r\n if c_i == (len(self.txt) - 1) or self.c_lookup[i + 1] > c_i + 1:\r\n if (vtr[0] - x) < (x - vbl[0]):\r\n c_i += 1\r\n return c_i\r\n return len(self.txt)",
"def address2xys(address):\n x = (address & 0xFE) >> 1\n x = 127 - x\n y = (address & 0x7F00) >> 8\n s = address & 1\n if s == 0:\n s = -1 \n return x, y, s",
"def convert_coordinates(coordinates):\r\n row = coordinates[1] - 1\r\n column = letters.index(coordinates[0])\r\n return column, row",
"def getxy(dd):\n x = dd[0] & 31\n y = (dd[0] >> 5) + ((dd[1] & 1) << 3)\n return (8 * x, 8 * y)",
"def lowerPen(gcode):\r\n gcode.append(\"M300 S43\")\r\n #gcode.append(\"G0 Z0\")\r",
"def get_gsyn_plot_address(p): \n return hex(int('0x%s' % pacman.pacman_configuration.get('memory_addresses', 'i_hex'), 16) + (int('0x400000', 16))*(p-1))[2:]",
"def parse_gpx_file(gpx_file_location):\n points = \"\\\"latitude\\\",\\\"longitude\\\",\\\"time\\\"\\n\"\n gpx = gpxpy.parse(gpx_file_location)\n for track in gpx.tracks:\n for segment in track.segments:\n for point in segment.points:\n points = points + f\"{point.latitude},{point.longitude},{point.time}\\n\"\n return points",
"def parse_pos(self, pos):\r\n\r\n column = ord(pos[0]) - 97\r\n if len(pos) == 2:\r\n row = ord(pos[1]) - 49\r\n else:\r\n row = 9\r\n return [row, column]",
"def get_character_position(character: dict) -> list:\r\n return character['Position']",
"def read_gpx(self,gpx_file):\r\n lat = []\r\n lon = []\r\n ele = []\r\n #print('here')\r\n with open(gpx_file,'r') as file:\r\n for line in file:\r\n if \"<trkpt lat\" in line:\r\n thislat, thislon = re.findall(r'[-+]?\\d*\\.\\d+|\\d+',line)\r\n lat.append(float(thislat))\r\n lon.append(float(thislon))\r\n elif \"<ele>\" in line:\r\n thisele = re.findall(r'[-+]?\\d*\\.\\d+|\\d+',line)\r\n #print(\"thisline\",line,\"=== \",thisele[0])\r\n ele.append(float(thisele[0]))\r\n\r\n\r\n return (lat,lon,ele)",
"def handle_instructions(instructions):\n row_instructions = instructions[0:7]\n column_instructions = instructions[7:10]\n row = bisect(row_instructions, (0, 127), \"F\", \"B\")\n column = bisect(column_instructions, (0, 7), \"L\", \"R\")\n return row, column",
"def find_gpas(s):\n \"*** YOUR CODE HERE ***\"",
"def parse_gce(self):\r\n length = parse_int(self.next_byte(), 'big')\r\n bts = self.next_bytes(length)\r\n delay = parse_int(bts[2:], 'little')\r\n bg_ind = parse_int(bts[-1:], 'big')\r\n self.next_byte()\r\n return delay, bg_ind",
"def __coding_coordinate(self):\n region1 = self.long_side_len\n region2 = self.short_side_len\n length = len(self.seq)\n if self.direction == '+':\n a_s = 0\n a_e = region2\n b_s = self.length - region1\n b_e = self.length - 1\n elif self.direction == '-':\n a_s = 0\n a_e = region1\n b_s = self.length - region2\n b_e = self.length - 1\n return (a_s, a_e, b_s, b_e)",
"def get_position(self):\n return self._find_gnx_node(self.gnx)",
"def gpgga_convert(line):\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[6] == '0' :\r\n return\r\n fix = ''\r\n if gps[6] == '1':\r\n fix = 'GPS fix'\r\n elif gps[6] == '2':\r\n fix = 'DGPS fix'\r\n elif gps[6] == '4':\r\n fix = 'RTK Fix coordinate (centimeter precision)'\r\n elif gps[6] == '5':\r\n fix = 'RTK Float (decimeter precision)'\r\n #utc = gps[1][0:2] + ':' + gps[1][2:4] + ':' + gps[1][4:6]\r\n lat = ddm_dd_convert(gps[2], gps[3])\r\n long = ddm_dd_convert(gps[4], gps[5]) \r\n return [lat, long, fix]",
"def location_to_pos(self,row, col):\r\n\r\n pos_row = str(row + 1)\r\n pos_col = chr(col + 97)\r\n return pos_col + pos_row",
"def convert_gcode_to_array(gcode_raw, dim=(HEIGHT, WIDTH)):\n\t\n\tarray = np.zeros(dim, dtype=int)\n\tprint dim\n\t(x,y) = (0,0)\n\tfor gcode_command in gcode_raw:\n\t\tletter_address = gcode_command[0:2]\n\t\tif letter_address == \"G1\":\n\t\t\t(x, y) = get_xy_position(gcode_command)\n\t\telif letter_address == \"M4\":\n\t\t\tcontinue\n\t\telif letter_address == \"M7\":\n\t\t\tfiring_pattern = parse_gcode_M7(gcode_command)\n\t\t\tstrip_array = np.array(list(firing_pattern), dtype=int)\n\t\t\t#print strip_array\n\n\t\t\ttry:\n\t\t\t\t#array[y, x:x+13] += strip_array\n\t\t\t\tarray[y:y+13, x] += strip_array\n\t\t\t\tprint \"strip_array\", strip_array\n\t\t\texcept ValueError: # ValueError: operands could not be broadcast together with shapes XXX\n\t\t\t\tprint \"strip_array\", strip_array\n\t\t\t\t#len_array = len(array[x:x+13, y])\n\t\t\t\tlen_array = len(array[y:y+13, x])\n\t\t\t\t#array[y, x:x+13] += strip_array[:len_array]\n\t\t\t\tarray[y:y+13, x] += strip_array[:len_array]\n\t\t\t\t#print array[x:x+13, y]\n\t\t\t\tprint array[y:y+13, x]\n\t\t\t\tprint strip_array[:len_array]\n\t\t\t\tprint \"array\", array\n\t\telse:\n\t\t\traise Exception(\"Encountered unexpected gcode letter address: [{}]\".format(letter_address))\n\t\tprint (x,y)\n\treturn array",
"def coordinates(latitude, longitude):\r\n location = geolocator.reverse(latitude + \", \" + longitude)\r\n data = location.raw\r\n data = data['address']\r\n state_code = data['state']\r\n return state_code",
"def _EAN_coords_to_board_coords(EAN_move: str) -> (int, int):\n assert EAN_move[0] in \"abcdefgh\" and EAN_move[1] in \"12345678\", \"failed to get \" + EAN_move\n\n\n col = ord(EAN_move[0]) - ord('a')\n row = 8 - int(EAN_move[1])\n return row, col"
] | [
"0.61152065",
"0.61036426",
"0.59763837",
"0.5961071",
"0.59122723",
"0.5863732",
"0.5756867",
"0.57562524",
"0.5756186",
"0.5736474",
"0.5735927",
"0.57350487",
"0.56665355",
"0.5650005",
"0.56383294",
"0.5629794",
"0.5627465",
"0.5599105",
"0.5595129",
"0.5590702",
"0.5586012",
"0.55704457",
"0.55680186",
"0.5565761",
"0.55615944",
"0.5551766",
"0.5549421",
"0.5549177",
"0.5539929",
"0.55350643"
] | 0.73354113 | 0 |
Get custom template loader | def template_loader(self):
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_loader_vanilla():\n return template_loader_vanilla",
"def get_template(loader, template_name):\n return loader.get_template(template_name)",
"def create_template_loader(self, template_path):\n raise NotImplementedError()",
"def loader(self):\n return self.loader_class()",
"def get_loader(self) -> BaseLoader:\n return FileSystemLoader(getattr(self, \"cwd\"))",
"def loader(self):\n return self._loader",
"def get_hierarchy_loader(directories):\n template_loaders = OrderedDict()\n for app_name, template_dir in directories:\n # Pull FileSystemLoader from cache if it already exists for this directory,\n # or instanciate it if not\n if template_dir not in file_system_loaders:\n loader = FileSystemLoader(template_dir)\n file_system_loaders[template_dir] = loader\n else:\n loader = file_system_loaders[template_dir]\n template_loaders[app_name] = loader\n return HierarchyLoader(template_loaders)",
"def get_resource_loader(self):\n return self.game.resource_loader",
"def test_loader(cls):\r\n return _test_loader_factory(cls)",
"def create_loader(self, *args, **kwargs):\n def loader():\n return self.load(*args, **kwargs)\n return loader",
"def loader(self):\r\n return self._loader",
"def load(self, environment, name, globals=None):\n code = None\n if globals is None:\n globals = {}\n\n # first we try to get the source for this template together\n # with the filename and the uptodate function.\n source, filename, uptodate = self.get_source(environment, name)\n code = environment.compile(source, name, filename)\n return environment.template_class(name, environment.config, code)",
"def load(self, spec):\n if spec.template is not None:\n return self.loader.unicode(spec.template, spec.template_encoding)\n\n path = self._find(spec)\n\n return self.loader.read(path, spec.template_encoding)",
"def load_template(format_: str) -> Template:\n template_path = Path(TEMPLATES_PATH).joinpath(f'{format_}{TEMPLATE_SUFFIX}')\n template = Template(template_path.read_text())\n return template",
"def load(self, filename, relative_to=None, cls=None, encoding=None):\n # TODO: get the template extension from the config!!\n if not filename.endswith('.html'):\n filename = tg.config['pylons.app_globals'\n ].dotted_filename_finder.get_dotted_filename(\n template_name=filename,\n template_extension='.html')\n\n return TemplateLoader.load(self, filename,\n relative_to=relative_to, cls=cls, encoding=encoding)",
"def get_template(self, template):\n\n\n env = Environment(\n loader=FileSystemLoader('templates')\n )\n return env.get_template(template)",
"def generate_loader(mode, symbols, definition, linker):\n if \"vanilla\" == mode:\n loader_content = generate_loader_vanilla()\n elif \"dlfcn\" == mode:\n loader_content = generate_loader_dlfcn(symbols, linker)\n else:\n loader_content = generate_loader_hash(symbols)\n ret = template_loader % (definition, loader_content)\n if \"maximum\" != mode:\n ret += template_und_symbols\n return ret",
"def get_parser(project, layoutdir):\n for parser_class in PARSERS:\n parser = parser_class(project, layoutdir)\n if parser.can_load():\n return parser\n raise ValueError(\"No loader available for '{0}'.\".format(project))",
"def loader(self):\n if \"loader\" in self.recipe:\n return self.recipe[\"loader\"]\n else:\n raise ValueError(\"No loader defined!\")",
"def load_renderer(rname: str, config: ConfigManager) -> Renderer:\n try:\n return getattr(importlib.import_module('plasTeX.Renderers.'+rname), 'Renderer')()\n except ImportError:\n pass\n\n for plugin in config['general']['plugins'] or []:\n try:\n return getattr(importlib.import_module(plugin + '.Renderers.' + rname),\n 'Renderer')()\n except ImportError:\n pass\n\n try:\n return getattr(import_file(Path(rname)), 'Renderer')()\n except (ImportError, FileNotFoundError):\n raise ImportError('Could not import renderer \"%s\". Make sure that it is installed correctly, and can be imported by Python.' % rname)",
"def test_filesystem_loader(self):\n\n self.assertEqual(\n list(\n template_finder.templates_for_engine({\n 'BACKEND': 'django.templates.backends.django.Djangotemplate.',\n 'APP_DIRS': False,\n 'DIRS': ['/tmp/project/templates/', '/tmp/project/other_templates/']\n })\n ),\n [\n ('base.html', '/tmp/project/templates/base.html'),\n ('foo/bar.html', '/tmp/project/templates/foo/bar.html'),\n ('baz.html', '/tmp/project/other_templates/baz.html'),\n ]\n )",
"def get_template(name):\n file_name = \"{name}.template\".format(name=name)\n data = resource_string(\"pyscaffoldext.custom_extension.templates\",\n file_name)\n return string.Template(data.decode(\"UTF-8\"))",
"def is_compatible_template_loader(template_loader: BaseLoader) -> bool:\n return hasattr(template_loader, \"get_dirs\")",
"def load():\n return TwitterPlugin()",
"def __get_template_processor(lvp) -> Optional[TemplateProcessor]:\n if lvp == LVP.COBOL_TO_CSHARP_9:\n return COBOLToCSharp9TemplateProcessor()\n\n return None",
"def _get_tag_manager():\n\n class_path = getattr(settings, 'ES_REACT_RENDER_TAG_MANAGER', '')\n if not class_path:\n return ESModulesReactTagManager\n\n return import_string(class_path)",
"def lookup_template(namespace, name):\r\n return LOOKUP[namespace].get_template(name)",
"def test_template_loaders(self):\n schema = DjangoConfigurationSchema()\n # Just strings.\n raw_settings = self.minimal_settings\n raw_settings['TEMPLATE_LOADERS'] = [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ]\n cleaned_settings = schema.deserialize(raw_settings) # Is valid.\n self.assertEqual(cleaned_settings['TEMPLATE_LOADERS'],\n raw_settings['TEMPLATE_LOADERS'])\n # Strings and tuples.\n raw_settings = self.minimal_settings\n raw_settings['TEMPLATE_LOADERS'] = [\n 'some.string',\n (\n 'django.template.loaders.cached.Loader',\n (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n )\n ),\n ]\n cleaned_settings = schema.deserialize(raw_settings) # Is valid.\n self.assertEqual(cleaned_settings['TEMPLATE_LOADERS'],\n raw_settings['TEMPLATE_LOADERS'])",
"def load_template_if_needed(self):\n\n class GeneratorProxy(object):\n \"\"\"\n An interface to templates and plugins for\n providing restricted access to the methods.\n \"\"\"\n\n def __init__(self, preprocessor=None, postprocessor=None,\n context_for_path=None):\n self.preprocessor = preprocessor\n self.postprocessor = postprocessor\n self.context_for_path = context_for_path\n\n if not self.template:\n logger.info(\"Generating site at [%s]\" % self.site.sitepath)\n self.template = Template.find_template(self.site)\n logger.debug(\"Using [%s] as the template\",\n self.template.__class__.__name__)\n\n logger.info(\"Configuring the template environment\")\n preprocessor = self.events.begin_text_resource\n postprocessor = self.events.text_resource_complete\n proxy = GeneratorProxy(context_for_path=self.context_for_path,\n preprocessor=preprocessor,\n postprocessor=postprocessor)\n self.template.configure(self.site,\n engine=proxy)\n self.events.template_loaded(self.template)",
"def _get_template(self):\n # Get templates and put them in the order of importance:\n # 1. template specified in \"modules.yaml\"\n # 2. template specified in a package directly\n # 3. default template (must be defined, check in __init__)\n module_system_name = str(self.module.__name__).split(\".\")[-1]\n package_attribute = \"{0}_template\".format(module_system_name)\n choices = [\n self.conf.template,\n getattr(self.spec.package, package_attribute, None),\n self.default_template, # This is always defined at this point\n ]\n # Filter out false-ish values\n choices = list(filter(lambda x: bool(x), choices))\n # ... and return the first match\n return choices.pop(0)"
] | [
"0.737304",
"0.72793007",
"0.7253313",
"0.6933869",
"0.67708385",
"0.61881804",
"0.6157388",
"0.5983813",
"0.597341",
"0.5949879",
"0.5875685",
"0.58639973",
"0.5863923",
"0.58493626",
"0.5842449",
"0.58355576",
"0.5831463",
"0.5823081",
"0.5786827",
"0.57619715",
"0.5755552",
"0.57516485",
"0.5748562",
"0.569891",
"0.56740344",
"0.5653648",
"0.5640467",
"0.56074125",
"0.56000656",
"0.5579433"
] | 0.83160263 | 0 |
Binary mapping should return a ZPFBoundarySets object | def test_binary_mapping(load_database):
dbf = load_database()
my_phases = ['LIQUID', 'FCC_A1', 'HCP_A3', 'AL5FE2',
'AL2FE', 'AL13FE4', 'AL5FE4']
comps = ['AL', 'FE', 'VA']
conds = {v.T: (1200, 1300, 50), v.P: 101325, v.X('AL'): (0, 1, 0.2)}
zpf_boundaries = map_binary(dbf, comps, my_phases, conds)
num_boundaries = len(zpf_boundaries.all_compsets)
assert num_boundaries > 0
# calling binplot again can add more boundaries
map_binary(dbf, comps, my_phases, conds, boundary_sets=zpf_boundaries)
assert len(zpf_boundaries.all_compsets) == 2*num_boundaries | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_band_maps(self):\n band_maps = []\n source_band_index = 1\n target_band_index = self.starting_target_band\n for band in self.image['bands']:\n band_maps.append({\n 'source': source_band_index,\n 'target': target_band_index\n })\n source_band_index += 1\n target_band_index += 1\n return band_maps",
"def _add_boundaries(self):\n logger.info(\"add_boundaries: start of method\")\n\n import scipy.spatial as sptl\n import scipy.sparse as sprs\n Lx = self._Lx\n Ly = self._Ly\n Lz = self._Lz\n Np = self.num_pores()\n btype = self._btype\n boffset = 0.05\n\n #Translate internal pores to each face of domain\n poffset = np.zeros((7,3))\n poffset[[2,5],0] = [-Lx, Lx]\n poffset[[3,4],1] = [-Ly, Ly]\n poffset[[1,6],2] = [-Lz, Lz]\n pcoords = pcoords0 = self['pore.coords']\n for i in np.r_[1:7]:\n pcoords = np.concatenate((pcoords,pcoords0 + poffset[i,:]),axis=0)\n\n #Use some twisted logic to get bval list of + for boundary and - for periodic faces\n bval = [0, 1, 2, 3, 4, 5, 6]*(np.array([0, btype[2], btype[0], btype[1], btype[1], btype[0], btype[2]])*-2+1)\n ptype = np.zeros((Np,),dtype=int)\n for i in np.r_[1:7]:\n ptype = np.concatenate((ptype,np.ones((Np,),dtype=int)*bval[i]),axis=0)\n\n #pnum contains the internal ID number of the boundary pores (for connecting periodic points)\n pnum = self.pores()\n pnum = np.tile(pnum,7)\n\n Tri = sptl.Delaunay(pcoords)\n adjmat = sprs.lil_matrix((np.shape(pcoords)[0],np.shape(pcoords)[0]),dtype=int)\n for i in np.arange(0,np.shape(Tri.simplices)[0]):\n #Keep only simplices that are fully in real domain\n adjmat[Tri.simplices[i],Tri.simplices[i]] = 1\n adjmat = sprs.triu(adjmat,k=1,format=\"lil\")\n for i in np.arange(0,Np):\n #Add periodic throats to the netowrk (if any)\n tpore2 = pnum[adjmat.rows[i]][ptype[adjmat.rows[i]]<0]\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = self['throat.conns']\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n #Add boundary pores and throats to the network\n newporetyps = np.unique(ptype[adjmat.rows[i]][ptype[adjmat.rows[i]]>0])\n newporenums = np.r_[self.num_pores():self.num_pores()+np.size(newporetyps)]\n tpore2 = newporenums\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n self['throat.conns'] = conns\n bcoords = np.zeros((7,3),dtype=float)\n coords = self['pore.coords']\n bcoords[1,:] = [coords[i,0], coords[i,1], 0-Lz*boffset]\n bcoords[2,:] = [0-Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[3,:] = [coords[i,0], -Ly*boffset, coords[i,2]]\n bcoords[4,:] = [coords[i,0], Ly+Ly*boffset, coords[i,2]]\n bcoords[5,:] = [Lx+Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[6,:] = [coords[i,0], coords[i,1], Lz+Lz*boffset]\n newporecoords = bcoords[newporetyps,:]\n coords = np.concatenate((coords,newporecoords),axis=0)\n self['pore.coords'] = coords\n #Reset number of pores and throats (easier than tracking it)\n nums = np.r_[0:np.shape(coords)[0]]\n self['pore.numbering'] = nums\n self['pore.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n nums = np.r_[0:np.shape(conns)[0]]\n self['throat.numbering'] = nums\n self['throat.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n logger.debug(\"add_boundaries: end of method\")",
"def test_correct_binid(self):\n\n maps = Maps(plateifu='8485-1901', release='DR17', bintype='HYB10')\n spaxel = maps[22, 14]\n\n assert isinstance(spaxel, Spaxel)\n assert spaxel.x == 14, spaxel.y == 22\n\n bin_spaxels = spaxel.stellar_vel.bin.get_bin_spaxels()\n\n for sp in bin_spaxels:\n\n sp.load()\n assert sp.stellar_vel.bin.binid == spaxel.stellar_vel.bin.binid\n\n sp_bin = maps[sp.y, sp.x]\n assert sp_bin.stellar_vel.bin.binid == spaxel.stellar_vel.bin.binid",
"def seg2bmap(seg,width=None,height=None):\n\n seg = seg.astype(np.bool)\n seg[seg>0] = 1\n\n assert np.atleast_3d(seg).shape[2] == 1\n\n width = seg.shape[1] if width is None else width\n height = seg.shape[0] if height is None else height\n\n h,w = seg.shape[:2]\n\n ar1 = float(width) / float(height)\n ar2 = float(w) / float(h)\n\n assert not (width>w | height>h | abs(ar1-ar2)>0.01),\\\n 'Can''t convert %dx%d seg to %dx%d bmap.'%(w,h,width,height)\n\n e = np.zeros_like(seg)\n s = np.zeros_like(seg)\n se = np.zeros_like(seg)\n\n e[:,:-1] = seg[:,1:]\n s[:-1,:] = seg[1:,:]\n se[:-1,:-1] = seg[1:,1:]\n\n b = seg^e | seg^s | seg^se\n b[-1,:] = seg[-1,:]^e[-1,:]\n b[:,-1] = seg[:,-1]^s[:,-1]\n b[-1,-1] = 0\n\n if w == width and h == height:\n bmap = b\n else:\n bmap = np.zeros((height,width))\n for x in range(w):\n for y in range(h):\n if b[y,x]:\n j = 1+floor((y-1)+height / h)\n i = 1+floor((x-1)+width / h)\n bmap[j,i] = 1;\n\n return bmap",
"def test_rebulding_zpf_boundary_sets_regions():\n\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_310 = CompsetPair([\n BinaryCompset('P1', 310, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 310, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))\n\n # Initial compsets\n zpfbs.add_compsets(compsets_298)\n assert len(zpfbs.all_compsets) == 1\n assert len(zpfbs.two_phase_regions) == 1\n\n # Compsets added create a new region because phases changed\n zpfbs.add_compsets(compsets_300_diff_phases)\n assert len(zpfbs.all_compsets) == 2\n assert len(zpfbs.two_phase_regions) == 2\n\n # Compsets added create a new region because phases the temperature is out of tolerance\n zpfbs.add_compsets(compsets_310)\n assert len(zpfbs.all_compsets) == 3\n assert len(zpfbs.two_phase_regions) == 3\n\n # Rebuild the regions with a larger tolerance should create two regions with one and two compsets.\n zpfbs.rebuild_two_phase_regions(Ttol=20)\n assert len(zpfbs.all_compsets) == 3\n assert len(zpfbs.two_phase_regions) == 2\n assert sorted([len(tpr.compsets) for tpr in zpfbs.two_phase_regions]) == [1, 2]",
"def load_bcdr_mask(lw_x_points_str, lw_y_points_str, imshape=(4084, 3328)):\n x_points = np.array([float(num) for num in lw_x_points_str.strip().split(' ')])\n y_points = np.array([float(num) for num in lw_y_points_str.strip().split(' ')])\n poly_x, poly_y = polygon(y_points, x_points, shape=imshape)\n mask = np.zeros((imshape))\n mask[poly_x, poly_y] = 1\n return mask",
"def read_mesh_boundary(sFilename_boundary_in):\n iReturn_code = 1\n if os.path.isfile(sFilename_boundary_in):\n pass\n else:\n print('This mesh file does not exist: ', sFilename_boundary_in )\n iReturn_code = 0\n return iReturn_code\n\n \n pDriver_json = ogr.GetDriverByName('GeoJSON') \n pDataset_mesh = pDriver_json.Open(sFilename_boundary_in, gdal.GA_ReadOnly)\n pLayer_mesh = pDataset_mesh.GetLayer(0)\n pSpatial_reference_out = pLayer_mesh.GetSpatialRef()\n ldefn = pLayer_mesh.GetLayerDefn() \n\n #we also need to spatial reference\n for pFeature_mesh in pLayer_mesh:\n pGeometry_mesh = pFeature_mesh.GetGeometryRef() \n pGeometrytype_boundary = pGeometry_mesh.GetGeometryName()\n if(pGeometrytype_boundary == 'POLYGON'): \n pBoundary_ogr = pGeometry_mesh \n else:\n if(pGeometrytype_boundary == 'MULTIPOLYGON'): \n nLine = pGeometry_mesh.GetGeometryCount()\n for i in range(nLine):\n pBoundary_ogr = pGeometry_mesh.GetGeometryRef(i)\n \n pass\n else:\n pass\n pass \n \n \n pBoundary_wkt = pBoundary_ogr.ExportToWkt()\n aExtent = pBoundary_ogr.GetEnvelope()\n min_x, max_x, min_y, max_y = aExtent\n \n return pBoundary_wkt, aExtent",
"def test_adding_compsets_to_zpf_boundary_sets():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))\n assert zpfbs.components == ['A', 'B']\n assert len(zpfbs.two_phase_regions) == 0\n assert len(zpfbs.all_compsets) == 0\n\n zpfbs.add_compsets(compsets_298)\n assert len(zpfbs.all_compsets) == 1\n assert len(zpfbs.two_phase_regions) == 1\n\n zpfbs.add_compsets(compsets_300) # same region, different temperature\n assert len(zpfbs.all_compsets) == 2\n assert len(zpfbs.two_phase_regions) == 1\n\n zpfbs.add_compsets(compsets_300_diff_phases) # new region, different phases\n assert len(zpfbs.all_compsets) == 3\n assert len(zpfbs.two_phase_regions) == 2",
"def _binary(self, trajectories):\n # Get the mapping between indices and binary genotypes\n index2binary = self._gpm.map(\"indices\", \"binary.genotypes\")\n\n # New dictionary\n mapping = OrderedDict()\n\n # Iterate through trajectories and convert keys to binary repr.\n for key in trajectories:\n indices = list(key)\n sequences = tuple([index2binary[i] for i in indices])\n mapping[sequences] = trajectories[key]\n\n _mapping = self.sort_dict(mapping)\n return _mapping",
"def get_borough_boundaries():\n df = update_borough_boundaries()\n return {\"data\": df.to_dict(orient=\"records\")}",
"def get_regions_mask(self, input):",
"def _to_array1(self, maps, norb):\n nstate = len(maps[(0, 1)])\n nlt = norb * (norb + 1) // 2\n arrays = numpy.zeros((nlt, nstate, 3), dtype=numpy.int32)\n for i in range(norb):\n for j in range(i + 1, norb):\n ijn = i + j * (j + 1) // 2\n for k, data in enumerate(maps[(i, j)]):\n arrays[ijn, k, 0] = data[0]\n arrays[ijn, k, 1] = data[1]\n arrays[ijn, k, 2] = data[2]\n return arrays",
"def test_make_binary_and_fp(self):\n output_mask = boundary_mask(df=os.path.join(data_dir, 'sample.csv'),\n geom_col=\"PolygonWKT_Pix\")\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_b_mask_inner.tif'))\n\n assert np.array_equal(output_mask, truth_mask)",
"def create_boundary_maps(params):\n batch_size = 40\n loader_params = {'batch_size': batch_size, 'shuffle': False, 'num_workers': 1}\n train_loader, val_loader = \\\n init_city_loader(data_folder=params['data_folder'],\n image_size=[1024, 2048], # keeping original size\n remove_alpha=True, # removing the alpha channel\n loader_params=loader_params,\n ret_type='all') # return everything in the batch\n\n print(f'In [create_boundary_maps]: performing with data loaders of size: \\n'\n f'train_loader: {len(train_loader)} \\n'\n f'val_loader: {len(val_loader)} \\n'\n f'and batch_size of: {batch_size} \\n')\n\n for loader_name, loader in {'train_loader': train_loader, 'val_loader': val_loader}.items():\n print(f'In [create_boundary_maps]: creating for {loader_name}')\n for i_batch, batch in enumerate(loader):\n if i_batch % 1 == 0:\n print(f'Doing for the batch {i_batch}')\n\n instance_maps = batch['instance'].to(device)\n boundaries = helper.get_edges(instance_maps)\n boundary_paths = batch['boundary_path']\n # save one by one in the same location as gtFine images\n helper.save_one_by_one(boundaries, boundary_paths, save_path=None) # saving to boundary_paths\n print(f'In [create_boundary_maps]: done for {loader_name}')\n print('In [create_boundary_maps]: all done')",
"def decode(self, coded_set):",
"def test_geotransform2bbox(self):\n\n M = 5\n N = 10\n for gt in GEOTRANSFORMS:\n bbox = geotransform2bbox(gt, M, N)\n\n # FIXME: Need better tests here, but this is better than nothing\n\n # Lower bounds\n assert bbox[0] == gt[0]\n\n # Upper bounds\n assert bbox[3] == gt[3]",
"def map():",
"def generate_boundaries(self, mesh=None):\n\n boundaries_list = dict()\n\n for boundary in mesh.boundary_nodes_tags:\n nodes_list = list()\n weights_list = list()\n\n for key, (el, tag) in mesh.boundary_elements[boundary].items():\n nodes = mesh.internal_boundary_product(self.poly_roots)\n\n weights = np.array(mesh.internal_boundary_product(self.weights)).prod(\n axis=1\n )[:, None]\n\n sys.stdout.write(\n \"\\rMapping from the reference to the real mesh element {} from {}\".format(\n key, boundary\n )\n )\n sys.stdout.flush()\n\n if isinstance(self.p_order, tuple):\n nodes_mapped = mesh.map_to_boundary_element(\n nodes, self.reference_interval, el, tag\n )\n nodes_list.append(nodes_mapped)\n weights_list.append(weights)\n\n else:\n nodes_mapped = mesh.map_to_boundary_element(\n nodes, self.reference_interval, el\n )\n\n nodes_list.append(nodes_mapped.T)\n weights_list.append(weights)\n\n nodes_array = np.vstack(nodes_list)\n weights_array = np.vstack(weights_list)\n\n boundaries_list[boundary] = (nodes_array, weights_array)\n\n return boundaries_list",
"def get_intersected_basins_ppt_data(all_basin_geoms , month, year, conv2Inches):\n \n global gSpatialIndex\n print(\"Processing Prism Dataset\")\n ppt_bounds, ppt_data, hdr_dict = get_monthly_prism_ppt_data(year = year, month = month, plotPPTBounds = False)\n print(\"-Extracting precipitation data\")\n ppt_gdf = convert_pptData_to_GDF(ppt_bounds, ppt_data, hdr_dict, plotHeatMap = False)\n\n intersected_basins = {}\n print(\"---Creating Spatial RTree Index for month:\", month)\n \n # Create a copy of a global index to reduce time.\n # Check if it works correctly.\n \n if(gSpatialIndex == 0):\n gSpatialIndex = ppt_gdf.sindex\n\n print(\"-Creating basin intersections\")\n for basin_file_name, basin_geom in all_basin_geoms.items():\n possible_matches_index = list(gSpatialIndex.intersection(basin_geom.bounds))\n possible_matches = ppt_gdf.iloc[possible_matches_index]\n precise_matches = possible_matches[possible_matches.intersects(basin_geom)]\n if(conv2Inches):\n precise_matches[\"Precipitation\"] = precise_matches[\"Precipitation\"]/25.4\n intersected_basins[basin_file_name] = precise_matches\n \n print(\"Completed processing \")\n return intersected_basins",
"def parseBoundaryContent(content):\n data = {}\n isBinary = isBinaryFormat(content)\n bd = splitBoundaryContent(content)\n for boundary, (n1, n2) in bd.items():\n pd = {}\n n = n1\n while True:\n lc = content[n]\n if b'nonuniform' in lc:\n v = parseDataNonuniform(content, n, n2, isBinary)\n pd[lc.split()[0]] = v\n if not isBinary:\n n += len(v) + 4\n else:\n n += 3\n continue\n elif b'uniform' in lc:\n pd[lc.split()[0]] = parseDataUniform(content[n])\n n += 1\n if n > n2:\n break\n data[boundary] = pd\n return data",
"def map(z):\n pass",
"def cfdReadBoundaryFile(self):\r\n \r\n with open(self.boundaryFile,\"r\") as fpid:\r\n print('Reading boundary file ...')\r\n \r\n ## (dict) key for each boundary patch\r\n self.cfdBoundaryPatchesArray={}\r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n count=0\r\n if len(tline.split()) ==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n \r\n if tline.strip().isdigit():\r\n \r\n self.numberOfBoundaryPatches = tline.split()[0]\r\n continue\r\n \r\n boundaryName=tline.split()[0]\r\n \r\n self.cfdBoundaryPatchesArray[boundaryName]=io.cfdReadCfdDictionary(fpid)\r\n ## number of faces for the boundary patch\r\n self.cfdBoundaryPatchesArray[boundaryName]['numberOfBFaces']= int(self.cfdBoundaryPatchesArray[boundaryName].pop('nFaces'))\r\n \r\n ## start face index of the boundary patch in the self.faceNodes\r\n self.cfdBoundaryPatchesArray[boundaryName]['startFaceIndex']= int(self.cfdBoundaryPatchesArray[boundaryName].pop('startFace'))\r\n count=count+1\r\n\r\n ## index for boundary face, used for reference\r\n self.cfdBoundaryPatchesArray[boundaryName]['index']= count",
"def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages",
"def _gtBinmap(self):\n if os.path.isfile(self.outbinmap) and (not self.clobber):\n print(\"\\t=== '{}' already exists ===\".format(self.outbinmap))\n return\n else:\n if not os.path.isfile(self.outmktime):\n self._gtMktime()\n\n # Image width must be comprised within the acceptance cone\n imWidth = int( np.floor(self.rad* 2**(0.5)) ) # deg\n imWipix = int(imWidth / self.binsz)\n\n # Coordinate system\n if self.csys == 'GAL':\n center_icrs = SkyCoord(ra=self.ra*u.degree, dec=self.dec*u.degree, frame='icrs')\n self.ra = center_icrs.galactic.l.deg\n self.dec = center_icrs.galactic.b.deg\n\n os.popen(\"gtbin evfile={} scfile=none outfile={} algorithm=CMAP emin={}\\\n emax={} nxpix={} nypix={} binsz={} coordsys={} xref={} yref={} axisrot=0\\\n proj=AIT\".format(self.outmktime, self.outbinmap, self.emin, self.emax,\n imWipix, imWipix, self.binsz, self.csys, self.ra, self.dec))\n\n if self.csys == 'GAL':\n self.ra = center_icrs.ra.deg\n self.dec = center_icrs.dec.deg\n return",
"def binary_mask_to_polygon(binary_mask, tolerance=0):\r\n\r\n polygons = []\r\n if isinstance(binary_mask, torch.Tensor):\r\n binary_mask = binary_mask.cpu().numpy()\r\n # pad mask to close contours of shapes which start and end at an edge\r\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\r\n contours = measure.find_contours(padded_binary_mask, 0.5)\r\n contours = np.subtract(contours, 1)\r\n for contour in contours:\r\n contour = close_contour(contour)\r\n contour = measure.approximate_polygon(contour, tolerance)\r\n if len(contour) < 3:\r\n continue\r\n contour = np.flip(contour, axis=1) # x, y\r\n polygon = np.maximum(contour, 0)\r\n #segmentation = contour.ravel().tolist()\r\n # after padding and subtracting 1 we may get -0.5 points in our segmentation\r\n #segmentation = [0 if i < 0 else i for i in segmentation]\r\n polygons.append(polygon)\r\n\r\n return polygons",
"def boundary_polygon(self):\n try:\n return self.boundary_polygon_by_edges()\n except Exception as exc:\n self.log.warning('Warning, boundary_polygon() failed using edges! Trying polygon union method')\n self.log.warning(exc,exc_info=True)\n return self.boundary_polygon_by_union()",
"def binary_bases(cls):\n return cls._BINARY_BASES",
"def test_read_binary(self):\n meshes = stlreader.get_data(self.stl_bin_file)\n print(meshes[0][0], file=sys.stderr)\n name, vertices, polygons = meshes[0]\n self.assertEqual(name, \"{}#{}\".format(os.path.basename(self.stl_bin_file), 0))\n self.assertTrue(len(vertices) > 0)\n self.assertTrue(len(polygons) > 0)\n polygon_ids = list()\n for a, b, c in polygons.itervalues():\n polygon_ids += [a, b, c]\n self.assertItemsEqual(set(vertices.keys()), set(polygon_ids))",
"def retrieveManualGeocodes():\n\n\tshp_2013 = join(project_dir, '2013', 'shp')\n\tw_lid = join(shp_2013, 'west_lid_qcew13_zip_regeocoded.shp')\n\te_lid = join(shp_2013, 'east_lid_qcew13_zip_regeocoded.shp')\n\n\tbin_dict = {}\n\tfor lid in (w_lid, e_lid):\n\t\twith da.SearchCursor(lid, '*') as cursor:\n\t\t\tfor row in cursor:\n\t\t\t\td = OrderedDict(zip(cursor.fields, row))\n\t\t\t\t# if the geometry wasn't matched in the geocoding it has\n\t\t\t\t# a value of (None, None) in the 'Shape' field\n\t\t\t\tif d['Status'] != 'U':\n\t\t\t\t\tgeo_fields = (\n\t\t\t\t\t\t'Shape', 'Loc_name', 'Score', 'Match_type')\n\t\t\t\t\tgeo_dict = {k: d[k] for k in geo_fields}\n\t\t\t\t\tbin_dict[d['BIN']] = geo_dict\n\t\n\treturn bin_dict",
"def add_microstructures_to_arbor(self,arbor,mbranches,bbranches,myelingeometry=[1,1,3,8,3,1],boutongeometry=[28,4]):\n\t\tarbor_labels = dict(zip([key for key in arbor.keys()],[[] for key in arbor.keys()]))\n\t\tarbor,arbor_labels = self.myelinate_branches(arbor,arbor_labels,mbranches,myelin_geometry=myelingeometry) #myelin_geometry=[1,1,3,8,3,1]\n\t\tarbor,arbor_labels = self.bouton_branches(arbor,arbor_labels,bbranches,bouton_geometry=boutongeometry) #bouton_geometry=[28,4]\n\t\tarbor = self.ensure_precision(arbor)\n\t\treturn(arbor,arbor_labels)"
] | [
"0.6035268",
"0.5507288",
"0.54482836",
"0.5369027",
"0.536103",
"0.53413874",
"0.5334319",
"0.5321949",
"0.5288576",
"0.5270437",
"0.5252339",
"0.5250869",
"0.5245525",
"0.52396154",
"0.5179",
"0.5110166",
"0.50924635",
"0.5092054",
"0.50727254",
"0.50721407",
"0.50680166",
"0.50497997",
"0.50478387",
"0.5046475",
"0.50461346",
"0.50416493",
"0.5032895",
"0.5027402",
"0.5027305",
"0.5021869"
] | 0.70849204 | 0 |
A CompsetPair with very different temperature should not belong in the TwoPhaseRegion | def test_two_phase_region_outside_temperature_tolerance_does_not_belong():
compsets_300 = CompsetPair([
BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),
])
compsets_500 = CompsetPair([
BinaryCompset('P1', 500, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 500, 'B', 0.8, [0.2, 0.8]),
])
tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K
assert tpr.compsets_belong_in_region(compsets_500) is False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_two_phase_region_expands_as_compsets_are_added():\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_305 = CompsetPair([\n BinaryCompset('P1', 305, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 305, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_312 = CompsetPair([\n BinaryCompset('P1', 312, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 312, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K\n # compsets don't belong because they are outside the temperature tolerance (10 K)\n assert tpr.compsets_belong_in_region(compsets_312) is False\n assert tpr.compsets_belong_in_region(compsets_305)\n tpr.add_compsets(compsets_305)\n # 312 K compsets could be added now that the 305 K is within 10 K.\n assert tpr.compsets_belong_in_region(compsets_312)",
"def test_two_phase_region_new_phases_does_not_belong():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K\n assert tpr.compsets_belong_in_region(compsets_300_diff_phases) is False",
"def remove_ill_matched_pair(phi1,S1,TU1,TV1): #---- remove ill matched pair\r\n #--- mark inlier= 1; outlier= 0 ---\r\n mask, phi0= pano_tools.remove_outlier(phi1);\r\n mask, S0 = pano_tools.remove_outlier(S1 ,Nstd=2, mask= mask);\r\n mask, TU0 = pano_tools.remove_outlier(TU1 ,Nstd=2, mask= mask);\r\n mask, TV0 = pano_tools.remove_outlier(TV1 ,Nstd=2, mask= mask); \r\n mask, phi0= pano_tools.remove_outlier(phi1,Nstd=3, mask= mask);\r\n mask, S0 = pano_tools.remove_outlier(S1 ,Nstd=3, mask= mask);\r\n mask, TU0 = pano_tools.remove_outlier(TU1 ,Nstd=3, mask= mask);\r\n #--- select reliable data pair ---\r\n # mask is M*M matrix: 1= reliable pair combination;\r\n M = phi1.shape[0];\r\n sumx= np.sum(mask,axis=0); # large number= reliable\r\n seq = []; # chosen reliable data\r\n for k in range(0, int(M*0.7)):\r\n maxx = np.argmax(sumx);\r\n seq.append(maxx);\r\n sumx[maxx]= 0; \r\n return seq, phi0, S0, TU0, TV0",
"def test_two_phase_region_usage():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K\n assert tpr.compsets_belong_in_region(compsets_300)\n tpr.add_compsets(compsets_300)\n assert len(tpr.compsets) == 2",
"def test_t_paired_specific_difference(self):\r\n x, y = self.x, self.y\r\n # difference is 0.2, so test should be non-significant if 0.2 passed\r\n self.failIf(t_paired(y, x, exp_diff=0.2)[0] > 1e-10)\r\n # same, except that reversing list order reverses sign of difference\r\n self.failIf(t_paired(x, y, exp_diff=-0.2)[0] > 1e-10)\r\n # check that there's no significant difference from the true mean\r\n self.assertFloatEqual(\r\n t_paired(y, x, exp_diff=0.2)[1], 1, 1e-4)",
"def _valid_bond_pair(self, set):\n (sbu1, cp1), (sbu2, cp2) = set\n if all([i is None for i in [cp1.special, cp2.special, cp1.constraint, cp2.constraint]]):\n return sbu1.is_metal != sbu2.is_metal\n\n return (cp1.special == cp2.constraint) and (cp2.special == cp1.constraint)",
"def test_sparsities_differing_map_pairs_not_cached(self, backend, m1, m2, ds2):\n sp1 = op2.Sparsity((ds2, ds2), (m1, m2))\n sp2 = op2.Sparsity((ds2, ds2), (m2, m1))\n assert sp1 is not sp2",
"def test_sparsities_differing_map_tuples_not_cached(self, backend, m1, m2, ds2):\n sp1 = op2.Sparsity((ds2, ds2), ((m1, m1), (m2, m2)))\n sp2 = op2.Sparsity((ds2, ds2), ((m2, m2), (m2, m2)))\n assert sp1 is not sp2",
"def compParDiff(self, chem) :\r\n import warnings\r\n warnings.warn('Partition and diffusion coefficients in homogenised stratum corneum are NOT calculated. '\r\n 'Only user-supplied values are used.')",
"def circuitSat(C):",
"def testCorrectForTwoAtomCellWithoutPeriodicityNEEDED(self):\n\t\texpDist = 0.01*10\n\t\tself._checkExpMatchesActual(expDist)",
"def test_get_tf_pairs(self):\n graph = BELGraph()\n p1, p2, p3 = (Protein(\"test\", str(i)) for i in range(1, 4))\n r4, r5, r6 = (Rna(\"test\", str(j)) for j in range(4, 7))\n\n g4 = r4.get_gene()\n self.assertIsNotNone(g4)\n g5 = r5.get_gene()\n self.assertIsNotNone(g5)\n\n c14, c25 = ComplexAbundance([p1, g4]), ComplexAbundance([p2, g5])\n _tf_up(graph, p1, r4)\n _tf_down(graph, p2, r5)\n graph.add_correlation(p3, r6, citation=n(), evidence=n())\n\n self.assertEqual({p1, p2, p3, r4, r5, r6, g4, g5, c14, c25}, set(graph))\n\n expected_edges = [\n (c14, r4),\n (p1, c14),\n (g4, c14),\n (c25, r5),\n (p2, c25),\n (g5, c25),\n (p3, r6),\n (r6, p3),\n ]\n sorted_expected_edges = sorted(expected_edges, key=_bel_pair_key)\n sorted_actual_edges = sorted(graph.edges(), key=_bel_pair_key)\n\n self.assertEqual(sorted_expected_edges, sorted_actual_edges)\n\n pairs = set(get_tf_pairs(graph))\n expected_pairs = {(p1, r4, +1), (p2, r5, -1)}\n self.assertEqual(expected_pairs, pairs)",
"def test_equivalent_potential_temperature():\n p = 1000 * units.mbar\n t = 293. * units.kelvin\n td = 280. * units.kelvin\n ept = equivalent_potential_temperature(p, t, td)\n assert_almost_equal(ept, 311.18586467284007 * units.kelvin, 3)",
"def testSameRMSEWhenPairsAreSpecifiedOrNotDifferentGeoOrder(self):\n test_class1 = TrimmedMatchGeoXDesign(\n GeoXType.HEAVY_UP,\n pretest_data=self.nontrivial_data,\n time_window_for_design=self.design_window,\n time_window_for_eval=self.evaluation_window,\n matching_metrics={'response': 1.0},\n pairs=None)\n\n for geox_type in GeoXType:\n if geox_type == GeoXType.CONTROL:\n continue\n\n test_class1._geox_type = geox_type\n _, expected_detailed_results = test_class1.report_candidate_designs(\n budget_list=[30, 40],\n iroas_list=[0, 2],\n use_cross_validation=True,\n num_simulations=10)\n\n # change the order of geo1 and geo2 in some of the pairs\n pairs = [x.copy() for x in test_class1.pairs]\n pairs[0].loc[0:3, 'geo1'] = test_class1.pairs[0].loc[0:3, 'geo2']\n pairs[0].loc[0:3, 'geo2'] = test_class1.pairs[0].loc[0:3, 'geo1']\n\n test_class2 = TrimmedMatchGeoXDesign(\n geox_type,\n pretest_data=self.nontrivial_data,\n time_window_for_design=self.design_window,\n time_window_for_eval=self.evaluation_window,\n matching_metrics={'response': 1.0},\n pairs=pairs)\n _, detailed_results = test_class2.report_candidate_designs(\n budget_list=[30, 40],\n iroas_list=[0, 2],\n use_cross_validation=True,\n num_simulations=10)\n\n for key in detailed_results.keys():\n self.assertTrue(\n np.array_equal(detailed_results[key]['estimate'],\n expected_detailed_results[key]['estimate']))",
"async def test_temp_unit_fix(\n hass: HomeAssistant,\n client,\n climate_radio_thermostat_ct101_multiple_temp_units,\n climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints,\n integration,\n) -> None:\n state = hass.states.get(\"climate.thermostat\")\n assert state\n assert state.attributes[\"current_temperature\"] == 18.3\n\n state = hass.states.get(\"climate.z_wave_thermostat\")\n assert state\n assert state.attributes[\"current_temperature\"] == 21.1",
"def __find_similar_pairs(self):\n size = len(self.__indexclusters)\n candidates = []\n for i in range(size):\n for j in range(i+1, size):\n simi = self.__cluster_simi(i, j)\n #print simi, self.__indexclusters[i],self.__indexclusters[j]\n if simi >= self.__threshold:\n candidates.append((simi, i, j))\n candidates.sort(reverse = True, key = lambda x: x[0])\n\n\n # filter overlapped pairs\n to_remove = set()\n appeared = set()\n for index, cand in enumerate(candidates):\n if cand[1] not in appeared and cand[2] not in appeared:\n appeared.add(cand[1])\n appeared.add(cand[2])\n else:\n to_remove.add(index)\n\n #print 'ahha'\n #print [(cand[1], cand[2]) for index, cand in enumerate(candidates) if index not in to_remove]\n\n return [(cand[1], cand[2]) for index, cand in enumerate(candidates)\n if index not in to_remove]",
"def listOfOverlappingTTPairs():\n listOfHalfModules = listOfTTHalfModules()\n ttmap = TTModulesMap_instance\n pairs = []\n regions = {'A':1, 'B':2, 'C':3}\n print \"Overlapping TT half modules:\"\n for hm1 in listOfHalfModules:\n for hm2 in listOfHalfModules:\n # they must be different\n if hm1 == hm2: continue\n # they must be both on top or both on bottom\n if locateTTHalfModule(hm1)[3] != locateTTHalfModule(hm2)[3]: continue\n # they must be on the same layer\n if locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0]: continue\n # avoid duplicates\n if (hm1, hm2) in pairs: continue\n if (hm2, hm1) in pairs: continue\n # they must be contiguous:\n if (locateTTHalfModule(hm1)[1] == locateTTHalfModule(hm2)[1]):\n if (abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) == 1):\n pairs.append( (hm1, hm2) )\n else:\n num1 = locateTTHalfModule(hm1)[2]\n num2 = locateTTHalfModule(hm2)[2]\n max1 = ttmap.numberOfModules[locateTTHalfModule(hm1)[0]]['Region'+locateTTHalfModule(hm1)[1]] - 1\n max2 = ttmap.numberOfModules[locateTTHalfModule(hm2)[0]]['Region'+locateTTHalfModule(hm2)[1]] - 1\n nreg1 = regions[locateTTHalfModule(hm1)[1]]\n nreg2 = regions[locateTTHalfModule(hm2)[1]]\n if ( (num1==max1 and num2==0 and nreg2-nreg1==1) or (num2==max2 and num1==0 and nreg1-nreg2==1) ):\n pairs.append( (hm1, hm2) )\n print '\\t', hm1, hm2\n ## - same region\n #if ((abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) != 1)\n # and (locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])): continue\n ## - or neighbouring region\n #elif not ((locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])\n # and ( ( (ttmap.numberOfModules[locateTTHalfModule(hm1)[0]] == locateTTHalfModule(hm1)[2]+1 )\n # and (locateTTHalfModule(hm2)[2] == 0) )\n # or ( (ttmap.numberOfModules[locateTTHalfModule(hm2)[0]] == locateTTHalfModule(hm2)[2]+1 )\n # and (locateTTHalfModule(hm1)[2] == 0) ) ) ): continue\n ## append to list of pairs\n #pairs.append( (hm1, hm2) )\n print\n return pairs",
"def canonical_pair(self, t=1):\n return self.canonical_vertex(), (self.L.one(), self.K.product(self.K.one(), t))",
"def is_pair_allowed(a, b):\n if a == complementary(b):\n return True\n if a == 'G' and b == 'U' or a == 'U' and b == 'G':\n return True\n return False",
"def gen_chip2_no_write(tup):\n cfpath, gfpath, bbox, theta, new_size, filter_list = tup\n chipBGR = ctool.compute_chip(gfpath, bbox, theta, new_size, filter_list)\n return chipBGR, cfpath",
"def test_sparsities_differing_maps_not_cached(self, backend, m1, m2, ds2):\n sp1 = op2.Sparsity(ds2, m1)\n sp2 = op2.Sparsity(ds2, m2)\n assert sp1 is not sp2",
"def _test_pairs(self, idx0, idx1):\n pass",
"def __xor__(self, other: Compound[Scalar]) -> Compound[Scalar]:\n return (self._pack_points(self._points_set ^ other._points_set)\n if isinstance(other, Multipoint)\n else NotImplemented)",
"def test_get_currency_pairs_with_second_currency_invalid(self):\n test_result = self.db_handler.get_currency_pairs_with_second_currency(\"TESTEXCHANGE\", [\"BAT\"])\n result = []\n assert result == test_result",
"def test_mixed_parcel():\n pressure = np.array([959., 779.2, 751.3, 724.3, 700., 269.]) * units.hPa\n temperature = np.array([22.2, 14.6, 12., 9.4, 7., -38.]) * units.degC\n dewpoint = np.array([19., -11.2, -10.8, -10.4, -10., -53.2]) * units.degC\n parcel_pressure, parcel_temperature, parcel_dewpoint = mixed_parcel(pressure, temperature,\n dewpoint,\n depth=250 * units.hPa)\n assert_almost_equal(parcel_pressure, 959. * units.hPa, 6)\n assert_almost_equal(parcel_temperature, 28.7401463 * units.degC, 2)\n assert_almost_equal(parcel_dewpoint, 7.15346585151 * units.degC, 2)",
"def test_transport_sanity(self):\n T = 400\n cv_mole, W = 21005.045895231186, 28.014\n species_name = \"N2\"\n\n data = ct_properties.ctThermoTransport(\"gri30.cti\", verbose=False)\n data.evaluate_properties()\n i = data.gas.species_index(species_name)\n\n As, Ts, _, poly_mu, poly_kappa, log_poly_mu, log_poly_kappa= ct2foam_utils.fit_ct_transport(data)\n\n mu_s = tr_fitter.sutherland(T, As[i], Ts[i])\n kappa_s=tr_fitter.euken(mu_s, cv_mole, W, R)\n mu_logp, kappa_logp = tr_fitter.eval_log_polynomial(log_poly_mu[i,:], log_poly_kappa[i,:], T)\n mu_p, kappa_p = tr_fitter.eval_polynomial(poly_mu[i,:], poly_kappa[i,:], T)\n\n\n # rough test whether they are in the same scale...\n mu_ref = 2.2217e-5\n kappa_ref = 0.032205\n\n self.assertTrue(np.abs(mu_s-mu_ref)/np.abs(mu_ref) < 0.07)\n self.assertTrue(np.abs(mu_p-mu_ref)/np.abs(mu_ref) < 0.01)\n self.assertTrue(np.abs(mu_logp-mu_ref)/np.abs(mu_ref) < 0.01)\n self.assertTrue(np.abs(kappa_s-kappa_ref)/np.abs(kappa_ref) < 0.05)\n self.assertTrue(np.abs(kappa_p-kappa_ref)/np.abs(kappa_ref) < 0.05)\n self.assertTrue(np.abs(kappa_logp-kappa_ref)/np.abs(kappa_ref) < 0.05)",
"def space(self):\n return set([k for k, v in list(self.unit_comp.items()) if abs(v) > 1e-6])",
"def test_XOR():\n\tk, outputs = 2, [0,1,1,0]\n\n\ttrue_pi0s = set(['00','11'])\n\ttrue_pi1s = set(['01','10'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('11',[],[[0,1]]),('00',[],[[0,1]])]\n\ttrue_ts1s = [('10',[[0,1]],[])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. %s != %s' % (ts1s,true_ts1s))",
"def test_get_all_currency_pairs_from_exchange_with_no_invalid_pair(self):\n test_result = self.db_handler.get_all_currency_pairs_from_exchange(\"TESTEXCHANGE\")\n test_result = [(item.exchange_id,\n item.first_id,\n item.second_id) for item in test_result]\n result = self.session.query(ExchangeCurrencyPair).all()\n result = [(item.exchange_id,\n item.first_id,\n item.second_id) for item in result]\n assert result == test_result",
"def keep_lesser_x0_y0_zbt0_pair_in_dict(p, p1, p2):\n const_dict = p[3]\n cd1, cd2 = p1[3], p2[3]\n if 'x0' in cd1 and 'x0' in cd2:\n if cd2['x0'] < cd1['x0']:\n const_dict['x0'] = cd2['x0']\n const_dict['y0'] = cd2['y0'] if 'y0' in cd2 else None\n const_dict['zbt0'] = cd2['zbt0'] if 'zbt0' in cd2 else None\n else:\n const_dict['x0'] = cd1['x0']\n const_dict['y0'] = cd1['y0'] if 'y0' in cd1 else None\n const_dict['zbt0'] = cd1['zbt0'] if 'zbt0' in cd1 else None\n p = p[0:3] + (const_dict,)\n return p"
] | [
"0.63824725",
"0.61983526",
"0.60765725",
"0.58474624",
"0.55372393",
"0.5528594",
"0.5406215",
"0.5398616",
"0.5267037",
"0.5245239",
"0.5206635",
"0.5178668",
"0.5114043",
"0.511152",
"0.51114476",
"0.5090678",
"0.50795275",
"0.5068768",
"0.506794",
"0.5047619",
"0.50468045",
"0.5045661",
"0.5000316",
"0.4999695",
"0.49946535",
"0.49940318",
"0.4993922",
"0.49763873",
"0.49755734",
"0.49753177"
] | 0.67047006 | 0 |
A CompsetPair with very different temperature should not belong in the TwoPhaseRegion | def test_two_phase_region_expands_as_compsets_are_added():
compsets_300 = CompsetPair([
BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),
])
compsets_305 = CompsetPair([
BinaryCompset('P1', 305, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 305, 'B', 0.8, [0.2, 0.8]),
])
compsets_312 = CompsetPair([
BinaryCompset('P1', 312, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 312, 'B', 0.8, [0.2, 0.8]),
])
tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K
# compsets don't belong because they are outside the temperature tolerance (10 K)
assert tpr.compsets_belong_in_region(compsets_312) is False
assert tpr.compsets_belong_in_region(compsets_305)
tpr.add_compsets(compsets_305)
# 312 K compsets could be added now that the 305 K is within 10 K.
assert tpr.compsets_belong_in_region(compsets_312) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_two_phase_region_outside_temperature_tolerance_does_not_belong():\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_500 = CompsetPair([\n BinaryCompset('P1', 500, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 500, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K\n assert tpr.compsets_belong_in_region(compsets_500) is False",
"def test_two_phase_region_new_phases_does_not_belong():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K\n assert tpr.compsets_belong_in_region(compsets_300_diff_phases) is False",
"def remove_ill_matched_pair(phi1,S1,TU1,TV1): #---- remove ill matched pair\r\n #--- mark inlier= 1; outlier= 0 ---\r\n mask, phi0= pano_tools.remove_outlier(phi1);\r\n mask, S0 = pano_tools.remove_outlier(S1 ,Nstd=2, mask= mask);\r\n mask, TU0 = pano_tools.remove_outlier(TU1 ,Nstd=2, mask= mask);\r\n mask, TV0 = pano_tools.remove_outlier(TV1 ,Nstd=2, mask= mask); \r\n mask, phi0= pano_tools.remove_outlier(phi1,Nstd=3, mask= mask);\r\n mask, S0 = pano_tools.remove_outlier(S1 ,Nstd=3, mask= mask);\r\n mask, TU0 = pano_tools.remove_outlier(TU1 ,Nstd=3, mask= mask);\r\n #--- select reliable data pair ---\r\n # mask is M*M matrix: 1= reliable pair combination;\r\n M = phi1.shape[0];\r\n sumx= np.sum(mask,axis=0); # large number= reliable\r\n seq = []; # chosen reliable data\r\n for k in range(0, int(M*0.7)):\r\n maxx = np.argmax(sumx);\r\n seq.append(maxx);\r\n sumx[maxx]= 0; \r\n return seq, phi0, S0, TU0, TV0",
"def test_two_phase_region_usage():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K\n assert tpr.compsets_belong_in_region(compsets_300)\n tpr.add_compsets(compsets_300)\n assert len(tpr.compsets) == 2",
"def test_t_paired_specific_difference(self):\r\n x, y = self.x, self.y\r\n # difference is 0.2, so test should be non-significant if 0.2 passed\r\n self.failIf(t_paired(y, x, exp_diff=0.2)[0] > 1e-10)\r\n # same, except that reversing list order reverses sign of difference\r\n self.failIf(t_paired(x, y, exp_diff=-0.2)[0] > 1e-10)\r\n # check that there's no significant difference from the true mean\r\n self.assertFloatEqual(\r\n t_paired(y, x, exp_diff=0.2)[1], 1, 1e-4)",
"def _valid_bond_pair(self, set):\n (sbu1, cp1), (sbu2, cp2) = set\n if all([i is None for i in [cp1.special, cp2.special, cp1.constraint, cp2.constraint]]):\n return sbu1.is_metal != sbu2.is_metal\n\n return (cp1.special == cp2.constraint) and (cp2.special == cp1.constraint)",
"def test_sparsities_differing_map_pairs_not_cached(self, backend, m1, m2, ds2):\n sp1 = op2.Sparsity((ds2, ds2), (m1, m2))\n sp2 = op2.Sparsity((ds2, ds2), (m2, m1))\n assert sp1 is not sp2",
"def test_sparsities_differing_map_tuples_not_cached(self, backend, m1, m2, ds2):\n sp1 = op2.Sparsity((ds2, ds2), ((m1, m1), (m2, m2)))\n sp2 = op2.Sparsity((ds2, ds2), ((m2, m2), (m2, m2)))\n assert sp1 is not sp2",
"def compParDiff(self, chem) :\r\n import warnings\r\n warnings.warn('Partition and diffusion coefficients in homogenised stratum corneum are NOT calculated. '\r\n 'Only user-supplied values are used.')",
"def circuitSat(C):",
"def testCorrectForTwoAtomCellWithoutPeriodicityNEEDED(self):\n\t\texpDist = 0.01*10\n\t\tself._checkExpMatchesActual(expDist)",
"def test_get_tf_pairs(self):\n graph = BELGraph()\n p1, p2, p3 = (Protein(\"test\", str(i)) for i in range(1, 4))\n r4, r5, r6 = (Rna(\"test\", str(j)) for j in range(4, 7))\n\n g4 = r4.get_gene()\n self.assertIsNotNone(g4)\n g5 = r5.get_gene()\n self.assertIsNotNone(g5)\n\n c14, c25 = ComplexAbundance([p1, g4]), ComplexAbundance([p2, g5])\n _tf_up(graph, p1, r4)\n _tf_down(graph, p2, r5)\n graph.add_correlation(p3, r6, citation=n(), evidence=n())\n\n self.assertEqual({p1, p2, p3, r4, r5, r6, g4, g5, c14, c25}, set(graph))\n\n expected_edges = [\n (c14, r4),\n (p1, c14),\n (g4, c14),\n (c25, r5),\n (p2, c25),\n (g5, c25),\n (p3, r6),\n (r6, p3),\n ]\n sorted_expected_edges = sorted(expected_edges, key=_bel_pair_key)\n sorted_actual_edges = sorted(graph.edges(), key=_bel_pair_key)\n\n self.assertEqual(sorted_expected_edges, sorted_actual_edges)\n\n pairs = set(get_tf_pairs(graph))\n expected_pairs = {(p1, r4, +1), (p2, r5, -1)}\n self.assertEqual(expected_pairs, pairs)",
"def test_equivalent_potential_temperature():\n p = 1000 * units.mbar\n t = 293. * units.kelvin\n td = 280. * units.kelvin\n ept = equivalent_potential_temperature(p, t, td)\n assert_almost_equal(ept, 311.18586467284007 * units.kelvin, 3)",
"def testSameRMSEWhenPairsAreSpecifiedOrNotDifferentGeoOrder(self):\n test_class1 = TrimmedMatchGeoXDesign(\n GeoXType.HEAVY_UP,\n pretest_data=self.nontrivial_data,\n time_window_for_design=self.design_window,\n time_window_for_eval=self.evaluation_window,\n matching_metrics={'response': 1.0},\n pairs=None)\n\n for geox_type in GeoXType:\n if geox_type == GeoXType.CONTROL:\n continue\n\n test_class1._geox_type = geox_type\n _, expected_detailed_results = test_class1.report_candidate_designs(\n budget_list=[30, 40],\n iroas_list=[0, 2],\n use_cross_validation=True,\n num_simulations=10)\n\n # change the order of geo1 and geo2 in some of the pairs\n pairs = [x.copy() for x in test_class1.pairs]\n pairs[0].loc[0:3, 'geo1'] = test_class1.pairs[0].loc[0:3, 'geo2']\n pairs[0].loc[0:3, 'geo2'] = test_class1.pairs[0].loc[0:3, 'geo1']\n\n test_class2 = TrimmedMatchGeoXDesign(\n geox_type,\n pretest_data=self.nontrivial_data,\n time_window_for_design=self.design_window,\n time_window_for_eval=self.evaluation_window,\n matching_metrics={'response': 1.0},\n pairs=pairs)\n _, detailed_results = test_class2.report_candidate_designs(\n budget_list=[30, 40],\n iroas_list=[0, 2],\n use_cross_validation=True,\n num_simulations=10)\n\n for key in detailed_results.keys():\n self.assertTrue(\n np.array_equal(detailed_results[key]['estimate'],\n expected_detailed_results[key]['estimate']))",
"async def test_temp_unit_fix(\n hass: HomeAssistant,\n client,\n climate_radio_thermostat_ct101_multiple_temp_units,\n climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints,\n integration,\n) -> None:\n state = hass.states.get(\"climate.thermostat\")\n assert state\n assert state.attributes[\"current_temperature\"] == 18.3\n\n state = hass.states.get(\"climate.z_wave_thermostat\")\n assert state\n assert state.attributes[\"current_temperature\"] == 21.1",
"def __find_similar_pairs(self):\n size = len(self.__indexclusters)\n candidates = []\n for i in range(size):\n for j in range(i+1, size):\n simi = self.__cluster_simi(i, j)\n #print simi, self.__indexclusters[i],self.__indexclusters[j]\n if simi >= self.__threshold:\n candidates.append((simi, i, j))\n candidates.sort(reverse = True, key = lambda x: x[0])\n\n\n # filter overlapped pairs\n to_remove = set()\n appeared = set()\n for index, cand in enumerate(candidates):\n if cand[1] not in appeared and cand[2] not in appeared:\n appeared.add(cand[1])\n appeared.add(cand[2])\n else:\n to_remove.add(index)\n\n #print 'ahha'\n #print [(cand[1], cand[2]) for index, cand in enumerate(candidates) if index not in to_remove]\n\n return [(cand[1], cand[2]) for index, cand in enumerate(candidates)\n if index not in to_remove]",
"def listOfOverlappingTTPairs():\n listOfHalfModules = listOfTTHalfModules()\n ttmap = TTModulesMap_instance\n pairs = []\n regions = {'A':1, 'B':2, 'C':3}\n print \"Overlapping TT half modules:\"\n for hm1 in listOfHalfModules:\n for hm2 in listOfHalfModules:\n # they must be different\n if hm1 == hm2: continue\n # they must be both on top or both on bottom\n if locateTTHalfModule(hm1)[3] != locateTTHalfModule(hm2)[3]: continue\n # they must be on the same layer\n if locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0]: continue\n # avoid duplicates\n if (hm1, hm2) in pairs: continue\n if (hm2, hm1) in pairs: continue\n # they must be contiguous:\n if (locateTTHalfModule(hm1)[1] == locateTTHalfModule(hm2)[1]):\n if (abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) == 1):\n pairs.append( (hm1, hm2) )\n else:\n num1 = locateTTHalfModule(hm1)[2]\n num2 = locateTTHalfModule(hm2)[2]\n max1 = ttmap.numberOfModules[locateTTHalfModule(hm1)[0]]['Region'+locateTTHalfModule(hm1)[1]] - 1\n max2 = ttmap.numberOfModules[locateTTHalfModule(hm2)[0]]['Region'+locateTTHalfModule(hm2)[1]] - 1\n nreg1 = regions[locateTTHalfModule(hm1)[1]]\n nreg2 = regions[locateTTHalfModule(hm2)[1]]\n if ( (num1==max1 and num2==0 and nreg2-nreg1==1) or (num2==max2 and num1==0 and nreg1-nreg2==1) ):\n pairs.append( (hm1, hm2) )\n print '\\t', hm1, hm2\n ## - same region\n #if ((abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) != 1)\n # and (locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])): continue\n ## - or neighbouring region\n #elif not ((locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])\n # and ( ( (ttmap.numberOfModules[locateTTHalfModule(hm1)[0]] == locateTTHalfModule(hm1)[2]+1 )\n # and (locateTTHalfModule(hm2)[2] == 0) )\n # or ( (ttmap.numberOfModules[locateTTHalfModule(hm2)[0]] == locateTTHalfModule(hm2)[2]+1 )\n # and (locateTTHalfModule(hm1)[2] == 0) ) ) ): continue\n ## append to list of pairs\n #pairs.append( (hm1, hm2) )\n print\n return pairs",
"def canonical_pair(self, t=1):\n return self.canonical_vertex(), (self.L.one(), self.K.product(self.K.one(), t))",
"def is_pair_allowed(a, b):\n if a == complementary(b):\n return True\n if a == 'G' and b == 'U' or a == 'U' and b == 'G':\n return True\n return False",
"def gen_chip2_no_write(tup):\n cfpath, gfpath, bbox, theta, new_size, filter_list = tup\n chipBGR = ctool.compute_chip(gfpath, bbox, theta, new_size, filter_list)\n return chipBGR, cfpath",
"def test_sparsities_differing_maps_not_cached(self, backend, m1, m2, ds2):\n sp1 = op2.Sparsity(ds2, m1)\n sp2 = op2.Sparsity(ds2, m2)\n assert sp1 is not sp2",
"def _test_pairs(self, idx0, idx1):\n pass",
"def __xor__(self, other: Compound[Scalar]) -> Compound[Scalar]:\n return (self._pack_points(self._points_set ^ other._points_set)\n if isinstance(other, Multipoint)\n else NotImplemented)",
"def test_get_currency_pairs_with_second_currency_invalid(self):\n test_result = self.db_handler.get_currency_pairs_with_second_currency(\"TESTEXCHANGE\", [\"BAT\"])\n result = []\n assert result == test_result",
"def test_mixed_parcel():\n pressure = np.array([959., 779.2, 751.3, 724.3, 700., 269.]) * units.hPa\n temperature = np.array([22.2, 14.6, 12., 9.4, 7., -38.]) * units.degC\n dewpoint = np.array([19., -11.2, -10.8, -10.4, -10., -53.2]) * units.degC\n parcel_pressure, parcel_temperature, parcel_dewpoint = mixed_parcel(pressure, temperature,\n dewpoint,\n depth=250 * units.hPa)\n assert_almost_equal(parcel_pressure, 959. * units.hPa, 6)\n assert_almost_equal(parcel_temperature, 28.7401463 * units.degC, 2)\n assert_almost_equal(parcel_dewpoint, 7.15346585151 * units.degC, 2)",
"def test_transport_sanity(self):\n T = 400\n cv_mole, W = 21005.045895231186, 28.014\n species_name = \"N2\"\n\n data = ct_properties.ctThermoTransport(\"gri30.cti\", verbose=False)\n data.evaluate_properties()\n i = data.gas.species_index(species_name)\n\n As, Ts, _, poly_mu, poly_kappa, log_poly_mu, log_poly_kappa= ct2foam_utils.fit_ct_transport(data)\n\n mu_s = tr_fitter.sutherland(T, As[i], Ts[i])\n kappa_s=tr_fitter.euken(mu_s, cv_mole, W, R)\n mu_logp, kappa_logp = tr_fitter.eval_log_polynomial(log_poly_mu[i,:], log_poly_kappa[i,:], T)\n mu_p, kappa_p = tr_fitter.eval_polynomial(poly_mu[i,:], poly_kappa[i,:], T)\n\n\n # rough test whether they are in the same scale...\n mu_ref = 2.2217e-5\n kappa_ref = 0.032205\n\n self.assertTrue(np.abs(mu_s-mu_ref)/np.abs(mu_ref) < 0.07)\n self.assertTrue(np.abs(mu_p-mu_ref)/np.abs(mu_ref) < 0.01)\n self.assertTrue(np.abs(mu_logp-mu_ref)/np.abs(mu_ref) < 0.01)\n self.assertTrue(np.abs(kappa_s-kappa_ref)/np.abs(kappa_ref) < 0.05)\n self.assertTrue(np.abs(kappa_p-kappa_ref)/np.abs(kappa_ref) < 0.05)\n self.assertTrue(np.abs(kappa_logp-kappa_ref)/np.abs(kappa_ref) < 0.05)",
"def space(self):\n return set([k for k, v in list(self.unit_comp.items()) if abs(v) > 1e-6])",
"def test_XOR():\n\tk, outputs = 2, [0,1,1,0]\n\n\ttrue_pi0s = set(['00','11'])\n\ttrue_pi1s = set(['01','10'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('11',[],[[0,1]]),('00',[],[[0,1]])]\n\ttrue_ts1s = [('10',[[0,1]],[])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. %s != %s' % (ts1s,true_ts1s))",
"def test_get_all_currency_pairs_from_exchange_with_no_invalid_pair(self):\n test_result = self.db_handler.get_all_currency_pairs_from_exchange(\"TESTEXCHANGE\")\n test_result = [(item.exchange_id,\n item.first_id,\n item.second_id) for item in test_result]\n result = self.session.query(ExchangeCurrencyPair).all()\n result = [(item.exchange_id,\n item.first_id,\n item.second_id) for item in result]\n assert result == test_result",
"def keep_lesser_x0_y0_zbt0_pair_in_dict(p, p1, p2):\n const_dict = p[3]\n cd1, cd2 = p1[3], p2[3]\n if 'x0' in cd1 and 'x0' in cd2:\n if cd2['x0'] < cd1['x0']:\n const_dict['x0'] = cd2['x0']\n const_dict['y0'] = cd2['y0'] if 'y0' in cd2 else None\n const_dict['zbt0'] = cd2['zbt0'] if 'zbt0' in cd2 else None\n else:\n const_dict['x0'] = cd1['x0']\n const_dict['y0'] = cd1['y0'] if 'y0' in cd1 else None\n const_dict['zbt0'] = cd1['zbt0'] if 'zbt0' in cd1 else None\n p = p[0:3] + (const_dict,)\n return p"
] | [
"0.67047006",
"0.61983526",
"0.60765725",
"0.58474624",
"0.55372393",
"0.5528594",
"0.5406215",
"0.5398616",
"0.5267037",
"0.5245239",
"0.5206635",
"0.5178668",
"0.5114043",
"0.511152",
"0.51114476",
"0.5090678",
"0.50795275",
"0.5068768",
"0.506794",
"0.5047619",
"0.50468045",
"0.5045661",
"0.5000316",
"0.4999695",
"0.49946535",
"0.49940318",
"0.4993922",
"0.49763873",
"0.49755734",
"0.49753177"
] | 0.63824725 | 1 |
A new pair of compsets with different phases should not be in the TwoPhaseRegion | def test_two_phase_region_new_phases_does_not_belong():
compsets_298 = CompsetPair([
BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),
])
compsets_300_diff_phases = CompsetPair([
BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),
])
tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K
assert tpr.compsets_belong_in_region(compsets_300_diff_phases) is False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_two_phase_region_expands_as_compsets_are_added():\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_305 = CompsetPair([\n BinaryCompset('P1', 305, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 305, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_312 = CompsetPair([\n BinaryCompset('P1', 312, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 312, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K\n # compsets don't belong because they are outside the temperature tolerance (10 K)\n assert tpr.compsets_belong_in_region(compsets_312) is False\n assert tpr.compsets_belong_in_region(compsets_305)\n tpr.add_compsets(compsets_305)\n # 312 K compsets could be added now that the 305 K is within 10 K.\n assert tpr.compsets_belong_in_region(compsets_312)",
"def test_two_phase_region_usage():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K\n assert tpr.compsets_belong_in_region(compsets_300)\n tpr.add_compsets(compsets_300)\n assert len(tpr.compsets) == 2",
"def test_two_phase_region_outside_temperature_tolerance_does_not_belong():\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_500 = CompsetPair([\n BinaryCompset('P1', 500, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 500, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K\n assert tpr.compsets_belong_in_region(compsets_500) is False",
"def trivial_phase(indivs):\r\n\tpool=make_pool(len(indivs[0]))\r\n\r\n\tfor i in xrange(1,len(pool)+1):\r\n\t\tall_combi=itertools.combinations(pool,i)\r\n\t\tfor t in all_combi:\r\n\t\t\tt+=t\r\n\t\t\tcandidate_couples=list(itertools.combinations(t,2))\r\n\t\t\tgeno_list=map(lambda x: mix(x[0],x[1]), candidate_couples)\r\n\t \t\tif check(indivs, geno_list):\r\n\t \t\t\treturn list(set(t)), candidate_couples\r\n\tprint \"It's impossible to execute this, something must be wrong.\"",
"def _comp_het_pair_pattern(self,\n gt_types1, gt_nums1,\n gt_types2, gt_nums2,\n gt_phases1, gt_phases2):\n\n # already phased before sending here.\n ret = {'candidates': [], 'priority': 4}\n for kid in self.samples_with_parent:\n if gt_nums1[kid._i] == gt_nums2[kid._i]: continue\n if not (gt_types1[kid._i] == HET and gt_types2[kid._i] == HET): continue\n #if not (gt_phases1[kid._i] and gt_phases2[kid._i]): continue\n if gt_types1[kid.mom._i] == HOM_ALT or gt_types2[kid.dad._i] == HOM_ALT: continue\n mom, dad = kid.mom, kid.dad\n\n kid_phased = gt_phases1[kid._i] and gt_phases2[kid._i]\n dad_phased = gt_phases1[dad._i] and gt_phases2[dad._i]\n mom_phased = gt_phases1[mom._i] and gt_phases2[mom._i]\n\n if kid_phased and dad_phased and (gt_nums1[dad._i] == gt_nums1[kid._i]) and (gt_nums2[dad._i] == gt_nums2[kid._i]):\n continue\n if kid_phased and mom_phased and (gt_nums1[mom._i] == gt_nums1[kid._i]) and (gt_nums2[mom._i] == gt_nums2[kid._i]):\n continue\n\n if kid_phased and dad_phased and mom_phased and gt_types1[dad._i] != gt_types2[dad._i] and gt_types1[mom._i] != gt_types2[mom._i]:\n priority = 1\n\n elif kid_phased and gt_types1[dad._i] != gt_types1[mom._i] and gt_types2[dad._i] != gt_types2[mom._i]:\n # parents are unphased hets at different sites.\n priority = 1\n else:\n priority = 2\n for parent in (kid.mom, kid.dad):\n # unphased het\n if gt_types2[parent._i] == gt_types1[parent._i] == HET:\n priority += 1\n\n ret['candidates'].append(kid)\n ret['priority'] = min(ret['priority'], priority)\n ret['candidate'] = len(ret['candidates']) > 0\n return ret",
"def _compositions_swapped(self, thermo):\n assert self._ref_indicators is not None\n\n indicators = self._singlet_comparison(thermo)\n\n for list1, list2 in zip(indicators, self._ref_indicators):\n comp_swapped = True\n for ind1, ind2 in zip(list1, list2):\n if ind1 == ind2:\n comp_swapped = False\n if comp_swapped:\n return True\n return False",
"def __isub__(self, other):\n self.components = [c1 - c2 for (c1, c2) in zip(self, other)]\n return self",
"def test_adding_compsets_to_zpf_boundary_sets():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))\n assert zpfbs.components == ['A', 'B']\n assert len(zpfbs.two_phase_regions) == 0\n assert len(zpfbs.all_compsets) == 0\n\n zpfbs.add_compsets(compsets_298)\n assert len(zpfbs.all_compsets) == 1\n assert len(zpfbs.two_phase_regions) == 1\n\n zpfbs.add_compsets(compsets_300) # same region, different temperature\n assert len(zpfbs.all_compsets) == 2\n assert len(zpfbs.two_phase_regions) == 1\n\n zpfbs.add_compsets(compsets_300_diff_phases) # new region, different phases\n assert len(zpfbs.all_compsets) == 3\n assert len(zpfbs.two_phase_regions) == 2",
"def complement(self):\n for cell in self.compact:\n cell.set(not cell.peg)",
"def test_recomb(self):\n sol1, sol2 = [0,0,0,0],[1,1,1,1]\n hot_regions = [0,0,0,1] ##NOTE: sum(hot_regions) shouls always be 1\n rec_events = 1\n sol = list(d.recombine(sol1,sol2,rec_events,hot_regions))\n print(f\"recomb sol: {sol}\")\n self.assertTrue( (sol == [0,0,0,1]) or (sol == [1,1,1,0]) )",
"def __isub__(self, other):\n self.components = [c1 - c2 for (c1, c2) in zip(self.components, other.components)]\n return self",
"def prepare_vectors(self, c1: Counter, c2: Counter) -> Tuple[list, list]:\n\n items = set(c1.keys()).union(set(c2.keys()))\n \n v1 = [c1[key] for key in items]\n v2 = [c2[key] for key in items] \n\n return v1, v2",
"def region_sets(self,listA,listB):\n self.setA = GenomicRegionSet('for Unit Test')\n for i in range(len(listA)):\n self.setA.add(GenomicRegion(chrom=listA[i][0], initial=listA[i][1], final=listA[i][2]))\n \n self.setB = GenomicRegionSet('for Unit Test')\n for i in range(len(listB)):\n self.setB.add(GenomicRegion(chrom=listB[i][0], initial=listB[i][1], final=listB[i][2]))",
"def test_rebulding_zpf_boundary_sets_regions():\n\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_310 = CompsetPair([\n BinaryCompset('P1', 310, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 310, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))\n\n # Initial compsets\n zpfbs.add_compsets(compsets_298)\n assert len(zpfbs.all_compsets) == 1\n assert len(zpfbs.two_phase_regions) == 1\n\n # Compsets added create a new region because phases changed\n zpfbs.add_compsets(compsets_300_diff_phases)\n assert len(zpfbs.all_compsets) == 2\n assert len(zpfbs.two_phase_regions) == 2\n\n # Compsets added create a new region because phases the temperature is out of tolerance\n zpfbs.add_compsets(compsets_310)\n assert len(zpfbs.all_compsets) == 3\n assert len(zpfbs.two_phase_regions) == 3\n\n # Rebuild the regions with a larger tolerance should create two regions with one and two compsets.\n zpfbs.rebuild_two_phase_regions(Ttol=20)\n assert len(zpfbs.all_compsets) == 3\n assert len(zpfbs.two_phase_regions) == 2\n assert sorted([len(tpr.compsets) for tpr in zpfbs.two_phase_regions]) == [1, 2]",
"def test_sparsities_differing_map_pairs_not_cached(self, backend, m1, m2, ds2):\n sp1 = op2.Sparsity((ds2, ds2), (m1, m2))\n sp2 = op2.Sparsity((ds2, ds2), (m2, m1))\n assert sp1 is not sp2",
"def complement(self):\n N = self._size + 1\n new_covers = [[N - i[0], N - i[1]] for i in self._poset.cover_relations_iterator()]\n return TamariIntervalPoset(N - 1, new_covers)",
"def alt_clueset(self):\n sames = self.get_same_mapping()\n new_clues = []\n has_changes = False\n\n for clue in self.clueset:\n if (clue[\"type\"] != SAME and clue[\"type\"] != ISAT):\n alt = self.new_clue(sames, clue)\n if alt:\n new_clues.append(alt)\n has_changes = True\n else:\n new_clues.append(clue)\n\n return new_clues if has_changes else None",
"def delete_comp_outside_cluster(cls_dic,compos):\n #so that merge will not contain cluster\n del_comp = []\n for idx, comp in enumerate(compos):\n box1 = [comp.bbox.row_min, comp.bbox.row_max, comp.bbox.col_min, comp.bbox.col_max]\n for idx2, comp2 in enumerate(compos):\n if idx == idx2:\n continue\n if comp.cls not in cls_dic.keys() or comp2.cls in cls_dic.keys():\n continue\n box2 = [comp2.bbox.row_min, comp2.bbox.row_max, comp2.bbox.col_min, comp2.bbox.col_max]\n #\n if cal_iou(box1, box2)[0] >=0.5 and cal_iou(box1, box2)[0] > cal_iou(box1, box2)[1]:\n del_comp.append(idx2)\n # print('del',box2)\n new_comp = []\n for idx, comp in enumerate(compos):\n if idx not in del_comp:\n new_comp.append(comp)\n return new_comp",
"def test_clifford_1_qubit_generation(self):\n clifford_dicts = [\n {\"stabilizer\": [\"+Z\"], \"destabilizer\": [\"+X\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"+Z\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"+X\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"+Z\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"+Z\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"+X\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"-Z\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"+X\"]},\n {\"stabilizer\": [\"+X\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"-Z\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"-Z\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"-Z\"], \"destabilizer\": [\"+Y\"]},\n {\"stabilizer\": [\"+Y\"], \"destabilizer\": [\"-Z\"]},\n {\"stabilizer\": [\"+Z\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"+Z\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"-X\"]},\n {\"stabilizer\": [\"-X\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"+Z\"], \"destabilizer\": [\"-Y\"]},\n {\"stabilizer\": [\"-Y\"], \"destabilizer\": [\"+Z\"]},\n ]\n cliffords = [Clifford.from_dict(i) for i in clifford_dicts]\n for n in range(24):\n clifford = CliffordUtils.clifford_1_qubit(n)\n self.assertEqual(clifford, cliffords[n])",
"def _complement(self):\n assert self._.d == 2, \"the complement is only defined for two classes\"\n kargs = {\"complement\": self}\n if self._has(\"p\"):\n kargs[\"p\"] = self._.p.reorder([0, 2, 1], inplace=False)\n elif self._has(\"q\"):\n kargs[\"q\"] = self._.q.reorder([0, 2, 1], inplace=False)\n elif self._has(\"P\"):\n kargs[\"P\"] = self._.P[[0, 2, 1], [0, 2, 1]]\n elif self._has(\"Q\"):\n kargs[\"Q\"] = self._.Q[[0, 2, 1], [0, 2, 1]]\n return ASParameters(**kargs)",
"def unique(combo, out):\n # This lets us find only minimally covering payments (you should never add cards to a payment that already\n # satisfies the charge)\n for el in out:\n if set(el).issubset(combo):\n return False\n return True",
"def remove_ill_matched_pair(phi1,S1,TU1,TV1): #---- remove ill matched pair\r\n #--- mark inlier= 1; outlier= 0 ---\r\n mask, phi0= pano_tools.remove_outlier(phi1);\r\n mask, S0 = pano_tools.remove_outlier(S1 ,Nstd=2, mask= mask);\r\n mask, TU0 = pano_tools.remove_outlier(TU1 ,Nstd=2, mask= mask);\r\n mask, TV0 = pano_tools.remove_outlier(TV1 ,Nstd=2, mask= mask); \r\n mask, phi0= pano_tools.remove_outlier(phi1,Nstd=3, mask= mask);\r\n mask, S0 = pano_tools.remove_outlier(S1 ,Nstd=3, mask= mask);\r\n mask, TU0 = pano_tools.remove_outlier(TU1 ,Nstd=3, mask= mask);\r\n #--- select reliable data pair ---\r\n # mask is M*M matrix: 1= reliable pair combination;\r\n M = phi1.shape[0];\r\n sumx= np.sum(mask,axis=0); # large number= reliable\r\n seq = []; # chosen reliable data\r\n for k in range(0, int(M*0.7)):\r\n maxx = np.argmax(sumx);\r\n seq.append(maxx);\r\n sumx[maxx]= 0; \r\n return seq, phi0, S0, TU0, TV0",
"def listOfOverlappingTTPairs():\n listOfHalfModules = listOfTTHalfModules()\n ttmap = TTModulesMap_instance\n pairs = []\n regions = {'A':1, 'B':2, 'C':3}\n print \"Overlapping TT half modules:\"\n for hm1 in listOfHalfModules:\n for hm2 in listOfHalfModules:\n # they must be different\n if hm1 == hm2: continue\n # they must be both on top or both on bottom\n if locateTTHalfModule(hm1)[3] != locateTTHalfModule(hm2)[3]: continue\n # they must be on the same layer\n if locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0]: continue\n # avoid duplicates\n if (hm1, hm2) in pairs: continue\n if (hm2, hm1) in pairs: continue\n # they must be contiguous:\n if (locateTTHalfModule(hm1)[1] == locateTTHalfModule(hm2)[1]):\n if (abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) == 1):\n pairs.append( (hm1, hm2) )\n else:\n num1 = locateTTHalfModule(hm1)[2]\n num2 = locateTTHalfModule(hm2)[2]\n max1 = ttmap.numberOfModules[locateTTHalfModule(hm1)[0]]['Region'+locateTTHalfModule(hm1)[1]] - 1\n max2 = ttmap.numberOfModules[locateTTHalfModule(hm2)[0]]['Region'+locateTTHalfModule(hm2)[1]] - 1\n nreg1 = regions[locateTTHalfModule(hm1)[1]]\n nreg2 = regions[locateTTHalfModule(hm2)[1]]\n if ( (num1==max1 and num2==0 and nreg2-nreg1==1) or (num2==max2 and num1==0 and nreg1-nreg2==1) ):\n pairs.append( (hm1, hm2) )\n print '\\t', hm1, hm2\n ## - same region\n #if ((abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) != 1)\n # and (locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])): continue\n ## - or neighbouring region\n #elif not ((locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])\n # and ( ( (ttmap.numberOfModules[locateTTHalfModule(hm1)[0]] == locateTTHalfModule(hm1)[2]+1 )\n # and (locateTTHalfModule(hm2)[2] == 0) )\n # or ( (ttmap.numberOfModules[locateTTHalfModule(hm2)[0]] == locateTTHalfModule(hm2)[2]+1 )\n # and (locateTTHalfModule(hm1)[2] == 0) ) ) ): continue\n ## append to list of pairs\n #pairs.append( (hm1, hm2) )\n print\n return pairs",
"def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])",
"def __xor__(self, other: Compound[Scalar]) -> Compound[Scalar]:\n return (self._pack_points(self._points_set ^ other._points_set)\n if isinstance(other, Multipoint)\n else NotImplemented)",
"def comp_het_pair(self, gt_types1, gt_bases1,\n gt_types2, gt_bases2,\n gt_phases1=None,\n gt_phases2=None,\n ref1=None, alt1=None,\n ref2=None, alt2=None,\n allow_unaffected=False,\n fast_mode=False,\n pattern_only=False):\n if gt_phases1 is None:\n gt_phases1 = [\"|\" in b for b in gt_bases1]\n if gt_phases2 is None:\n gt_phases2 = [\"|\" in b for b in gt_bases2]\n\n if ref1 is None and alt1 is None:\n ref1, alt1 = self._get_ref_alt(gt_types1, gt_bases1)\n\n if ref2 is None and alt2 is None:\n ref2, alt2 = self._get_ref_alt(gt_types2, gt_bases2)\n\n idxs = set(s._i for s in self.subjects)\n\n self.famphase(gt_types1, gt_phases1, gt_bases1,\n length_check=False)\n self.famphase(gt_types2, gt_phases2, gt_bases2,\n length_check=False)\n\n # we index by sample._i, but we only need to split for the current\n # samples so we check if i in idxs.\n gt_bases1 = [b.split(\"|\" if p else \"/\") if i in idxs else b for i,\n (b, p) in enumerate(zip(gt_bases1, gt_phases1))]\n gt_bases2 = [b.split(\"|\" if p else \"/\") if i in idxs else b for i,\n (b, p) in enumerate(zip(gt_bases2, gt_phases2))]\n\n # get in (0, 1) format instead of (A, T)\n try:\n ra = {ref1: 0, alt1: 1, \".\": 2}\n gt_nums1 = [(ra[b[0]], ra[b[1]]) if i in idxs else None for i, b in enumerate(gt_bases1)]\n ra = {ref2: 0, alt2: 1, \".\": 2}\n gt_nums2 = [(ra[b[0]], ra[b[1]]) if i in idxs else None for i, b in enumerate(gt_bases2)]\n except KeyError:\n warn(\"can't phase sites with multiple alternate alleles\\n\")\n return {'candidate': False}\n except IndexError:\n # alternate is unknown, e.g. \"A/.\"\n return {'candidate': False}\n\n if pattern_only:\n return self._comp_het_pair_pattern(gt_types1, gt_nums1,\n gt_types2, gt_nums2,\n gt_phases1, gt_phases2)\n\n unaffecteds = self.unaffecteds\n for un in unaffecteds:\n if gt_types2[un._i] == HOM_ALT or gt_types1[un._i] == HOM_ALT:\n return {'candidate': False}\n\n ret = {'affected_phased': [], 'unaffected_phased': [],\n 'unaffected_unphased': [], 'affected_unphased': [],\n 'affected_dn': [],\n 'affected_skipped': [], 'candidates': []}\n\n aff = None\n for aff in self.affecteds:\n if gt_types1[aff._i] != HET or gt_types2[aff._i] != HET:\n ret['affected_skipped'].append(aff)\n ret['candidate'] = False\n if fast_mode: break\n continue\n\n aff_phased = gt_phases1[aff._i] and gt_phases2[aff._i]\n # on same chrom.\n if aff_phased and gt_nums1[aff._i] == gt_nums2[aff._i]:\n ret['affected_skipped'].append(aff)\n # Remove candidates where an affected from the same family does\n # NOT share the same het pair.\n ret['candidate'] = False\n if fast_mode: break\n continue\n\n if not 'candidate' in ret: ret['candidate'] = True\n if aff_phased:\n ret['affected_phased'].append(aff)\n\n else:\n dn = False\n if gt_phases1[aff._i] or gt_phases2[aff._i]: # one phased and one denovo\n # 2nd allele is unphased, check for DN.\n # NOTE! 
if any other sample is HET at the DN site, it is\n # not considered as a candidate.\n if not gt_phases2[aff._i] and gt_types2[aff.mom._i] == HOM_REF and gt_types2[aff.dad._i] == HOM_REF and sum(gt_types2[u._i] in (HET, HOM_ALT) for u in unaffecteds) == 0:\n dn = True\n ret['affected_dn'].append(aff)\n # 1st allele is unphased, check for DN.\n elif not gt_phases1[aff._i] and gt_types1[aff.mom._i] == HOM_REF and gt_types1[aff.dad._i] == HOM_REF and sum(gt_types1[u._i] in (HET, HOM_ALT) for u in unaffecteds) == 0:\n dn = True\n ret['affected_dn'].append(aff)\n\n # we have to check that the parent is not HOM_REF at both\n # sites or HOM_ALT at both sites.\n if not dn:\n for parent in (aff.mom, aff.dad):\n if parent is None: continue\n if gt_types1[parent._i] == gt_types2[parent._i] and gt_types1[parent._i] != HET:\n ret['candidate'] = False\n ret['non-het-parent'] = True\n\n ret['affected_unphased'].append(aff)\n ret['candidates'].append(aff)\n\n del aff\n if ret['candidates'] != []:\n\n for un in unaffecteds:\n if gt_types1[un._i] != HET or gt_types2[un._i] != HET:\n continue\n\n is_phased = gt_phases1[un._i] and gt_phases2[un._i]\n # unaffected has the candidate pair on the same chromosome\n if is_phased and gt_nums1[un._i] == gt_nums2[un._i]:\n continue\n\n if is_phased:\n # found an unaffected with the same het-pair.\n ret['unaffected_phased'].append(un)\n if not allow_unaffected:\n ret['candidate'] = False\n if fast_mode: break\n else:\n ret['unaffected_unphased'].append(un)\n if not 'candidate' in ret:\n ret['candidate'] = False\n ret['priority'] = None\n elif ret['candidate']:\n\n ret['priority'] = 3\n if len(ret['affected_phased']) and len(ret['unaffected_unphased']) == 0:\n ret['priority'] = 1\n\n if len(ret['affected_dn']) and len(ret['unaffected_unphased']) == 0:\n ret['priority'] = 1.5\n # priority 2 for a single unphased affected.\n elif len(ret['affected_unphased']) and len(ret['unaffected_unphased']) == 0:\n ret['priority'] = 2\n\n return ret",
"def test_sparsities_differing_map_tuples_not_cached(self, backend, m1, m2, ds2):\n sp1 = op2.Sparsity((ds2, ds2), ((m1, m1), (m2, m2)))\n sp2 = op2.Sparsity((ds2, ds2), ((m2, m2), (m2, m2)))\n assert sp1 is not sp2",
"def get_cross_synset_pairs(src_synset: Synset, dst_synset: Synset) -> list:\n # Remove phrasal expressions from the literals\n src_literals = remove_phrases(src_synset.literals)\n dst_literals = remove_phrases(dst_synset.literals)\n\n return unique([tuple(sorted((w1, w2), key=itemgetter(0))) for w1 in src_literals for w2 in dst_literals])",
"def make_pairs(concepts):\n\tfor sub, obj in combinations(concepts, 2):\n\t\tif sub[1] != obj[1]:\n\t\t\tyield (sub, obj)",
"def test_selection():\n pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_answer_0)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy())\n nrg = ncs_obj_phil.get_ncs_restraints_group_list()\n\n m1 = list(nrg[0].master_iselection)\n c1 = list(nrg[0].copies[0].iselection)\n c2 = list(nrg[0].copies[1].iselection)\n\n assert len(m1) == len(c1) # renumbering\n assert m1 == [0, 1, 2, 3, 4, 5, 6] # 0, 1, X, 3, X, 5, X | 0, 1, 3\n assert c1 == [7, 8, 9, 10, 11, 12, 13] # 7, 8, 9, X, X, 12, X | 4, 5, 7\n assert c2 == [14, 15, 16, 17, 18, 19, 20] # 14, 15, X, 17, X, 19, X | 8, 9, 11\n\n selection1 = flex.size_t([0,1,5,3,100,101])\n selection2 = flex.size_t([0,1,5,3,7,8,9,12,100,101])\n selection3 = flex.size_t([0,1,5,3,7,8,9,12,14,15,19,17,100,101])\n # gone iseqs for selection3: 2,4,6,10,11,13,16,18,20-99\n\n new_nrg = nrg.select(flex.bool(102, selection1))\n # only atoms in master are selected\n mt = list(new_nrg[0].master_iselection)\n c1t = list(new_nrg[0].copies[0].iselection)\n\n assert mt == []\n assert c1t == []\n\n # atoms selected in both master and copies\n new_nrg = nrg.select(flex.bool(102, selection2))\n # only atoms in master are selected\n mt = list(new_nrg[0].master_iselection)\n c1t = list(new_nrg[0].copies[0].iselection)\n\n assert mt == []\n assert c1t == []\n\n new_nrg = nrg.select(flex.bool(102, selection3))\n # only atoms in master are selected\n mt = list(new_nrg[0].master_iselection)\n c1t = list(new_nrg[0].copies[0].iselection)\n c2t = list(new_nrg[0].copies[1].iselection)\n\n assert mt == [0, 1, 3], list(mt)\n assert c1t == [4, 5, 7], list(c1t)\n assert c2t == [8, 9, 11], list(c2t)"
] | [
"0.72638243",
"0.66531706",
"0.5972164",
"0.5763779",
"0.5614874",
"0.5568291",
"0.5500854",
"0.5419175",
"0.5378876",
"0.5364433",
"0.5332054",
"0.52813214",
"0.5249226",
"0.524922",
"0.5237803",
"0.52325755",
"0.521825",
"0.5183979",
"0.5174692",
"0.5155056",
"0.51443166",
"0.5143809",
"0.5138787",
"0.51104426",
"0.51074505",
"0.5107027",
"0.5099818",
"0.50967985",
"0.5073123",
"0.50658077"
] | 0.73919016 | 0 |
Test that new composition sets can be added to ZPFBoundarySets successfully. | def test_adding_compsets_to_zpf_boundary_sets():
compsets_298 = CompsetPair([
BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),
])
compsets_300 = CompsetPair([
BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),
])
compsets_300_diff_phases = CompsetPair([
BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),
])
zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))
assert zpfbs.components == ['A', 'B']
assert len(zpfbs.two_phase_regions) == 0
assert len(zpfbs.all_compsets) == 0
zpfbs.add_compsets(compsets_298)
assert len(zpfbs.all_compsets) == 1
assert len(zpfbs.two_phase_regions) == 1
zpfbs.add_compsets(compsets_300) # same region, different temperature
assert len(zpfbs.all_compsets) == 2
assert len(zpfbs.two_phase_regions) == 1
zpfbs.add_compsets(compsets_300_diff_phases) # new region, different phases
assert len(zpfbs.all_compsets) == 3
assert len(zpfbs.two_phase_regions) == 2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_rebulding_zpf_boundary_sets_regions():\n\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_310 = CompsetPair([\n BinaryCompset('P1', 310, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 310, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))\n\n # Initial compsets\n zpfbs.add_compsets(compsets_298)\n assert len(zpfbs.all_compsets) == 1\n assert len(zpfbs.two_phase_regions) == 1\n\n # Compsets added create a new region because phases changed\n zpfbs.add_compsets(compsets_300_diff_phases)\n assert len(zpfbs.all_compsets) == 2\n assert len(zpfbs.two_phase_regions) == 2\n\n # Compsets added create a new region because phases the temperature is out of tolerance\n zpfbs.add_compsets(compsets_310)\n assert len(zpfbs.all_compsets) == 3\n assert len(zpfbs.two_phase_regions) == 3\n\n # Rebuild the regions with a larger tolerance should create two regions with one and two compsets.\n zpfbs.rebuild_two_phase_regions(Ttol=20)\n assert len(zpfbs.all_compsets) == 3\n assert len(zpfbs.two_phase_regions) == 2\n assert sorted([len(tpr.compsets) for tpr in zpfbs.two_phase_regions]) == [1, 2]",
"def test_zpf_boundary_set_scatter_plot():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))\n zpfbs.add_compsets(compsets_298)\n zpfbs.add_compsets(compsets_300_diff_phases)\n boundaries, tielines, legend = zpfbs.get_scatter_plot_boundaries()\n x, y, col = boundaries['x'], boundaries['y'], boundaries['c']\n assert len(x) > 0\n assert len(x) == len(y)\n assert len(x) == len(col)\n assert len(tielines._paths) > 0",
"def test_zpf_boundary_sets_line_plot():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))\n zpfbs.add_compsets(compsets_298)\n zpfbs.add_compsets(compsets_300_diff_phases)\n boundaries, tielines, legend = zpfbs.get_line_plot_boundaries()\n assert len(boundaries._paths) > 0\n assert len(tielines._paths) > 0",
"def test_two_phase_region_expands_as_compsets_are_added():\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_305 = CompsetPair([\n BinaryCompset('P1', 305, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 305, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_312 = CompsetPair([\n BinaryCompset('P1', 312, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 312, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K\n # compsets don't belong because they are outside the temperature tolerance (10 K)\n assert tpr.compsets_belong_in_region(compsets_312) is False\n assert tpr.compsets_belong_in_region(compsets_305)\n tpr.add_compsets(compsets_305)\n # 312 K compsets could be added now that the 305 K is within 10 K.\n assert tpr.compsets_belong_in_region(compsets_312)",
"def test_composition(self):",
"def test_add_new_surface_objects(self):\n\n # create object with ReactionSystem behavior\n class rsys:\n pass\n\n class item:\n pass\n\n T = item()\n P = item()\n T.value_si = 1000.0\n P.value_si = 101000.0\n rsys.T = T\n rsys.P = P\n procnum = 2\n\n cerm = CoreEdgeReactionModel()\n\n spcA = Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs]\n\n rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum)))\n\n for rxn in rxns:\n cerm.make_new_reaction(rxn)\n\n cerm.core.species = [spcA] + spcs\n\n corerxns = []\n edgerxns = []\n edgespcs = set()\n for rxn in rxns:\n if set(rxn.reactants + rxn.products) <= set(cerm.core.species):\n corerxns.append(rxn)\n else:\n edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products)\n edgerxns.append(rxn)\n\n cerm.edge.species += list(edgespcs)\n\n cerm.core.reactions = corerxns\n cerm.edge.reactions = edgerxns\n\n cerm.surface.species = []\n cerm.surface.reactions = []\n\n new_surface_reactions = [cerm.edge.reactions[0]]\n new_surface_species = []\n obj = new_surface_reactions\n\n cerm.add_new_surface_objects(obj, new_surface_species, new_surface_reactions, rsys)\n\n empty = set()\n\n self.assertEqual(cerm.new_surface_spcs_add, empty)\n self.assertEqual(cerm.new_surface_spcs_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_loss, empty)\n self.assertEqual(cerm.new_surface_rxns_add, set([cerm.edge.reactions[0]]))",
"def test_list_compositions(self):\n pass",
"def test_update_composition(self):\n pass",
"def test_setlist(self):\n self.assertEqual(len(self.show.setlist), 19)",
"def test_create_collection(self):\n pass",
"def test_two_phase_region_new_phases_does_not_belong():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K\n assert tpr.compsets_belong_in_region(compsets_300_diff_phases) is False",
"def test_setlist(self):\n self.assertEqual(self.show.setlist, [])",
"def test_get_placements(self):\n subv = list()\n for i in range(5):\n subv.append(SimpleMachineVertex(None, \"\"))\n\n pl = list()\n for i in range(4):\n pl.append(Placement(subv[i], 0, 0, i))\n\n pls = Placements(pl)\n container = pls.placements\n for i in range(4):\n self.assertIn(pl[i], container)",
"def test_container(self):\r\n\r\n fc = FlowgramCollection({'a': '1.0 0.0 0.0 1.0 1.0 1.2 1.2 0.8',\r\n 'b': '1.2 1.0 0.0 0.8 1.2 2.4 1.0 0.0'})\r\n\r\n f_container = FlowgramContainerFile(header)\r\n\r\n for f in fc:\r\n f_container.add(f)\r\n\r\n for f_obs, f_exp in zip(f_container, fc):\r\n self.assertEqual(str(f_obs), str(f_exp))\r\n\r\n # adding after iter started raises errror\r\n self.assertRaises(ValueError, f_container.add, f_obs)",
"def test_components(self):\n\n good_cpts = self.good.components.components.copy()\n\n for cid, cpt in self.actual.components.components.items():\n goodcpt = good_cpts.pop(cid)\n\n self.assertEqual(cpt.name, goodcpt.name)\n self.assertEqual(cpt.attributes, goodcpt.attributes)\n self.assertEqual(len(cpt.symbols), 1)\n self.assertEqual(len(cpt.symbols[0].bodies), 1)\n\n body = cpt.symbols[0].bodies[0]\n goodbody = goodcpt.symbols[0].bodies[0]\n\n self.assertEqual(len(body.shapes), len(goodbody.shapes))\n for shape, goodshape in zip(body.shapes, goodbody.shapes):\n self.assertEqual(shape.__class__, goodshape.__class__)\n self.assertEqual(shape.json(), goodshape.json())\n\n self.assertEqual(len(body.pins), len(goodbody.pins))\n for pin, goodpin in zip(body.pins, goodbody.pins):\n self.assertEqual(pin.__class__, goodpin.__class__)\n self.assertEqual(pin.json(), goodpin.json())\n\n self.assertEqual(good_cpts, {})",
"def test_create_new_placements(self):\n subv = SimpleMachineVertex(None, \"\")\n pl = Placement(subv, 0, 0, 1)\n Placements([pl])",
"def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000",
"def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000",
"def test_patch_collection(self):\n pass",
"def test_Collection_basics():\n # pylint: disable=pointless-statement\n # data generated below\n data = pickle.load(\n open(os.path.abspath(\"./tests/testdata/testdata_Collection.p\"), \"rb\")\n )\n mags, dims2, dims3, posos, angs, axs, anchs, movs, rvs, _ = data\n\n B1, B2 = [], []\n for mag, dim2, dim3, ang, ax, anch, mov, poso, rv in zip(\n mags, dims2, dims3, angs, axs, anchs, movs, posos, rvs\n ):\n rot = R.from_rotvec(rv)\n\n pm1b = magpy.magnet.Cuboid(mag[0], dim3[0])\n pm2b = magpy.magnet.Cuboid(mag[1], dim3[1])\n pm3b = magpy.magnet.Cuboid(mag[2], dim3[2])\n pm4b = magpy.magnet.Cylinder(mag[3], dim2[0])\n pm5b = magpy.magnet.Cylinder(mag[4], dim2[1])\n pm6b = magpy.magnet.Cylinder(mag[5], dim2[2])\n\n pm1 = magpy.magnet.Cuboid(mag[0], dim3[0])\n pm2 = magpy.magnet.Cuboid(mag[1], dim3[1])\n pm3 = magpy.magnet.Cuboid(mag[2], dim3[2])\n pm4 = magpy.magnet.Cylinder(mag[3], dim2[0])\n pm5 = magpy.magnet.Cylinder(mag[4], dim2[1])\n pm6 = magpy.magnet.Cylinder(mag[5], dim2[2])\n\n col1 = magpy.Collection(pm1, pm2, pm3)\n col1.add(pm4, pm5, pm6)\n\n # 18 subsequent operations\n for a, aa, aaa, mv in zip(ang, ax, anch, mov):\n for pm in [pm1b, pm2b, pm3b, pm4b, pm5b, pm6b]:\n pm.move(mv).rotate_from_angax(a, aa, aaa).rotate(rot, aaa)\n\n col1.move(mv).rotate_from_angax(a, aa, aaa, start=-1).rotate(\n rot, aaa, start=-1\n )\n\n B1 += [magpy.getB([pm1b, pm2b, pm3b, pm4b, pm5b, pm6b], poso, sumup=True)]\n B2 += [col1.getB(poso)]\n\n B1 = np.array(B1)\n B2 = np.array(B2)\n\n np.testing.assert_allclose(B1, B2)",
"def test_create_existing(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n\n dset = grp.require_dataset('foo', (10, 3), 'float32')\n dset2 = grp.require_dataset('foo', (10, 3), 'float32')\n\n assert dset == dset2",
"def test_enforcement_boundary_create_command_success(\n mock_client, enforcement_boundary_success, monkeypatch\n):\n monkeypatch.setattr(\n illumio.pce.PolicyComputeEngine._PCEObjectAPI,\n \"create\",\n lambda *a: EnforcementBoundary.from_json(enforcement_boundary_success),\n )\n resp = enforcement_boundary_create_command(\n mock_client,\n {\n \"name\": \"test_enforcement_boundary_1\",\n \"port\": 1,\n \"protocol\": \"udp\",\n \"providers\": [\"/orgs/1/labels/1\"],\n \"consumers\": \"ams\",\n },\n )\n\n assert resp.raw_response == enforcement_boundary_success",
"def testCreate(self):\n layer = PublicLayerList.objects.create(active=True)\n path = os.path.dirname(os.path.abspath(__file__))\n\n f = File(open(path + '/fixtures/public_layers.kml'))\n settings.MEDIA_URL = ''\n layer.kml_file.save('kml-file.kml', f)\n # 2 because the initial_data fixture loads one\n self.assertEquals(PublicLayerList.objects.count(), 2)\n self.assertTrue(layer.kml_file.size > 0)",
"def test_add_group(self):\n pass",
"def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3",
"def test_compund(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dt = np.dtype( [('weight', np.float64),\n ('cputime', np.float64),\n ('walltime', np.float64),\n ('parents_offset', np.uint32),\n ('n_parents', np.uint32),\n ('status', np.uint8),\n ('endpoint_type', np.uint8)])\n\n testdata = np.ndarray((16,), dtype=dt)\n for key in dt.fields:\n testdata[key] = np.random.random((16,))*100\n\n # print(testdata)\n\n grp['test'] = testdata\n outdata = grp['test'][()]\n assert np.all(outdata == testdata)\n assert outdata.dtype == testdata.dtype",
"def test_get_composition(self):\n pass",
"def test_add_containing_place_new(conflict_places):\n new_place = \"Metal Shop\"\n contained_places = [\"Room A\", \"Room B\"]\n conflict_places.add_containing_place(new_place, contained_places)\n assert_conflict_place(conflict_places, new_place, UncheckedPlace)\n for name in contained_places:\n assert_conflict_place(conflict_places, name, UncheckedPlace)\n assert_contains(conflict_places, new_place, contained_places)",
"def testBits(self):\n\n fullPlaneNameList = list(MaskPlaneNameIDDict.keys())\n totNumBits = countBits(MaxBitMask)\n for i in range(len(fullPlaneNameList)):\n numPlanes = i + 1\n setPlaneNameList = fullPlaneNameList[0:numPlanes]\n\n bitMask = coaddUtils.makeBitMask(setPlaneNameList)\n self.assertEqual(countBits(bitMask), numPlanes)\n for planeName, planeId in MaskPlaneNameIDDict.items():\n self.assertEqual((bitMask & (1 << planeId)) > 0, planeName in setPlaneNameList)\n\n invBitMask = coaddUtils.makeBitMask(setPlaneNameList, doInvert=True)\n self.assertEqual(countBits(invBitMask), totNumBits - numPlanes)\n for planeName, planeId in MaskPlaneNameIDDict.items():\n self.assertEqual((invBitMask & (1 << planeId)) > 0, planeName not in setPlaneNameList)",
"def test_adding_points(self, A, B):\n layer = Points(A)\n assert len(layer.data) == A.shape[0]\n\n layer.add(B)\n\n assert len(layer.data) == A.shape[0] + B.shape[0]\n assert_array_equal(layer.data, np.concatenate((A, B)))\n assert layer.selected_data == set(range(A.shape[0], A.shape[0] + B.shape[0]))\n # assert layer.selected_data == {A.shape[0] + B.shape[0] - 1}\n\n layer.remove_selected()\n assert len(layer.data) == A.shape[0]\n assert_array_equal(layer.data, A)"
] | [
"0.7422872",
"0.6564256",
"0.6453201",
"0.61937606",
"0.61298454",
"0.6103981",
"0.5989328",
"0.59409636",
"0.59300935",
"0.59042823",
"0.5739416",
"0.5732022",
"0.5718276",
"0.5715971",
"0.56820273",
"0.56806755",
"0.56332743",
"0.56332743",
"0.5622735",
"0.5620251",
"0.5605196",
"0.5578928",
"0.5561632",
"0.55552083",
"0.55288404",
"0.55239695",
"0.55191547",
"0.55125356",
"0.5504141",
"0.55003566"
] | 0.8190679 | 0 |
Test that three regions generated by ZPFBoundarySets can correctly be rebuilt to two regions | def test_rebulding_zpf_boundary_sets_regions():
compsets_298 = CompsetPair([
BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),
])
compsets_310 = CompsetPair([
BinaryCompset('P1', 310, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 310, 'B', 0.8, [0.2, 0.8]),
])
compsets_300_diff_phases = CompsetPair([
BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),
])
zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))
# Initial compsets
zpfbs.add_compsets(compsets_298)
assert len(zpfbs.all_compsets) == 1
assert len(zpfbs.two_phase_regions) == 1
# Compsets added create a new region because phases changed
zpfbs.add_compsets(compsets_300_diff_phases)
assert len(zpfbs.all_compsets) == 2
assert len(zpfbs.two_phase_regions) == 2
    # Compsets added create a new region because the temperature is out of tolerance
zpfbs.add_compsets(compsets_310)
assert len(zpfbs.all_compsets) == 3
assert len(zpfbs.two_phase_regions) == 3
    # Rebuilding the regions with a larger tolerance should create two regions with one and two compsets.
zpfbs.rebuild_two_phase_regions(Ttol=20)
assert len(zpfbs.all_compsets) == 3
assert len(zpfbs.two_phase_regions) == 2
assert sorted([len(tpr.compsets) for tpr in zpfbs.two_phase_regions]) == [1, 2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_build_genomic_regions(self):\n\n CDS = pybedtools.BedTool(\"\"\"chr1\\t7700\\t7900\\tfoo\\t0\\t+\\n\n chr1\\t7999\\t8500\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR5 = pybedtools.BedTool(\"\"\"chr1\\t7499\\t7700\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR3 = pybedtools.BedTool(\"\"\"chr1\\t8500\\t9000\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n proxintron = pybedtools.BedTool(\"\"\"chr1\\t100\\t300\\tfoo\\t0\\t+\\n\n chr1\\t798\\t998\\tfoo\\t0\\t+\\n\n chr1\\t2000\\t2200\\tfoo\\t0\\t+\\n\n chr1\\t2798\\t2998\\tfoo\\t0\\t+\\n\n chr1\\t6000\\t6200\\tfoo\\t0\\t+\\n\n chr1\\t6798\\t6998\\tfoo\\t0\\t+\\n\n chr1\\t7900\\t7998\\tfoo\\t0\\t+\\n\"\"\", from_string = True\n )\n distintron = pybedtools.BedTool(\"\"\"chr1\\t301\\t797\\tfoo\\t0\\t+\\n\n chr1\\t2201\\t2797\\tfoo\\t0\\t+\\n\n chr1\\t6201\\t6797\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n \n regions = build_genomic_regions(pybedtools.BedTool(clipper.test_file(\"test.gtf\")), prox_distance=200) \n \n #print UTR3\n\n #print regions['UTR3']\n print proxintron\n print regions['proxintron']\n #print regions['distintron']\n \n self.assertEqual(len(CDS.intersect(regions['CDS'], f= 1.0, r = True)), 2)\n self.assertEqual(len(UTR5.intersect(regions['UTR5'], f= 1.0, r = True)), 1)\n self.assertEqual(len(UTR3.intersect(regions['UTR3'], f= 1.0, r = True)), 1)\n self.assertEqual(len(proxintron.intersect(regions['proxintron'], f= 1.0, r = True)), 7)\n self.assertEqual(len(distintron.intersect(regions['distintron'], f= 1.0, r = True)), 3)",
"def test_bkg_regions(i07_nexus: I07Nexus, regions):\n for i, _ in enumerate(regions):\n assert i07_nexus.background_regions[i] == regions[i]",
"def test_adding_compsets_to_zpf_boundary_sets():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))\n assert zpfbs.components == ['A', 'B']\n assert len(zpfbs.two_phase_regions) == 0\n assert len(zpfbs.all_compsets) == 0\n\n zpfbs.add_compsets(compsets_298)\n assert len(zpfbs.all_compsets) == 1\n assert len(zpfbs.two_phase_regions) == 1\n\n zpfbs.add_compsets(compsets_300) # same region, different temperature\n assert len(zpfbs.all_compsets) == 2\n assert len(zpfbs.two_phase_regions) == 1\n\n zpfbs.add_compsets(compsets_300_diff_phases) # new region, different phases\n assert len(zpfbs.all_compsets) == 3\n assert len(zpfbs.two_phase_regions) == 2",
"def region_sets(self,listA,listB):\n self.setA = GenomicRegionSet('for Unit Test')\n for i in range(len(listA)):\n self.setA.add(GenomicRegion(chrom=listA[i][0], initial=listA[i][1], final=listA[i][2]))\n \n self.setB = GenomicRegionSet('for Unit Test')\n for i in range(len(listB)):\n self.setB.add(GenomicRegion(chrom=listB[i][0], initial=listB[i][1], final=listB[i][2]))",
"def test_two_phase_region_expands_as_compsets_are_added():\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_305 = CompsetPair([\n BinaryCompset('P1', 305, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 305, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_312 = CompsetPair([\n BinaryCompset('P1', 312, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 312, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K\n # compsets don't belong because they are outside the temperature tolerance (10 K)\n assert tpr.compsets_belong_in_region(compsets_312) is False\n assert tpr.compsets_belong_in_region(compsets_305)\n tpr.add_compsets(compsets_305)\n # 312 K compsets could be added now that the 305 K is within 10 K.\n assert tpr.compsets_belong_in_region(compsets_312)",
"def test_assign_to_regions(self):\n \n tool = pybedtools.BedTool(clipper.test_file(\"FOX2Brain-05.15.09.polyATrim.adapterTrim.rmRep.sorted.rmDup.peaks.bed\"))\n \n assign_to_regions(tool=tool, \n clusters=\"test\", \n speciesFA= clipper.test_file(\"mm9.fa\"), \n regions_dir=os.path.join(clipper.test_dir(), \"regions\"), \n regions={\"exons\" : \"Exon\", \"utr3\" : \"3' UTR\", \n \"utr5\" : \"5' UTR\", \"proxintron500\" : \"Proximal Intron\", \n \"distintron500\" : \"Distal Intron\"} ,\n assigned_dir = clipper.test_dir(),\n fasta_dir = clipper.test_dir(),\n species=\"mm9\", \n nrand = 3, \n getseq=False)",
"def test_two_phase_region_usage():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K\n assert tpr.compsets_belong_in_region(compsets_300)\n tpr.add_compsets(compsets_300)\n assert len(tpr.compsets) == 2",
"def test_two_phase_region_new_phases_does_not_belong():\n compsets_298 = CompsetPair([\n BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_300_diff_phases = CompsetPair([\n BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K\n assert tpr.compsets_belong_in_region(compsets_300_diff_phases) is False",
"def test_four_square_regions(self):\n\n vertices_by_region = np.array([[0, 1, 2, 3], [1, 2, 7, 8], [2, 5, 6, 7], [2, 3, 4, 5]])\n centers_by_region = np.array([[1, 1], [3, 1], [3, 3], [1, 3]])\n vertices = np.array([[0, 0], [2, 0], [2, 2], [0, 2], [0, 4], [2, 4], [4, 4], [4, 2], [4, 0]])\n world = data.convert_to_world(vertices_by_region, centers_by_region, vertices)\n\n self.assertEqual({0: {0}, 1: {0, 1}, 2: {0, 1, 2, 3}, 3: {0, 3}, 4: {3}, 5: {2, 3}, 6: {2}, 7: {1, 2}, 8: {1}},\n world.regions_touching_vertex)\n self.assertEqual({0: {1, 3}, 1: {0, 2, 8}, 2: {1, 3, 5, 7}, 3: {0, 2, 4},\n 4: {3, 5}, 5: {2, 4, 6}, 6: {5, 7}, 7: {2, 6, 8}, 8: {1, 7}},\n world.vertices_touching_vertex)",
"def test_correct_data_under_boundaries(self):\n load_to_datastore(self.places_sofia, self.metadata_sofia)\n\n area_name = self.metadata_sofia.area_name\n\n # ensure only the boundary for the correct single area is loaded\n redis_boundaries_keys = r.keys(\"*%s*\" % cities_boundaries_template_key) # returns a list\n self.assertEqual(set([cities_boundaries_template_key + area_name]), set(redis_boundaries_keys))\n\n CommonAssertions.check_correct_boundaries_for_area(tester=self, metadata=self.metadata_sofia)",
"def test_bkg_regions_len(i07_nexus: I07Nexus, regions):\n assert len(i07_nexus.background_regions) == len(regions)",
"def test_signal_regions(i07_nexus: I07Nexus, regions):\n # Note: this should probably always be a for loop with just 1 iteration.\n for i, _ in enumerate(regions):\n assert i07_nexus.signal_regions[i] == regions[i]",
"def test_reverse_region(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n\n last line\n line c\n line b\n line a\n line 1\n first line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"7.0\"),\n after_sel=(\"7.10\", \"7.10\"),\n command_name=\"reverse-region\",\n )",
"def test_regions(self):\n for i, item in enumerate(self._letters_proto.item):\n for code in item.region:\n # Region codes should be alpha-2 (where possible) or alpha-3 codes as\n # defined by ISO 3166 standard.\n self.assertLess(1, len(code))\n self.assertGreater(4, len(code))\n self.assertTrue(code.isupper(), f'Letter {i}: Region code `{code}` '\n 'should be upper-case')\n if len(code) == 3:\n country = pycountry.countries.get(alpha_3=code)\n self.assertTrue(country, f'Failed to find country for code `{code}`')\n if hasattr(country, 'alpha_2'):\n self.fail(f'Letter {i}: Please use two-letter code '\n f'`{country.alpha_2}` instead of `{country.alpha_3}` '\n f'for {country.name}')\n else:\n country = pycountry.countries.get(alpha_2=code)\n self.assertTrue(country, f'Failed to find country for code {code}')",
"def test_multi_area(self):\n pass",
"def test_gets_different_data(self):\n print(\"Testing that get_region_data can return different data types\")\n\n test_ctd = get_region_data(np.array([[3505, 1, 0, 0]]), self.float_name, self.config,\n self.index, self.pres)\n test_bot = get_region_data(np.array([[3505, 0, 1, 0]]), self.float_name, self.config,\n self.index, self.pres)\n test_argo = get_region_data(np.array([[3505, 0, 0, 1]]), self.float_name, self.config,\n self.index, self.pres)\n\n self.assertTrue(test_ctd[0].shape[1] != test_argo[0].shape[1],\n \"Should get a different data set, if we have specified it\")\n self.assertTrue(test_bot[0].shape[1] != test_argo[0].shape[1],\n \"Should get a different data set, if we have specified it\")",
"def test_bothbounds(self):\n result = _make_mask_cube(\n self.mask, self.coords, [self.lower, self.upper], self.units\n )\n self.assertEqual(result.coord(\"topographic_zone\").bounds[0][1], self.upper)\n self.assertEqual(result.coord(\"topographic_zone\").bounds[0][0], self.lower)\n self.assertEqual(\n result.coord(\"topographic_zone\").points, np.mean([self.lower, self.upper])\n )\n self.assertEqual(result.coord(\"topographic_zone\").units, Unit(\"m\"))",
"def createSubdivRegion(*args, **kwargs)->bool:\n pass",
"def __test_region(self, bk):\n for arg in self.args['region']:\n ds = ArgoDataFetcher(backend=bk).region(arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True",
"def test_two_region_time_aggregation(self, months, seasons, regions_half_squares):\n data = np.array(\n [\n # area a, months 1-12\n [\n 31,\n 28,\n 31,\n 30,\n 31,\n 30,\n 31,\n 31,\n 30,\n 31,\n 30,\n 31,\n ],\n # area b, months 1-12\n [\n 31 + 1,\n 28 + 1,\n 31 + 1,\n 30 + 1,\n 31 + 1,\n 30 + 1,\n 31 + 1,\n 31 + 1,\n 30 + 1,\n 31 + 1,\n 30 + 1,\n 31 + 1,\n ],\n ],\n dtype=float,\n )\n\n expected = np.array(\n [\n [\n 31 + 31 + 28,\n 31 + 30 + 31,\n 30 + 31 + 31,\n 30 + 31 + 30,\n ],\n [\n 31 + 31 + 28 + 3,\n 31 + 30 + 31 + 3,\n 30 + 31 + 31 + 3,\n 30 + 31 + 30 + 3,\n ],\n ],\n dtype=float,\n )\n\n adaptor = IntervalAdaptor(\"test-month-seasons-two-regions\")\n from_spec = Spec(\n name=\"test-var\",\n dtype=\"float\",\n dims=[\"half_squares\", \"months\"],\n coords={\"months\": months, \"half_squares\": regions_half_squares},\n )\n adaptor.add_input(from_spec)\n to_spec = Spec(\n name=\"test-var\",\n dtype=\"float\",\n dims=[\"half_squares\", \"seasons\"],\n coords={\"seasons\": seasons, \"half_squares\": regions_half_squares},\n )\n adaptor.add_output(to_spec)\n\n data_array = DataArray(from_spec, data)\n\n data_handle = Mock()\n data_handle.get_data = Mock(return_value=data_array)\n data_handle.read_coefficients = Mock(side_effect=SmifDataNotFoundError)\n\n adaptor.simulate(data_handle)\n actual = data_handle.set_results.call_args[0][1]\n\n assert np.allclose(actual, expected)",
"def assertRegionsEqual(self, expected_region, actual_region, msg=None):\n if (expected_region.size() == 1) and (actual_region.size() == 1):\n expected_region = _make_region(self.view, expected_region.begin(), expected_region.end())\n actual_region = _make_region(self.view, actual_region.begin(), actual_region.end())\n self.assertEqual(expected_region, actual_region, msg)",
"def test_viewset_post_passed(self):\n zone = [\n [\n [9.523050482755892,55.71576659960325],\n [9.52433794308304,55.71581494788879],\n [9.525732691770784,55.71585120906369],\n [9.527191813474886,55.715863296114506],\n [9.52785700131058,55.71585120906369],\n [9.530367548948519,55.715561118722064],\n [9.531440432554476,55.71540398555416],\n [9.53208416271805,55.71521059001827],\n [9.532856638914339,55.7149567569243],\n [9.531247313505403,55.713349109025195],\n [9.530989821439974,55.713107351738756],\n [9.529616530424349,55.71246668769403],\n [9.528801138883821,55.712055690133354],\n [9.52860801983475,55.71214030763166],\n [9.528436358457796,55.71206777835862],\n [9.525904353147737,55.711983160703205],\n [9.52410190868973,55.71200733719487],\n [9.52311485577225,55.71206777835862],\n [9.523200686460726,55.71427986060977],\n [9.523050482755892,55.71576659960325]\n ], [\n [9.529723818784944,55.71464248509411],\n [9.529037173277132,55.713880969788974],\n [9.528801138883821,55.713748005276905],\n [9.528221781736605,55.71368756671271],\n [9.528393443113558,55.71315570331575],\n [9.529165919309847,55.71315570331575],\n [9.52957361508011,55.71351833823549],\n [9.53058212566971,55.71435238577594],\n [9.529723818784944,55.71464248509411]\n ]\n ]\n\n data = {\n \"provider\": Provider.objects.first().pk,\n \"name\": \"Test Zone\",\n \"price\": \"5.50\",\n \"zone\": json.dumps(zone)\n }\n\n response = self.client.post(reverse(\"servicearea-list\"), data, format='json')\n self.assertEqual(response.status_code, 201)",
"def test_regref(self):\n dset1 = self.f.create_dataset('x', (10,10))\n regref = dset1.regionref[...]\n dset2 = self.f.create_dataset('y', (1,), dtype=h5py.regionref_dtype)\n dset2[0] = regref\n self.assertEqual(type(dset2[0]), h5py.RegionReference)",
"def test_createRegionDimensions(self):\r\n\r\n classList = {}\r\n classList[RegionType.REGION_TYPE_BOX] = RegionDimensions.RegionDimensionsBox\r\n classList[RegionType.REGION_TYPE_CYLINDER] = RegionDimensions.RegionDimensionsCylinder\r\n classList[RegionType.REGION_TYPE_SPHERE] = RegionDimensions.RegionDimensionsSphere\r\n\r\n for regionType in classList:\r\n classRef = classList[regionType]\r\n regionDimension = RegionDimensions.createRegionDimensions(regionType)\r\n self.assertIsInstance(regionDimension, classRef)\r\n\r\n #self.fail(\"Test if the testcase is working.\")\r",
"def test_signal_regions_len(i07_nexus, regions):\n assert len(i07_nexus.signal_regions) == len(regions)",
"def test_two_phase_region_outside_temperature_tolerance_does_not_belong():\n compsets_300 = CompsetPair([\n BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),\n ])\n\n compsets_500 = CompsetPair([\n BinaryCompset('P1', 500, 'B', 0.5, [0.5, 0.5]),\n BinaryCompset('P2', 500, 'B', 0.8, [0.2, 0.8]),\n ])\n\n tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K\n assert tpr.compsets_belong_in_region(compsets_500) is False",
"def test_center_region(self):\n before_b = \"\"\"\\\n Some 90% of all presidentially declared disasters are weather related,\n leading to around 500 deaths per year and nearly $14 billion in damage.\n StormReady, a program started in 1999 in Tulsa, OK,\n helps arm America's communities with the communication and safety\n skills needed to save lives and property– before and during the event.\n StormReady helps community leaders and emergency managers strengthen local safety programs.\n \"\"\"\n after_b = \"\"\"\\\n Some 90% of all presidentially declared disasters are weather related,\n leading to around 500 deaths per year and nearly $14 billion in damage.\n StormReady, a program started in 1999 in Tulsa, OK,\n helps arm America's communities with the communication and safety\n skills needed to save lives and property– before and during the event.\n StormReady helps community leaders and emergency managers strengthen local safety programs.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"7.0\"),\n after_sel=(\"1.0\", \"7.0\"),\n command_name=\"center-region\",\n directives=\"@pagewidth 70\",\n )",
"def get_valid_regions(self):\n pass",
"def test_build__set_regions(self, valid_service: fixture) -> None:\n service: Service = valid_service\n\n assert service.regions == set_service_regions()",
"def test_recomb(self):\n sol1, sol2 = [0,0,0,0],[1,1,1,1]\n hot_regions = [0,0,0,1] ##NOTE: sum(hot_regions) shouls always be 1\n rec_events = 1\n sol = list(d.recombine(sol1,sol2,rec_events,hot_regions))\n print(f\"recomb sol: {sol}\")\n self.assertTrue( (sol == [0,0,0,1]) or (sol == [1,1,1,0]) )"
] | [
"0.6851561",
"0.6647029",
"0.66334915",
"0.6579759",
"0.65599364",
"0.6482568",
"0.633641",
"0.62562704",
"0.6254943",
"0.6175007",
"0.61622155",
"0.61461675",
"0.6125849",
"0.6115511",
"0.60688263",
"0.5916412",
"0.58777696",
"0.58770466",
"0.5841907",
"0.5833076",
"0.5826163",
"0.5823942",
"0.57580256",
"0.5756346",
"0.5754294",
"0.57520676",
"0.5711326",
"0.5702773",
"0.5688845",
"0.56886595"
] | 0.7825171 | 0 |
Create new neighbor within acceptable bounds | def neighbor(self, start):
x = start[0] + random.uniform(-20, 20)
y = start[1] + random.uniform(-20, 20)
x = max(min(x, xbounds[1]), xbounds[0])
y = max(min(y, ybounds[1]), ybounds[0])
return [x,y] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_neighborhood(self):\n if len(self.available_building_cells) == 0:\n return False\n # Pick cell\n shuffle(self.available_building_cells)\n\n neighborhood_origin = self.available_building_cells[0]\n if not self.creates_valid_building(neighborhood_origin):\n # If not a valid placement, remove location from list\n self.available_building_cells.remove(neighborhood_origin)\n # Retry!\n self.create_neighborhood()\n return True # Exit after neighborhood is created\n\n final_cells = [neighborhood_origin]\n self.available_building_cells.remove(neighborhood_origin)\n\n # Place building on origin\n self.place_building(Building(self.environment, self.environment.next_building_id, neighborhood_origin, attractiveness=random()))\n neighborhood_cells = self.environment.grid.get_neighborhood(neighborhood_origin, moore=True, include_center=True)\n\n # Create a random number of residence buildings in this neighborhood\n number_of_residences = randrange(2,6)\n for i in range(number_of_residences):\n while len(neighborhood_cells) > 0:\n shuffle(neighborhood_cells)\n # Only place building if space is empty\n if self.environment.grid.is_cell_empty(neighborhood_cells[0]):\n self.place_building(Building(self.environment, self.environment.next_building_id, neighborhood_cells[0], attractiveness=random()))\n final_cells.append(neighborhood_cells[0])\n try:\n # If this space was available before, remove it from list\n self.available_building_cells.remove(neighborhood_cells[0])\n except:\n pass\n\n continue\n\n # Remove cell from list\n neighborhood_cells.remove(neighborhood_cells[0])\n\n # Fill surrounding space around buildings with roads!\n for building_location in final_cells:\n for surrounding_cell in self.environment.grid.get_neighborhood(building_location, moore=True):\n if self.environment.grid.is_cell_empty(surrounding_cell):\n self.place_road(Road(surrounding_cell))\n\n return True",
"def create_neighbor():\n copy = np.copy(puzzle)\n pair1, pair2 = same_box_pair()\n\n temp = copy[pair1[0], pair1[1]]\n copy[pair1[0], pair1[1]] = copy[pair2[0], pair2[1]]\n copy[pair2[0], pair2[1]] = temp\n\n return copy",
"def addNeighbor(self, neighbor):",
"def create_epsilon_neighbourhoods(self):\n self.neigbors_clf = NearestNeighbors(radius=self.epsilon, algorithm='ball_tree')\n self.neigbors_clf.fit(self.data)\n _, neigh_idx = self.neigbors_clf.radius_neighbors(self.data)\n return neigh_idx",
"def __init__(self, n_neighbors=2):\n self.n_neighbors = n_neighbors",
"def test_extreme_neighborhoods(self):\n\n ## Radius = 0 ==> all points are noise\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=0.0,\n min_core_neighbors=3,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 0)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"noise\"), self.n)\n\n ## Min_neighbors > 30 ==> all points are noise\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=0.0,\n min_core_neighbors=31,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 0)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"noise\"), self.n)\n\n ## Radius very large ==> all points are core points\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=100.0,\n min_core_neighbors=3,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 1)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"core\"), self.n)\n\n ## Min_neighbors = 0 ==> all points are core points\n m = tc.dbscan.create(\n self.sf,\n distance=\"euclidean\",\n radius=0.5,\n min_core_neighbors=0,\n verbose=False,\n )\n\n self.assertEqual(m.num_clusters, 1)\n self.assertEqual(sum(m.cluster_id[\"type\"] == \"core\"), self.n)",
"def _to_neighbor(self, mean, stddev):\n move = np.random.normal(mean, stddev, self.weight.shape)\n move *= self.connectivity\n self.weight += move\n return move",
"def init_neighbors(self, element):\n def distance(loc1, loc2):\n '''L2 metric distance between two equi-dimensional coordinates.'''\n return np.sqrt(np.sum(np.square(np.subtract(loc1, loc2))))\n\n element.clear_neighbors() # Clean slate\n for e in self.elements:\n if (e.idx != element.idx) & \\\n (distance(element.coords, e.coords) <= self.crit_radius):\n element.add_neighbor(e)\n # TODO: add edge length attribute",
"def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)",
"def neighbor(self): \n newBoard = Board(self.n, False)\n for i in range(self.n):\n newBoard.queens[i][0] = self.queens[i][0]\n newBoard.queens[i][1] = self.queens[i][1]\n \n current_moves = self.moves()\n n_moves = len(current_moves)\n move_index = random.choice(range(n_moves))\n newBoard.queens[current_moves[move_index][0]] = current_moves[move_index][1]\n\n return newBoard",
"def make_n_glycan_neighborhoods():\n neighborhoods = NeighborhoodCollection()\n\n _neuraminic = \"(%s)\" % ' + '.join(map(str, (\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuAc\"),\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuGc\")\n )))\n _hexose = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['Hex', ])))\n _hexnac = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['HexNAc', ])))\n\n high_mannose = CompositionRangeRule(\n _hexose, 3, 12) & CompositionRangeRule(\n _hexnac, 2, 2) & CompositionRangeRule(\n _neuraminic, 0, 0)\n high_mannose.name = \"high-mannose\"\n neighborhoods.add(high_mannose)\n\n base_hexnac = 3\n base_neuac = 2\n for i, spec in enumerate(['hybrid', 'bi', 'tri', 'tetra', 'penta', \"hexa\", \"hepta\"]):\n if i == 0:\n rule = CompositionRangeRule(\n _hexnac, base_hexnac - 1, base_hexnac + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, base_neuac) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 3)\n rule.name = spec\n neighborhoods.add(rule)\n else:\n sialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 1, base_neuac + i\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n sialo.name = \"%s-antennary\" % spec\n asialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, 1 if i < 2 else 0\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n asialo.name = \"asialo-%s-antennary\" % spec\n neighborhoods.add(sialo)\n neighborhoods.add(asialo)\n return neighborhoods",
"def make_boundaries(self):\n p = self.project\n c = p[0]\n outlet = p.NewOutlet('GW', c.x, c.y, c.z - c.soildepth)\n cmf.FreeDrainagePercolation(c.layers[-1], outlet)\n rainfall = cmf.timeseries.from_sequence(self.starttime, cmf.day, [25, 0, 0, 0, 0, 0, 0] * 200)\n p.rainfall_stations.add('Heavy rain once a week', rainfall, (0, 0, 0))\n print(cmf.describe(p.rainfall_stations))\n p.use_nearest_rainfall()\n\n return outlet",
"def add_neighbors(self, visited, parent):\n \n x = parent.x\n y = parent.y\n cost = parent.cost\n neighbors = []\n neighbor_grid = [(-1,1), (0,1), (1,1), (-1,0), (1,0), (-1,-1), (0,-1), (1,-1)]\n\n for idx in neighbor_grid:\n new_x = x + idx[0]\n new_y = y + idx[1]\n if self.valid_pos(new_x, new_y, visited):\n visited[new_y, new_x] = 1\n if self.valid_cost(x,y):\n new_cost = cost + np.linalg.norm(idx)*self.costmap[new_y, new_x]\n neighbors.append(self.new_node(new_x, new_y, new_cost, parent))\n\n return neighbors",
"def insert_nodes(self):\n neighbour_max_distance = 5\n new_nodes = []\n for node in self.nodes:\n left_distance = node.get_distance(node.neighbour1)\n right_distance = node.get_distance(node.neighbour2)\n if left_distance > neighbour_max_distance:\n # halfway\n half_point = (\n node.x + (node.neighbour1.x - node.x) / 2,\n node.y + (node.neighbour1.y - node.y) / 2\n )\n new_node = Node(half_point)\n node.neighbour1.connect(node.neighbour1.neighbour1, new_node)\n new_node.connect(node.neighbour1, node)\n node.connect(new_node, node.neighbour2)\n new_nodes.append(new_node)\n new_nodes.append(node)\n\n if right_distance > neighbour_max_distance:\n # halfway\n half_point = (\n node.x + (node.neighbour2.x - node.x) / 2,\n node.y + (node.neighbour2.y - node.y) / 2\n )\n new_node = Node(half_point)\n node.neighbour2.connect(new_node, node.neighbour2.neighbour2)\n new_node.connect(node, node.neighbour2)\n node.connect(node.neighbour1, new_node)\n new_nodes.append(new_node)\n\n return new_nodes",
"def test_d2_get_neighborhood_small(self):\n config.NR_COLS = 3\n config.NR_ROWS = 3\n gamefield = [\n [1, 0, 0],\n [1, 0, 0],\n [0, 1, 1],\n ]\n # top left\n nh = logic.get_neighborhood(gamefield, 0, 0)\n self.assertEqual(nh, 3)\n # top right\n nh = logic.get_neighborhood(gamefield, 0, 2)\n self.assertEqual(nh, 4)\n # bottom left\n nh = logic.get_neighborhood(gamefield, 2, 0)\n self.assertEqual(nh, 4)\n # bottom right\n nh = logic.get_neighborhood(gamefield, 2, 2)\n self.assertEqual(nh, 3)\n # center\n nh = logic.get_neighborhood(gamefield, 1, 1)\n self.assertEqual(nh, 4)",
"def _valid_neighbors(location, some_num):\n xloc, yloc = location\n vector = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n ret_v = []\n for vect in vector:\n xpos = xloc + vect[0]\n ypos = yloc + vect[1]\n if xpos <= 0 or ypos <= 0:\n continue\n if xpos > some_num or ypos > some_num:\n continue\n ret_v.append((xpos, ypos))\n return ret_v",
"def boundaries_new(*args):\n return _ida_hexrays.boundaries_new(*args)",
"def add_nei(self, dest: int, weight: float):\r\n if not self.has_nei(dest):\r\n self.neighbors[dest] = weight",
"def add_boundaries(self):\n\n bound_conns=[]\n bound_coords=[]\n bound_vert_index=[]\n throat_vert_index=[]\n #Find boundary extent\n [x_min,x_max,y_min,y_max,z_min,z_max]=vo.vertex_dimension(self,self.pores(),parm='minmax')\n min_point = np.around(np.array([x_min,y_min,z_min]),10)\n max_point = np.around(np.array([x_max,y_max,z_max]),10)\n Np = self.num_pores()\n Nt = self.num_throats()\n new_throat_count = 0\n # ridge_dict contains a dictionary where the key is a set of 2 neighbouring pores and the value is the vertex indices\n # that form the throat or ridge between them\n for p,v in self._vor.ridge_dict.items():\n # if the vertex with index -1 is contained in list then the ridge is unbounded - ignore these\n if np.all(np.asarray(v) >=0):\n #boundary throats will be those connecting one pore inside the original set and one out\n if (p[0] in range(Np) and p[1] not in range(Np)) or\\\n (p[0] not in range(Np) and p[1] in range(Np)):\n # the dictionary key is not in numerical order so find the pore index inside\n if p[0] in range(Np):\n my_pore=p[0]\n else:\n my_pore=p[1]\n my_pore_coord = self[\"pore.coords\"][my_pore]\n new_pore_coord = my_pore_coord.copy()\n #rounding necessary here to identify the plane as Voronoi can have 1e-17 and smaller errors\n throat_verts = np.around(self._vor.vertices[v],10)\n #find which plane we are aligned with (if any) and align new_pore with throat plane\n if len(np.unique(throat_verts[:,0])) == 1:\n new_pore_coord[0]=np.unique(throat_verts[:,0])\n elif len(np.unique(throat_verts[:,1])) == 1:\n new_pore_coord[1]=np.unique(throat_verts[:,1])\n elif len(np.unique(throat_verts[:,2])) == 1:\n new_pore_coord[2]=np.unique(throat_verts[:,2])\n else:\n new_pore_coord = throat_verts.mean()\n bound_coords.append(new_pore_coord)\n bound_conns.append(np.array([my_pore,new_throat_count+Np]))\n bound_vert_index.append(dict(zip(v,throat_verts)))\n throat_vert_index.append(dict(zip(v,throat_verts)))\n new_throat_count += 1\n\n #Add new pores and connections\n self.extend(pore_coords=bound_coords, throat_conns=bound_conns)\n #Record new number of pores\n Mp = self.num_pores()\n Mt = self.num_throats()\n new_pore_ids = np.arange(Np,Mp)\n new_throat_ids = np.arange(Nt,Mt)\n #Identify which boundary the pore sits on\n front = self.pores()[self['pore.coords'][:,0]==min_point[0]]\n back = self.pores()[self['pore.coords'][:,0]==max_point[0]]\n left = self.pores()[self['pore.coords'][:,1]==min_point[1]]\n right = self.pores()[self['pore.coords'][:,1]==max_point[1]]\n bottom = self.pores()[self['pore.coords'][:,2]==min_point[2]]\n top = self.pores()[self['pore.coords'][:,2]==max_point[2]]\n #Assign labels\n self['pore.boundary'] = False\n self['pore.boundary'][new_pore_ids] = True\n self['pore.right_boundary'] = False\n self['pore.left_boundary'] = False\n self['pore.front_boundary'] = False\n self['pore.back_boundary'] = False\n self['pore.top_boundary'] = False\n self['pore.bottom_boundary'] = False\n self['pore.right_boundary'][right] = True\n self['pore.left_boundary'][left] = True\n self['pore.front_boundary'][front] = True\n self['pore.back_boundary'][back] = True\n self['pore.top_boundary'][top] = True\n self['pore.bottom_boundary'][bottom] = True\n #Save the throat verts\n self[\"pore.vert_index\"][new_pore_ids] = bound_vert_index\n self[\"throat.vert_index\"][new_throat_ids] = throat_vert_index",
"def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])",
"def investigate(self, nearest_neighbors):\n pass",
"def setupNeighbor(self, **params):\n if not self.rank:\n logging.info('Setting up nearest neighbor searching parameters')\n\n if 'nns_freq' not in params:\n params['nns_freq'] = 10\n\n if 'nns_skin' not in params:\n radius = 0\n\n for ss in params['species']:\n if 'radius' in ss:\n radius = max(radius, ss['radius'][1])\n\n params['nns_skin'] = radius * 4\n\n self.lmp.command('neighbor {nns_skin} {nns_type}'.format(**params))\n self.lmp.command('neigh_modify delay 0 every {nns_freq} check yes'.format(**params))",
"def neighborhood(index, npoints, maxdist=1):\n return [index + i for i in range(-maxdist, maxdist + 1)\n if i != 0 and 0 <= index + i <= npoints - 1]",
"def create_space(num_rows, num_cols, goal=[], obstacles=[], *args):\n space = []\n for i in range (num_rows):\n space.append([])\n for i in range(num_rows):\n for j in range(num_cols):\n space[i].append([])\n \n for i in range(num_rows):\n for j in range(num_cols):\n space[i][j]=node()\n \n for i in range(num_rows):\n for j in range(num_cols):\n space[i][j].h = math.sqrt((goal[0]-i)**2 + (goal[1]-j)**2)\n space[i][j].f = 10000\n space[i][j].g = 10000\n \n for obs in obstacles:\n space[obs[0]][obs[1]].h = 1000\n \n heuristics = np.zeros((num_rows,num_cols))\n for i in range(num_rows):\n for j in range(num_cols):\n heuristics[i][j]=space[i][j].h\n \n for i in range(num_rows):\n for j in range(num_cols):\n space[i][j].cor = [i, j]\n \n return space, heuristics",
"def _compute_euclidean_neigh_matrix(src, d_matrix, radius):\n\n n_max = 100\n n_min = 3\n reached_points = np.array([0])\n counter = 0\n n_neigh = []\n list_neigh = []\n\n while counter < reached_points.shape[0] < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n # Check the number of neighbours\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n list_neigh.append(aux)\n reached_points = np.append(reached_points,\n aux[~np.in1d(aux, reached_points)])\n counter += 1\n\n if counter >= reached_points.shape[0]:\n raise ValueError('Too small value of the radius:'\n 'the neighbour-matrix is not connected')\n elif src.shape[0] == reached_points.shape[0]:\n while counter < src.shape[0]:\n P = reached_points[counter]\n aux = np.array(sorted(\n np.where(d_matrix[P] <= radius)[0],\n key=lambda k: d_matrix[P, k]))\n n_neigh.append(aux.shape[0])\n\n if n_neigh[-1] < n_min:\n raise ValueError('Computation of neighbours aborted since '\n 'their minimum number is too small.\\n'\n 'Please choose a higher radius.')\n elif n_neigh[-1] > n_max:\n raise ValueError('Computation of neighbours aborted since'\n 'their maximum number is too big.\\n'\n 'Please choose a lower radius.')\n\n list_neigh.append(aux)\n counter += 1\n\n n_neigh_max = max(n_neigh)\n n_matrix = np.zeros([src.shape[0],\n n_neigh_max], dtype=int) - 1\n for i in range(src.shape[0]):\n n_matrix[i, 0:list_neigh[i].shape[0]] = list_neigh[i]\n index_ord = np.argsort(n_matrix[:, 0])\n n_matrix = n_matrix[index_ord]\n return n_matrix\n else:\n raise RuntimeError(\"Some problems during\"\n \"computation of neighbours.\")",
"def init_neighbor(self, addr, port, weight):\n neighbor_name = ':'.join([addr, str(port)])\n self.neighbors[neighbor_name] = Router.Neighbor(addr, port, weight)\n self.distance_vector[neighbor_name] = Router.OtherRouter(weight, neighbor_name)",
"def add_neighbors(self, pos, distance, obstacles):\n \n neighbor_list = [(pos[0]-1,pos[1]), (pos[0]+1,pos[1]), \\\n (pos[0],pos[1]-1), (pos[0], pos[1]+1)]\n # Processing each neighbor.\n for (x,y) in neighbor_list:\n if x>=0 and y>=0 and x<self.M and y<self.N: # Out from boundary?\n if (x,y) not in obstacles:\n if (x,y) not in self.footprint: # Already in done list?\n new_distance = distance + 1 + self.heuristic_map[x,y]\n if (x,y) not in self.frontier.keys(): # A new candidate to add to frontier set.\n self.frontier.update({(x,y):new_distance})\n self.distance_map[x,y] = distance + 1\n self.camefrom_map[(x,y)] = pos\n elif new_distance < self.frontier[(x,y)]: # A short path reached this neighbor.\n self.frontier[(x,y)] = new_distance\n self.distance_map[x,y] = distance + 1\n self.camefrom_map[(x,y)] = pos",
"def build_from_coords(self, neighbor_coords):\n pass",
"def create_grid_and_edges(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Define a list to hold Voronoi points\n points = []\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min),\n int(north + d_north + safety_distance - north_min),\n int(east - d_east - safety_distance - east_min),\n int(east + d_east + safety_distance - east_min),\n ]\n grid[obstacle[0]:obstacle[1] + 1, obstacle[2]:obstacle[3] + 1] = 1\n\n # add center of obstacles to points list\n points.append([north - north_min, east - east_min])\n\n # create a voronoi graph based on\n # location of obstacle centres\n graph = Voronoi(points)\n # check each edge from graph.ridge_vertices for collision\n edges = []\n for v in graph.ridge_vertices:\n p1 = graph.vertices[v[0]].astype(int)\n p2 = graph.vertices[v[1]].astype(int)\n # test each pair p1 and p2 for collision using Bresenham\n # If the edge does not hit an obstacle add it to the list\n in_collision = False\n ridgeline = bresenham(p1[0], p1[1], p2[0], p2[1])\n for b in ridgeline:\n # eliminate out of range points in the line\n if b[0] < 0 or b[0] >= grid.shape[0]:\n in_collision = True\n break\n if b[1] < 0 or b[1] >= grid.shape[1]:\n in_collision = True\n break\n # check if grid cell is an obstacle\n if grid[b[0], b[1]] == 1:\n in_collision = True\n break\n # keep ridge points not in collision\n if not in_collision:\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n\n return grid, edges",
"def test_get_neighborhood_radius_correct():\r\n grid_spacing = random.uniform(1e-6, 4.0)\r\n dimensionality = random.randint(1, 3)\r\n\r\n center = numpy.random.random(dimensionality)*2 - 1.0\r\n radius = random.uniform(1e-6, grid_spacing*2)\r\n\r\n # Find all points on grid in range with exhaustive search\r\n grid = _make_grid(grid_spacing, dimensionality,\r\n numpy.min(center)-radius, numpy.max(center)+radius)\r\n expected_neighborhood = [point for point in grid if calculate.distance(point, center) <= radius]\r\n\r\n assert (sorted(ill.get_neighborhood_radius(grid_spacing, center, radius))\r\n == sorted(expected_neighborhood))"
] | [
"0.63055634",
"0.61515677",
"0.6088113",
"0.6036705",
"0.59358096",
"0.5916102",
"0.5905298",
"0.59038895",
"0.57757497",
"0.5760618",
"0.5752663",
"0.57353127",
"0.5683542",
"0.5659537",
"0.56424284",
"0.56122315",
"0.56095177",
"0.5567989",
"0.5559961",
"0.5527063",
"0.54640436",
"0.54564464",
"0.5431994",
"0.54180926",
"0.5416122",
"0.53894484",
"0.53894025",
"0.53779435",
"0.5377406",
"0.5374311"
] | 0.6255202 | 1 |
>>> decimal_to_time(1.0) datetime.time(1, 0) >>> decimal_to_time(23.450) datetime.time(23, 27) | def decimal_to_time(decimal_time):
hours = int(decimal_time)
minutes = (decimal_time * 60) % 60
seconds = (decimal_time * 3600) % 60
args = [int(n) for n in [hours, minutes, seconds]]
return datetime.time(*args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __float_to_time(float_value):\n time_ms = int(float_value*24*60*60*1e3)\n return (datetime.datetime.min + datetime.timedelta(milliseconds=time_ms)).time()",
"def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)",
"def _time_to_datetime(value):\r\n assert isinstance(value, datetime.time)\r\n return datetime.datetime(1970, 1, 1,\r\n value.hour, value.minute, value.second,\r\n value.microsecond)",
"def _decimal_to_dt(dec):\n if dec is None:\n return None\n\n integer = int(dec)\n micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(1000000)\n daittyme = datetime.datetime.utcfromtimestamp(integer)\n return daittyme.replace(microsecond=int(round(micro)))",
"def convert_to_time(value):\n if isinstance(value, datetime.time):\n return value\n elif isinstance(value, str):\n return datetime.time.fromisoformat(value)\n else:\n return datetime.time(value)",
"def int_to_time(seconds):\n time1 = time()\n minutes, time1.second = divmod(seconds, 60)\n time1.hour, time1.minute = divmod(minutes, 60)\n return time1",
"def int_to_time(seconds):\n time1 = time()\n minutes, time1.second = divmod(seconds, 60)\n time1.hour, time1.minute = divmod(minutes, 60)\n return time1",
"def __time_to_float(time_value):\n return time_value.hour/24 + time_value.minute/(24*60) + time_value.second/(24*60*60) + round(time_value.microsecond, 3)/(24*60*60*1e6)",
"def int_to_time(seconds):\n time = Time()\n minutes, time.second = divmod(seconds, 60)\n time.hour, time.minute = divmod(minutes, 60)\n return time",
"def float_to_time(hours, moment='am', tz=None):\n if hours == 12.0 and moment == 'pm':\n return time.max\n fractional, integral = math.modf(hours)\n if moment == 'pm':\n integral += 12\n res = time(int(integral), int(float_round(60 * fractional, precision_digits=0)), 0)\n if tz:\n res = res.replace(tzinfo=pytz.timezone(tz))\n return res",
"def int_to_time(seconds):\n minutes, second = divmod(seconds, 60)\n hour, minute = divmod(minutes, 60)\n time = Time(hour, minute, second)\n return time",
"def _convert_time(self, duration):\n in_sec = int(int(duration) / 1000)\n in_time = int(in_sec / 60) + (0.01 * (in_sec % 60))\n return in_time",
"def _convert_to_decimal(value):\n d0 = value[0][0]\n d1 = value[0][1]\n d = float(d0) / float(d1)\n \n m0 = value[1][0]\n m1 = value[1][1]\n m = float(m0) / float(m1)\n \n s0 = value[2][0]\n s1 = value[2][1]\n s = float(s0) / float(s1)\n \n return d + (m / 60.0) + (s / 3600.0)",
"def _datetime2et(time: datetime) -> float:\n if isinstance(time, float):\n return time\n if not isinstance(time, datetime):\n raise TypeError(\"Time must be a float or a datetime object.\")\n return spy.str2et(time.isoformat())",
"def _truncate_time(number, decimals=0):\n\n # check for validity of inputs\n if not isinstance(decimals, int):\n raise TypeError(\"The parameter \\'decimal\\' must be an integer value.\")\n elif decimals < 0:\n raise ValueError(\"The parameter \\'decimal\\' has to be >= 0.\")\n elif decimals == 0:\n return math.trunc(number)\n\n # calculate the shift factor for truncation\n shift_factor = 10 ** decimals\n\n # calculate the truncated number (shift --> truncate --> shift back)\n return math.trunc(number * shift_factor) / shift_factor",
"def convertTime(self, sec):\n\n if self.timeFormat == S:\n return '%.3f' % sec\n\n if self.timeFormat == HHMMSS:\n return seconds2time(sec)",
"def datetime_from_float(t, unit, epoch=None):\n if epoch is None:\n epoch = np.datetime64(0, \"s\")\n factor = np.timedelta64(1, \"s\") / np.timedelta64(1, unit)\n dt = np.round(factor * t).astype(\"timedelta64[{0}]\".format(unit))\n return epoch + dt",
"def SECOND(time):\n\n return _make_datetime(time).second",
"def convert_time(min, sec):\n # Updated 11/19/16 \n total_time = min*60\n total_time = total_time + sec\n \n return str(total_time)+'.0' # string because being passed to GUI",
"def timestamp_to_time(timestamp):\n n_day = timestamp // (60*24)\n n_time = timestamp - n_day * (60*24)\n n_hour = n_time // 60\n n_minutes = n_time - n_hour * 60\n return Time(n_day, n_hour, n_minutes)",
"def sec_to_time(seconds):\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n return \"%02d:%02d:%02d\" % (h, m, s)",
"def convert_types(cls, value):\n if type(value) in (datetime, date):\n return time.mktime(value.timetuple())\n elif isinstance(value, Decimal):\n return float(value)\n else:\n return value",
"def getTime(toConvert = None):\n if toConvert == None:\n return time.mktime(\n datetime.datetime.now().timetuple()\n )\n else:\n return time.mktime(\n toConvert.timetuple()\n )",
"def to_seconds(time):\n return 3600 * time",
"def convert_timeval(seconds_since_epoch):\n frac, whole = math.modf(seconds_since_epoch)\n microseconds = math.floor(frac * 1000000)\n seconds = math.floor(whole)\n return seconds, microseconds",
"def timestamp_to_datetime(value):\n if not isinstance(value, (int, long, float)):\n raise ValueError(\n 'Expecting a number, got %s instead' % type(value).__name__)\n return EPOCH + datetime.timedelta(microseconds=value)",
"def test_as_time(self):\n self.assertEqual(\n time_display.as_time(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n with_msec=True),\n '23:59:30.357')",
"def to_timestamp(date_time: datetime, unit: TimeUnit = TimeUnit.SECONDS) -> float:\n return date_time.replace(tzinfo=timezone.utc).timestamp() * (1000 ** int(unit))",
"def hydrate_time(nanoseconds, tz=None):\n seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000))\n minutes, seconds = map(int, divmod(seconds, 60))\n hours, minutes = map(int, divmod(minutes, 60))\n seconds = (1000000000 * seconds + nanoseconds) / 1000000000\n t = Time(hours, minutes, seconds)\n if tz is None:\n return t\n tz_offset_minutes, tz_offset_seconds = divmod(tz, 60)\n zone = FixedOffset(tz_offset_minutes)\n return zone.localize(t)",
"def filetime_to_time(filetime):\r\n total = filetime.dwHighDateTime << 32 | filetime.dwLowDateTime\r\n return total / 10000000 - SECONDS_BETWEEN_EPOCHS"
] | [
"0.6740458",
"0.6462992",
"0.64223194",
"0.639058",
"0.6273709",
"0.6182896",
"0.6182896",
"0.6151217",
"0.6147378",
"0.6122597",
"0.5932671",
"0.59254336",
"0.58498394",
"0.5765848",
"0.569216",
"0.5691907",
"0.56543106",
"0.5623327",
"0.55985504",
"0.5585881",
"0.5555204",
"0.5528267",
"0.5500555",
"0.54617614",
"0.54296976",
"0.54270923",
"0.5418204",
"0.53981763",
"0.5397289",
"0.53933036"
] | 0.82775223 | 0 |
Tests the init funtion of the ship | def test_init(self):
self.assertEqual(self.location, Ship(self.location).location) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setUp(self):\r\n self.spaceship = SpaceShipGame()\r\n self.spaceship.init()",
"def setUp(self):\r\n self.spaceship = SpaceShipGame()",
"def setUp(self):\r\n self.spaceship = SpaceShipGame()",
"def setUp(self):\r\n self.spaceship = SpaceShipGame()",
"def setUp(self):\r\n self.spaceship = SpaceShipGame()",
"def setUp(self):\r\n self.spaceship = SpaceShipGame()",
"def setUp(self):\r\n global ship_image\r\n self.spaceshipgame = SpaceShipGame()\r\n self.spaceshipgame.init()\r\n pos = [1,1]\r\n vel = [1,1]\r\n angle = 0\r\n image = ship_image\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.ship = Ship( pos, vel, angle, image, info)",
"def setUp(self):\r\n global ship_image\r\n self.spaceshipgame = SpaceShipGame()\r\n self.spaceshipgame.init()\r\n pos = [1,1]\r\n vel = [1,1]\r\n angle = 0\r\n image = ship_image\r\n\r\n center = [1,1]\r\n size = 1\r\n info = ImageInfo(center, size)\r\n\r\n self.ship = Ship( pos, vel, angle, image, info)",
"def test_01_Init(self):\n pass",
"def setUp(self):\n self.delegate = AlwaysHitDelegate(\"\")\n self.environment = BattleEnvironment()",
"def test_init(self):\n assert_not_equal(self.testGame, None)",
"def test_init(self):\n\n # This environment must have another attributes\n self.assertTrue(hasattr(self.environment, 'transitions'))\n\n # By default mesh shape is 4x3\n self.assertEqual(spaces.Tuple((spaces.Discrete(4), spaces.Discrete(3))), self.environment.observation_space)\n\n # By default initial position is (0, 2)\n self.assertEqual((0, 2), self.environment.initial_state)\n\n # Default reward is (-0.04)\n self.assertEqual((-0.04,), self.environment.default_reward)",
"def test_initialise(self):\n # Make sure the variables are all updated\n assert isinstance(gcmc_system_sampler.context, Context)\n assert isinstance(gcmc_system_sampler.positions, Quantity)\n assert isinstance(gcmc_system_sampler.simulation_box, Quantity)\n\n return None",
"def init():\n pass",
"def init():",
"def setUp(self):\n self.player = ship.Player(\n constants.PLAYER_START_PLACE,\n constants.PLAYER_WIDTH,\n constants.PLAYER_HEIGHT,\n constants.PLAYER_IMG,\n constants.PLAYER_HEALTH\n )\n\n self.alien = ship.Alien(\n [320, 300],\n 30,\n 30,\n constants.GREEN_ALIEN_IMG,\n 1\n )\n\n self.alien.shooting([320, 300], 5, False)\n\n self.player.shooting([self.player.position[0] + 3, self.player.position[1]], 1, True)",
"def test_init(self):\n assert self.route.route[\"transform\"] == \"transform\"\n assert self.route.route[\"output\"] == \"output\"\n assert \"api\" not in self.route.route",
"def test_init(self):\n t = Thing(store=self.store)\n p = Portal(t)\n self.assertEqual(p.store, t.store)\n self.assertEqual(p.thing, t)\n self.assertEqual(p.destination, None)\n self.assertEqual(IUseable(t), p)",
"def test_init_game(self):\n screen = utils.init_game()\n self.assertIsInstance(screen, pg.Surface)",
"def init(self, args):\n return True",
"def test_initialise(self):\n\n # Make sure the variables are all updated\n assert isinstance(gcmc_sphere_sampler.context, Context)\n assert isinstance(gcmc_sphere_sampler.positions, Quantity)\n assert isinstance(gcmc_sphere_sampler.sphere_centre, Quantity)\n\n return None",
"def test_init_default(self):\n self._test_init_default()",
"def test_init_(self):\n card = Card('Archer', 3, 0, 2)\n self.assertEqual(card.name, 'Archer')\n self.assertEqual(card.attack, 3)\n self.assertEqual(card.money, 0)\n self.assertEqual(card.cost, 2)",
"def setUp(self):\n\n self.sold = Soldier(0, 0)\n self.R = Random(seed)",
"def test_init(self):\n # call function to test\n test_object = ScipyOdeSolver(integrator=self._integrator, **self._kwargs)\n assert test_object._solver is None\n assert test_object._solver_args == self._kwargs, 'unexpected additional arguments. Keep in mind None and {}.'\n assert test_object._integrator == self._integrator, 'unexpected initialization of integrate function'",
"def __init__(self):\n self.setup_called = False",
"def test_init(self):\n self.assertEqual(self.foo._base_cmd, 'sleep 10; hostname')\n self.assertEqual(self.foo._base_args, {})\n self.assertEqual(self.foo.InputArgs, {})\n self.assertEqual(self.foo.OracleJobName, 'job1')",
"def __init__(self):\n self.set_time(0)\n self.MakeAliens()\n self.set_ship(Ship())\n self.set_bolts([])\n self.set_powerups([])\n self.set_score(0)\n self.set_direct(True)\n self.set_direct_change(True)\n self.set_plyrbolts(0)\n self.set_lives(3)\n self.set_numkey(0)\n self.set_count(0)\n self.set_count2(0)\n self.set_lowht(GAME_HEIGHT)\n self.set_winstate(False)\n self.set_speed(SHIP_MOVEMENT)\n self.set_firerate(random.randint(1, BOLT_RATE)) #math.random?",
"def initialise(self):",
"def __init__(self):\n self.x_coord = default_init\n self.y_coord = default_init\n self._init_random_coord() # generating random coordinates\n self.x_speed = default_init\n self.y_speed = default_init\n self.degrees = default_init\n self.radius = ship_def_radius"
] | [
"0.8001722",
"0.7689143",
"0.7689143",
"0.7689143",
"0.7689143",
"0.7689143",
"0.74255955",
"0.74255955",
"0.73445696",
"0.7143339",
"0.71065366",
"0.7025921",
"0.6844173",
"0.6808563",
"0.6744077",
"0.67212474",
"0.67182875",
"0.67056423",
"0.6699834",
"0.6693226",
"0.6692314",
"0.6642651",
"0.6637123",
"0.6628975",
"0.66253304",
"0.6622573",
"0.6607723",
"0.6596536",
"0.65806943",
"0.6578901"
] | 0.7780014 | 1 |
Tests whether a ship can be hit | def test_hit(self):
ship = Ship(self.location)
self.assertEqual(self.location, ship.location)
self.assertEqual(1, ship.check_hit(self.hit))
self.assertEqual(1, len(ship.location)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_miss(self):\n ship = Ship([self.hit])\n self.assertEqual([self.hit], ship.location)\n self.assertEqual(0, ship.check_hit((1, 1)))\n self.assertEqual(1, len(ship.location))",
"def is_ship_sunk(self, x, y):\n marker = self.markers[x][y]\n total_hits = self.ship_hits[marker]\n return total_hits == MarkerType.MAX_HITS[marker]",
"def _ship_hit(self):\n if self.stats.ships_left > 0:\n # Decrement ships_left.\n self.stats.ships_left -= 1\n self.scoreboard.prep_ships()\n\n # Get rid of any remaining aliens and bullets.\n self.aliens.empty()\n self.bullets.empty()\n\n # Create a new fleet and center the ship.\n self._create_fleet()\n self.ship.center_ship()\n\n # Pause\n sleep(0.5)\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)",
"def _ship_hit(self):\n if self.stats.ships_left > 0:\n # Decrement ships_left\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n\n # Remove remianing aliens & bullets\n self.aliens.empty()\n self.bullets.empty()\n\n # Create new fleet and ship at start location\n self._create_fleet()\n self.ship.center_ship()\n\n # pause\n sleep(0.5)\n else:\n self.stats.game_active = False \n pygame.mouse.set_visible(True)",
"def is_ship_alive(ship):\n\n # If and when flag systems become advanced enough **FUN** things can\n # be applied to make this check more hilarious.\n return ship.attributes.hull > 0 # though it can't be < 0",
"def has_ship(data, coords):\n if type(data) != dict:\n print('Wrong type of first argument (data)')\n return None\n if type(coords) != tuple:\n print('Wrong type of second argument (coords)')\n return None\n x = ord(coords[0].upper()) - 64\n y = coords[1]\n if x < 1 or x > 10:\n print('Wrong coordinate. Must be from A to J.')\n return None\n if y < 1 or y > 10:\n print('Wrong coordinate. Must be from 1 to 10.')\n return None\n if data[(x, y)] or data[(x, y)] == 'damaged':\n return True\n else:\n return False",
"def has_active_ship(self):\n if self.mark in (constants.ACTIVE_SHIP_MARK, constants.HIT_SHIP_MARK):\n return True\n return False",
"def check_enemy_fleet(self):\n if len(self.enemyShips) > 0:\n response = False\n for ship in self.enemyShips:\n if ship.afloat == True:\n response = True\n return response",
"def _ship_hit(self):\n # takes one life away if there's lives left\n # also removes that life from the scoreboard\n if self.stats.ships_left > 0:\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n\n # removes alien fleet + leftover bullets\n self.aliens.empty()\n self.bullets.empty()\n\n # creates a new fleet and centers players ship\n self._create_fleet()\n self.ship.center_ship()\n\n # stops game for a short while\n sleep(1.5)\n\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)",
"def _ship_hit(self):\n\n if self.stats.ships_left > 0:\n #Decrement ships\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n\n #Get rid of remaining aliens and bullets\n self.aliens.empty()\n self.bullets.empty()\n\n # Recenter the ship\n self.ship.center_ship\n\n # pause\n sleep(0.5)\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)",
"def test_sink(self):\n ship = Ship([self.hit])\n self.assertEqual([self.hit], ship.location)\n self.assertEqual(2, ship.check_hit(self.hit))\n self.assertEqual(0, len(ship.location))",
"def _ship_hit (self):\n\n\t\tself.stats.ship_left -=1\n\n\t\t\"\"\"get rid of remaining bullets and ships\"\"\"\n\t\tself.aliens.empty()\n\t\tself.bullets.empty()\n\n\t\t#Create a new fleet\n\n\t\tself._create_fleet()\n\t\tself.ship.center_ship()\n\n\t\t#pause\n\t\tsleep (0.5)",
"def _ship_hit(self):\n if self.stats.ships_left > 0:\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n self.stars.empty()\n self.bullets.empty()\n self._create_galaxy()\n self.ship.center_ship()\n sleep(0.5)\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)",
"def _sideways_ship_hit(self):\n if self.stats.sideways_ships_left > 0:\n self.stats.sideways_ships_left -= 1\n self.aliens.empty()\n self.bullets.empty()\n self._create_fleet()\n self.sideways_ship.center_sideways_ship()\n sleep(0.5)\n else:\n self.stats.game_active = False",
"def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):\n if stats.ships_left > 0:\n # Decrement ships left.\n stats.ships_left -= 1\n\n # Update scoreboard.\n sb.prep_ships()\n\n # Empty the list of aliens and bullets.\n aliens.empty()\n bullets.empty()\n\n # Create new fleet.\n create_fleet(ai_settings, screen, ship, aliens)\n\n # Center the ship.\n ship.center_ship()\n\n # Pause for a while.\n sleep(0.5)\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)",
"def hit(self):\n\n self.units.pop()\n return (len(self.units) == 0) # Returns True if the ship has been sunk",
"def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):\n\tif stats.ships_left > 0:\n\t\t#Decrement ships_left\n\t\tstats.ships_left -= 1\n\t\t\n\t\t#Update scoreboard\n\t\tsb.prep_ships()\n\t\t\n\t\t#Empty the list of aliens and bullets\n\t\taliens.empty()\n\t\tbullets.empty()\n\t\t\n\t\t#Create a new fleet and center the ship\n\t\tcreate_fleet(ai_settings, screen, ship, aliens)\n\t\tship.center_ship()\n\t\t\n\t\t#Pause\n\t\tsleep(0.5)\n\n\telse:\n\t\tstats.game_active = False \n\t\tpygame.mouse.set_visible(True)",
"def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):\n if stats.ships_left > 0:\n #decrement the value of ships_left\n stats.ships_left -= 1\n #update scoreboard\n sb.prep_ships()\n #when hit remove bullets and aliens from screen\n aliens.empty()\n bullets.empty()\n #create a new fleet with ship at centre\n create_fleet(ai_settings, screen,ship, aliens)\n ship.center_ship()\n #pause for a sec to collect defeat\n sleep(1.0)\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)",
"def check_fleet(self):\n if len(self.ships) > 0:\n response = False\n for ship in self.ships:\n if ship.afloat == True:\n response = True\n return response",
"def check_ship_fits(self, ship_length, row, column, orientation):\n if orientation == 'H':\n if column + ship_length > 10:\n if self.user == 'player':\n print('SHIP DOES NOT FIT, TRY AGAIN!\\n')\n return False\n else:\n return False\n else:\n return True\n else:\n if row + ship_length > 10:\n if self.user == 'player':\n print('SHIP DOES NOT FIT, TRY AGAIN!\\n')\n return False\n else:\n return False\n else:\n return True",
"def ship_hit(ai_settings, stats, screen, ship, aliens, bullets):\n\tif stats.ship_left > 1:\n\t\tstats.ship_left -= 1\n\t\n\t\t# Empty aliens and bullets\n\t\taliens.empty()\n\t\tbullets.empty()\n\t\n\t\t# Restore the initial screen\n\t\tcreate_fleet(ai_settings, screen, aliens)\n\t\tship.center_ship()\n\t\n\t\t# Pause\n\t\tsleep(1.0)\n\telse:\n\t\tstats.game_active = False",
"async def check_game_over(self,\n spaceship: MapObject,\n max_x: int,\n max_y: int) -> NoReturn:\n\n for obj_id, obj in self._dynamic_objects.items():\n if not obj_id.startswith('rubbish'):\n continue\n if spaceship & obj:\n while True:\n draw_frame(self._canvas, max_x // 4, max_y // 2,\n self._all_frames['other']['game_over'])\n await sleep(0)",
"def ship_hit(si_settings, screen, stats, sb, ship, aliens, bullets, alienBullets, images):\r\n if stats.ships_left > 0:\r\n # Decrement ships_left.\r\n stats.ships_left -= 1\r\n\r\n # Animate the ship explosion\r\n ship_explosion(si_settings, screen, ship)\r\n\r\n # Update scoreboard.\r\n sb.prep_ships()\r\n\r\n # Empty the list of aliens and bullets.\r\n aliens.empty()\r\n bullets.empty()\r\n alienBullets.empty()\r\n\r\n # Create a new fleet and center the ship.\r\n create_fleet(si_settings, screen, ship, aliens, images)\r\n ship.center_ship()\r\n\r\n # Pause.\r\n sleep(0.5)\r\n else:\r\n stats.game_active = False\r\n pygame.mouse.set_visible(True)",
"def canItakeEnemyShip(self, enemyShip):\n if self.assaultStrength/enemyShip.getPersonStrength() > 1.5:\n return 1\n return 0",
"def ship_hit(ai_settings, stats, screen, ship, boss, bullets,boss_bullets):\n if stats.ships_left > 1:\t\n # Decrement ships_left\n stats.ships_left -= 1\n # Empty the list of bullets\n bullets.empty()\n boss_bullets.empty()\n #center the ship.\n ship.center_ship()\n # Pause.\n #sleep(0.5)\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)",
"def ship_hit(si_settings,screen,stats,sb,ship,aliens,bullets):\n if stats.ships_left > 0:\n # Decrement ships_left.\n stats.ships_left -= 1\n #update Scoreboard\n sb.prep_ships()\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)\n #empties aliens and bullets\n aliens.empty()\n bullets.empty()\n #makes new aliens and centers ship\n create_fleet(si_settings,screen,ship,aliens)\n ship.center_ship()\n #stop\n sleep(0.5)",
"def getShip(self):\n \"return self._ship\"\n if self._ship == None:\n return True\n return False",
"def has_ship(field, coordinates):\n\n if field[coordinates] == '*':\n return True\n return False",
"def point_in_ship(ships, coor):\n for ship in ships:\n if coor in ship.coordinates or coor in ship.neighbor:\n return True\n return False",
"def can_flyover(self):\n return False"
] | [
"0.7271281",
"0.70827186",
"0.704767",
"0.7041984",
"0.7026214",
"0.6968032",
"0.6933917",
"0.68898654",
"0.68886876",
"0.6832401",
"0.6804325",
"0.6801222",
"0.6788589",
"0.67883235",
"0.67757285",
"0.6768699",
"0.66971505",
"0.66723",
"0.666582",
"0.6664523",
"0.66624606",
"0.66504693",
"0.66286796",
"0.6618729",
"0.6481689",
"0.64770234",
"0.64415824",
"0.64297646",
"0.6368693",
"0.63332665"
] | 0.7323437 | 0 |
Selects the mean data if the flag is true | def select_data(
self, flag_mean: bool
) -> np.ndarray[tuple[Ifm, Chn], np.float64]:
return self.mean if flag_mean else self.data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def masked_mean(x: torch.FloatTensor, m: torch.BoolTensor):\n if m.bool().sum() == len(m):\n return torch.full((1, ), fill_value=float('inf'), device=x.device)\n return x[m.bool()].mean()",
"def reset_mean(cls, sensor):\n if sensor == 't':\n cls.mean_t.clear()\n return cls.mean_t == []\n if sensor == 'l':\n cls.mean_l.clear()\n return cls.mean_l == []",
"def conditional_mean(self, gp):\n raise NotImplementedError",
"def get_mean(self):\n self.meanval = np.mean(self.adulist)",
"def modelmean(self, model_params, this_data, this_suff_stat):\n pass",
"def mean(self):\n\n\t\tif not self._masked:\n\t\t\t\n\t\t\treturn self.data.mean()\n\t\t\n\t\telse:\n\t\t\t\n\t\t\tif not hasattr(self,\"_full_mask\"):\n\t\t\t\tself.maskBoundaries()\n\t\t\t\n\t\t\treturn self.data[self._full_mask].mean()",
"def Mean(data):\n return data.mean()",
"def mean(self, avg=True):\n if not self.fp_init:\n if not avg:\n return self._calc_mean(self.f, self.a, self.b, self.Z)\n else:\n return self._calc_mean(self.f_avg, self.a_avg, self.b_avg,\n self.Z_avg)\n return self._mean if not avg else self._mean_avg",
"def _init_data_mean(self):\n if isinstance(self.settings.caffevis_data_mean, basestring):\n # If the mean is given as a filename, load the file\n try:\n data_mean = np.load(self.settings.caffevis_data_mean)\n except IOError:\n print '\\n\\nCound not load mean file:', self.settings.caffevis_data_mean\n print 'Ensure that the values in settings.py point to a valid model weights file, network'\n print 'definition prototxt, and mean. To fetch a default model and mean file, use:\\n'\n print '$ cd models/caffenet-yos/'\n print '$ ./fetch.sh\\n\\n'\n raise\n input_shape = self.get_input_data_shape() # e.g. 227x227\n # Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256)\n excess_h = data_mean.shape[1] - input_shape[0]\n excess_w = data_mean.shape[2] - input_shape[1]\n assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(input_shape)\n data_mean = data_mean[:, (excess_h/2):(excess_h/2+input_shape[0]),\n (excess_w/2):(excess_w/2+input_shape[1])]\n elif self.settings.caffevis_data_mean is None:\n data_mean = None\n else:\n # The mean has been given as a value or a tuple of values\n data_mean = np.array(self.settings.caffevis_data_mean)\n # Promote to shape C,1,1\n while len(data_mean.shape) < 1:\n data_mean = np.expand_dims(data_mean, -1)\n \n #if not isinstance(data_mean, tuple):\n # # If given as int/float: promote to tuple\n # data_mean = tuple(data_mean)\n\n if data_mean is not None:\n self.net.transformer.set_mean(self.net.inputs[0], data_mean)",
"def reset_mean(self,new_mean):\n self.mean = new_mean\n return",
"def reset_mean(self,new_mean):\n self.mean = new_mean\n return",
"def conditional_mean(self, F):\n raise NotImplementedError",
"def sample_mean(self, x_dict={}):\n raise NotImplementedError()",
"def get_mean_fit(flag='L'):\n if flag == 'L':\n return np.mean(np.vstack(l_coeff_queue), axis =0) if len(l_coeff_queue)>1 else l_coeff_queue[-1]\n else:\n return np.mean(np.vstack(r_coeff_queue), axis =0) if len(r_coeff_queue)>1 else r_coeff_queue[-1]",
"def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))",
"def mean(self, mean):\n\n self._mean = mean",
"def running_mean_old(self, data = '', n='', datums=''):\n not_nans = [ x for x in data if not np.isnan(x) ] \n not_nans_indexes = [ data.index(x) for x in data if not np.isnan(x) ]\n datums_nans = [ datums[i] for i in not_nans_indexes ] # extracting not nans values and corresponding index in datums (for plotting) \n cumsum = np.cumsum(np.insert(not_nans, 0, 0))\n means = (cumsum[n:] - cumsum[:-n]) / float(n)\n means = np.sqrt(np.absolute(means)) \n return means , datums_nans",
"def mean(vals):",
"def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)",
"def take_one_averaged(self):\n self.na.set_center_frequency(6.160574e9)\n self.na.set_span(10e6)\n self.na.set_power(-5, 1)\n self.na.set_ifbw(1e3)\n\n self.na.set_query_timeout(40e3)\n set_format = self.na.set_format('polar')\n print \"set_format returned: \", set_format\n self.na.set_trigger_source(\"manual\")\n self.na.set_averages(10)\n self.na.set_trigger_average_mode()\n\n self.na.clear_averages(channel=1)\n self.na.trigger_single(channel=1)\n fpts, xs, ys = self.na.read_data()\n #\n plt.figure()\n plt.plot(fpts, xs)\n plt.plot(fpts, ys)\n plt.show()",
"def __call__(self, x):\n return np.mean(self.observations <= x)",
"def _compute_global_mean(self, dataset, session, limit=None):\n _dataset = dataset\n mean = 0.\n if isinstance(limit, int):\n _dataset = _dataset[:limit]\n if isinstance(_dataset, np.ndarray):\n mean = np.mean(_dataset)\n else:\n # Iterate in case of non numpy data\n for i in range(len(dataset)):\n mean += np.mean(dataset[i]) / len(dataset)\n self.global_mean.assign(mean, session)\n return mean",
"def test_mean(self):\n pass",
"def test_mean(self):\n pass",
"def mean(self):\r\n return np.mean(self.data_array)",
"def _get_mean(self):\n mu = self._get_conditional_negative_energy()\n return sigmoid(mu)",
"def calculate(self):\n if self.sampling_timer.IsRunning():\n return\n if self.daq.data0 == []:\n average = 0.0\n else:\n average = mean(self.daq.data0)\n res_string = '%.2f' %average\n self.control_box.result_box.SetLabel(res_string)",
"def mean(self):\n\n return time_stat(self, stat=\"mean\")",
"def MeanNa(Vec):\n MM = mean(Vec)\n Vec[where(Vec.mask)] = MM\n return(Vec)",
"def get_mean_of_dataset(train_data_loader, args, idx=0):\n meter = AverageMeter()\n for i in train_data_loader:\n if isinstance(i, list):\n meter.update(i[idx])\n else:\n meter.update(i)\n data_mean = meter.mean\n if data_mean.ndim == 2: data_mean = data_mean.mean(0)\n return tensor(data_mean, args)"
] | [
"0.6278715",
"0.6039226",
"0.59565204",
"0.5857826",
"0.5752508",
"0.56542146",
"0.5637328",
"0.5604804",
"0.5561127",
"0.5551796",
"0.5551796",
"0.55310106",
"0.5478316",
"0.54347473",
"0.54059947",
"0.54058695",
"0.5385061",
"0.53676134",
"0.5334629",
"0.53160447",
"0.53139335",
"0.53076696",
"0.5306158",
"0.5306158",
"0.5298094",
"0.5287306",
"0.52840847",
"0.5281399",
"0.5264676",
"0.52640355"
] | 0.75266117 | 0 |
Main function for validating CFN templates | def main():
VALIDATE_TEMPLATE(TEMPLATES) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(self, template: str, func: Callable):\n raise NotImplementedError",
"def test_valid(self):\n args = [SIMPLE_TEMPLATE, SIMPLE_CANDIDATE]\n result = self.runner.invoke(main, args)\n self.assertEqual(0, result.exit_code)",
"def test_starting_template(checker):\n contents = labeled.contents(label=\"template\")\n _ = tomllib.loads(contents)",
"def test_validate_valid_template(cidc_api, some_file, clean_db, monkeypatch):\n user_id = setup_trial_and_user(cidc_api, monkeypatch)\n make_admin(user_id, cidc_api)\n\n client = cidc_api.test_client()\n data = form_data(\"pbmc.xlsx\", some_file, \"pbmc\")\n\n mocks = UploadMocks(monkeypatch)\n\n grant_upload_permission(user_id, \"pbmc\", cidc_api)\n\n res = client.post(VALIDATE, data=data)\n assert res.status_code == 200\n assert res.json[\"errors\"] == []\n mocks.iter_errors.assert_called_once()",
"def main(\n files: List[Path] = typer.Argument(default=None, dir_okay=False, exists=True),\n template: Optional[str] = typer.Option(\n None, '--template', help='Name of template file'\n ),\n logo: Optional[str] = typer.Option(None, '--logo', help='Name of logo file'),\n logo_width: Optional[str] = typer.Option(\n None, '--logo-width', help='Logo width (default 35mm)'\n ),\n highlight_style: Optional[str] = typer.Option(None, '--highlight-style',\n help='Specify coloring style to be used in highlighting source code'),\n syntax_definition: Optional[str] = typer.Option(None, '--syntax-definition',\n help='Specify a directory which contains syntax definition files'),\n no_toc: bool = typer.Option(\n False, '--no-toc', help='table of contents in PDF document'\n ),\n no_number_sections: bool = typer.Option(False, '--no-number-sections', help='no section numbering'),\n\n no_titlepage: bool = typer.Option(False, '--no-titlepage', help='title in PDF document'),\n tex_file: bool = typer.Option(\n False, '--tex', help='create TeX file instead of PDF document'\n ),\n email: Optional[str] = typer.Option(None, '--email', help='Author email'),\n company: Optional[str] = typer.Option(None, '--company', help='Name of company'),\n department: Optional[str] = typer.Option(\n None, '--department', help='Name of department'\n ),\n confidential: bool = typer.Option(\n False, '--confidential', help='indicate confidential'\n ),\n debug: bool = typer.Option(False, '--debug', help='turns debugging on'),\n pdf_engine: str = typer.Option(\n 'xelatex',\n '--pdf-engine',\n help='Specify pdf engine, one of lualatex, xelatex or tectonic ',\n ),\n _version: bool = typer.Option(\n None, '-V', '--version', callback=version_callback, help='Show version and exit'\n ),\n):\n\n if not files:\n typer.echo('Error: Must specify at least one .md file.')\n raise typer.Abort()\n\n mdfiles: List[str] = [str(md) for md in files]\n\n template = template or os.environ.get('MD2PDF_TEMPLATE')\n if template is None:\n print('No template specified')\n sys.exit(1)\n\n email = email or os.environ.get('MD2PDF_AUTHOR_EMAIL')\n footer_center = ''\n\n # command line overwrites `MD2PDF_PDF_ENGINE`. 
if both are not given\n # then `xelatex` is the default\n pdf_engine = pdf_engine or os.environ.get('MD2PDF_PDF_ENGINE') or 'xelatex'\n # check that pdf-engine is one of the following\n if pdf_engine not in ['xelatex', 'lualatex', 'tectonic']:\n print('--pdf-engine must be one of \"xelatex\", \"lualatex\", \"tectonic\"')\n sys.exit(1)\n\n ext = '.pdf'\n if tex_file:\n ext = '.tex'\n\n if len(mdfiles) == 1:\n toml_file = os.path.splitext(mdfiles[0])[0] + '.toml'\n\n if os.path.exists(toml_file):\n print(f'TOML file {toml_file} found')\n parsed_toml = toml.load(toml_file)\n default_val = parsed_toml.get('default')\n if default_val is None:\n print(f'No file names found in {toml_file}')\n else:\n mdfiles = default_val.get('files')\n\n for mdf in mdfiles:\n print(f'Compiling {mdf}')\n\n main_mdfile = os.path.realpath(mdfiles[0])\n\n outfile = Path(main_mdfile).stem + ext\n\n year = date.today().year\n\n company = company or os.environ.get('MD2PDF_COMPANY')\n department = department or os.environ.get('MD2PDF_DEPARTMENT')\n\n if company:\n if confidential:\n footer_center = f'© Copyright {year} {company}'\n else:\n footer_center = f'{year} {company}'\n\n pdcmd = PandocCmd(outfile)\n pdcmd.append(f'--template={template}')\n pdcmd.append(f'--pdf-engine={pdf_engine}')\n\n pdcmd.set_v('footer-center', footer_center)\n pdcmd.set_v('company', company)\n pdcmd.set_v('department', department)\n\n syntax_definition = syntax_definition or os.environ.get('MD2PDF_SYNTAX_DEFINITION_DIR')\n if syntax_definition is not None:\n add_syntax_definition(pdcmd, syntax_definition)\n\n pdcmd.append('--highlight-style')\n highlight_style = highlight_style or os.environ.get('MD2PDF_HIGHLIGHT_STYLE')\n if highlight_style is None:\n pdcmd.append('pygments')\n else:\n check_highlight_style(highlight_style)\n pdcmd.append(highlight_style)\n\n if not no_number_sections:\n pdcmd.append('--number-sections')\n\n if no_titlepage:\n pdcmd.set_m('titlepage', 'false')\n\n logo = logo or os.environ.get('MD2PDF_LOGO')\n pdcmd.set_v('logo', logo)\n\n logo_width = logo_width or os.environ.get('MD2PDF_LOGO_WIDTH')\n pdcmd.set_v('logo-width', logo_width)\n\n pdcmd.set_m('email', email)\n\n if not no_toc:\n pdcmd.append('--toc')\n\n pdcmd.extend(mdfiles)\n\n if debug:\n print(' '.join(pdcmd.pandoc))\n\n\n pdcmd.run()",
"def test_template(filename, rule, mode, rules):\n\n filename = os.path.join(os.path.dirname(__file__), \"templates\", filename)\n\n template = cfnlint.decode.cfn_yaml.load(filename)\n matches = cfnlint.core.run_checks(\n filename,\n template,\n rules,\n # TODO: parametrize the region\n [\"eu-west-1\"],\n )\n\n match_ids = [match.rule.id for match in matches]\n\n # No non-serverless errors\n assert len([m for m in match_ids if m[1] != \"S\"]) == 0\n\n if mode == \"fail\":\n assert rule in match_ids\n else:\n assert rule not in match_ids",
"def test_validate_invalid_template(cidc_api, clean_db, monkeypatch):\n user_id = setup_trial_and_user(cidc_api, monkeypatch)\n make_admin(user_id, cidc_api)\n grant_upload_permission(user_id, \"pbmc\", cidc_api)\n mocks = UploadMocks(monkeypatch)\n\n client = cidc_api.test_client()\n\n # handles ValidationError thrown by `XlTemplateReader.from_xlsx`\n mocks.open_xlsx.side_effect = ValidationError(\"uh oh\")\n res = client.post(VALIDATE, data=form_data(\"pbmc.xlsx\", io.BytesIO(b\"123\"), \"pbmc\"))\n assert res.status_code == 400\n assert res.json[\"_error\"][\"message\"][\"errors\"] == [\"uh oh\"]\n\n # handles errors returned by `XlTemplateReader.iter_errors`\n mocks.open_xlsx.side_effect = None\n mocks.iter_errors.return_value = [\"test error\"]\n res = client.post(VALIDATE, data=form_data(\"pbmc.xlsx\", io.BytesIO(b\"123\"), \"pbmc\"))\n assert res.status_code == 400\n assert len(res.json[\"_error\"][\"message\"]) > 0",
"def _parse_template(self):\n with open(\"./common/sagemaker_rl/orchestrator/cloudformation.yaml\") as template_fileobj:\n template_data = template_fileobj.read()\n self.cf_client.validate_template(TemplateBody=template_data)\n return template_data",
"def main():\n basedir = os.path.dirname(os.path.realpath(__file__))\n file_path = os.path.join(basedir, 'input')\n\n valid_strings = 0\n\n with open(file_path, 'r') as input_file:\n for line in input_file:\n if validate(line):\n valid_strings += 1\n\n print \"Found {} valid strings.\".format(valid_strings)\n\n assert valid_strings == 55",
"def _validate_template_is_handled(self, filepath):\n # we're already sure we can open it ok\n zf = zipfile.ZipFile(str(filepath))\n\n tainted_filenames = []\n for name in zf.namelist():\n content = zf.read(name)\n if INIT_TEMPLATE_TOKEN in content:\n tainted_filenames.append(name)\n\n if tainted_filenames:\n raise CommandError(\n \"Cannot upload the charm as it include the following files with a leftover \"\n \"TEMPLATE-TODO token from when the project was created using the 'init' \"\n \"command: {}\".format(\", \".join(tainted_filenames))\n )",
"def _rec_is_template_valid(template: JSONDict, *, address: Tuple = ()) -> List[Error]:\n\n errors = []\n\n keywords = template[\"keywords\"] if \"keywords\" in template.keys() else []\n for k in keywords:\n errs = _check_keyword(k, address=address)\n errors.extend(errs)\n\n sections = template[\"sections\"] if \"sections\" in template.keys() else []\n for s in sections:\n if _undocumented(s):\n errors.append(\n Error(\n (address + (s[\"name\"],)),\n \"Sections must have a non-empty docstring.\",\n )\n )\n errs = _rec_is_template_valid(s, address=(address + (s[\"name\"],)))\n errors.extend(errs)\n\n return errors",
"def validate():",
"def main(args):\n p = OptionParser()\n p.add_option('-d', '--debug',\n action='store_true', default=False, dest='debug',\n help='debug')\n p.add_option('-w', '--w3c',\n action='store_true', default=False, dest='w3c',\n help='send file to validator.w3.org')\n p.add_option('-r', '--rm',\n action='store_true', default=False, dest='passrm',\n help='rm validation output on pass')\n p.add_option('-v', '--verbose',\n action='store_true', default=False, dest='verbose',\n help='more output')\n (o, a) = p.parse_args(args)\n \n if o.debug: pdb.set_trace()\n\n verbose(o.verbose)\n \n if 1 < len(a):\n flist = a[1:]\n else:\n flist = glob.glob(\"*.html\")\n\n for filename in flist:\n if verbose(): print filename\n if o.w3c:\n w3c_validate(filename)\n else:\n check_file(filename)\n\n sys.exit(exit_value())",
"def validate(cls, templates):\n super(Product, cls).validate(templates)\n\n for template in templates:\n template.check_type_and_mode()\n\n template.check_gc_min_max()",
"def validate_template(self, contents):\n try:\n self.conn.validate_template(template_body=contents)\n return True\n except BotoServerError as e:\n print contents\n print e.message\n raise",
"def _validate_template(self, node, lexer):\n param_names = set()\n const_names = set()\n ident_names = set()\n\n def check_const(name, node):\n msg = None\n if name in param_names:\n msg = \"declaration of 'const %s' shadows a parameter\"\n elif name in const_names:\n msg = \"redeclaration of 'const %s'\"\n if msg is not None:\n syntax_error(msg % name, FakeToken(lexer, node.lineno))\n const_names.add(name)\n\n def check_id(name, node):\n msg = None\n if name in param_names:\n msg = \"identifier '%s' shadows a parameter\"\n elif name in const_names:\n msg = \"identifier '%s' shadows a const expression\"\n elif name in ident_names:\n msg = \"redeclaration of identifier '%s'\"\n if msg is not None:\n syntax_error(msg % name, FakeToken(lexer, node.lineno))\n ident_names.add(name)\n\n # collect the parameter names\n params = node.parameters\n for param in params.positional:\n param_names.add(param.name)\n for param in params.keywords:\n param_names.add(param.name)\n if params.starparam:\n param_names.add(params.starparam)\n\n # validate the const expressions\n ConstExpr = enaml_ast.ConstExpr\n for item in node.body:\n if isinstance(item, ConstExpr):\n check_const(item.name, item)\n\n # validate the identifiers\n ChildDef = enaml_ast.ChildDef\n TemplateInst = enaml_ast.TemplateInst\n stack = list(reversed(node.body))\n while stack:\n node = stack.pop()\n if isinstance(node, ChildDef):\n if node.identifier:\n check_id(node.identifier, node)\n stack.extend(reversed(node.body))\n elif isinstance(node, TemplateInst):\n idents = node.identifiers\n if idents is not None:\n for name in idents.names:\n check_id(name, idents)\n if idents.starname:\n check_id(idents.starname, idents)",
"def _template_isvalid(template_body: str, region: str, profile: str = None) -> bool:\n logger.debug(f\"checking if template is valid in region {region}\")\n cfn_client = _get_cfn_client(region=region, profile=profile)\n try:\n cfn_client.validate_template(TemplateBody=template_body)\n except Exception as e:\n if 'Template format error' in e.__str__():\n logger.warning(e)\n return False\n else:\n raise e\n logger.debug(f\"template is valid\")\n return True",
"def validate_command(ctx, path):\n context = SceptreContext(\n command_path=path,\n project_path=ctx.obj.get(\"project_path\"),\n user_variables=ctx.obj.get(\"user_variables\"),\n options=ctx.obj.get(\"options\"),\n output_format=ctx.obj.get(\"output_format\"),\n ignore_dependencies=ctx.obj.get(\"ignore_dependencies\")\n )\n\n plan = SceptrePlan(context)\n responses = plan.validate()\n\n for stack, response in responses.items():\n if response['ResponseMetadata']['HTTPStatusCode'] == 200:\n del response['ResponseMetadata']\n click.echo(\"Template {} is valid. Template details:\\n\".format(stack.name))\n write(response, context.output_format)",
"def test_main():\n for template in templates:\n main([\"-g\", template])\n\n # One at a time\n for xyz_file in example_xyz_files:\n main([template, xyz_file])\n\n # All at once\n main([template] + list(example_xyz_files))\n\n # Allow use of template in the parent directory\n with cd(\"data\"):\n main([\"../pnictogen/repo/ADF.in\", \"water-dimer.xyz\"])",
"def validate_template_config(template_config):\n return template_config_schema.validate(template_config)",
"def test_object_template_validation():\n length_template = PropertyTemplate(\"Length\", bounds=RealBounds(2.0, 3.5, 'cm'))\n dial_template = ConditionTemplate(\"dial\", bounds=IntegerBounds(0, 5))\n color_template = ParameterTemplate(\"Color\", bounds=CategoricalBounds([\"red\", \"green\", \"blue\"]))\n\n with pytest.raises(TypeError):\n MaterialTemplate()\n\n with pytest.raises(ValueError):\n MaterialTemplate(\"Block\", properties=[[length_template, RealBounds(3.0, 4.0, 'cm')]])\n\n with pytest.raises(ValueError):\n ProcessTemplate(\"a process\", conditions=[[color_template, CategoricalBounds([\"zz\"])]])\n \n with pytest.raises(ValueError):\n MeasurementTemplate(\"A measurement\", parameters=[[dial_template, IntegerBounds(-3, -1)]])",
"def validate_document(settings):\n if not settings.filename.endswith(\".xml\"):\n print_error(\"{} must have a .xml extension to interoperate with build tool\".format(settings.filename))\n exit(1)\n \n failed = False\n \n print color(\"Validating: \", color_code(BLUE)), settings.filename\n if platform in [\"linux\", \"linux2\"]: \n stat_info = os.stat(settings.filename)\n gid = stat_info.st_gid\n mode = stat_info.st_mode & 0777\n group = getgrgid(gid)[0]\n if group != \"cs0220ta\":\n print_error(\"Wrong group, you MUST run `chgrp cs0220ta {}'\".format(settings.filename))\n failed = True\n if mode ^ 0660 != 0000:\n print_error(\"Wrong permissions, you MUST run `chmod 660 {}'\".format(settings.filename))\n failed = True\n \n invalid_lt = re.compile(\"<(?!/?(assignment|problem|year|title|name|blurb|due))\")\n invalid_amp = re.compile(r\"&(?!\\w{1,10};)\")\n invalid_char = re.compile(r\"[^\\x00-\\x7f]\")\n \n # Some more manual checking \n with open(settings.filename) as f:\n for num, line in enumerate(f):\n problem_lt = re.search(invalid_lt, line)\n if problem_lt:\n print_error(\"Invalid < character on line {} at character {}\".format(num+1, problem_lt.start()))\n print color(\"\\tMake sure the tags you are using are correct.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_amp = re.search(invalid_amp, line)\n if problem_amp:\n print_error(\"Invalid raw & character on line {} at character {}\".format(num+1, problem_amp.start()))\n print color(\"\\tA literal & can be escaped by using \\\"&\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_char = re.search(invalid_char, line)\n if problem_char:\n print_error(\"Invalid non-ASCII character on line {} at character {}\".format(num+1, problem_char.start()))\n failed = True\n \n try:\n tree = ET.parse(settings.filename)\n except Exception:\n print_error(\"XML in {} could not be parsed at all.\".format(settings.filename))\n print color(\"\\tAre you sure all tags are closed?\", color_code(YELLOW))\n print color(\"\\nPlease rerun validation once XML is fixed\", color_code(CYAN))\n exit(1)\n try:\n document = Document(settings.filename)\n document.parse_tree(tree)\n document.validate()\n except ImproperXmlException as e:\n print_error(e.args[0])\n print color(\"\\nPlease rerun validation after fixing\", color_code(CYAN))\n exit(1)\n \n for i, version in enumerate(document.versions):\n print color(\"\\n\\nProblem {}: {}\\n\".format(i+1, version.filename),\n color_code(BLUE))\n validate_version(version, failed)",
"def test_create_template_subsciption(self):\n pass",
"def validate(settings):\n if not settings.filename.endswith(\".xml\"):\n print_error(\"{} must have a .xml extension to interoperate with build tool\".format(settings.filename))\n exit(1)\n \n failed = False\n \n print color(\"Validating: \", color_code(BLUE)), settings.filename\n if platform in [\"linux\", \"linux2\"] and not(settings.skip_permissions):\n stat_info = os.stat(settings.filename)\n gid = stat_info.st_gid\n mode = stat_info.st_mode & 0777\n group = getgrgid(gid)[0]\n if group != \"cs0220ta\":\n print_error(\"Wrong group, you MUST run `chgrp cs0220ta {}'\".format(settings.filename))\n failed = True\n if mode ^ 0660 != 0000:\n print_error(\"Wrong permissions, you MUST run `chmod 660 {}'\".format(settings.filename))\n failed = True\n \n invalid_lt = re.compile(\"<(?!/?(problem|usedin|version|authors?|year|topics?|types?|param|deps?|dependency|dependencies|body|solution|rubric|resource))\")\n invalid_amp = re.compile(r\"&(?!\\w{1,10};)\")\n invalid_char = re.compile(r\"[^\\x00-\\x7f]\")\n \n # Some more manual checking \n with open(settings.filename) as f:\n for num, line in enumerate(f):\n if len(string.rstrip(line)) > 80:\n print_warning(\"Line {} longer than 80 characters (has {})\".format(num+1, len(string.rstrip(line))))\n failed = True\n problem_lt = re.search(invalid_lt, line)\n if problem_lt:\n print_error(\"Invalid < character on line {} at character {}\".format(num+1, problem_lt.start()))\n print color(\"\\tA literal < can be escaped using \\\"<\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_amp = re.search(invalid_amp, line)\n if problem_amp:\n print_error(\"Invalid raw & character on line {} at character {}\".format(num+1, problem_amp.start()))\n print color(\"\\tA literal & can be escaped by using \\\"&\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_char = re.search(invalid_char, line)\n if problem_char:\n print_error(\"Invalid non-ASCII character on line {} at character {}\".format(num+1, problem_char.start()))\n failed = True\n \n try:\n tree = ET.parse(settings.filename)\n except Exception:\n print_error(\"XML in {} could not be parsed.\".format(settings.filename))\n print color(\"\\nPlease rerun validation once XML is fixed\", color_code(CYAN))\n exit(1)\n if tree.getroot().tag == 'assignment':\n print_error(\"This looks like an assignment xml file. Did you mean 22edit validate_doc?\")\n exit(1)\n try:\n problem = Problem(settings.filename)\n problem.parse_tree(tree, False)\n except ImproperXmlException as e:\n print_error(e.args[0])\n print color(\"\\nPlease rerun validation after fixing\", color_code(CYAN))\n exit(1)\n \n firstProblem = True\n for version in problem.get_versions():\n if not version.standalone and not firstProblem:\n continue\n firstProblem = False\n \n print color(\"\\n\\nVERSION {}:\\n\".format(version.vid),\n color_code(BLUE))\n validate_version(version, failed)",
"def test_render_templates():\n water_mol = Atoms(cclib.bridge.cclib2openbabel.readfile(\"data/water.xyz\", \"xyz\"))\n if not water_mol.name:\n water_mol.name = \"data/water.xyz\"\n\n main([\"-g\", \"/tmp/foo.ADF.in\"])\n main([\"/tmp/foo.ADF.in\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.in\").read().strip(),\n \"\"\"TITLE data/water.xyz\n\nCHARGE 0 0\n\nNumber of atoms\n 3\n\nATOMS Cartesian\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\nEnd\n\nBasis\nEnd\n\nGeometry\nEnd\"\"\",\n )\n\n main([\"-g\", \"/tmp/test.GAMESS.inp\"])\n main([\"/tmp/test.GAMESS.inp\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.inp\").read(),\n \"\"\" $CONTRL COORD=CART UNITS=ANGS $END\n\n $DATA\ndata/water.xyz\nC1\nO 8.0 0.0584027061 0.0584027059 0.0000000000\nH 1.0 1.0096135406 -0.0680162466 0.0000000000\nH 1.0 -0.0680162466 1.0096135407 0.0000000000\n $END\n\n\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/hello.GAMESSUK.inp\"])\n main([\"/tmp/hello.GAMESSUK.inp\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.inp\").read(), water_mol.to_string(\"gukin\"))\n\n main([\"-g\", \"/tmp/hello.world.Gaussian.gjf\"])\n main([\"/tmp/hello.world.Gaussian.gjf\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.gjf\").read(),\n \"\"\"#Put Keywords Here, check Charge and Multiplicity.\n\n data/water.xyz\n\n0 1\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\n\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/bar.Jaguar.in\"])\n main([\"/tmp/bar.Jaguar.in\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.in\").read(), water_mol.to_string(\"jin\"))\n\n main([\"-g\", \"/tmp/foo.Molpro.inp\"])\n main([\"/tmp/foo.Molpro.inp\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.inp\").read(), water_mol.to_string(\"mp\"))\n\n main([\"-g\", \"/tmp/example.MOPAC.mop\"])\n main([\"/tmp/example.MOPAC.mop\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.mop\").read(),\n \"\"\"CHARGE=0 MS=0.0\ndata/water.xyz\n\nO 0.05840 1 0.05840 1 0.00000 1\nH 1.00961 1 -0.06802 1 0.00000 1\nH -0.06802 1 1.00961 1 0.00000 1\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/bar.MPQC.in\"])\n main([\"/tmp/bar.MPQC.in\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.in\").read(), water_mol.to_string(\"mpqcin\"))\n\n main([\"-g\", \"/tmp/foo.NWChem.nw\"])\n main([\"/tmp/foo.NWChem.nw\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.nw\").read(),\n \"\"\"start molecule\n\ntitle data/water.xyz\n\ngeometry units angstroms print xyz autosym\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\nend\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/example.ORCA.inp\"])\n main([\"/tmp/example.ORCA.inp\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.inp\").read(),\n \"\"\"# data/water.xyz\n! 
Opt\n\n* xyz 0 1\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\n*\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/bar.Psi.dat\"])\n main([\"/tmp/bar.Psi.dat\", \"data/water.xyz\"])\n assert_equals(\n open(\"data/water.dat\").read(),\n \"\"\"# data/water.xyz\n\nmolecule {\n0 1\nO 0.0584027061 0.0584027059 0.0000000000\nH 1.0096135406 -0.0680162466 0.0000000000\nH -0.0680162466 1.0096135407 0.0000000000\nunits angstrom\n}\n\noptimize('scf')\n\"\"\",\n )\n\n main([\"-g\", \"/tmp/example.QChem.in\"])\n main([\"/tmp/example.QChem.in\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.in\").read(), water_mol.to_string(\"qcin\"))\n\n main([\"-g\", \"/tmp/foo.ZINDO.input\"])\n main([\"/tmp/foo.ZINDO.input\", \"data/water.xyz\"])\n assert_equals(open(\"data/water.input\").read(), water_mol.to_string(\"zin\"))",
"def cpf_valid(cpf):\n cpf_validator = CPF()\n return cpf_validator.validate(cpf)",
"def test_template_feedback(self):\r\n pass",
"def template_validator(request):\n # get a dict of {site_id : settings_module} for the validator\n settings_modules = {}\n for mod in settings.ADMIN_FOR:\n settings_module = import_module(mod)\n settings_modules[settings_module.SITE_ID] = settings_module\n site_list = Site.objects.in_bulk(settings_modules.keys()).values()\n if request.POST:\n form = TemplateValidatorForm(settings_modules, site_list,\n data=request.POST)\n if form.is_valid():\n messages.info(request, 'The template is valid.')\n else:\n form = TemplateValidatorForm(settings_modules, site_list)\n return render_to_response('admin/template_validator.html', {\n 'title': 'Template validator',\n 'form': form,\n }, context_instance=template.RequestContext(request))",
"def dispatch(args, validator):\n LOG.info(\"Printing all the arguments: {}\\n\".format(args))\n if args.vnfd:\n LOG.info(\"VNFD validation\")\n validator.schema_validator.load_schemas(\"VNFD\")\n if args.syntax:\n LOG.info(\"Syntax validation\")\n validator.configure(syntax=True, integrity=False, topology=False,\n custom=False)\n elif args.integrity:\n LOG.info(\"Syntax and integrity validation\")\n validator.configure(syntax=True, integrity=True, topology=False,\n custom=False)\n elif args.topology:\n LOG.info(\"Syntax, integrity and topology validation\")\n validator.configure(syntax=True, integrity=True, topology=True,\n custom=False)\n elif args.custom:\n validator.configure(syntax=True, integrity=True, topology=True,\n custom=True, cfile=args.cfile)\n LOG.info(\"Syntax, integrity, topology and custom rules validation\")\n else:\n LOG.info(\"Default mode: Syntax, integrity and topology validation\")\n if validator.validate_function(args.vnfd):\n if ((validator.error_count == 0) and\n (len(validator.customErrors) == 0)):\n LOG.info(\"No errors found in the VNFD\")\n else:\n LOG.info(\"Errors in validation\")\n return validator\n\n elif args.nsd:\n LOG.info(\"NSD validation\")\n validator.schema_validator.load_schemas(\"NSD\")\n if args.syntax:\n LOG.info(\"Syntax validation\")\n validator.configure(syntax=True, integrity=False, topology=False)\n elif args.integrity:\n LOG.info(\"Syntax and integrity validation\")\n validator.configure(syntax=True, integrity=True, topology=False,\n dpath=args.dpath)\n elif args.topology:\n LOG.info(\"Syntax, integrity and topology validation\")\n validator.configure(syntax=True, integrity=True, topology=True,\n dpath=args.dpath)\n elif args.custom:\n validator.configure(syntax=True, integrity=True, topology=True,\n custom=True, cfile=args.cfile,\n dpath=args.dpath)\n LOG.info(\"Syntax, integrity, topology and custom rules validation\")\n else:\n validator.configure(syntax=True, integrity=True, topology=True,\n dpath=args.dpath)\n LOG.info(\"Default mode: Syntax, integrity and topology validation\")\n\n if validator.validate_service(args.nsd):\n if ((validator.error_count == 0) and (len(validator.customErrors) == 0)):\n LOG.info(\"No errors found in the Service descriptor validation\")\n else:\n LOG.info(\"Errors in custom rules validation\")\n return validator\n\n elif args.project_path:\n LOG.info(\"Project descriptor validation\")\n validator.schema_validator.load_schemas(\"NSD\")\n if args.syntax:\n LOG.info(\"Syntax validation\")\n validator.configure(syntax=True, integrity=False, topology=False,\n workspace_path=args.workspace_path)\n elif args.integrity:\n LOG.info(\"Syntax and integrity validation\")\n validator.configure(syntax=True, integrity=True, topology=False,\n workspace_path=args.workspace_path)\n elif args.topology:\n LOG.info(\"Syntax, integrity and topology validation\")\n validator.configure(syntax=True, integrity=True, topology=True,\n workspace_path=args.workspace_path)\n\n elif args.custom:\n validator.configure(syntax=True, integrity=True, topology=True,\n custom=True, cfile=args.cfile)\n LOG.info(\"Syntax, integrity, topology and custom rules validation\")\n else:\n LOG.info(\"Default mode: Syntax, integrity and topology validation\")\n\n if not validator.validate_project(args.project_path):\n LOG.info('Cant validate the project descriptors')\n else:\n if validator.error_count == 0:\n if len(validator.customErrors) == 0:\n LOG.info(\"No errors found in the validation of the project descriptors\")\n else:\n LOG.info(\"Errors in 
custom rules validation\")\n return validator\n elif args.tstd:\n LOG.info(\"Test descriptor validation\")\n if args.syntax:\n LOG.info(\"Syntax validation\")\n validator.configure(syntax=True, integrity=False, topology=False, custom=False)\n elif args.integrity:\n LOG.info(\"Integrity validation\")\n validator.configure(syntax=True, integrity=True, topology=False, custom=False)\n else:\n LOG.info(\"Default test descriptor validation syntax and integrity\")\n validator.configure(syntax=True, integrity=True, topology=False, custom=False)\n\n if not validator.validate_test(args.tstd):\n LOG.info('Cant validate the test descriptors')\n else:\n if validator.error_count == 0 and len(validator.customErrors) == 0:\n LOG.info(\"No errors found in the validation of the test descriptors\")\n else:\n LOG.info(\"Errors in validation\")\n return validator\n elif args.nstd:\n LOG.info(\"Slice descriptor validation\")\n validator.schema_validator.load_schemas(\"NSTD\")\n\n if args.syntax:\n LOG.info(\"Syntax validation\")\n validator.configure(syntax=True, integrity=False, topology=False, custom=False)\n elif args.integrity:\n LOG.info(\"Integrity validation\")\n validator.configure(syntax=True, integrity=True, topology=False, custom=False)\n else:\n LOG.info(\"Default test descriptor validation syntax and integrity\")\n validator.configure(syntax=True, integrity=True, topology=False, custom=False)\n\n if not validator.validate_slice(args.nstd):\n LOG.info('Cant validate the slice descriptors')\n else:\n if validator.error_count == 0 and len(validator.customErrors) == 0:\n LOG.info(\"No errors found in the validation of the slice descriptors\")\n else:\n LOG.info(\"Errors in validation\")\n return validator\n elif args.slad:\n LOG.info(\"SLA descriptor validation\")\n validator.schema_validator.load_schemas(\"SLAD\")\n if args.syntax:\n LOG.info(\"Syntax validation\")\n validator.configure(syntax=True, integrity=False, topology=False, custom=False)\n elif args.integrity:\n LOG.info(\"Integrity validation\")\n validator.configure(syntax=True, integrity=True, topology=False, custom=False)\n else:\n LOG.info(\"Default test descriptor validation syntax and integrity\")\n validator.configure(syntax=True, integrity=True, topology=False, custom=False)\n\n if not validator.validate_sla(args.slad):\n LOG.info('Cant validate the sla descriptors')\n else:\n if validator.error_count == 0 and len(validator.customErrors) == 0:\n LOG.info(\"No errors found in the validation of the sla descriptors\")\n else:\n LOG.info(\"Errors in validation\")\n return validator\n elif args.rpd:\n LOG.info(\"RP descriptor validation\")\n validator.schema_validator.load_schemas(\"RPD\")\n if args.syntax:\n LOG.info(\"Syntax validation\")\n validator.configure(syntax=True, integrity=False, topology=False, custom=False)\n elif args.integrity:\n LOG.info(\"Integrity validation\")\n validator.configure(syntax=True, integrity=True, topology=False, custom=False)\n else:\n LOG.info(\"Default test descriptor validation syntax and integrity\")\n validator.configure(syntax=True, integrity=True, topology=False, custom=False)\n\n if not validator.validate_runtime_policy(args.rpd):\n LOG.info('Cant validate the sla descriptors')\n else:\n if validator.error_count == 0 and len(validator.customErrors) == 0:\n LOG.info(\"No errors found in the validation of the sla descriptors\")\n else:\n LOG.info(\"Errors in validation\")\n return validator",
"def run():\n\n parser = argparse.ArgumentParser(description='Run the GOComp PSSE based validation tool on a problem instance')\n \n parser.add_argument('raw', help='raw - complete path and file name to a RAW file')\n parser.add_argument('con', help='con - complete path and file name to a CON file')\n parser.add_argument('inl', help='inl - complete path and file name to a INL file')\n parser.add_argument('mon', help='mon - complete path and file name to a MON file')\n parser.add_argument('sub', help='sub - complete path and file name to a SUB file')\n \n args = parser.parse_args()\n \n try:\n raw = args.raw\n con = args.con\n inl = args.inl\n mon = args.mon\n sub = args.sub\n except:\n print (\"exception in parsing the validation command\")\n raise\n else:\n run_main(raw, con, inl, mon, sub)"
] | [
"0.64297616",
"0.63258636",
"0.6283338",
"0.6172829",
"0.6088516",
"0.59951484",
"0.59484035",
"0.59463793",
"0.591722",
"0.58468133",
"0.58309555",
"0.5799353",
"0.57959706",
"0.5764867",
"0.5684891",
"0.568023",
"0.56518626",
"0.56301785",
"0.56172353",
"0.55345136",
"0.5533191",
"0.552623",
"0.5463642",
"0.5425174",
"0.5414265",
"0.5409142",
"0.54091126",
"0.538132",
"0.5367868",
"0.536137"
] | 0.811457 | 0 |
Ensures that two allocations close to each other are not mistaken when using scheduler.reserve. If they do then they bleed over, hence the name. | def test_no_bleed(scheduler):
d1 = (datetime(2011, 1, 1, 15, 0), datetime(2011, 1, 1, 16, 0))
d2 = (datetime(2011, 1, 1, 16, 0), datetime(2011, 1, 1, 17, 0))
a1 = scheduler.allocate(d1)[0]
a2 = scheduler.allocate(d2)[0]
scheduler.commit()
assert not a1.overlaps(*d2)
assert not a2.overlaps(*d1)
# expect no exceptions
scheduler.reserve('[email protected]', d2)
scheduler.reserve('[email protected]', d1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addToReservation():\n\n def fits(x, y):\n \"\"\"\n Check if a job shape's resource requirements will fit within a given node allocation\n \"\"\"\n return y.memory <= x.memory and y.cores <= x.cores and y.disk <= x.disk\n\n def subtract(x, y):\n \"\"\"\n Adjust available resources of a node allocation as a job is scheduled within it.\n \"\"\"\n return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk)\n\n def split(x, y, t):\n \"\"\"\n Partition a node allocation into two\n \"\"\"\n return (Shape(t, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk),\n NodeReservation(Shape(x.wallTime - t, x.memory, x.cores, x.disk)))\n\n i = 0 # Index of node reservation\n while True:\n # Case a new node reservation is required\n if i == len(nodeReservations):\n x = NodeReservation(subtract(nodeShape, jS))\n nodeReservations.append(x)\n t = nodeShape.wallTime\n while t < jS.wallTime:\n y = NodeReservation(x.shape)\n t += nodeShape.wallTime\n x.nReservation = y\n x = y\n return\n\n # Attempt to add the job to node reservation i\n x = nodeReservations[i]\n y = x\n t = 0\n \n while True:\n if fits(y.shape, jS):\n t += y.shape.wallTime\n \n # If the jS fits in the node allocation from x to y\n if t >= jS.wallTime:\n t = 0\n while x != y:\n x.shape = subtract(x.shape, jS)\n t += x.shape.wallTime\n x = x.nReservation\n assert x == y\n assert jS.wallTime - t <= x.shape.wallTime\n if jS.wallTime - t < x.shape.wallTime:\n x.shape, nS = split(x.shape, jS, jS.wallTime - t)\n nS.nReservation = x.nReservation\n x.nReservation = nS\n else:\n assert jS.wallTime - t == x.shape.wallTime\n x.shape = subtract(x.shape, jS)\n return \n \n # If the job would fit, but is longer than the total node allocation\n # extend the node allocation\n elif y.nReservation == None and x == nodeReservations[i]:\n # Extend the node reservation to accommodate jS\n y.nReservation = NodeReservation(nodeShape)\n \n else: # Does not fit, reset\n x = y.nReservation\n t = 0\n \n y = y.nReservation\n if y is None:\n # Reached the end of the reservation without success so stop trying to\n # add to reservation i\n break\n i += 1",
"def reserveAllocation(self, job, concurrent_jobs):\n with self.thread_lock:\n if job not in self.active and (job.serial or len(self.active) < self.max_slots):\n if job.serial:\n self.shared_dags[job].addCaveat('serial')\n\n self.active.add(job)\n return True",
"def test_allocator_double_sets():\n indexSets = [set([3, 1]), set([2, 1]), set([0, 1]), set([1])]\n allocator = Allocator(indexSets)\n assert len(allocator.slots) == 4\n allocation = allocator.allocate()\n LOGGER.info(allocator.report())\n validate_allocation(indexSets, allocation)",
"def test_simple_multitask():\n bucket = []\n def _foo():\n for i in range(10):\n bucket.append(i)\n yield\n\n scheduler = Scheduler()\n scheduler.new(_foo())\n scheduler.new(_foo())\n scheduler.mainloop()\n\n expect_bucket = []\n for i in range(10):\n expect_bucket.append(i)\n expect_bucket.append(i)\n assert bucket == expect_bucket",
"def _allocation_needs_change(self, dates_before, dates_after,\n network_properties, resource_properties,\n alloc):\n requested_network_ids = [network['id'] for network in\n self._filter_networks_by_properties(\n network_properties, resource_properties)]\n\n if alloc['network_id'] not in requested_network_ids:\n return True\n\n starting_earlier = (\n dates_after['start_date'] < dates_before['start_date'])\n ending_later = dates_after['end_date'] > dates_before['end_date']\n\n if (starting_earlier or ending_later):\n max_start = max(dates_before['start_date'],\n dates_after['start_date'])\n min_end = min(dates_before['end_date'],\n dates_after['end_date'])\n\n reserved_periods = db_utils.get_reserved_periods(\n alloc['network_id'],\n dates_after['start_date'],\n dates_after['end_date'],\n datetime.timedelta(minutes=CONF.cleaning_time),\n resource_type='network')\n reserved_by_others = [\n p for p in reserved_periods\n if not (p[0] == max_start and p[1] == min_end)\n ]\n return len(reserved_by_others) > 0\n\n return False",
"def test_allocation_strategy_opt_allocs():\n prices = np.array([[10, 10], [11, 15], [12, 5], [13, 10]])\n allocs = AllocationStrategy.opt_allocs(prices, neg_sharpe_ratio)\n np.testing.assert_almost_equal(allocs, [1, 0])",
"def test_multiple_slots_released(self):\r\n JOB_ID = 20\r\n JOB_START= 50\r\n job = simulation_multi.Job(2, JOB_START)\r\n self.simulation.jobs[JOB_ID] = job\r\n\r\n worker = self.simulation.workers[1]\r\n self.assertEqual(worker.num_free_slots, 4)\r\n events = worker.add_probe(JOB_ID, JOB_START)\r\n self.assertEqual(worker.num_free_slots, 0)\r\n # The events shoudl include 2 task end events and 1 noop.\r\n self.assertEqual(len(events), 3)\r\n # Run the noop event.\r\n events[-1][1].run(events[-1][0])\r\n self.assertEqual(worker.num_free_slots, 2)",
"def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"",
"def test_placement_group(ray_start_2_cpus):\n num_workers = 2\n bundle = {\"CPU\": 1}\n bundles = [bundle.copy() for _ in range(num_workers)]\n placement_group = ray.util.placement_group(bundles)\n wg = WorkerGroup(num_workers=num_workers, placement_group=placement_group)\n wg.remove_workers([0])\n wg.add_workers(1)",
"def test_allocator_single_confilicting_sets():\n indexSets = [set([1]), set([1])]\n allocator = Allocator(indexSets)\n assert len(allocator.slots) == 2\n allocation = allocator.allocate()\n assert not allocation",
"def testBucketDrain(self):\n b = SomeBucket()\n fit = b.add(1000)\n self.clock.set(10)\n fit = b.add(1000)\n self.assertEqual(20, fit)",
"def doubleCapacity(self): # total: O(n)\n self._capacity = self._capacity * 2 #O(1)\n tempLs = [None] * self._capacity #O(1)\n if self._start < self._end: # if state ment: O(n)\n for i in range(self._size): #O(n) - n is self._size\n tempLs[i] = self._queue[i]\n else: # else branch: O(x+y) = O(n) - n is self._size\n for i in range(self._end+1): #O(x) \n tempLs[i] = self._queue[i]\n for k in range(self._start,self._size): #O(y)\n tempLs[k+self._size] = self._queue[k]\n self._start = self._start + self._size #O(1)\n self._queue = tempLs #O(1)",
"def testMainScheduler(self):\n # ARRANGE\n\n numGuardsToAllocate = 3\n guardsAllocated = []\n \n entries = []\n entries.append(GuardEntry(\"Mike\", 0, 12))\n entries.append(GuardEntry(\"Ray\", 3, 9))\n entries.append(GuardEntry(\"Dave\", 4, 8))\n\n # 12 slots 8pm to 2am\n numTimeSlots = 12\n \n # ACT\n\n # Setup the schedule\n (schedule, guardsAllocated) = createSchedule(entries, numTimeSlots)\n timeSlots = schedule.getSchedule()\n \n # ASSERT\n\n # Print details of the schedule\n timeSlotIdx = 0\n print(\"Time Slot,Guard ID\")\n for slot in timeSlots:\n print(str(timeSlotIdx) + \",\" + str(slot.guardID))\n timeSlotIdx += 1\n self.assertTrue(len(guardsAllocated) == 3)",
"def overlap_with(self, other):",
"def test_auto_assign_two_overflow(self):\n shift1 = RegularWorkshift.objects.create(\n workshift_type=self.wtype1,\n pool=self.p1,\n hours=3,\n )\n shift2 = RegularWorkshift.objects.create(\n workshift_type=self.wtype2,\n pool=self.p1,\n hours=3,\n )\n\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([self.profile], unfinished)\n self.assertIn(self.profile, shift1.current_assignees.all())\n self.assertNotIn(self.profile, shift2.current_assignees.all())\n\n instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)\n self.assertGreater(instances.count(), 0)\n self.assertTrue(all(\n instance.workshifter == self.profile\n for instance in instances\n ))\n\n instances = WorkshiftInstance.objects.filter(weekly_workshift=shift2)\n self.assertGreater(instances.count(), 0)\n self.assertTrue(all(\n instance.workshifter is None\n for instance in instances\n ))\n\n pool_hours = self.profile.pool_hours.get(pool=self.p1)\n self.assertEqual(\n pool_hours.assigned_hours,\n 3,\n )",
"def test_claim_resources_success_resize_to_same_host_no_shared(self):\n get_current_allocations_resp_mock = mock.Mock(status_code=200)\n # source host allocation held by the migration_uuid so it is not\n # not returned to the claim code as that asks for the instance_uuid\n # consumer\n get_current_allocations_resp_mock.json.return_value = {\n 'allocations': {},\n \"consumer_generation\": 1,\n \"project_id\": uuids.project_id,\n \"user_id\": uuids.user_id\n }\n\n self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock\n put_allocations_resp_mock = mock.Mock(status_code=204)\n self.ks_adap_mock.put.return_value = put_allocations_resp_mock\n consumer_uuid = uuids.consumer_uuid\n # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB\n # are all being increased but on the same host. We also throw a custom\n # resource class in the new allocation to make sure it's not lost\n alloc_req = {\n 'allocations': {\n uuids.same_host: {\n 'resources': {\n 'VCPU': 2,\n 'MEMORY_MB': 2048,\n 'DISK_GB': 40,\n 'CUSTOM_FOO': 1\n }\n },\n },\n # this allocation request comes from the scheduler therefore it\n # does not have consumer_generation in it.\n \"project_id\": uuids.project_id,\n \"user_id\": uuids.user_id\n }\n\n project_id = uuids.project_id\n user_id = uuids.user_id\n res = self.client.claim_resources(self.context, consumer_uuid,\n alloc_req, project_id, user_id,\n allocation_request_version='1.28')\n\n expected_url = \"/allocations/%s\" % consumer_uuid\n expected_payload = {\n 'allocations': {\n uuids.same_host: {\n 'resources': {\n 'VCPU': 2,\n 'MEMORY_MB': 2048,\n 'DISK_GB': 40,\n 'CUSTOM_FOO': 1\n }\n },\n },\n # report client assumes a new consumer in this case\n 'consumer_generation': None,\n 'project_id': project_id,\n 'user_id': user_id}\n self.ks_adap_mock.put.assert_called_once_with(\n expected_url, microversion='1.28', json=mock.ANY,\n global_request_id=self.context.global_id)\n # We have to pull the json body from the mock call_args to validate\n # it separately otherwise hash seed issues get in the way.\n actual_payload = self.ks_adap_mock.put.call_args[1]['json']\n self.assertEqual(expected_payload, actual_payload)\n\n self.assertTrue(res)",
"def overlaps(self, other: \"Availability\", strict: bool) -> bool:\n\n if not isinstance(other, Availability):\n raise Exception(\"Please provide an Availability object\")\n\n if strict:\n return (\n (self.start <= other.start < self.end)\n or (self.start < other.end <= self.end)\n or (other.start <= self.start < other.end)\n or (other.start < self.end <= other.end)\n )\n return (\n (self.start <= other.start <= self.end)\n or (self.start <= other.end <= self.end)\n or (other.start <= self.start <= other.end)\n or (other.start <= self.end <= other.end)\n )",
"def overlaps(self, other):\n pass",
"def stack_ps(ps1, ps2, keep_unique = False, fill_time = False, message = True):\n # create deepcopies to avoid changing original instances\n \n ps1 = copy.deepcopy(ps1)\n ps2 = copy.deepcopy(ps2)\n \n # create datetime information in PS instances\n \n try:\n _ = getattr(ps1, \"datetime\")\n except AttributeError:\n ps1.createTimeDate()\n \n try: \n _ = getattr(ps2, \"datetime\")\n except AttributeError:\n ps2.createTimeDate()\n \n # check time resolutions\n res1 = (dt.datetime.strptime(ps1.datetime['data'][1], ps1.datetime['units']) - dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units'])).seconds\n res2 = (dt.datetime.strptime(ps2.datetime['data'][1], ps2.datetime['units']) - dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units'])).seconds\n \n if abs(res1-res2) > 60:\n if message:\n print( (\"warning: resolutions differ %d seconds\")%(abs(res1-res2)) )\n \n # check if ps1 is \"older\" than ps2\n \n reversed_order = False\n cut = None\n \n if dt.datetime.strptime(ps1.datetime['data'][-1], ps1.datetime['units']) < dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units']):\n # ps2 starts after ps1 ends\n timediff = (dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units']) - dt.datetime.strptime(ps1.datetime['data'][-1], ps1.datetime['units'])).total_seconds()\n elif dt.datetime.strptime(ps2.datetime['data'][-1], ps2.datetime['units']) < dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units']):\n # ps1 starts after ps2 ends (user has inadvertently switched the order of the instances)\n reversed_order = True\n timediff = (dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units']) - dt.datetime.strptime(ps2.datetime['data'][-1], ps2.datetime['units'])).total_seconds()\n else:\n # yikes! 
The particle sizer instances have overlapping data\n # it is assumed that ps2 data replaces ps1 data starting \n # from the overlapping time\n cut, cutdate = tt.findNearestDate(ps1.datetime['data'], ps2.datetime['data'][0]) \n fill_time = False\n \n #print(timediff, 1.5*res1)\n # check if filling is required\n if fill_time is True:\n # check time difference\n if reversed_order:\n # ps1 starts after ps2 ends\n if timediff > 1.5*res2:\n # the time gap between two instances has to be\n # larger than twice the normal resolution\n numdates = int(np.ceil(timediff/res2))\n base = dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units'])\n date_list = [base - dt.timedelta(seconds=res2*x) for x in range(numdates)]\n date_list = list(reversed(date_list[1:]))# because numdates starts at 0, first date on date_list is the same as the startdate from the second instance\n datetimelist = [dt.datetime.strftime(dl, ps2.datetime['units']) for dl in date_list]\n ps2.datetime['data'] = np.append(ps2.datetime['data'], datetimelist)\n timelist = [dt.datetime.strftime(dl, ps2.time['units']) for dl in date_list]\n ps2.time['data'] = np.append(ps2.time['data'], timelist)\n datelist = [dt.datetime.strftime(dl, ps2.date['units']) for dl in date_list]\n ps2.date['data'] = np.append(ps2.date['data'], datelist)\n else:\n fill_time = False\n else:\n if timediff > 1.5*res1:\n # the time gap between two instances has to be\n # larger than twice the normal resolution\n numdates = int(np.ceil(timediff/res1))\n base = dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units'])\n date_list = [base - dt.timedelta(seconds=res1*x) for x in range(numdates)]\n date_list = list(reversed(date_list[1:])) # because numdates starts at 0, first date on date_list is the same as the startdate from the second instance\n datetimelist = [dt.datetime.strftime(dl, ps1.datetime['units']) for dl in date_list]\n ps1.datetime['data'] = np.append(ps1.datetime['data'], datetimelist)\n timelist = [dt.datetime.strftime(dl, ps1.time['units']) for dl in date_list]\n ps1.time['data'] = np.append(ps1.time['data'], timelist)\n datelist = [dt.datetime.strftime(dl, ps1.date['units']) for dl in date_list]\n ps1.date['data'] = np.append(ps1.date['data'], datelist)\n else:\n fill_time = False\n \n if message:\n print(\"reversed order:\", reversed_order)\n # check which attributes are similar in both instances\n if reversed_order:\n # ps1 starts after ps2 ends\n new_ps = copy.deepcopy(ps2)\n for attribute in ps1.__dict__.keys():\n if attribute in ps2.__dict__.keys():\n afield = getattr(new_ps, attribute)\n if attribute == 'diameter':\n st11, st12, st21, st22, diamlist = check_diameters(ps1.diameter['data'], ps2.diameter['data'], ps1.instrument_type)\n \n for var in new_ps.data['variables']:\n if fill_time is True:\n add = np.ma.zeros((ps2.data[var]['data'].shape[0],len(date_list))) \n add[:] = np.nan\n newdata = np.append(ps2.data[var]['data'],add,axis=1)\n ps2.data[var]['data'] = newdata\n \n sh1 = ps1.data[var]['data'].shape\n sh2 = ps2.data[var]['data'].shape\n newfields = (len(diamlist) ,sh1[1] + sh2[1])\n new_field = np.ma.zeros(newfields)\n new_field[:] = np.ma.masked\n \n new_field[st21:st22, 0:ps2.data[var]['data'][:,:cut].shape[1]] = ps2.data[var]['data'][:,:cut]\n new_field[st11:st12, ps2.data[var]['data'][:,:cut].shape[1]:] = ps1.data[var]['data']\n \n new_ps.data[var]['data'] = new_field\n \n afield['data'] = diamlist\n \n elif attribute == 'data':\n # data has been appended with diameters\n pass\n else:\n try:\n field_ps2 = 
getattr(ps2, attribute)\n field_ps1 = getattr(ps1, attribute)\n except TypeError:\n if attribute == 'header':\n pass\n else:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n try:\n data_ps2 = field_ps2['data']\n data_ps1 = field_ps1['data']\n if attribute in ['date', 'datetime', 'time']: # these have already been extended with the correct data\n afield['data'] = np.append(data_ps2[:cut], data_ps1)\n elif fill_time:\n add = np.ma.zeros(len(date_list))\n add[:] = np.nan\n afield['data'] = np.append(np.append(data_ps2[:cut],add), data_ps1)\n else:\n afield['data'] = np.append(data_ps2[:cut], data_ps1)\n except:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n \n else:\n if keep_unique:\n newattribute = getattr(ps1,attribute)\n newattribute['time'] = ps1['datetime']['data']\n setattr(new_ps, attribute, newattribute)\n else:\n pass\n if keep_unique is False:\n # get rid of attributes which were in ps2 but not in ps1\n for attribute in ps2.__dict__.keys():\n if attribute in ps1.__dict__.keys():\n pass\n else:\n delattr(new_ps, attribute)\n \n \n else:\n # ps2 starts after ps1 ends\n new_ps = copy.deepcopy(ps1)\n for attribute in ps2.__dict__.keys():\n if attribute in ps1.__dict__.keys():\n afield = getattr(new_ps, attribute)\n if attribute == 'diameter':\n st11, st12, st21, st22, diamlist = check_diameters(ps1.diameter['data'], ps2.diameter['data'], ps1.instrument_type)\n for var in new_ps.data['variables']:\n if fill_time is True:\n add = np.ma.zeros((ps1.data[var]['data'].shape[0],len(date_list))) \n add[:] = np.nan\n newdata = np.append(ps1.data[var]['data'],add,axis=1)\n ps1.data[var]['data'] = newdata\n \n sh1 = ps1.data[var]['data'].shape\n sh2 = ps2.data[var]['data'].shape\n newfields = (len(diamlist) ,sh1[1] + sh2[1])\n new_field = np.ma.zeros(newfields)\n new_field[:] = np.ma.masked\n \n new_field[st11:st12, 0:ps1.data[var]['data'][:,:cut].shape[1]] = ps1.data[var]['data'][:,:cut]\n new_field[st21:st22, ps1.data[var]['data'][:,:cut].shape[1]:] = ps2.data[var]['data']\n \n new_ps.data[var]['data'] = new_field\n \n afield['data'] = diamlist\n \n elif attribute == 'data':\n # data has been appended with diameters\n pass\n else:\n try:\n field_ps2 = getattr(ps2, attribute)\n field_ps1 = getattr(ps1, attribute)\n except TypeError:\n if attribute == 'header':\n pass\n else:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n try:\n data_ps2 = field_ps2['data']\n data_ps1 = field_ps1['data']\n if attribute in ['date', 'datetime', 'time']: # these have already been extended with the correct data\n afield['data'] = np.append(data_ps1[:cut], data_ps2)\n elif fill_time:\n add = np.ma.zeros(len(date_list))\n add[:] = np.nan\n afield['data'] = np.append(np.append(data_ps1[:cut],add), data_ps2)\n else:\n afield['data'] = np.append(data_ps1[:cut], data_ps2)\n except:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n \n else:\n if keep_unique:\n newattribute = getattr(ps2,attribute)\n newattribute['time'] = ps2['datetime']['data']\n setattr(new_ps, attribute,newattribute)\n else:\n pass\n if keep_unique is False:\n # get rid of attributes which were in ps2 but not in ps1\n for attribute in ps1.__dict__.keys():\n if attribute in ps2.__dict__.keys():\n pass\n else:\n delattr(new_ps, attribute)\n \n new_ps.sample['data'] = np.arange(1.0, len(new_ps.datetime['data'])+1)\n new_ps.instrument_type = ps1.instrument_type.split('_')[0] + '_concatenated'\n \n if message:\n print('filltime: ', fill_time)\n \n return 
new_ps",
"def test_claim_resources_success_resize_to_same_host_with_shared(self):\n get_current_allocations_resp_mock = mock.Mock(status_code=200)\n # source host allocation held by the migration_uuid so it is not\n # not returned to the claim code as that asks for the instance_uuid\n # consumer\n get_current_allocations_resp_mock.json.return_value = {\n 'allocations': {},\n \"consumer_generation\": 1,\n \"project_id\": uuids.project_id,\n \"user_id\": uuids.user_id\n }\n\n self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock\n put_allocations_resp_mock = mock.Mock(status_code=204)\n self.ks_adap_mock.put.return_value = put_allocations_resp_mock\n consumer_uuid = uuids.consumer_uuid\n # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB\n # are all being increased but on the same host. We also throw a custom\n # resource class in the new allocation to make sure it's not lost\n alloc_req = {\n 'allocations': {\n uuids.same_host: {\n 'resources': {\n 'VCPU': 2,\n 'MEMORY_MB': 2048,\n 'CUSTOM_FOO': 1\n }\n },\n uuids.shared_storage: {\n 'resources': {\n 'DISK_GB': 40,\n }\n },\n },\n # this allocation request comes from the scheduler therefore it\n # does not have consumer_generation in it.\n \"project_id\": uuids.project_id,\n \"user_id\": uuids.user_id\n }\n\n project_id = uuids.project_id\n user_id = uuids.user_id\n res = self.client.claim_resources(self.context, consumer_uuid,\n alloc_req, project_id, user_id,\n allocation_request_version='1.28')\n\n expected_url = \"/allocations/%s\" % consumer_uuid\n expected_payload = {\n 'allocations': {\n uuids.same_host: {\n 'resources': {\n 'VCPU': 2,\n 'MEMORY_MB': 2048,\n 'CUSTOM_FOO': 1\n }\n },\n uuids.shared_storage: {\n 'resources': {\n 'DISK_GB': 40,\n }\n },\n },\n # report client assumes a new consumer in this case\n 'consumer_generation': None,\n 'project_id': project_id,\n 'user_id': user_id}\n self.ks_adap_mock.put.assert_called_once_with(\n expected_url, microversion='1.28', json=mock.ANY,\n global_request_id=self.context.global_id)\n # We have to pull the json body from the mock call_args to validate\n # it separately otherwise hash seed issues get in the way.\n actual_payload = self.ks_adap_mock.put.call_args[1]['json']\n self.assertEqual(expected_payload, actual_payload)\n\n self.assertTrue(res)",
"def is_single_allocation(self):\n return False",
"def test_claim_resources_older_alloc_req(self):\n get_resp_mock = mock.Mock(status_code=200)\n get_resp_mock.json.return_value = {\n 'allocations': {}, # build instance, not move\n }\n self.ks_adap_mock.get.return_value = get_resp_mock\n resp_mock = mock.Mock(status_code=204)\n self.ks_adap_mock.put.return_value = resp_mock\n consumer_uuid = uuids.consumer_uuid\n alloc_req = {\n 'allocations': {\n uuids.cn1: {\n 'resources': {\n 'VCPU': 1,\n 'MEMORY_MB': 1024,\n }\n },\n },\n }\n\n project_id = uuids.project_id\n user_id = uuids.user_id\n res = self.client.claim_resources(self.context, consumer_uuid,\n alloc_req, project_id, user_id,\n allocation_request_version='1.12')\n\n expected_url = \"/allocations/%s\" % consumer_uuid\n expected_payload = {\n 'allocations': {\n rp_uuid: res\n for rp_uuid, res in alloc_req['allocations'].items()},\n # no consumer generation in the payload as the caller requested\n # older microversion to be used\n 'project_id': project_id,\n 'user_id': user_id}\n self.ks_adap_mock.put.assert_called_once_with(\n expected_url, microversion='1.12', json=expected_payload,\n global_request_id=self.context.global_id)\n self.assertTrue(res)",
"def reserve(self, item, strict=True):\n out.info(\"Trying to reserve {} from pool {}\\n\".format(str(item),\n self.__class__.__name__))\n if item in self.used:\n if strict:\n raise Exception(\"Trying to reserve a used item\")\n else:\n self.used.add(item)",
"def test_get_alloc_format_overlap():\n time = \"10:00:00\"\n nodes = 5\n account = \"A35311\"\n options = {\"N\": 10, \"time\": \"15\", \"account\": \"S1242\"}\n alloc_cmd = _get_alloc_cmd(nodes, time, account, options)\n result = [\n \"--no-shell\",\n \"-N\",\n \"5\",\n \"-J\",\n \"SmartSim\",\n \"-t\",\n \"10:00:00\",\n \"-A\",\n \"A35311\",\n ]\n assert result == alloc_cmd",
"def reserve(self):\n return self._reserve",
"def check_size(prev, current, delta):\n before = prev.pools[0].used\n after = current.pools[0].used\n assert delta == (before - after) >> 20",
"def test_claim_resources_success_evacuate_no_shared(self):\n # the source allocation is also held by the instance_uuid so report\n # client will see it.\n current_allocs = {\n 'allocations': {\n uuids.source_host: {\n 'generation': 42,\n 'resources': {\n 'VCPU': 1,\n 'MEMORY_MB': 1024,\n 'DISK_GB': 20\n },\n },\n },\n \"consumer_generation\": 1,\n \"project_id\": uuids.project_id,\n \"user_id\": uuids.user_id\n }\n self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(\n status_code=200,\n content=jsonutils.dumps(current_allocs))\n put_allocations_resp_mock = fake_requests.FakeResponse(status_code=204)\n self.ks_adap_mock.put.return_value = put_allocations_resp_mock\n consumer_uuid = uuids.consumer_uuid\n # this is an evacuate so we have the same resources request towards the\n # dest host\n alloc_req = {\n 'allocations': {\n uuids.dest_host: {\n 'resources': {\n 'VCPU': 1,\n 'MEMORY_MB': 1024,\n 'DISK_GB': 20,\n }\n },\n },\n # this allocation request comes from the scheduler therefore it\n # does not have consumer_generation in it.\n \"project_id\": uuids.project_id,\n \"user_id\": uuids.user_id\n }\n\n project_id = uuids.project_id\n user_id = uuids.user_id\n res = self.client.claim_resources(self.context, consumer_uuid,\n alloc_req, project_id, user_id,\n allocation_request_version='1.28')\n\n expected_url = \"/allocations/%s\" % consumer_uuid\n # we expect that both the source and dest allocations are here\n expected_payload = {\n 'allocations': {\n uuids.source_host: {\n 'resources': {\n 'VCPU': 1,\n 'MEMORY_MB': 1024,\n 'DISK_GB': 20\n },\n },\n uuids.dest_host: {\n 'resources': {\n 'VCPU': 1,\n 'MEMORY_MB': 1024,\n 'DISK_GB': 20,\n }\n },\n },\n # report client uses the consumer_generation that it got from\n # placement when asked for the existing allocations\n 'consumer_generation': 1,\n 'project_id': project_id,\n 'user_id': user_id}\n self.ks_adap_mock.put.assert_called_once_with(\n expected_url, microversion='1.28', json=mock.ANY,\n global_request_id=self.context.global_id)\n # We have to pull the json body from the mock call_args to validate\n # it separately otherwise hash seed issues get in the way.\n actual_payload = self.ks_adap_mock.put.call_args[1]['json']\n self.assertEqual(expected_payload, actual_payload)\n\n self.assertTrue(res)",
"def overlaps(self, other): # -> bool:\n ...",
"def can_combine(self, first, second):\n # Need to check out of order issues as\n # blocks are sorted by where they start in a\n mismatch_ab = (first.a_end <= second.a\n and second.b_end <= first.b)\n mismatch_ba = (second.a_end <= first.a\n and first.b_end <= second.b)\n out_of_order = mismatch_ab or mismatch_ba\n return not out_of_order and self.jump_gap(second)",
"def test_does_not_starve_large_locks():\r\n with throttle_client(b\"[semaphores]\\nA=5\") as client:\r\n small = client.new_peer(expires_in=timedelta(minutes=1))\r\n big = client.new_peer(expires_in=timedelta(minutes=1))\r\n other_small = client.new_peer(expires_in=timedelta(minutes=1))\r\n # This lock is acquired immediatly decrementing the semaphore count to 4\r\n assert client.acquire(small, \"A\", count=1)\r\n # Now try a large one. Of course we can not acquire it yet\r\n assert not client.acquire(big, \"A\", count=5)\r\n # This one could be acquired due to semaphore count, but won't, since the larger one is\r\n # still pending.\r\n assert not client.acquire(other_small, \"A\", count=1)\r\n # Remainder is still 4\r\n assert client.remainder(\"A\") == 4\r\n\r\n # We free the first small lock, now the big one can be acquired\r\n client.release(small)\r\n assert client.remainder(\"A\") == 0"
] | [
"0.5574495",
"0.54888433",
"0.54477704",
"0.53917575",
"0.53531617",
"0.5304206",
"0.5289244",
"0.5287738",
"0.5266306",
"0.5228824",
"0.5228126",
"0.52268434",
"0.5102959",
"0.50496024",
"0.50263566",
"0.50198996",
"0.50185126",
"0.4980343",
"0.49549127",
"0.49506003",
"0.494848",
"0.49482417",
"0.49402443",
"0.493427",
"0.49178132",
"0.49134338",
"0.49120775",
"0.48695928",
"0.4866265",
"0.4824564"
] | 0.70153683 | 0 |
The event's start time, as a timezone-aware datetime object | def start(self):
if self.start_time is None:
time = datetime.time(hour=19, tzinfo=CET)
else:
time = self.start_time.replace(tzinfo=CET)
return datetime.datetime.combine(self.date, time) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_datetime_start(self):\n return self.get_t_sect()['datetime_start']",
"def start_time(self):\n # TODO: use pd.Timestamp instead\n return self.time[0].to_pydatetime()",
"def get_start_time(self):\n start = datetime.strptime(\n self.get_handler().SOURCE_START_DATE.split('.')[0],\n '%Y%m%d%H%M%S'\n )\n return start",
"def start_time(self):\n return self._meta['start_time']",
"def start_datetime(self):\n\n start = \"{0} {1}\".format(self.data.get('Start date'), self.data.get('Start time'))\n return datetime.datetime.strptime(start, self.datetime_format)",
"def start_datetime(self) -> datetime:\n return utc_to_local(self._db_data.start_datetime)",
"def start_datetime(self):\n date_string = self.data.get('Start date')\n time_string = self.data.get('Start time')\n start_datetime_string = \"{date} {time}\".format(date=date_string, time=time_string)\n return datetime.datetime.strptime(start_datetime_string, self.datetime_format)",
"def start_time(self):\n return self.time_parser.start_time",
"def start_datetime(self):\n\n start = self.data.get('start').split(\"+\")[0]\n return datetime.datetime.strptime(start, self.datetime_format)",
"def started(self):\n if not self.start_ts:\n return None\n return datetime.utcfromtimestamp(self.start_ts)",
"def _getStartTime(self):\n return self._startTime.strftime(\"%Y-%m-%d %H:%M:%S\")",
"def scheduled_start_date_time(self):\n if \"scheduledStartDateTime\" in self._prop_dict:\n if isinstance(self._prop_dict[\"scheduledStartDateTime\"], OneDriveObjectBase):\n return self._prop_dict[\"scheduledStartDateTime\"]\n else :\n self._prop_dict[\"scheduledStartDateTime\"] = DateTimeTimeZone(self._prop_dict[\"scheduledStartDateTime\"])\n return self._prop_dict[\"scheduledStartDateTime\"]\n\n return None",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def get_start_time(self):\n vidname = self.fname.split(\"/\")[-1]\n date_, time_ = vidname.split(\"_\")\n year = int(date_[:4])\n mon = int(date_[4:6])\n day = int(date_[6:])\n hour = int(time_[:2])\n min_ = int(time_[2:4])\n sec = int(time_[4:6])\n return datetime.datetime(\n year, mon, day, hour, min_, sec, tzinfo=datetime.timezone.utc\n )",
"def start_time(self):\n return self._get(\"start_time\")",
"def cal_start(self):\n return self.datetime_start",
"def start_time(self) -> str:\n return pulumi.get(self, \"start_time\")",
"def start_time(self) -> str:\n return pulumi.get(self, \"start_time\")",
"def start_time(self) -> datetime:\n return self.root_hartree.start_time",
"def get_start_time(self):\n return str(self._start_time)",
"def date_started(self):\n return datetime.datetime.fromtimestamp(self.fields['startDate'])",
"def getStartTime(self):\n raise NotImplementedError",
"def __get_starting_time(self):\n return self.__starting_time"
] | [
"0.76968586",
"0.76729155",
"0.7643306",
"0.75934434",
"0.7549188",
"0.7539281",
"0.7499669",
"0.74902815",
"0.744862",
"0.7445907",
"0.7349264",
"0.7282488",
"0.7249556",
"0.7249556",
"0.7249556",
"0.7249556",
"0.7249556",
"0.7249556",
"0.7249556",
"0.7249556",
"0.7235539",
"0.7229741",
"0.7201024",
"0.71996874",
"0.71996874",
"0.7184178",
"0.7147502",
"0.71234655",
"0.71195763",
"0.71129113"
] | 0.78941786 | 0 |
Is true if the event only spans one day | def one_day(self):
return self.end.date() == self.date | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_in_advent() -> bool:\n # Run the code from the 1st to the 24th\n return datetime.now(EST).day in range(1, 25) and datetime.now(EST).month == 12",
"def runs_today(self,s_id,day):\n if self.schedule_keys[s_id][day]==1:\n return True\n else:\n return False",
"def get_is_on(self, event: Event | None) -> bool:\n if event is None:\n return False\n\n now = dt_util.utcnow()\n value = now > event.start\n if value and event.end is not None and now > event.end:\n value = False\n\n return value",
"def is_rejoinee(self):\n return len(self._start_date) > 1",
"def test_coming_up_one_day_until(self):\n time = timezone.now() + datetime.timedelta(days=1)\n tomorrow_event = Event(event_date=time)\n self.assertIs(tomorrow_event.coming_up(), True)",
"def test_events_same_date(self):\n user = User.objects.create_user(\n 'foo', '[email protected]', 'secret'\n )\n d = make_aware(\n datetime.datetime(2019, 5, 3, 0, 0, 0, 0),\n get_default_timezone()\n )\n event = Event.objects.create(\n start_date=d,\n end_date=d,\n all_day=True,\n created_by=user,\n title=\"The big event\",\n description=\"Amazing event\",\n repeat=\"NEVER\",\n )\n event2 = Event.objects.create(\n start_date=d,\n end_date=d,\n all_day=True,\n created_by=user,\n title=\"The other event\",\n description=\"Incredible event\",\n repeat=\"NEVER\",\n )\n event.save()\n event2.save()\n events = upcoming_events(finish=2000)\n self.assertEqual(len(events['upcoming_events']), 2)",
"def is_starttrimester(today):\n if isinstance(today, datetime):\n if today.day == 1 and today.month == 1:\n return True\n elif today.day == 1 and today.month == 4:\n return True\n elif today.day == 1 and today.month == 7:\n return True\n elif today.day == 1 and today.month == 10:\n return True\n return False\n else:\n raise Exception(\"{} is not a datetime instance\".format(today))",
"def is_soon(dt, window):\r\n soon = (utcnow() + datetime.timedelta(seconds=window))\r\n return normalize_time(dt) <= soon",
"def will_occur(self, now):\n return self.end_repeat is None or self.end_repeat >= now.date() or \\\n self.l_start_date >= now or self.l_end_date >= now",
"def is_soon(dt, window):\n soon = (utcnow() + datetime.timedelta(seconds=window))\n return normalize_time(dt) <= soon",
"def isOn(self):\r\n return len(self.__agenda)>2",
"def is_happening(self):\n now = timezone.now()\n start = self.start\n end = self.end\n happening = False\n # check that the event has started and 'now' is btwn start & end:\n if (now >= start) and (start.time() <= now.time() <= end.time()):\n happening = True\n return happening",
"def _is_date_in_range(self, date):\n date_obj = datetime.strptime(date.split('T')[0], '%Y-%m-%d')\n \"\"\"When running under delta feed mode, we need to consider only those vulns which were\n updated between the given offset date and today's date.\"\"\"\n return self.today > date_obj >= self.start_day",
"def test_coming_up_one_day_past(self):\n time = timezone.now() + datetime.timedelta(days=-1, hours=-1)\n tomorrow_event = Event(event_date=time)\n self.assertIs(tomorrow_event.coming_up(), False)",
"def important_event(time: int) -> bool:\n last_event = get_events(True)[0]\n try:\n time_event = int(last_event.split('\\n')[0].strip(\"'\"))\n except ValueError:\n time_event = int(last_event.split('\\n')[-1].strip(\"'\"))\n if time - time_event < 60:\n return 'gol' in last_event or 'cartão' in last_event\n return False",
"def is_on(self):\n return self._coordinator.get_event_timestamp(self._event_name) > 0",
"def is_ongoing(self) -> bool:\n today = datetime.today()\n\n return (\n False\n if self.time is None\n else (\n today.weekday() == self.weekday()\n and self.time.start <= today.hour * 60 + today.minute <= self.time.end\n )\n )",
"def _more_events(klass, series, date_data):\n\n if (series.ends_after\n and len(series.events) >= series.num_occurrences\n or series.ends_on\n and date_data['start_date'] > series.recurrence_end_date):\n return False\n return True",
"def is_upcoming(self):\n\n return timezone.now() < self.start < timezone.now() + timedelta(days=1)",
"def check_weekday_of_date(self, date):\n return date.isoweekday() % 7",
"def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n return False",
"def has_overlap(vevent, start, end):\n event_start = vevent.dtstart.value\n event_end = vevent.dtend.value\n\n assert not is_naive(start), 'start dt is naive'\n assert not is_naive(end), 'end dt is naive'\n assert not is_naive(event_start), 'event_start dt is naive'\n assert not is_naive(event_end), 'event_end dt is naive'\n\n if start <= event_start <= end: # starts today\n return True\n if start <= event_end <= end: # ends today\n return True\n if event_start <= start and end <= event_end: # spans over today\n return True\n return False",
"def today(self) -> bool:\n return self._algorithm.can_study_now(self._stat)",
"def is_opening(self):\n now = timezone.now()\n return self.start_date.date() >= now.date()",
"def sleep_in(weekday, vacation):\r\n if not weekday or vacation:\r\n return True\r\n else:\r\n return False",
"def is_working_day_appointment(self):\n # function helps hide appointments on weekend\n return 0 <= self.date.weekday() <= 4",
"def acceptable(self):\n now = datetime.datetime.now()\n origin = datetime.datetime.combine(self.date, datetime.time.min)\n start = origin + datetime.timedelta(hours=6)\n end = origin + datetime.timedelta(days=1)\n morning = end + datetime.timedelta(hours=6)\n if now < origin or now > morning:\n return 0\n if now >= end or now <= start:\n return 1\n return 3",
"def is_today(self, dt: datetime.datetime) -> bool:\n\n if self is Day.DAILY:\n return True\n day = dt.weekday()\n if self is Day.WEEKDAY:\n return day < 5\n if self is Day.WEEKEND:\n return day >= 5\n return Day(day) == self",
"def test_coming_up_lt_one_dat_past(self):\n time = timezone.now() + datetime.timedelta(days=-1, hours=1)\n tomorrow_event = Event(event_date=time)\n self.assertIs(tomorrow_event.coming_up(), True)",
"def is_once(today, last_send):\n if isinstance(today, datetime):\n if last_send is not None:\n if today.date() != last_send.date():\n return True\n return False\n return True\n else:\n raise Exception(\"{} is not a datetime instance\".format(today))"
] | [
"0.72662985",
"0.6719103",
"0.65884805",
"0.6551146",
"0.6338677",
"0.63329256",
"0.63131696",
"0.6304339",
"0.62870234",
"0.62826914",
"0.6255161",
"0.6226396",
"0.6218282",
"0.6174885",
"0.61583",
"0.6103819",
"0.6099032",
"0.6082656",
"0.60797435",
"0.6077559",
"0.6072566",
"0.606481",
"0.60638577",
"0.60555667",
"0.6054868",
"0.6043268",
"0.60310185",
"0.6028251",
"0.6008794",
"0.6004706"
] | 0.77704716 | 0 |
Yield the next planned occurrences after the date "since". The `since` argument can be either a date or datetime object. If not given, it defaults to the date of the last event that's already planned. If `n` is given, the result is limited to that many dates; otherwise, infinite results may be generated. Note that fewer than `n` results may be yielded. | def next_occurrences(self, n=None, since=None):
scheme = self.recurrence_scheme
if scheme is None:
return ()
db = Session.object_session(self)
query = db.query(Event)
query = query.filter(Event.series_slug == self.slug)
query = query.order_by(desc(Event.date))
query = query.limit(1)
last_planned_event = query.one_or_none()
if since is None:
last_planned_event = query.one()
since = last_planned_event.date
elif since < last_planned_event.date:
since = last_planned_event.date
start = getattr(since, 'date', since)
start += relativedelta.relativedelta(days=+1)
if (scheme == 'monthly'
and last_planned_event
and last_planned_event.date.year == start.year
and last_planned_event.date.month == start.month):
# Monthly events try to have one event per month, so exclude
# the current month if there was already a meetup
start += relativedelta.relativedelta(months=+1)
start = start.replace(day=1)
start = datetime.datetime.combine(start, datetime.time(tzinfo=CET))
result = rrule.rrulestr(self.recurrence_rule, dtstart=start)
if n is not None:
result = itertools.islice(result, n)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_n_days_ahead(self, startdate, n, fmt=None):\n return startdate + datetime.timedelta(days=n)",
"def limit(iterator, n=None):\n for i, v in enumerate(iterator):\n yield v\n if i + 1 == n:\n break",
"def next_n(self, n, fast_forward=False):\n return list(it.islice(self.gen, n))",
"def next_n(self, n: int, fast_forward=False):\n data = []\n while len(data) < n:\n try:\n record = self.queue.get(True, self.wait)\n data.append(record)\n except Empty:\n raise StopIteration\n return data",
"def get_next_activities(self, n=None):\n\n if n > self.available():\n # !!! This is not quite as specified (see method docs) !!!\n raise IllegalState('not enough elements available in this list')\n else:\n next_list = []\n x = 0\n while x < n:\n try:\n next_list.append(next(self))\n except Exception: # Need to specify exceptions here!\n raise OperationFailed()\n x = x + 1\n return next_list",
"def get_n_days_ago(self, startdate, n):\n return startdate - datetime.timedelta(days=n)",
"def take(self, n: int = -1) -> List[T]:\n if n == -1:\n n = self._per_page\n\n if not isinstance(n, int) or n < 0:\n raise ArgumentError(\n f\"argument n={n} is invalid; n must be an int and n >= 1\"\n )\n\n it = iter(self)\n return list(itertools.islice(it, n))",
"def get_weekday_n_days_ago(start_date, n):\n prev_days = [start_date - timedelta(days=i) for i in range(1, 40)]\n prev_days = [d for d in prev_days if d.weekday() < 5]\n for d in prev_days:\n if d.month == 5 and d.day == 1:\n prev_days.remove(d)\n return prev_days[n-1]",
"def limit(iterator, n=None):\n # Producing at most `n` values from the given iterator.\n # Tried using try-except blocks to cover the stop iteration\n # exception for iter.\n if not n:\n return iterator\n\n output = []\n iter = (i for i in iterator)\n for x in range(0, n):\n\n try:\n output.append(next(iter))\n\n except StopIteration:\n pass\n\n return output",
"def limit(iterable, n):\n for count, element in enumerate(iterable):\n if count >= n: break\n else: yield element",
"def add_n_days(self, n):\n print(self)\n while n > 0:\n self.tomorrow()\n print(self)\n n -= 1",
"def take(iterable, n):\n\n def taking(iterable_):\n for i, e in enumerate(iterable_):\n if i < n:\n yield e\n\n return taking(iterable)",
"def get_upcoming_games(n=10):\n conn, cursor = connect_to_db()\n query = \"\"\"select kickoff_time, t2.team_id home_id, t2.team_name home_name, \n t3.team_id away_id, t3.team_name away_name\n from fpl_fixtures t1 left join fpl_teams t2 on t1.team_h = t2.id left \n join fpl_teams t3 on t1.team_a = t3.id where started = 0 order by \n kickoff_time limit {}\"\"\".format(n)\n df = run_query(cursor, query)\n return df",
"def iterate(iterator, n):\n # throw away n-1 elements\n for index in range(1, n):\n next(iterator, None)\n\n return next(iterator, None)",
"def next(self, n: int | None = None) -> Any:\n self._fillcache(n)\n if not n:\n if self._cache[0] == self.sentinel:\n raise StopIteration\n if n is None:\n result = self._cache.popleft()\n else:\n result = []\n else:\n if self._cache[n - 1] == self.sentinel:\n raise StopIteration\n result = [self._cache.popleft() for i in range(n)]\n return result",
"def next_n(iterator, N):\n try:\n items = []\n for _ in range(N):\n items.append(next(iterator))\n return items\n except StopIteration:\n if items:\n return items\n return None",
"def consume(iterator, n=None, next=next, islice=islice, deque=deque):\n if n is not None:\n next(islice(iterator, n, n), None)\n else:\n exhaust(iterator)",
"def page_through(app_id, app_secret):\n has_next_page = True\n in_date_range = True\n \n #we only want to keep the articles that were returned from the NYtimes api, so this creates a list of target urls\n with open('output/article_search.json') as f:\n nyt_dat = json.load(f)\n nyt_urls = []\n for i in nyt_dat:\n nyt_urls.append(core_url(i['web_url']))\n\n items = get_page(app_id, app_secret)\n process_items(items, nyt_urls)\n\n while has_next_page & in_date_range:\n if 'paging' not in items.keys():\n has_next_page=False\n\n if items['data'][0]['created_time'][0:7]=='2016-10':\n in_date_range = False\n\n items = json.loads(request_until_succeed(items['paging']['next']))\n process_items(items, nyt_urls)",
"def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(next(seq))\n except StopIteration:\n pass\n return result",
"def nextNumberOfResults(self, N=10):\n self.start += self.N\n self.N = N",
"def nth(n):\n\n if n >= 0:\n @sinks\n def _dagpype_internal_fn_act_p(target):\n i = 0\n try:\n while True:\n e = (yield)\n if i == n:\n target.send(e)\n target.close()\n return\n i += 1\n except GeneratorExit:\n target.close()\n\n return _dagpype_internal_fn_act_p\n\n @sinks\n def _dagpype_internal_fn_act_n(target):\n q = collections.deque([], -n)\n try:\n while True:\n q.append((yield))\n except GeneratorExit:\n if len(q) >= -n:\n target.send(q.popleft())\n target.close()\n\n return _dagpype_internal_fn_act_n",
"def take(n, iterable, islice=islice):\n return islice(iterable, n)",
"def take_nth(n):\n def _take_nth_xducer(step):\n outer = {\"idx\": 0}\n def _take_nth_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if outer[\"idx\"] % n:\n outer[\"idx\"] += 1\n return r\n else:\n outer[\"idx\"] += 1\n return step(r, x)\n return _take_nth_step\n return _take_nth_xducer",
"def filter_n_days(df, n):\n # patient must have at least one entry n days before\n max_by_patient = df.groupby(\"UID\").date.max()\n ok_uids = max_by_patient[max_by_patient >= n].index.values\n df = df[df.UID.isin(ok_uids)].reset_index(drop=True)\n return df",
"def skip(self, n=None):\n while n > 0:\n try:\n self.next()\n except StopIteration:\n break\n n -= 1",
"def iter_n(sequence: Sequence[T], n: int) -> List[T]:\n\t\n\tfor i in range(len(sequence) - (n-1)):\n\t\tyield sequence[i:i+n]",
"def n_business_days(self, n=-2):\n\n business_days = 0\n calendar_days = 0 \n if n != 0:\n step = int(n/abs(n))\n while business_days != abs(n):\n calendar_days = calendar_days + step\n if business_day(self.time_stamp + timedelta(calendar_days)):\n business_days = business_days + 1\n return self.time_stamp + timedelta(calendar_days)\n return date",
"def currentAbove(requestContext, seriesList, n):\n return [ series for series in seriesList if safeLast(series) >= n ]",
"def recent(cls, count=3, date=None):\n q = cls.query_started(date).limit(count)\n return q.all()",
"def _generate_new_dates(self, n: int) -> pd.DatetimeIndex:\n new_dates = [\n (self.training_series.time_index()[-1] + (i * self.training_series.freq())) for i in range(1, n + 1)\n ]\n return pd.DatetimeIndex(new_dates, freq=self.training_series.freq_str())"
] | [
"0.56236297",
"0.55622554",
"0.54129523",
"0.537688",
"0.5366716",
"0.5216922",
"0.5207121",
"0.5206607",
"0.51612425",
"0.5151308",
"0.50954014",
"0.5094701",
"0.49745113",
"0.497114",
"0.49553135",
"0.4943986",
"0.49304563",
"0.4926645",
"0.48990518",
"0.48948643",
"0.48934934",
"0.4882435",
"0.48768386",
"0.48640627",
"0.4850646",
"0.48474666",
"0.48435327",
"0.48428208",
"0.48307773",
"0.481671"
] | 0.7174086 | 0 |
Constructor function of the RSA Key Pair class; the directories must point to a valid path | def __init__(self, type_encryption, directory_key_private, directory_key_public):
# class variables
self.type_encryption = type_encryption
self.directory_key_private = directory_key_private
self.directory_key_public = directory_key_public
# check keys
self._publicKey = ''
self._privateKey = ''
        self.verify_path() # verify they are not corrupted | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, rsa_key):\r\n if isinstance(rsa_key, tuple):\r\n self.keypair = Crypto.PublicKey.RSA.construct(rsa_key)\r\n else:\r\n self._InitFromString(rsa_key)",
"def __init__(self):\n self._keypair = RSA.generate(2048)\n self.public_key = self._keypair.publickey().exportKey()",
"def __init__(self, basekey=\"\"):\n self.basekey = basekey",
"def __init__(self, key_path_prefix, windows_path):\n super(WinRegistryFileMapping, self).__init__()\n self.key_path_prefix = key_path_prefix.upper()\n self.windows_path = windows_path",
"def __init__(self):\n publicKeyFileName = \"serverPublicKey\"\n privateKeyFileName = \"serverPrivateKey.pem\"\n try:\n f = open(privateKeyFileName, 'rb')\n self.keys = RSA.importKey(f.read())\n except:\n self.keys = RSA.generate(1024)\n self.publickey = self.keys.publickey()\n # export public and private keys\n privHandle = open(privateKeyFileName, 'wb')\n privHandle.write(self.keys.exportKey('PEM'))\n privHandle.close()\n \n pubHandle = open(publicKeyFileName, 'wb')\n pubHandle.write(self.keys.publickey().exportKey())\n pubHandle.close()\n self.publickey = self.keys.publickey()",
"def __init__(self, gen_priv_key: bool = False, priv_key_path: str = None):\n self.priv_key = None\n self.pub_key = None\n\n # max size = (bytes(rsa) - 2 * bytes(hash) - 2),\n # currently hard-coded to 190 = 256 - 2 * 32 - 2\n self.max_encrypt_size = 190\n\n if gen_priv_key:\n self.priv_key = RSA.generate(2048)\n if priv_key_path is not None:\n path = pathlib.Path(priv_key_path)\n with open(path.as_posix(), 'w') as f:\n f.write(self.priv_key.export_key().decode('utf-8'))\n elif priv_key_path is not None:\n path = pathlib.Path(priv_key_path)\n if path.is_file():\n self.priv_key = RSA.importKey(open(path.as_posix()).read())\n else:\n raise Exception(\"Failed to open file {}\".format(path.as_posix))\n\n if self.priv_key is not None:\n self.pub_key = self.priv_key.publickey()\n\n # delegate encrypt/decrypt function\n self.cipher = PKCS1_OAEP.new(self.priv_key, hashAlgo=SHA256)\n self.decrypt = self.cipher.decrypt",
"def __init__(self, key_path, pass_path, password = '',\n encoding = 'utf-8'):\n self.encoding = encoding\n self.key_path = key_path\n self.pass_path = pass_path\n self.password = password",
"def __init__(self, key_path=''):\n super(WinRegistryKey, self).__init__()\n self._key_path = key_paths.JoinKeyPath([key_path])",
"def __init__(self, globalKey, publicKey, resourceName, **rest):\n super(SshKey, self).__init__({\n \"globalKey\": globalKey,\n \"publicKey\": publicKey,\n \"resourceName\": resourceName,\n }, **rest)",
"def __init__(self, baseDir, parseKey=keys.Key.fromString):\n self.baseDir = baseDir\n self.parseKey = parseKey",
"def _init_keys(self):\n\n basic_constraints = crypto.X509Extension('basicConstraints'.encode('ascii'), True,\n 'CA:TRUE, pathlen:0'.encode('ascii'))\n serial = self._get_serial()\n pkey = self._create_pkey(self.commonname, serial)\n self._create_cert(pkey, self.commonname, serial, [basic_constraints], expire=30*365)",
"def __init__(self, key=None):\n\n self.key = key\n self.cryptor = None\n self.file_ext_targets = ['txt']",
"def __init__(self, pardir=os.curdir, **kwargs):\n super(Mmapdict, self).__init__()\n if not os.path.exists(pardir):\n os.makedirs(pardir)\n self.pardir = pardir\n self.kwargs = kwargs",
"def __init__(self, path, number_keys=1):\n\n self.path = path\n self.keyring = []\n if os.path.exists(path):\n self.keyring = read_keys(path)\n else:\n for n in range(number_keys):\n key = generate_key(generate_random())\n self.keyring.append(key)\n write_keys(path, self.keyring)",
"def __init__(self, uid, key, initial_prng):\n self.uid = uid\n self.key = key\n Crypto1.__init__(self, key, initial_prng)",
"def __init__(self, pubkey, e=65537):\n if isinstance(pubkey, int):\n self.key = RSA.RsaKey(n=pubkey, e=e)\n\n else:\n if not isinstance(pubkey, str):\n raise ValueError('pubkey must be str or int.')\n\n if '----' in pubkey:\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)\n else:\n if pubkey == pubkey.lower():\n pubkey = int(pubkey, 16)\n self.key = RSA.RsaKey(n=pubkey, e=e)\n else:\n pubkey = '-----BEGIN PUBLIC KEY-----\\n' + pubkey + '\\n-----END PUBLIC KEY-----'\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)",
"def __init__(self, pubkey=None):",
"def rsa_keypair_paths( config_path, prefix ) :\n\n public_key_filename, private_key_filename = rsa_keypair_filenames( prefix )\n public_key_path = os.path.join( \".\", public_key_filename )\n private_key_path = os.path.join( config_path, private_key_filename )\n return public_key_path, private_key_path",
"def __init__(self):\n self.public_key = None\n self._private_key = None",
"def __init__(self, gpg_binary, gpg_home):\n\n self.gpg_binary = gpg_binary\n self.gpg_home = gpg_home\n try:\n from gnupg import GPG\n except ImportError:\n raise TracError(_(\"Unable to load the python-gnupg module. \" \\\n \"Please check and correct your installation.\"))\n try:\n self.gpg = GPG(gpgbinary=self.gpg_binary, gnupghome=self.gpg_home)\n # get list of available public keys once for later use\n self.pubkeys = self.gpg.list_keys() # same as gpg.list_keys(False)\n except ValueError:\n raise TracError(_(\"Missing the crypto binary. \" \\\n \"Please check and set full path \" \\\n \"with option 'gpg_binary'.\"))",
"def __init__(self, key_size=1024):\n\t\tif not (key_size % 256 == 0 and key_size >= 1024):\n\t\t\t\traise ValueError(\"RSA key length must be a multiple of 256 and >= 1024\")\n\t\telse:\n\t\t\tself.key_size = key_size",
"def _InitFromString(self, text):\r\n # First, remove all whitespace:\r\n text = re.sub(_WHITESPACE_RE, '', text)\r\n\r\n # Parse out the period-separated components\r\n match = _KEY_RE.match(text)\r\n if not match:\r\n raise ValueError('Badly formatted key string: \"%s\"', text)\r\n\r\n private_exp = match.group('private_exp')\r\n if private_exp:\r\n private_exp = _B64ToNum(private_exp)\r\n else:\r\n private_exp = None\r\n self.keypair = Crypto.PublicKey.RSA.construct(\r\n (_B64ToNum(match.group('mod')),\r\n _B64ToNum(match.group('exp')),\r\n private_exp))",
"def __init__(self, proxy_only = False):\n self.key_file = None\n self.cert_file = None\n self.ca_path = None\n self.key_pass = None\n\n path = os.getenv(\"X509_CERT_DIR\", None)\n if path and os.path.exists(path):\n self.ca_path = path\n\n if not self.ca_path:\n path = \"/etc/grid-security/certificates\"\n if os.path.exists(path):\n self.ca_path = path\n\n path = os.getenv(\"X509_USER_PROXY\", None)\n if path and os.path.exists(path):\n self.key_file = self.cert_file = path\n\n if not self.key_file:\n path = os.getenv(\"X509_USER_KEY\", None)\n if path and os.path.exists(path):\n self.key_file = path\n\n if not self.cert_file:\n path = os.getenv(\"X509_USER_CERT\", None)\n if path and os.path.exists(path):\n self.cert_file = path\n\n if not self.key_file:\n path = os.getenv(\"HOME\") + \"/.globus/userkey.pem\"\n if os.path.exists(path):\n self.key_file = path\n\n if not self.cert_file:\n path = os.getenv(\"HOME\") + \"/.globus/usercert.pem\"\n if os.path.exists(path):\n self.cert_file = path\n\n if not self.ca_path or not os.path.exists(self.ca_path):\n raise RuntimeError(\"no certificate directory found\")\n\n if not self.key_file or not os.path.exists(self.key_file):\n raise RuntimeError(\"no certificate private key file found\")\n\n if not self.cert_file or not os.path.exists(self.cert_file):\n raise RuntimeError(\"no certificate public key file found\")\n\n if not proxy_only and self.key_file != self.cert_file:\n self.key_pass = getpass(\"Password for %s: \" % self.key_file)",
"def __init__(self, curve=None, private_key=None, public_key=None):\n self.curve = curve\n self.private_key = None\n self.public_key = None\n if private_key:\n self.load_private_key(private_key)\n if public_key:\n self.load_received_public_key(public_key)",
"def __init__(self, data_dir, pairs_filepath, img_ext):\n self.data_dir = data_dir\n self.pairs_filepath = pairs_filepath\n self.img_ext = img_ext",
"def __init__(self, mix_scp, s1_scp, s2_scp):\n check(mix_scp, s1_scp, s2_scp)\n self.key = read_key(mix_scp)\n self.mix_path = read_path(mix_scp)\n self.s1_path = read_path(s1_scp)\n self.s2_path = read_path(s2_scp)",
"def __init__(self, username, password):\n self.username = username\n self.password = password\n self.privkey = None\n\n # sets self.privkey\n self.__set_or_create_key_if_not_exist()",
"def _keypath(self) -> pathlib.Path:\n home = pathlib.Path.home()\n keyfile = home / \".cmdc\" / \"apikey\"\n keyfile.parent.mkdir(parents=True, exist_ok=True)\n return keyfile",
"def __init__(self):\n #print (\"Object created\")\n self.apikey='acc_4fc1a435b3188b5'\n self.secret = 'f49c4be14a048d5de7e7f6c564b52022'\n self.fileToIdMap = {}",
"def __init__(self, key):\n Base.__init__(self, key)"
] | [
"0.71339846",
"0.6748313",
"0.6385755",
"0.6383883",
"0.63711643",
"0.63697726",
"0.6332745",
"0.6278031",
"0.6252205",
"0.62220544",
"0.62062967",
"0.6199969",
"0.6193358",
"0.61794245",
"0.6143205",
"0.6122479",
"0.61166006",
"0.6099306",
"0.5994502",
"0.595804",
"0.5955951",
"0.5924",
"0.5896501",
"0.5885834",
"0.5865596",
"0.58281696",
"0.58157206",
"0.5782215",
"0.57561815",
"0.57241744"
] | 0.7002548 | 1 |
Load a headmodel. Read the geometry, conductivities, and sources if available. | def load_headmodel(name, prefix='data'):
cond_file = op.join(prefix, name, name + '.cond')
geom_file = op.join(prefix, name, name + '.geom')
patches_file = op.join(prefix, name, name + '.patches')
dip_file = op.join(prefix, name, name + '.dip')
tdcs_file = op.join(prefix, name, name + '.hdtdcs')
pot_file = op.join(prefix, name, name + '.pot')
geom = om.Geometry()
geom.read(str(geom_file), str(cond_file))
sensors = om.Sensors()
sensors.load(str(patches_file))
model = {'geometry': geom, 'sensors': sensors}
if op.exists(dip_file):
dipoles = om.Matrix(str(dip_file))
model['dipsources'] = dipoles
if op.exists(tdcs_file):
tdcs = om.Sensors(str(tdcs_file), geom)
model['tdcssources'] = tdcs
if op.exists(pot_file):
pot = om.Matrix(str(pot_file))
model['potentials'] = pot
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_model(self):\n self.__model = tf.keras.models.load_model(\n os.path.join(self.model_path, \"model.h5\")\n )\n print(\"[INFO] Model loaded!\")\n\n tok_pth = os.path.join(self.model_path, \"tokenizer.json\")\n with open(tok_pth, \"r\") as f:\n self.__tokenizer = tf.keras\\\n .preprocessing\\\n .text\\\n .tokenizer_from_json(f.read())\n print(\"[INFO] Tokenizer loaded!\")\n\n meta_pth = os.path.join(self.model_path, \"meta.json\")\n with open(meta_pth, \"r\") as f:\n meta = json.load(f)\n self.__title_len = meta[\"title_pad_length\"]\n self.__body_len = meta[\"body_pad_length\"]\n\n self.load_explainer()\n print(\"[INFO] Explainer loaded!\")",
"async def load_model(\n self,\n model_name: str,\n headers: dict[str, t.Any] = ...,\n config: str = ...,\n files: dict[str, str] = ...,\n ) -> None:",
"def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True",
"def generateHead(self, headType):\n # load the multi-head models\n filePrefix, phase = ModelDict[self.style.body]\n headModel = loader.loadModel(\"phase_\" + str(phase) + filePrefix + \"heads\")\n\n # search for the appropriate parts\n headReferences = headModel.findAllMatches(\"**/\" + headType)\n for i in range(0, headReferences.getNumPaths()):\n headPart = self.instance(headReferences.getPath(i), \"modelRoot\",\n \"joint_head\")\n # set head texture if necessary\n if self.headTexture:\n headTex = loader.loadTexture(\"phase_\" + str(phase) + \"/maps/\" +\n self.headTexture)\n headTex.setMinfilter(Texture.FTLinearMipmapLinear)\n headTex.setMagfilter(Texture.FTLinear) \n headPart.setTexture(headTex, 1)\n\n # set head color if necessary\n if self.headColor:\n headPart.setColor(self.headColor)\n self.headParts.append(headPart)\n\n # Now remove the extra instance that was created in the\n # loadModelOnce call; we don't need it anymore now that we've\n # copied everything out.\n headModel.removeNode()",
"def load_model(self):\n pass",
"def load_model():\n global obj\n obj = NutritionTableDetector()\n print(\"Weights Loaded!\")",
"def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)",
"def load_model(self) -> Any:",
"def load_model(self, filename):\r\n pass",
"def _load_model(self):\n with open(self.filepath, 'rb') as file:\n self.cmodel = pickle.load(file)",
"def initialize(self) -> None:\n self.model = load(self.path)",
"def load_model(self):\r\n try:\r\n self.model = CRNN_STN(self.crnn_cfg())\r\n self.model.load_weights(config.CRNN_Model_Path)\r\n except:\r\n print('Error in method {0} in module {1}'.format('load_model', 'crnn_bridge.py'))",
"def load_model(self, model_path: str):",
"def load_model(cls, src_path, update_dict=None, steps=None):\n\n if steps is not None:\n json_file, _ = cls.get_file_via_steps(src_path, steps, 'json', STEPS_REGEX)\n hdf5_file, samples_seen = cls.get_file_via_steps(src_path, steps, 'hdf5',\n STEPS_REGEX)\n\n\n else:\n json_file = max(glob.iglob(os.path.join(src_path, '*.json')),\n key=os.path.getctime)\n hdf5_file = max(glob.iglob(os.path.join(src_path, '*.hdf5')),\n key=os.path.getctime)\n\n samples_seen = cls.get_pattern(hdf5_file, STEPS_REGEX)\n samples_seen = samples_seen if samples_seen is not None else 0\n\n session_number = cls.get_pattern(hdf5_file, SESS_REGEX)\n session_number = session_number if session_number is not None else 1\n\n params_dict = data_functions.load_json(json_file)\n\n params_dict['pretrained_weights'] = hdf5_file\n\n #TODO: try to rearange loading weights\n # if 'weights' in os.path.basename(hdf5_file):\n # params_dict['pretrained_weights'] = hdf5_file\n # else:\n # params_dict['checkpoint'] = hdf5_file\n\n params_dict['train_time'] = os.path.basename(src_path)\n if update_dict is not None:\n if 'pretrained_weights' or 'checkpoint' in update_dict:\n params_dict['pretrained_weights'] = None\n params_dict['checkpoint'] = None\n params_dict.update(update_dict)\n\n model = ClarifruitUnet(**params_dict)\n logger.info(f\"continuing training from {os.path.basename(hdf5_file)}\")\n\n setattr(model, 'samples_seen', samples_seen)\n setattr(model, 'params_filepath', json_file)\n setattr(model, 'session_number', session_number)\n\n return model",
"def transformerXLLMHeadModel(*args, **kwargs):\n model = TransfoXLLMHeadModel.from_pretrained(*args, **kwargs)\n return model",
"def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. Start training......')",
"def load(path_to_model):\n pass",
"def load_model(model, path):\n\tmodel.load_state_dict(torch.load(path))\n\tprint(\"pre-trained model loaded from {}\".format(path))",
"def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return",
"def load_model(self):\n Thread(target=self.__load_model).start()",
"def load(self):\r\n # self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')))\r\n if torch.cuda.is_available():\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'))\r\n else:\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'), map_location=torch.device('cpu'))",
"def load_model(self, path):\n pass",
"def load(self):\n utils.get_previous_weights_from_gdrive(self.config.model_folder)\n last_used_model = utils.get_latest_model_name(self.config.model_folder)\n self.model = load_model(last_used_model)\n self.model.summary()",
"def load_model(self, filename):\n event = teca_time_py_event('teca_deeplab_ar_detect::load_model')\n\n # this creates OpenMP thread pools and imports torch\n # it must be called *before* we import torch\n self.initialize()\n\n # import our torch codes only now that torch has been initialized\n global teca_deeplab_ar_detect_internals\n from teca_deeplab_ar_detect_internals \\\n import teca_deeplab_ar_detect_internals\n\n # create an instance of the model\n model = teca_deeplab_ar_detect_internals.DeepLabv3_plus(\n n_classes=1, _print=False)\n\n # load model weights from state on disk\n super().load_model(filename, model)",
"def load(self):\n print(\"==> Loading model from\", self.model_dir)\n self.model = tf.keras.models.load_model(self.model_dir)",
"def __load_Model(self):\r\n PrintsForUser.printProcess(\"[INFO] Loading network...\")\r\n \r\n self.__model = load_model(self.__model_path)\r\n self.__lb = pickle.loads(open(self.__labels_path, \"rb\").read())",
"def __load(self, model_name):\n\n print(\"Loading model.\")\n tstart = datetime.now()\n\n # Temporary directory to extract the zipped information\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Unzip the directory that contains the saved model(s)\n with zipfile.ZipFile(model_name + \".zip\", \"r\") as zip_ref:\n zip_ref.extractall(dirpath)\n\n # Load metadata\n metadata = pickle.load(open(dirpath + \"/metadata.pickle\", \"rb\"))\n\n # Re-load metadata\n self.__dict__.update(metadata)\n\n # Load all sub-models\n try:\n self.__mol_to_latent_model = load_model(\n dirpath + \"/mol_to_latent_model.h5\"\n )\n except:\n print(\"'mol_to_latent_model' not found, setting to None.\")\n self.__mol_to_latent_model = None\n\n self.__latent_to_states_model = load_model(\n dirpath + \"/latent_to_states_model.h5\"\n )\n self.__batch_model = load_model(dirpath + \"/batch_model.h5\")\n \n # Build sample_model out of the trained batch_model\n self.__build_sample_model(batch_input_length=1) # Single-output model\n self.__build_sample_model(\n batch_input_length=256 # could also be self.batch_size\n ) # Multi-output model\n\n print(\"Loading finished in %i seconds.\" % ((datetime.now() - tstart).seconds))",
"def load(self, path, verbose=False):\r\n\r\n model_name = os.path.basename(os.path.normpath(path))\r\n if verbose:\r\n print(\"Model name:\", model_name, \"-->\", os.path.join(path, model_name + \".json\"))\r\n with open(os.path.join(path, model_name + \".json\")) as f:\r\n metadata = json.load(f)\r\n\r\n self.backbone = metadata[\"backbone\"]\r\n self.__create_model(metadata[\"classes\"])\r\n\r\n self._model.load_parameters(os.path.join(path, metadata[\"model_paths\"][0]))\r\n self._model.collect_params().reset_ctx(self.ctx)\r\n self._model.hybridize(static_alloc=True, static_shape=True)\r\n if verbose:\r\n print(\"Loaded parameters and metadata.\")\r\n return True",
"def load_model():\n \n _files = training_file()\n \n predictor_path = _files.model_file(LANDMARKS_WEIGHTS)\n face_rec_model_path = _files.model_file(RESNET_WEIGHTS)\n \n detector = dlib.get_frontal_face_detector()\n sp = dlib.shape_predictor(predictor_path)\n facerec = dlib.face_recognition_model_v1(face_rec_model_path)\n \n return (detector, sp, facerec)",
"def load_model():\n\n # find location of model\n\n file_path = '/Users/davidodwyer/Desktop' # to the directory\n file_name = 'original_mlr.joblib' \n the_file = os.path.join(file_path, file_name)\n\n # load model\n\n model = load(the_file)\n\n return model"
] | [
"0.6497414",
"0.64208055",
"0.638878",
"0.62695634",
"0.62652946",
"0.6117369",
"0.60547155",
"0.6034862",
"0.6033276",
"0.6026985",
"0.60228074",
"0.6008137",
"0.5997371",
"0.59949976",
"0.59703386",
"0.5954361",
"0.5948874",
"0.5944537",
"0.59402436",
"0.5931371",
"0.5927574",
"0.5898268",
"0.58961123",
"0.5888554",
"0.5872348",
"0.5852205",
"0.58268034",
"0.58150643",
"0.5802971",
"0.57777214"
] | 0.7715285 | 0 |
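A hedged usage sketch for the load_headmodel row above. The subject name 'Head1' and the on-disk layout (data/Head1/Head1.geom, .cond and .patches, plus the optional .dip, .hdtdcs and .pot files) are assumptions for illustration; the imports are the ones the helper itself relies on, and the dictionary keys come straight from the function body.

import os.path as op   # used inside load_headmodel
import openmeeg as om  # used inside load_headmodel

model = load_headmodel('Head1', prefix='data')  # expects data/Head1/Head1.{geom,cond,patches}

print(sorted(model))          # always 'geometry' and 'sensors'
                              # optional: 'dipsources', 'tdcssources', 'potentials'
geometry = model['geometry']  # om.Geometry with conductivities attached
sensors = model['sensors']    # om.Sensors read from the .patches file
if 'dipsources' in model:     # present only when Head1.dip exists
    dipoles = model['dipsources']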
Compute a forward problem given a model with geometry and sources. | def forward_problem(m):
hm = om.HeadMat(m['geometry'])
hm.invert() # invert in place (no copy)
dsm = om.DipSourceMat(m['geometry'], m['dipsources'])
return hm * dsm | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(self, x):\n sources = list()\n new_sources = list()\n\n # apply lds to the initial image\n x_pool = self.lds(x)\n\n # apply vgg up to conv4_3\n for k in range(22):\n x = self.features[k](x)\n conv4_3_bn = self.ibn1(x)\n x_pool1_skip, x_pool1_icn = self.icn1(x_pool)\n s = self.Norm1(conv4_3_bn * x_pool1_icn)\n\n # apply vgg up to fc7\n for k in range(22, 34):\n x = self.features[k](x)\n conv7_bn = self.ibn2(x)\n x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)\n p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)\n\n x = self.features[34](x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extra):\n x = v(x)\n if k == 0:\n x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)\n w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)\n elif k == 2:\n x_pool4_skip, x_pool4_icn = self.icn4(x_pool3_skip)\n q = self.Norm4(self.dsc3(w) + x * x_pool4_icn)\n elif k == 4:\n o = self.Norm5(self.dsc4(q) + x)\n sources.append(o)\n elif k == 7 or k == 9:\n sources.append(x)\n else:\n pass\n\n # project the forward features into lower dimension.\n tmp1 = self.proj1(p)\n tmp2 = self.proj2(w)\n tmp3 = self.proj3(q)\n tmp4 = self.proj4(o)\n\n # The conv4_3 level\n proj1 = F.upsample(tmp1, scale_factor=2, mode='bilinear')\n proj2 = F.upsample(tmp2, scale_factor=4, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=8, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=16, mode='bilinear')\n proj = torch.cat([proj1, proj2, proj3, proj4], dim=1)\n\n agent1 = self.agent1(s)\n\n convert1 = self.convert1(proj)\n pred1 = torch.cat([agent1, convert1], dim=1)\n pred1 = self.merge1(pred1)\n new_sources.append(pred1)\n\n # The fc_7 level\n proj2 = F.upsample(tmp2, scale_factor=2, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=4, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=8, mode='bilinear')\n proj = torch.cat([proj2, proj3, proj4], dim=1)\n\n agent2 = self.agent2(p)\n convert2 = self.convert2(proj)\n pred2 = torch.cat([agent2, convert2], dim=1)\n pred2 = self.merge2(pred2)\n new_sources.append(pred2)\n\n # The conv8 level\n proj3 = F.upsample(tmp3, scale_factor=2, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=4, mode='bilinear')\n proj = torch.cat([proj3, proj4], dim=1)\n\n agent3 = self.agent3(w)\n convert3 = self.convert3(proj)\n pred3 = torch.cat([agent3, convert3], dim=1)\n pred3 = self.merge3(pred3)\n new_sources.append(pred3)\n\n # The conv9 level\n proj4 = F.upsample(tmp4, scale_factor=2, mode='bilinear')\n proj = proj4\n\n agent4 = self.agent4(q)\n convert4 = self.convert4(proj)\n pred4 = torch.cat([agent4, convert4], dim=1)\n pred4 = self.merge4(pred4)\n new_sources.append(pred4)\n\n for prediction in sources:\n new_sources.append(prediction)\n\n return new_sources",
"def forward_features(self, x, flows_backward, flows_forward):\n x1 = self.stage1(x, flows_backward[0::4], flows_forward[0::4])\n x2 = self.stage2(x1, flows_backward[1::4], flows_forward[1::4])\n x3 = self.stage3(x2, flows_backward[2::4], flows_forward[2::4])\n x4 = self.stage4(x3, flows_backward[3::4], flows_forward[3::4])\n x = self.stage5(x4, flows_backward[2::4], flows_forward[2::4])\n x = self.stage6(x + x3, flows_backward[1::4], flows_forward[1::4])\n x = self.stage7(x + x2, flows_backward[0::4], flows_forward[0::4])\n x = x + x1\n for layer in self.stage8:\n x = layer(x)\n x = rearrange(x, 'n c d h w -> n d h w c')\n x = self.norm(x)\n x = rearrange(x, 'n d h w c -> n c d h w')\n return x",
"def forward_model(self, shot, m0, frequencies, return_parameters=[]):\n\n # Local references\n solver = self.solver\n solver.model_parameters = m0 # this updates dt and the number of steps so that is appropriate for the current model\n\n mesh = solver.mesh\n\n d = solver.domain\n dt = solver.dt\n nsteps = solver.nsteps\n source = shot.sources\n\n # Sanitize the input\n if not np.iterable(frequencies):\n frequencies = [frequencies]\n\n # Setup data storage for the forward modeled data\n if 'simdata' in return_parameters:\n simdata = dict()\n for nu in frequencies:\n simdata[nu] = np.zeros(shot.receivers.receiver_count)\n\n # Setup data storage for the forward modeled data (in time, if it is needed, and it frequently is)\n if 'simdata_time' in return_parameters:\n simdata_time = np.zeros((solver.nsteps, shot.receivers.receiver_count))\n\n # Storage for the derivative of the propagation operator with respect to the model \\frac{d\\script{L}}{dm}\n if 'dWaveOp' in return_parameters:\n dWaveOp = dict()\n for nu in frequencies:\n dWaveOp[nu] = 0.0\n\n # Initialize the DFT components\n uhats = dict()\n for nu in frequencies:\n uhats[nu] = 0.0\n\n subsample_indices = self._compute_subsample_indices(frequencies)\n\n # Step k = 0\n # p_0 is a zero array because if we assume the input signal is causal\n # and we assume that the initial system (i.e., p_(-2) and p_(-1)) is\n # uniformly zero, then the leapfrog scheme would compute that p_0 = 0 as\n # well. ukm1 is needed to compute the temporal derivative.\n solver_data = solver.SolverData()\n\n rhs_k = np.zeros(mesh.shape(include_bc=True))\n rhs_kp1 = np.zeros(mesh.shape(include_bc=True))\n\n for k in range(nsteps):\n\n # Local reference\n\n uk = solver_data.k.primary_wavefield\n uk_bulk = mesh.unpad_array(uk)\n\n # Record the data at t_k\n if 'simdata_time' in return_parameters:\n shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata_time)\n\n t = k*dt\n\n for nu in frequencies:\n idx = subsample_indices[nu]\n if np.mod(k, idx) == 0:\n uhats[nu] += uk*(np.exp(-1j*2*np.pi*nu*t)*dt*idx)\n\n if k == 0:\n rhs_k = self._setup_forward_rhs(rhs_k, source.f(k*dt))\n rhs_kp1 = self._setup_forward_rhs(rhs_kp1, source.f((k+1)*dt))\n else:\n # shift time forward\n rhs_k, rhs_kp1 = rhs_kp1, rhs_k\n rhs_kp1 = self._setup_forward_rhs(rhs_kp1, source.f((k+1)*dt))\n\n # Note, we compute result for k+1 even when k == nsteps-1. We need\n # it for the time derivative at k=nsteps-1.\n solver.time_step(solver_data, rhs_k, rhs_kp1)\n\n # When k is the nth step, the next step is uneeded, so don't swap\n # any values. 
This way, uk at the end is always the final step\n if(k == (nsteps-1)): break\n\n # Don't know what data is needed for the solver, so the solver data\n # handles advancing everything forward by one time step.\n # k-1 <-- k, k <-- k+1, etc\n solver_data.advance()\n\n # Record the data at t_k\n if 'simdata' in return_parameters:\n for nu in frequencies:\n simdata[nu] = shot.receivers.sample_data_from_array(mesh.unpad_array(uhats[nu]))\n\n # Compute time derivative of p at time k\n if 'dWaveOp' in return_parameters:\n for nu in frequencies:\n dWaveOp[nu] += solver.compute_dWaveOp('frequency', uhats[nu], nu)\n\n retval = dict()\n\n if 'dWaveOp' in return_parameters:\n retval['dWaveOp'] = dWaveOp\n if 'simdata' in return_parameters:\n retval['simdata'] = simdata\n if 'wavefield' in return_parameters:\n _uhats = dict()\n _uhats = {nu: mesh.unpad_array(uhats[nu], copy=True) for nu in frequencies}\n retval['wavefield'] = _uhats\n if 'simdata_time' in return_parameters:\n retval['simdata_time'] = simdata_time\n\n return retval",
"def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # apply vgg up to fc7\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1,\n self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output",
"def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r",
"def forward(self, x):\n previous_batch, current_batch = x\n previous_batch_pc, previous_batch_f = previous_batch[0], previous_batch[1]\n current_batch_pc, current_batch_f = current_batch[0], current_batch[1]\n\n f1 = previous_batch_pc[:, :, 3:]\n pc1 = previous_batch_pc[:, :, :3]\n\n f2 = current_batch_pc[:, :, 3:]\n pc2 = current_batch_pc[:, :, :3]\n\n batch_size, n_points_prev, _ = previous_batch_pc.shape\n batch_size, n_points_cur, _ = current_batch_pc.shape\n\n # All outputs of the following layers are tuples of (pos, features)\n # --- Point Feature Part ---\n pf_prev_1, pf_prev_2, pf_prev_3 = self._point_feature_net(pc1.float(), f1.float())\n pf_curr_1, pf_curr_2, pf_curr_3 = self._point_feature_net(pc2.float(), f2.float())\n\n # --- Flow Embedding / Point Mixture Part ---\n _, fe_2, fe_3 = self._point_mixture(x1=pf_prev_3, x2=pf_curr_3)\n\n # --- Flow Refinement Part ---\n x = self._flow_refinement(pf_curr_1=pf_curr_1, pf_curr_2=pf_curr_2, pf_curr_3=pf_curr_3, fe_2=fe_2, fe_3=fe_3)\n\n # --- Final fully connected layer ---\n pos, features = x\n features = features.transpose(1, 2)\n x = self._fc(features)\n return x",
"def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n #print('Reached start of vgg')\n for k in self.vgg._modules.keys():\n if int(k) < 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n #print('Reached L2Norm')\n s = self.L2Norm(x)\n sources.append(s)\n\n #print('Reached after L2Norm')\n # apply vgg up to fc7\n for k in self.vgg._modules.keys():\n if int(k) >= 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n sources.append(x)\n #print('Reached end of VGG')\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(-1, self.num_classes)), # conf preds\n self.priors # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output",
"def test_make_forward_solution():\n fwd_py = make_forward_solution(fname_raw, fname_trans, fname_src,\n fname_bem, mindist=5.)\n assert (isinstance(fwd_py, Forward))\n fwd = read_forward_solution(fname_meeg)\n assert (isinstance(fwd, Forward))\n _compare_forwards(fwd, fwd_py, 366, 1494, meg_rtol=1e-3)\n # Homogeneous model\n with pytest.raises(RuntimeError, match='homogeneous.*1-layer.*EEG'):\n make_forward_solution(fname_raw, fname_trans, fname_src,\n fname_bem_meg)",
"def forward(self, x):\n # sources保存特征图,loc与conf保存所有PriorBox的位置与类别预测特征\n sources = list()\n loc = list()\n conf = list()\n\n # 对输入图像卷积到conv4_3,将特征添加到sources中\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # 继续卷积到conv7,将特征添加到sources中\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # 继续利用额外的卷积层计算,并将特征添加到sources中\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1: # 间隔一层\n sources.append(x)\n\n # 对sources中的特征图利用类别与位置网络进行卷积计算,并保存到loc与conf中\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1, self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n # 对于训练来说,output包括了loc与conf的预测值以及PriorBox的信息\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output",
"def forward(self, x): \n pal1_sources = list()\n pal2_sources = list()\n loc_pal1 = list()\n conf_pal1 = list()\n loc_pal2 = list()\n conf_pal2 = list()\n\n # apply vgg up to conv3_3 relu\n for k in range(16):\n x = self.vgg[k](x)\n\n of1 = x\n s = self.L2Normof1(of1)\n pal1_sources.append(s)\n \n # apply vgg up to conv4_3 relu\n for k in range(16, 23):\n x = self.vgg[k](x)\n\n of2 = x\n s = self.L2Normof2(of2)\n pal1_sources.append(s)\n\n # apply vgg up to conv5_3 relu\n for k in range(23, 30):\n x = self.vgg[k](x)\n of3 = x\n s = self.L2Normof3(of3)\n pal1_sources.append(s)\n\n # apply vgg up to fc7\n for k in range(30, len(self.vgg)):\n x = self.vgg[k](x)\n of4 = x\n pal1_sources.append(of4)\n \n # apply extra layers and cache source layer outputs\n for k in range(2):\n x = F.relu(self.extras[k](x), inplace=True)\n of5 = x\n pal1_sources.append(of5)\n for k in range(2, 4):\n x = F.relu(self.extras[k](x), inplace=True)\n of6 = x\n pal1_sources.append(of6)\n\n ## fpn module\n \"\"\"\n lfpn6 = self.fpn_topdown6(of6)\n lfpn5 = self._upsample_product(self.fpn_topdown5(of6), self.fpn_latlayer5(of5))\n lfpn4 = self._upsample_product(self.fpn_topdown4(of5), self.fpn_latlayer4(of4))\n lfpn3 = self._upsample_product(self.fpn_topdown3(of4), self.fpn_latlayer3(of3))\n lfpn2 = self._upsample_product(self.fpn_topdown2(of3), self.fpn_latlayer2(of2))\n lfpn1 = self._upsample_product(self.fpn_topdown1(of2), self.fpn_latlayer1(of1))\n\n\n ef1 = self.fpn_fem3_3(lfpn1)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem4_3(lfpn2)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem5_3(lfpn3)\n ef3 = self.L2Normef3(ef3)\n\n ef4 = self.fpn_fem7(lfpn4)\n ef5 = self.fpn_fem6_2(lfpn5)\n ef6 = self.fpn_fem7_2(lfpn6)\n \"\"\"\n\n conv7 = F.relu(self.fpn_topdown[0](of6), inplace=True)\n x = F.relu(self.fpn_topdown[1](conv7), inplace=True)\n conv6 = F.relu(self._upsample_product(x, self.fpn_latlayer[0](of5)), inplace=True)\n\n x = F.relu(self.fpn_topdown[2](conv6), inplace=True)\n convfc7_2 = F.relu(self._upsample_product(x, self.fpn_latlayer[1](of4)), inplace=True)\n\n x = F.relu(self.fpn_topdown[3](convfc7_2), inplace=True)\n conv5 = F.relu(self._upsample_product(x, self.fpn_latlayer[2](of3)), inplace=True)\n\n x = F.relu(self.fpn_topdown[4](conv5), inplace=True)\n conv4 = F.relu(self._upsample_product(x, self.fpn_latlayer[3](of2)), inplace=True)\n\n x = F.relu(self.fpn_topdown[5](conv4), inplace=True)\n conv3 = F.relu(self._upsample_product(x, self.fpn_latlayer[4](of1)), inplace=True)\n\n ef1 = self.fpn_fem[0](conv3)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem[1](conv4)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem[2](conv5)\n ef3 = self.L2Normef3(ef3)\n ef4 = self.fpn_fem[3](convfc7_2)\n ef5 = self.fpn_fem[4](conv6)\n ef6 = self.fpn_fem[5](conv7)\n\n pal2_sources = (ef1, ef2, ef3, ef4, ef5, ef6)\n\n ## first shot \n for (x, l, c) in zip(pal1_sources, self.loc_pal1, self.conf_pal1):\n loc_pal1.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal1.append(c(x).permute(0, 2, 3, 1).contiguous())\n \n ## second shot\n for (x, l, c) in zip(pal2_sources, self.loc_pal2, self.conf_pal2):\n loc_pal2.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal2.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n # first shot\n loc_pal1 = torch.cat([o.view(o.size(0), -1) for o in loc_pal1], 1)\n conf_pal1 = torch.cat([o.view(o.size(0), -1) for o in conf_pal1], 1)\n \n # second shot\n loc_pal2 = torch.cat([o.view(o.size(0), -1) for o in loc_pal2], 1)\n conf_pal2 = torch.cat([o.view(o.size(0), -1) for o in conf_pal2], 1)\n\n 
if self.phase == 'test':\n # 测试时, 仅使用shot2 的输出\n output = self.detect(\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n self.softmax(conf_pal2.view(conf_pal2.size(0), -1,\n self.num_classes)), # conf preds\n )\n else:\n ## 训练时,使用shot1 和 shot2 的输出\n output = (\n loc_pal1.view(loc_pal1.size(0), -1, 4),\n conf_pal1.view(conf_pal1.size(0), -1, self.num_classes),\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n conf_pal2.view(conf_pal2.size(0), -1, self.num_classes))\n return output",
"def L_model_forward(X, parameters):\n pass",
"def forward(self, x, test=False):\n sources = list()\n loc = list()\n conf = list()\n\n # apply bases layers and cache source layer outputs\n for k in range(len(self.base)):\n x = self.base[k](x)\n if k in self.feature_layer:\n if len(sources) == 0:\n s = self.Norm(x)\n sources.append(s)\n else:\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n sources.append(x)\n # if k % 2 == 1:\n # sources.append(x)\n\n #if phase == 'feature':\n # return sources\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = (\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(-1, self.num_classes)), # conf preds\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n )\n return output",
"def forward(self, x):\n device = x.device\n x, sources = self._get_sources(x)\n\n # apply multibox head to source layers\n conf = []\n loc = []\n for i, (loc_fn, conf_fn) in enumerate(zip(self.loc, self.conf)):\n l = loc_fn(sources[i]).permute(0, 2, 3, 1).contiguous()\n l = l.view(x.size(0), -1, 4)\n loc.append(l)\n\n c = conf_fn(sources[i]).permute(0, 2, 3, 1).contiguous()\n c = c.view(x.size(0), -1, self.num_classes)\n conf.append(c)\n\n loc = torch.cat(loc, 1)\n conf = torch.cat(conf, 1)\n\n if not self.training:\n conf = F.softmax(conf, -1)\n output = self.detect(loc, conf, self.priors.float().to(device))\n output = self._post_process_inference(output)\n else:\n output = loc, conf, self.priors.to(device)\n\n return output",
"def forward(self, src=None, rec=None, u=None, vp=None, save=None, **kwargs):\n # Source term is read-only, so re-use the default\n src = src or self.geometry.src\n # Create a new receiver object to store the result\n rec = rec or self.geometry.rec\n\n # Create the forward wavefield if not provided\n u = u or TimeFunction(name='u', grid=self.model.grid,\n save=self.geometry.nt if save else None,\n time_order=2, space_order=self.space_order)\n\n # Pick vp from model unless explicitly provided\n vp = vp or self.model.vp\n\n print(\"====Forward norm(u)\", norm(u))\n # Execute operator and return wavefield and receiver data\n # summary = self.op_fwd(save).apply(src=src, rec=rec, u=u, vp=vp,\n summary = self.op_fwd(save).apply(src=src, u=u, vp=vp,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n print(\"====Forward norm(u)\", norm(u))\n \n\n regnormu = norm(u)\n if 0:\n cmap = plt.cm.get_cmap(\"viridis\")\n values = u.data[0, :, :, :]\n vistagrid = pv.UniformGrid()\n vistagrid.dimensions = np.array(values.shape) + 1\n vistagrid.spacing = (1, 1, 1)\n vistagrid.origin = (0, 0, 0) # The bottom left corner of the data set\n vistagrid.cell_arrays[\"values\"] = values.flatten(order=\"F\")\n vistaslices = vistagrid.slice_orthogonal()\n vistagrid.plot(show_edges=True)\n vistaslices.plot(cmap=cmap)\n\n print(\"Norm u:\", regnormu)\n\n s_u = TimeFunction(name='s_u', grid=self.model.grid, space_order=self.space_order, time_order=2)\n src_u = src.inject(field=s_u.forward, expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)\n\n\n op_f = Operator([src_u])\n op_f.apply(src=src, dt=kwargs.pop('dt', self.dt))\n\n # import pdb;pdb.set_trace()\n print(\"Norm s_u\", norm(s_u))\n\n # Get the nonzero indices\n nzinds = np.nonzero(s_u.data[0]) # nzinds is a tuple\n assert len(nzinds) == len(self.model.grid.shape)\n shape = self.model.grid.shape\n x, y, z = self.model.grid.dimensions\n time = self.model.grid.time_dim\n t = self.model.grid.stepping_dim\n\n source_mask = Function(name='source_mask', shape=self.model.grid.shape, dimensions=(x, y, z), space_order=0, dtype=np.int32)\n\n source_id = Function(name='source_id', shape=shape, dimensions=(x, y, z), space_order=0, dtype=np.int32)\n print(\"source_id data indexes start from 0 now !!!\")\n\n # source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(1, len(nzinds[0])+1))\n source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(len(nzinds[0])))\n\n source_mask.data[nzinds[0], nzinds[1], nzinds[2]] = 1\n\n print(\"Number of unique affected points is:\", len(nzinds[0]))\n\n # Assert that first and last index are as expected\n assert(source_id.data[nzinds[0][0], nzinds[1][0], nzinds[2][0]] == 0)\n assert(source_id.data[nzinds[0][-1], nzinds[1][-1], nzinds[2][-1]] == len(nzinds[0])-1)\n assert(source_id.data[nzinds[0][len(nzinds[0])-1], nzinds[1][len(nzinds[0])-1], nzinds[2][len(nzinds[0])-1]] == len(nzinds[0])-1)\n\n assert(np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(source_mask.data)))\n assert(np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(s_u.data[0])))\n\n print(\"-At this point source_mask and source_id have been populated correctly-\")\n\n nnz_shape = (self.model.grid.shape[0], self.model.grid.shape[1])\n\n nnz_sp_source_mask = Function(name='nnz_sp_source_mask', shape=(list(nnz_shape)), dimensions=(x,y ), space_order=0, dtype=np.int32)\n\n nnz_sp_source_mask.data[:, :] = source_mask.data[:, :, :].sum(2)\n inds = np.where(source_mask.data == 1.)\n print(\"Grid - source positions:\", inds)\n maxz = 
len(np.unique(inds[-1]))\n # Change only 3rd dim\n sparse_shape = (self.model.grid.shape[0], self.model.grid.shape[1], maxz)\n\n assert(len(nnz_sp_source_mask.dimensions) == (len(source_mask.dimensions)-1))\n\n # Note : sparse_source_id is not needed as long as sparse info is kept in mask\n # sp_source_id.data[inds[0],inds[1],:] = inds[2][:maxz]\n\n id_dim = Dimension(name='id_dim')\n b_dim = Dimension(name='b_dim')\n\n save_src_u = TimeFunction(name='save_src_u', shape=(src.shape[0],\n nzinds[1].shape[0]), dimensions=(src.dimensions[0],\n id_dim))\n\n save_src_u_term = src.inject(field=save_src_u[src.dimensions[0], source_id], expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)\n\n print(\"Injecting to empty grids\")\n op1 = Operator([save_src_u_term])\n op1.apply(src=src, dt=kwargs.pop('dt', self.dt))\n print(\"Injecting to empty grids finished\")\n sp_zi = Dimension(name='sp_zi')\n\n\n sp_source_id = Function(name='sp_source_id', shape=(list(sparse_shape)),\n dimensions=(x, y, sp_zi), space_order=0, dtype=np.int32)\n\n # Now holds IDs\n sp_source_id.data[inds[0], inds[1], :] = tuple(inds[-1][:len(np.unique(inds[-1]))])\n\n assert(np.count_nonzero(sp_source_id.data) == len(nzinds[0]))\n assert(len(sp_source_id.dimensions) == 3)\n\n # import pdb;pdb.set_trace()\n\n zind = Scalar(name='zind', dtype=np.int32)\n xb_size = Scalar(name='xb_size', dtype=np.int32)\n yb_size = Scalar(name='yb_size', dtype=np.int32)\n x0_blk0_size = Scalar(name='x0_blk0_size', dtype=np.int32)\n y0_blk0_size = Scalar(name='y0_blk0_size', dtype=np.int32)\n\n block_sizes = Function(name='block_sizes', shape=(4, ), dimensions=(b_dim,),\n space_order=0, dtype=np.int32)\n\n bsizes = (8, 8, 32, 32)\n block_sizes.data[:] = bsizes\n\n # eqxb = Eq(xb_size, block_sizes[0])\n # eqyb = Eq(yb_size, block_sizes[1])\n # eqxb2 = Eq(x0_blk0_size, block_sizes[2])\n # eqyb2 = Eq(y0_blk0_size, block_sizes[3])\n\n eq0 = Eq(sp_zi.symbolic_max, nnz_sp_source_mask[x, y] - 1,\n implicit_dims=(time, x, y))\n\n eq1 = Eq(zind, sp_source_id[x, y, sp_zi], implicit_dims=(time, x, y, sp_zi))\n\n # inj_u = source_mask[x, y, zind] * save_src_u[time, source_id[x, y, zind]]\n # Is source_mask needed /\n inj_u = save_src_u[time, source_id[x, y, zind]]\n\n eq_u = Inc(u.forward[t+1, x, y, zind], inj_u, implicit_dims=(time, x, y, sp_zi))\n\n # The additional time-tiling equations\n # tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u, eq_v)\n\n performance_map = np.array([[0, 0, 0, 0, 0]])\n\n bxstart = 4\n bxend = 9\n bystart = 4\n byend = 9\n bstep = 4\n\n txstart = 32\n txend = 65\n tystart = 32\n tyend = 65\n\n tstep = 32\n # Temporal autotuning\n for tx in range(txstart, txend, tstep):\n # import pdb; pdb.set_trace()\n for ty in range(tystart, tyend, tstep):\n for bx in range(bxstart, bxend, bstep):\n for by in range(bystart, byend, bstep):\n\n block_sizes.data[:] = [tx, ty, bx, by]\n\n eqxb = Eq(xb_size, block_sizes[0])\n eqyb = Eq(yb_size, block_sizes[1])\n eqxb2 = Eq(x0_blk0_size, block_sizes[2])\n eqyb2 = Eq(y0_blk0_size, block_sizes[3])\n\n u.data[:] = 0\n print(\"-----\")\n tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u)\n\n # import pdb; pdb.set_trace()\n\n # Execute operator and return wavefield and receiver data\n print(\"TT====Forward norm(u)\", norm(u))\n summary_tt = self.op_fwd(save, tteqs).apply(u=u, vp=vp,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n print(\"TT====Forward norm(u)\", norm(u))\n # op_tt = self.op_fwd(save, tteqs)\n\n # Execute operator and return wavefield and receiver data\n #summary_tt = 
self.op_fwd(save).apply(src=src, rec=rec, u=u, vp=vp,\n # dt=kwargs.pop('dt', self.dt), **kwargs)\n\n # op_tt = self.op_fwd(kernel, save, tteqs)\n # summary_tt = op_tt.apply(u=u, dt=kwargs.pop('dt', self.dt), **kwargs)\n configuration['jit-backdoor'] = False\n norm_tt_u = norm(u)\n print(\"Norm u:\", regnormu)\n print(\"Norm(tt_u):\", norm_tt_u)\n configuration['jit-backdoor'] = True\n\n print(\"===Temporal blocking======================================\")\n\n performance_map = np.append(performance_map, [[tx, ty, bx, by, summary_tt.globals['fdlike'].gpointss]], 0)\n \n print(performance_map)\n # tids = np.unique(performance_map[:, 0])\n\n #for tid in tids:\n bids = np.where((performance_map[:, 0] == tx) & (performance_map[:, 1] == ty))\n bx_data = np.unique(performance_map[bids, 2])\n by_data = np.unique(performance_map[bids, 3])\n gptss_data = performance_map[bids, 4]\n gptss_data = gptss_data.reshape(len(bx_data), len(by_data))\n\n fig, ax = plt.subplots()\n im = ax.imshow(gptss_data); #pause(2)\n # We want to show all ticks...\n ax.set_xticks(np.arange(len(bx_data)))\n ax.set_yticks(np.arange(len(by_data)))\n # ... and label them with the respective list entries\n ax.set_xticklabels(bx_data)\n ax.set_yticklabels(by_data)\n\n ax.set_title(\"Gpts/s for fixed tile size. (Sweeping block sizes)\")\n fig.tight_layout()\n\n fig.colorbar(im, ax=ax)\n # ax = sns.heatmap(gptss_data, linewidth=0.5)\n plt.savefig(str(shape[0]) + str(np.int32(tx)) + str(np.int32(ty)) + \".pdf\")\n\n\n if 1:\n cmap = plt.cm.get_cmap(\"viridis\")\n values = u.data[0, :, :, :]\n vistagrid = pv.UniformGrid()\n vistagrid.dimensions = np.array(values.shape) + 1\n vistagrid.spacing = (1, 1, 1)\n vistagrid.origin = (0, 0, 0) # The bottom left corner of the data set\n vistagrid.cell_arrays[\"values\"] = values.flatten(order=\"F\")\n vistaslices = vistagrid.slice_orthogonal()\n vistagrid.plot(show_edges=True)\n vistaslices.plot(cmap=cmap)\n\n # import pdb;pdb.set_trace()\n return rec, u, summary",
"def linear_forward_model(self, shot, m0, m1, frequencies, return_parameters=[]):\n\n # Sanitize the input\n if not np.iterable(frequencies):\n frequencies = [frequencies]\n\n # Local references\n solver = self.solver\n solver.model_parameters = m0 # this updates dt and the number of steps so that is appropriate for the current model\n\n mesh = solver.mesh\n\n d = solver.domain\n dt = solver.dt\n nsteps = solver.nsteps\n source = shot.sources\n\n m1_padded = m1.with_padding()\n\n # Storage for the field\n u1hats = dict()\n for nu in frequencies:\n u1hats[nu] = 0.0\n\n # Setup data storage for the forward modeled data\n if 'simdata' in return_parameters:\n simdata = dict()\n\n # Setup data storage for the forward modeled data (in time, if it is needed, and it frequently is)\n if 'simdata_time' in return_parameters:\n simdata_time = np.zeros((solver.nsteps, shot.receivers.receiver_count))\n\n # Storage for the time derivatives of p\n if 'dWaveOp0' in return_parameters:\n dWaveOp0 = dict()\n u0hats = dict()\n for nu in frequencies:\n dWaveOp0[nu] = 0.0\n u0hats[nu] = 0.0\n\n # Storage for the time derivatives of p\n if 'dWaveOp1' in return_parameters:\n dWaveOp1 = dict()\n for nu in frequencies:\n dWaveOp1[nu] = 0.0\n\n subsample_indices = self._compute_subsample_indices(frequencies)\n\n # Step k = 0\n # p_0 is a zero array because if we assume the input signal is causal\n # and we assume that the initial system (i.e., p_(-2) and p_(-1)) is\n # uniformly zero, then the leapfrog scheme would compute that p_0 = 0 as\n # well. ukm1 is needed to compute the temporal derivative.\n solver_data = solver.SolverData()\n\n # (***) Given that these modeling tools are for frequency methods, we do not\n # have the time derivatives / wave operator derivatives (aka dWaveOp) in\n # time available. 
This saves space, but as a result we have to recompute\n # it.\n # Also, because implicit and some ODE methods require uhat_1 at times k\n # and k+1, we need uhat_0 at k, k+1, and k+2, so all of this rigamaroll\n # is to get that.\n solver_data_u0 = solver.SolverData()\n\n # For u0, set up the right hand sides\n rhs_u0_k = np.zeros(mesh.shape(include_bc=True))\n rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))\n rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f(0*dt))\n rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f(1*dt))\n\n # compute u0_kp1 so that we can compute dWaveOp0_k (needed for u1)\n solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)\n\n # compute dwaveop_0 (k=0) and allocate space for kp1 (needed for u1 time step)\n dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)\n dWaveOp0_kp1 = dWaveOp0_k.copy()\n\n solver_data_u0.advance()\n # from here, it makes more sense to refer to rhs_u0 as kp1 and kp2, because those are the values we need\n # to compute u0_kp2, which is what we need to compute dWaveOp0_kp1\n rhs_u0_kp1, rhs_u0_kp2 = rhs_u0_k, rhs_u0_kp1 # to reuse the allocated space and setup the swap that occurs a few lines down\n\n for k in range(nsteps):\n\n uk = solver_data.k.primary_wavefield\n uk_bulk = mesh.unpad_array(uk)\n\n t = k*dt\n\n # Record the data at t_k\n if 'simdata_time' in return_parameters:\n shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata_time)\n\n for nu in frequencies:\n idx = subsample_indices[nu]\n if np.mod(k, idx) == 0:\n u1hats[nu] += uk*(np.exp(-1j*2*np.pi*nu*t)*dt*idx)\n\n if 'dWaveOp0' in return_parameters:\n for nu in frequencies:\n idx = subsample_indices[nu]\n if np.mod(k, idx) == 0:\n u0hats[nu] += solver_data_u0.k.primary_wavefield*(np.exp(-1j*2*np.pi*nu*t)*dt*idx)\n\n # Note, we compute result for k+1 even when k == nsteps-1. We need\n # it for the time derivative at k=nsteps-1.\n\n # See comment (***) above.\n # compute u0_kp2 so we can get dWaveOp0_kp1 for the rhs for u1\n rhs_u0_kp1, rhs_u0_kp2 = rhs_u0_kp2, rhs_u0_kp1\n rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f((k+2)*dt))\n solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)\n\n # shift the dWaveOp0's (ok at k=0 because they are equal then)\n # The derivative component is computed after the time step so that\n # information from time k+1 can be used to compute the derivative.\n dWaveOp0_k, dWaveOp0_kp1 = dWaveOp0_kp1, dWaveOp0_k\n dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)\n\n solver_data_u0.advance()\n\n if k == 0:\n rhs_k = m1_padded*(-1*dWaveOp0_k)\n rhs_kp1 = m1_padded*(-1*dWaveOp0_kp1)\n else:\n rhs_k, rhs_kp1 = rhs_kp1, m1_padded*(-1*dWaveOp0_kp1)\n\n solver.time_step(solver_data, rhs_k, rhs_kp1)\n\n # When k is the nth step, the next step is uneeded, so don't swap\n # any values. 
This way, uk at the end is always the final step\n if(k == (nsteps-1)): break\n\n # Don't know what data is needed for the solver, so the solver data\n # handles advancing everything forward by one time step.\n # k-1 <-- k, k <-- k+1, etc\n solver_data.advance()\n\n # Compute time derivative of p at time k\n if 'dWaveOp0' in return_parameters:\n for nu in frequencies:\n dWaveOp0[nu] = solver.compute_dWaveOp('frequency', u0hats[nu],nu)\n\n # Compute time derivative of p at time k\n if 'dWaveOp1' in return_parameters:\n for nu in frequencies:\n dWaveOp1[nu] = solver.compute_dWaveOp('frequency', u1hats[nu],nu)\n\n # Record the data at t_k\n if 'simdata' in return_parameters:\n for nu in frequencies:\n simdata[nu] = shot.receivers.sample_data_from_array(mesh.unpad_array(u1hats[nu]))\n\n retval = dict()\n\n if 'dWaveOp0' in return_parameters:\n retval['dWaveOp0'] = dWaveOp0\n if 'wavefield1' in return_parameters:\n _u1hats = dict()\n _u1hats = {nu: mesh.unpad_array(u1hats[nu], copy=True) for nu in frequencies}\n retval['wavefield1'] = _u1hats\n if 'dWaveOp1' in return_parameters:\n retval['dWaveOp1'] = dWaveOp1\n if 'simdata' in return_parameters:\n retval['simdata'] = simdata\n if 'simdata_time' in return_parameters:\n retval['simdata_time'] = simdata_time\n\n return retval",
"def _model_forward(self, X):\n caches = []\n A = X\n\n for l in range(1, self.layer_num+1):\n A, cache = self._linear_activation_forward(\n A, self.parameters['W' + str(l)], self.parameters['b' + str(l)], self.layer_activations[l][0])\n caches.append(cache)\n\n assert (A.shape == (1, X.shape[1]))\n\n return A, caches",
"def forward(self, x):\n flows_forward, flows_backward = self.get_flow(x)\n b, n, _, h, w = x.size()\n\n # backward branch\n out_l = []\n feat_prop = x.new_zeros(b, self.num_feat, h, w)\n for i in range(n - 1, -1, -1):\n x_i = x[:, i, :, :, :]\n if i < n - 1:\n flow = flows_backward[:, i, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n feat_prop = torch.cat([x_i, feat_prop], dim=1)\n feat_prop = self.backward_trunk(feat_prop)\n out_l.insert(0, feat_prop)\n\n # forward branch\n feat_prop = torch.zeros_like(feat_prop)\n for i in range(0, n):\n x_i = x[:, i, :, :, :]\n if i > 0:\n flow = flows_forward[:, i - 1, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n\n feat_prop = torch.cat([x_i, feat_prop], dim=1)\n feat_prop = self.forward_trunk(feat_prop)\n\n # upsample\n out = torch.cat([out_l[i], feat_prop], dim=1)\n out = self.lrelu(self.fusion(out))\n out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n out = self.lrelu(self.conv_hr(out))\n out = self.conv_last(out)\n base = F.interpolate(x_i, scale_factor=4, mode='bilinear', align_corners=False)\n out += base\n out_l[i] = out\n\n return torch.stack(out_l, dim=1)",
"def forward(self, x: torch.Tensor) -> torch.Tensor:\n model_output = None\n #######################################################################\n # Student code begins\n #######################################################################\n\n (N,C,H,W) = x.shape\n\n conv_features = self.conv_layers(x)\n \n flat_features = conv_features.reshape(-1, 500)\n model_output = self.fc_layers(flat_features)\n\n\n #######################################################################\n # Student code ends\n #######################################################################\n return model_output",
"def forward(self, x):\n sources = list()\n tcb_source = list()\n odm_loc = list()\n odm_conf = list()\n if self.phase == 'test':\n feat_sizes = list()\n\n # apply vgg up to conv4_3 relu and conv5_3 relu\n for k in range(self.conv5_3_layer):\n x = self.vgg[k](x)\n if self.size != 512 and self.size != 320 and self.conv3_3_layer - 1 == k:\n s = self.conv3_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if self.conv4_3_layer - 1 == k:\n s = self.conv4_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n elif self.conv5_3_layer - 1 == k:\n s = self.conv5_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply vgg up to fc7\n for k in range(self.conv5_3_layer, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply extra layers and cache source layer outputs\n for k in range(len(self.extras)):\n x = self.extras[k](x)\n if self.extra_1_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if (self.size == 640 or self.size == 5126) and self.extra_2_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # calculate TCB features\n p = None\n for k, v in enumerate(sources[::-1]):\n s = v\n for i in range(3):\n s = self.tcb0[(self.step-k)*3 + i](s)\n if k != 0:\n u = p\n u = self.tcb1[self.step-k](u)\n s += u\n for i in range(3):\n s = self.tcb2[(self.step-k)*3 + i](s)\n p = s\n tcb_source.append(s)\n tcb_source.reverse()\n\n # apply ODM to source layers\n for (x, l, c) in zip(tcb_source, self.odm_loc, self.odm_conf):\n odm_loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n odm_conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n odm_loc = torch.cat([o.view(o.size(0), -1) for o in odm_loc], 1)\n odm_conf = torch.cat([o.view(o.size(0), -1) for o in odm_conf], 1)\n\n if self.phase == \"test\":\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4), # odm loc preds\n self.softmax(odm_conf.view(odm_conf.size(0), -1,\n self.num_classes)), # odm conf preds\n feat_sizes\n )\n else:\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4),\n odm_conf.view(odm_conf.size(0), -1, self.num_classes),\n )\n return output",
"def forward(self, x):\r\n # 1. step 0\r\n x_0 = self.mg0(x)\r\n a_0 = self.mhsa0(x_0)\r\n a_0 = a_0.unsqueeze(dim = 3) # [m, h, w, 1, c]\r\n a_0 = t.matmul(a_0, self.W_p0).squeeze().permute(0, -1, 1, 2) + x # transformation # [m, c, h, w]\r\n a_0_ = a_0\r\n a_0 = a_0.permute(0, 2, 3, 1)\r\n a_0 = self.mlp0(a_0)\r\n a_0 = a_0.permute(0, -1, 1, 2) + a_0_\r\n x_0 = self.max_pool0(a_0) + self.avg_pool0(a_0)\r\n\r\n # 2. step 1\r\n x_1 = self.mg1(x_0)\r\n a_1 = self.mhsa1(x_1)\r\n a_1 = a_1.unsqueeze(dim = 3) # [m, h, w, 1, c]\r\n a_1 = t.matmul(a_1, self.W_p1).squeeze().permute(0, -1, 1, 2) + x_0 # transformation # [m, c, h, w]\r\n a_1_ = a_1\r\n a_1 = a_1.permute(0, 2, 3, 1)\r\n a_1 = self.mlp1(a_1)\r\n a_1 = a_1.permute(0, -1, 1, 2) + a_1_\r\n x_1 = self.max_pool1(a_1) + self.avg_pool1(a_1)\r\n\r\n # 3. step 2\r\n x_2 = self.mg2(x_1)\r\n a_2 = self.mhsa2(x_2)\r\n a_2 = a_2.unsqueeze(dim = 3) # [m, h, w, 1, c]\r\n a_2 = t.matmul(a_2, self.W_p2).squeeze().permute(0, -1, 1, 2) + x_1 # transformation # [m, c, h, w]\r\n a_2_ = a_2\r\n a_2 = a_2.permute(0, 2, 3, 1)\r\n a_2 = self.mlp0(a_2)\r\n a_2 = a_2.permute(0, -1, 1, 2) + a_2_\r\n\r\n # 4. Upsample\r\n a_1 = self.upsample1(a_1)\r\n a_2 = self.upsample2(a_2)\r\n output = a_0 + a_1 + a_2\r\n\r\n return output",
"def forward(self, x, fm, camera_mat):\n\n batch_size = x.shape[0]\n # List of initial 3D coordinates (first item) and outputs of the layers\n out = list()\n\n initial_coordinates_expanded = self.initial_coordinates.expand(\n batch_size, -1, -1)\n out.append(initial_coordinates_expanded)\n \n # #######################\n # First Projection Block\n # Layer 0: 156 x feat_dim\n out.append(self.gp(initial_coordinates_expanded, fm, camera_mat))\n out.append(self.gc1(out[-1])) # Layer 1: 156 x hidden_dim\n for i in range(0, 12): # GraphConvs from and to 156 x hidden_dim\n val = self.gc2[i](out[-1])\n if (i % 2) == 1:\n # Add previous output (Restnet style)\n val = torch.add(val, out[-2]) * 0.5\n out.append(val)\n # Layer 14: Out of dim 156x3, will be used as outputs_2[1]\n out.append(self.gc3(out[-1]))\n\n # #######################\n # Second Projection Block\n # Layer 15: 156 x (hidden_dim + feat_dim)\n v = self.gp(out[-1], fm, camera_mat)\n v = torch.cat([v, out[-2]], dim=2)\n out.append(v)\n # Layer 16: 618x (hidden_dim + feat_dim)\n out.append(self.gup1(out[-1]))\n out.append(self.gc4(out[-1])) # Layer 17: 618 x hidden_dim\n for i in range(0, 12): # GraphConvs from and to 618 x hidden_dim\n val = self.gc5[i](out[-1])\n if (i % 2) == 1:\n # Add previous output (Restnet style)\n val = torch.add(val, out[-2]) * 0.5\n out.append(val)\n # Layer 30: 618 x 3, will be used as outputs_2[2]\n out.append(self.gc6(out[-1]))\n\n # #######################\n # Third Projection Block\n # Layer 31: 618 x hidden_dim + feat_dim\n v = self.gp(out[-1], fm, camera_mat) # 618 x feat_dim\n v = torch.cat([v, out[-2]], dim=2)\n out.append(v)\n # Layer 32: 2466 x hidden_dim + feat_dim\n out.append(self.gup2(out[-1]))\n out.append(self.gc7(out[-1])) # Layer 33: 2466 x hidden_dim\n for i in range(0, 13): # GraphConvs from and to 2466 x hidden_dim\n val = self.gc8[i](out[-1])\n if i % 2 == 1:\n # Add previous output (Restnet style)\n val = torch.add(val, out[-2]) * 0.5\n out.append(val)\n out.append(self.gc9(out[-1])) # Layer 47: 2466 x 3\n # 156 x hidden_dim, 618 x hidden_dim, 2466 x hidden_dim\n outputs = (out[15], out[31], out[-1])\n outputs_2 = (initial_coordinates_expanded,\n self.gup1(out[15]), self.gup2(out[31]))\n\n return outputs, outputs_2",
"def forward(self, x):\n #print('output of fetures.children() : %s'%str([i for i in self.features.children()]))\n #print(\"shape of input is %s\" % str(x.size()))\n for layer_no, layer in enumerate(self.features.children()):\n\n if layer_no is 23:\n y = layer(x)\n if layer_no is 33:\n z = layer(x)\n x = layer(x)\n\n #print('debug')\n #print('layer info: %s'%str(layer))\n #print(\"shape of x is %s\" % str(x.size()))\n\n x = self.conv1D_downstream1(x)\n x = self.conv1D_downstream2(x)\n x = self.upsample_1(x)\n\n z = self.conv1D_pool4(z)\n y = self.conv1D_pool3(y)\n #print('debug')\n #print(\"shape of x is %s\"%str(x.size()))\n #print(\"shape of z is %s\" % str(z.size()))\n\n if x.size() is not z.size():\n x = nn.functional.interpolate(x,size = (z.size()[2],z.size()[3]), mode = 'nearest')\n x = x+ z\n x = self.upsample_2(x)\n x = x+y\n x = self.upsample_3(x)\n\n return x",
"def forward(model: nn.Module, inputs: torch.Tensor, device: torch.device):\n\n model.eval()\n model.to(device)\n\n with torch.no_grad():\n inputs = inputs.to(device)\n return model(inputs)",
"def test_forward(self):\n # test single input\n self.model.w = np.array([[0.5, 0.25]])\n self.model.b = 0.5\n x = np.array([[0.2, 0.1]])\n out = self.model.forward(x)\n self.assertTrue(np.abs(out[0] - 0.6514) < 0.01)\n\n # test multiple inputs\n self.model.w = np.array([[0.1, 0.2]])\n self.model.b = 0.2\n x = np.array([[0.3, 0.4],\n [0.5, 0.6]])\n out = self.model.forward(x)\n should_be = np.array([0.5769,0.5915])\n self.assertTrue(np.allclose(out, should_be, atol=0.01))",
"def _model_forward(self, node_feats, input_graph):\n bg = input_graph.to(self.device)\n \n bg.requires_grad = True\n node_feats.requires_grad = True\n \n if self.model_name in ['MPNN', 'AttentiveFP', 'Weave']:\n edge_feats = bg.edata.pop('e').to(self.device)\n edge_feats.requires_grad = True\n return self.model_instance(bg, node_feats, edge_feats)\n else:\n bg.edata.pop('e').to('cuda')\n return self.model_instance(bg, node_feats)",
"def test_forward_mixed_source_space(tmpdir):\n # get the surface source space\n rng = np.random.RandomState(0)\n surf = read_source_spaces(fname_src)\n\n # setup two volume source spaces\n label_names = get_volume_labels_from_aseg(fname_aseg)\n vol_labels = rng.choice(label_names, 2)\n vol1 = setup_volume_source_space('sample', pos=20., mri=fname_aseg,\n volume_label=vol_labels[0],\n add_interpolator=False)\n vol2 = setup_volume_source_space('sample', pos=20., mri=fname_aseg,\n volume_label=vol_labels[1],\n add_interpolator=False)\n\n # merge surfaces and volume\n src = surf + vol1 + vol2\n\n # calculate forward solution\n fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem)\n assert (repr(fwd))\n\n # extract source spaces\n src_from_fwd = fwd['src']\n\n # get the coordinate frame of each source space\n coord_frames = np.array([s['coord_frame'] for s in src_from_fwd])\n\n # assert that all source spaces are in head coordinates\n assert ((coord_frames == FIFF.FIFFV_COORD_HEAD).all())\n\n # run tests for SourceSpaces.export_volume\n fname_img = tmpdir.join('temp-image.mgz')\n\n # head coordinates and mri_resolution, but trans file\n with pytest.raises(ValueError, match='trans containing mri to head'):\n src_from_fwd.export_volume(fname_img, mri_resolution=True, trans=None)\n\n # head coordinates and mri_resolution, but wrong trans file\n vox_mri_t = vol1[0]['vox_mri_t']\n with pytest.raises(ValueError, match='head<->mri, got mri_voxel->mri'):\n src_from_fwd.export_volume(fname_img, mri_resolution=True,\n trans=vox_mri_t)",
"def get_aligned_feature_4frames(self, x, flows_backward, flows_forward):\n n = x.size(1)\n x_backward = [torch.zeros_like(x[:, -1, ...])]\n for i in range(n, 1, -1):\n x_i = x[:, i - 1, ...]\n flow1 = flows_backward[0][:, i - 2, ...]\n if i == n:\n x_ii = torch.zeros_like(x[:, n - 2, ...])\n flow2 = torch.zeros_like(flows_backward[1][:, n - 3, ...])\n else:\n x_ii = x[:, i, ...]\n flow2 = flows_backward[1][:, i - 2, ...]\n x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')\n x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')\n x_backward.insert(0, self.pa_deform(torch.cat([x_i, x_ii], 1), [x_i_warped, x_ii_warped], x[:, i - 2, ...], [flow1, flow2]))\n x_forward = [torch.zeros_like(x[:, 0, ...])]\n for i in range(-1, n - 2):\n x_i = x[:, i + 1, ...]\n flow1 = flows_forward[0][:, i + 1, ...]\n if i == -1:\n x_ii = torch.zeros_like(x[:, 1, ...])\n flow2 = torch.zeros_like(flows_forward[1][:, 0, ...])\n else:\n x_ii = x[:, i, ...]\n flow2 = flows_forward[1][:, i, ...]\n x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')\n x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')\n x_forward.append(self.pa_deform(torch.cat([x_i, x_ii], 1), [x_i_warped, x_ii_warped], x[:, i + 2, ...], [flow1, flow2]))\n return [torch.stack(x_backward, 1), torch.stack(x_forward, 1)]",
"def training_forward(self, x):\n if self.project_parameters.loss_function == 'BCELoss':\n return self.activation_function(self.backbone_model(x))\n elif self.project_parameters.loss_function == 'CrossEntropyLoss':\n return self.backbone_model(x)",
"def forward(model: Model, X: Any, is_train: bool) -> Tuple[Any, Callable]:\n convert_inputs = model.attrs[\"convert_inputs\"]\n convert_outputs = model.attrs[\"convert_outputs\"]\n\n Xtorch, get_dX = convert_inputs(model, X, is_train)\n Ytorch, torch_backprop = model.shims[0](Xtorch, is_train)\n Y, get_dYtorch = convert_outputs(model, (X, Ytorch), is_train)\n\n def backprop(dY: Any) -> Any:\n dYtorch = get_dYtorch(dY)\n dXtorch = torch_backprop(dYtorch)\n dX = get_dX(dXtorch)\n return dX\n\n return Y, backprop",
"def forward(self, x):\n\n def run0(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n mat = out7[:, :, :, None] + out7[:, :, None, :]\n cur = mat\n if self.num_1d:\n output1d = self.final_1d(out7)\n return cur, output1d\n else:\n return cur\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n if self.num_1d:\n cur, output1d = checkpoint(run0, x, dummy)\n else:\n cur = checkpoint(run0, x, dummy)\n\n def run1(cur):\n first = True\n for lm, m in zip(self.lconvtwos[:7], self.convtwos[:7]):\n if first:\n cur = lm(cur)\n\n first = False\n else:\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run2(cur):\n for lm, m in zip(self.lconvtwos[7:13], self.convtwos[7:13]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run3(cur):\n for lm, m in zip(self.lconvtwos[13:], self.convtwos[13:]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n\n cur = self.final(cur)\n cur = 0.5 * cur + 0.5 * cur.transpose(2, 3)\n return cur\n\n cur = checkpoint(run1, cur)\n cur = checkpoint(run2, cur)\n cur = checkpoint(run3, cur)\n\n if self.num_1d:\n return cur, output1d\n else:\n return cur"
] | [
"0.63526386",
"0.61341417",
"0.60225815",
"0.59443873",
"0.5940945",
"0.5875347",
"0.5855688",
"0.5813345",
"0.57249606",
"0.5699427",
"0.5691829",
"0.5668313",
"0.5632994",
"0.56215024",
"0.5613032",
"0.55930996",
"0.559111",
"0.5573722",
"0.55649126",
"0.55364203",
"0.55294985",
"0.55251026",
"0.55018437",
"0.54518414",
"0.54491365",
"0.54453707",
"0.5406159",
"0.5403717",
"0.54025537",
"0.53885496"
] | 0.6507115 | 0 |
creates a proxy for the given class | def _create_class_proxy(cls, theclass):
def make_method(name):
def method(self, *args, **kw):
if not object.__getattribute__(self, "_track_on")[0]:
return getattr(
object.__getattribute__(self, "_obj"), name)(*args,
**kw)
object.__getattribute__(self, "_track_on")[0] = False
args_value = copy_and_placehold_data(args,
object.__getattribute__(
self, "_track_on"))
args_value_copy = copy_call_data(args_value)
kwargs_value = copy_and_placehold_data(kw,
object.__getattribute__(
self, "_track_on"))
kwargs_value_copy = copy_call_data(kwargs_value)
output = getattr(object.__getattribute__(self, "_obj"),
name)(*args_value, **kwargs_value)
output_value = copy_and_placehold_data(output,
object.__getattribute__(
self, "_track_on"))
output_value_copy = copy_call_data(output_value)
object.__getattribute__(self, "_special_data").append(
SPECIAL_ATTR_DATA(name, args_value_copy, kwargs_value_copy,
output_value_copy))
object.__getattribute__(self, "_track_on")[0] = True
return output_value
return method
namespace = {}
for name in cls._special_names:
if hasattr(theclass, name):
namespace[name] = make_method(name)
return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls, ),
namespace) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_class_proxy(cls, theclass):\n \n def make_method(name):\n def method(self, *args, **kw):\n return getattr(object.__getattribute__(self, \"_obj\"), name)(*args, **kw)\n return method\n \n namespace = {}\n for name in cls._special_names:\n if name in cls._implemented:\n namespace[name] = getattr(cls, name)\n elif hasattr(theclass, name):\n namespace[name] = make_method(name)\n return type(\"%s(%s)\" % (cls.__name__, theclass.__name__), (cls,), namespace)",
"def ProxyType(cls):\n PROXY_TYPES[cls.__namespace__] = cls\n for clsName in getattr(cls, '__known_classes__', ()):\n PROXY_TYPES[clsName] = cls\n return cls",
"def __new__(cls, urls=None):\n if Proxy.__instance is None:\n Proxy.__instance = object.__new__(cls)\n Proxy.__instance.urls = urls\n\n return Proxy.__instance",
"def proxy(self, modelcls):\n return ModelProxy(self, modelcls)",
"def __init__(self, obj):\n obj.Proxy = self",
"def __init__(self, obj):\n obj.Proxy = self",
"def __call__(self, proxy):\n return LocalProxy(self, proxy)",
"def __call__(self, proxy):\n return LocalProxy(self, proxy)",
"def __get__(self, obj, cls=None):\n cls = cls or obj.__class__\n if not issubclass(cls, Model):\n return self # Only return the client when used from a Model\n proxy = self.proxies.get(cls)\n if proxy is None:\n table = cls.__table__\n if table is None:\n table = cls.__table__ = create_table(cls, self.metadata)\n proxy = self.proxies[cls] = SQLTableProxy(table=table, model=cls)\n return proxy",
"def __new__(cls, obj, *args, **kwargs):\n try:\n cache = cls.__dict__[\"_class_proxy_cache\"]\n except KeyError:\n cls._class_proxy_cache = cache = {}\n try:\n theclass = cache[obj.__class__]\n except KeyError:\n cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)\n ins = object.__new__(theclass)\n theclass.__init__(ins, obj, *args, **kwargs)\n return ins",
"def __new__(cls, obj, *args, **kwargs):\n try:\n cache = cls.__dict__[\"_class_proxy_cache\"]\n except KeyError:\n cls._class_proxy_cache = cache = {}\n try:\n theclass = cache[obj.__class__]\n except KeyError:\n cache[obj.__class__] = theclass = cls._create_class_proxy(\n obj.__class__)\n ins = object.__new__(theclass)\n theclass.__init__(ins, obj, *args, **kwargs)\n return ins",
"def __call__(self, proxy):\n def _lookup():\n try:\n return getattr(self, proxy)\n except AttributeError:\n raise UnboundProxyError(\"object '%s' unbound\" % proxy)\n return Proxy(_lookup)",
"def __getattr__(self, name):\n if name in self.proxies:\n return self.proxies[name]\n\n def _lookup():\n try:\n return self.values.value[name]\n except (KeyError, AttributeError):\n raise UnboundProxyError(\"object '%s' unbound\" % name)\n\n proxy = Proxy(_lookup)\n self.proxies[name] = proxy\n return proxy",
"def resolve_alias_cls(self):\n alias = self.alias\n if not callable(alias):\n return\n self.is_proxy = True\n env = XSH.env\n thable = env.get(\"THREAD_SUBPROCS\") and getattr(\n alias, \"__xonsh_threadable__\", True\n )\n cls = ProcProxyThread if thable else ProcProxy\n self.cls = cls\n self.threadable = thable\n # also check capturability, while we are here\n cpable = getattr(alias, \"__xonsh_capturable__\", self.captured)\n self.captured = cpable",
"def new(configuration: Mapping[str, Any], loop: AbstractEventLoop) \\\n -> ProxyProtocol:\n return SocksProxy(loop)",
"def buildProtocol(self, addr):\n proto = portforward.ProxyFactory.buildProtocol(self, addr)\n self.clientFactoryInstance = TestableProxyClientFactory()\n # Force the use of this specific instance\n proto.clientProtocolFactory = lambda: self.clientFactoryInstance\n self.protoInstance = proto\n return proto",
"def test_Proxy(self) -> None:\n\n class Subject:\n def foo(self) -> int:\n return 1\n\n def bar(self) -> int:\n return 2\n\n s = Subject()\n s.baz = 3\n\n class ProxyTest(Proxy):\n def bar(self) -> int:\n return 4\n\n p = ProxyTest(s)\n\n assert p.foo() == 1, p.foo()\n assert p.bar() == 4, p.bar()\n assert p.baz == 3, p.baz\n\n p.baz = 5\n s.baz = 6\n\n assert p.baz == 5, p.baz\n assert p.get() == s, p.get()",
"def _make_type_proxy(obj, dct):\n class TypeProxyMeta(type(obj)):\n def __instancecheck__(cls, x):\n return isinstance(x, obj)\n\n def __subclasscheck__(cls, x):\n return issubclass(x, obj)\n\n # Allow calling the class as usual, which is necessary to\n # use factory classmethod that return new instances\n # (alternative constructors).\n __call__ = obj.__call__\n\n class TypeProxyBase(metaclass=TypeProxyMeta):\n pass\n\n try:\n class TypeProxy(obj, TypeProxyBase):\n pass\n # If we cannot inherit from the class (like bool), pick the first base\n # class that is suitable. That is a tad ugly but better than nothing\n except TypeError:\n # Make sure we get all the methods as on the original type we\n # wanted to subclass\n dct = {**dict(inspect.getmembers(obj)), **dct}\n for obj_ in inspect.getmro(obj):\n try:\n class TypeProxy(obj_, TypeProxyBase):\n pass\n except TypeError:\n continue\n else:\n break\n\n for attr, val in dct.items():\n with contextlib.suppress(TypeError, AttributeError):\n setattr(TypeProxy, attr, val)\n\n TypeProxy.__name__ = obj.__name__\n TypeProxy.__qualname__ = obj.__qualname__\n return TypeProxy",
"def _replicate_class(self, **kwargs):\n return Posterior(**kwargs)",
"def __get__(self, _instance, _inst_cls):\n return getattr(self._call_proxy_inst, self._magic_name)",
"def wrap(cls, orig):\n # hack to give the timestamp this class' specialized methods\n orig.__class__ = cls\n return orig",
"def switch_proxy(self, proxy):",
"def get_proxy(elt, bases=None, _dict=None):\n\n # try to find an instance proxy generator\n proxygenerator = getattr(elt, __GETPROXY__, None)\n\n # if a proxy generator is not found, use this module\n if proxygenerator is None:\n if isroutine(elt):\n result = proxify_routine(elt)\n\n else: # in case of object, result is a Proxy\n result = proxify_elt(elt, bases=bases, _dict=_dict)\n\n else: # otherwise, use the specific proxy generator\n result = proxygenerator()\n\n return result",
"def set_proxy(self):",
"def _create(self, target_class, *args, **kw):\n\n raise NotImplementedError",
"def __new__(cls, obj):\n # Use a different class for slots\n if hasattr(obj, '__slots__'):\n cls = SlotsObjectProxy\n # Instantiate\n return super(ObjectProxy, cls).__new__(cls, obj)",
"def _instrument_class(self, cls):\n newcls = type('InstrumentedClass', (cls, MapRedBase), {})\n return newcls",
"def __call__ (self, cls):\n # Define a wrapper function to capture the actual instantiation and __init__ params\n @wraps(cls)\n def wrapper_f(*args, **kwargs):\n #print(f'type of cls is {type(cls)}')\n peripheral = self.peripheral_type(**self.kwargs)\n o = cls(*args, **kwargs)\n o.message_debug(f\"Decorating class {cls.__name__} with {self.peripheral_type.__name__}\")\n o.attach_sensor(peripheral)\n return o\n return wrapper_f",
"def _instantiate(cls, **kwargs):\n return cls(**kwargs)",
"def delegated(cls):\n return cls"
] | [
"0.74995166",
"0.68043464",
"0.66746736",
"0.66074884",
"0.6450246",
"0.6450246",
"0.644239",
"0.644239",
"0.6424365",
"0.6309202",
"0.6299057",
"0.6143022",
"0.60028124",
"0.59988916",
"0.5965285",
"0.59586173",
"0.59504575",
"0.5923421",
"0.5914526",
"0.5888929",
"0.5888668",
"0.58483773",
"0.5838901",
"0.58057487",
"0.57911783",
"0.5789844",
"0.5720469",
"0.571809",
"0.5717321",
"0.5711852"
] | 0.7449586 | 1 |
creates a proxy instance referencing `obj`. (obj, args, kwargs) are passed to this class' __init__, so deriving classes can define an __init__ method of their own. | def __new__(cls, obj, *args, **kwargs):
try:
cache = cls.__dict__["_class_proxy_cache"]
except KeyError:
cls._class_proxy_cache = cache = {}
try:
theclass = cache[obj.__class__]
except KeyError:
cache[obj.__class__] = theclass = cls._create_class_proxy(
obj.__class__)
ins = object.__new__(theclass)
theclass.__init__(ins, obj, *args, **kwargs)
return ins | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __new__(cls, obj, *args, **kwargs):\n try:\n cache = cls.__dict__[\"_class_proxy_cache\"]\n except KeyError:\n cls._class_proxy_cache = cache = {}\n try:\n theclass = cache[obj.__class__]\n except KeyError:\n cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)\n ins = object.__new__(theclass)\n theclass.__init__(ins, obj, *args, **kwargs)\n return ins",
"def __new__(cls, obj):\n # Use a different class for slots\n if hasattr(obj, '__slots__'):\n cls = SlotsObjectProxy\n # Instantiate\n return super(ObjectProxy, cls).__new__(cls, obj)",
"def __init__(self, obj):\n obj.Proxy = self",
"def __init__(self, obj):\n obj.Proxy = self",
"def bind(self, obj: object):\n new_inst = self.__class__(\n func=self._func,\n self_obj=obj,\n unique=self._unique,\n timeout_seconds=self._timeout_seconds,\n expire_seconds=self._expire_seconds,\n **self._kwargs,\n )\n setattr(obj, self._func.__name__, new_inst)\n return new_inst",
"def __init__(self, obj):\n self._store = {}\n self.obj = weakref.proxy(obj)",
"def make_object(obj, kwargs):\n return obj(**kwargs)",
"def __new__(cls, urls=None):\n if Proxy.__instance is None:\n Proxy.__instance = object.__new__(cls)\n Proxy.__instance.urls = urls\n\n return Proxy.__instance",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n obj = kwargs.get(\"obj\")\n if obj:\n self.obj = obj",
"def new(self, obj):\n pass",
"def _make_type_proxy(obj, dct):\n class TypeProxyMeta(type(obj)):\n def __instancecheck__(cls, x):\n return isinstance(x, obj)\n\n def __subclasscheck__(cls, x):\n return issubclass(x, obj)\n\n # Allow calling the class as usual, which is necessary to\n # use factory classmethod that return new instances\n # (alternative constructors).\n __call__ = obj.__call__\n\n class TypeProxyBase(metaclass=TypeProxyMeta):\n pass\n\n try:\n class TypeProxy(obj, TypeProxyBase):\n pass\n # If we cannot inherit from the class (like bool), pick the first base\n # class that is suitable. That is a tad ugly but better than nothing\n except TypeError:\n # Make sure we get all the methods as on the original type we\n # wanted to subclass\n dct = {**dict(inspect.getmembers(obj)), **dct}\n for obj_ in inspect.getmro(obj):\n try:\n class TypeProxy(obj_, TypeProxyBase):\n pass\n except TypeError:\n continue\n else:\n break\n\n for attr, val in dct.items():\n with contextlib.suppress(TypeError, AttributeError):\n setattr(TypeProxy, attr, val)\n\n TypeProxy.__name__ = obj.__name__\n TypeProxy.__qualname__ = obj.__qualname__\n return TypeProxy",
"def __get__(self, obj, cls=None):\n cls = cls or obj.__class__\n if not issubclass(cls, Model):\n return self # Only return the client when used from a Model\n proxy = self.proxies.get(cls)\n if proxy is None:\n table = cls.__table__\n if table is None:\n table = cls.__table__ = create_table(cls, self.metadata)\n proxy = self.proxies[cls] = SQLTableProxy(table=table, model=cls)\n return proxy",
"def __call__(self, proxy):\n return LocalProxy(self, proxy)",
"def __call__(self, proxy):\n return LocalProxy(self, proxy)",
"def __newobj__(cls, *args):\n return cls.__new__(cls, *args)",
"def __init__(self, obj, **adapted_methods):\n self.obj = obj\n self.__dict__.update(adapted_methods) # 将传入的实例属性作为适配器实例的属性",
"def from_object(cls, obj, base_rule=None):\n if isinstance(obj, dict):\n return cls.from_dict(obj, base_rule=base_rule)\n elif isinstance(obj, Iterable):\n return cls.from_iterable(obj, base_rule=base_rule)\n else:\n raise ValueError('Cannot build {0} from {1}'.format(cls, type(obj)))",
"def extend_instance(obj, cls):\n base_cls = obj.__class__\n base_cls_name = obj.__class__.__name__\n obj.__class__ = type(base_cls_name, (base_cls, cls), {})",
"def __init__(self, obj):\n self.obj = obj\n self._pkcache = {}\n self._idcache = obj.__class__.__instance_cache__\n self._typecache = defaultdict(dict)\n self.init()",
"def register_instance(self, obj):\n self.__instances.append(obj)\n self._proxy_class_methods(obj)",
"def __init__(self, obj, api_prefix):\n self._obj = obj\n self._api_prefix = api_prefix",
"def _from_other(cls, obj):",
"def __new__(cls, *args, **kwargs):\n obj = super().__new__(cls)\n obj.init_kwargs = cls.init_params(*args, **kwargs)\n return obj",
"def __init__(self, decoratedObj):\n\n self.__decoratedObj = decoratedObj",
"def __init__(self, obj, adapted_methods):\n self.obj = obj\n self.__dict__.update(adapted_methods)",
"def __call__(self, proxy):\n def _lookup():\n try:\n return getattr(self, proxy)\n except AttributeError:\n raise UnboundProxyError(\"object '%s' unbound\" % proxy)\n return Proxy(_lookup)",
"def __new__(cls, *args, **kwds):\n cls.__init_lock__.acquire()\n try:\n if not cls.__decorated__:\n cls._decorate()\n cls.__decorated__ = True\n \n return object.__new__(cls, *args, **kwds)\n finally:\n cls.__init_lock__.release()",
"def init_from_self(self, *args, **kwargs):\n kwargs = dict(ChainMap(\n kwargs,\n {key: self.__getattribute__(key) for key in self.__slots__ if key not in PhysicalInstance.__slots__}\n ))\n\n new_instance = self.__class__(\n *args,\n obj_name=''.join([self.obj_name, '_init_from_self']),\n predictor_names=self.predictor_names,\n dependant_names=self.dependant_names,\n **kwargs)\n return new_instance",
"def instantiate(obj):\n return obj() if isinstance(obj, type) else obj",
"def new(self, obj):\n key = obj.__class__.__name__+'.'+obj.id\n self.__objects[key] = obj"
] | [
"0.81124973",
"0.76509285",
"0.7615072",
"0.7615072",
"0.67413515",
"0.6698327",
"0.6490171",
"0.6413535",
"0.63807136",
"0.6225159",
"0.6205126",
"0.6191848",
"0.6100808",
"0.6100808",
"0.60938245",
"0.6087132",
"0.5924417",
"0.59213203",
"0.5883061",
"0.5853467",
"0.5848173",
"0.58317083",
"0.5811227",
"0.5801055",
"0.5797183",
"0.57959557",
"0.57807213",
"0.577025",
"0.56968653",
"0.5695484"
] | 0.81025034 | 1 |
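
The two proxy records above (the special-method wrapper generator and the caching __new__) are normally companion methods of a single Proxy base class. The following is a minimal, runnable sketch of that arrangement, assuming a plain forwarding proxy: it is not code taken from either record, its _special_names list is deliberately short (real implementations enumerate dozens of special methods), and the call-tracking helpers referenced in the first record (copy_and_placehold_data, copy_call_data, SPECIAL_ATTR_DATA) are omitted.

# Minimal sketch of a transparent object proxy (assumed arrangement, not the records' source).
class Proxy(object):
    _special_names = ["__len__", "__getitem__", "__iter__", "__contains__", "__str__"]

    def __init__(self, obj):
        object.__setattr__(self, "_obj", obj)

    def __getattr__(self, name):
        # Ordinary attribute access is forwarded to the wrapped object.
        return getattr(object.__getattribute__(self, "_obj"), name)

    @classmethod
    def _create_class_proxy(cls, theclass):
        # Build a subclass of Proxy whose special methods delegate to the wrapped object;
        # special methods must live on the type because Python looks them up there.
        def make_method(name):
            def method(self, *args, **kw):
                return getattr(object.__getattribute__(self, "_obj"), name)(*args, **kw)
            return method

        namespace = {}
        for name in cls._special_names:
            if hasattr(theclass, name):
                namespace[name] = make_method(name)
        return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)

    def __new__(cls, obj, *args, **kwargs):
        # Cache one generated subclass per wrapped type, then instantiate it.
        try:
            cache = cls.__dict__["_class_proxy_cache"]
        except KeyError:
            cls._class_proxy_cache = cache = {}
        try:
            theclass = cache[obj.__class__]
        except KeyError:
            cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
        ins = object.__new__(theclass)
        theclass.__init__(ins, obj, *args, **kwargs)
        return ins

if __name__ == "__main__":
    p = Proxy([1, 2, 3])
    assert len(p) == 3 and p[0] == 1 and 2 in p
    print(type(p).__name__)  # -> "Proxy(list)"

Running the sketch prints Proxy(list), showing that __new__ instantiated the dynamically generated subclass rather than Proxy itself.
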
Get the filenames corresponding to all the different mu values | def mean_filenames(filename, means):
    return [filename + "(mu=" + str(round(mean, 2)) + ", loc=1.0).csv" for mean in means] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_filenames():\r\n datadir = \"./phase3_data/\"\r\n samples = os.listdir(datadir)\r\n all_files = []\r\n for i in range(len(samples)):\r\n sampfiles = []\r\n datadir = \"./phase3_data/\" + samples[i]\r\n files = os.listdir(datadir)\r\n for file in files:\r\n if file.endswith(\".bin\"):\r\n sampfiles += [file]\r\n all_files += [sampfiles]\r\n return samples, all_files",
"def get_filenames(is_training, data_dir):\n if is_training:\n return [os.path.join(data_dir, 'LIP_train5.record')]\n else:\n return [os.path.join(data_dir, 'LIP_val5.record')]",
"def get_mean_files(self):\n return [self.mean_file_0, self.mean_file_1]",
"def get_filenames(is_training, data_dir):\n if is_training:\n return [\n os.path.join(data_dir, 'train-%05d-of-01024' % i)\n for i in range(_NUM_TRAIN_FILES)]\n else:\n return [\n os.path.join(data_dir, 'validation-%05d-of-00128' % i)\n for i in range(_NUM_VAL_FILES)]",
"def _get_file_names():\n file_names = {}\n file_names['train'] = ['data_batch_%d' % i for i in xrange(1, 6)]\n file_names['test'] = ['test_batch']\n\n return file_names",
"def get_filenames(mode, data_dir):\n if mode == 'train':\n return [os.path.join(data_dir, 'encoder.train.input'), os.path.join(data_dir, 'encoder.train.target'),\n os.path.join(data_dir, 'decoder.train.target')]\n else:\n return [os.path.join(data_dir, 'encoder.test.input'), os.path.join(data_dir, 'encoder.test.target'),\n os.path.join(data_dir, 'decoder.test.target')]",
"def get_filenames(b,X,timefac,BW):\r\n\tn = int(200000*X*timefac); nn = int(200000*X*10)\r\n\troot = \"./dat_LE_stream/\"\r\n\trootL = 'C:/AsusWebStorage/MySyncData/TRAFILES/'\r\n\tpodir = \"/b=\"+str(b)+\"/\" if BW else \"/HO/\"\r\n\t\r\n\ttrafile = rootL+\"TRA_BW_b\"+str(b)+\"X\"+str(int(X))+\"n\"+str(nn)+\"seed65438\"\r\n\tif not os.path.isfile(trafile):\r\n\t\ttrafile = rootL+\"TRA_BW_b\"+str(b)+\"X\"+str(int(X))+\"n\"+str(n)+\"seed65438\"\r\n\toutfile = root+podir+os.path.split(trafile)[1][4:]\r\n\t\r\n\trndfile = os.path.split(outfile)[0]+\"/RND_\"+os.path.split(outfile)[1]+\".png\"\r\n\tpdffile = os.path.split(outfile)[0]+\"/PDF_\"+os.path.split(outfile)[1]\r\n\tstrfile = os.path.split(outfile)[0]+\"/STR_\"+os.path.split(outfile)[1]\r\n\t\r\n\treturn trafile,rndfile,pdffile,strfile,n",
"def get_files():\n\n img_dir = '../ADE20K_2016_07_26/full_data/images/validation/'\n sem_dir = '../ADE20K_2016_07_26/full_data/annotations/validation/'\n ins_dir = '../ADE20K_2016_07_26/full_data/annotations_instance/validation/'\n\n img_files = os.listdir(img_dir)\n sem_files = os.listdir(sem_dir)\n ins_files = os.listdir(ins_dir)\n \n img_files = [ os.path.join(img_dir,item) for item in img_files ]\n sem_files = [ os.path.join(sem_dir,item) for item in sem_files ]\n ins_files = [ os.path.join(ins_dir,item) for item in ins_files ]\n \n img_files.sort()\n sem_files.sort()\n ins_files.sort()\n \n return img_files, sem_files, ins_files",
"def get_filenames(self):\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n lookup_name = 'train'\n elif self.mode == tf.estimator.ModeKeys.EVAL:\n lookup_name = 'validation'\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n lookup_name = self.predict_split\n filenames = tf.gfile.Glob(\n os.path.join(self.data_dir, '{}-*-of-*'.format(lookup_name)))\n if tf.estimator.ModeKeys.PREDICT:\n # Sort so that TFRecords will be read out deterministically.\n filenames = sorted(filenames)\n return filenames",
"def get_filenames(self):\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n lookup_name = 'train'\n elif self.mode == tf.estimator.ModeKeys.EVAL:\n lookup_name = 'validation'\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n lookup_name = self.predict_split\n filenames = tf.gfile.Glob(\n os.path.join(self.data_dir, '{}-*-of-*'.format(lookup_name)))\n if tf.estimator.ModeKeys.PREDICT:\n # Sort so that TFRecords will be read out deterministically.\n filenames = sorted(filenames)\n return filenames",
"def get_filenames(root_dir):\n from pathlib import Path\n\n file_list = Path(root_dir).rglob('*featuresN.hdf5')\n file_list = [str(file) for file in file_list]\n\n filenames = pd.DataFrame(file_list, columns=['file_name'])\n filenames.insert(0, 'file_id', np.arange(len(file_list)))\n\n return filenames",
"def generate_filename(self):\n file_pattern = os.path.join(self.path, \"TCGA-*\")\n for f in glob(file_pattern):\n organ = get_organ(f)\n for raw_f in glob(os.path.join(f, \"*.tif\")):\n gt_f = raw_f.replace(\".tif\", \".xml\")\n yield raw_f, gt_f, organ",
"def files():\r\n fn=pd.read_csv(request.files.get('fnm'))\r\n scaling = scaler.transform(fn)\r\n prediction = classifier.predict(scaling)\r\n return 'Predictions'+ str(list(prediction))",
"def filenames(self):\n pass",
"def output_files(self):\n # Output file for Moller generation\n if 'moller' in self.name:\n return ['moller.stdhep']\n # Output file for beam generation\n return ['beam.stdhep']",
"def get_result_filenames(self,directory):\n return [os.path.join(directory,name) for name in os.listdir(directory)\n if os.path.isfile(os.path.join(directory,name)) and\n os.path.splitext(name)[1].lower() == '.trf']",
"def output_files(self):\n return [self.input_files()[0].replace(\".lhe.gz\", \".stdhep\").replace(\".lhe\", \".stdhep\")]",
"def output_files(self):\n o = []\n if 'unweighted' in self.event_types:\n o.append(self.name + \"_unweighted_events.lhe.gz\")\n if 'weighted' in self.event_types:\n o.append(self.name + \"_events.lhe.gz\")\n return o",
"def get_filenames(is_training,datadir):\n assert os.path.exists(datadir), (\n 'Can not find data at given directory!!')\n if(is_training):\n labels = []\n data_dir = []\n with open('/home/qnie/PycharmProjects/ntumotion/training_protocol/fEDM_R_CS_trainimgs.txt') as f:\n for line in f:\n data_dir.append(datadir+line.strip())\n f.close()\n\n with open('/home/qnie/PycharmProjects/ntumotion/training_protocol/fEDM_R_CS_trainlabels.txt') as f:\n for line in f:\n labels.append(int(line.strip()))\n f.close()\n else:\n labels = []\n data_dir = []\n with open('/home/qnie/PycharmProjects/ntumotion/training_protocol/fEDM_R_CS_testimgs.txt') as f:\n for line in f:\n data_dir.append(datadir + line.strip())\n f.close()\n\n with open('/home/qnie/PycharmProjects/ntumotion/training_protocol/fEDM_R_CS_testlabels.txt') as f:\n for line in f:\n labels.append(int(line.strip()))\n f.close()\n\n return data_dir, labels",
"def _get_filenames():\n src_dir = os.path.join(FLAGS.dataset_dir, FLAGS.src_dir)\n filenames = [os.path.join(src_dir, f) for f in os.listdir(src_dir) if\n f.endswith(\".tfrecord\") and\n all([blackflag not in f for blackflag in TEMP_BLACK_LIST])]\n shuffle(filenames)\n return filenames",
"def processed_file_names(self):\n # For 'trainval', we use files from 'train' and 'val' to save\n # memory\n if self.stage == 'trainval' and self.val_mixed_in_train:\n return [\n osp.join('train', self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n if self.stage == 'trainval':\n return [\n osp.join(s, self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n return [\n osp.join(self.stage, self.pre_transform_hash, f'{w}.h5')\n for w in self.cloud_ids]",
"def get_test_files(self):\n train_dir = os.path.join(self.data_dir, \"test_{}_new\".format(self.patient_no))\n filenames = os.listdir(train_dir)\n interm = ((os.path.splitext(f)[0].split(\"_\"), os.path.join(train_dir, f)) for f in filenames)\n return [(int(p[0][1]), int(p[0][2]), p[1]) for p in interm]",
"def _get_subject_files(self):\n from itertools import chain\n\n subjsf = fetch_one_file(self.ica_dir, self._subjects_fname)\n mat_file = sio.loadmat(subjsf)['files']\n return [f.strip() for f in list(chain.from_iterable(chain.from_iterable(chain.from_iterable(mat_file))))]",
"def get_val_files(self):\n raise NotImplementedError",
"def raw_file_names(self):\n return self.raw_file_names_3d",
"def files(self):\n def f():\n return {'count': 0, 'size': 0, 'type': None}\n _files = defaultdict(f)\n\n for s in self.subjects:\n for sa in s.samples:\n for blob in sa.blobs.values():\n # get extension\n type = blob['name'].replace('.gz', '')\n type = type.split('/')[-1].split('.')[-1]\n _files[type]['count'] += 1\n _files[type]['type'] = type.title()\n _files[type]['size'] += blob['size']\n return _files",
"def filenames(self):\n names = []\n for furi in np.asarray(self.fileuris).flat:\n names.append(furi)\n return names",
"def whiskerStat_multiext(filename,sigma,noise=False,mag=None,exptime=None):\n hdu=pf.open(filename)\n data = []\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n Mcc=np.zeros(Nobj)\n Mrr = np.zeros(Nobj)\n Mrc = np.zeros(Nobj)\n r50 = np.zeros(Nobj)\n for i in range(Nobj):\n print i\n imgo = hdui.data[i][4:].reshape(160,160)\n psf = rebin(imgo,(40,40))\n if noise == True:\n gain = 0.21 # convert electrons to ADU\n zeropoint = 26.794176 # r band, from Nikolay\n objectphoton = exptime*10**(0.4*(zeropoint - mag))\n skyphoton = 8.460140*exptime\n bkg = skyphoton*gain\n img = (psf * objectphoton + skyphoton)*gain\n img = img + add_imageNoise(img) - bkg\n else:\n img = psf\n Mcc[i],Mrr[i],Mrc[i]=complex2ndMoments(img,sigma)\n r50[i] = mfwhm(img)[5]\n data.append([np.mean(Mcc),np.mean(Mrr),np.mean(Mrc),np.mean(r50)])\n data = np.array(data)\n datamean =np.array([robust_mean(data[:,0]),robust_mean(data[:,1]),robust_mean(data[:,2]),robust_mean(data[:,3])])\n #r50 = 0.5*2.35482*np.sqrt((datamean[0]+datamean[1])/2.)*0.27\n r50moffat = datamean[3]*0.27\n whk = ((datamean[0]-datamean[1])**2 + (2.*datamean[2])**2)**(0.25)*0.27\n phi = np.rad2deg(0.5*np.arctan2(2.*datamean[2],(datamean[0]-datamean[1])))\n datasubmean = data - datamean\n whkrms = (robust_mean((datasubmean[:,0] - datasubmean[:,1])**2 + 4.*datasubmean[:,2]**2))**(0.25)*0.27\n np.savetxt(filename[0:-6]+'txt',[r50moffat,whk,phi,whkrms,datamean[0],datamean[1],datamean[2]],fmt='%10.5f')\n return '---done !-----'",
"def _metric_file_prefixes(TRAIN_CONFIGS):\n data_dir = TRAIN_CONFIGS.get(\"data_dir\")\n ID = _get_experiment_id(TRAIN_CONFIGS) # e.g. (00)-\n mpre = list(set([chop(f) for f in os.listdir(TRAIN_CONFIGS.get(\"metrics_dir\")) if f[0:len(ID)] == ID]))\n return mpre",
"def get_filenames(self):\n return [doc['filename'] for doc in self.vocab]"
] | [
"0.6712646",
"0.6223864",
"0.61738175",
"0.6093145",
"0.60859245",
"0.60529107",
"0.5950844",
"0.5918755",
"0.58327836",
"0.58327836",
"0.5822635",
"0.58209854",
"0.5809355",
"0.5786707",
"0.5783648",
"0.57702667",
"0.5742246",
"0.5739868",
"0.5720252",
"0.5719575",
"0.5704846",
"0.57026905",
"0.5699539",
"0.5698694",
"0.5687019",
"0.566784",
"0.5666221",
"0.56645447",
"0.56488436",
"0.5639258"
] | 0.6487578 | 1 |
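
A small usage sketch for the mean_filenames record above shows the filename pattern it produces. The mu values and the "Levy" base name are illustrative assumptions rather than values taken from the dataset, and this sketch iterates the means parameter instead of a module-level all_means list.

# Hypothetical usage; the mu values are made up for illustration.
all_means = [1.1, 2.0, 3.0]

def mean_filenames(filename, means):
    return [filename + "(mu=" + str(round(mean, 2)) + ", loc=1.0).csv" for mean in means]

print(mean_filenames("Levy", all_means))
# ['Levy(mu=1.1, loc=1.0).csv', 'Levy(mu=2.0, loc=1.0).csv', 'Levy(mu=3.0, loc=1.0).csv']
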
Create the dataframe of all the statistics for a given filename; for example, for all the Levy statistics use "Levy" as the filename | def create_df(filename):
column_dict = []
for fn in mean_filenames(filename, all_means):
path = STAT_PATH + fn
if os.path.exists(path):
df = pd.read_csv(path)
print(len(df.consumedFoodCount))
# print(df)
# df = df[-180:]
# print(len(df.consumedFoodCount))
df_column_name = fn[len(filename)+1:len(filename)+7]
m, m_min_h, m_plus_h = mean_confidence_interval(df['searchEfficiency'], 0.95)
column_dict.append({"Mu": float(fn[len(filename)+4:len(filename)+7]),
"Average Food Consumed": np.mean(df['consumedFoodCount']),
"Average Flight Distance": np.mean(df['distanceTraversed']),
"Average Search Efficiency": m,
"CI Lower Bound": m_min_h,
"CI Upper Bound": m_plus_h})
return pd.DataFrame(column_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_df_metrics():\n DATA_DIR = 'metrics'\n search_pattern = '*.pkl'\n filename = 'stats'\n\n iteration_results = glob.glob(os.path.join(DATA_DIR, search_pattern))\n aggregated_results = os.path.join(DATA_DIR, filename)\n\n df = load_stats_dataframe(iteration_results, aggregated_results)\n print(f'Dataframe {df}')\n return df",
"def load_stats_dataframe(files, aggregated_results=None):\n if os.path.exists(aggregated_results) and all(\n [os.path.getmtime(f) < os.path.getmtime(aggregated_results) for f in files]):\n return pd.read_pickle(aggregated_results)\n\n df = pd.DataFrame()\n for f in files:\n tmp_dict = pd.read_pickle(f)\n tmp_dict['emb_size'] = f.split('_')[2]\n tmp_dict['negative_ratio'] = f.split('_')[4]\n tmp_dict['batch_size'] = f.split('_')[6]\n tmp_dict['epochs'] = f.split('_')[8]\n tmp_dict['classification'] = f.split('_')[-1].split('.')[0]\n\n tmp_df = pd.DataFrame.from_dict(tmp_dict)\n df = pd.concat([df, tmp_df])\n\n if aggregated_results:\n df.to_pickle(aggregated_results)\n\n return df",
"def collect_filterstats_from_logfiles(*args):\n all_stats = {}\n for path in args:\n with path.open(\"r\") as fp:\n all_stats[path.name] = collect_filterstats_from_log(fp)\n return pandas.DataFrame(all_stats.values(), index=all_stats.keys())",
"def get_file_df(self, file_list):\n file_dict = {\n file.split(\".\")[0]: {\"Date\": file.split(\".\")[1], \"File\": file}\n for file in file_list\n }\n df = pd.DataFrame(file_dict).T\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n df[\"File\"] = df[\"File\"].astype(\"string\")\n df = df.reset_index()\n df.rename(columns={\"index\": \"League\"}, inplace=True)\n df = df.sort_values(by=[\"Date\"], ascending=False)\n return df",
"def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times",
"def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)",
"def get_stats_data(filename, n=3):\n \n store = pd.HDFStore(filename, 'r')\n full, partial = list(store.keys())\n df_full = store[full]\n df_partial = store[partial]\n store.close()\n\n df_full['Import_flag'] = 'full'\n df_partial['Import_flag'] = 'partial'\n df = pd.concat([df_full, df_partial])\n df['datetime'] = pd.to_datetime(df['date']+' '+df['time'])\n imp = select_last_n_imports(df, n=n)\n df = df[df['import_id'].isin(imp)].reset_index(drop=True)\n return df",
"def get_df_all_results(self, file):\n # read csv into dataframe\n df = pd.read_csv(file)\n # rename columns\n names = [\"index\", \"samp1\", \"samp2\", \"es\", \"sd1\", \"sd2\", \"k\", \"perm\",\n \"t_test\"]\n df.columns = names\n return df",
"def stats(filename):\n from .utils import stats as print_stats\n click.echo('Starting to gather statistics on file {}'.format(filename))\n print_stats(filename)\n click.echo('Statistics printing finished')",
"def dataStats(reportsDir = \"./reports/\"):\n legMulti = glob.glob(reportsDir+\"/leg/*.json\")\n legOne = glob.glob(reportsDir+\"/leg/oneproc/*.json\")\n legBroken = glob.glob(reportsDir+\"/leg/broken/*.json\")\n \n malMulti = glob.glob(reportsDir+\"/mal/*.json\")\n malOne = glob.glob(reportsDir+\"/mal/oneproc/*.json\")\n malBroken = glob.glob(reportsDir+\"/mal/broken/*.json\")\n \n print(\"\"\"Legal files:\n Total: {0}, One-proc: {1}, Multi-proc: {2}, Broken: {3} \"\"\"\n .format(len(legBroken+legMulti+legOne), len(legOne), len(legMulti), len(legBroken)))\n print(\"\"\"Malicious files:\n Total: {0}, One-proc: {1}, Multi-proc: {2}, Broken: {3} \"\"\"\n .format(len(malBroken+malMulti+malOne), len(malOne), len(malMulti), len(malBroken)))\n print(\"Working samples: {0}\".format(len(malMulti+malOne+legMulti+legOne)))",
"def file_df_from_files(raw_files: list, file_folder: str) -> pd.DataFrame:\n raw_files.sort()\n sizes = [\n round(get_size(os.path.join(file_folder, _)) / 1024 ** 3, 2)\n for _ in raw_files\n ]\n created = [\n datetime.datetime.fromtimestamp(os.path.getctime(os.path.join(file_folder, _)))\n for _ in raw_files\n ]\n file_df = pd.DataFrame(\n list(zip(range(1, len(raw_files) + 1), raw_files, created, sizes)),\n columns=[\"#\", \"Filename\", \"Creation date\", \"Size (GB)\"],\n )\n file_df[\"Shortname\"] = [os.path.splitext(_)[0] for _ in raw_files]\n\n return file_df",
"def read_stats_hdf5(filename):\n\n df_dict = {}\n with pd.HDFStore(filename, mode=\"r\") as store:\n datasets = store.keys()\n\n for key in datasets:\n df = pd.read_hdf(filename, key=key)\n\n # Use start date as index\n if df[\"End\"].dtype == pd.Timestamp:\n # Drop redundant columns\n if \"File Number\" in df.columns:\n df = df.drop(\"File Number\", axis=1, level=0)\n df = df.drop(\"End\", axis=1, level=0)\n\n # Set index\n df = df.set_index(df.columns[0])\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S\")\n df.index.name = \"Date\"\n # Use file number as index\n else:\n df = df.drop([\"Start\", \"End\"], axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index.name = \"File Number\"\n\n # Remove preceding \"/\" from key\n key = key[1:]\n df_dict[key] = df\n\n return df_dict",
"def create_df(files_list=my_files):\n\n all_records = list()\n\n for file in files_list:\n all_records += zr_parser(path.join(my_dir, file))\n\n return pd.DataFrame(all_records)",
"def read_stats_csv(filename):\n\n df_dict = {}\n df = pd.read_csv(filename, header=[0, 1, 2])\n\n # Check if End column data type is datetime - if so use start date as index, otherwise use file number;\n # Use start date as index - Note: df[\"End\"] is interpreted as a dataframe here not a series as in hdf5\n if df[\"End\"].dtypes.all() == pd.Timestamp:\n # Drop redundant columns\n if \"File Number\" in df.columns:\n df = df.drop(\"File Number\", axis=1, level=0)\n df = df.drop(\"End\", axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index.name = \"Date\"\n\n # Convert timestamps to datetime\n try:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S\")\n except:\n try:\n # Timestamp will likely be in local (UK) format if csv file has been subsequently edited and saved\n df.index = pd.to_datetime(df.index, format=\"%d/%m/%Y %H:%M\")\n except:\n raise\n # Use file number as index\n else:\n df = df.drop([\"Start\", \"End\"], axis=1, level=0)\n df = df.set_index(df.columns[0])\n df.index.name = \"File Number\"\n\n df.columns.rename([\"channels\", \"stats\", \"units\"], inplace=True)\n logger = filename.split(\"Statistics_\")[-1].split(\".\")[0]\n df_dict[logger] = df\n\n return df_dict",
"def __init__(self, file_name: str):\n self.case_metrics = []\n self.cluster_metrics = []\n self.file_name = file_name\n\n self.path_to_pmg_metrics = f'metrics/{file_name}_process_model_graphs'\n self.path_to_pmg_vis = f'visualization/{file_name}_process_model_graphs'\n self.path_to_drifts = 'visualization/drifts'\n self.path_to_case_metrics = 'metrics/case_metrics'\n self.path_to_cluster_metrics = 'metrics/cluster_metrics'\n try:\n makedirs(self.path_to_pmg_metrics, exist_ok=True)\n makedirs(self.path_to_pmg_vis, exist_ok=True)\n makedirs(self.path_to_drifts, exist_ok=True)\n makedirs(self.path_to_case_metrics, exist_ok=True)\n makedirs(self.path_to_cluster_metrics, exist_ok=True)\n\n pd.DataFrame(columns=['stream_index', 'timestamp', 'check point', 'case',\n 'graph distance', 'time distance', 'label']) \\\n .to_csv(f'{self.path_to_case_metrics}/{file_name}.csv', index=False)\n pd.DataFrame(columns=['stream_index', 'timestamp', 'check point', 'cluster id',\n 'x', 'y', 'radius', 'weight', 'cluster type']) \\\n .to_csv(f'{self.path_to_cluster_metrics}/{file_name}.csv', index=False)\n except Exception as e:\n print(e)",
"def build_stats_run(dirrun):\n tarfiles = glob(dirrun + 'ascii/*.ascii_out.tar')\n df = read_stats_in_log(tarfiles[0])\n for f in tarfiles[1:]:\n df = pd.concat([df, read_stats_in_log(f)])\n\n return df.sort_values('year')",
"def get_data(filename):\r\n return pd.read_csv(filename)",
"def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data",
"def create_table(folder: str, ext: str = None, pre: bool = False) -> pd.DataFrame:\n folder = format_path(folder)\n input_files = filelist(folder, ext=ext)\n\n if pre is True:\n input_files = preselect(input_files)\n\n summary_df = pd.DataFrame(columns=['file', 'hash'])\n\n summary_df['file'] = input_files\n summary_df['hash'] = hashtable(input_files)\n\n return summary_df",
"def read_stats(filename):\n header = {}\n tableinfo = {}\n measures = []\n rowmeasures = []\n\n with open(filename, 'rt') as fp:\n lines = fp.readlines()\n for line in lines:\n if line == line[0]:\n continue\n #parse commented header\n if line.startswith('#'):\n fields = line.split()[1:]\n if len(fields) < 2:\n continue\n tag = fields[0]\n if tag == 'TableCol':\n col_idx = int(fields[1])\n if col_idx not in tableinfo:\n tableinfo[col_idx] = {}\n tableinfo[col_idx][fields[2]] = ' '.join(fields[3:])\n if tableinfo[col_idx][fields[2]] == \"StructName\":\n struct_idx = col_idx\n elif tag == \"Measure\":\n fields = ' '.join(fields).replace('CortexVol ', 'CortexVol, ').split()\n fields = ' '.join(fields[1:]).split(', ')\n measures.append({'structure': fields[0],\n 'name': fields[1],\n 'description': fields[2],\n 'value': fields[3],\n 'units': fields[4],\n 'source': 'Header'})\n elif tag == \"ColHeaders\":\n if len(fields) != len(tableinfo):\n for idx, fieldname in enumerate(fields[1:]):\n if idx + 1 in tableinfo:\n continue\n tableinfo[idx + 1] = {'ColHeader': fieldname,\n 'Units': 'unknown',\n 'FieldName': fieldname}\n else:\n continue\n else:\n header[tag] = ' '.join(fields[1:])\n else:\n #read values\n row = line.split()\n values = {}\n measures.append({'structure': row[struct_idx-1],\n 'items': [],\n 'source': 'Table'}),\n for idx, value in enumerate(row):\n if idx + 1 == struct_idx:\n continue\n measures[-1]['items'].append({\n 'name': tableinfo[idx + 1]['ColHeader'],\n 'description': tableinfo[idx + 1]['FieldName'],\n 'value': value,\n 'units': tableinfo[idx + 1]['Units'],\n })\n return header, tableinfo, measures",
"def loadDfResults(self, filename=None, trajectoryName=None):\n # chose HDF file to load\n filename = filename or self.HDF_FILE\n self.pypetTrajectory = pu.loadPypetTrajectory(filename, trajectoryName)\n self.nResults = len(self.pypetTrajectory.f_get_run_names())\n\n exploredParameters = self.pypetTrajectory.f_get_explored_parameters()\n\n # create pandas dataframe of all runs with parameters as keys\n logging.info(\"Creating `dfResults` dataframe ...\")\n niceParKeys = [p[11:] for p in exploredParameters.keys()]\n if not self.parameterSpace:\n niceParKeys = [p.split(\".\")[-1] for p in niceParKeys]\n self.dfResults = pd.DataFrame(columns=niceParKeys, dtype=object)\n for nicep, p in zip(niceParKeys, exploredParameters.keys()):\n self.dfResults[nicep] = exploredParameters[p].f_get_range()",
"def getstats(self, filename=None, samples=None, subset=None, ablation_time=False):\n slst = []\n\n if samples is not None:\n subset = self.make_subset(samples)\n elif not hasattr(self, 'subsets'):\n self.make_subset()\n\n if subset is None:\n samples = self.subsets['All_Analyses']\n else:\n try:\n samples = self.subsets[subset]\n except:\n raise ValueError((\"Subset '{:s}' does not .\".format(subset) +\n \"exist.\\nRun 'make_subset' to create a\" +\n \"subset.\"))\n\n for s in self.stats_calced:\n for nm in [n for n in samples if self.srm_identifier\n not in n]:\n if len(self.stats[nm][s]) == 2:\n # make multi - index\n reps = np.arange(self.stats[nm][s].shape[-1])\n ss = np.array([s] * reps.size)\n nms = np.array([nm] * reps.size)\n # make sub - dataframe\n stdf = pd.DataFrame(self.stats[nm][s].T,\n columns=self.stats[nm]['analytes'],\n index=[ss, nms, reps])\n stdf.index.set_names(['statistic', 'sample', 'rep'],\n inplace=True)\n else:\n stdf = pd.DataFrame(self.stats[nm][s],\n index=self.stats[nm]['analytes'],\n columns=[[s],[nm]]).T\n \n stdf.index.set_names(['statistic', 'sample'],\n inplace=True)\n slst.append(stdf)\n out = pd.concat(slst)\n\n if ablation_time:\n ats = self.ablation_times(samples=samples, subset=subset)\n ats['statistic'] = 'nanmean'\n ats.set_index('statistic', append=True, inplace=True)\n ats = ats.reorder_levels(['statistic', 'sample', 'rep'])\n\n out = out.join(ats)\n\n if filename is not None:\n out.to_csv(self.export_dir + '/' + filename)\n\n self.stats_df = out\n\n return out",
"def create_df(datadir: str, ext: str='txt') -> pd.DataFrame:\n\n datalist = []\n for name in os.listdir(datadir):\n filename = '/'.join([datadir, name])\n if os.path.isfile(filename) and ext in name[-len(ext):]:\n row_data = []\n content = read_file.read_file(filename)\n row_data.append(read_file.extract_name(content))\n row_data.append(read_file.extract_year(content))\n row_data.append(read_file.extract_form_factor(content))\n row_data.append(read_file.extract_max_power(content))\n row_data.append(read_file.extract_min_power(content))\n row_data.append(read_file.extract_cpu_speed(content))\n row_data.append(read_file.extract_core_num(content))\n for ind in range(10, 100, 10):\n row_data.append(read_file.extract_int_power(content, ind))\n datalist.append(row_data)\n\n return pd.DataFrame(data=datalist, columns=[\n 'Name', 'Year', 'FormFac', 'MaxPower', 'IdlePower', 'CPU speed',\n 'NumCores'\n ]+[''.join([str(ind), '%Power']) for ind in range(10, 100, 10)])",
"def make_stats_df(self):\n columns = ['DATE', 'TEAM', 'teamId', 'R', 'HR', 'RBI', 'SBN', 'OBP', \n 'K', 'QS', 'SV', 'ERA', 'WHIP', 'MOVES', 'CHANGE']\n trimmed_table = self.parse_soup(self.stats)\n self.df_stats = pd.DataFrame(trimmed_table, columns=columns) \n # load season standings csv from file\n try: # if it already exists\n df = pd.read_csv('2016_stats.csv', index_col=0)\n except OSError:\n df = pd.DataFrame(columns=columns) # if it doesn't already exist\n df = df.append(self.df_stats)\n df.to_csv('2016_stats.csv')",
"def create_df(zip_file: str, file_names: List):\n df = pd.DataFrame(file_names, columns=[\"File Path\"])\n df['File Path'] = df['File Path'].apply(lambda x: zip_file.split(\".\")[0] + \"/\" + x) \n df['Type of SNR'] = df['File Path'].apply(lambda x: x.split(\"/\")[1])\n df['Type of Machine'] = df['File Path'].apply(lambda x: x.split(\"/\")[2])\n df['Model Number'] = df['File Path'].apply(lambda x: x.split(\"/\")[3])\n df['Status'] = df['File Path'].apply(lambda x: x.split(\"/\")[4])\n df['File Name'] = df['File Path'].apply(lambda x: x.split(\"/\")[5])\n return df",
"def _get_liwc_df(self) -> pd.DataFrame:\n data = pd.read_csv(self.path)\n data.index = pd.to_numeric(data['Filename'].str.rstrip('.txt'))\n return data",
"def get_vetted_sample(self):\n list_of_files = glob.glob(self.final_path)\n latest_file = max(list_of_files, key=os.path.getctime)\n df = pd.read_csv(latest_file)\n return df",
"def pipe(cls, filename: str) -> pd.DataFrame:\n\n # initialise parser\n extractor = cls(filename)\n \n # get records\n raw_data = extractor.load_data()\n \n # extract relevant data\n extracted_data = extractor.extract_data(raw_data)\n\n return extracted_data",
"def get_data():\n\n size, intensity, age = [], [], []\n def calculate(data, data_top):\n \"\"\"Return age and the averages of size and intensity.\"\"\"\n size, intensity, age = np.array([data[\"Size\"]]), np.array([data[\"Intensity\"]]), data_top.iat[1,0]\n size_avg, intensity_avg = np.average(size), np.average(intensity)\n return size_avg, intensity_avg, age\n \n with os.scandir(\"imgdata/\") as files:\n for entry in files:\n data = pd.read_csv(entry, header=3, index_col=0)\n data_top = pd.read_csv(entry, index_col=0, nrows=2, header=None)\n result = calculate(data, data_top)\n size.append(result[0])\n intensity.append(result[1])\n age.append(result[2])\n return size, intensity, age",
"def get_file_df(filepath):\n dd = [json.loads(f) for f in open(filepath).readlines()]\n return pd.DataFrame(dd)"
] | [
"0.67464304",
"0.67458373",
"0.63676864",
"0.62926716",
"0.62889355",
"0.62432015",
"0.62411124",
"0.6119994",
"0.6097927",
"0.6080698",
"0.6067551",
"0.6028105",
"0.6010443",
"0.5999743",
"0.59316117",
"0.5918771",
"0.59170926",
"0.5911152",
"0.58809793",
"0.58537763",
"0.58514947",
"0.58356494",
"0.5822597",
"0.5817456",
"0.5801117",
"0.5770372",
"0.5754533",
"0.5734179",
"0.57340115",
"0.57338107"
] | 0.6960381 | 0 |
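
The create_df record above calls a helper mean_confidence_interval(values, confidence) that is not part of the record. A common way to provide it, shown here as an assumption about the intended behaviour rather than the author's actual helper, is a Student-t confidence interval around the sample mean, returning (mean, lower bound, upper bound) to match the unpacking m, m_min_h, m_plus_h.

import numpy as np
import scipy.stats

def mean_confidence_interval(data, confidence=0.95):
    # Student-t interval around the sample mean.
    a = np.asarray(data, dtype=float)
    n = len(a)
    m = np.mean(a)
    se = scipy.stats.sem(a)  # standard error of the mean
    h = se * scipy.stats.t.ppf((1 + confidence) / 2.0, n - 1)
    return m, m - h, m + h  # mean, lower CI bound, upper CI bound

# e.g. mean_confidence_interval([0.8, 1.1, 0.9, 1.2]) -> (1.0, ~0.71, ~1.29)
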
get the special conditions for a given x,y on the board. returns values based on context, or a tuple of multiple contexts. valid contexts are: "multiplier" (the int multiplier of said tile), "mod_type" (whether the multiplier is for the letter or the whole word), "bgcolor" (the bg color for the tile), "points" (the values relevant to scoring: (multiplier, mod_type)), and "all" (all of (multiplier, mod_type, bgcolor)) | def getTileModifier(x,y,context="points"):
ret = (1,None,"") # default
if x == 0 or x == 14:
if y == 0 or y == 7 or y == 14:
ret = (3,"word",TRIP_WORD_COLOR)
elif y == 3 or y == 11:
ret = (2,"ltr",DOUB_LTR_COLOR)
elif x == 1 or x == 13:
if y == 1 or y == 13:
ret = (2,"word",DOUB_WORD_COLOR)
elif y == 5 or y == 9:
ret = (3,"ltr",TRIP_LTR_COLOR)
elif x == 2 or x == 12:
if y == 2 or y == 12:
ret = (2,"word",DOUB_WORD_COLOR)
elif y == 6 or y == 8:
ret = (2,"ltr",DOUB_LTR_COLOR)
elif x == 3 or x == 11:
if y == 3 or y == 11:
ret = (2,"word",DOUB_WORD_COLOR)
elif y == 0 or y == 7 or y == 14:
ret = (2,"ltr",DOUB_LTR_COLOR)
elif x == 4 or x == 10:
if y == 4 or y == 10:
ret = (2,"word",DOUB_WORD_COLOR)
elif x == 5 or x == 9:
if y == 1 or y == 5 or y == 9 or y == 13:
ret = (3,"ltr",TRIP_LTR_COLOR)
elif x == 6 or x == 8:
if y == 2 or y == 6 or y == 8 or y == 12:
ret = (2,"ltr",DOUB_LTR_COLOR)
elif x == 7:
if y == 0 or y == 14:
ret = (3,"word",TRIP_WORD_COLOR)
elif y == 3 or y == 11:
ret = (2,"ltr",DOUB_LTR_COLOR)
elif y == 7:
ret = (2,"word",DOUB_WORD_COLOR)
else:
raise ScrabbleError("Impossible Tile!")
if context == "muliplier":
return ret[0]
elif context == "mod_type":
return ret[1]
elif context == "points":
return (ret[0],ret[1])
elif context == "bgcolor":
return ret[2]
elif context == "all":
return ret
else:
raise ScrabbleError("Invalid context in getTileModifier") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getGameState(self):\n row1 = [0, 0, 0]\n row2 = [0, 0, 0]\n row3 = [0, 0, 0]\n tilePosStatement = Statement()\n posTerm1 = Term('?x')\n posTerm2 = Term('?y')\n posTerm3 = Term('?tile')\n tilePosStatement.terms = (posTerm1, posTerm2, posTerm3)\n tilePosStatement.predicate = 'tilePos'\n for fact in self.kb.facts:\n if match(fact.statement, tilePosStatement):\n if fact.statement.terms[2] == Term(Constant('tile1')):\n term = 1\n if fact.statement.terms[2] == Term(Constant('tile2')):\n term = 2\n if fact.statement.terms[2] == Term(Constant('tile3')):\n term = 3\n if fact.statement.terms[2] == Term(Constant('tile4')):\n term = 4\n if fact.statement.terms[2] == Term(Constant('tile5')):\n term = 5\n if fact.statement.terms[2] == Term(Constant('tile6')):\n term = 6\n if fact.statement.terms[2] == Term(Constant('tile7')):\n term = 7\n if fact.statement.terms[2] == Term(Constant('tile8')):\n term = 8\n if fact.statement.terms[2] == Term(Constant('empty')):\n term = -1\n if fact.statement.terms[0] == Term(Constant('pos1')):\n col = 0\n elif fact.statement.terms[0] == Term(Constant('pos2')):\n col = 1\n elif fact.statement.terms[0] == Term(Constant('pos3')):\n col = 2\n if fact.statement.terms[1] == Term(Constant('pos1')):\n row1[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos2')):\n row2[col] = term\n\n elif fact.statement.terms[1] == Term(Constant('pos3')):\n row3[col] = term\n\n row1 = tuple(row1)\n row2 = tuple(row2)\n row3 = tuple(row3)\n result = (row1, row2, row3)\n return result\n\n ### Student code goes here",
"def get_evaluation():\n rightly_positioned = int(entryWidget_both.get())\n permutated = int(entryWidget_only_colours.get())\n return (rightly_positioned, permutated)",
"def all_subexpressions_with_context_information(e : Exp, context : Context, pool : Pool = RUNTIME_POOL) -> [(Exp, Context, Pool)]:\n return _Shredder(context, pool).visit(e)",
"def get_conditions(self):\n return (self.temp, self.humid)",
"def _evalContext(self):\n def xor(*args):\n return sum(args) == 1\n def neg(result):\n return not result\n context = {\n 'xor': xor,\n 'neg': neg\n }\n return context",
"def checkCondition(self, left_context, mod, right_context):\n if self.condition == \"\":\n return(True)\n else:\n if self.ruleType == self.TYPE_OL:\n keys = self.symParam\n values = mod.param \n elif self.ruleType == self.TYPE_L1L:\n keys = self.left_context.param + self.symParam\n values = left_context.param + mod.param \n elif self.ruleType == self.TYPE_R1L:\n keys = self.symParam + self.right_context.param\n values = mod.param + right_context.param\n elif self.ruleType == self.TYPE_2L:\n keys = self.left_context.param + self.symParam + self.right_context.param\n values = left_context.param + mod.param + right_context.param\n new_dict = dict(zip(keys, values)) \n return(self.condition.evaluate(new_dict))",
"def evaluate(t, x, y):\n r = np.sqrt(x**2 + y**2)\n return contrast * np.cos(kx_g*x + ky_g*y - w_g*t) * (1 - heaviside(r - patch_diameter*0.5))",
"def special_game_modes_memory_patterns(obs, player_x, player_y):\n def environment_fits(obs, player_x, player_y):\n \"\"\" environment fits constraints \"\"\"\n # if game mode is not normal\n if obs['game_mode'] != GameMode.Normal:\n return True\n return False\n \n def get_memory_patterns(obs, player_x, player_y):\n \"\"\" get list of memory patterns \"\"\"\n memory_patterns = [\n corner,\n free_kick,\n goal_kick,\n kick_off,\n penalty,\n throw_in,\n idle\n ]\n return memory_patterns\n \n return {\"environment_fits\": environment_fits, \"get_memory_patterns\": get_memory_patterns}",
"def evaluate(self, x, y, t=0):\n alpha, beta, gamma, y_weights, discount_factors, warmglow_type = \\\n self.get_attr(\n 'alpha', 'beta', 'gamma', 'y_weights', 'discount_factors', 'warmglow_type')\n # Marginals: power utility\n v_1 = x ** beta\n v_2 = y ** beta\n\n # Warm glow utility\n if warmglow_type in [\"constant\"]:\n warmglow = alpha\n elif warmglow_type in [\"linear\"]:\n warmglow = alpha * y\n else:\n raise NotImplementedError\n\n # Case distinction to avoid overflow error\n if (x == 0.0) & (y > 0.0):\n utils = warmglow + discount_factors[t] * y_weights[t] * v_2\n elif (x > 0.0) & (y == 0.0):\n utils = discount_factors[t] * v_1\n elif (x == 0.0) & (y == 0.0):\n utils = 0.0\n else:\n # Both x and y are positive:\n try:\n utils = ((v_1 ** gamma) + ((y_weights[t] * v_2) ** gamma)) ** (1.0 / gamma)\n utils = discount_factors[t] * utils\n # Add warm glow utility\n utils = utils + warmglow\n # Sometimes an overflow error occurs.\n except ArithmeticError:\n utils = HUGE_FLOAT\n\n return utils",
"def extract_rules(rules: Dict) -> Dict:\n\n \"\"\"Dictionary to return\"\"\"\n rules_ltl = {}\n\n if \"gridworld\" in rules:\n rules_ltl[\"gridworld\"] = []\n for elem, adjacent in rules[\"gridworld\"].items():\n ltl = \"G(\"\n ltl += elem.formula + \" -> X (\"\n ltl += \" | \".join([a.formula for a in adjacent])\n ltl += \"))\"\n variables = Variables()\n variables |= elem.variables\n for a in adjacent:\n variables |= a.variables\n rules_ltl[\"gridworld\"].append(LTL(formula=ltl, variables=variables, kind=\"gridworld\"))\n\n if \"context\" in rules:\n rules_ltl[\"context\"] = []\n if \"mutex\" in rules[\"context\"]:\n for mtx_elements in rules[\"context\"][\"mutex\"]:\n if len(mtx_elements) > 0:\n variables: Variables = Variables()\n ltl = \"G(\"\n for vs in mtx_elements:\n variables |= vs.variables\n mtx_elements_str = [n.formula for n in mtx_elements]\n clauses = []\n for vs_a in mtx_elements_str:\n clause = [deepcopy(vs_a)]\n for vs_b in mtx_elements_str:\n if vs_a is not vs_b:\n clause.append(Not(deepcopy(vs_b)))\n clauses.append(And(clause))\n ltl += Or(clauses)\n ltl += \")\"\n rules_ltl[\"context\"].append(LTL(formula=ltl, variables=variables, kind=\"context\"))\n\n if \"inclusion\" in rules[\"context\"]:\n for pre, post in rules[\"context\"][\"inclusion\"].items():\n variables = Variables()\n variables |= pre.variables | post.variables\n ltl = \"G((\" + pre.formula + \") -> (\" + post.formula + \"))\"\n rules_ltl[\"context\"].append(LTL(formula=ltl, variables=variables, kind=\"context\"))\n\n if \"context_gridworld\" in rules:\n rules_ltl[\"context_gridworld\"] = []\n for pre, post in rules[\"context_gridworld\"].items():\n variables = Variables()\n variables |= pre.variables | post.variables\n ltl = \"G((\" + pre.formula + \") -> (\" + post.formula + \"))\"\n rules_ltl[\"context_gridworld\"].append(LTL(formula=ltl, variables=variables, kind=\"context_gridworld\"))\n\n if \"constraints\" in rules:\n rules_ltl[\"constraints\"] = []\n if \"mutex\" in rules[\"constraints\"]:\n for mtx_elements in rules[\"constraints\"][\"mutex\"]:\n if len(mtx_elements) > 0:\n variables: Variables = Variables()\n ltl = \"G(\"\n for vs in mtx_elements:\n variables |= vs.variables\n mtx_elements_str = [n.formula for n in mtx_elements]\n clauses = []\n for vs_a in mtx_elements_str:\n clause = [deepcopy(vs_a)]\n for vs_b in mtx_elements_str:\n if vs_a is not vs_b:\n clause.append(Not(deepcopy(vs_b)))\n clauses.append(And(clause))\n ltl += Or(clauses)\n ltl += \")\"\n rules_ltl[\"constraints\"].append(\n LTL(formula=ltl, variables=variables, kind=\"constraints\"))\n\n if \"inclusion\" in rules[\"constraints\"]:\n for pre, post in rules[\"constraints\"][\"inclusion\"].items():\n variables = Variables()\n variables |= pre.variables | post.variables\n ltl = \"G((\" + pre.formula + \") -> (\" + post.formula + \"))\"\n rules_ltl[\"constraints\"].append(\n LTL(formula=ltl, variables=variables, kind=\"constraints\"))\n\n return rules_ltl",
"def getGameState(self):\n ### Student code goes here\n row1 = ()\n row2 = ()\n row3 = ()\n for currRow in range(1,4):\n for currCol in range(1,4):\n tileFound = False\n for fact in self.kb.facts:\n if fact.statement.predicate == \"located\":\n tile = fact.statement.terms[0].term.element\n column = fact.statement.terms[1].term.element\n row = fact.statement.terms[2].term.element\n\n tileNumber = int(tile[-1])\n columnNumber = int(column[-1])\n rowNumber = int(row[-1])\n\n if rowNumber == currRow and columnNumber == currCol:\n tileFound = True\n if rowNumber == 1:\n row1 += tuple([tileNumber])\n elif rowNumber == 2:\n row2 += tuple([tileNumber])\n elif rowNumber == 3:\n row3 += tuple([tileNumber])\n \n break\n\n if not tileFound:\n if currRow == 1:\n row1 += tuple([-1])\n elif currRow == 2:\n row2 += tuple([-1])\n elif currRow == 3:\n row3 += tuple([-1])\n\n\n return (row1, row2, row3)",
"def _get_coordinates(x,y,z):\n\t\ttemp = Board.board\n\t\ttemp1 = temp=='M'\n\t\tfor i in range(6,x):\n\t\t\tfor j in range(y,z):\n\t\t\t\tif(temp1[i][j]==True):\n\t\t\t\t\tcurrent_x = i\n\t\t\t\t\tcurrent_y = j\n\n\t\treturn current_x,current_y",
"def equation_level_calculation(element_x, element_y):\n a = 4 * pow(element_x, 2) + pow(element_y, 2)\n b = 3 * pow(element_x, 2) + pow(element_y, 2)\n c = 3 * pow(element_x, 2) - pow(element_y, 2) if element_x > element_y else None\n return [a, b, c]",
"def tile_calculation(xi, yi, axi, ayi, positions, weights):\n for j in range(cuda.blockDim.x):\n xj = positions[j,0]\n yj = positions[j,1]\n wj = weights[j]\n axi, ayi = body_body_interaction(xi, yi, xj, yj, wj, axi, ayi)\n return axi, ayi",
"def processStimulus(self, observation):\n if observation == \"red\":\n return (1, 0, 0), (1, 0, 0)\n if observation == \"green\":\n return (0, 1, 0), (0, 1, 0)\n if observation == \"blue\":\n return (0, 0, 1), (0, 0, 1)",
"def calcAllIntensities(self, xc, yc):\n\n tp = 0.0\n ix = 0\n iy = 0\n h = 0\n ints = np.zeros([5, 5])\n ints_inner = np.zeros([5, 5])\n # ints = [[0.0] * 5] * 5\n # ints_inner = [[0.0] * 5] * 5\n x = 0.0\n y = 0.0\n xc1 = 0.0\n yc1 = 0.0\n xc1 = xc\n yc1 = yc\n \n for h in np.arange(1,5,1):\n for k in np.arange(1,5,1):\n ints[h][k] = 0.0\n ints_inner[h][k] = 0.0\n\n for k in np.arange(0, 2, 1):\n for h in np.arange(0, 2, 1):\n for ix in np.arange(0, self.stepp + 1, 1):\n for iy in np.arange(0, self.stepp + 1, 1):\n #print(k, h, ix, iy)\n if self.qc_format == 0 :\n x = -(1 + self.G) + h * (1 + 2 * self.G) + (ix * (1.0 / self.stepp))\n y = -(1 + self.G) + k * (1 + 2 * self.G) + (iy * (1.0 / self.stepp))\n if self.spot_radius == 0 or math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)) == 0 :\n tp = 0.0\n else :\n tp = (math.sin((1 / self.spot_radius) * math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)))) / ((1 / self.spot_radius) * math.sqrt(math.pow((x - xc1),2) + math.pow((y - yc1),2)))\n tp = math.pow(tp,2)\n #print(tp)\n elif self.qc_format == 1 :\n x = -1 + h + (ix * (1 / self.stepp))\n y = -1 + k + (iy * (1 / self.stepp))\n ints[h + 1][k + 1] += math.pow(math.exp((math.pow((x - xc1),2) + math.pow((y - yc1),2) ) / math.pow(self.spot_radius,2)), -1)\n if (self.spot_radius * self.spot_radius) == 0 or ((x - xc1) * (y - yc1) * np.pi * np.pi) == 0 :\n tp = 0.0\n else :\n tp = (math.sin((x - xc1) * np.pi / self.spot_radius) * math.sin((y - yc1) * np.pi / self.spot_radius)) / (((x - xc1) * (y - yc1) * np.pi * np.pi) / (self.spot_radius * self.spot_radius))\n\n if (math.pow(x,2) + math.pow(y,2)) <= math.pow(self.radius_inner,2):\n ints_inner[h + 1][k + 1] += tp\n else :\n if self.qc_format == 1 :\n if (math.pow(x,2) + math.pow(y,2)) <= math.pow(self.cell_qc, 2):\n ints[h + 1][k + 1] += tp\n if (math.pow(x,2) + math.pow(y,2)) <= 1 :\n #print(math.pow(x,2) + math.pow(y,2))\n ints[h + 1][k + 1] += tp\n # print(ints[h + 1][k + 1])\t\t\t\t\t\t\n tp = 0.0\n\n # print(ints)\n\n Aq = 0.0\n Bq = 0.0\n Cq = 0.0\n Dq = 0.0\n Ac_inner = 0.0\n Bc_inner = 0.0\n Cc_inner = 0.0\n Dc_inner = 0.0\n Ac = 0.0\n Bc = 0.0\n Cc = 0.0\n Dc = 0.0\n Ac = ints[1][2]\n Bc = ints[2][2]\n Cc = ints[2][1]\n Dc = ints[1][1]\n\n Ac_inner = ints_inner[1][2]\n Bc_inner = ints_inner[2][2]\n Cc_inner = ints_inner[2][1]\n Dc_inner = ints_inner[1][1]\n Ac *= self.QE\n Bc *= self.QE\n Cc *= self.QE\n Dc *= self.QE\n\n Ac_inner *= self.QE_inner\n Bc_inner *= self.QE_inner\n Cc_inner *= self.QE_inner\n Dc_inner *= self.QE_inner\n Ac += Ac_inner\n Bc += Bc_inner\n Cc += Cc_inner\n Dc += Dc_inner\n\n Aq = Ac\n Bq = Bc\n Cq = Cc\n Dq = Dc\n\n #tp/TP = cotribution percentage of the spot with respect to max (spot center)\n if self.smooth == 0 :\n if (Config.hplk_c0_e * self.TP) == 0 :\n cnst = 0\n else :\n cnst = ((Parameters.TPS / (self.n_ml * self.n_ml)) * self.lamb) / (Config.hplk_c0_e * self.TP) #Número de fótons efeticos\n if Config.flag_spice == 1 :\n Ac *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP) #W\n Bc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Cc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Dc *= Parameters.TPS / (self.n_ml * self.n_ml * self.TP)\n Ac *= 1 / (math.pow(self.cell_qc * 1e-6,2)) #W/(m^2)\n Bc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n Cc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n Dc *= 1 / (math.pow(self.cell_qc * 1e-6,2))\n #Ac *= 1 / (self.lamb * 1e6); #Adequação da irradiância para a unidade W/m2micm conforme necessário no SPICE\n #Bc *= 1 / (self.lamb * 1e6);\n #Cc *= 
1 / (self.lamb * 1e6);\n #Dc *= 1 / (self.lamb * 1e6);\n \n ############################## DOUBLE CHECK ##############################\n # self.grava_arquivos = 1\n # self.flag_V_QC = 0\n # grava_le_arquivos(0) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n # self.flag_V_QC = 1\n # self.grava_arquivos = 0\n ############################## DOUBLE CHECK ##############################\n Aq *= cnst * 1e9\n Bq *= cnst * 1e9\n Cq *= cnst * 1e9\n Dq *= cnst * 1e9\n else :\n Aq *= cnst * 1e9\n Bq *= cnst * 1e9\n Cq *= cnst * 1e9\n Dq *= cnst * 1e9\n\n # 'returns' all the intensities\n self.A_intensity = Aq\n self.B_intensity = Bq\n self.C_intensity = Cq\n self.D_intensity = Dq",
"def getGameState(self):\n ### Student code goes here\n row1 = [-1, -1, -1]\n row2 = [-1, -1, -1]\n row3 = [-1, -1, -1]\n for i in self.kb.kb_ask(parse_input(\"fact: (pos ?t ?px ?py\")):\n if str(i.bindings_dict['?t'])=='empty':\n t = -1\n else:\n t = int(i.bindings_dict['?t'][4])\n xpx = int(i.bindings_dict['?px'][3])\n xpy = int(i.bindings_dict['?py'][3])\n if xpy == 1:\n row1[xpx-1] = t\n elif xpy == 2:\n row2[xpx-1] = t\n elif xpy == 3:\n row3[xpx-1] = t\n return tuple((tuple(row1),tuple(row2),tuple(row3)))",
"def penalty(obs, player_x, player_y):\n def environment_fits(obs, player_x, player_y):\n \"\"\" environment fits constraints \"\"\"\n # it is penalty game mode\n if obs['game_mode'] == GameMode.Penalty:\n return True\n return False\n \n def get_action(obs, player_x, player_y):\n \"\"\" get action of this memory pattern \"\"\"\n if (random.random() < 0.5 and\n Action.TopRight not in obs[\"sticky_actions\"] and\n Action.BottomRight not in obs[\"sticky_actions\"]):\n return Action.TopRight\n else:\n if Action.BottomRight not in obs[\"sticky_actions\"]:\n return Action.BottomRight\n return Action.Shot\n \n return {\"environment_fits\": environment_fits, \"get_action\": get_action}",
"def filtered_xyz(self) -> tuple[int, int, int]:",
"def get(self, x, y):\n if (x < 0 or x > self.width-1) or (y < 0 or y > self.height-1):\n return (mapfeatures.Void(), [])\n cell_entities = list(filter(lambda e: e.x == x and e.y == y, self._entities))\n return (self._mapfeatures[y][x], cell_entities)",
"def _get_context_dicts(self, self_side, rule_side, possible_self_patterns, context_variables):\n # For each pattern + dicts\n new_subst_dicts = list()\n for possible_pattern in possible_self_patterns:\n # possible_self_patterns is the return of the previous method. It contains something like:\n # ((1, 2, 5), [{'A': B, 'B': C}])\n pattern_indexes = possible_pattern[0]\n pattern_possible_subst_dicts = possible_pattern[1]\n\n # Walk through the rule until we find a formula (or the last element of the rule)\n prev_rule_context = []\n rule_elem_counter = -1\n rule_formula_number = -1\n instance_corresponding_index = None # Will remain None only if rule_side has no formulae\n exit_pattern = False\n for rule_elem in rule_side:\n rule_elem_counter += 1\n last_element = rule_elem_counter == len(rule_side) - 1 # True if it is the last element, False otherw\n\n # Context variable\n if rule_elem in context_variables:\n prev_rule_context.append(rule_elem)\n # Formula\n else:\n rule_formula_number += 1\n\n # There is context to assign and we reach a formula or the end of the rule\n if prev_rule_context and (rule_elem not in context_variables or last_element):\n # We now need to determine the part of the instance that corresponds to prev_rule_context\n # For example, in rule = [Γ, Δ, A, B, Σ], instance=[Γ, A, B, Δ, Σ], pattern=[(1, 2), ...]\n # we will stop at A and have prev_rule_context=[Γ, Δ]. Given that, according to pattern, the A\n # of the rule is the A of the instance, we need to get that, in the instance, [Γ] corresponds\n # to prev_rule_context (i.e. [Γ, Δ]).\n # Same in the end, we need to establish that Σ in the rule corresponds to [Δ, Σ] in the instance\n\n # Formula\n if rule_elem not in context_variables:\n instance_corresponding_index = pattern_indexes[rule_formula_number]\n # Will not be updated if last element and not formula\n\n # First formula\n if rule_elem not in context_variables and rule_formula_number == 0:\n instance_context = self_side[0:instance_corresponding_index]\n # Last element and not formula\n elif rule_elem in context_variables and last_element:\n # Last element and there is no previous formula instance (i.e. the rule has no formulae)\n if instance_corresponding_index is None:\n instance_context = self_side\n else:\n instance_context = self_side[instance_corresponding_index+1:]\n # Formula that came after a previous formula\n else:\n prev_instance_index = pattern_indexes[rule_formula_number-1]\n instance_context = self_side[prev_instance_index+1:instance_corresponding_index]\n\n # We now have, for example, that the rule's [Γ, Δ] corresponds to the instance's [Σ, A]\n # We need to determine every possible way of assigning the right elements to the left vars\n # i.e. 
{Γ:[Σ, A], Δ:[]}, {Γ:[Σ], Δ:[A]} and {Γ:[], Δ:[Σ, A]} and see if they violate the prev\n # established constraints in possible_dict\n new_pattern_possible_subst_dicts = list()\n for possible_subst_dict in pattern_possible_subst_dicts:\n # Each possible correspondance dict has something like\n # {Γ:[Σ, A], Δ:[]} or {Γ:[Σ], Δ:[A]} or {Γ:[], Δ:[Σ, A]}\n # We need to se if each possible assignment is compatible with each of the prev subst dicts\n possible_correspondances_iterator = self._get_context_distribs_iterator(prev_rule_context,\n instance_context)\n for possible_correspondance_dict in possible_correspondances_iterator:\n new_subst_dict = copy(possible_subst_dict)\n compatible = True\n for context_var in possible_correspondance_dict:\n # No previous restriction\n if context_var not in possible_subst_dict:\n new_subst_dict[context_var] = possible_correspondance_dict[context_var]\n # There is a previous incompatible restriction\n elif possible_subst_dict[context_var] != possible_correspondance_dict[context_var]:\n compatible = False\n break\n\n if compatible:\n new_pattern_possible_subst_dicts.append(new_subst_dict)\n\n # Since we may continue looking at this pattern, (e.g in rule [Gamma, A, Delta, B])\n # we reach here two times, in A and B), replace the old possibilities with the new ones\n if new_pattern_possible_subst_dicts:\n pattern_possible_subst_dicts = copy(new_pattern_possible_subst_dicts)\n # If no possibility, exit the pattern\n else:\n exit_pattern = True\n break # This breaks out of the for rule_elem loop\n\n # Once you found a formula and did all the above, empty the previous context for the next formula\n prev_rule_context = []\n\n # Here we have finished looking at all the rule's elements.\n # See if we have any pattern_possible_subst_dicts and append them to new_subst_dicts (to return)\n if pattern_possible_subst_dicts and not exit_pattern:\n new_subst_dicts.extend(pattern_possible_subst_dicts)\n\n # Here we have finished looking at all the patterns\n return new_subst_dicts",
"def pixel_value(self, x, y, c1, c2, i1, i2, val1, val2, F1, F2, l1, l2, mask='FQPM'):\r\n x1, y1 = c1\r\n x1 += 0.5\r\n y1 += 0.5\r\n\r\n a1 = np.sqrt(x1**2+y1**2)\r\n x2, y2 = c2\r\n x2 += 0.5\r\n y2 += 0.5\r\n a2 = np.sqrt(x2**2+y2**2)\r\n r1 = np.sqrt((x1-x)**2 + (y1-y)**2) # doesn't have to be an integer\r\n r2 = np.sqrt((x2-x)**2 + (y2-y)**2)\r\n k1_airy = self.airy(r1, F1, l1, i1)\r\n k2_airy = self.airy(r2, F2, l2, i2)\r\n norm_airy = k1_airy + k2_airy\r\n k1_airy /= norm_airy\r\n k2_airy /= norm_airy\r\n if mask == 'FQPM':\r\n val_airy = k1_airy*self.four_qs(x, y, c1, val1, val2) + \\\r\n k2_airy*self.four_qs(x, y, c2, val1, val2)\r\n return val_airy\r\n elif mask == 'EOPM':\r\n val_airy = k1_airy*self.eight_octants(x, y, c1, val1, val2) + \\\r\n k2_airy*self.eight_octants(x, y, c2, val2, val1)\r\n return val_airy",
"def _side_context_formulae(side, language):\n return [x for x in side if x in language.context_variables]",
"def _get_rule_pattern(sequent_side, context_variables):\n pattern = list()\n left_context = False\n right_context = False\n together = list()\n # This is for the together, see below\n prev_context = False\n\n for member_index in range(len(sequent_side)):\n # Context variable\n if sequent_side[member_index] in context_variables:\n prev_context = True\n if member_index == 0:\n left_context = True\n if member_index == len(sequent_side) - 1:\n right_context = True\n\n # Non-context variable (formula)\n else:\n pattern.append(sequent_side[member_index])\n if not prev_context and len(pattern) > 1:\n together.append((len(pattern) - 2, len(pattern) - 1)) # last index, prev to last index of pattern\n prev_context = False\n\n return pattern, left_context, right_context, together",
"def batch_sample_context(self):\n c_beta = self.beta[self.iter]\n \n # sample the context of each trial\n for i in xrange(self.total_trial):\n c_contexts = self.context[self.iter]\n c_pos = self.data[i]['pos']\n context_dict = self.make_context_dict(c_contexts, excluded = i)\n context_grid = context_dict.keys()\n context_grid.append(self.smallest_unused_label(context_grid))\n context_p_grid = np.empty(len(context_grid))\n\n for context in context_grid:\n try: \n context_size = len(context_dict[context])\n prior = context_size / (self.total_trial + self.alpha)\n likelihood = (context_dict[context].count(c_pos) + c_beta) \\\n / (context_size + self.support_size * c_beta)\n except KeyError:\n prior = self.alpha / (self.total_trial + self.alpha)\n likelihood = 1.0 / self.support_size\n \n context_p_grid[context_grid.index(context)] = prior * likelihood\n \n context_p_grid /= sum(context_p_grid)\n #print('pos:', c_pos)\n #print(context_grid)\n #print(context_p_grid)\n #raw_input()\n self.context[self.iter, i] = sample(context_grid, context_p_grid)\n\n return True",
"def cond_gen(self, context):\n dist = {}\n for event in self.alphabet:\n dist[event] = self.cond_prob(event, context)\n return sample_categorical(dist)",
"def gets_discount(x, y):\n \"*** YOUR CODE HERE ***\"\n return (x <= 12 and y >=65) or (x >=65 and y <= 12)",
"def calcCondition(edge, x1, y1, x2, y2, left, right, top, bottom):\n\n stat1 = insideWindow(edge, x1, y1, left, right, top, bottom)\n stat2 = insideWindow(edge, x2, y2, left, right, top, bottom);\n\n if(not stat1 and stat2):\n return 1;\n if(stat1 and stat2):\n return 2;\n if(stat1 and not stat2):\n return 3;\n if(not stat1 and not stat2):\n return 4;\n return 0 #never executed",
"def patch_context(data, i, j, k, r):\n idxs = (np.array([i+r,i-r,i,i,i,i]),\n np.array([j,j,j+r,j-r,j,j]),\n np.array([k,k,k,k,k+r,k-r]))\n ctx = data[idxs]\n return ctx",
"def other_memory_patterns(obs, player_x, player_y):\n def environment_fits(obs, player_x, player_y):\n \"\"\" environment fits constraints \"\"\"\n return True\n \n def get_memory_patterns(obs, player_x, player_y):\n \"\"\" get list of memory patterns \"\"\"\n memory_patterns = [\n idle\n ]\n return memory_patterns\n \n return {\"environment_fits\": environment_fits, \"get_memory_patterns\": get_memory_patterns}"
] | [
"0.53613746",
"0.51822996",
"0.51312876",
"0.5097705",
"0.50885594",
"0.50869316",
"0.4989999",
"0.49350446",
"0.49140304",
"0.49015668",
"0.47930938",
"0.47618896",
"0.47190344",
"0.47127545",
"0.46902323",
"0.46751982",
"0.46719325",
"0.46577978",
"0.46197823",
"0.46173385",
"0.46082637",
"0.46069846",
"0.46023634",
"0.45963773",
"0.45901746",
"0.457231",
"0.45683974",
"0.4564296",
"0.4559413",
"0.455348"
] | 0.6842048 | 0 |
for each x,y you place, spider horiz + vertically to find all the words. Word scores are calculated tile by tile. returns (score,placed_tiles) for the move or False,False for failure | def checkWords(x,y,across_or_down,word,first_move):
print "checking all crosswords..."
tile_coords=list() # tiles in the word you placed (includes tiles you're connecting to)
placed_tile_coords=list() # tiles you personally placed (excludes tiles you're connecting to)
x2 = x
y2 = y
word_size = len(word)-1
if across_or_down == "A":
x2 = x + word_size
else:
y2 = y + word_size
char_counter = 0
did_append_to_existing_word=False
for x_index in xrange(x,x2+1):
for y_index in xrange(y,y2+1):
if board[x_index][y_index] == None:
tile_coords.append((x_index,y_index))
else:
tile_coords.append("pass")
did_append_to_existing_word=True #using an existing tile
if not did_append_to_existing_word:
print "A word played must be attached to another word somehow."
return False,False
for new_tile in tile_coords:
if new_tile == "pass":
char_counter+=1
continue
else:
board[new_tile[0]][new_tile[1]] = Tile(word[char_counter])
placed_tile_coords.append(new_tile)
char_counter += 1
points=0
found_words = spider(placed_tile_coords,across_or_down)
if len(placed_tile_coords) == 7: # you placed your whole rack! bingo! 50pt bonus! you go glen coco!
points += 50
print found_words
for word in found_words:
if not word.is_valid():
print word.word_str+" is not a valid scrabble word."
stripTiles(placed_tile_coords) #take your (temp) tiles back from the board
return False,False
else:
points+=(word.pts*word.word_multi)
placed_tiles = list() #tiles to remove from yout rack
for coord in placed_tile_coords:
placed_tiles.append(board[coord[0]][coord[1]])
return points,placed_tiles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def play_word(self):\n new_tiles = {}\n placed_tiles_computer = []\n\n # from the rack, get the word to be played, and add the score\n computer_rack = self.rack\n self.board.generate_moves(computer_rack)\n # tiles that make up the word in the board\n word_tiles = self.board.best_move_cell()\n self.board.cross_checks_sums(word_tiles)\n self.score += self.board.compute_score(word_tiles)\n self.board.placed_cell_cleanup(word_tiles)\n\n # from the tiles of the word formed on the board, retrieves the indices\n # of which tiles were the ones that were played from the rack\n for tile in word_tiles:\n for i in range(len(computer_rack)):\n if tile.letter == computer_rack[i]:\n if i in placed_tiles_computer:\n continue\n placed_tiles_computer.append(i)\n break\n\n # gets new tiles from the bag for every tile that was played\n for t in placed_tiles_computer:\n new_tile = self.board.draw_random_tile()\n if new_tile != None:\n new_tiles[t] = new_tile\n self.rack[t] = new_tiles[t]\n else:\n return False\n return True",
"def play_word(self):\n tiles_played = []\n\n for t in self.placed_tiles:\n row = self.placed_tiles[t][1][0]\n col = self.placed_tiles[t][1][1]\n cell = self.board.board[row][col]\n tiles_played.append(cell)\n\n # convert the tiles played to the whole word that the tiles made on the board\n word_played = self.board.convert_cells_played(tiles_played)\n\n # checks if the word is valid\n if self.board.check_valid(word_played):\n self.board.cross_checks_sums(word_played)\n self.score += self.board.compute_score(word_played)\n self.board.placed_cell_cleanup(word_played)\n\n return True\n else:\n return False",
"def walk_board(self):\n words = set()\n\n # Walk Left to right, up to down\n for x in range(0, self.DIMENSIONS[0] - 1):\n for y in range(0, self.DIMENSIONS[1] - 1):\n tile = self.grid[x][y]\n if tile:\n\n # Checking if a start of a word\n if self.grid[x][y + 1]:\n\n # If we're already half way through a word don't do anything\n if not self.grid[x][y - 1]:\n words.add(self.get_full_word_for_tile(tile, \"right\"))\n\n if self.grid[x + 1][y]:\n\n # If we're already half way through a word don't do anything\n if not self.grid[x - 1][y]:\n words.add(self.get_full_word_for_tile(tile, \"down\"))\n\n return words",
"def crosswordPuzzle(crossword, words):\n words = words.split(';')\n n_rows = len(crossword)\n lines = []\n columns = []\n row = 0\n twisted = twistgrid(crossword)\n while row < n_rows:\n lines.append(list(re.finditer(r'(-){2,}', crossword[row])))\n columns.append(list(re.finditer(r'(-){2,}', twisted[row])))\n row += 1\n # lines and columns are a list of lists; both are n_rows long.\n # If there are any matches in the column or row, they will appear\n # as a match object in one of the sublists\n # A match requires 2 or more blank spaces consecutively +--+\n # A single blank is not a word / match\n # print('lines', lines)\n # print(columns)\n row_words = []\n col_words = []\n blank_lengths = []\n for irow, matches in enumerate(lines):\n row_words.extend([((irow, x.span()[0]),\n (irow, x.span()[1] - 1)) for x in matches])\n # row_words is a list of 2-tuples, which have the coord of\n # start and end of blank space for a word\n # ex: ((3, 0), (3, 4)) represents a space at (row 3, col 0),\n # which extends 5 blank space to terminate on (row 3, col 4)\n # .extend method is used because we could have more than one\n # match object per line and append method only takes one\n # argument\n blank_lengths.extend([x.span()[1] - x.span()[0] for x in matches])\n # blank_lengths is a list that counts how long each of the row\n # words is, going top to bottom in grid.\n # a match object's span()[1] coordinate is one greater than the\n # last index of the match, so subtracting gives the length\n\n for icol, matches in enumerate(columns):\n col_words.extend([((x.span()[0], icol),\n (x.span()[1] - 1, icol)) for x in matches])\n blank_lengths.extend([x.span()[1] - x.span()[0] for x in matches])\n\n intersections = {'row_words': [], 'col_words': []}\n n_intersections = 0\n # we want coordinates of where the intersections occur of the row\n # words with column words to verify a matching letter later on\n # intersections is a dictionary of two lists and each list contains\n # 2-tuples:\n # (p, q) p = index of row_words or col_words\n # q = index of row_words[p] that corresponds to the letter\n # in the word that needs to be verified\n\n for i, rword in enumerate(row_words):\n for j, cword in enumerate(col_words):\n if rword[0][0] >= cword[0][0] and rword[0][0] <= cword[1][0] and\\\n cword[0][1] >= rword[0][1] and cword[0][1] <= rword[1][1]:\n # rword[0][0] is row of rword, which needs to fall\n # within\n # the range of the column word in order to intersect\n # cword[0][1] is column row of cword, which needs to\n # fall within of the row word in order to intersect\n intersections['row_words'].append((i,\n cword[0][1] - rword[0][1]))\n intersections['col_words'].append((j,\n rword[0][0] - cword[0][0]))\n n_intersections += 1\n\n guesses = list(permutations(words))\n # there might be better way to choose ways to check potential solutions\n # but max # words in test cases was 6, so 6x5x4x3x2 = 720 guesses\n right_length = []\n for i, guess in enumerate(guesses):\n if all((len(guess[k]) == blank_lengths[k] for k in range(len(guess)))):\n # Only pass guesses where every single word fits in the blank.\n # Rows are filled first, then columns\n right_length.append(guess)\n\n for k, guess in enumerate(right_length):\n row_intersections = []\n col_intersections = []\n i = 0\n while col_intersections == row_intersections and i < n_intersections:\n word = guess[intersections['row_words'][i][0]]\n letter_ind = intersections['row_words'][i][1]\n row_intersections.append(word[letter_ind])\n\n # now find letters of columns that 
are intersections\n word = guess[intersections['col_words'][i][0] + len(row_words)]\n # need offset because the first words in guess fall into rows\n letter_ind = intersections['col_words'][i][1]\n col_intersections.append(word[letter_ind])\n i += 1\n if col_intersections == row_intersections:\n # print(f'Intersections match for guess {guess}')\n break # don't keep changing guess even after you found fit\n print(f'Found correct guess on search {k+1} out of {len(guesses)} choices')\n\n # Now let's insert solution into grid, replacing the blank spaces '-'\n # with the words\n # To mutate elements of crossword, we need to mutate it\n # so we need to turn it into list.\n # We also need to transpose it to fill in the columns, then transpose\n # it back to normal orientation\n\n out = [list(row) for row in crossword]\n for i, word in enumerate(row_words):\n out[word[0][0]][word[0][1]:word[1][1] + 1] = list(guess[i])\n\n out = [''.join(row) for row in out]\n out = twistgrid(out)\n\n out = [list(row) for row in out]\n for i, word in enumerate(col_words):\n out[word[0][1]][word[0][0]:word[1][0] + 1] = list(guess[i + len(row_words)])\n # the indexing is different from row above because we need\n # to update the range of rows that column word occupies\n # need offset because row words come first in guesses list\n out = [''.join(row) for row in out]\n out = twistgrid(out)\n out = [''.join(row) for row in out]\n print('\\n'.join(out))\n\n return out",
"def main(self):\n grid = self.make_game_grid()\n print(self.grid_size, ' by ', self.grid_size, 'grid')\n trie = self.retrieve_trie()\n if not trie:\n trie = self.read_in_file(self.file)\n self.persist_trie(trie)\n\n all_possible_words = []\n # left to right rows\n all_possible_words = all_possible_words + self.search_columns_and_rows(grid)\n # right to left rows\n all_possible_words = all_possible_words + self.search_columns_and_rows(grid, reverse=True, transpose=False)\n # left to right columns\n all_possible_words = all_possible_words + self.search_columns_and_rows(grid, reverse=False, transpose=True)\n # right to left columns\n all_possible_words = all_possible_words + self.search_columns_and_rows(grid, reverse=True, transpose=True)\n\n # handle all possible sun sets of the array row\n all_possible_words = self.all_words(all_possible_words)\n # get diagonal letters top to bottom\n all_possible_words = all_possible_words + self.get_diagonal_words(grid)\n # get diagonal letters bottom to top\n all_possible_words = all_possible_words + self.get_diagonal_words(grid, reverse=True)\n ans = self.check_words_in_trie(trie, all_possible_words)\n self.sorted_words = sorted(ans, key=len)\n if self.sorted_words:\n print(\"The number of words in the solution is: %s.\" % (len(ans),))\n print(\"The shortest word in the solution is: %s.\" % (self.sorted_words[0],))\n print(\"The longest word in the solution is: %s.\" % (self.sorted_words[-1],))\n print('the possible words in this grid are ', self.sorted_words)\n return self.sorted_words",
"def detect_word_overlap(self, direction, old_word, new_word):\r\n #get the location of the old_word\r\n old_move_data = self.myboard.find_specific_word(old_word) #all the instances of the old word.\r\n print(\"This is the data collected {}\".format(old_move_data))\r\n print(\"New word to place is {}\".format(new_word))\r\n\r\n for move_data in old_move_data:\r\n \r\n x = move_data[1]\r\n y = move_data[2]\r\n limit = len(old_word)\r\n old_word_duplicate = \"\"\r\n\r\n if self.myboard.board_empty:\r\n return True #Special case: board is empty, so we can use all squares.\r\n \r\n results = []\r\n if direction == \"horizontal\":\r\n row = x\r\n for col in range(y, y + limit):\r\n cur_letter = self.myboard[row][col].curTile.letter\r\n if cur_letter is None:\r\n if len(old_word_duplicate) > 0:\r\n results.append(old_word_duplicate)\r\n old_word_duplicate = \"\"\r\n \r\n else:\r\n old_word_duplicate += cur_letter\r\n\r\n results.append(old_word_duplicate) # save last value\r\n for prev_word in results:\r\n if (len(prev_word) > 0 and new_word.startswith(prev_word) or new_word.endwith(prev_word)):\r\n return True\r\n\r\n old_word_duplicate = \"\"\r\n full_results = []\r\n if direction == \"vertical\":\r\n col = y \r\n for row in range(x, x + limit):\r\n cur_letter = self.myboard[row][col].curTile.letter\r\n if cur_letter is None:\r\n if len(old_word_duplicate) > 0:\r\n results.append(old_word_duplicate)\r\n old_word_duplicate = \"\"\r\n else:\r\n old_word_duplicate += cur_letter\r\n\r\n \r\n results.append(old_word_duplicate) # save last value\r\n for prev_word in results:\r\n if (len(prev_word) > 0 and new_word.startswith(prev_word) or new_word.endwith(prev_word)):\r\n return True\r\n \r\n return False",
"def _check_valid_words(self) -> Set[Tuple[int, int]]:\n invalid_points = set()\n # Check across each row\n for row in range(self.board_size):\n current_word = \"\"\n for col in range(self.board_size):\n tile = self.board[row][col]\n # If the position is blank, it's time to check\n if tile is None:\n # If we have a current word of length more than 1, check its validity\n if len(current_word) > 1:\n # If the word is not valid, add the points to the list of invalid points\n if not self.word_manager.is_word(current_word):\n for i in range(len(current_word)):\n invalid_points.add((row, col - 1 - i))\n # Now that we are done with our checks, we clear the current word to continue our search\n current_word = \"\"\n else:\n current_word += tile\n\n # The current word could go to the end of the board so we need to do an additional check\n if not self.word_manager.is_word(current_word):\n for i in range(len(current_word)):\n invalid_points.add((row, self.board_size - 1 - i))\n\n # Check down each column\n for col in range(self.board_size):\n current_word = \"\"\n for row in range(self.board_size):\n tile = self.board[row][col]\n # If the position is blank, it's time to check\n if tile is None:\n # If we have a current word of length more than 1, check its validity\n if len(current_word) > 1:\n # If the word is not valid, add the points to the list of invalid points\n if not self.word_manager.is_word(current_word):\n for i in range(len(current_word)):\n invalid_points.add((row - 1 - i, col))\n # Now that we are done with our checks, we clear the current word to continue our search\n current_word = \"\"\n else:\n current_word += tile\n\n # The current word could go to the end of the board so we need to do an additional check\n if not self.word_manager.is_word(current_word):\n for i in range(len(current_word)):\n invalid_points.add((self.board_size - 1 - i, col))\n\n return invalid_points",
"def play_move(self, move_data, all_english_words):\r\n \r\n #first, make copy of board and try to apply move there.\r\n board_cpy = copy.deepcopy(self.myboard)\r\n \r\n rack_cpy = copy.deepcopy(self.rack) #***need to remove appropriate words from rack after we've made a move. \r\n\r\n i = 0 \r\n for cur_char in move_data[0]:\r\n new_tile = Tile(cur_char, self.game_bag.letter_freq_and_val[cur_char][1]) #create a new tile.\r\n if move_data[3] == \"horizontal\":\r\n print(\"adding cur_char {} at {} {}\".format(cur_char, move_data[1], move_data[2]+i))\r\n board_cpy.place_tile(move_data[1], move_data[2]+ i, new_tile)\r\n elif move_data[3] == \"vertical\":\r\n board_cpy.place_tile(move_data[1] + i, move_data[2], new_tile)\r\n\r\n i = i + 1\r\n #print(\"New i value is {}\".format(i))\r\n board_cpy.print_board()\r\n \r\n\r\n #once we're done placing the tiles, check for validity of entire board.\r\n cur_board_words = board_cpy.find_words_on_board()\r\n move_valid = True #assume move is valid, until proven otherwise.\r\n\r\n for word_data in cur_board_words:\r\n word = word_data[0]\r\n print(word)\r\n if word not in all_english_words:\r\n return False #do nothing else\r\n \r\n #print(\"Getting here; all words valid\")\r\n \r\n #getting here means that the move is actually valid, with no conflicts.\r\n main_board = self.myboard\r\n #In this case, add to the real board. \r\n i = 0 \r\n for cur_char in move_data[0]:\r\n new_tile = Tile(cur_char, self.game_bag.letter_freq_and_val[cur_char][1]) #create a new tile.\r\n if move_data[3] == \"horizontal\":\r\n main_board.place_tile(move_data[1], move_data[2]+ i, new_tile)\r\n elif move_data[3] == \"vertical\":\r\n main_board.place_tile(move_data[1] + i, move_data[2], new_tile)\r\n\r\n i = i + 1\r\n return True",
"def _find_best_fit(self, puzzle):\n\n word = puzzle['answer']\n\n # if first word\n print(len(self.filled_pos))\n if len(self.filled_pos) == 0:\n x = random.randint(0,4)\n y = random.randint(0,4)\n print(\"first_word: {} x:{} y:{}\".format(word, x, y))\n print(\"will_fit: {}\".format(will_fit[ACROSS](x, y, length(word, self.lang))))\n if will_fit[ACROSS](x, y, length(word, self.lang)):\n puzzle['orientation'] = \"across\"\n # puzzle['position'] = t + 1\n puzzle['startx'] = x + 1\n puzzle['starty'] = y + 1\n self._fill_word_in_matrix(word, ACROSS, (x,y))\n return puzzle\n\n # first find the location where it overlaps.. then move to the other ones to keep it interesting\n for key in self.filled_pos:\n #the orientation for this word should be perpendicular to the one we are trying to match\n pos = int(not self.filled_pos[key]['orientation'])\n # find the intersecting letters between the two words\n intersect = find_intersection(key, word, self.lang)\n print(\"trying to intersect filled_word={} with word={}\".format(key, word))\n if len(intersect) == 0:\n # no letters matched.. lets find the next\n continue\n else:\n a = [-10, -10]\n print(\"intersecting letters={}\".format(intersect))\n for letter in intersect:\n indexes1 = find_all_char_pos(key, letter, self.lang)\n for index in indexes1:\n # index = filled_pos[key]['word'].find(letter)\n print(\"location of the letter={} in word={} is {}\".format(letter, key, index))\n filled_word_pos = self.filled_pos[key]['position']\n a[pos] = filled_word_pos[pos] + index\n indexes2 = find_all_char_pos(word, letter, self.lang)\n for index2 in indexes2:\n # index2 = word.find(letter)\n print(\"location of the letter={} in word={} is {}\".format(letter, word, index2))\n a[self.filled_pos[key]['orientation']] = filled_word_pos[int(not pos)] - index2\n print(\"looking for match in location={}\".format(a))\n print(\"will_fit={}\".format(will_fit[pos](a[0], a[1], length(word, self.lang))))\n if will_fit[pos](a[0], a[1], length(word, self.lang)):\n if not self._check_overlap(word, pos, a[0], a[1]):\n self._fill_word_in_matrix(word, pos, (a[0], a[1]))\n calculate_free_rows(self.puzzle_matrix, self.height)\n puzzle['orientation'] = \"down\" if pos else \"across\"\n # puzzle['position'] = t + 1\n puzzle['startx'] = a[0] + 1\n puzzle['starty'] = a[1] + 1\n return puzzle\n # if we are still here then we havent found a place for this word\n # fill it in an empty space\n free_blocks_across = calculate_free_rows(self.puzzle_matrix, self.height)\n print(\"@@@@@@filling a random across free_blocks_across={}\".format(free_blocks_across))\n for key, val in sorted(free_blocks_across.items()):\n print(\"key={} val={}\".format(key, val))\n if key >= length(word, self.lang):\n pos = val.pop(random.randint(0, len(val)-1 ))\n if will_fit[ACROSS](pos[0], pos[1], length(word, self.lang)) and not self._check_overlap(word, ACROSS, pos[0], pos[1]):\n self._fill_word_in_matrix(word, ACROSS, (pos))\n puzzle['orientation'] = \"across\"\n puzzle['startx'] = pos[0] + 1\n puzzle['starty'] = pos[1] + 1\n return puzzle",
"def place_word(grid, coords, word):\n\n for i, l in enumerate(word):\n x, y = coords[0] + i, coords[1]\n\n grid[y][x] = l\n\n return grid",
"def main():\n grid = make_grid(3, 3) # Takes a long time for a grid of 4 , 4\n dictionary = get_dictionary('words.txt')\n words = search(grid, dictionary)\n for word in words:\n print(word)\n print(\"Found %s words\" % len(words))",
"def _word_hunt_recursive(self, matrix, y, x, visited, moves, word):\n found = []\n if not self.search_prefix(word): # If prefix doesn't exist in trie, sieze operations\n return found\n if self.search_word(word): # If word is found, add to found list but keep going!\n found.append(word)\n\n for dx, dy in moves:\n new_x = x + dx\n new_y = y + dy\n if new_x >= 0 and new_y >= 0 and new_y < len(matrix) and new_x < len(matrix[y]) and (new_x, new_y) not in visited:\n visited.add((new_x, new_y))\n found.extend(self._word_hunt_recursive(matrix, new_y, new_x, visited, moves, word + matrix[new_y][new_x]))\n visited.remove((new_x, new_y))\n return list(set(found)) # Set to list to remove duplicates",
"def find_word(self,word):\r\n self.start_pos = []\r\n #check each row\r\n for i in range(0,len(self.wordsearch)):\r\n #check each column\r\n for j in range(0, len(self.wordsearch[i])):\r\n #find all coordinates which have the first letter of the word and store them\r\n if self.wordsearch[i][j] == self.word[0]:\r\n self.start_pos.append([i,j])\r\n \r\n \r\n #print(count)\r\n for pos in self.start_pos:\r\n if self.check_start(self.word, pos):\r\n \r\n return",
"def solve_puzzle(self):\n\n # for each word in the words list\n # ...for each row in the game board\n # ......for each column in each row\n for word in self.words:\n for y, row in enumerate(self.board):\n for x, col in enumerate(row):\n \n # for each direction\n # try to find a word in said direction\n for dir in self.directions:\n self.scan_word(word, y, x, dir)",
"def scan(o, words, grid):\n finds = []\n words_range = range(len(words))\n for y in o.outer_loop:\n vector = []\n x = 0\n if o.desc:\n x = len(o.inner_loop) - 1\n if o.while_block:\n while y <= (len(o.outer_loop)-1):\n if o.flip_xy:\n vector.append(grid[y][x])\n else:\n vector.append(grid[x][y])\n y = y + 1 \n if o.desc:\n x = x - 1\n else:\n x = x + 1\n scanString = sep.join(vector)\n print \"scanning \" + scanString\n\n elif not o.inner_loop:\n scanString = sep.join(grid[y])\n\n else:\n for x in o.inner_loop:\n if o.flip_xy:\n vector.append(grid[x][y])\n else:\n vector.append(grid[y][x])\n scanString = sep.join(vector)\n\n for w in words_range:\n if o.test(words[w], scanString):\n finds.append(words[w])\n return finds",
"def generate_words_with_scores(rack,placed_tile,scrabble_words_dict):\n res_set = set()\n word_score_dict = {}\n comb_set = generate_combinations(rack,placed_tile)\n for combo in comb_set:\n words_set = generate_words(combo, scrabble_words_dict)\n for word in words_set:\n res_set.add(word)\n for word in res_set:\n score = calculate_score(rack,word)\n word_score_dict[word] = score\n return word_score_dict",
"def main():\n grid = make_grid(3, 3) # change to 3x3\n dictionary = get_dictionary(\"words.txt\")\n words = search(grid, dictionary)\n display_words(words)",
"def scan_word(self, word, y, x, direction):\n\n # extract x and y offsets from the direction list\n y_offset, x_offset = direction\n\n # # initialize an empty list of coordinates for the potential word location\n coordinates = []\n\n # for each character in the word\n for i, char in enumerate(word):\n\n # compute the x and y coordinates of the point we wish to explore using offsets\n x_i = x + i * x_offset\n y_i = y + i * y_offset\n\n # if the computed point is out of bounds, return\n if(x_i < 0 or x_i >= self.dims or y_i < 0 or y_i >= self.dims):\n return\n\n # if the letter at the computed point equals the letter of the word we are looking for\n # append the current point to the coordinates list\n if(self.board[y_i][x_i] == char):\n coordinates.append([x_i, y_i])\n\n # finally, if the length of the coordinates list is equal to length of the word\n # we were looking for, append the coordinates we found, along with the word,\n # to the answers dictionary\n if(len(coordinates) == len(word)):\n self.answers[word] = coordinates",
"def find(board, word, available_tiles=None, current_path=[]):\n # If there are no available tiles, it means that a new search has started,\n # either for a new word, or a search that begins at a new position on the \n # board (looking for a new path to find the word). In this case, the \n # available tiles are set to include all the tiles of the board, and the \n # current path is (re)set to an empty list.\n if not available_tiles:\n current_path, available_tiles = reset_board()\n \n # The base case is when the word we're looking for only has one letter.\n if len(word) == 1:\n return run_base_case(board, word, available_tiles, current_path)\n \n # If the word has more than one letter, find the tiles on the board that \n # contain the first letter of the word. This is saved as a list: there \n # might be no tiles on the board having the first letter of the word, there\n # might be just one possible starting point (only one tile), or there could \n # be multiple possible starting points.\n origin_tile = [tile for tile in available_tiles if board[tile[0]][tile[1]] == word[0] if tile not in current_path]\n\n # If the list of origin tiles is empty, then the current path did not \n # return the word we were searching for. Otherwise, we have at least one \n # possible starting point.\n if origin_tile:\n for ot in origin_tile:\n # Add the current tile to the path currently being evaluated, so\n # that a tile already in the path is not considered again in the \n # seach. \n current_path.append(ot)\n # Get the neighbors of the current tile, to figure out where to\n # go next.\n neighbors = find_neighbors(ot)\n # The magic of recursion...\n if find(board, word[1:], available_tiles=neighbors, current_path=current_path):\n return True\n # If the path was not found starting from this origin tile,\n # then the current path needs to be reset to an empty list.\n current_path = []\n\n return False",
"def location_score(rows):\n # hard max of 1,000,000\n locations = dict([(row[0], 1000000) for row in rows])\n for row in rows:\n # gets a sum of the locations of the word in each url\n loc = sum(row[1:])\n if loc < locations[row[0]]:\n locations[row[0]] = loc\n return normalize_scores(locations, small_is_better=1)",
"def move(self, direction):\n if direction in (UP, DOWN):\n num_steps = self._grid_height\n elif direction in (LEFT, RIGHT):\n num_steps = self._grid_width\n moved = False\n temp_list = []\n for start_cell in self._move_dir[direction]:\n # step 1: iterate through each line, write results to temp list\n for step in range(num_steps):\n row = start_cell[0] + step * OFFSETS[direction][0]\n col = start_cell[1] + step * OFFSETS[direction][1]\n temp_list.append(self._cells[row][col])\n # step 2: merge temp list\n temp_list_snap = temp_list[:]\n temp_list = merge(temp_list)\n print(temp_list_snap, temp_list)\n if temp_list_snap != temp_list:\n moved = True\n # step 3: store merged temp list back on grid\n idx = 0\n for step in range(num_steps):\n row = start_cell[0] + step * OFFSETS[direction][0]\n col = start_cell[1] + step * OFFSETS[direction][1]\n if direction in (UP, DOWN):\n self._cells[row][col] = temp_list[idx]\n idx += 1\n elif direction in (LEFT, RIGHT):\n self._cells[row][col] = temp_list[idx]\n idx += 1\n temp_list = []\n if moved:\n self.new_tile()\n moved = False\n score = sum(map(sum, self._cells))\n print(\"Your score: %s\" % score)\n #return self._cells",
"def spider(coord_list, across_or_down):\n word_list = list()\n last_coord = coord_list[-1]\n if across_or_down == \"A\":\n for coord_set in coord_list:\n spiderUp(coord_set[0],coord_set[1],word_list)\n spiderLeft(last_coord[0],last_coord[1],word_list)\n else: #DOWN\n for coord_set in coord_list:\n spiderLeft(coord_set[0],coord_set[1],word_list)\n spiderUp(last_coord[0],last_coord[1],word_list)\n return word_list",
"def check_direction(self,word,pos,d):\r\n self.word_found = [self.word[0]] #this is a list of the characters found in the word in the particular direction\r\n self.current_pos = pos #the position you start at - i.e the first character of the word\r\n self.pos_checked = [pos] #list containing all the positions we have checked so far\r\n \r\n \r\n while self.check_match(self.word_found,self.word):\r\n check = 0\r\n #check if length of word found is same is length of the word yoou are searching for\r\n if (len(self.word) == len(self.word_found)):\r\n \r\n #correct word has been found !\r\n #print('word found!')\r\n \r\n #change these characters to red to highlight the word\r\n #print(self.word_found)\r\n \r\n self.highlight_word(self.pos_checked)\r\n \r\n return True\r\n \r\n \r\n else:\r\n #word isnt correct length, move to next coordinate and try again\r\n self.current_pos = [self.current_pos[0] + d[0], self.current_pos[1] + d[1]] \r\n #current_pos = [initial x pos + x indices of direction,\r\n self.pos_checked.append(self.current_pos) \r\n #print(self.current_pos) \r\n # initial y pos + y indices of direction]\r\n \r\n if self.valid_coordinate(self.current_pos[0],self.current_pos[1]):\r\n self.word_found.append(self.wordsearch[self.current_pos[0]][self.current_pos[1]]) \r\n #add new character to word found\r\n else:\r\n return #word not found - out of worsearch range\r",
"def find_word(target):\n results = []\n string = \"\"\n\n for a in range(0, len(grid)):\n for b in range(0, len(grid[a])):\n # Create strings on rows in the grid.\n string += grid[a][b]\n # Is the target is his string?\n if target in string:\n # Find the target by index in the string.\n index = string.index(target)\n # The target string was found at the row and index.\n results += [(a, index)]\n string = \"\"\n\n for b in range(0, len(grid[0])):\n for a in range(0, len(grid)):\n # Create strings based on the columns of the grid.\n string += grid[a][b]\n # Is the target in this string?\n if target in string:\n # Find the target by index in the string.\n index = string.index(target)\n # The target string was found at the index and column.\n results += [(index, b)]\n string = \"\"\n\n return results",
"def crosswordPuzzle(crossword, words):\n words = words.split(';')\n n_rows = len(crossword)\n lines = []\n columns = []\n row = 0\n twisted = twistgrid(crossword)\n while row < n_rows:\n lines.append(list(re.finditer(r'(-){2,}', crossword[row])))\n columns.append(list(re.finditer(r'(-){2,}', twisted[row])))\n row += 1\n row_words = []\n col_words = []\n blank_lengths = []\n for irow, matches in enumerate(lines):\n row_words.extend([((irow, x.span()[0]),\n (irow, x.span()[1] - 1)) for x in matches])\n blank_lengths.extend([x.span()[1] - x.span()[0] for x in matches])\n\n for icol, matches in enumerate(columns):\n col_words.extend([((x.span()[0], icol),\n (x.span()[1] - 1, icol)) for x in matches])\n blank_lengths.extend([x.span()[1] - x.span()[0] for x in matches])\n\n intersections = {'row_words': [], 'col_words': []}\n n_intersections = 0\n\n for i, rword in enumerate(row_words):\n for j, cword in enumerate(col_words):\n if rword[0][0] >= cword[0][0] and rword[0][0] <= cword[1][0] and\\\n cword[0][1] >= rword[0][1] and cword[0][1] <= rword[1][1]:\n intersections['row_words'].append((i,\n cword[0][1] - rword[0][1]))\n intersections['col_words'].append((j,\n rword[0][0] - cword[0][0]))\n n_intersections += 1\n\n guesses = list(permutations(words))\n right_length = []\n for i, guess in enumerate(guesses):\n if all((len(guess[k]) == blank_lengths[k] for k in range(len(guess)))):\n right_length.append(guess)\n\n for k, guess in enumerate(right_length):\n row_intersections = []\n col_intersections = []\n i = 0\n while col_intersections == row_intersections and i < n_intersections:\n word = guess[intersections['row_words'][i][0]]\n letter_ind = intersections['row_words'][i][1]\n row_intersections.append(word[letter_ind])\n\n # now find letters of columns that are intersections\n word = guess[intersections['col_words'][i][0] + len(row_words)]\n # need offset because the first words in guess fall into rows\n letter_ind = intersections['col_words'][i][1]\n col_intersections.append(word[letter_ind])\n i += 1\n if col_intersections == row_intersections:\n print(f'Intersections match for guess {guess}')\n break # don't keep changing guess even after you found fit\n print(f'Found correct guess on search {k+1} out of {len(guesses)} choices')\n\n out = [list(row) for row in crossword]\n for i, word in enumerate(row_words):\n out[word[0][0]][word[0][1]:word[1][1] + 1] = list(guess[i])\n\n out = [''.join(row) for row in out]\n out = twistgrid(out)\n\n out = [list(row) for row in out]\n for i, word in enumerate(col_words):\n out[word[0][1]][word[0][0]:word[1][0] + 1] = list(guess[i + len(row_words)])\n out = [''.join(row) for row in out]\n out = twistgrid(out)\n out = [''.join(row) for row in out]\n print('\\n'.join(out))\n\n return out",
"def main(board, word):\r\n for i, row in enumerate(board):\r\n for j, square in enumerate(row):\r\n if square == word[0]:\r\n res = neighbors(board, word, i, j)\r\n if res:\r\n return True\r\n return False",
"def search_2(self, word, index, board, x, y):\n for i in range(len(board)):\n for j in range(len(board[0])):\n\n if self.in_board(x, y, board) and not self.is_visited(x, y):\n # if this point meet, search its for directions\n if word[index] == board[x][y]:\n if self.dfs(board, x, y, word, index) is True:\n return True",
"def test_find_word(self):\n self.assertEqual(find_word('GREEN'), [(1, 1), (1, 1), (0, 9)])\n self.assertEqual(find_word('ABSENT'), [])\n self.assertEqual(find_word('PW'), [(1, 7), (3, 7), (0, 8)])",
"def can_be_placed(grid, coords, word, vertical=False):\n\n # starting at coords, iterate in direction over the grid\n # - if the location is None, it's okay\n # - if there is a letter, make sure it's the same letter as the one we're\n # placing over it\n # - doesn't go over the edge of the grid (although we could just grow the\n # grid in this case)\n # - need to go back over the grid and make sure any new \"words\" exist in\n # some dictionary or are not generated at all\n #\n # might be useful to be able to transpose the matrix so we don't have to\n # figure out two ways to iterate over it. let's do horizontal first\n # (if we transpose the grid, we have to transpose the coordinates too)\n\n has_touched = is_empty(grid)\n\n if vertical:\n return can_be_placed(transpose(grid), (coords[1], coords[0]), word)\n else:\n for i in range(0, len(word)):\n\n x, y = coords[0] + i, coords[1]\n\n if y not in range(0, len(grid)) or x not in range(0, len(grid[0])):\n return False\n else:\n grid_contents = grid[y][x]\n current_letter = word[i]\n\n if grid_contents == None:\n pass\n elif grid_contents == current_letter:\n has_touched = True\n pass\n else:\n return False\n return has_touched",
"def search(grid, dictionary):\n neighbours = all_grid_neighbours(grid)\n paths = []\n \n def do_search(path):\n word = path_to_word(grid, path)\n if word_in_dictionary(word, dictionary):\n paths.append(path)\n for next_pos in neighbours[path[-1]]:\n if next_pos not in path:\n do_search(path + [next_pos])\n for position in grid:\n do_search([position])\n \n words = []\n for path in paths:\n words.append(path_to_word(grid, path))\n return set(words)"
] | [
"0.632966",
"0.62599075",
"0.62542665",
"0.61993295",
"0.6190933",
"0.6091917",
"0.60526085",
"0.6044552",
"0.6018634",
"0.5890883",
"0.5811492",
"0.58039117",
"0.5795048",
"0.57586914",
"0.57189506",
"0.5687535",
"0.5678156",
"0.5638673",
"0.5599674",
"0.5583667",
"0.55259067",
"0.5505913",
"0.5505291",
"0.54882723",
"0.5471719",
"0.54554546",
"0.5412505",
"0.53931177",
"0.53793705",
"0.53698164"
] | 0.7477762 | 0 |
Function to calculate the LL for an array of fluxes in a given energy bin | def calc_flux_array(self):
    # First determine the associated spectrum
    self.compute_template_spectrum()
    # Calculate baseline counts to normalise fluxes we scan over
    # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps
    b = self.setup_b_instance(0,add_ps_mask=True)
    mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])
    A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)
    # Array to get LLs when no profile likelihood run
    norun = np.array([1.0, 1.0, 1.0, 1.0])
    # Now setup and compute the arrays
    LL_array = np.array([])
    A_array_short = np.array([])
    spect_array = np.array([])
    for i in range(len(A_array)):
        print "on i =",i
        # Calculate LL
        if i == 0:
            b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)
        else:
            for key in b1.fixed_template_dict_nested.keys():
                b1.fixed_template_dict_nested[key] = b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]
        ll_val = b1.ll(norun,4,4)
        # Make triangle
        # Append to arrays
        LL_array = np.append(LL_array,ll_val)
        A_array_short = np.append(A_array_short,A_array[i])
    spect_array = self.spectrum*np.array(A_array_short)
    # Save output
    np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)
    np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)
    np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_lims(self, day):\n lmda, flux = self.data[day]\n ind_min = np.argpartition(flux, 3)[:3]\n mean_min = np.mean(flux[ind_min])\n lmda_min = float(lmda[np.where(flux == np.amin(flux))])\n return [mean_min, 0.4 + 0.6*mean_min, 0.001, 0.05, lmda_min - 0.02, lmda_min + 0.02]",
"def compute_llfr(fwd, volume=np.array([3.3, 1.45 , 6, 0.3, 0.4]), va=0.0):\n weight = volume / volume.sum()\n fwds_incl_va = fwd.copy()\n fwds_incl_va[0] = fwds_incl_va[0] + va / 10000.0\n llfr = fwds_incl_va * weight\n return np.array([llfr.sum()])",
"def calc_ell_list(chain) :\n ell_list = np.zeros(len(chain.bridges_dict))\n \n for b in chain.bridges_dict.keys() :\n i, j = chain.bridges_dict[b].lumen1, chain.bridges_dict[b].lumen2\n L_i, pos_i = chain.lumens_dict[i].length, chain.lumens_dict[i].pos\n L_j, pos_j = chain.lumens_dict[j].length, chain.lumens_dict[j].pos\n \n chain.bridges_dict[b].length = np.abs(pos_j - pos_i) - (L_i + L_j)",
"def estimate_L(da):\n from statsmodels.tsa.stattools import acf\n \n def acf_lag1(x):\n if np.sum(~np.isnan(x)) == 0: # if all NaNs\n return np.nan\n else:\n x = x[~np.isnan(x)]\n return acf(x, nlags=1)[-1]\n \n n = len(da.time.values)\n \n # DataArray of lag1 ACF coefficients\n rho_da = xr.apply_ufunc(acf_lag1, da, input_core_dims=[['time']], output_core_dims=[[]], vectorize=True, dask='allowed')\n \n # DataArray of effective sample size\n n_eff_da = n * ((1 - rho_da) / (1 + rho_da))\n \n # Initialise guess for block length\n Ls_da = xr.full_like(rho_da, 1)\n for i in range(10): # iterate to get estimate of L\n L_da = (n - Ls_da + 1) ** ( (2/3) * (1 - n_eff_da / n) )\n Ls_da = L_da\n \n return np.ceil(L_da) # round up to get block length",
"def calc_Lr(rho,mld,f,g=9.8,po=1027.):\n n2ml=np.ndarray(len(rho[1,:-1]))\n for i in range(len(rho[1,:-1])):\n n2ml[i]=-(g/po)*((rho[15,i]-rho[np.int8(mld[i])+15,i])/mld[i])\n Lr=(np.sqrt(n2ml)*mld[:-1])/f\n\n return Lr",
"def levin(x):\n summ = 0\n for t, l in x: # for the time and length of each algorithm\n summ += l + np.log(t)\n return summ",
"def calc_L_ch(signal_tot, Delta_0 = 10, Delta_2 = 100, cfd_frac = None): \n # Note: These if statements are pretty sloppy. Should improve them.\n \n if cfd_frac == None: # Simply sum entire pulse\n L_ch = np.sum(signal_tot, axis=1)\n\n else: # Use ranges relative to i_p\n [num_pulses, pulse_length] = signal_tot.shape\n \n # Set up integration regions\n i_p = scint_math.cfd(signal_tot, cfd_frac)\n \n # Calculate boundary samples, correct any out of range\n i_0 = i_p - Delta_0\n i_0[i_0 < 0] = 0\n i_2 = i_p + Delta_2\n i_2[i_2 > pulse_length - 1] = pulse_length - 1\n \n # Iterate through pulses and calculate L\n L_ch = np.zeros(num_pulses)\n for i in np.arange(num_pulses): \n L_ch[i] = scint_math.sum_pulse_region(\n signal_tot[i,:],\n i_0[i],\n i_2[i])\n \n return L_ch",
"def lam(freq):\n return C / freq",
"def leff(self):\n with Vega() as v:\n s = self.reinterp(v.wavelength)\n w = s._wavelength\n if s.transmit.max() > 0:\n leff = np.trapz(w * s.transmit * v.flux.value, w, axis=-1)\n leff /= np.trapz(s.transmit * v.flux.value, w, axis=-1)\n else:\n leff = float('nan')\n if s.wavelength_unit is not None:\n leff = leff * Unit(s.wavelength_unit)\n if self.wavelength_unit is not None:\n return leff.to(self.wavelength_unit)\n return leff\n else:\n return leff",
"def _LL(state, effects, observed_frequencies) -> float:\n observed_frequencies = np.array(observed_frequencies)\n predicted_probs = np.array([np.real(np.trace(state.dot(effect))) for effect in effects])\n return sum(np.log10(predicted_probs) * observed_frequencies)",
"def _calc_Lc(self, signal, dx):\n\n Wwin = np.fft.rfftfreq(len(signal), dx)\n f_signal = np.fft.rfft(signal)\n amplitude = np.abs(f_signal) * dx\n power = amplitude ** 2\n fc = np.sum(power[1:]*Wwin[1:])/np.sum(power[1:])\n\n return 1/fc",
"def loadLuminosityFunction(self):\n\n tab = np.genfromtxt(self.fname[0], skip_header=self.skip_header)\n if not self.evolve:\n self.luminosity_function = np.zeros((tab.shape[0], self.nbands, self.nzbins))\n\n else:\n self.luminosity_function = np.zeros((tab.shape[0], self.nbands, 1))\n\n if self.ecol is not None:\n self.ye = np.zeros(self.luminosity_function.shape)\n imult = 1\n else:\n self.ye = None\n imult = 2\n\n self.magmean = tab[:,self.xcol]\n\n if self.nzbins==1:\n for i in range(self.nzbins):\n for j in range(self.nbands):\n self.luminosity_function[:,j,i] = tab[:,self.ycol]\n if self.ecol is not None:\n self.ye[:,j,i] = tab[:,self.ecol]\n else:\n if not self.evolve:\n assert((tab.shape[1]-1)==self.nzbins)\n for i in range(self.nzbins):\n for j in range(self.nbands):\n self.luminosity_function[:,j,i] = tab[:,i*imult+self.ycol]\n if self.ecol is not None:\n self.ye[:,j,i] = tab[:,i*imult+self.ecol]\n else:\n for j in range(self.nbands):\n self.luminosity_function[:,j,0] = tab[:,self.ycol]\n if self.ecol is not None:\n self.ye[:,j,0] = tab[:,self.ecol]\n\n self.xmean = self.magmean\n self.y = self.luminosity_function",
"def CLF(mag,L_bins,dL,Mhost_min,Mhost_max,Mhost):\n\n mag_bin = np.asarray([mag[i] for i in range(len(mag)) if Mhost_min<log(Mhost[i],10)<=Mhost_max]) \n Nhosts = len(mag_bin)\n print \"len(\"+str((Mhost_max+Mhost_min)/2)+\")\"\n print Nhosts\n Msun = 4.76 # The Sun's absolute magnitude\n L = ((Msun-mag_bin)/2.5) # Calculated the luminosity \n counts = np.histogram(L,L_bins)[0]\n CLF = counts/(dL*Nhosts)\n return CLF,L_bins",
"def _llr_detection(signal: np.array, symbol_energy: float, noise_power: float) -> np.array:\n return -(4 * np.sqrt(symbol_energy) / noise_power) * signal",
"def get_lims(data):\n return data[:, 0].min() - 1, data[:, 0].max() + 1, data[:, 1].min() - 1, data[:, 1].max() + 1",
"def BoosterFlux(E,mbparam):\n flux_data = np.array(LoadBoosterFlux(mbparam))\n E_lo = flux_data[:,0]*mbparam.GeV\n E_hi = flux_data[:,1]*mbparam.GeV\n \n nu_mu = flux_data[:,2]/(50.0*mbparam.MeV) # conv. scale to eV\n nu_mub = flux_data[:,3]/(50.0*mbparam.MeV) # conv. scale to eV\n \n nu_e = flux_data[:,4]/(50.0*mbparam.MeV) # conv. scale to eV\n nu_eb = flux_data[:,5]/(50.0*mbparam.MeV) # conv. scale to eV\n \n for i,EE in enumerate(E_lo):\n if E >= E_lo[i] and E < E_hi[i]:\n return [nu_e[i],nu_eb[i],nu_mu[i],nu_mub[i]]\n else :\n pass\n \n return [0.0,0.0,0.0,0.0]",
"def n0derivative_clbb(cl_array,bins,n0bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n a=compute_n1_py(clpp,cls,cltt,clee,array1001[i],clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n b=compute_n1_py(clpp,cls,cltt,clee,array999[i],clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n \n \n \n keys=['TT','EE','EB','TE','TB']\n \n derlist=[]\n for k in range(len(keys)):\n diff=[n0bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n0bins)]-N0999[k][i][:len(n0bins)])*(n0bins*(n0bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n0{}dclbb.txt'.format(keys[k]),der)\n return derlist",
"def nllfun(x):\n mu = x[0]\n logsigma = x[1]\n return logsigma + 0.5*np.sum((mu/(np.exp(logsigma) + 0.01))**2) + 0.1/np.exp(logsigma)",
"def nllfun(x):\n mu = x[0]\n logsigma = x[1]\n return logsigma + 0.5*np.sum((mu/(np.exp(logsigma) + 0.01))**2) + 0.1/np.exp(logsigma)",
"def XtoL(self, x):\n lc = np.zeros(3)\n \n lc[0] = (x[0]-self.x0[0])/self.dh[0];\n lc[1] = (x[1]-self.x0[1])/self.dh[1];\n lc[2] = (x[2]-self.x0[2])/self.dh[2];\n \n return lc",
"def mel_to_linear(\n self, mel_amp: np.ndarray, threshold: float = 1e-10\n ) -> np.ndarray:\n return np.maximum(threshold, np.dot(self._inv_mel_basis, mel_amp))",
"def _calcLoudnesses(self, barSamples, sampleRate, loudness):\n\n numFFTBins = self._calcNumFFTBins(sampleRate)\n loudnesses = []\n for yBar in barSamples:\n yHarmonic, yPercussive = self._separate(yBar, numFFTBins)\n loudnessHarmonic = self._loudness(yHarmonic, sampleRate, loudness)\n loudnessPercussive = self._loudness(yPercussive, sampleRate,\n loudness)\n loudnesses.append((loudnessHarmonic, loudnessPercussive))\n return loudnesses",
"def getLogBins(nbins, low, high):\n\n x = float(low)\n dx = pow(high/low, 1.0/nbins);\n \n return np.array([x*pow(dx,i) for i in range(nbins+1)], dtype=float)",
"def flux2lum(flux, dist, log_flux=False, log_dist=False, log_lum=True,\n flux_unit='erg/s/cm^2', log_wlen=False):\n\n if log_flux == False:\n flux = np.log10(flux)\n\n if flux_unit == 'W/m^2':\n flux = flux + 3\n elif flux_unit != 'erg/s/cm^2':\n print(\"MISC.FLUX2LUM: ERROR: flux_unit not understood: \"+ flux_unit +\n \" Return -1! \")\n return(-1)\n\n if log_dist == False:\n dist = np.log10(dist)\n\n lum = flux + np.log10(4.0 * np.pi) + 2*dist + 2*np.log10(3.086e24)\n\n if log_lum == False:\n\n lum = 10.0**lum\n\n return(lum)",
"def create_Leff(self, latitudes):\n Leff = np.array([day_length(day, latitudes) for day in self.days_of_year])\n Leff[Leff < 0] = 0\n return Leff - 3",
"def calc_lbol(mbol,unc_mbol,mbol_sun=MBOL_SUN,lbol_sun=LBOL_SUN,null_mag=-9999.):\n\n lbol = lbol_sun * 10**((mbol_sun - mbol) / 2.5)\n lbol[(mbol==null_mag) | (np.isinf(mbol))] = null_mag\n\n unc_lbol = lbol * np.log(10.) * unc_mbol\n unc_lbol[(mbol==null_mag) | (np.isinf(mbol))] = null_mag\n\n return lbol, unc_lbol",
"def lam(E):\n return (12398.4/E)*1e-10",
"def diff_cl(cl_array,bins):\n ls=np.arange(2,len(cl_array)+2)\n cls=cl_array*2*np.pi/(ls*(ls+1))\n dcltt=[]\n for i in range(len(bins)):\n dcltt.append(2*0.001*cls[int(bins[i])])\n return dcltt",
"def compute_flux_points_ul(quantity, quantity_errp):\n return 2 * quantity_errp + quantity",
"def getOrdFlux(self):\n totOrdFlux = []\n for cell in self.cells:\n totOrdFlux.append(cell.totOrdFlux)\n return np.array(totOrdFlux)"
] | [
"0.6446283",
"0.6150002",
"0.60571516",
"0.59681475",
"0.5960083",
"0.59101176",
"0.58506566",
"0.5835133",
"0.57849824",
"0.5698113",
"0.5681642",
"0.5586205",
"0.5581096",
"0.5571262",
"0.55535036",
"0.5538423",
"0.5537508",
"0.55365354",
"0.55365354",
"0.5528996",
"0.5528851",
"0.55275166",
"0.552669",
"0.55232275",
"0.55134565",
"0.55094975",
"0.550481",
"0.5478978",
"0.5454589",
"0.5449667"
] | 0.6197937 | 1 |
Function that is called whenever the AI is supposed to make a move. Requires the board as a parameter; the board is a copy of the board dict used by the tic-tac-toe game. Also requires the player's mark as a parameter, which is 'o' by default. | def move(self, board, player_mark='o'):
        # First, check whether the board is full before making a move
full = 1
for location in board.keys():
if board[location] == '-':
full = 0
if not full:
# Storm Spirit is a dumb yet aggressive AI, so he does not need to
# check whether the opponent has created a line.
# Initialize a move variable that determines the location that the
# AI will mark.
move = ''
# Let's see if there are any potential lines that we can form,
# then mark the location that would finish that line.
print('Searching for potential lines...')
move = self.find_line_attempt(board, 'x')
if(move == ''):
print('No potential lines found. Marking random location.')
# Initialize a boolean variable that tracks whether we have
# marked a location or not.
marked = 0
while not marked:
location = random.randint(1,9)
# The location will have to be empty
if(location == 1 and board['topleft'] == '-'):
marked = 1
print('Marking topleft location\n')
elif(location == 2 and board['topcenter'] == '-'):
marked = 1
print('Marking topcenter location\n')
elif(location == 3 and board['topright'] == '-'):
marked = 1
print('Marking topright location\n')
elif(location == 4 and board['middleleft'] == '-'):
marked = 1
print('Marking middleleft location\n')
elif(location == 5 and board['middlecenter'] == '-'):
marked = 1
print('Marking middlecenter location\n')
elif(location == 6 and board['middleright'] == '-'):
marked = 1
print('Marking middleright location\n')
elif(location == 7 and board['bottomleft'] == '-'):
marked = 1
print('Marking bottomleft location\n')
elif(location == 8 and board['bottomcenter'] == '-'):
marked = 1
print('Marking bottomcenter location\n')
elif(location == 9 and board['bottomright'] == '-'):
marked = 1
print('Marking bottomright location\n')
else:
                        # The chosen location is already taken, so leave marked
                        # at 0 and re-roll another random location
                        print('Location already marked! Re-rolling')
# Mark the location chosen
if(location == 1):
board['topleft'] = self.mark
elif(location == 2):
board['topcenter'] = self.mark
elif(location == 3):
board['topright'] = self.mark
elif(location == 4):
board['middleleft'] = self.mark
elif(location == 5):
board['middlecenter'] = self.mark
elif(location == 6):
board['middleright'] = self.mark
elif(location == 7):
board['bottomleft'] = self.mark
elif(location == 8):
board['bottomcenter'] = self.mark
elif(location == 9):
board['bottomright'] = self.mark
else:
# We found a line attempt, let's mark the finishing location
board[move] = self.mark
print('Marked location at ' + move) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def request_move(self, board):\n pass",
"def move(self, board):\n raise NotImplementedError",
"def do_move(self, board):\n raise NotImplementedError(\"do_move method not implemented for Player: {}\".format(self))",
"def make_move(self, board: Block) -> int:\n raise NotImplementedError",
"def make_move(self, board: Board) -> int:\n raise NotImplementedError",
"def apply_move(self, move):\n if self.check_move(move=move):\n self.board_list[move] = self.current_player.marker # changes value in the board to player which is either X or O\n self.moves_made += str(move) # keeps track of all moves\n return True\n else:\n return False",
"def mark(board, player, row, col):\r\n pass",
"def mark(board, player, row, col):\n pass",
"def makeMove(self, board, move):\n\t\trotation, this_board = self.__getNormalizedAndRotatedBoard(board)\n\t\tthis_state = TicTacToeHelper.serializeBoard(this_board)\n\n\t\tthis_move = TicTacToeHelper.rotateMove(move, rotation)\n\n\t\tself.__state_history.append((this_state, this_move))",
"def getMove(self, board):\n pass",
"def make_move(board, position, player):\n # only valid moves are passed in here\n board[position-1] = player",
"def set_board(board):",
"def execute_move(board, move):\n\n player, spike_index, fields_to_move = Judge._validate_move(move)\n\n board.set_player_perspective(player)\n \n if spike_index == OUT_OF_BAR_SPECIAL_MOVE:\n dest_spike_index = fields_to_move - 1\n board.remove_checker_from_bar()\n else:\n dest_spike_index = spike_index + fields_to_move\n board.pop_player_checker(spike_index)\n\n if dest_spike_index >= len(INITIAL_SPIKES_STATE):\n return board\n\n board.push_player_checker(dest_spike_index)\n\n return board",
"def computer_move(board,move,player):\r\n com_execution(board, move, player)",
"def make_move(self, move, player, board):\r\n #nBoard = board.copy()\r\n board[move] = player\r\n for d in core.DIRECTIONS:\r\n if self.find_bracket(move, player, board, d)!=None:\r\n self.make_flips(move, player, board, d)\r\n return board",
"def makeMove(self, move, player):",
"def make_move(self):\n self.owner = self.game.current_turn\n self.status = 'X' if self.owner == self.game.creator else 'O'\n ####\n #Random turn??\n ####\n self.save(update_fields=['status', 'owner'])\n\n # Add log entry for move\n self.game.add_log(f'cell made at ({self.row}, {self.col}) by {self.owner}')\n\n # Set the current turn for the other player if game is not over\n # Check if find winner\n if self.game.check_win(cell=self) or\\\n self.game.get_all_game_cells().filter(status='EMPTY').count() == 0:\n print(\"Winnnnnnnn\")\n self.game.mark_complete(winner=self.owner)\n\n # Switch player turn\n self.game.switch_turn()\n\n # Let the game know about the move and result\n self.send_game_update()",
"def move(self, board):\n if self.name == \"Combination_Easy\":\n return self.alpha_beta_search(board, 1)\n elif self.name == \"Combination_Normal\":\n return self.alpha_beta_search(board, 2)\n elif self.name == \"Combination_Hard\":\n return self.alpha_beta_search(board, 3)\n elif self.name == \"static\":\n return self.static_player(board)\n elif self.name == \"parity\":\n return self.parity_player(board)\n elif self.name == \"mobility\":\n return self.mobility_player(board)\n elif self.name == \"pmobility\":\n return self.potential_mobility_player(board)\n elif self.name == \"corners\":\n return self.corners_player(board)\n elif self.name == \"stability\":\n return self.stability_player(board)",
"def make_move(board, player_num, row, col):\n board[row][col] = 'X' if player_num == 1 else 'O'",
"def __init__(self, board):\n self.board = board\n self.position = 0\n self.no_moves = 0 # Added number of moves\n self.adjustment = 0",
"def place(self, board):\r\n self.board = board",
"def make_move(board, move, ch):\n board[move['row']][move['col']] = ch\n \n winner = board_winner(board)\n \n if winner is not None:\n return True, winner\n \n if not board_has_move(board):\n return True, None\n \n return False, None",
"def requestMove(self, board):\n\t\t#get the rotated and normalized board for this player\n\t\trotation, this_board = self.__getNormalizedAndRotatedBoard(board)\n\n\t\t# pass the rotated and normalized board to the policy to get back the optimal move.\n\t\tthis_state = TicTacToeHelper.serializeBoard(this_board)\n\t\tvalid_moves = TicTacToeHelper.getValidMoves(this_board)\n\n\t\tmove = self.policy.getAction(this_state, valid_moves)\n\n\t\t# rotate the optimal move back `rotation` times to match the original board index\n\t\tret_move = TicTacToeHelper.reverseRotateMove(move, rotation)\n\t\treturn ret_move",
"def new_move(self, grid_x, grid_y, player):\n #duplication /!\\\n if player == self.X:\n self.draw_X(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.X\n\n elif player == self.O:\n self.draw_O(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.O",
"def ai_1(board: BoardState) -> BoardState:\n cur_piece = board.cpiece\n if cur_piece is not None:\n for (x,y) in board.open_spots:\n move = find_win_spot(cur_piece, board)\n if move:\n return update_board_then_give_random(board, move)\n board.ai_random_move()\n return board",
"def move(self, board):\n # first, make your turn:\n currentState = board[self.x,self.y]\n turnDir = self.rule[(currentState + 1) % len(self.rule)]\n self.turn( int(turnDir) )\n # next, let's change this cell's state:\n if currentState >= len(self.rule) - 1:\n board[self.x,self.y] = 0\n else:\n board[self.x,self.y] = currentState + 1\n # and let's move:\n offsets = self.nextPositionOffset() # based on x, y, and dir\n self.x, self.y = board.move(self.x, self.y, offsets[0], offsets[1])",
"def move(self, board):\n\n if board.get_number_of_moves() == 0:\n random_row = randint(0, 2)\n random_column = randint(0, 2)\n\n if random_row == 1 or random_column == 1:\n random_row = 1\n random_column = 1\n elif random_row == 2:\n random_row = board.get_dimension()-1\n\n if random_column == 2:\n random_column = board.get_dimension()-1\n\n move = (random_row, random_column)\n elif board.get_number_of_moves() == 1 or board.get_number_of_moves() == 2:\n if board.get_piece(1,1) == ' ':\n move = (1, 1)\n else:\n board_dimension = board.get_dimension()-1\n corners = [(0, 0), (0, board_dimension), (board_dimension, 0), (board_dimension, board_dimension)]\n corners = self.remove_filled_positions(corners, board)\n\n move = corners[randint(0, len(corners)-1)]\n else:\n move = self.check_for_winner(board)\n\n if move == (-1, -1):\n board_dimension = board.get_dimension()-1\n corner1_moves = self.remove_filled_positions([(0, 0), (2, 2)], board)\n corner2_moves = self.remove_filled_positions([(0, 2), (2, 0)], board)\n\n non_corner_moves = self.remove_filled_positions([(1, 0), (2, 1), (1, 2), (0, 1)], board)\n\n center_piece = board.get_piece(1, 1)\n corner_pieces = [board.get_piece(0, 0), board.get_piece(board_dimension, 0), board.get_piece(0, board_dimension), board.get_piece(board_dimension, board_dimension)]\n\n if corner_pieces[0] != self._piece and corner_pieces[0] != ' ' and corner_pieces[0] == corner_pieces[3]:\n move = non_corner_moves[randint(0, 3)]\n elif corner_pieces[1] != self._piece and corner_pieces[1] != ' ' and corner_pieces[1] == corner_pieces[2]:\n move = non_corner_moves[randint(0, 3)]\n elif len(corner2_moves) > 0 and corner_pieces[0] != self._piece and corner_pieces[0] == center_piece and corner_pieces[3] == self._piece:\n move = corner2_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[1] != self._piece and corner_pieces[1] == center_piece and corner_pieces[2] == self._piece:\n move = corner1_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[2] != self._piece and corner_pieces[2] == center_piece and corner_pieces[1] == self._piece:\n move = corner1_moves[0]\n elif len(corner2_moves) > 0 and corner_pieces[3] != self._piece and corner_pieces[3] == center_piece and corner_pieces[0] == self._piece:\n move = corner2_moves[0]\n else:\n move = self.can_complete_two_in_row(board)\n\n if move == (-1, -1):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move",
"def make_play(board, your_team, last_move):\n \"\"\"\n # a list containing all the entities from all the teams (either Monkeys or Queens)\n entities = board.get_entities()\n\n # just like entities, but into a map (dictionary). The key is a Vec2I object containing the position where you\n # want to get the entity. Use entity_map.get(Vec2I(x, y)) instead of entity_map[Vec2I(x, y)] if you want to avoid\n # raising a KeyError. Vec2I is used for the positions\n entity_map = board.get_entity_map()\n\n # List all the possible legal moves\n all_possible_moves = board.get_legal_moves(your_team)\n\n # You can iterate over all the entities like so:\n for entity in entities:\n position = entity.get_position()\n team = entity.get_team()\n print('Entity at position {}, is from team {}'.format(position, team))\n\n # You can get other information from the board functions.\n your_queen = board.search_queen(your_team)\n\n # There are only two teams, either Team.WHITE or Team.BLACK\n enemy_team = None\n if your_team == Team.WHITE:\n enemy_team = Team.BLACK\n else:\n enemy_team = Team.WHITE\n\n # you can do the same with this one liner\n enemy_team = Team.WHITE if your_team == Team.BLACK else Team.BLACK\n\n # get the enemy queen info from the board\n enemy_queen = board.search_queen(enemy_team)\n\n # Get the position of an entity, for example, with this queen\n # This can also work with Monkeys\n your_queen_position = enemy_queen.get_position()\n\n # Get the queen stack (number of remaining monkeys)\n your_queen_stack = your_queen.get_stack()\n\n # Print the position information, positions use the object Vec2I, defined in the file src/game/geo.py\n print(your_queen_position.x, your_queen_position.y)\n\n # Get all the possible moves for your queen\n possible_moves = your_queen.get_legal_moves()\n\n # We want to move our queen one cell down\n your_queen_x = your_queen_position.x\n your_queen_y = your_queen_position.y\n\n # Again, the game uses the Vec2I object for the positions\n new_position = Vec2I(your_queen_x, your_queen_y + 1)\n\n # As the board is a DEEP COPY of the real board, you can use it to forecast the future, for example, if you\n # want to list all your enemy moves after the move you want to select\n\n # As said, you have to return a tuple of Vec2I from this function, but to make a play you have to put those\n # two Vec2I in a Command object\n move_command = Command(your_queen_position, new_position)\n\n # Make a copy of the current game state\n current_board = board.copy_state()\n\n # Plays the command, now the board is just like you have played your decised move\n board.make_play(move_command)\n\n # Forecast all the legal moves from your opponent\n opponent_possible_responses = board.get_legal_moves()\n\n # We check if the new position is a legal move\n if new_position in possible_moves:\n # We make this play by returning the new_position\n return your_queen_position, new_position\n else:\n new_position = random.choice(possible_moves)\n return your_queen_position, new_position\n \"\"\"\n begin = time()\n np_board = board_translate(board,your_team)\n move = alpha_beta_search(np_board, your_team)\n print(\"Execution time: \" + str(time() - begin))\n move = (Vec2I(move[0][0], move[0][1]),Vec2I(move[1][0],move[1][1]))\n return move",
"def move(self, board):\n\n # We record all game positions to feed them into the NN for training with the corresponding updated Q\n # values.\n self.board_position_log.append(board.getState().copy())\n\n nn_input = self.board_state_to_nn_input(board.getState())\n probs, _ = self.get_valid_probs([nn_input], self.q_net, [board])\n probs = probs[0]\n # print(probs)\n # print(type(probs))\n # print(probs.shape)\n # input()\n # print(probs)\n # Most of the time our next move is the one with the highest probability after removing all illegal ones.\n # Occasionally, however we randomly chose a random move to encourage exploration\n if (self.training is True) and \\\n ((self.game_counter < self.pre_training_games) or (np.random.rand(1) < self.random_move_prob)):\n available = []\n for index in range(6):\n if probs[index] != -1.0:\n available.append(index)\n randomOne = random.randint(0,len(available)-1)\n move = available[randomOne]\n else:\n move = np.argmax(probs)\n # We record the action we selected as well as the Q values of the current state for later use when\n # adjusting NN weights.\n self.action_log.append(move)\n\n # We execute the move and return the result\n board.makeMove(move)\n return board.getState(), board.isOver()",
"def move(self, board):\r\n self.start_time = time.time()\r\n disk_total = self.get_disk_count(self.my_color, board) + self.get_disk_count(self.opponent_color, board)\r\n\r\n if disk_total < 15:\r\n # In early-game, we can allow a deeper minimax search since there's not too many possible moves.\r\n self.minimax_max_depth = 7\r\n\r\n elif disk_total < 45:\r\n # In mid-game, minimax tree has the most branches. Therefore, we must give it space to breathe.\r\n self.minimax_max_depth = 5\r\n else:\r\n # In the very end-game, minimax tree has the least branches, so we can allow a full search.\r\n self.minimax_max_depth = 8\r\n\r\n possible_moves = self.find_possible_moves(board, self.my_color)\r\n\r\n # If there's only one move available, return it\r\n if len(possible_moves) == 1:\r\n return possible_moves[0]\r\n\r\n # If we can take a corner, take it and don't consider any other options.\r\n # This rarely backfires and allows to save a tiny bit of time\r\n corners = [(0,0), (0,7), (7,0), (7,7)]\r\n for corner in corners:\r\n if corner in possible_moves:\r\n return corner\r\n\r\n # Grow a minimax tree to find the best available move\r\n alpha_init = -10000000\r\n beta_init = 10000000\r\n\r\n available_moves = self.minimax(board, 0, self.my_color, alpha_init, beta_init)\r\n print(available_moves)\r\n if available_moves != 0:\r\n best_value = max(available_moves.values())\r\n for move in available_moves:\r\n if available_moves[move] == best_value:\r\n return move\r\n\r\n return None"
] | [
"0.7398969",
"0.717974",
"0.7096175",
"0.7025511",
"0.6846444",
"0.68410504",
"0.6791851",
"0.67604375",
"0.67156684",
"0.67131007",
"0.67055535",
"0.6701758",
"0.6698193",
"0.6682788",
"0.66431797",
"0.65866613",
"0.65660536",
"0.6501024",
"0.649859",
"0.6494082",
"0.6484072",
"0.6469209",
"0.6461079",
"0.640554",
"0.64008105",
"0.6384275",
"0.63677824",
"0.63536054",
"0.6347386",
"0.6341367"
] | 0.7213819 | 1 |
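The `move` method above relies on a `find_line_attempt(board, mark)` helper that is not shown in the record. Below is a minimal sketch of what such a helper could look like, written here as a free function, assuming the same nine board keys and the '-' empty marker; it is an illustration, not the original implementation.

# All eight possible three-in-a-row lines on the board dict used by move()
LINES = [
    ('topleft', 'topcenter', 'topright'),
    ('middleleft', 'middlecenter', 'middleright'),
    ('bottomleft', 'bottomcenter', 'bottomright'),
    ('topleft', 'middleleft', 'bottomleft'),
    ('topcenter', 'middlecenter', 'bottomcenter'),
    ('topright', 'middleright', 'bottomright'),
    ('topleft', 'middlecenter', 'bottomright'),
    ('topright', 'middlecenter', 'bottomleft'),
]

def find_line_attempt(board, mark):
    """Return the key that would complete a line of `mark`, or '' if no such line exists."""
    for line in LINES:
        values = [board[key] for key in line]
        # two cells already hold `mark` and the third is still empty
        if values.count(mark) == 2 and values.count('-') == 1:
            return line[values.index('-')]
    return ''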
Calculate series resistance from a jV list | def find_rs(v, j):
v_s, j_s = np.sort([v, j], axis=1)
m = np.polyfit(v_s[-10:], j_s[-10:], 1)
return 1/abs(m[0]) * 1000 #[Ohm cm^2]
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_s11(rs):\n freq = []\n resist = []\n react = []\n\n for f in rs:\n fs = f.split(\" \")\n fs = list(filter(None, fs))\n freq.append(float(fs[0]))\n resist.append(float(fs[5]))\n react.append(float(fs[6]))\n\n #print('freq',freq,'resist',resist,'react',react)\n\n refc = []\n s11 = []\n ts11 = 0\n for i in range(0,len(freq)):\n refc.append((resist[i] + 1j*react[i]- 50)/((resist[i]) + 1j*react[i] + 50));\n\n if refc[i]==0:\n s11.append(0)\n else:\n s11.append(20*math.log(abs(refc[i]),10))\n\n ts11 += s11[i]\n\n #print(s11)\n return (ts11, s11)",
"def calculateR(sapienses: list) -> float:\n r = 0\n for i in sapienses:\n r = r + i.numberInfected\n r=r/I0\n r = r*S/(S+R+D)\n return r",
"def _calculate_residual(original_values: List[Any], new_values: List[Any]) -> List[float]:\n residual = []\n for i in range(len(original_values)):\n residual.append(original_values[i] - new_values[i])\n return residual",
"def calc_bulk_values(s, Qv, Qs, print_info=False): \n # use the find_extrema algorithm\n ind, minmax = find_extrema(Qv, print_info=print_info)\n \n # compute dividing salinities\n smin=s[0]\n DS=s[1]-s[0]\n div_sal=[]\n i=0\n while i < len(ind): \n div_sal.append(smin+DS*ind[i])\n i+=1\n \n #calculate transports etc.\n Q_in_m=[]\n Q_out_m=[]\n s_in_m=[]\n s_out_m=[]\n index=[]\n i=0\n while i < len(ind)-1:\n # compute the transports and sort to in and out\n Q_i=-(Qv[ind[i+1]]-Qv[ind[i]])\n F_i=-(Qs[ind[i+1]]-Qs[ind[i]])\n s_i=np.abs(F_i)/np.abs(Q_i)\n if Q_i<0 and np.abs(Q_i)>1:\n Q_out_m.append(Q_i)\n s_out_m.append(s_i)\n elif Q_i > 0 and np.abs(Q_i)>1:\n Q_in_m.append(Q_i)\n s_in_m.append(s_i)\n else:\n index.append(i)\n i+=1\n div_sal = np.delete(div_sal, index)\n \n return Q_in_m, Q_out_m, s_in_m, s_out_m, div_sal, ind, minmax",
"def calc_stat_values(self):",
"def find_rsh(v, j):\r\n\r\n zp = sp.where(v[:-1] * v[1:] <= 0)[0][0] #make a list of A[x] * A[x -1] without usinf \"for\" loop in original python.\r\n m = np.polyfit(v[(zp - 5):(zp + 5)], j[(zp -5):(zp + 5)], 1)\r\n return 1/abs(m[0]) * 1000 #[Ohm cm^2]\r",
"def seriesResistance(self):\n return 13.38 * math.pow(self.concentration, -0.8397)",
"def efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.shortestpathij(i, j) == None):\n continue\n Temp += 1/self.shortestpathij(i, j)\n \n self.efficiency = 1/(self.supplynum*self.demandnum)*Temp",
"def resistance(stock):\n output= stock_max(stock)-(stock_max(stock)*.05)\n return output",
"def get_fermi_velocities():\n\n vr = Vasprun('vasprun.xml')\n # eigenvalues = vr.eigenvalues\n bs = vr.get_band_structure()\n bands = bs.bands\n kpoints = bs.kpoints\n efermi = bs.efermi\n h_bar = 6.582e-16 # eV*s\n\n fermi_bands = []\n for spin in bands:\n for i in range(len(bands[spin])):\n if max(bands[spin][i]) > efermi > min(bands[spin][i]):\n fermi_bands.append(bands[spin][i])\n\n fermi_velocities = []\n for band in fermi_bands:\n for i in range(len(band)-1):\n if (band[i] < efermi < band[i+1]) or (band[i] > efermi > band[i+1]):\n dk = np.sqrt((kpoints[i+1].cart_coords[0]\n - kpoints[i].cart_coords[0])**2\n + (kpoints[i+1].cart_coords[1]\n - kpoints[i].cart_coords[1])**2)\n v_f = abs((band[i+1] - band[i]) / (h_bar * dk))\n fermi_velocities.append(v_f)\n\n return fermi_velocities # Values are in Angst./s",
"def _get_values(self) -> ty.List[float]:\r\n ...",
"def find_local_mins(lis: list, tolerance: float = 0.1) -> list:\r\n temp = []\r\n mins = []\r\n\r\n # Go through the list\r\n for i in range(len(lis)):\r\n\r\n # get all items below the threshold\r\n if lis[i] < tolerance:\r\n temp.append(i)\r\n\r\n # Find the breaks between all of the rows\r\n curr = temp[0] - 1\r\n s = temp[0]\r\n\r\n # Step through partial series\r\n for i in temp:\r\n\r\n # if there has been a break in the series\r\n if i > curr:\r\n\r\n # Record the middle of the break\r\n mins.append((s + curr) / 2)\r\n\r\n # Ignore the break and continue following the series\r\n s = i\r\n curr = i\r\n\r\n curr += 1\r\n\r\n return mins",
"def compute_j(self, trajectory):\r\n J = 0\r\n for i, (_,_,r,_) in enumerate(trajectory):\r\n J += (self.domain.discount**i) * r\r\n return J",
"def get(self) -> list[float]:",
"def compute_relative_changes(a_list):\n result = []\n\n for i in range(1,len(a_list)):\n rel = (a_list[i] - a_list[i-1])/a_list[i-1]\n result.append(rel)\n\n return result",
"def compute_velocities(self):\n Ddemo_trajs = []\n\n for demo_traj in self._demo_trajs:\n d_traj = np.diff(demo_traj, axis=0)/self._dt\n #append last element to adjust the length\n d_traj = np.hstack([d_traj, d_traj[-1]])\n #add it to the list\n Ddemo_trajs.append(d_traj)",
"def mass_flow_func(self):\n residual = []\n for i in range(self.num_i):\n residual += [self.inl[i].m.val_SI - self.outl[i].m.val_SI]\n return residual",
"def mass_flow_func(self):\n residual = []\n for i in range(self.num_i):\n residual += [self.inl[i].m.val_SI - self.outl[i].m.val_SI]\n return residual",
"def dseries(temp, wavelength):\n if wavelength < 300 or wavelength > 830:\n return 0\n mm=wavelength%10\n s=_dseriesd(temp, wavelength-mm)\n if mm==0:\n return s\n m=mm*0.1\n e=_dseriesd(temp, (wavelength-mm)+10)\n return s+(e-s)*m",
"def __calculate(self, series):\n quants = self.__quants.copy()\n # TODO: Performance Optimization needed\n # the range from __minn to __maxx\n # __maxx and __minn both can be negative\n value_range = abs(self.__maxx - self.__minn)\n # if value_range is zero, skip calculations\n if value_range == 0.0:\n return quants\n for value in series:\n quant = (100 * abs(value - self.__minn) / value_range) / self.__width\n try:\n quants[int(quant)] += 1\n except KeyError as exc:\n # this is the case if value == __maxx\n if int(quant) == len(quants):\n quants[int(quant) - 1] += 1\n else:\n logging.error(\"quant = %s, value = %s, __maxx = %s, __minn = %s, width = %s\", quant, value, self.__maxx, self.__minn, self.__width)\n raise exc\n return quants",
"def topo_efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.topo_shortestpathij(i, j) == None):\n continue\n Temp += 1/self.topo_shortestpathij(i, j)\n \n self.topo_efficiency = 1/(self.supplynum*self.demandnum)*Temp",
"def fluid_func(self):\n residual = []\n for i in range(self.num_i):\n for fluid, x in self.inl[0].fluid.val.items():\n residual += [x - self.outl[0].fluid.val[fluid]]\n return residual",
"def compute_vvalue(trajectories, val_func):\n\n for on_trajectory in trajectories: # 15 trajectories, each with 50 time steps\n observes = on_trajectory['observes']\n values = val_func.predict(observes)\n on_trajectory['values'] = values",
"def metric(self, eles):\n point = eles.pop()\n dist = edist(point, self.soln)\n return dist",
"def jvalue(self, idx):\n \n length = len(self.data)\n\n a = self.data[:,idx] / self.data[:,idx].sum()\n b = np.dot((self.avg_dists / self.dset), (a * np.log(a)))\n results = -length * (b / ((self.avg_dists / \n self.dset).sum() * np.log(length)))\n return results",
"def calculate_speed(centres, fs, scale):\r\n xs = centres.apply(lambda c: c[0])\r\n ys = centres.apply(lambda c: c[1])\r\n positions = np.array([xs, ys]).T\r\n vectors = np.diff(positions, axis=0)\r\n distances = np.linalg.norm(vectors, axis=1)\r\n speeds = distances * float(scale) * float(fs)\r\n return speeds",
"def convert(sensor_variances, lookup_table, x):\n l_reading = int(x) #round to int for now\n\n dist_s1 = lookup_table['s1'][l_reading]\n dist_s2 = lookup_table['s2'][l_reading]\n \n sv = sensor_variances\n\n fused_dist = ( (dist_s1 * sv[0]**-1) + (dist_s2 * sv[1]**-1) ) / (sv[0]**-1 + sv[1]**-1)\n\n return fused_dist",
"def calc_nuj_list(theta_list) :\n return theta_list / np.sin(2*theta_list)",
"def value(self) -> List[float]:\n raise NotImplementedError()",
"def vel2acc(timeseries, dt):\n return np.diff(np.hstack(([0], timeseries)) * (1.0 / dt))"
] | [
"0.57512945",
"0.55763984",
"0.5393082",
"0.53737885",
"0.53721565",
"0.5360579",
"0.5360513",
"0.53144544",
"0.5287248",
"0.52655554",
"0.523811",
"0.52313983",
"0.5228463",
"0.52057403",
"0.51803756",
"0.5156343",
"0.5149992",
"0.5149992",
"0.5138027",
"0.5129928",
"0.51286477",
"0.51086843",
"0.51047176",
"0.5096698",
"0.5091814",
"0.5086687",
"0.5081406",
"0.5065808",
"0.50547713",
"0.50486124"
] | 0.5693607 | 1 |
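A small usage sketch for the series-resistance helper above. It assumes `find_rs` is defined in the same script, that `v` is in volts and `j` in mA/cm^2 (as the `Ohm cm^2` unit comment implies), and it uses made-up piecewise-linear values so the expected answer is known in advance.

import numpy as np

# Synthetic J-V sweep (illustrative numbers only): a ~2 mA/(V cm^2) background slope
# plus a steep ~400 mA/(V cm^2) linear rise past 0.55 V
v = np.linspace(-0.2, 0.8, 101)                                       # voltage [V]
j = -20.0 + 2.0 * v + np.where(v > 0.55, 400.0 * (v - 0.55), 0.0)     # current density [mA/cm^2]

print(find_rs(v, j))   # ~2.49 Ohm cm^2, i.e. 1000 / 402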
Calculate shunt resistance from a jV list | def find_rsh(v, j):
    zp = sp.where(v[:-1] * v[1:] <= 0)[0][0]  # find the index where v changes sign (elementwise v[x] * v[x+1] <= 0, no "for" loop needed)
m = np.polyfit(v[(zp - 5):(zp + 5)], j[(zp -5):(zp + 5)], 1)
return 1/abs(m[0]) * 1000 #[Ohm cm^2]
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculateR(sapienses: list) -> float:\n r = 0\n for i in sapienses:\n r = r + i.numberInfected\n r=r/I0\n r = r*S/(S+R+D)\n return r",
"def get_total_fuel_requirements_part2(mass_lst: List[int]) -> int:\n total_fuel = 0\n for mass in mass_lst:\n while True:\n if get_fuel_requirements(mass) <= 0:\n break\n else:\n mass = get_fuel_requirements(mass)\n total_fuel += mass\n return total_fuel",
"def calc_dist(L, Seff):\n return (L / Seff)**0.5",
"def solution(l):\n wn = turtle.Turtle()\n sqrfield(l)\n sprinkler(l)\n shade(l)\n sol = shaded_area(l)\n print(\"The area not covered by the sprinklers is {0:.2f}\".format(sol),\"units squared\")",
"def distances(self):",
"def __secant(x_list, fx_list):\n if fx_list[-1] != 0:\n if len(x_list) > 1 and abs(fx_list[-1]) != abs(fx_list[-2]):\n x_0 = x_list[-2]\n x_1 = x_list[-1]\n fx_0 = fx_list[-2]\n fx_1 = fx_list[-1]\n slope = (fx_1 - fx_0) / (x_1 - x_0)\n return x_1 + (-fx_1 / slope)\n else:\n return x_list[0] * 0.9 + 0.0001\n else:\n return x_list[-1]",
"def calculate_stress(self, x, side=1):\n s11, s22, s12 = self.calculate_principle_stresses(x, side)\n vm_stress = numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)\n return vm_stress.squeeze()",
"def radarScat(sp, wl, K2=0.93):\n#TODO check if K2 is for ice or liquid!\n prefactor = 2*np.pi*wl**4/(np.pi**5*K2)\n \n \n reflect_hh = prefactor*(sp.Z11+sp.Z22+sp.Z12+sp.Z21)\n reflect_vv = prefactor*(sp.Z11+sp.Z22-sp.Z12-sp.Z21)\n kdp = 1e-3*(180.0/np.pi)*wl*sp.S22r_S11r\n\n reflect_hv = prefactor*(sp.Z11 - sp.Z12 + sp.Z21 - sp.Z22)\n #reflect_vh = prefactor*(sp.Z11 + sp.Z12 - sp.Z21 - sp.Z22).values\n ldr_h = reflect_hh/reflect_hv\n \n # delta_hv np.arctan2(Z[2,3] - Z[3,2], -Z[2,2] - Z[3,3])\n #a = (Z[2,2] + Z[3,3])**2 + (Z[3,2] - Z[2,3])**2\n #b = (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])\n #c = (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])\n #rho_hv np.sqrt(a / (b*c))\n rho_hv = np.nan*np.ones_like(reflect_hh) # disable rho_hv for now\n #Ah = 4.343e-3 * 2 * scatterer.wavelength * sp.S22i.values # attenuation horizontal polarization\n #Av = 4.343e-3 * 2 * scatterer.wavelength * sp.S11i.values # attenuation vertical polarization\n\n #- test: calculate extinction: TODO: test Cextx that is given in DDA with this calculation.\n k = 2 * np.pi / (wl)\n cext_hh = sp.S22i*4.0*np.pi/k\n cext_vv = sp.S11i*4.0*np.pi/k\n \n return reflect_hh, reflect_vv, reflect_hv, kdp, rho_hv, cext_hh, cext_vv",
"def drag(s):\n\n r = np.linalg.norm(s[0:3])\n v_atm = we*np.array([-s[1],s[0],0]) # calculate velocity of atmosphere\n v_rel = s[3:6] - v_atm\n\n rs = Re*(1-(ee*s[2]/r)**2) # calculate radius of surface\n h = r-rs\n p = 0.6*np.exp(-(h-175)*(29.4-0.012*h)/915) # in kg/km^3\n coeff = 3.36131e-9 # in km^2/kg\n acc = -p*coeff*np.linalg.norm(v_rel)*v_rel\n\n return acc",
"def get_total_fuel_requirements(mass_lst: List[int]) -> int:\n total_fuel = 0\n for mass in mass_lst:\n fuel_requirement = get_fuel_requirements(mass)\n total_fuel += fuel_requirement\n return total_fuel",
"def virtual_distance(self):\n conflict_zone_radio = 384.0\n path_width = 172.0\n right_turn_radio = path_width / 4.0\n left_turn_radio = 3 * path_width / 4.0\n initial_straight_section = conflict_zone_radio - path_width / 2.0\n if self.get_intention() == \"s\":\n virtual_distance_value = self.get_virtual_x_position()\n elif self.get_intention() == \"r\":\n # Calculate real virtual distance\n if self.get_virtual_x_position() <= initial_straight_section:\n virtual_distance_value = self.get_virtual_x_position()\n elif self.get_virtual_y_position() > -right_turn_radio:\n virtual_distance_value = (\n initial_straight_section + atan(\n (\n self.get_virtual_x_position() -\n initial_straight_section\n ) / (right_turn_radio + self.get_virtual_y_position())\n ) * right_turn_radio\n )\n else:\n virtual_distance_value = (\n initial_straight_section + pi * right_turn_radio / 2.0 -\n self.get_virtual_y_position() - right_turn_radio\n )\n\n a = path_width / 2.0\n b = right_turn_radio + path_width / 4.0\n c = pi * right_turn_radio / 2.0\n # Scale virtual distance\n if virtual_distance_value <= initial_straight_section + c:\n virtual_distance_value *= (\n (initial_straight_section + a + b) /\n (initial_straight_section + c)\n )\n else:\n virtual_distance_value += a + b - c\n\n else:\n # Calculate real virtual distance\n if self.get_virtual_x_position() <= initial_straight_section:\n virtual_distance_value = self.get_virtual_x_position()\n elif self.get_virtual_y_position() < left_turn_radio:\n virtual_distance_value = (\n initial_straight_section + atan(\n (\n self.get_virtual_x_position() -\n initial_straight_section\n ) / (\n left_turn_radio -\n self.get_virtual_y_position()\n )\n ) * left_turn_radio\n )\n else:\n virtual_distance_value = (\n initial_straight_section + pi * left_turn_radio / 2 +\n self.get_virtual_y_position() - left_turn_radio\n )\n\n a = path_width / 2\n b = right_turn_radio + path_width / 4\n c = pi * left_turn_radio / 2\n # Scale virtual distance\n if virtual_distance_value <= initial_straight_section + c:\n virtual_distance_value *= (\n (initial_straight_section + a + b) /\n (initial_straight_section + c)\n )\n else:\n virtual_distance_value += a + b - c\n\n return virtual_distance_value",
"def distance_between_wheels():",
"def get_total_shield(self,obs):",
"def calc_dist_to_poi(self,agent):\n mini_dist = 100000 \n for poi in self.poi_pos_list:\n mini_dist = np.linalg.norm(agent.get_pos() - poi)\n\n return mini_dist",
"def calc_muj_list(theta_list) :\n return np.sin(theta_list)**2 / (2*theta_list - np.sin(2*theta_list))",
"def find_rs(v, j):\r\n v_s, j_s = np.sort([v, j], axis=1)\r\n m = np.polyfit(v_s[-10:], j_s[-10:], 1)\r\n return 1/abs(m[0]) * 1000 #[Ohm cm^2]\r",
"def dust_solver(WL, WR):\n\n # Compute the conserved quantities\n UL = np.full([len(WL), 2], np.nan)\n UL[:,0] = WL[:,i_rho_d]\n UL[:,1] = WL[:,i_rho_d]*WL[:,i_vel_d]\n\n UR = np.full([len(WR), 2], np.nan)\n UR[:,0] = WR[:,i_rho_d]\n UR[:,1] = WR[:,i_rho_d]*WR[:,i_vel_d]\n\n fL = UL*WL[:,4].reshape(-1,1)\n fR = UR*WR[:,4].reshape(-1,1)\n\n # Upwind the advection\n f_dust = np.zeros_like(UL)\n f_dust[(WL[:,4] > 0)] += fL[(WL[:,4] > 0)]\n f_dust[(WR[:,4] < 0)] += fR[(WR[:,4] < 0)]\n\n # Dust signal speed: Roe-average\n #R = np.sqrt(WL[:,3]/WR[:,3])\n #f = R /(1 + R)\n\n #Sd = (f*WL[:,4] + (1-f)*WR[:,4]).reshape(-1,1)\n #f_dust = Sd*np.where(Sd > 0, UL, UR)\n\n return f_dust",
"def show_rel_wt(list_obj):\r\n total = sum_list(list_obj)\r\n wt_list = []\r\n \r\n for num in list_obj:\r\n weight = int((num / total) * 100)\r\n wt_list.append(f\"{weight}%\")\r\n \r\n return wt_list",
"def V(x,nw):\n V = 0\n pList, iList = getLists(nw)\n #print(\"pList : {}\".format(pList))\n #print(\"iList : {}\".format(iList))\n \n if (checkValue(x,iList)):\n V = -300/Eh\n elif (x in pList):\n V = -150/Eh\n return V",
"def stairway_path(stairway: Sequence[Union[float, int]]) -> Union[float, int]:\n\tstairway_sum = [0] * len(stairway)\n\tstairway_sum[0] = stairway[0]\n\tstairway_sum[1] = stairway[1]\n\n\tfor i in range(2, len(stairway)):\n\t\tstairway_sum[i] = stairway[i] + min(stairway_sum[i - 1], stairway_sum[i - 2])\n\n\t# for i in range(len(stairway)):\n\t# \tif i + 1 < len(stairway) and stairway_sum[i + 1] > stairway_sum[i] + stairway[i + 1]:\n\t# \t\tstairway_sum[i + 1] = stairway_sum[i] + stairway[i + 1]\n\t# \tif i + 2 < len(stairway) and stairway_sum[i + 2] > stairway_sum[i] + stairway[i + 2]:\n\t# \t\tstairway_sum[i + 2] = stairway_sum[i] + stairway[i + 2]\n\n\tmin_cost =stairway_sum[-1]\n\tprint(stairway_sum)\n\treturn min_cost",
"def set_potential_target(self):\r\n \r\n import copy\r\n import numpy as np\r\n \r\n # Get the hydraulic conductivity\r\n for e in self.model.elementlist:\r\n if isinstance(e, ElementMoebiusBase) or isinstance(e, ElementUniformBase):\r\n temp_k = e.k\r\n \r\n for e in self.model.elementlist:\r\n if isinstance(e, ElementInhomogeneity):\r\n if e.are_points_inside_polygon(self.zc):\r\n temp_k = e.k\r\n \r\n # Create a list of hydraulic potential targets\r\n self.strength = copy.copy(self.head_change)\r\n if self.model.aquifer_type == 'confined':\r\n # Strack 1989, Eq. 8.6\r\n self.strength = temp_k*self.model.H*self.strength - \\\r\n 0.5*temp_k*self.model.H**2\r\n elif self.model.aquifer_type == 'unconfined':\r\n # Strack 1989, Eq. 8.7\r\n self.strength = 0.5*temp_k*self.strength**2\r\n elif self.model.aquifer_type == 'convertible':\r\n # Find out which points are confined and which are unconfined\r\n index_conf = np.where(self.strength >= self.model.H)[0]\r\n index_unconf = np.where(self.strength < self.model.H)[0]\r\n # Account for the confined points\r\n # confined: Strack 1989, Eq. 8.6\r\n self.strength[index_conf] = \\\r\n temp_k[index_conf]*self.model.H*self.strength[index_conf] - \\\r\n 0.5*temp_k[index_conf]*self.model.H**2\r\n # unconfined: Strack 1989, Eq. 8.7\r\n self.strength[index_unconf] = \\\r\n 0.5*temp_k[index_unconf]*self.strength[index_unconf]**2",
"def sh( values ):\n # ECMWF normalizes the spherical harmonic coeffs differently than NCEP.\n # (m=0,n=0 is global mean, instead of sqrt(2)/2 times global mean)\n fld = 2.*values/np.sqrt(2.)\n \n #------SPLITTING IT UP IN AN IMAGARY AND REAL PART--------\n fldr = fld[ 0::2 ] #annenhver verdi fra 0\n fldi = fld[ 1::2 ] #annenhver verdi fra 1\n fldn = np.zeros( fldr.shape, 'F' ) #blir halvparten så stor som orginale fld\n fldn.real = fldr #legges da til i fldn vectoren\n fldn.imag = fldi\n #----------------------------------------------------------\n \n nlons = 360 #Have a feeling it probably is number of values like grid val\n nlats = 1280 #web sais it shourld be 180.. wellwell, seems to work\n s = spharm.Spharmt( nlons, nlats ) \n \n data = s.spectogrd( fldn ) #Hvis nlats = 180, så feiler denne delen pga hvordan formelen fungerer..\n \n lons = ( 360./nlons ) * np.arange( nlons )\n lats = 90.-( 180./( nlats - 1 ) ) * np.arange( nlats )\n lons, lats = np.meshgrid( lons, lats )\n \n #stack grids side-by-side (in longitiudinal direction), so\n # any range of longitudes (between -360 and 360) may be plotted on a world map.\n lons = np.concatenate(( lons - 360, lons ), 1 )\n lats = np.concatenate(( lats, lats ), 1 )\n data = np.concatenate(( data, data ), 1 )\n \n return lats, lons, data",
"def calculate_speed(centres, fs, scale):\r\n xs = centres.apply(lambda c: c[0])\r\n ys = centres.apply(lambda c: c[1])\r\n positions = np.array([xs, ys]).T\r\n vectors = np.diff(positions, axis=0)\r\n distances = np.linalg.norm(vectors, axis=1)\r\n speeds = distances * float(scale) * float(fs)\r\n return speeds",
"def heuristic_cal(current: list, goal: list) -> int:\n\n current_locations = state_to_locations(current)\n goal_locations = state_to_locations(goal)\n\n h_val = 0 # Tracks the cost of the heuristic function\n for i in range(1, 16):\n h_val += (abs(current_locations[i][0] - goal_locations[i][0]) +\n abs(current_locations[i][1] - goal_locations[i][1]))\n \"\"\" Loops through both lists of locations and adds the Manhattan distance \n of each number to the sum h_val. The range is from 1 to 16 because the \n blank in either state is not taken into account.\"\"\"\n\n return h_val",
"def molar_mass_dry_air():\n return 28.9647",
"def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)",
"def resistance(stock):\n output= stock_max(stock)-(stock_max(stock)*.05)\n return output",
"def calculate_SWIR1_TOA(d,sza,L):\n rho_SWIR1_TOA=np.pi*(d**2)*L[:,118,:]/(0.225*np.cos(sza))\n return rho_SWIR1_TOA",
"def calc_nuj_list(theta_list) :\n return theta_list / np.sin(2*theta_list)",
"def SSHZ(L, teff):\n return [np.sqrt(L/seffi(teff)), np.sqrt(L/seffo(teff))]"
] | [
"0.5403019",
"0.5291932",
"0.5259825",
"0.52283496",
"0.5211883",
"0.51628804",
"0.51541704",
"0.5146375",
"0.5139574",
"0.5136119",
"0.513204",
"0.5131023",
"0.511907",
"0.5116268",
"0.5108971",
"0.51066464",
"0.510313",
"0.5085152",
"0.5072366",
"0.5072023",
"0.5070843",
"0.5063493",
"0.5063127",
"0.50563914",
"0.50550014",
"0.50545895",
"0.50308406",
"0.50163454",
"0.4995743",
"0.4993059"
] | 0.61245406 | 0 |
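The same synthetic sweep illustrates the shunt-resistance helper above. The sketch assumes `find_rsh` from the record is pasted into the same script and binds `sp` to NumPy, since the only attribute the record's code needs from `sp` is `where`.

import numpy as np
import numpy as sp   # find_rsh above calls sp.where; NumPy provides it

v = np.linspace(-0.2, 0.8, 101)                                       # voltage [V]
j = -20.0 + 2.0 * v + np.where(v > 0.55, 400.0 * (v - 0.55), 0.0)     # current density [mA/cm^2]

print(find_rsh(v, j))  # ~500 Ohm cm^2, the inverse of the ~2 mA/(V cm^2) slope at V = 0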
Whether this instruction is selected or not. | def selected(self):
return self.infodock.is_instruction_selected(self.addr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_selected(self) -> bool:\n return self.proto.is_selected",
"def is_selected(self) -> bool:\r\n return self.selected",
"def is_selected(self):\n return self.container['is_selected']",
"def is_in_cmd(self):\r\n return self.select_cmd is not None",
"def IsSelected(self):\r\n\r\n return self._hasHilight != 0",
"def is_selected(self):\n return self._selected",
"def is_selected(self):\n return self._selected",
"def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False",
"def selectable(cls):\n return True",
"def is_selected(self):\n return self._element_call(lambda: self.el.is_selected)",
"def is_instruction(self):\n return False",
"def requires_selection(self) -> bool:\n return True",
"def is_instruction(self):\n return True",
"def __bool__(self):\n return any(self.smask)",
"def IsSelected(self, item):\r\n\r\n return item.IsSelected()",
"def selected(self):\n\n return self.element().is_selected() if self.exists() else False",
"def value(self):\n return self.element.is_selected()",
"def HasSelection(self):\n sel = super(EditraBaseStc, self).GetSelection()\n return sel[0] != sel[1]",
"def is_selected(self, is_selected):\n\n self.container['is_selected'] = is_selected",
"def is_selected(self):\n return NSCSpecIO().read()[\"profile\"] == self.path.stem",
"def is_multi_selection(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_multi_selection\")",
"def idSelect(self):\n # So IDPs don't have to import the constant\n return self.identity == IDENTIFIER_SELECT",
"def autoselect(self):\n # type: () -> bool\n return self._autoselect",
"def isAnyFlagSelected(self):\n for key in self.canSelectFlags.keys():\n if self.canSelectFlags[key] == 1:\n return 1\n return 0",
"def __bool__(self):\n context, active_obj, actual_mode, mode = self.get_context()\n if not mode: return False\n \n if mode == 'OBJECT':\n return bool(context.selected_objects)\n elif mode == 'EDIT_MESH':\n mesh = active_obj.data\n if actual_mode == 'EDIT_MESH':\n return bool(mesh.total_vert_sel)\n else:\n return any(item.select for item in mesh.vertices)\n elif mode in {'EDIT_CURVE', 'EDIT_SURFACE'}:\n for spline in active_obj.data.splines:\n for item in spline.bezier_points:\n if (item.select_control_point or\n item.select_left_handle or\n item.select_right_handle):\n return True\n for item in spline.points:\n if item.select:\n return True\n elif mode == 'EDIT_METABALL':\n return bool(active_obj.data.elements.active)\n elif mode == 'EDIT_LATTICE':\n return any(item.select for item in active_obj.data.points)\n elif mode == 'EDIT_ARMATURE':\n return any(item.select_head or item.select_tail\n for item in active_obj.data.edit_bones)\n elif mode == 'POSE':\n return any(item.select for item in active_obj.data.bones)\n elif mode == 'PARTICLE':\n # Theoretically, particle keys can be selected,\n # but there seems to be no API for working with this\n pass\n else:\n pass # no selectable elements in other modes\n \n return False",
"def requested() -> bool:\n\treturn _flag.is_set()",
"def is_selected(self, selector):\n el = self.locate_element(selector)\n return el.is_selected()",
"def __is_selected_frame(self, frame_index):\n return frame_index == self.selected_index",
"def is_on(self):\n pass",
"def has_target(self):\n return self.target is not None"
] | [
"0.73396176",
"0.7214151",
"0.72019464",
"0.71467733",
"0.7037371",
"0.7018988",
"0.7018988",
"0.68195313",
"0.6804341",
"0.67748004",
"0.6731266",
"0.66472644",
"0.6607464",
"0.65271384",
"0.65178984",
"0.6441089",
"0.64344895",
"0.6430764",
"0.6391337",
"0.637323",
"0.6329193",
"0.6223868",
"0.62106717",
"0.61576915",
"0.6143863",
"0.61159825",
"0.6108867",
"0.6064187",
"0.6007549",
"0.59959614"
] | 0.8597773 | 0 |
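The `selected` property above is a one-line delegation to an `infodock` object. A self-contained toy of that pattern follows; `InfoDock` and `Insn` are invented stand-ins for illustration, not the application's real classes.

class InfoDock:
    """Toy selection store: remembers which instruction addresses are selected."""
    def __init__(self):
        self.selected_addrs = set()

    def is_instruction_selected(self, addr):
        return addr in self.selected_addrs


class Insn:
    def __init__(self, addr, infodock):
        self.addr = addr
        self.infodock = infodock

    @property
    def selected(self):
        # same delegation shape as the record's one-liner
        return self.infodock.is_instruction_selected(self.addr)


dock = InfoDock()
insn = Insn(0x401000, dock)
print(insn.selected)              # False
dock.selected_addrs.add(0x401000)
print(insn.selected)              # True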
Lookup all valid routes for an address | def lookup_routes(self, daddr):
outroutes = []
for entry in self.routes:
# split netmask and daddr by the IP dots
netmask_split = entry[NMSK].split('.')
daddr_split = daddr.split('.')
            # bitwise AND the netmask with the daddr
result = []
for i in range(0, len(netmask_split)):
result.append(str(int(netmask_split[i]) & int(daddr_split[i])))
# compare ANDed result to the network
is_valid = True
network_split = entry[NTWK].split('.')
for i in range(0, len(network_split)):
if result[i] != network_split[i]:
is_valid = False
break
if is_valid:
outroutes.append(entry)
if len(outroutes) == 0:
return outroutes
        # keep only the routes whose netmask gives the longest matching prefix
outroutes.sort(key=lambda r: int(r[NMSK].replace('.', '')), reverse=True)
longest_matching_prefix = int(outroutes[0][NMSK].replace('.', ''))
outroutes = list(filter(lambda r: int(r[NMSK].replace('.', '')) == longest_matching_prefix, outroutes))
return outroutes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scan_addresses(self, root=None):",
"def lookup_routes(self, daddr):\n outroutes = []\n for entry in self.routes:\n for varat in entry[\"varats\"]:\n ip = varat[\"network\"].split(\".\")\n netmask = varat[\"netmask\"].split(\".\")\n\n mask_bit = \"\".join([ format(int(quad), \"08b\") for quad in netmask ])\n num_ones = mask_bit.count(\"1\")\n ip_bin = \"\".join([ format(int(quad), \"08b\") for quad in ip ])\n ip_start = ip_bin[:num_ones]\n daddr_bin = \"\".join([ format(int(quad), \"08b\") for quad in daddr.split(\".\") ])\n if daddr_bin.startswith(ip_start):\n outroutes.append({\"peer\": entry[\"peer\"], \"us\": entry[\"us\"], \"ghoti\": num_ones, \"msg\": varat})\n\n #print(\"outroutessssssssssssssssssssss\", outroutes)\n return outroutes",
"def lookup_routes(self, daddr):\n # TODO\n outroutes = []\n\n net_pre = daddr[0 : daddr.index('.')] + '.0.0.0'\n\n #print(self.routes)\n\n for ip in self.routes.keys():\n network = self.routes[ip][NTWK]\n net_pre_2 = network[0:network.index('.')] + '.0.0.0'\n if net_pre_2 == net_pre:\n outroutes.append(ip)\n return outroutes",
"def lookup_routes(self, daddr):\n outroutes = []\n binary_of_dest = self.ip_to_binary(daddr)\n best_cidr = float('-inf')\n\n for r in self.routes:\n # convert network and netmask to binary for longest prefix matching\n binary_of_network = self.ip_to_binary(r[MESG][NTWK])\n cidr_of_netmask = self.ip_to_binary(r[MESG][NMSK]).count('1')\n # use subnet mask to get the prefix\n dst = binary_of_dest[:cidr_of_netmask]\n ntk = binary_of_network[:cidr_of_netmask]\n # matching prefixes?\n if dst == ntk:\n # found better match. clear and start over with just this route\n if best_cidr < cidr_of_netmask:\n best_cidr = cidr_of_netmask\n outroutes.clear()\n outroutes.append(r)\n # 1 to 1 match, add route to list\n if best_cidr == cidr_of_netmask:\n outroutes.append(r)\n\n return outroutes",
"def routes():\n routeList = []\n for profile in globalProfile():\n routeList.append(profile.route)\n return routeList",
"def get_routes():\n\n return Db().get_line_ids()",
"def list_routes():\n import urllib\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\n \"{:35s} {:35s} {}\".format(\n rule.endpoint,\n methods,\n url\n )\n )\n output.append(line)\n\n for line in sorted(output):\n print(line)",
"def resolve_routes(\n routes: List[RouteMetadata],\n token_network_address: TokenNetworkAddress,\n chain_state: ChainState,\n) -> List[RouteState]:\n\n resolvable = []\n for route_metadata in routes:\n if len(route_metadata.route) < 2:\n continue\n\n channel_state = views.get_channelstate_by_token_network_and_partner(\n chain_state=chain_state,\n token_network_address=token_network_address,\n partner_address=route_metadata.route[1],\n )\n\n if channel_state is not None:\n resolvable.append(\n RouteState(\n route=route_metadata.route,\n # This is only used in the mediator, so fees are set to 0\n estimated_fee=FeeAmount(0),\n )\n )\n return resolvable",
"def routes():\n import urllib.request, urllib.parse, urllib.error\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, url))\n output.append(line)\n for line in sorted(output):\n print (line)",
"def filter_relationships(self, srcip, routes):\n outroutes = []\n return outroutes",
"def routes(self) -> List[Tuple[int, bytes]]:\n raise NotImplementedError() # pragma: no cover",
"def route_list(request):\n\n hard_limit = 100\n\n if request.method == 'GET' and 'prefix__in' in request.GET:\n # find all routers covered by this prefix\n match_prefix = request.GET.get('prefix__in')\n # max number of items to return\n limit = request.GET.get('count', 10)\n if limit < 1 or limit > hard_limit:\n return http.HttpResponseBadRequest('invalid value for count parameter')\n\n try:\n if ':' in match_prefix:\n # v6\n pfx = resource_set.resource_range_ipv6.parse_str(match_prefix)\n manager = RouteOriginV6\n else:\n # v4\n pfx = resource_set.resource_range_ipv4.parse_str(match_prefix)\n manager = RouteOrigin\n except (AssertionError, rpki.exceptions.BadIPResource), e:\n return http.HttpResponseBadRequest(e)\n\n try:\n qs = manager.objects.filter(prefix_min__gte=pfx.min,\n prefix_max__lte=pfx.max)[:limit]\n # FIXME - a REST API should really return the url of the resource,\n # but since we are combining two separate tables, the .pk is not a\n # unique identifier.\n matches = [{'prefix': str(x.as_resource_range()), 'asn': x.asn} for x in qs]\n except IndexError:\n # no matches\n matches = []\n\n return http.HttpResponse(json.dumps(matches), content_type='text/javascript')\n\n return http.HttpResponseBadRequest()",
"def get_routes():\n # get from cache if it exists\n routes = cache.get(\"routes\")\n if routes:\n return routes\n\n trips_url = \"https://data.edmonton.ca/api/views/ctwr-tvrd/rows.json?accessType=DOWNLOAD\"\n bus_heading_url = \"https://data.edmonton.ca/resource/atvz-ppyb.json\"\n\n trips_response = requests.get(trips_url)\n bus_heading_response = requests.get(bus_heading_url)\n\n if trips_response.status_code == 200 and bus_heading_response.status_code == 200:\n trips = trips_response.json()\n headings = bus_heading_response.json()\n\n bus_to_headings = {}\n trip_to_bus = {}\n\n for heading in headings:\n if \"route_long_name\" in heading:\n bus_to_headings[heading[\"route_id\"]] = heading[\"route_long_name\"]\n\n for item in trips[\"data\"]:\n trip_id = item[-4]\n bus_number = item[-6]\n if bus_number in bus_to_headings:\n bus_heading = bus_to_headings[bus_number]\n trip_to_bus[trip_id] = [bus_number, bus_heading]\n \n # store the routes in the cache for five minutes\n cache.set(\"routes\", trip_to_bus, timeout=5*60) \n return trip_to_bus",
"def get_all_cab_routes(self):\n cab_routes = self.admin_repository.get_all_routes()\n if cab_routes:\n for cab_route in cab_routes:\n print(\"\\nId : {}\".format(cab_route[0]))\n print(\"Cab Number : {}\".format(cab_route[1]))\n print(\"Route Id : {}\".format(cab_route[2]))\n print(\"Stop Name : {}\".format(cab_route[3]))\n print(\"Stop stage : {}\".format(cab_route[4]))\n print(\"Timings : {}\".format(cab_route[5]))\n print(\"----------------------------\")\n return True\n else:\n print(\"Data Empty/Not Found.\")\n return False",
"def check_if_repeated_route(route, user_lat, user_lon):\n bus_route = route\n repeated_routes = {'101':'','105':'','106':'','107':'','230':'','111':'','113':'','116':'','119':'','240':'','120':'','271':'','70':'','2':'pt','7':'','8':'','18':'','29':'','1':'pt','3':'pt','4':'pt','402':'pt','425':'pt','202':'pt','212':'pt','214':'pt','102':'pt','10':'pt','11':'pt','13':'','28':'pt','41':'','45':'','55':'pt','57':'pt','63':'pt','47':'','48':'','60':'','64':'','67':'','42':'','12':'','21':''}\n intercity_transit = ['47','48','60','64','67','42','12','13','21','41','45']\n king_county_metro = ['917', 'A Line', '225', '231', '239', '230', '250', '37', '910', '628', '372', '373', '630', '218', '631', '63', '4', '36', '43', '986', '823', '44', '987', '212', '45', '988', 'Trailhead Direct Issaquah Alps', '989', '824', '214', '47', '180', '48', '635', '216', '5', '217', '982', '41', '21', '984', 'F Line', 'E Line', '342', '345', '346', '952', '347', '894', '348', '49', '248', '355', '895', '116', '243', '245', '893', '118', '246', '661', '931', '119', '67', '915', '12', '249', '120', '238', '62', '226', '111', '24', '64', '193', '113', '240', '65', '930', '241', '114', '255', '73', '128', '74', '257', '75', '13', '907', '121', '122', '7', '123', '252', '70', '124', '71', '125', '221', '244', 'Trailhead Direct Cougar Mt.', '55', '994', '50', '995', 'Trailhead Direct Mailbox Peak', '219', '981', 'Trailhead Direct Mt. Si', '22', '224', '157', '204', '101', '232', '102', '105', '57', '106', '234', '156', '107', '235', '236', '60', '980', '237', 'B Line', '11', '775', '56', '1', '10', '166', '167', '903', '158', '908', '159', '3', '906', '301', '913', '914', '303', '164', '304', '916', '901', '178', '169', '308', '17', '309', '31', '311', '312', '177', '168', '629', 'Duvall-Monroe Shuttle', '268', '14', '76', '77', '131', '26', '773', '29', '132', '78', '40', '8', '887', 'C Line', '277', '9', '153', '28', '154', '269', 'D Line', '27', '143', '271', '886', '148', '888', '889', '15', '150', '891', '892', '208', '200', '181', '32', '182', '33', '183', '330', '331', '186', '187', '316', '179', '18', '192', '197', '2', '19', '190']\n pierce_transit = ['1','2','3','4','402','425','202','212','214','102','10','11','13','28','41','45','48','55','57','63']\n north_routes = ['101','105','106','107','230','111','113','116','119','240','120','271','70','2','3','4','7','8','12','18','29']\n \n if bus_route in repeated_routes:\n if user_lon < -122.5 and user_lat > 47.1:\n # going to be pierce county (or kitsap)\n if bus_route in pierce_transit: \n bus_route += 'pt'\n else:\n bus_route += repeated_routes[bus_route]\n elif user_lat > 47.7:\n # going to be community transit or everett transit (N)\n if bus_route in north_routes: \n bus_route += 'N'\n else:\n bus_route += repeated_routes[bus_route] \n elif user_lat > 47.33:\n # going to be king county metro\n if bus_route in king_county_metro:\n bus_route = bus_route\n else:\n bus_route += repeated_routes[bus_route]\n elif user_lat > 47.08:\n # going to be pierce transit\n if bus_route in pierce_transit: \n bus_route += 'pt'\n else:\n bus_route += repeated_routes[bus_route] \n else:\n # going to be intercity transit\n if bus_route in intercity_transit: \n bus_route += 'it'\n else:\n bus_route += repeated_routes[bus_route] \n\n return bus_route",
"def optimizedRoutePossibilities(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tprint(find_all_paths2(graph,couple[0],couple[1])[0])",
"def lookup_routes(self, daddr):\n outroutes = []\n for route in self.forwarding_table:\n anded_address = self.and_addresses(daddr, route[NMSK])\n if anded_address == route[NTWK]:\n outroutes.append(route)\n return outroutes",
"def possible_routes(srcLat, srcLon, destLat, destLon, searchPreference, dateTime):\n\n dateTime = dateTime.split(\",\")\n\n routes = Db().get_best_route(srcLat, srcLon, destLat, destLon)\n try:\n best_routes = get_three_best_routes(routes, searchPreference, dateTime)\n except IndexError:\n best_routes = \"No Journey Found\"\n\n # Get the address for map display purposes\n try:\n for i in range(len(best_routes)):\n #address is a dataframe, hency the use of .loc\n address = Db().get_single_address(best_routes[i][2]).loc[0,\"Address\"]\n best_routes[i].append(address)\n except IndexError:\n # In case the source is outside Dublin\n best_routes = \"No Journey Found\"\n\n return json.dumps(best_routes, ensure_ascii=False)",
"def set_address_path(manager, routing, assignment,data_locations):\n assignment.ObjectiveValue()\n index = routing.Start(0)\n route_distance = 0\n address_list=[]\n while not routing.IsEnd(index):\n cur_node=manager.IndexToNode(index)\n# print('what are: index,cur_node=',index,cur_node)\n address_list.append(data_locations[cur_node])\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n manager.IndexToNode(index)\n# print('almost there: ',address_list)\n address1=address_list[0]\n address2=address_list[1]\n address3=address_list[2]\n address4=address_list[3]\n address5=address_list[4]\n address6=address_list[5]\n address7=address_list[6]\n address8=address_list[7]\n address9=address_list[8]\n address10=address_list[9]\n return address1,address2,address3,address4,address5,address6,address7,address8,address9,address10",
"def show_all_routes(self):\n try:\n routes = self.admin_repository.show_all_routes()\n if routes:\n for route in routes:\n print(\"Route Id: {}\".format(route[0]))\n print(\"Route : {}\".format(route[1]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False",
"def _parse_address_list(path):\n path = path.replace(\" \", \"\")\n array = (\n not (\",\" not in path and path.count(\":\") > 1 and path.count(\"[\") == 1)\n and path.startswith(\"[\")\n and path.endswith(\"]\")\n )\n\n routers = []\n address_list = _SPLIT_RE.split(path[1:-1] if array else path)\n priority_count = 0\n for address in address_list:\n router = {}\n\n match = _PRIORITY_RE.match(address)\n if match:\n address = match.group(1)\n router[\"priority\"] = int(match.group(2))\n priority_count += 1\n else:\n match = _ROUTER_RE.match(address)\n if match:\n address = match.group(1)\n router[\"priority\"] = 100\n\n match = urlparse(f\"//{address}\")\n if not match.hostname:\n raise InterfaceError(f\"Invalid address: {address}\")\n\n try:\n router.update(host=match.hostname, port=match.port)\n except ValueError as err:\n raise ProgrammingError(f\"Invalid URI: {err}\", 4002) from err\n\n routers.append(router)\n\n if 0 < priority_count < len(address_list):\n raise ProgrammingError(\n \"You must either assign no priority to any of the routers or give \"\n \"a priority for every router\",\n 4000,\n )\n\n return {\"routers\": routers} if array else routers[0]",
"def find_address_matches(input_text: str) -> List[str]:\n address_regex = '(?:ana)(?:_)(?:1|3)(?:[13456789abcdefghijkmnopqrstuwxyz]{59})'\n matches = re.findall(address_regex, input_text)\n if len(matches) >= 1:\n return matches\n raise AddressMissingException(\"address_not_found\")",
"def resolve(self, address):",
"def list_routes(app):\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, rule))\n output.append(line)\n\n return sorted(output)",
"def getRoutes(self):\n pass",
"def get_route_urls(area_url, area_id, lat, long):\n\n # Open page html with BeautifulSoup\n area_doc = urlopen(area_url, context=ctx)\n area_html = area_doc.read()\n # Parses html with BS package\n area_soup = BeautifulSoup(area_html, 'html.parser')\n\n # Opens main body of page\n body = area_soup.body\n # Contains list of all routes in an area\n sidebar = body.find('div', class_='mp-sidebar')\n # Opens routes section\n class_ = 'max-height max-height-md-0 max-height-xs-150'\n table = sidebar.find('div',\n class_=class_)\n table = table.find('table')\n routes = table.find_all('tr', id=None)\n # Gets route url and sends to get_route_features(route_url)\n for route in routes:\n route_url = route.find('a')['href']\n get_route_features(route_url, area_id, lat, long)",
"def available_routes():\r\n return (\r\n f\"Available Routes:<br/>\"\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"/api/v1.0/start<br/>\"\r\n f\"/api/v1.0/start/end\"\r\n )",
"def test_get_all_routes(self):\n\n post = {\n 'ip': 'test_ip',\n 'next_hop': 'test_nexthop',\n 'communities': 'test_commu'\n }\n post2 = {\n 'ip': 'test_ip2',\n 'next_hop': 'test_nexthop2',\n 'communities': 'test_commu2'\n }\n route1_id = self.database.add_route(post)\n route2_id = self.database.add_route(post2)\n post3 = self.database.get_all_routes()\n self.assertFalse(len(post3) > 2,\n 'Database was not empty before this function')\n self.database.delete_route({'_id': route1_id})\n self.database.delete_route({'_id': route2_id})\n for r in post3:\n if r['ip'] == post['ip']:\n self.assertEqual(r['ip'], post['ip'], 'insertion failed')\n self.assertEqual(r['next_hop'], post['next_hop'],\n 'insertion failed')\n self.assertEqual(r['communities'], post['communities'],\n 'insertion failed')\n else:\n self.assertEqual(r['ip'], post2['ip'], 'insertion failed')\n self.assertEqual(r['next_hop'], post2['next_hop'],\n 'insertion failed')\n self.assertEqual(r['communities'], post2['communities'],\n 'insertion failed')",
"def _resolve(addresses):\n\n for addr in addresses:\n _, _, ips = socket.gethostbyname_ex(addr)\n for ip in ips:\n yield ip",
"def list_routes():\n for rule in sorted(application.url_map.iter_rules(), key=lambda r: r.rule):\n print(\"{:10} {}\".format(\", \".join(rule.methods - set(['OPTIONS', 'HEAD'])), rule.rule))"
] | [
"0.6619424",
"0.61736727",
"0.609861",
"0.59860164",
"0.5949275",
"0.5928671",
"0.5904322",
"0.58720094",
"0.5834335",
"0.58248335",
"0.58065253",
"0.577405",
"0.57536596",
"0.5716622",
"0.56800824",
"0.56762975",
"0.5647484",
"0.56378794",
"0.5623062",
"0.5596893",
"0.5584083",
"0.5570617",
"0.5569287",
"0.5549166",
"0.5547392",
"0.5535519",
"0.5520551",
"0.54941887",
"0.54855835",
"0.5466669"
] | 0.6262349 | 1 |
select the route with the highest localPref | def get_highest_preference(self, routes):
# filter out any routes that don't have the highest localPref
outroutes = routes.copy()
outroutes.sort(reverse=True, key=lambda r: r[MESG][LPRF])
highest = outroutes[0][MESG][LPRF]
outroutes = list(filter(lambda r: r[MESG][LPRF] == highest, outroutes))
return outroutes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_highest_preference(self, routes):\n if len(routes) == 1: \n return routes\n if len(routes) == 0: \n return []\n prefList = []\n for val in routes: \n prefList.append(val[\"msg\"][\"localpref\"]) \n highestVal = max(prefList) \n outroutes = []\n for val in routes:\n if val[\"msg\"][\"localpref\"] == highestVal:\n outroutes.append(val)\n return outroutes",
"def get_highest_preference(self, routes):\n # start highest lpref route as the first route's path\n highest_lprf_route = [routes[0]]\n # start the highest lpref as that\n # of the first route's path\n highest_lprf = int(routes[0][LPRF])\n # iterate through all routes in given list and\n # find the one with the highest local pref\n for route in routes:\n r_lprf = int(route[LPRF])\n if r_lprf > highest_lprf:\n highest_lprf = r_lprf\n highest_lprf_route = [route]\n elif r_lprf == highest_lprf:\n highest_lprf_route.append(route)\n return highest_lprf_route",
"def get_highest_preference(self, routes):\n outroutes = []\n max_val = float('-inf')\n # find highest local pref first\n for r in routes:\n if r[MESG][LPRF] > max_val:\n max_val = r[MESG][LPRF]\n # find all routes with that val\n for r in routes:\n if r[MESG][LPRF] == max_val:\n outroutes.append(r)\n\n return outroutes",
"def get_highest_preference(self, routes):\n # routes is all possible routes to daddr\n outroutes = {}\n highest = 0\n\n for ip in routes.keys():\n if routes[ip][LPRF] > highest:\n highest = routes[ip][LPRF]\n\n for ip in routes.keys():\n if routes[ip][LPRF] == highest:\n outroutes[ip] = routes[ip]\n\n return outroutes",
"def get_highest_priority(self):\n for i in self.query.index.values.tolist():\n if not int(self.query.loc[i,'in_%s'%self.program]):\n pick = self.query.loc[i]\n break\n return pick",
"def _choose_best_trip(self):\n times = [(key, self._trips_dict[key].get_duration()) for key in self._trips_dict.keys()\n if self._trips_dict[key] is not None]\n self._primary_mode = min(times, key=lambda tup: tup[1])[0]",
"def find_best_route(all_cost, all_routes):\n cost_best_route = np.inf\n for i in range(len(all_cost)):\n if all_cost[i] < cost_best_route:\n cost_best_route = all_cost[i]\n best_route = all_routes[i]\n return cost_best_route, best_route",
"def get_shortest_route(routes):\n route = sorted(routes, key=lambda dist: dist[2]).pop(0)\n return route",
"def part_2(distances: Distances) -> int:\n\n result, _ = max(generate_routes(distances))\n print(f\"part 2: longest route has distance {result}\")\n return result",
"def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global",
"def select_leader(self):\n\n if self.leaders.size() == 1:\n return self.leaders.rand_choice()\n\n candidates = self.leaders.rand_sample(2)\n\n # randomly favourize one of them\n # best_global = choice(candidates)\n\n # should select those which has bigger fitness\n # # if one of them dominates, it will be selected as global best\n # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)\n #\n # if dom == 1:\n # best_global = candidates[0]\n #\n # if dom == 2:\n # best_global = candidates[1]\n\n if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:\n best_global = candidates[1]\n else:\n best_global = candidates[0]\n return best_global",
"def greedy_selector(self):\n r_k = 0 \n best_route = []\n cities_to_visit = [i for i in range(1, self.city_count)]\n for _ in range(1, self.city_count):\n s_ind = np.argmax([self.tau[(r_k, u)] for u in cities_to_visit])\n s_k = cities_to_visit.pop(s_ind)\n best_route.append((r_k, s_k))\n r_k = s_k\n best_route.append((r_k, 0))\n \n shortest_path = np.sum([self.phi[(p)] for p in best_route])\n return best_route, shortest_path",
"def select_plan(self):\n\n i_best = self.r_exp.index(max(self.r_exp))\n self.plan = self.u_seq[i_best]",
"def get_best_candidate(self):\n if not self.scores:\n return None\n return self.te_list[self.scores.index(max(self.scores))]",
"def getNearestPreference(self, myABR):\n closestRange = 99999\n closestShip = None\n for shipID in self.targets:\n enemyShip = self.myGalaxy.ships[shipID]\n if enemyShip.alive == 1 and (enemyShip.myShipHull.abr in globals.targetPreference[myABR]):\n range = funcs.getTargetRange(self.posX, self.posY, enemyShip.posX, enemyShip.posY)\n if range < closestRange:\n closestRange = range\n closestShip = enemyShip\n return closestShip",
"def maximal_destination_for_passenger(state, problem):\n unsatisfied = [p for p in state.passengers if not (p.is_arrived() or p.onboard)]\n if unsatisfied:\n max_dist = max([p.opt for p in unsatisfied])\n return max_dist\n return 0",
"def best_routes(self) -> Sequence['outputs.GetRouterStatusBestRouteResult']:\n return pulumi.get(self, \"best_routes\")",
"def select_MAP_control(Pi, t):\n\n u = Pi.idxmax()[t]\n return u",
"def _node_highest_neighbour(self, node):\n\n highest = self.neighbour_array_lo_hi[node][-1]\n\n if highest != node:\n return highest\n else:\n return -1",
"def find_route_optimal_route_length(tsp):\n best_route = []\n for city_id in tsp.best_route:\n for city in tsp.cities:\n if city_id == city.id:\n best_route.append(city)\n return Fitness(route=best_route).route_distance()",
"def _find_largest_candidate(self, reduced):\n nbr_counts = np.count_nonzero(reduced == 0, axis=0) # = [1, 1, 4, 2,...] where each value is the number of neighbours for the variant at that index.\n count_max = nbr_counts.max()\n if count_max == 0: # Indicates there are no available variants close enough\n return None, [] # to the remaining unassigned. Usually raises an error.\n max_inds = np.nonzero(nbr_counts == count_max)[0] # Array containing the indices of all variants with the max number of neighbours.\n if len(max_inds) == 1: # A single largest cluster\n best_center = max_inds[0]\n best_clstr = np.nonzero(reduced[:,best_center] == 0)[0]\n else: # A tie for largest cluster. Broken by smallest sum of full scores\n # This was tested with the below more accurate and true scoring function. Unfortunately it became hideously slow (clustered_inds and centre_inds were given as args):\n # clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n # covered_inds = list(clustered_inds | set(clstr_inds))\n # centre_inds.append(max_ind)\n # score = np.sum(np.min(self.orig_dists[np.ix_(covered_inds,centre_inds)], axis=1))\n # centre_inds.pop()\n best_center, best_clstr, best_score = None, [], np.inf\n for max_ind in max_inds:\n clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n score = np.sum(self.orig_dists[clstr_inds,max_ind])\n if score < best_score:\n best_center, best_clstr, best_score = max_ind, clstr_inds, score\n return best_center, best_clstr",
"def longest_flight(g):\n max_distance = None\n max_destination = None\n max_key = None\n \n for key in g.city_dict:\n for flight in g.city_dict[key].get_flights_out():\n if(flight[1] > max_distance):\n max_key = key\n max_destination = flight[0]\n max_distance = flight[1]\n return g.city_dict[max_key].get_code(), max_destination, max_distance",
"def _choose_best_option(self):",
"def find_path_best_first(world_nparray, heuristic_type=\"\"):\n world_ndarray = np.copy(world_nparray)\n start = tuple(np.argwhere(world_ndarray == -2)[0])\n goal = tuple(np.argwhere(world_ndarray == -3)[0])\n\n world_ndarray[world_ndarray == -2] = 0\n world_ndarray[world_ndarray == -3] = 0\n\n world_tuple = tuple(map(tuple, world_ndarray))\n\n pr_queue = [] # Use heapqueue as priority queue\n heappush(pr_queue, (get_h(start, goal, heuristic_type), 0, \"\", start))\n visited = set() # Each element has to be unique in a set\n graph = get_neighbors(world_tuple)\n route_str = \"\"\n\n while pr_queue:\n _, cost, path, current = heappop(pr_queue)\n if current == goal:\n route_str = path\n break\n if current in visited:\n continue\n visited.add(current)\n for direction, neighbour in graph[current].iteritems():\n heappush(pr_queue, (get_h(neighbour, goal, heuristic_type), cost + 1, path + direction, neighbour))\n world_ndarray[neighbour] = cost + 1\n\n # print \"Expanded nodes(Best First +\", heuristic_type, \"): \", len(visited), \"Path length: \", len(route_str)\n # Convert string directions to 2D(x,y) coordinates\n route_coord = [start]\n for p in route_str:\n route_coord.append(graph[route_coord[-1]][p])\n\n world_ndarray[start] = -2 # Mark the start and end coordinates again\n world_ndarray[goal] = -3\n\n return route_coord, world_ndarray, len(visited), len(route_str)",
"def get_cheapest_neighbor(client, facilities):\n adj_facilities = client.get_facility_list(facilities)\n return min(adj_facilities, key=lambda fac: fac['cost'])",
"def best_move(self, scores):\n \n max_val = max(scores)\n \n tie_list = []\n for i in range(len(scores)):\n if scores[i] == max_val:\n tie_list.append(i)\n if self.is_random:\n return random.choice(tie_list)\n else:\n return tie_list[0]",
"def choose_best_neighbour_simple(self):\n\t\trejected = set([]) #list of prohibited indexes which are rejected because of tabu and energy\n\t\tnIndex = -1\n\t\twhile(True):\n\t\t\tnIndex = self._find_min_diff(rejected=rejected)\t\t#index of best neighbor\n\n\t\t\tif self.is_tabu(nIndex):\n\t\t\t\toutput(message=\"\\t Neuron is in tabu. Need to check the aspiration criteria\",isDebug=True)\n\t\t\t\tif self.aspiration_criteria_satisfied(nIndex):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\trejected.add(nIndex)\n\t\t\telse:\n\t\t\t\tbreak\n\t\t# output(\"Neuron is found\",isDebug=True)\n\t\treturn nIndex",
"def mostConstrainingFeature(self):\n # keep track of which feature we'll choose next\n nextFeature = None\n # a counter for the minimum number of constraints\n maxCount = -1\n # loop through all the features\n for feature in self.features:\n # if this feature has a value then go back to the top of the loop and get\n # the next feature\n if (feature.value != 'none'):\n continue\n # get a list of all the constraints involving this feature\n constraintList = self.getOpenConstraints(feature.name)\n # compare the number of constraints involving this feature to the current max\n # if this is the first unassigned feature we found or this feature has the most\n # constraints we've found...\n if (len(constraintList) > maxCount):\n # save a pointer to the current feature with most constraints\n nextFeature = feature\n # save the max number of constraints\n maxCount = len(constraintList)\n # return the least constraining feature\n return nextFeature",
"def select_move_minimax(board, color, limit, caching = 0):\n #IMPLEMENT\n return minimax_max_node(board, color, limit, caching)[0] #change this!",
"def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)"
] | [
"0.7739929",
"0.76304084",
"0.759504",
"0.6923462",
"0.62135226",
"0.59675246",
"0.59384817",
"0.59176296",
"0.5840786",
"0.5819175",
"0.5819175",
"0.56594867",
"0.55900145",
"0.5547393",
"0.5547266",
"0.5503074",
"0.54446244",
"0.53964007",
"0.5369732",
"0.53554595",
"0.52877575",
"0.5279148",
"0.52773166",
"0.524",
"0.52364147",
"0.52189136",
"0.5217841",
"0.5212879",
"0.52045494",
"0.51958287"
] | 0.77910876 | 0 |
There should be at least an active document. | def IsActive(self):
return not FreeCAD.ActiveDocument is None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_document(self):\n pass",
"def startDocument(self):\n pass",
"def startDocument(self):\n pass",
"def current_document(self):\n return self.current_buffer.document",
"def has_doc() -> None:",
"def document_exists(self, docid):\n raise NotImplementedError",
"def test_document_listing(self, flag_is_active):\n flag_is_active.return_value = True\n\n # Create a topic and product\n t = topic(save=True)\n p = product(save=True)\n\n # Create 3 documents with the topic and product and one without\n for i in range(3):\n doc = revision(is_approved=True, save=True).document\n doc.topics.add(t)\n doc.products.add(p)\n doc = revision(is_approved=True, save=True).document\n\n self.refresh()\n\n # GET the page and verify the content\n url = reverse('products.documents', args=[p.slug, t.slug])\n r = self.client.get(url, follow=True)\n eq_(200, r.status_code)\n doc = pq(r.content)\n eq_(3, len(doc('#document-list li')))",
"def document(self):\n ...",
"def _getForDocument (self):\n return self.__forDocument",
"def update_document(self):\n pass",
"def getCurrentDocument(self):\n tabId = self.tab.currentIndex()\n if tabId == -1:\n return None\n \n currentDocument = self.tab.widget(tabId)\n return currentDocument",
"def get_document(self):\n return self.document",
"def one(self):\n try:\n return self[0]\n except IndexError:\n raise self.document.DoesNotExist",
"def _update_documents(self):\n if self.campaigns.active():\n top_ranked_documents = utils._rank_documents(self.event)\n # [TODO] this is a kludge\n if set(top_ranked_documents) == set(self.documents.all()):\n return False\n print('[DEBUG] Docs changed! New campaign ahoy!')\n self.documents = top_ranked_documents\n return True",
"def ThisDatabaseDocument(self):\n comp = self.ThisComponent # Get the current component\n if comp is None:\n return None\n #\n sess = CreateScriptService('Session')\n impl, ident = '', ''\n if sess.HasUnoProperty(comp, 'ImplementationName'):\n impl = comp.ImplementationName\n if sess.HasUnoProperty(comp, 'Identifier'):\n ident = comp.Identifier\n #\n targetimpl = 'com.sun.star.comp.dba.ODatabaseDocument'\n if impl == targetimpl: # The current component is the main Base window\n return comp\n # Identify resp. form, table/query, table/query in edit mode, report, relations diagram\n if impl == 'SwXTextDocument' and ident == 'com.sun.star.sdb.FormDesign' \\\n or impl == 'org.openoffice.comp.dbu.ODatasourceBrowser' \\\n or impl in ('org.openoffice.comp.dbu.OTableDesign', 'org.openoffice.comp.dbu.OQuertDesign') \\\n or impl == 'SwXTextDocument' and ident == 'com.sun.star.sdb.TextReportDesign' \\\n or impl == 'org.openoffice.comp.dbu.ORelationDesign':\n db = comp.ScriptContainer\n if sess.HasUnoProperty(db, 'ImplementationName'):\n if db.ImplementationName == targetimpl:\n return db\n return None",
"def is_docs_accessed(self):\n return self._tag == 'docs_accessed'",
"def test_doc1(self):\n assert models.review.__doc__ is not None",
"def active(self):\n\n return True",
"def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None",
"def is_docs_created(self):\n return self._tag == 'docs_created'",
"def _document_exists(self, document_name):\n return len(self.ssm_client.list_document_versions(Name=document_name)['DocumentVersions']) >= 1",
"def is_active(self):\r\n return True",
"def IsSaved(self):\n\t\treturn self.acad.ActiveDocument.Saved",
"def active(self):\n return self.owner.active",
"def docAreModified(self):\n nbDoc = 0\n for tabId in xrange( self.tab.count() ):\n doc = self.tab.widget(tabId)\n \n # bypass the welcome page\n if isinstance(doc, WelcomePage):\n continue\n # end of bypass\n \n if doc.isModified():\n nbDoc +=1\n return nbDoc",
"def active(self):\n return self._active",
"def active(self):\n return self._active",
"async def is_valid(self):\n\n # TODO: check if we are inside transaction and raise exception\n try:\n log.debug(\"Perform dirty document check\")\n await self._per_currency_balance_is_valid()\n await self._per_account_balance_is_valid(include_dirty=True)\n\n log.debug(\"Perform clean document check\")\n await self._per_account_balance_is_valid(include_dirty=False)\n except InvalidDocumentException as e:\n log.error(\"Document is not valid: %s\", e.args[0])\n return False\n log.debug(\"Document is valid\")\n return True",
"def checkScene ( doc_id ):\n if cmds.objExists ( \"root\" ) :\n \n self.labelStatus.setText ( \"You shouldn't have any named 'root' node in your scene\" )\n return False \n \n return True",
"def is_doc_not_found(self):\n return self._tag == 'doc_not_found'"
] | [
"0.65061957",
"0.6093264",
"0.6093264",
"0.6007754",
"0.5975979",
"0.5963389",
"0.59061444",
"0.58995444",
"0.579283",
"0.57361865",
"0.5707996",
"0.5682608",
"0.5675246",
"0.5667107",
"0.5646221",
"0.5572093",
"0.55599684",
"0.5558164",
"0.5511485",
"0.549962",
"0.549701",
"0.54939175",
"0.5462884",
"0.54474247",
"0.5420809",
"0.5401067",
"0.5401067",
"0.5379132",
"0.53719753",
"0.537021"
] | 0.6904055 | 1 |
Uses 'func_name' graph generator of NetworkX library to create a NetworkX graph which can be used as topology. | def get_networkx_func (func_name, seed=0, **kwargs):
nx_func = getattr(importlib.import_module("networkx"), func_name)
generated_graph = nx_func(seed=seed, **kwargs)
return generated_graph | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_graph_func(name):\n if name == \"chain\":\n f = generate_chain\n elif name == \"bidiag\":\n f = generate_bidiag\n elif name == \"collider\":\n f = generate_collider\n elif name == \"jungle\":\n f = generate_jungle\n elif name == \"full\":\n f = generate_full\n elif name == \"regular\":\n f = generate_regular_graph\n elif name == \"random\":\n f = generate_random_graph\n elif name.startswith(\"random_max_\"): # Random graph with maximum number of parents\n max_parents = int(name.split(\"_\")[-1])\n f = lambda *args, **kwargs: generate_random_graph(*args, max_parents=max_parents, **kwargs)\n else:\n f = generate_random_graph\n return f",
"def create_dag(func_dict):\n dag_dict = {\n name: inspect.getfullargspec(func).args for name, func in func_dict.items()\n }\n return nx.DiGraph(dag_dict).reverse()",
"def gen_graph(self):",
"def gen_graph_functions(env: jinja2.environment.Environment, main_graph: onnx.GraphProto) -> ([str], str, [str]):\n\n main_function_node_scripts = []\n sub_graph_functions = []\n generated_imports = set() # set to avoid duplicate imports\n\n node_tree = onnx_helper.NodeTree(main_graph.node)\n available_outputs = [o.name for o in list(main_graph.output)]\n\n while len(node_tree.nodes) != 0:\n current_lowest_nodes = node_tree.end_nodes\n\n # Find next operation to insert -> check if all outputs are available\n next_tree_node = None\n for tree_node in current_lowest_nodes:\n if all(output in available_outputs for output in list(tree_node.node.output)):\n next_tree_node = tree_node\n break\n if not next_tree_node:\n raise Exception(\"Error in parsing nodes, did not find a next node to compute\")\n\n # Insert generated parts\n generated_node = gen_node_script(env, main_graph, next_tree_node.node)\n generated_imports.update(generated_node.imports)\n main_function_node_scripts.append(generated_node.dml_script)\n # handle sub-graphs\n for sub_graph in generated_node.sub_graphs:\n sub_graph_imports, sub_graph_main_function, sub_graph_sub_graph_functions = \\\n gen_graph_functions(env, sub_graph)\n # Inherit imports\n generated_imports.update(sub_graph_imports)\n # Inherit sub-graph functions of sub-graph\n sub_graph_functions += sub_graph_sub_graph_functions\n # Sub-graph main-function becomes sub-graph function\n sub_graph_functions.append(sub_graph_main_function)\n\n # After insertion the inputs to the node become available and the node is removed\n available_outputs += list(next_tree_node.node.input)\n node_tree.remove_end_node(next_tree_node)\n\n main_function_node_scripts.reverse()\n main_graph_function = render_function(env, main_graph, main_function_node_scripts)\n return list(generated_imports), main_graph_function, sub_graph_functions",
"def generate_regular_graph(variable_names, dist_func, num_neigh=10, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n num_neigh = min(num_neigh, num_vars-1)\n graphs = nx.random_graphs.random_regular_graph(num_neigh, num_vars)\n edges = np.array(graphs.edges())\n edges.sort(axis=-1)\n\n return graph_from_edges(variable_names, dist_func, edges)",
"def node(func, name=None):\n return NamedFunc(func, name)",
"def networkx_resource_generator (func_name, seed=0, max_cpu=40, max_mem=16000,\n max_storage=30, max_link_bw=70,\n abc_nf_types_len=10,\n supported_nf_cnt=6, max_link_delay=2,\n sap_cnt=10,\n **kwargs):\n rnd = random.Random()\n rnd.seed(seed)\n nx_graph = get_networkx_func(func_name, seed=seed, **kwargs)\n\n nf_types = list(string.ascii_uppercase)[:abc_nf_types_len]\n nffg = NFFG(id=\"net-\" + func_name + \"-seed\" + str(seed))\n gen = NameGenerator()\n\n for infra_id in nx_graph.nodes_iter():\n infra = nffg.add_infra(id=\"infra\" + str(infra_id),\n bandwidth=rnd.random() * max_link_bw * 1000,\n cpu=rnd.random() * max_cpu,\n mem=rnd.random() * max_mem,\n storage=rnd.random() * max_storage)\n infra.add_supported_type(rnd.sample(nf_types, supported_nf_cnt))\n\n for i, j in nx_graph.edges_iter():\n infra1 = nffg.network.node[\"infra\" + str(i)]\n infra2 = nffg.network.node[\"infra\" + str(j)]\n nffg.add_undirected_link(port1=infra1.add_port(id=gen.get_name(\"port\")),\n port2=infra2.add_port(id=gen.get_name(\"port\")),\n p1p2id=gen.get_name(\"link\"),\n p2p1id=gen.get_name(\"link\"),\n dynamic=False,\n delay=rnd.random() * max_link_delay,\n bandwidth=rnd.random() * max_link_bw)\n\n infra_ids = [i.id for i in nffg.infras]\n for s in xrange(0, sap_cnt):\n sap_obj = nffg.add_sap(id=gen.get_name(\"sap\"))\n sap_port = sap_obj.add_port(id=gen.get_name(\"port\"))\n infra_id = rnd.choice(infra_ids)\n infra = nffg.network.node[infra_id]\n nffg.add_undirected_link(port1=sap_port,\n port2=infra.add_port(id=gen.get_name(\"port\")),\n p1p2id=gen.get_name(\"link\"),\n p2p1id=gen.get_name(\"link\"),\n dynamic=False,\n delay=rnd.random() * max_link_delay,\n bandwidth=rnd.uniform(max_link_bw / 2.0,\n max_link_bw))\n\n return nffg",
"def build_graph(self, graph, inst_name, port_nets):\n return",
"def graph_from_edges(variable_names, dist_func, edges, latents=None):\n adj_matrix = edges_to_adj_matrix(edges, len(variable_names))\n return graph_from_adjmatrix(variable_names, dist_func, adj_matrix, latents=latents)",
"def generate_call_graph(func):\n def wrapper(*args, **kwargs):\n # Get Calling Module\n mod = inspect.getmodule(inspect.stack()[1][0])\n if _provider_configuration[mod].enabled:\n caller_func = func.__name__\n if len(args) > 1:\n caller_func += \"_\" + \"_\".join(args[1:])\n # Get Factory and Configuration\n factory = _provider_factory[mod]\n config = _provider_configuration[mod]\n # Generate Call Graph\n with factory(config, caller_func) as p:\n p.start()\n return func(*args, **kwargs)\n else:\n return func(*args, **kwargs)\n return wrapper",
"def _construct_graph(self):\n raise NotImplementedError",
"def example_graph():\n g = nx.Graph()\n g.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('D', 'E'), ('D', 'F'), ('D', 'G'), ('E', 'F'), ('G', 'F')])\n return g",
"def prepare_graph(label, nodes, edges, graphID):\n features = {'label': label}\n\n G = nx.DiGraph()\n nodes[\"id\"] = nodes[\"id\"].apply(lambda x : str(x))\n features['num_nodes'] = nodes.shape[0]\n op_node = None\n times = []\n friends = []\n followers = []\n for index, row in nodes.iterrows():\n G.add_node(row['id'], time=row['time'], friends=row['friends'], followers = row['followers'])\n times.append(row['time'])\n friends.append(2**row['friends'])\n followers.append(2**row['followers'])\n if row['time'] == 0:\n features['poster_friend_cnt'] = 2**row['friends']\n features['poster_follower_cnt'] = 2**row['followers']\n tweeter_id = row['id']\n op_node = row['id']\n features['avg_time'] = np.mean(times)\n features['avg_friends'] = np.mean(friends)\n features['avg_followers'] = np.mean(followers)\n features['max_followers'] = max(followers)\n features['max_friends'] = max(friends)\n features['friends_25th_percentile'] = np.percentile(friends, 25)\n features['friends_75th_percentile'] = np.percentile(friends, 75)\n features['followers_25th_percentile'] = np.percentile(followers, 25)\n features['followers_75th_percentile'] = np.percentile(followers, 75)\n node_list = []\n edge_count = 0\n for pair in edges:\n node1, node2 = pair.split()[0], pair.split()[1]\n node_list.append(node1)\n node_list.append(node2)\n G.add_edge(node1, node2)\n edge_count += 1\n features['num_edges'] = edge_count\n sum_users_without_followers = sum([1 for (node, val) in G.in_degree() if val==0])\n features['ratio_users_w/out_followers'] = sum_users_without_followers / len(G.nodes)\n features['num_connected_components'] = nx.number_strongly_connected_components(G)\n features['number_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id)\n features['percentage_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id) / features['poster_follower_cnt']\n features['avg_clustering'] = nx.average_clustering(G)\n features['op_clustering'] = nx.clustering(G,op_node)\n features['transitivity'] = nx.transitivity(G)\n node_list = list(set(node_list))\n features['nodeID_list'] = np.array(node_list)\n features['graph_id'] = graphID\n return features, G",
"def generate_chain(variable_names, dist_func, **kwargs):\n shuffle(variable_names) # To have a random order\n num_vars = len(variable_names)\n\n adj_matrix = np.zeros((num_vars, num_vars), dtype=np.bool)\n for v_idx in range(num_vars-1):\n adj_matrix[v_idx, v_idx+1] = True\n\n return graph_from_adjmatrix(variable_names, dist_func, adj_matrix)",
"def generate_full(variable_names, dist_func, **kwargs):\n return generate_random_graph(variable_names, dist_func, edge_prob=1.0)",
"def topological_nodes_generator(graph, reverse=...):\n ...",
"def onnx_compiler(func):\n\n assert isinstance(func, tvm.relay.function.Function)\n name = str(func.attrs.global_symbol)\n model = to_onnx(func, {}, name)\n const_vars = [const.name for const in model.graph.initializer]\n name_bytes = bytes(name, \"utf-8\")\n name_size = struct.pack(\"I\", len(name_bytes))\n model_serialized = model.SerializeToString()\n model_size = struct.pack(\"I\", model.ByteSize())\n data = b\"\" + name_size + name_bytes + model_size + model_serialized\n\n runtime_func = \"runtime.ONNXModuleCreate\"\n fcreate = tvm._ffi.get_global_func(runtime_func)\n return fcreate(data.hex(), name, const_vars)",
"def render_function(env: jinja2.environment.Environment, graph: onnx.GraphProto,\n generated_node_scripts: [str]) -> str:\n function_template = env.get_template(\"graph_function.dml.jinja\")\n\n inputs_with_initializers = onnx_helper.get_graph_inputs_with_initializers(graph)\n inputs_without_initializers = onnx_helper.get_graph_inputs_without_initializers(graph)\n outputs = list(graph.output)\n\n # prepare inputs/outputs\n function_inputs = [onnx_helper.PreparedValue(i) for i in inputs_without_initializers]\n function_outputs = [onnx_helper.PreparedValue(o) for o in outputs]\n function_initializers = [onnx_helper.PreparedValue(info, init) for info, init in inputs_with_initializers]\n\n # render function\n graph_function_render = function_template.render(\n function_inputs=function_inputs,\n function_outputs=function_outputs,\n function_start_initializers=function_initializers,\n graph_function_name=util.generate_function_name(graph.name),\n graph_function_description=graph.doc_string,\n node_scripts=generated_node_scripts\n )\n return graph_function_render",
"def make_dag(self, expand=set()):\n G = nx.DiGraph()\n\n ## Inputs-to-Functions\n for f in self.functions:\n # Expand composed models\n if isinstance(f, FunctionModel) and (f.name in expand):\n G_ref = f.model.make_dag(expand=expand - {f})\n G_sub = nx.DiGraph()\n # Add nodes\n G_sub.add_node(f.name + \".var\")\n G_sub.add_node(f.name + \".out\")\n for g in f.model.functions:\n G_sub.add_node(f.name + \".\" + g.name)\n # Add node metadata\n nx.set_node_attributes(G_sub, f.name, \"parent\")\n\n # Add edges\n for u, v, d in G_ref.edges(data=True):\n # Add renamed edge\n if u == \"(var)\":\n G_sub.add_edge(f.name + \".var\", f.name + \".\" + v, **d)\n elif v == \"(out)\":\n G_sub.add_edge(f.name + \".\" + u, f.name + \".out\", **d)\n else:\n G_sub.add_edge(f.name + \".\" + u, f.name + \".\" + v, **d)\n\n # Compose the graphs\n G = nx.compose(G, G_sub)\n\n i_var = set(self.var).intersection(set(f.var))\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(\"(var)\", f.name + \".var\", label=s_var)\n else:\n G.add_edge(\"(var)\", f.name, label=s_var)\n\n ## Function-to-Function\n for i0 in range(len(self.functions)):\n for i1 in range(i0 + 1, len(self.functions)):\n f0 = self.functions[i0]\n f1 = self.functions[i1]\n i_var = set(f0.out).intersection(set(f1.var))\n\n ## If connected\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n ## Handle composed models\n if isinstance(f0, FunctionModel) and (f0.name in expand):\n name0 = f0.name + \".out\"\n else:\n name0 = f0.name\n if isinstance(f1, FunctionModel) and (f1.name in expand):\n name1 = f1.name + \".out\"\n else:\n name1 = f1.name\n\n G.add_edge(name0, name1, label=s_var)\n\n ## Functions-to-Outputs\n for f in self.functions:\n i_out = set(self.out).intersection(set(f.out))\n\n if len(i_out) > 0:\n s_out = \"{}\".format(i_out)\n ## Target composed model's out\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(f.name + \".out\", \"(out)\", label=s_out)\n ## An ordinary function\n else:\n G.add_edge(f.name, \"(out)\", label=s_out)\n\n # Add node metadata\n nx.set_node_attributes(G, {f.name: {\"parent\": self.name}})\n\n # Final metadata\n nx.set_node_attributes(G, {\"(var)\": {\"parent\": self.name}})\n nx.set_node_attributes(G, {\"(out)\": {\"parent\": self.name}})\n\n return G",
"def mutate_topology_func(op_names):\n def mutate_topology_func(parent_arch):\n child_arch = deepcopy( parent_arch )\n node_id = random.randint(0, len(child_arch.nodes)-1)\n node_info = list( child_arch.nodes[node_id] )\n snode_id = random.randint(0, len(node_info)-1)\n xop = random.choice( op_names )\n while xop == node_info[snode_id][0]:\n xop = random.choice( op_names )\n node_info[snode_id] = (xop, node_info[snode_id][1])\n child_arch.nodes[node_id] = tuple( node_info )\n return child_arch\n return mutate_topology_func",
"def gexf_graph():\n # you must replace these lines and supply your own graph\n gexf = Gexf(\"author\", \"title\")\n mygraph = gexf.addGraph(\"undirected\", \"static\", \"A web network\")\n atr_type = mygraph.addNodeAttribute('Type', type='string')\n atr_id = mygraph.addNodeAttribute('id', type='string')\n atr_label = mygraph.addNodeAttribute('label', type='string')\n atr_color_r = mygraph.addNodeAttribute('color_r', type='string', defaultValue='0')\n atr_color_g = mygraph.addNodeAttribute('color_g', type='string', defaultValue='0')\n atr_color_b = mygraph.addNodeAttribute('color_b', type='string', defaultValue='0')\n k = 0\n for i in range(min_parts()):\n tmp = mygraph.addNode(set_num[i], name[i], r=\"0\", g=\"0\", b=\"0\")\n tmp.addAttribute(atr_type, \"set\")\n tmp.addAttribute(atr_id, set_num[i])\n tmp.addAttribute(atr_label, name[i])\n for j in range(len(Parts[i][\"Parts\"])):\n if mygraph.nodeExists(Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"])==0:\n temp = mygraph.addNode((Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), Parts[i][\"Parts\"][j][\"name\"], r=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2], 16)), g=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4], 16)), b=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6], 16)))\n temp.addAttribute(atr_type, \"part\")\n temp.addAttribute(atr_id, (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]))\n temp.addAttribute(atr_label, Parts[i][\"Parts\"][j][\"name\"])\n temp.addAttribute(atr_color_r, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2])\n temp.addAttribute(atr_color_g, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4])\n temp.addAttribute(atr_color_b, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6])\n mygraph.addEdge(str(k), set_num[i], (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), weight=Parts[i][\"Parts\"][j][\"quantity\"])\n k = k+1\n output_file = open(\"bricks_graph.gexf\", \"wb\")\n gexf.write(output_file)\n return -1",
"def build_graphviz(input_dim, output_dim, num_intermediate, \n connections, activations, activation_labels):\n \n if not is_valid_adjacency_matrix(connections, num_intermediate, input_dim, output_dim):\n raise ValueError(\"Connectivity matrix is invalid\")\n num_emitting = num_intermediate + input_dim\n num_receiving = num_intermediate + output_dim\n size = num_emitting + output_dim\n dag = graphviz.Digraph()\n #add nodes labeled by activation functions\n for i in range(size):\n node=str(i)\n if i < input_dim:\n label = \"input %d\" % i\n attrs = {}\n else:\n act_index = activations[i-input_dim].item()\n act_label = activation_labels[act_index]\n attrs = {\n 'activation_index': str(act_index),\n 'activation_label': str(act_label)\n } \n if i >= num_emitting:\n label = f\"output {i-num_emitting}\"\n else:\n label = None\n\n dag.node(node, label=label, **attrs)\n #add edges\n edgelist = []\n for i in range(num_receiving):\n rec_index = i + input_dim\n for emitting_index in range(min(rec_index, num_emitting)):\n if connections[i, emitting_index] > 0:\n edgelist.append((str(emitting_index), str(rec_index)))\n dag.edges(edgelist)\n act_mapping = {str(i) : activation_labels[i] for i in range(len(activation_labels))}\n dag.attr(**act_mapping)\n return dag",
"def add_graph(self, graph={}, name=\"main\"):\n if name in self.ssa.functions:\n print(\"Failed adding graph! Name already exist in the NNSSA network!\")\n else:\n self.ssa.add_function(name, SSAFunction(graph))",
"def _create_graph(netlist):\n G = nx.Graph()\n for t in netlist:\n G.add_edges_from([(t.name, t.drain), (t.name, t.gate), (t.name, t.source)])\n return G",
"def _build_graph(self):\n pass",
"def gexf_graph():\n # you must replace these lines and supply your own graph\n \n \n \n my_gexf = Gexf(\"JiajiaXie\", \"My awesome graph\")\n graph=my_gexf.addGraph(\"undirected\", \"static\", \"My awesome networks\")\n \n atr1=graph.addNodeAttribute('Type',type='string')\n\n\n for set in data_specific:\n if graph.nodeExists(set['set_num']) ==0:\n tm1=graph.addNode(set['set_num'], set['name'], r='0', g='0', b='0')\n tm1.addAttribute(atr1,\"set\")\n\n\n\n counter_test=1\n for set, part in data_parts.items():\n for key, part_list in part.items():\n interme =part_list['color']\n red=interme[0]+interme[1]\n green=interme[2]+interme[3]\n blue=interme[4]+interme[5]\n\n red_de=str(int(red,16))\n green_de=str(int(green,16))\n blue_de=str(int(blue,16))\n if graph.nodeExists(part_list['id'])==0:\n tm2=graph.addNode(part_list['id'], part_list['part_name'],r=red_de, g=green_de, b = blue_de)\n tm2.addAttribute(atr1,\"part\")\n\n\n counter_test+=1\n graph.addEdge(\"_\"+str(counter_test), set, part_list['id'], part_list['quantity'])\n\n\n\n f=open('bricks_graph.gexf','wb')\n my_gexf.write(f)\n\n\n return my_gexf.graphs[0]",
"def build_graph(self):\n pass",
"def _graph_fn_parameterize(self, *parameters):\n raise NotImplementedError",
"def build_nodes(graph):\n\n acc = {}\n for k, v in graph.items():\n if callable(v):\n acc[k] = util.fninfo(v)\n else:\n acc[k] = v\n\n return acc",
"def build_graph(self, graph, inst_name, port_nets):\n self.add_graph_edges(graph, port_nets)"
] | [
"0.71898025",
"0.65309864",
"0.6433408",
"0.6324329",
"0.6269729",
"0.6199446",
"0.6140116",
"0.60027516",
"0.58967483",
"0.58881515",
"0.5823595",
"0.5820199",
"0.5809142",
"0.58074343",
"0.579847",
"0.579281",
"0.5770418",
"0.57583",
"0.5748758",
"0.57378566",
"0.5734552",
"0.57264614",
"0.5717103",
"0.56472653",
"0.55706793",
"0.5546009",
"0.5540218",
"0.55310416",
"0.55274403",
"0.5524199"
] | 0.7829313 | 0 |
test clickerstate class in the program | def test_class(ClickerState):
#create the test suite
suite = poc_simpletest.TestSuite()
# create game (current initial state)
state1 = ClickerState() # we initialize the game and then do nothing
state2 = ClickerState() # ... we will somethings to rest of the games .. work out expected
state3 = ClickerState() #values and test . like we can call state2.wait(1000.0) for state2
state4 = ClickerState() #game
state5 = ClickerState()
## test __str__ method
#for game 1
string_expected1 = "total cookies so far: 0.0 current # of cookies: 0.0 cps: 1.0 at time: 0.0"
suite.run_test(str(state1), string_expected1, "test 0.1: __str__")
# test get_cookies method
suite.run_test(state1.get_cookies(), 0.0, "test 1.1: get_cookies() test")
# test get_cps method
suite.run_test(state1.get_cps(), 1.0, "test 2.1: get_cps() test")
# test get_time method
suite.run_test(state1.get_time(), 0.0, "test 3.1: get_time() test")
# test get_ history method
suite.run_test(state1.get_history(), [(0.0, None, 0.0, 0.0)], "test 4.1: get_history() test")
# test time_until method
suite.run_test(state1.time_until(5.0), 5.0, "test 5.1: test time_until with whole no. of cookies")
suite.run_test(state1.time_until(6.7), 7.0, "test 5.2: test time_until with fractional no. of cookies")
# test wait method
state1.wait(9.0)
wait_string1 = "total cookies so far: 9.0 current # of cookies: 9.0 cps: 1.0 at time: 9.0"
suite.run_test(str(state1), wait_string1, "test 6.1: test wait for whole num of seconds")
state1.wait(2.3)
wait_string2 = "total cookies so far: 11.3 current # of cookies: 11.3 cps: 1.0 at time: 11.3"
suite.run_test(str(state1), wait_string2, "test 6.2: test wait for consecutive call")
state2.wait(24.9)
wait_string3 = "total cookies so far: 24.9 current # of cookies: 24.9 cps: 1.0 at time: 24.9"
suite.run_test(str(state2), wait_string3, "test 6.3: test wait for fractional num of secs")
# test buy_item method
state3.wait(99.4)
state3.buy_item("my bf's kisses", 90.0, 10.0)
buy_string1 = "total cookies so far: 99.4 current # of cookies: 9.4 cps: 11.0 at time: 99.4"
suite.run_test(str(state3), buy_string1, "test 7.1: test game state after buy_item")
suite.run_test(state3.get_history(), [(0.0, None, 0.0, 0.0),(99.4, "my bf's kisses", 90.0, 99.4)], "test 7.2: retriving history after buy_item")
# reporting result
suite.report_results() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def click(self):\r\n pass",
"def _get_click_state(self, event):\n raise NotImplementedError",
"def take_action(self, state):",
"def test_class_started(self, cls):",
"def test_gameHandleEvents(self):\n # this kinda gonna be reiterating the other tests??\n # the tests of all the individual methods below make this test work\n pass",
"def _test(self):\n pass",
"def _test(self):\n pass",
"def _test(self):\n pass",
"def runtest(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def on_click(self) -> None:\n pass",
"def act(self, state):\n return",
"def test(self):\n pass",
"def test_02_visit_again(self):",
"def setUpClass(cls):\n cls.driver = driver_class()\n cls.page = None\n cls.mouse = ActionChainEx(cls.driver)",
"def test_01_visit(self):",
"def clickOverride():\n\n pass",
"def startTestRun(self):",
"def test_click_Locationtypes(self):\n self.tc_id = \"Ts_013\"\n self.tc_desc = \"Verify user is able to click Location types\"\n self.tc_step = \"TC Start\"\n\n clickLocationTypes = ClickLocationTypes(self.driver)\n\n self.tc_step = \"Launch the url\"\n clickLocationTypes.launchUrl(\"https://massdtaiot.com/dtahip/\")\n self.tc_step = \"Select all cities\"\n clickLocationTypes.allcities()\n self.tc_step = \"Click the HIP CSA filter\"\n clickLocationTypes.clickHIPCSALoc()\n clickLocationTypes.clickonLegend()\n clickLocationTypes.clickHIPFarmStandLoc()\n clickLocationTypes.clickonLegend()\n clickLocationTypes.clickHIPFarmerMarketBoothLoc()\n clickLocationTypes.clickonLegend()\n clickLocationTypes.clickHIPMobileMarketLoc()\n clickLocationTypes.clickonLegend()\n clickLocationTypes.clickHIPFarmerMarketLoc()",
"def testing(self):\n print('test successful')",
"def c_arrow_test(self):",
"def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for i in range(200):\n display.read()\n assert callback == 1",
"def test_easy_level_choosing_ok(self):\n events = [StubEvent(pygame.MOUSEBUTTONDOWN), StubEvent(pygame.QUIT)]\n menu = Menu(self.display, StubEventQueue(events), 600, 700)\n\n menu.menu()\n\n self.assertEqual(menu.event_queue.get_pos(), (303, 319))",
"def test_update_state(self):\n pass",
"def test_let(self):",
"def test_alive():\n pass"
] | [
"0.674187",
"0.6613401",
"0.62807655",
"0.6226961",
"0.61802983",
"0.61757636",
"0.61757636",
"0.61757636",
"0.6106897",
"0.60738784",
"0.60738784",
"0.60738784",
"0.60738784",
"0.60738784",
"0.6005138",
"0.5963129",
"0.59450465",
"0.59245735",
"0.59171283",
"0.5907409",
"0.5906282",
"0.5897374",
"0.586526",
"0.5856665",
"0.58562887",
"0.58523744",
"0.58462954",
"0.5845681",
"0.5811321",
"0.58083516"
] | 0.6928895 | 0 |