Dataset columns:
  query            string (lengths 9 to 9.05k)
  document         string (lengths 10 to 222k)
  metadata         dict
  negatives        sequence of length 30
  negative_scores  sequence of length 30
  document_score   string (lengths 4 to 10)
  document_rank    string (2 distinct values)
My delimited list. Allows a zero-length list and uses no delimiter by default.
def List(expr, delim=""):
    if not delim:
        return Group(ZeroOrMore(expr))
    else:
        return Group(Optional(expr + ZeroOrMore(Suppress(delim) + expr)))
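A minimal usage sketch, assuming this helper is built on pyparsing (the Group/ZeroOrMore/Optional/Suppress names match that library); the sample grammar and inputs are illustrative only, with expected output shown in comments.

from pyparsing import Group, Optional, Suppress, Word, ZeroOrMore, alphas

def List(expr, delim=""):
    # Definition from the record above, repeated so this snippet runs standalone.
    if not delim:
        return Group(ZeroOrMore(expr))
    return Group(Optional(expr + ZeroOrMore(Suppress(delim) + expr)))

item = Word(alphas)
print(List(item, delim=",").parseString("a,b,c").asList())  # -> [['a', 'b', 'c']]
print(List(item, delim=",").parseString("").asList())       # -> [[]]  (zero-length list accepted)
print(List(item).parseString("a b c").asList())             # -> [['a', 'b', 'c']]  (no delimiter)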
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokenlist(sep, item):\n return item + ZeroOrMore(sep + item) + Optional(sep)", "def listify(item, delimiter=\",\"):\n if not item:\n return []\n if type(item) is str:\n item = item.split(delimiter)\n if type(item) is not list:\n raise TypeError(\"'listify' must take None, str, or list!\")\n return item", "def test_string_to_list_string_delimiter(self):\n assert_equals(\n str_to_list(' a | b | c ', delimiter='|'),\n ['a', 'b', 'c']\n )", "def test_string_to_list_string_delimiter(self):\n\n assert_equals(\n str_to_list(' a | b | c ', delimiter='|'),\n ['a', 'b', 'c']\n )", "def getlist(self, option, sep=',', chars=None):\n return [chunk.strip(chars) for chunk in option.split(sep)]", "def delimit(self):\n pass", "def split(self) -> List[String]:\n pass", "def to_list(name, default=[], separator=\":\"):\n value = get(name)\n if value is None:\n return list(default)\n return [e.strip() for e in value.split(separator)]", "def test_separators_only():\n assert my_splitter(\",ad,\", \"ad\") == [\",\", \",\"]", "def explode(delim, val, limit = None): \n if limit != None:\n return val.split(delim, limit)\n else:\n return val.split(delim)", "def __split_for_delimiter__(self, string):\n if not self.__delimiter__ == '':\n return string.split(self.__delimiter__)\n return string.split()", "def delimiter(self) -> \"pattern\":\n raise NotImplementedError", "def clean_commas(song_list: List[str]) -> List[str]:\n res = []\n for idx, line in enumerate(song_list):\n if line[-1] == ',':\n if idx + 1 >= len(song_list) or song_list[idx + 1] == '':\n line = line[:-1]\n res.append(line)\n return res", "def texto_para_lista(elemento, delimitador='|'):\n return elemento.split(delimitador)", "def implode(delim, items):\n return delim.join(items)", "def separate_list_input(input_: str) -> List[str]:\n no_commas = input_.replace(\",\", \" \")\n # Each string is naturally unicode, this causes problems with M2Crypto SANs\n # TODO: check if above is still true when M2Crypto is gone ^\n return [str(string) for string in no_commas.split()]", "def list_sugar(self):\n return 'list(', ')'", "def pretty_list(input_list, separator=', '):\n if input_list:\n output = ' %s' % separator.join(input_list)\n else:\n output = ' empty'\n return output", "def list_option(s):\n return _convert(s, (list, tuple))", "def parse_list(tokens: deque) -> list:\n # Exemplo de implementação...\n\n # Consome o colchete de abertura\n if tokens.popleft() != \"[\":\n raise SyntaxError\n\n # Verifica se corresponde à uma lista vazia\n elif tokens[0] == \"]\":\n tokens.popleft()\n return []\n\n # Consome os valores\n xs = []\n while True:\n # Lê valor e salva na saída\n x = parse_value(tokens)\n xs.append(x)\n\n # Verifica fim da lista e remove vírgula se necessário\n tk = tokens.popleft()\n if tk == \"]\":\n break\n elif tk != \",\":\n raise SyntaxError(\"token inesperada em lista: %r\" % tk)\n\n return xs", "def _parse_list(string, dtype=int, delimiter=','):\n\n items = string.lower().strip().replace(' ', '').split(delimiter)\n\n if 'none' in items:\n items.pop(items.index('none'))\n contains_none = True\n else:\n contains_none = False\n\n\n if dtype == bool:\n items = [item == 'true' for item in items]\n else:\n items = [dtype(item) for item in items]\n\n if contains_none:\n items.append(None)\n\n return items", "def listify(item, do_strip=False):\n if not item:\n return []\n elif isinstance(item, list):\n return item\n elif isinstance(item, string_types) and item.count(','):\n if do_strip:\n return [token.strip() for token in item.split(',')]\n 
else:\n return item.split(',')\n else:\n return [item]", "def separator(self):\n pass", "def list(self, item, default=None, spliter=\",\", strip=True, mod=None):\n try:\n item = self.__getattr__(item)\n except AttributeError as err:\n if default is not None:\n return default\n raise err\n if strip:\n item = item.lstrip(\"[\").rstrip(\"]\")\n out = [x.strip() if strip else x for x in item.split(spliter)]\n if mod:\n return list(map(mod, out))\n return out", "def split(value, delimiter):\n return value.split(delimiter)", "def repair_size_list(self, str_val):\n return [word for word in str_val[2:-2].split('\\', \\'')]", "def read_list(name, default=NO_ARGUMENT, separator=\",\"):\n value = os.environ.get(name)\n if value is None:\n if default is NO_ARGUMENT:\n return []\n else:\n return default\n return [v.strip() for v in value.split(separator) if v.strip()]", "def test_list(self, env: yaenv.Env):\n _val = env.list('LIST_VAR', separator=':')\n _expect = ['item1', 'item2']\n assert _val == _expect and type(_val) == list\n _expect.append('item3')\n _val = env.list('MISSING', _expect)\n assert _val == _expect and type(_val) == list\n assert env.list('MISSING') is None", "def separate(delim):\n # Return a function that takes an argument s, which when called will split\n # s over the delimiter specified (i.e. the delim parameter).\n return lambda s: s.split(delim)", "def parse_list(value: str) -> list[str]:\n segments = _QUOTED_SEGMENT_RE.findall(value)\n for segment in segments:\n left, match, right = value.partition(segment)\n value = ''.join([left, match.replace(',', '\\000'), right])\n return [_dequote(x.strip()).replace('\\000', ',') for x in value.split(',')]" ]
[ "0.7051483", "0.6357497", "0.6287068", "0.62133163", "0.6168434", "0.61420405", "0.6067352", "0.5842047", "0.57866776", "0.5777707", "0.5761801", "0.57346326", "0.5694061", "0.56513804", "0.5629008", "0.56282747", "0.5587309", "0.55792344", "0.5534608", "0.5531955", "0.5512744", "0.54927844", "0.5468525", "0.54599667", "0.5411374", "0.5409233", "0.53893363", "0.53776604", "0.5346936", "0.53310263" ]
0.6472422
1
Returns True if the user has the specified permission. This method queries all available auth backends, but returns immediately if any backend returns True. Thus, a user who has permission from a single auth backend is assumed to have permission in general. If an object is provided, permissions for this specific object are checked.
def has_perm(self, user, perm, obj=None):
    # Active superusers have all permissions.
    if user.is_active and user.is_superuser:
        return True
    # Otherwise we need to check the backends.
    return _user_has_perm(user, perm, obj)
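For context, the backend-iteration helper this delegates to (also visible among the retrieved candidates below) follows Django's usual pattern; a sketch, assuming the standard django.contrib.auth API:

from django.contrib import auth
from django.core.exceptions import PermissionDenied

def _user_has_perm(user, perm, obj):
    # Ask every configured auth backend; the first one that returns True wins.
    for backend in auth.get_backends():
        if not hasattr(backend, 'has_perm'):
            continue
        try:
            if backend.has_perm(user, perm, obj):
                return True
        except PermissionDenied:
            # A backend may short-circuit the whole check by raising.
            return False
    return False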
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_perm(self, perm, obj=None):\n\n # Active superusers have all permissions.\n if self.is_active and self.is_superuser:\n return True\n\n # Otherwise we need to check the backends.\n return _user_has_perm(self, perm, obj)", "def has_perm(self, perm, obj=None):\n\n # Active superusers have all permissions.\n\n if self.is_active and self.is_superuser:\n return True\n\n # Otherwise we need to check the backends.\n return _user_has_perm(self, perm, obj)", "def _user_has_perm(user, perm, obj):\n for backend in auth.get_backends():\n if not hasattr(backend, 'has_perm'):\n continue\n try:\n if backend.has_perm(user, perm, obj):\n return True\n except PermissionDenied:\n return False\n return False", "def _user_has_perm(user, perm, obj):\n for backend in auth.get_backends():\n if not hasattr(backend, 'has_perm'):\n continue\n try:\n if backend.has_perm(user, perm, obj):\n return True\n except PermissionDenied:\n return False\n return False", "def has_field_perm(self, perm, obj=None, field=None):\n # Inactive users have no permissions.\n if not self.is_active:\n return False\n\n # Superusers have all permissions.\n if self.is_superuser:\n return True\n\n # Otherwise we need to check the backends.\n return _user_has_field_perm(self, perm, obj, field)", "def has_perm(self, user, perm, obj=None):\n if obj is not None and not isinstance(obj, LocalSite):\n logging.error('Unexpected object %r passed to has_perm. '\n 'Returning False.', obj)\n\n if settings.DEBUG:\n raise ValueError('Unexpected object %r' % obj)\n\n return False\n\n if not user.is_active:\n return False\n\n if obj is not None:\n if not hasattr(user, '_local_site_admin_for'):\n user._local_site_admin_for = {}\n\n if obj.pk not in user._local_site_admin_for:\n user._local_site_admin_for[obj.pk] = obj.is_mutable_by(user)\n\n if user._local_site_admin_for[obj.pk]:\n return perm in self._VALID_LOCAL_SITE_PERMISSIONS\n\n return super(StandardAuthBackend, self).has_perm(user, perm, obj)", "def has_perm(self, user_obj, perm, obj=None):\n # Ignore check without obj.\n if obj is None:\n return False\n\n # Ignore if user is not authenticated .\n if not user_obj.is_authenticated():\n return False\n\n # Resolve permission.\n try:\n perm = 'can_%s' % perm.split('.')[-1].split('_')[0]\n except IndexError:\n return False\n \n # Find shares for user and object content types.\n content_type = ContentType.objects.get_for_model(obj)\n user_shares = UserShare.objects.filter(\n content_type=content_type,\n object_id=obj.id,\n user=user_obj,\n )\n\n # Return true if user has permission.\n if user_shares.filter(**{perm: True}).exists():\n return True\n \n # Find shares for user group and object content types.\n group_shares = GroupShare.objects.filter(\n content_type=content_type,\n object_id=obj.id,\n group__in=user_obj.groups.all(),\n )\n\n # Return true if user group has permission.\n if group_shares.filter(**{perm: True}).exists():\n return True\n\n return False", "def has_perm(self, perm, obj=None):\n user_obj = self.user\n if not user_obj.is_active:\n return False\n return perm in self.get_group_permissions(obj)", "def user_has_perm(user, perm, obj):\n return _user_has_perm(user, perm, obj)", "def has_perm(context, perm, obj):\n return access.has_perm(context['request'].user, perm, obj)", "def has_perm(context, perm, obj):\n return access.has_perm(context['request'].user, perm, obj)", "def hasPermission(self, permission, extra_params):\n\n with DBSession(self.__config_db) as session:\n perm, params = ThriftAuthHandler.__create_permission_args(\n 
permission, extra_params, session)\n\n return require_permission(perm, params,\n self.__auth_session)", "def has_permission(self, permission_name, *args, **kwargs):\n # TODO: This should be mapped to permission object, so that \"perm in user.permissions\" would work\n # TODO: Now we search for a string, which is not so high-performance\n\n permission_access = False\n\n # Collect permissions\n #####################\n all_permissions = self.permissions\n\n # Add role permissions\n for role in self.roles:\n for permission in role.permissions:\n if permission not in all_permissions:\n all_permissions.append(permission)\n\n for group in self.groups:\n # Add direct group permissions\n for permission in group.permissions:\n if permission not in all_permissions:\n all_permissions.append(permission)\n # Add group permission given by roles\n for role in group.roles:\n for permission in role.permissions:\n if permission not in all_permissions:\n all_permissions.append(permission)\n\n # Check permissions\n ###################\n for permission in all_permissions:\n if permission_name == permission.name:\n permission_access = True\n break\n\n if permission_name not in app.permissions._permissions.keys():\n raise PermissionException(\"Permission %s does not exist\" % permission_name)\n\n if not permission_access:\n return False\n # right now, we only know that the user has the needed permission string.\n # But we need to execute the related permission function, if one was set\n permission = app.permissions._permissions[permission_name]\n\n # If no extra function for permissions tests is given, the permission check is true\n if permission.func is None:\n return True\n\n return app.permissions._permissions[permission_name].func(permission_name, *args, **kwargs)", "def __has_permission(self, permission) -> bool:\n if self.__manager.is_enabled and not self.__auth_session:\n return False\n\n return self.hasPermission(permission, None)", "def has_permission(self, user: User, permission: Permission) -> bool:\n return self.tree.has_permission(user, permission)", "def has_permission(cls, perm, user):\n return perm in cls.get_permissions(user)", "def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n if all([request.user, request.user.is_staff]):\n return True\n elif all([request.user, type(obj) == type(request.user), obj == request.user]):\n return True\n\n return True", "def _has_permission(self, user, user_is_mod, command, db_session):\n\n if command[1] == 'for_all':\n return True\n if command[1] == 'for_mods' and user_is_mod:\n return True\n if type(command[1]) == db.Command:\n db_command = command[1]\n if bool(db_command.permissions) is False:\n return True\n elif user in [permission.user_entity for permission in db_command.permissions]:\n return True\n return False", "def has_permission(self, permission: ActionObjectPermission) -> bool:\n collection_permissions_status = self.permissions\n if collection_permissions_status.is_err():\n return False\n collection_permissions: MongoCollection = collection_permissions_status.ok()\n\n # TODO: fix for other admins\n if self.root_verify_key.verify == permission.credentials.verify:\n return True\n\n permissions: Optional[Dict] = collection_permissions.find_one(\n {\"_id\": permission.uid}\n )\n\n if permissions is None:\n return False\n\n if permission.permission_string in permissions[\"permissions\"]:\n return True\n\n # check ALL_READ permission\n if (\n permission.permission == ActionPermission.READ\n 
and ActionObjectPermission(\n permission.uid, ActionPermission.ALL_READ\n ).permission_string\n in permissions[\"permissions\"]\n ):\n return True\n\n return False", "def has_perm(self, user_obj, perm, obj):\n return None", "def has_object_permission(self, request, view, obj):\n\n if request.method in permissions.SAFE_METHODS:\n return True\n\n return obj.user_profile.id == request.user.id", "def has_permission(self, permission):\n return permission in self._permissions", "def has_permission(self, permission: Union[BasePermission, int]) -> bool:\n if self.permissions & Administrator().value:\n return True\n\n if isinstance(permission, int):\n return self.permissions & permission == permission\n\n return self.permissions & permission.value == permission.value", "def has_perm(self, user, perms, any_perm=False, checker=None):\n if not has_guardian:\n return True\n\n checker = checker or ObjectPermissionChecker(user)\n perms = [perms] if isinstance(perms, str) else perms\n\n f = any if any_perm else all\n return f(checker.has_perm(p, self) for p in perms)", "def has_object_permission(self, request, view, obj):\n if request.user.is_manager or request.user == obj.registration.child.family:\n return True\n return False", "def check_permissions(cls, user: Union[AbstractUser, AnonymousUser]) -> bool:\n if not cls._meta.public and not check_authenticated(user):\n return False\n\n if not cls._meta.permissions:\n return True\n\n return check_perms(user, cls._meta.permissions, any_perm=cls._meta.permissions_any)", "def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS or request.user.is_admin:\n return True\n\n # Compare instance to the user in request\n return obj.user == request.user", "def has_object_permission(self, request, view, obj):\n return request.user == obj", "def has_perm(self, perm, obj=None):\n return True", "def has_perm(self, perm, obj=None):\n return True" ]
[ "0.7863119", "0.7828964", "0.7681567", "0.7681567", "0.7347684", "0.730742", "0.7284849", "0.72575694", "0.72098434", "0.7167823", "0.7167823", "0.71654195", "0.7089648", "0.70708424", "0.70448405", "0.7043486", "0.69482195", "0.6947326", "0.69120204", "0.69032484", "0.6840182", "0.6820343", "0.6757894", "0.6730788", "0.6728846", "0.6716837", "0.66395116", "0.66267544", "0.6620422", "0.6620422" ]
0.7860136
1
Test scenario where the branch has been deleted by someone else.
def test_branch_deleted(local):
    pytest.run(local, ['git', 'checkout', 'feature'])
    pytest.run(local, ['git', 'push', 'origin', '--delete', 'feature'])
    local.join('README').write('Changed by local.')

    # Run.
    actual = commit_and_push(str(local), 'origin', Versions(REMOTES))

    assert actual is True
    pytest.run(local, ['git', 'diff-index', '--quiet', 'HEAD', '--'])  # Exit 0 if nothing changed.
    assert local.join('README').read() == 'Changed by local.'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def branch_delete(request, branch_id):\n branch = models.Branch.get_by_id(int(branch_id))\n if branch.owner != request.user:\n return HttpTextResponse('You do not own this branch', status=403)\n\n repo_key = branch.repo_key\n branch.key.delete()\n num_branches = models.Branch.query(models.Branch.repo_key == repo_key).count()\n if not num_branches:\n # Even if we don't own the repository? Yes, I think so! Empty\n # repositories have no representation on screen.\n repo_key.delete()\n\n return HttpResponseRedirect(reverse(repos))", "def __gitDeleteBranch(self):\n self.vcs.gitDeleteRemoteBranch(self.project.getProjectPath())", "def test_delete_run(self):\n pass", "def test_handle_delete(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"12345\"\n test_user = User(\"userid\")\n test_user.github_id = \"1234\"\n team.add_team_lead(\"1234\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (f\"Team brs deleted\", 200))\n self.db.delete.assert_called_once_with(Team, \"12345\")\n self.gh.org_delete_team.assert_called_once_with(int(\"12345\"))", "def test_delete_case(self):\n pass", "def execute(self: \"DeleteBranchOperator\", context: Dict[str, Any]) -> Any:\n hook = NessieHook(conn_id=self.conn_id)\n\n hook.delete_reference(self.branch)", "def test_heads_delitem_pass(repository: Repository) -> None:\n head, heads = repository.head, repository.heads\n heads[\"branch\"] = head.commit\n del heads[\"branch\"]\n assert \"branch\" not in heads", "def test_delete(self):\n pass", "def test_delete(self):\n scenario = factories.Scenario(config='', status=Scenario.Status.INACTIVE)\n scenario.delete()\n self.assertEqual(scenario.status, Scenario.Status.INACTIVE)", "def test_delete_boat(self):\n pass", "def test_client_bank_account_delete(self):\n pass", "def test_delete_goal(self):\n pass", "def test_delete1(self):\n pass", "def delete_branch(api_access_token: str, repo: str, ref: str) -> response.Response:\n api = github.Github(api_access_token)\n\n repository = api.get_repo(repo)\n repository_ref = repository.get_git_ref('heads/{}'.format(ref))\n repository_ref.delete()\n\n return response.success('Successfully deleted \"{}\" from repository \"{}\"'.format(ref, repo))", "def test_delete(self):\n self.basic_login()\n cassette_name = self.cassette_name(\"delete\")\n with self.recorder.use_cassette(cassette_name):\n auth = self.gh.authorize(\n username=self.user,\n password=self.password,\n scopes=[\"gist\"],\n note=\"testing github3.py\",\n )\n assert isinstance(auth, github3.auths.Authorization)\n assert auth.delete() is True", "def test_delete_no_target(self):\n # login as library manager\n self.authenticate(self.user)\n\n # remove all works\n Work.objects.all().delete()\n\n # prune works\n response = self.client.delete(self.url)\n\n # check http status\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # check the response\n self.assertDictEqual(response.data, {\"deleted_count\": 0})", "def test_delete_data(self):\n data_github = {\n \"version_control\": \"github\",\n \"scm_repo\": \"test_delete\",\n \"scm_branch\": \"test_delete\",\n \"scm_commit\": \"test_delete\",\n \"repo\": \"test_delete1\",\n \"branch\": \"test_delete1\",\n \"enabled\": 0\n }\n\n data_git = {\n \"version_control\": \"git\",\n \"scm_repo\": \"test_delete\",\n \"scm_branch\": \"test_delete\",\n \"scm_commit\": \"test_delete\",\n \"repo\": \"test_delete1\",\n \"branch\": 
\"test_delete1\",\n \"enabled\": 0\n }\n\n for data in [data_git, data_github]:\n self.client.post(\"/tracking\", json=data, content_type=\"application/json\", headers=self.auth)\n\n resp = self.client.delete(\n \"/tracking?repo=test_delete1&branch=test_delete1\", content_type=\"application/json\", headers=self.auth\n )\n resp_dict = json.loads(resp.data)\n self.assertIn(\"code\", resp_dict, msg=\"Error in data format return\")\n self.assertEqual(ResponseCode.SUCCESS, resp_dict.get(\"code\"), msg=\"Error in status code return\")", "def test_delete__valid(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with register.app.test_request_context(self.request_path):\n actual_json = self.handler.do_delete(self.feature_id)\n self.assertEqual({'message': 'Done'}, actual_json)\n\n revised_feature = models.Feature.get_by_id(self.feature_id)\n self.assertTrue(revised_feature.deleted)", "def test_heads_pop_removes_branch(repository: Repository) -> None:\n heads = repository.heads\n heads[\"branch\"] = repository.head.commit\n heads.pop(\"branch\")\n assert \"branch\" not in heads", "def test_handle_delete_not_admin(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n test_user = User(\"userid\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()", "def test_duo_account_delete(self):\n pass", "def test_branching(self):\r\n repo_dir = self.GIT_REPO_DIR\r\n # Test successful import from command\r\n if not os.path.isdir(repo_dir):\r\n os.mkdir(repo_dir)\r\n self.addCleanup(shutil.rmtree, repo_dir)\r\n\r\n # Checkout non existent branch\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):\r\n git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')\r\n\r\n # Checkout new branch\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n def_ms = modulestore()\r\n # Validate that it is different than master\r\n self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n\r\n # Attempt to check out the same branch again to validate branch choosing\r\n # works\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n\r\n # Delete to test branching back to master\r\n delete_course(def_ms, contentstore(),\r\n self.TEST_BRANCH_COURSE,\r\n True)\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n 'master')\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))", "def test_delete7(self):\n pass", "def test_issue_delete_issue_reaction(self):\n pass", "def test_delete_user(self):\n pass", "def test_delete_user(self):\n pass", "def test_delete(client):\n rv = delete(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def test_delete_team(self):\n pass", "def test_delete_user(self):\n\n with self.client:\n result = self.client.post('/users/cool-guy-johnny-B/delete',\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertNotIn(b'cool-guy-johnny-B', result.data)", "def test_delete_team_member(self):\n pass" ]
[ "0.70407706", "0.6978257", "0.6950403", "0.69398266", "0.69226867", "0.6910521", "0.68338317", "0.6806933", "0.67489284", "0.6737305", "0.67202115", "0.6653965", "0.65832716", "0.65230274", "0.65227497", "0.6496914", "0.64698577", "0.6468279", "0.6465555", "0.64556766", "0.6448419", "0.6447418", "0.64461327", "0.64441574", "0.644069", "0.644069", "0.64318573", "0.64176315", "0.63810176", "0.62923664" ]
0.7768845
0
Get and validate user input for a bounded number. Loops until a valid value is entered. Uses the global bounds LOWER_BOUND and UPPER_BOUND.
def get_number():
    valid_input = False
    while not valid_input:
        try:
            user_num = int(input("Enter a number between {} and {}: ".format(LOWER_BOUND, UPPER_BOUND)))
            if LOWER_BOUND <= user_num <= UPPER_BOUND:
                return user_num
        except ValueError:
            pass
        print("That is not a valid number !")
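A minimal sketch of how the surrounding module might define the global bounds the function relies on; the concrete values are illustrative, not taken from the source.

# Hypothetical module-level globals referenced by get_number() above.
LOWER_BOUND = 1
UPPER_BOUND = 100

if __name__ == "__main__":
    # Pairs with the get_number() definition shown above.
    print("You picked:", get_number())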
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def boundary(quantity, lower, upper):\r\n in_range = False\r\n while not in_range:\r\n if quantity < lower or quantity > upper:\r\n quantity = int(input(\"That is out of range, please try a number between \" + \\\r\n str(lower) + \" and \" + str(upper) + \": \"))\r\n else:\r\n in_range = True\r\n return quantity", "def AskForNumberRange():\n\n\twhile True:\n\t\t# This OUTER loop will loop forever until the user enters correct integers for\n\t\t# lower and upper bound, such that lobound < hibound.\n\n\t\twhile True:\n\t\t\t# This INNER loop will loop forever until the user enters a valid value for lobound\n\t\t\tprint \"Enter the LOWER bound for the range of numbers, or press enter for default 1:\"\n\t\t\tlobound = SolicitInteger( default_return=1 )\n\t\t\tif lobound != None:\n\t\t\t\tprint \"Ok, lower bound of {}.\".format( lobound )\n\t\t\t\tbreak\n\n\t\twhile True:\n\t\t\t# This INNER loop will loop forever until the user enters a valid value for hibound\n\t\t\tprint \"Enter the UPPER bound for the range of numbers that's greater than the lowerbound, or press enter for default 20:\"\n\t\t\thibound = SolicitInteger( default_return=20 )\n\t\t\tif hibound != None:\n\t\t\t\tprint \"Ok, upper bound of {}.\".format( hibound )\n\t\t\t\tbreak\n\n\t\tif lobound < hibound:\n\t\t\t# We've got what we need! return out of this function!\n\t\t\treturn lobound, hibound\n\n\t\t# Uh oh. If we're still here, the user didn't enter in a correct range\n\t\tprint \"***Invalid input: upper bound must be greater than lower bound***\"\n\t\t# Back to the beginning of the outer loop", "def check_input(min_guess_range, max_guess_range):\n\twhile True:\n\t\ttry:\n\t\t\tplayerGuess = int(input('enter your guess: '))\n\t\t\tassert min_guess_range <= playerGuess <= max_guess_range\n\n\t\texcept AssertionError:\n\t\t\tprint('guess should be between {0} - {1}!'.format(min_guess_range, max_guess_range))\n\t\texcept ValueError:\n\t\t\tprint('numbers only!')\n\t\telse:\n\t\t\treturn playerGuess", "def ask_number(message: str) -> int:\n global number\n assert isinstance(message, str), \"message should be a string\"\n stop_condition2 = False\n while not stop_condition2:\n try:\n number = int(input(message))\n if number < lower_range:\n print(\"Please pick a number within the range\", lower_range, \"and\", upper_range, \".\")\n elif number > upper_range:\n print(\"Please pick a number between\", lower_range, \"and\", upper_range, \".\")\n else:\n stop_condition2: bool = True\n except ValueError as ve:\n print(\"This is not a number.\")\n return number", "def validate_correct_hint(self):\n is_response_hint_valid = False\n while is_response_hint_valid is False:\n hint_value = self.ask_user_input(\"Enter maximum hint threshold\")\n if not hint_value.isdigit():\n print(\"Not a number, please try again\")\n elif 0 <= int(hint_value) <= 81:\n is_response_hint_valid = True\n self.current_response = hint_value\n else:\n print(\"Number is out of the valid range, please try again\")\n return is_response_hint_valid", "def ask_number(low, high, tries):\n the_number = None\n while the_number not in range(low, high):\n the_number = int(input(\"Enter a number between 1-100: \"))\n return the_number\n print(\"The computer has\", tries, \"tries to guess your number\\n\")", "def ask_number(question, low, high):\n response = None\n while response not in range(low, high, 1):\n response = input(question)\n return response", "def test_pick():\r\n global user_pick\r\n while user_pick > pickno or user_pick <= 0 or type(user_pick):\r\n user_pick = 
int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n #Keeps the number of balls picked by user to be between 0 and 4\r", "def _ask_user_range(question, first, last, default):\n\n while True:\n answer = input(question)\n if answer == \"\":\n answer = default\n break\n if re.findall(r\"[0-9+]\", answer):\n if int(answer) in range(first, last + 1):\n break\n else:\n print(\n \"Please a value between {} and {} or Return.\".format(\n first, last\n )\n )\n else:\n print(\n \"Please a number between {} and {} or Return.\".format(first, last)\n )\n\n return int(answer)", "def get_input():\n numb = int(input(\"Enter a number 1-10 \"))\n while True:\n if numb > 0 and numb < 10:\n return(numb)\n else:\n return(\"Please enter a value 1-10\")", "def stubborn_asker(low, high):\n import random\n a=random.randint(1,100)\n for i in range(1,10):\n n=input('enter the number: ')\n if n.isdigit():\n n=int(n)\n if n==a:\n return('Correct')\n break\n elif n>a:\n return('The number is bigger.')\n elif n<a:\n return('The number is smaller.')\n else:\n return('please enter an integer.')\n i+=1", "def part2():\n random_number = random.randrange(1,10,1)\n user_input = input(\"Guess the number: \")\n while(user_input != \"exit\"):\n if(int(user_input) > random_number):\n print(\"Too high\")\n elif(int(user_input) < random_number):\n print(\"Too low\")\n else:\n print(\"Exactly right\")\n user_input = input(\"Guess the number: \")", "def input_loop(menu_range):\n def check(inp, rng):\n\n try:\n chk = int(inp)\n except ValueError:\n return False\n\n if chk in range(0, rng):\n return True\n else:\n return False\n\n print('-' * 20) # spacer\n\n inpu = input('choose option: ')\n\n while not check(inpu, menu_range):\n inpu = input('try again: ')\n\n return int(inpu)", "def integer_input( min_value=0, max_value=999, default=0, \n prompt=\"please type number and press ENTER\"):\n while True:\n raw = input(prompt)\n if not raw.isdigit():\n print(\"please enter a number\")\n continue\n raw = int(raw)\n if min_value <= raw <= max_value:\n return raw\n print(\"please enter value between {} and {}\".format(min_value,\n max_value))", "def guess_number(min_guess_range, max_guess_range):\n\tprint(f'guess the number between {min_guess_range} and {max_guess_range}!')\n\treturn check_input(min_guess_range, max_guess_range)", "def ask_number (question,low,high):\n response = None\n while response not in range(low,high):\n response = int(input(question))\n return response", "def check_value(self, name, min_int, max_int):\n while True:\n numb = input(f\"-- {name} : Entrez une valeur comprise \"\n f\"entre {min_int} et {max_int} : \")\n try:\n check = int(numb)\n if check == 99 or min_int <= check <= max_int:\n break\n except ValueError:\n pass\n return check", "def get_integer(prompt: str, error_prompt: str, limits_prompt: str, min_num: int = -float('inf'),\n max_num: int = float('inf')) -> int:\n while True:\n try:\n integer = int(input(prompt))\n if max_num >= integer >= min_num:\n return integer\n print(limits_prompt)\n except ValueError:\n print(error_prompt)", "def user_choice():\n number_choice=50 #for enter in a loop\n while number_choice < 0 or number_choice > 49:\n try:\n number_choice=int(input(\"enter number between 0 and 49 :\")) #ask user a number and convert it in integer\n except ValueError: # if number_choice not a number\n print(\"your enter is not a number\") #display error message\n number_choice = 50 #return in a loop\n if number_choice < 0 or number_choice >49:\n print(\"your enter is not included in 
range\") #display error message if number is out of range\n return number_choice", "def main():\n number = 99\n bouncy_n = 0\n while True:\n number += 1\n if IsBouncy(number):\n bouncy_n += 1\n proportion = (bouncy_n / number)\n if proportion == 0.99:\n print(f'The least number when the proportion of bouncy numbers is 99% is {number:,}')\n break", "def validate_bet(buy_type, cash_in):\n while cash_in < 0:\n print(\"Invalid\", buy_type)\n cash_in = round(float(input(\"Enter \" + buy_type + \": $\")), 2)\n\n return cash_in", "def get_int_input_constrained(prompt, value_min, value_max, value_default):\n\n input_value = 0\n while input_value < 1:\n txt = input(prompt)\n try:\n input_value = min(max(int(txt), value_min), value_max)\n except ValueError:\n input_value = value_default\n\n return (True, input_value)", "def ask_number(question, low, high):\n response = None\n while response not in range(low, high):\n response = int(input(question))\n return response", "def ask_number(question, low, high):\n response = None\n while response not in range(low, high):\n response = int(input(question))\n return response", "def valid(question, first, last):\n\n while 1:\n try:\n choice = input(question)\n if choice < first or choice > last or not isinstance(choice, int):\n print \"\\nInvalid input, please try again.\"\n else:\n return choice\n except Exception:\n print \"\\nInvalid input, please try again.\"", "def ask_number(question, low, high):\n response = None\n while response not in range (low, high):\n response = int(input(question))\n return response", "def gameLogic(level = 0):\n\n\tallLevels = [0, 1, 2, 3, 4] #all possible levels of this game\n\t#ranges where the user must choose a number from the appropriate domain for each level\n\tnumberRanges = [(1, 500), (1, 1000), (1, 1500), (1, 2000), (1, 2500)] \n\tif level > 4:\n\t\treturn\n\tcurrentRange = numberRanges[level]\n\t\n\tprint(\"\\t\\t\\t***********************************************\")\n\tprint(f\"\\t\\t\\tKEEP IN YOUR MIND NUMBER FROM RANGE {currentRange[0]} to {currentRange[1]}!\")\n\tprint(\"\\t\\t\\t***********************************************\")\n\tready = getAnswer(\"Are you ready?\")\n\tprint(\"\\n\")\n\tif ready:\n\t\tlowerNumber, higherNumber = numberRanges[level][0], numberRanges[level][1]\n\t\trightAnswer = False\n\t\twhile (higherNumber > numberRanges[level][0] or higherNumber < numberRanges[level][1]) and not rightAnswer:\n\t\t\tmid = (higherNumber + lowerNumber) // 2\n\t\t\tans = getAnswer(f\"Does your number is {mid}?\", mid)\n\t\t\tif ans:\n\t\t\t\trightAnswer = True\n\t\t\telse:\n\t\t\t\tcurrentNumRange = lowerNumber, higherNumber\n\t\t\t\tlowerNumber, higherNumber = checkRange(currentNumRange, level)\n\n\t\tif level < 4:\n\t\t\tprint(\"\\t\\t===========================================\")\n\t\t\tprint(\"\\t\\tOK! Let's make it a little more complicated\")\n\t\t\tprint(\"\\t\\t===========================================\")\n\t\t\tlevel += 1\n\t\t\tgameLogic(level)\n\t\telse:\n\t\t\tprint(\"\\n\\t\\t\\t***************************************************\")\n\t\t\tprint(\"\\t\\t\\tEND OF GAME!\")\n\t\t\tprint(\"\\t\\t\\tI hope you made sure that I can guess any number!!\")\n\t\t\tprint(\"\\t\\t\\t******************************************************\")\n\n\telse: #don't ready\n\t\twhetherWannaContinue = getAnswer(\"OK: Do You want to continue this game? Am I waiting for you?\")\n\t\tif not whetherWannaContinue:\n\t\t\tprint(\"OK! 
Good bye!\")\n\t\t\treturn\n\t\telse:\n\t\t\talreadyReady = False\n\t\t\twhile not alreadyReady:\n\t\t\t\tprint(\"If you will be ready please Enter Y[es]\")\n\t\t\t\talreadyReady = getAnswer(\"Are you ready?\")\n\t\t\tgameLogic(level)", "def GetInteger(prompt=\"Please enter a number:\",\n lowerbound=0, upperbound=99,\n smaller_prompt=\"It's Smaller, please re-enter:\",\n bigger_prompt=\"It's Bigger, please re-enter:\",\n not_int_prompt=\"You did not enter a number, please re-enter:\"):\n user_input = input(prompt)\n\n def InternalFunc1(num):\n while True:\n try:\n return int(num)\n except ValueError:\n num = input(not_int_prompt)\n result = InternalFunc1(user_input)\n\n while not lowerbound <= result <= upperbound:\n if result < lowerbound:\n user_input = input(smaller_prompt)\n result = InternalFunc1(user_input)\n if upperbound < result:\n user_input = input(bigger_prompt)\n result = InternalFunc1(user_input)\n return result", "def guest_num(max=20):\n rand_num = random.randint(1, 101)\n retries = 0\n while retries <= max:\n try:\n n = int(input('Input a number: '))\n if n == rand_num:\n print('YOU WIN!')\n break\n elif n > rand_num:\n print('Iputed number is great than result number. Just retry!')\n retries += 1\n else:\n print('Iputed number is less than result number. Just retry!')\n retries += 1\n except ValueError:\n print('Only can input a number!')\n except:\n print('Only can input a number!')\n else:\n print('YOU LOST!')", "def main():\n # init variables\n lower_bound = 1\n higher_bound = 10\n guess = generate_guess(1, 10)\n while True:\n try:\n secret = input(\"What should the computer guess? Enter a number between 1 and 10: \")\n except ValueError:\n print(\"{} isn't a number!\".format(secret))\n while True:\n if int(guess) == int(secret):\n print(\"I guessed {}! Your number was {}! I win!\".format(guess, secret))\n play_again = input(\"Do you want to play again? (Y/n)\")\n if play_again != \"Y\":\n print(\"Thanks for playing!\")\n exit()\n else:\n main()\n elif int(guess) != int(secret):\n high_or_low = input(\"I guessed {}. Was it high or low? (H/L)\".format(guess))\n print(\"G: {}, HB: {}, LB: {}\".format(guess, higher_bound, lower_bound))\n if high_or_low == \"H\":\n higher_bound = guess - 1\n guess = generate_guess(lower_bound, higher_bound)\n elif high_or_low == \"L\":\n lower_bound = guess + 1\n guess = generate_guess(lower_bound, higher_bound)\n else:\n print(\"Please try again: \\n\")" ]
[ "0.7017077", "0.6891233", "0.6279303", "0.6265519", "0.6217458", "0.6194167", "0.6188855", "0.6186043", "0.6179174", "0.61711013", "0.6162517", "0.6142887", "0.6130406", "0.61281335", "0.6114784", "0.60892946", "0.60543203", "0.6048339", "0.6033415", "0.6026771", "0.5994991", "0.5975282", "0.59742045", "0.59742045", "0.59728616", "0.5954552", "0.59490955", "0.5948008", "0.58976346", "0.58764625" ]
0.70898324
0
Bind to the interface identified by ``IID_str`` with the given ``version``.
def bind(self, IID_str, version=(1, 0)):
    IID = windows.com.IID.from_string(IID_str)
    request = self._forge_bind_request(IID, version, self.number_of_bind_if)
    response = self._send_request(request)
    # Parse response
    request_type = self._get_request_type(response)
    if request_type != gdef.RPC_RESPONSE_TYPE_BIND_OK:
        raise ValueError("Unexpected response type. Expected RESPONSE_TYPE_BIND_OK got {0}".format(KNOW_RESPONSE_TYPE[request_type]))
    iid_hash = hash(buffer(IID)[:])  # TODO: add __hash__ to IID
    self.if_bind_number[iid_hash] = self.number_of_bind_if
    self.number_of_bind_if += 1
    # TODO: attach version information to IID
    return IID
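A hypothetical usage sketch; the client class, ALPC endpoint, and IID string below are placeholders (only the bind() signature comes from the snippet above).

# All names below are placeholders for whatever ALPC RPC client exposes bind().
client = AlpcRpcClient(r"\RPC Control\example_endpoint")  # hypothetical client object
EXAMPLE_IID = "00000000-1111-2222-3333-444444444444"      # placeholder interface ID string
iid = client.bind(EXAMPLE_IID, version=(1, 0))            # raises ValueError unless the bind is acknowledged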
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_server_ident(name, version=None):\n global server_ident\n \n server_ident[\"server_name\"] = name\n \n if version != None and len(version) > 0:\n server_ident[\"server_version\"] = str(version)\n version_text = \"/%s\" % server_ident[\"server_version\"]\n else:\n version_text = \"\"\n \n server.version = server_ident[\"server_name\"] + version_text", "async def set(\n self,\n itx: discord.Interaction,\n /,\n version: app_commands.Transform[str, bible_lookup],\n ) -> None:\n\n if TYPE_CHECKING:\n assert itx.guild is not None\n\n version = version.lower()\n\n async with Session.begin() as session:\n existing = await BibleVersion.get_by_command(session, version)\n await existing.set_for_guild(session, itx.guild)\n\n await utils.send_embed(\n itx,\n description=self.localizer.format(\n 'set.response',\n data={'version': version},\n locale=itx.locale,\n ),\n ephemeral=True,\n )", "def version_number(version_str):\n raise NotImplementedError", "def version(self, version: str):\n\n self._version = version", "def version(self, version: str):\n\n self._version = version", "def setVersion(self, version) :\n if version is not None :\n try :\n self.version = [int(p) for p in version.split(\".\")]\n except AttributeError :\n if len(version) == 2 : # 2-tuple\n self.version = version\n else :\n try :\n self.version = [int(p) for p in str(float(version)).split(\".\")]\n except :\n self.version = [int(p) for p in IPP_VERSION.split(\".\")]", "def version_name(self, version_name):\n\n self._version_name = version_name", "def version(self, version):\n self._version = utils.VersionParser().parse(version)", "def version(self, version):\n self._version = version", "def version(self, version):\n self._version = version", "def __init__(self, *args):\n _snap.TStrVIntPrV_swiginit(self, _snap.new_TStrVIntPrV(*args))", "def do_version(self, line):\n self.version = self.load_version()\n print('%s-%s' % (self.PIP_NAME, self.version))", "def version(self, version):\n \n self._version = version", "def get_bindings_by_version_tag(api_major):\n api_major = str(api_major)\n if api_major in ('v1', '1'):\n return v1_2\n elif api_major in ('v2', '2'):\n return v2_0\n else:\n raise ValueError('Unknown DataONE API version tag: {}'.format(api_major))", "def __init__(self, value: str) -> None:\n try:\n id_part, version_part = self.split('v', 1)\n self.arxiv_id = Identifier(id_part)\n self.version = int(version_part)\n except ValueError as e:\n raise ValueError(f'Not a valid version identifier: {value}') from e", "def _GetVersion(version_str):\n return int(version_str.split('.')[1])", "def player_version(self, player_version):\n # type: (string_types) -> None\n\n if player_version is not None:\n if not isinstance(player_version, string_types):\n raise TypeError(\"Invalid type for `player_version`, type has to be `string_types`\")\n\n self._player_version = player_version", "def from_parts(cls, arxiv_id: Identifier, version: int) \\\n -> 'VersionedIdentifier':\n return cls(f'{arxiv_id}v{version}')", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", 
"def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version" ]
[ "0.5488293", "0.5464984", "0.5414237", "0.5216008", "0.5216008", "0.5132779", "0.5110682", "0.50267404", "0.49960196", "0.49960196", "0.49735996", "0.49455595", "0.4940661", "0.49362436", "0.49324334", "0.49238288", "0.48973906", "0.48956478", "0.48696187", "0.48696187", "0.48696187", "0.48696187", "0.48696187", "0.48696187", "0.48696187", "0.48696187", "0.48696187", "0.48696187", "0.48696187", "0.48696187" ]
0.7529546
0
Craft an ALPC message containing an RPC request to call ``method_offset`` of interface ``IID`` with ``params``. Can be used to craft a request without sending it directly.
def forge_alpc_request(self, IID, method_offset, params, ipid=None):
    iid_hash = hash(buffer(IID)[:])
    interface_nb = self.if_bind_number[iid_hash]  # TODO: add __hash__ to IID
    if len(params) > 0x900:  # 0x1000 - size of meta-data
        request = self._forge_call_request_in_view(interface_nb, method_offset, params, ipid=ipid)
    else:
        request = self._forge_call_request(interface_nb, method_offset, params, ipid=ipid)
    return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def call(self, IID, method_offset, params, ipid=None):\n request = self.forge_alpc_request(IID, method_offset, params, ipid=ipid)\n response = self._send_request(request)\n # Parse reponse\n request_type = self._get_request_type(response)\n if request_type != gdef.RPC_RESPONSE_TYPE_SUCCESS:\n raise ValueError(\"Unexpected reponse type. Expected RESPONSE_SUCCESS got {0}\".format(KNOW_RESPONSE_TYPE[request_type]))\n\n # windows.utils.sprint(ALPC_RPC_CALL.from_buffer_copy(response + \"\\x00\" * 12))\n data = struct.unpack(\"<6I\", response[:6 * 4])\n assert data[3] == self.REQUEST_IDENTIFIER\n return response[4 * 6:] # Should be the return value (not completly verified)", "def createRequest(self, method, *params):\n # this method update interface contract in order to be compatible with\n # methods defined by connection handlers\n return super(MsgpackDatagramProtocol, self).createRequest(method, params)", "def _rpc_request(self, method, params, key):\n payload = {\n \"method\": method,\n \"params\": params,\n \"jsonrpc\": \"2.0\",\n \"id\": 0\n }\n res = requests.post(\n \"http://{}:{}\".format(self.url, self.port),\n data=json.dumps(payload),\n headers=self.headers).json()\n return res[key]", "def build_method_call(code, line, method_object):\n full_signature = method_object[\"methodSignature\"]\n normalised_signature = normalise_signature(full_signature)\n param_values = get_method_parameter_values(code, line, full_signature)\n string_values, cmplx_types = get_string_values(param_values, full_signature)\n\n rpc_payload_length = str(\n 4 + len(normalised_signature) + len(string_values)\n )\n # Default to stub value if method-to-service correlation failed\n strong_name = (\n method_object[\"service\"][\"strongName\"]\n if method_object[\"service\"] is not None\n else \"X\"*32\n )\n rpc_blocks = []\n rpc_blocks.extend([\n RPC_VERSION,\n RPC_FLAGS,\n rpc_payload_length,\n BASE_URL,\n strong_name,\n method_object[\"rmtSvcIntName\"],\n method_object[\"methodName\"],\n ])\n rpc_blocks.extend(normalised_signature)\n rpc_blocks.extend(string_values)\n rpc_blocks.extend([\n \"1\", \"2\", \"3\", \"4\",\n method_object[\"paramCount\"]\n ])\n rpc_blocks.extend(\n generate_parameter_map(\n rpc_blocks,\n full_signature,\n param_values\n )\n )\n return rpc_blocks, cmplx_types", "def call(self, method, name, params=None, payload=None, **kwds):", "async def call_rpc(self, rpc_message: RpcMessage, options: dict, bus_client: \"BusClient\"):\n raise NotImplementedError()", "def rpc_call(self, request, method=None, params=None, **kwargs):\r\n args = []\r\n kwargs = dict()\r\n if isinstance(params, dict):\r\n kwargs.update(params)\r\n else:\r\n args = list(as_tuple(params))\r\n\r\n method_key = \"{0}.{1}\".format(self.scheme_name, method)\r\n if method_key not in self.methods:\r\n raise AssertionError(\"Unknown method: {0}\".format(method))\r\n method = self.methods[method_key]\r\n\r\n if hasattr(method, 'request'):\r\n args.insert(0, request)\r\n\r\n return method(*args, **kwargs)", "def _jadeRpc(self, method, params=None, inputid=None, http_request_fn=None, long_timeout=False):\n newid = inputid if inputid else str(random.randint(100000, 999999))\n request = self.jade.build_request(newid, method, params)\n reply = self.jade.make_rpc_call(request, long_timeout)\n result = self._get_result_or_raise_error(reply)\n\n # The Jade can respond with a request for interaction with a remote\n # http server. 
This is used for interaction with the pinserver but the\n # code below acts as a dumb proxy and simply makes the http request and\n # forwards the response back to the Jade.\n # Note: the function called to make the http-request can be passed in,\n # or it can default to the simple _http_request() function above, if available.\n if isinstance(result, collections.abc.Mapping) and 'http_request' in result:\n this_module = sys.modules[__name__]\n make_http_request = http_request_fn or getattr(this_module, '_http_request', None)\n assert make_http_request, 'Default _http_request() function not available'\n\n http_request = result['http_request']\n http_response = make_http_request(http_request['params'])\n return self._jadeRpc(\n http_request['on-reply'],\n http_response['body'],\n http_request_fn=make_http_request,\n long_timeout=long_timeout)\n\n return result", "def _rpc(http, project, method, base_url, client_info, request_pb, response_pb_cls):\n req_data = request_pb.SerializeToString()\n response = _request(http, project, method, req_data, base_url, client_info)\n return response_pb_cls.FromString(response)", "def remote(self, method, params=()):\n\n response = self.transport.request(self.host, \n '/RPC2',\n dumps(params, method))\n return response", "def _dispatch(self, method, params):\n logging.debug('Calling %s%s', method, params)\n self._rpc_received_event.set()\n return SimpleJSONRPCServer.SimpleJSONRPCServer._dispatch(\n self, method, params)", "def build_request(input_id, method, params=None):\n request = {\"method\": method, \"id\": input_id}\n if params is not None:\n request[\"params\"] = params\n return request", "def api_call(self, method, host, params):\n session_id = self.rpc_login(host)\n params.insert(0, session_id)\n json_rpc_request = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params,\n 'id': self.ID\n }\n\n self.ID += 1\n response = requests.post(host, data=json.dumps(json_rpc_request), headers=self.headers)\n\n return response", "def _request(self, account, method, params, key):\n params_bytes = py23_bytes(json.dumps(params), self.ENCODING)\n params_enc = base64.b64encode(params_bytes).decode(self.ENCODING)\n timestamp = datetime.utcnow().strftime(self.TIMEFORMAT)[:-3] + \"Z\"\n nonce_int = random.getrandbits(64)\n nonce_bytes = struct.pack('>Q', nonce_int) # 64bit ULL, big endian\n nonce_str = \"%016x\" % (nonce_int)\n\n message = self.prehash_message(timestamp, account, method,\n params_enc, nonce_bytes)\n signature = sign_message(message, key)\n signature_hex = hexlify(signature).decode(self.ENCODING)\n\n request = {\n \"jsonrpc\": \"2.0\",\n \"id\": self.id,\n \"method\": method,\n \"params\": {\n \"__signed\": {\n \"account\": account,\n \"nonce\": nonce_str,\n \"params\": params_enc,\n \"signatures\": [signature_hex],\n \"timestamp\": timestamp\n }\n }\n }\n r = requests.post(self.url, data=json.dumps(request))\n self.id += 1\n return r.json()", "def __getattr__(self, cmd):\n\n if hasattr(self._rpc, cmd+'Request'):\n lnfunc = getattr(self._rpc, cmd+'Request')\n elif hasattr(self._rpc, f'Get{cmd}Request'):\n lnfunc = getattr(self._rpc, f'Get{cmd}Request')\n else:\n raise NotImplementedError('Unhandled method self._rpc.(Get)' + cmd + 'Request')\n\n if hasattr(self._stub, cmd):\n stubfunc = getattr(self._stub, cmd)\n\n def rpcCommand(*args,**kwargs):\n return stubfunc(lnfunc(*args, **kwargs))\n return rpcCommand\n\n elif hasattr(self._stub, 'Get'+cmd):\n stubfunc = getattr(self._stub, 'Get'+cmd)\n def rpcCommand(*args,**kwargs):\n if args:\n raise 
TypeError('Cannot use positional arguments with this command')\n return stubfunc(lnfunc(**kwargs))\n return rpcCommand\n\n else:\n raise NotImplementedError('Unhandled method stub.(Get)' + cmd)", "def rpc(self) -> global___Rpc:", "def _call_method(self, method, req, resp_class):\n payload = req.SerializeToString()\n headers = {\n 'Content-Type': 'application/x-protobuf',\n 'Content-Length': str(len(payload))\n }\n response, content = self._http.request(\n self._url + method, method='POST', body=payload, headers=headers)\n if response.status != 200:\n raise RPCError(method, response, content)\n resp = resp_class()\n resp.ParseFromString(content)\n return resp", "def _execApiCall(headers, params, method_name,\r\n domain='ma.gnolia.com',\r\n urlhead='/api/rest/1/'):\r\n \r\n if 'api_key' not in params and method_name not in ['echo', 'get_key']:\r\n raise MagnoliaException('Required API Key parameter missing')\r\n conn = httplib.HTTPConnection(domain)\r\n conn.request('POST', urlhead + method_name, params, headers)\r\n return conn.getresponse()", "def do_rpc(self, method, **params):\n data = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params,\n 'authorization': self._auth_token,\n 'id': '1'\n }\n\n r = requests.post(self._url, json=data)\n validate_response(r)\n\n return r.json()['result']", "def _ServerProxy__request(self, methodname, params):\n\n paddedHandler = self._ServerProxy__handler\n\n # add on the methodName\n sep = '&'\n if '?' not in paddedHandler:\n sep = '?'\n paddedHandler = paddedHandler + \"%smethod=%s\" % (sep, methodname)\n sep = '&'\n\n # add on the auth token\n if self._authToken:\n paddedHandler = paddedHandler + \"%sauth_token=%s\" % (sep, urllib.quote_plus(self._authToken))\n\n # add on the partnerId\n if self._partnerId:\n paddedHandler = paddedHandler + \"%spartner_id=%s\" % (sep, self._partnerId)\n\n # add on the userId\n if self._userId:\n paddedHandler = paddedHandler + \"%suser_id=%s\" % (sep, self._userId)\n\n EXCLUDED_PAYLOAD_CALLS = ([\n \"auth.partnerLogin\",\n \"test.\",\n \"debug.\",\n \"testability.\"\n ])\n encryptRequest = True\n if self._requestCipher:\n for excludedMethodPattern in EXCLUDED_PAYLOAD_CALLS:\n if methodname.startswith(excludedMethodPattern):\n encryptRequest = False\n break\n else:\n encryptRequest = False\n\n # add the syncTime request\n if encryptRequest and self._sync:\n server_value, sync_time = self._sync\n params[0]['syncTime'] = server_value + int(time.time()) - sync_time\n\n request = xmlrpclib.dumps(params, methodname,\n encoding=self._ServerProxy__encoding,\n allow_none=self._ServerProxy__allow_none)\n\n #print \"------- XML REQUEST --------\"\n #print request\n\n if encryptRequest:\n request = self.encodeRequest(request)\n\n if self.x509:\n response = self._ServerProxy__transport.request(\n (self._ServerProxy__host, self.x509),\n paddedHandler,\n request,\n verbose=self._ServerProxy__verbose\n )\n else:\n response = self._ServerProxy__transport.request(\n self._ServerProxy__host,\n paddedHandler,\n request,\n verbose=self._ServerProxy__verbose\n )\n\n if len(response) == 1:\n response = response[0]\n\n #print \"------ RESPONSE ------\"\n #print response\n\n return response", "def rpc_request(method, params = [], key = None):\n payload = {\n \"method\": method,\n \"params\": params,\n \"jsonrpc\": \"2.0\",\n \"id\": 0\n }\n\n res = requests.post(\n URL,\n data=json.dumps(payload),\n headers={\"content-type\": \"application/json\"}).json()\n\n if not res.get('result'):\n raise RuntimeError(res)\n\n return 
res['result'][key] if key else res['result']", "def __call__(self, *argv):\n\n if self.args is not None:\n if len(argv) != self.args:\n raise TypeError(\"Expecting %i arguments, not %i\" %\n (self.args, len(argv)))\n\n for i in range(len(argv)):\n t = type(argv[i])\n if t is not int and t is not str:\n raise TypeError(\"Argument %i has type '%s'\" % (i, repr(t)))\n\n # Prepare Call Request\n crq = pack('II', self.code, len(argv))\n for a in argv:\n if type(a) is str:\n crq += pack('Ii', RPC_ARG_STR, len(a))\n crq += a\n else:\n crq += pack('Ii', RPC_ARG_INT, a)\n\n # Send call to other process\n self.rpcbridge.send(crq)", "def rpc_request(method, params, url=LOCAL):\n client = HTTPClient(url)\n return client.request(method, params)", "def call_method(self, request, context):\n response = CallMethodResponse()\n args = []\n for arg in request.args:\n args.append(decode(arg))\n if args != []:\n result = \\\n self._delegator.call_method(\n request.component, request.method, *args)\n else:\n result = \\\n self._delegator.call_method(\n request.component, request.method, None)\n response.result = encode(result)\n return response", "def call(self, method, *args):\n flatcall = flatten(\n m(n=method, t=self.groupName)[[\n squish(x) for x in args if x is not None]])\n self.socket.write(flatcall + '\\0')", "def writeMethod( # CMETHOD\n self,\n name,\n socksVersion,\n address,\n args,\n optArgs,\n ):\n\n methodLine = 'CMETHOD %s socks%s %s:%s' % (name, socksVersion,\n address[0], address[1])\n if args and len(args) > 0:\n methodLine = methodLine + ' ARGS=' + args.join(',')\n if optArgs and len(optArgs) > 0:\n methodLine = methodLine + ' OPT-ARGS=' + args.join(',')\n self.emit(methodLine)", "def __call__(self):\n params, method = parse_xmlrpc_request(self.request)\n return xmlrpc_response(getattr(self,method)(*params))", "def _invoke_request(self,\n context: 'IconScoreContext',\n request: dict,\n index: int) -> 'TransactionResult':\n\n method = request['method']\n params = request['params']\n\n from_ = params['from']\n to = params['to']\n\n # If the request is V2 the stepLimit field is not there,\n # so fills it as the max step limit to proceed the transaction.\n step_limit = self._step_counter_factory.get_max_step_limit(context.type)\n if 'stepLimit' in params:\n step_limit = min(params['stepLimit'], step_limit)\n\n context.tx = Transaction(tx_hash=params['txHash'],\n index=index,\n origin=from_,\n timestamp=params.get('timestamp', context.block.timestamp),\n nonce=params.get('nonce', None))\n\n context.msg = Message(sender=from_, value=params.get('value', 0))\n context.current_address = to\n context.event_logs: List['EventLog'] = []\n context.traces: List['Trace'] = []\n context.step_counter = self._step_counter_factory.create(step_limit)\n context.msg_stack.clear()\n context.event_log_stack.clear()\n\n return self._call(context, method, params)", "def make_rpc_batch_request_entry(rpc_name, params):\n return {\n \"id\": \"50\",\n \"version\": \"1.1\",\n \"method\": rpc_name,\n \"params\": params,\n }", "def call(self, procedure: str, *args: aiowamp.WAMPType,\n kwargs: aiowamp.WAMPDict = None,\n receive_progress: bool = None,\n call_timeout: float = None,\n cancel_mode: aiowamp.CancelMode = None,\n disclose_me: bool = None,\n resource_key: str = None,\n options: aiowamp.WAMPDict = None) -> aiowamp.CallABC:\n ..." ]
[ "0.7709393", "0.6050143", "0.5798952", "0.5638268", "0.5624949", "0.55940336", "0.5567791", "0.5512863", "0.5477219", "0.537469", "0.5337115", "0.53062266", "0.52889895", "0.5286633", "0.52851754", "0.5255648", "0.5251362", "0.52488613", "0.52292114", "0.5226254", "0.51244795", "0.5114735", "0.50935155", "0.5063795", "0.5057943", "0.50346416", "0.50323176", "0.50297004", "0.5025365", "0.50068814" ]
0.7918275
0
Call method number ``method_offset`` of interface ``IID`` with marshalled ``params``
def call(self, IID, method_offset, params, ipid=None):
    request = self.forge_alpc_request(IID, method_offset, params, ipid=ipid)
    response = self._send_request(request)
    # Parse response
    request_type = self._get_request_type(response)
    if request_type != gdef.RPC_RESPONSE_TYPE_SUCCESS:
        raise ValueError("Unexpected response type. Expected RESPONSE_SUCCESS got {0}".format(KNOW_RESPONSE_TYPE[request_type]))
    # windows.utils.sprint(ALPC_RPC_CALL.from_buffer_copy(response + "\x00" * 12))
    data = struct.unpack("<6I", response[:6 * 4])
    assert data[3] == self.REQUEST_IDENTIFIER
    return response[4 * 6:]  # Should be the return value (not completely verified)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def call(self, method, params, callback=None):\n cur_id = self._next_id()\n if callback:\n self._callbacks[cur_id] = callback\n self.send({'msg': 'method', 'id': cur_id, 'method': method, 'params': params})", "def call(self, method, params, callback=None):\n cur_id = self._next_id()\n if callback:\n self._callbacks[cur_id] = callback\n self.send({'msg': 'method', 'id': cur_id, 'method': method, 'params': params})", "def forge_alpc_request(self, IID, method_offset, params, ipid=None):\n iid_hash = hash(buffer(IID)[:])\n interface_nb = self.if_bind_number[iid_hash] # TODO: add __hash__ to IID\n if len(params) > 0x900: # 0x1000 - size of meta-data\n request = self._forge_call_request_in_view(interface_nb, method_offset, params, ipid=ipid)\n else:\n request = self._forge_call_request(interface_nb, method_offset, params, ipid=ipid)\n return request", "def _call(self,\n context: 'IconScoreContext',\n method: str,\n params: dict) -> Any:\n\n self._push_context(context)\n handler = self._handlers[method]\n ret_val = handler(context, params)\n self._pop_context()\n return ret_val", "def call(self, method, name, params=None, payload=None, **kwds):", "def _call_method(self, module, method, *args, **kwargs):\n return self.invoke_api(module, method, *args, **kwargs)", "def _call_it(params): # pragma: no cover\n instance, name, args = params\n kwargs = {}\n return getattr(instance, name)(*args, **kwargs)", "def doCall(self, *args, **kw):\n args = list(args)\n\n for param in self.params[len(args):]:\n args.append(kw.pop(param.name, []))\n\n if not set(kw) <= {'_client'}:\n raise TypeError('Invalid keyword arguments: %s' % kw)\n\n if len(args) > len(self.params):\n err = cTypeError('%(func)s() takes exactly %(needed)d arguments '\n '(%(given)d given)',\n nt={'func': self.name,\n 'needed': len(self.params),\n 'given': len(args)})\n\n if kw['_client']:\n raise ClientError(err)\n else:\n raise err\n\n elist = []\n for i in range(len(self.params)):\n attr = self.params[i]\n try:\n v = attr.coerceValueList(args[i], str(i))\n attr.validateValues(False, v)\n except LocalisedError as e:\n if not hasattr(attr, '_toc') and hasattr(attr, '_xlatKey'):\n e.t['name'] = attr._xlatKey\n elist.append(e)\n continue\n args[i] = v\n if elist:\n if kw['_client']:\n raise cAttrErrorList(*elist)\n else:\n raise AttrErrorList(*elist)\n\n # Exceptions in the implementation won't be wrapped in ClientError\n if self.toi:\n aList = [self.toi] + args\n return self.method(*aList)\n else:\n return self.method(*args)", "def execute_method(self, method, params):\n args, varargs, varkw, defaults = getargspec(method)\n if varargs or varkw:\n raise InvalidParamsError(\n \"Service method definition must not have variable parameters\")\n args_set = set(args[1:])\n if params is None:\n if not len(args_set) == 0:\n raise InvalidParamsError(\n \"Wrong number of parameters; \"\n \"expected %i but 'params' was omitted \"\n \"from JSON-RPC message\" % (len(args_set)))\n return method()\n elif isinstance(params, (list, tuple)):\n if not len(args_set) == len(params):\n raise InvalidParamsError(\n \"Wrong number of parameters; \"\n \"expected %i got %i\" % (len(args_set),len(params)))\n return method(*params)\n elif isinstance(params, dict):\n paramset = set(params)\n if not args_set == paramset:\n raise InvalidParamsError(\n \"Named parameters do not \"\n \"match method; expected %s\" % (str(args_set)))\n params = self.decode_dict_keys(params)\n return method(**params)", "def _dispatch(self, method, params):\n logging.debug('Calling %s%s', method, 
params)\n self._rpc_received_event.set()\n return SimpleJSONRPCServer.SimpleJSONRPCServer._dispatch(\n self, method, params)", "def call_method(self, name, method, params):\n self.logger.debug(\"API call: {}.{}({})\".format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n # Calls given obj.method, unpacking and passing params dict\n call_return = getattr(obj, method)(**params)\n msg = \"Called {}.{}\".format(name, method)\n self.logger.debug(msg + \",returned:{}\".format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n # Raised when we have a mismatch of the method's kwargs\n # TODO: Return argspec here?\n err_msg = \"Invalid params for {}.{}\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n # Catch exception raised by called method, notify client\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)", "def __call__(self, args, kwargs):\n im_self = self._im_self_ref()\n if im_self is not None:\n method = MethodType(self._im_func, im_self, self._im_class)\n method(*args, **kwargs)", "def process_method(self, method, args, kwargs, request_id=None, **context):\n return method(*([] if args is None else args), **({} if kwargs is None else kwargs))", "def on_invoke(self, ins, const, obj, args):\n pass", "def call(self, port, method, *args, **kwargs):\n method = self.provides[port][method]\n return method(*args, **kwargs)", "def MethodFromMojom(self, mojom_method, interface):\n method = module.Method(interface, mojom_method.decl_data.short_name)\n method.ordinal = mojom_method.ordinal\n method.parameters = [self.ParamFromMojom(param)\n for param in mojom_method.parameters.fields]\n if mojom_method.response_params is not None:\n method.response_parameters = [self.ParamFromMojom(param)\n for param in mojom_method.response_params.fields]\n return method", "def test_wrapper_with_params():\n my_method = SGMethod(\"test\")\n par = my_method.create_parameter(\"par1\")\n other_method = SGMethod(\"other\")\n par1 = other_method.create_parameter(\"par1\")\n \n my_method.calls(other_method)\n my_method.check_call_validity();\n \n assert other_method == my_method.method_called\n assert len(my_method.args) == 1\n assert par == my_method.args[0]", "def call(self, method, *args):\n flatcall = flatten(\n m(n=method, t=self.groupName)[[\n squish(x) for x in args if x is not None]])\n self.socket.write(flatcall + '\\0')", "def _iter_call_meth(self, method, *args, **kwargs):\n for obj in self:\n if hasattr(obj, method):\n f = op.methodcaller(method, *args, **kwargs)\n f(obj)", "def query(self, method: str, params: dict) -> Any:\n context = self._context_factory.create(IconScoreContextType.QUERY)\n context.block = self._icx_storage.last_block\n step_limit = self._step_counter_factory.get_max_step_limit(context.type)\n\n if params:\n from_: 'Address' = params.get('from', None)\n context.msg = Message(sender=from_)\n if 'stepLimit' in params:\n step_limit = min(params['stepLimit'], step_limit)\n\n context.traces: List['Trace'] = []\n context.step_counter: IconScoreStepCounter = \\\n self._step_counter_factory.create(step_limit)\n\n ret = self._call(context, method, 
params)\n\n self._context_factory.destroy(context)\n\n return ret", "def identify_method(self, func):", "def call_method(self, action):\n\n\t\tif action[0] in self.methods:\n\t\t\tself.methods[action[0]](action[0:])\n\t\telse:\n\t\t\tself.no_such_method()", "def callmethod(\n self, method: str, *args: Sequence[Any], **kwargs: Sequence[Any]\n ) -> List[Any]:\n return getattr(self, method)(*args, **kwargs)", "def _call_method(self, call, method):\n raise Exception(\"_call_method must be implemented by subclasses.\")", "def rpc_call(self, request, method=None, params=None, **kwargs):\r\n args = []\r\n kwargs = dict()\r\n if isinstance(params, dict):\r\n kwargs.update(params)\r\n else:\r\n args = list(as_tuple(params))\r\n\r\n method_key = \"{0}.{1}\".format(self.scheme_name, method)\r\n if method_key not in self.methods:\r\n raise AssertionError(\"Unknown method: {0}\".format(method))\r\n method = self.methods[method_key]\r\n\r\n if hasattr(method, 'request'):\r\n args.insert(0, request)\r\n\r\n return method(*args, **kwargs)", "def call(self, *args, **kwargs):", "def _call_method(self, module, method, *args, **kwargs):\n if not self._is_vim_object(module):\n return self.invoke_api(module, method, self.vim, *args, **kwargs)\n else:\n return self.invoke_api(module, method, *args, **kwargs)", "def _method_call(self, msg):\n #print(\"Performing service: %s, method_name: %s\" % (msg.service_name, msg.method_name))\n service = self._services.get(msg.service_name)\n if service is None:\n raise MessageHandleError(MessageHandleError.RESULT_UNKNOWN_SERVICE, msg)\n\n try:\n return execute_remote_method_call(service, msg.method_name, *msg.pargs, **msg.kwargs)\n #return service.call(msg.method_name, *msg.pargs, **msg.kwargs)\n except MessageHandleError as error:\n error.original_message = msg\n raise error", "def _call(self, rpc_method_name, *args, **kwargs):\n method = getattr(self, rpc_method_name)\n return method(*args, **kwargs)", "def calls(self, method, args=None):\n if self.method_called != None:\n logger.error('Model Error: Changing method called by %s', self.name)\n assert False\n self.method_called = method\n self.args = list()\n \n for arg in args:\n if not isinstance(arg, SGParameter):\n self.args.append(arg)\n else:\n self.args.append(self.get_parameter(arg.name))" ]
[ "0.6457688", "0.6457688", "0.63944983", "0.60446113", "0.60007215", "0.5442638", "0.5425219", "0.5395106", "0.53930616", "0.5373983", "0.5315418", "0.5293637", "0.52931386", "0.5262458", "0.5239935", "0.5228087", "0.520957", "0.5179884", "0.5162715", "0.5161636", "0.5153802", "0.5083417", "0.507523", "0.50295764", "0.50253916", "0.50090045", "0.49933985", "0.49923775", "0.4939953", "0.49311477" ]
0.6616697
0
return a 2-tuple list (idx, run); filter out runs that are not incrementable
def compress_runs(runs, incrementable):
    return [(i, r) for i, r in enumerate(runs) if r.analysis_type in incrementable]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_idxs(self):\n return list(range(len(self._h5[RUNS])))", "def runs(lst):\n for j, two in enumerate(lst):\n if j == 0:\n one, i = two, 0\n if one != two:\n yield j - i, one\n i = j\n one = two\n yield j - i + 1, two", "def run_traj_idxs(self, run_idx):\n return list(range(len(self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)])))", "def scan(self) -> list[int]:", "def scan(self) -> List[int]:", "def scan(self) -> List[int]:", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def _get_run_onsets(\n runs, length_fr, pad_fr, running_threshold_cms, offset_fr):\n out = []\n for run in runs:\n t2p = run.trace2p()\n tr = t2p.trace('deconvolved')\n\n # Add all onsets of \"other\" frames\n others = t2p.nocs(length_fr, pad_fr,\n running_threshold_cms)\n for ot in others:\n start = ot + offset_fr\n out.append(tr[:, start:start + length_fr])\n\n return out", "def _exclude_indices(self):\n idx = self._next_idx\n exclude = np.arange(idx - 1, idx + self.obs_len) % self._maxsize\n return exclude", "def filter_runs_by_run():\n this_ipts_number, run_tup_list = my_data.get_ipts_runs()\n\n first_run = 80230\n last_run = 80240\n status, filter_run_tup_list = vdapi.filter_runs_by_run(run_tup_list, first_run, last_run)\n assert len(filter_run_tup_list) == 10\n\n my_data.set_ipts_runs(ipts_number, filter_run_tup_list)\n\n return", "def pt_index(*args):\n index = []\n x = check_pt_data(args[0])\n i = 0\n for line in args[0].Data.PTData.pt_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def get_remaining_events(index_disappeared,to_destroy):\n index_cp = index_disappeared[:]\n for i,deb,fin in to_destroy:\n index_cp = [(x,y,z) for x,y,z in index_cp if (x!=deb and x!=fin)]\n return index_cp", "def non_zero_idx_val(seq):\n return [(i, v) for i, v in enumerate(seq) if v > 0]", "def pull_runs(self, rows):\n \n runs = []\n count = 0\n while (len(rows) > 0):\n run = []\n prev = None\n\n for row_num in xrange(len(rows)):\n row = rows[row_num]\n first, rest = row[0], row[1:]\n\n # TODO: only accept actually adjacent rows here\n if prev is not None and not self.is_same_run(prev, first):\n break\n\n run.append(first)\n prev = first\n\n rows[row_num] = rest\n\n # print >> sys.stderr, len(run)\n runs.append(run)\n rows = [row for row in rows if len(row) > 0]\n\n count += 1\n\n return runs", "def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices", "def spit(self):\n idxs = np.arange(self.total_tuples)\n return [self.recurse_index_state(copy.deepcopy(self.state_buffer), idxs), self.recurse_index_state(copy.deepcopy(self.state2_buffer), idxs), self.action_buffer[idxs], self.adv_buffer[idxs], \n self.rtg_buffer[idxs], self.logp_buffer[idxs], self.valid_actions_buffer[idxs], self.rew_buffer[idxs], self.done_buffer[idxs]]", "def tc_index(*args):\n index = []\n x = check_tc_data(args[0])\n i = 0\n for line in args[0].Data.TCData.tc_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index", "def get_5index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==0]", "def get_valid_indices():\n return [i for i, val in enumerate(all_topics) if val[1] == \"1\"]", "def listNonDegenerate(self):\n return 
arange(self.nelems())[~self.testDegenerate()]", "def runs(self, char_skip: int) -> Tuple[List[str], List[int]]:\n\t\tchar_indices = list(range(char_skip, len(self.text) + char_skip, char_skip))\n\t\treturn [self.text[:i] for i in char_indices], char_indices", "def runs(self, char_skip: int) -> Tuple[List[str], List[int]]:\n\t\tchar_indices = list(range(char_skip, len(self.text) + char_skip, char_skip))\n\t\treturn [self.text[:i] for i in char_indices], char_indices", "def runs(self, char_skip: int) -> Tuple[List[str], List[int]]:\n\t\tchar_indices = list(range(char_skip, len(self.text) + char_skip, char_skip))\n\t\treturn [self.text[:i] for i in char_indices], char_indices", "def possible_moves(self): \n return [a + 1 for a, b in enumerate(self.board) if b == 0]", "def _get_actor_unfilled_indices(self, actor_index, entries_per_buffer):\n filled_indices = set(\n self._get_replay_buffer_filled_indices(self._replay_buffers, actor_index)\n )\n actor_id_set = set(range(0, entries_per_buffer))\n unfilled_indices = actor_id_set - filled_indices\n return unfilled_indices", "def run_traj_idx_tuples(self, runs=None):\n tups = []\n if runs is None:\n run_idxs = self.run_idxs\n else:\n run_idxs = runs\n for run_idx in run_idxs:\n for traj_idx in self.run_traj_idxs(run_idx):\n tups.append((run_idx, traj_idx))\n\n return tups", "def moles(board):\n return (pos for pos in range(1, length+1) if at(board, pos))", "def cardinal_indices(self, index):\n cardinals = [\n self.north_index(index),\n self.east_index(index),\n self.south_index(index),\n self.west_index(index)\n ]\n return [i for i in cardinals if 0 < i < (self.size * self.size)]", "def get_unprescribed_indexes(self):\n total_dof = DOF_PER_NODE_STRUCTURAL * len(self.preprocessor.nodes)\n all_indexes = np.arange(total_dof)\n return np.delete(all_indexes, self.prescribed_indexes)" ]
[ "0.61934006", "0.59456134", "0.5884365", "0.58346885", "0.57831305", "0.57831305", "0.5749621", "0.57459706", "0.568972", "0.5641108", "0.56102896", "0.560884", "0.5552723", "0.55208975", "0.54915106", "0.54656136", "0.5431433", "0.5424431", "0.54129857", "0.5407284", "0.5405968", "0.53729254", "0.53729254", "0.53729254", "0.53668743", "0.53153634", "0.531413", "0.5274005", "0.5245587", "0.5238228" ]
0.6838056
0
Implemented label methods should place labels within a LETTER_HEIGHT x (len(label) * LETTER_WIDTH) region centered at (label_x, label_y)
def _draw_label(label, label_x, label_y):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_label(self, contig_name, width, height, font, title_width, upper_left, vertical_label,\n strand, canvas, horizontal_centering=False, center_vertical=False, chop_text=True,\n label_color=(50, 50, 50, 255)):\n upper_left = list(upper_left) # to make it mutable\n shortened = contig_name[-title_width:] # max length 18. Last characters are most unique\n txt = Image.new('RGBA', (width, height))#, color=(0,0,0,50))\n txt_canvas = ImageDraw.Draw(txt)\n text_width = txt_canvas.textsize(shortened, font)[0]\n if not chop_text and text_width > width:\n txt = Image.new('RGBA', (text_width, height)) # TODO performance around txt_canvas\n txt_canvas = ImageDraw.Draw(txt)\n if center_vertical or vertical_label: # Large labels are centered in the column to look nice,\n # rotation indicates strand in big text\n vertically_centered = (height // 2) - multi_line_height(font, shortened, txt)//2\n else: # Place label at the beginning of gene based on strand\n vertically_centered = height - multi_line_height(font, shortened, txt) # bottom\n if strand == \"+\":\n vertically_centered = 0 # top of the box\n txt_canvas.multiline_text((0, max(0, vertically_centered)), shortened, font=font,\n fill=label_color)\n if vertical_label:\n rotation_direction = 90 if strand == '-' else -90\n txt = txt.rotate(rotation_direction, expand=True)\n upper_left[1] += -4 if strand == '-' else 4\n if horizontal_centering:\n margin = width - text_width\n upper_left[0] += margin // 2\n canvas.paste(txt, (upper_left[0], upper_left[1]), txt)", "def centered_label(text, y_pos, scale):\n group = displayio.Group(scale=scale, x=board.DISPLAY.width // 2)\n x_pos = len(text) * FONT_WIDTH // -2\n group.append(label.Label(FONT, text=text, x=x_pos, y=y_pos))\n return group", "def draw_label(self):\n x, y, z, phi, theta, psi = self.airplane.eta\n u, v, w, p, q, r = self.airplane.nu\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot = self.airplane.nu_dot\n alpha = np.arctan(w/u)\n V_a = np.sqrt(u**2+v**2+w**2)\n beta = np.arcsin(v/V_a)\n\n self.labels[0].text = 'Roll [deg]: %.2f' % (phi*180/np.pi,)\n self.labels[0].draw()\n self.labels[1].text = 'Pitch [deg]: %.2f' % (theta*180/np.pi,)\n self.labels[1].draw()\n self.labels[3].text = 'Pos: (%.2f, %.2f, %.2f)' % (x, y, z)\n self.labels[3].draw()\n self.labels[4].text = 'Speed: %.2f (%.2f, %.2f, %.2f)' % (V_a, u, v, w)\n self.labels[4].draw()\n self.labels[5].text = 'Acceleration: (%.2f, %.2f, %.2f)' % (u_dot, v_dot, w_dot)\n self.labels[5].draw()\n self.labels[6].text = 'Angle of attack: %.2f' % (alpha,)\n self.labels[6].draw()\n self.labels[7].text = 'Sideslip angle: %.2f' % (beta,)\n self.labels[7].draw()\n\n self.labels[9].text = 'Drag: %.2f' % (self.airplane.f_drag,)\n self.labels[9].draw()\n self.labels[10].text = 'Lift: %.2f' % (self.airplane.f_lift,)\n self.labels[10].draw()\n self.labels[11].text = 'Thruster: %.2f' % (self.airplane.f_thruster,)\n self.labels[11].draw()\n self.labels[12].text = 'Elevators: %.2f' % (self.airplane.elevator,)\n self.labels[12].draw()\n self.labels[13].text = 'Ailerons: %.2f' % (self.airplane.aileron,)\n self.labels[13].draw()\n self.labels[14].text = 'Rudder angle: %.2f' % (self.airplane.rudder_angle,)\n self.labels[14].draw()\n self.labels[15].text = 'Flaps: %.2f' % (self.airplane.flaps,)\n self.labels[15].draw()\n\n if (alpha > CRITICAL_STALL_ANGLE):\n self.stall_warning.text = 'Stall!'\n self.stall_warning.draw()", "def _create_label(self, x, y, text, width=50, **config):\n\n self.main_canvas.create_text(x, y, text='%6s' % text, width=width, **config)", 
"def label_grid(self):\n\n self.pc_label.grid(row=0, sticky=\"nw\", pady=2, padx=3)\n self.sc_label.grid(row=1, sticky=\"nw\", pady=2, padx=3)\n self.avg_t_label.grid(row=2, sticky=\"nw\", pady=2, padx=3)\n self.nwt_label.grid(row=4, sticky=\"nw\", pady=2, padx=3)\n self.nw_ip_label.grid(row=5, sticky=\"nw\", pady=2, padx=3)\n self.nw_gw_label.grid(row=6, sticky=\"nw\", pady=2, padx=3)\n self.nw_sm_label.grid(row=7, sticky=\"nw\", pady=2, padx=3)\n self.nw_mca_label.grid(row=8, sticky=\"nw\", pady=2, padx=3)", "def align_labels(labels):\n # get longest label width\n max_width = -1\n for label in labels:\n width = label.GetSize().width\n max_width = max(max_width, width)\n \n # resize all labels to the longest width\n for label in labels:\n label.SetSize((max_width,-1))", "def autolabel(X_pos,values,height_lift):\r\n\theight= np.round(np.nan_to_num(values),2);y_pos = height_lift*height\r\n\tfor i in range(len(height)):\r\n\t\tax.text(X_pos[i],y_pos[i],'%4.2f' % height[i], ha='center', va='bottom',size=4)", "def add_labels(self, labels):\n for i, axis in enumerate(self.bottom):\n self.grid[axis].set_xlabel(labels[i])\n\n for i, axis in enumerate(np.array(self.left)[-1::-1]):\n if axis == self.upperleft:\n continue\n\n self.grid[axis].set_ylabel(labels[i]) \n\n pl.draw()", "def draw_shape_label(self, label, xform, colour):\n #TODO deal with alignment, rotation\n pos = xform.chain(Point(label.x, label.y))\n self.canvas.text((pos.x, pos.y), label.text, fill=colour)", "def put_label(i):\n i = min(i, len(x) - 2)\n dx = sx[i + 1] - sx[i]\n dy = sy[i + 1] - sy[i]\n rotation = np.rad2deg(math.atan2(dy, dx)) + rotation_offset\n pos = [(x[i] + x[i + 1]) / 2. + offset[0],\n (y[i] + y[i + 1]) / 2 + offset[1]]\n plt.text(pos[0],\n pos[1],\n label_text,\n size=9,\n rotation=rotation,\n color=line.get_color(),\n ha=\"center\",\n va=\"center\",\n bbox=dict(ec='1', fc='1', alpha=0.8))", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def put_label(i):\n i = min(i, len(x)-2)\n dx = sx[i+1] - sx[i]\n dy = sy[i+1] - sy[i]\n rotation = np.rad2deg(math.atan2(dy, dx)) + rotation_offset\n pos = [(x[i] + x[i+1])/2. 
+ offset[0], (y[i] + y[i+1])/2 + offset[1]]\n plt.text(pos[0], pos[1], label_text, size=9, rotation=rotation, color = line.get_color(),\n ha=\"center\", va=\"center\", bbox = dict(ec='1',fc='1',alpha=0.8))", "def DrawLabel(self, screen):\r\n screen.blit(self.label, self.pos)", "def draw_text(label_text, label_position, scene):\n\n # Distance of camera from focus point to determine text size\n distance_from_center = mag(scene.center - scene.camera.pos)\n\n # Far away = smaller text, closer = larger text (up to a min (20) and max (40))\n # Typically 5->20 units away\n # (eqn and limits modified to suit display better) = -1.3333 * distance_from_center + 46.6667\n label_height = -1.3333 * distance_from_center + 36.6667 # Calculate label height\n label_height = max(min(label_height, 35), 10) # Limit to 10->35\n label_xoffset = 0\n label_yoffset = 0\n label_space = 0\n label_font = 'serif'\n label_text_colour = color.black\n label_line_color = color.white\n label_bg_opacity = 0\n label_linewidth = 0.1\n\n the_label = label(\n canvas=scene,\n pos=label_position,\n text=label_text,\n height=label_height,\n xoffset=label_xoffset,\n yoffset=label_yoffset,\n space=label_space,\n font=label_font,\n color=label_text_colour,\n linecolor=label_line_color,\n opacity=label_bg_opacity,\n linewidth=label_linewidth\n )\n\n return the_label", "def autolabel(rects):", "def _draw_x_label(self):\n overlay = self.image.getOverlay()\n TextRoi.setGlobalJustification(TextRoi.CENTER)\n offset = self.image.getHeight() - self.extend_label\n label_pos = self.image.getWidth() / 2\n text = TextRoi(label_pos, offset, 'Energy loss [eV]', self.font)\n text_width = text.getFloatWidth()\n text_y = text.getYBase()\n text.setLocation(label_pos - text_width / 2, text_y)\n text.setStrokeColor(Color(1.00, 1.00, 1.00))\n overlay.add(text)", "def autolabel(rects, text, extra_height=0):\n for index, rect in enumerate(rects):\n\n height = rect.get_height()\n if extra_height != 0 and index == 2:\n extra_height = 0.5\n if extra_height != 0 and index == 0:\n extra_height = 2.5\n\n plt.text(rect.get_x() + rect.get_width() / 2., height + 4 + extra_height,\n text,\n ha='center', va='bottom')", "def makeInstructionLabel(self, textInstruction):\n Label(self.sideFrame, text=textInstruction,\n font=self.sideFont, anchor='w').pack(fill=X, padx=10)", "def getVerticalLabels(labels, font, textGap):\n\n maxWidth = 0\n height = 0\n textHeight = font.getsize(\"testq\")[1]\n for label in labels:\n maxWidth = max(maxWidth, font.getsize(label)[0])\n if height > 0: height += textGap\n height += textHeight\n size = (maxWidth, height)\n textCanvas = Image.new(\"RGB\", size, WHITE)\n textdraw = ImageDraw.Draw(textCanvas)\n py = 0\n for label in labels:\n indent = (maxWidth - font.getsize(label)[0]) / 2\n textdraw.text((indent, py), label, font=font, fill=(0,0,0))\n py += textHeight + textGap\n return textCanvas.rotate(90)", "def build_labels():\n l_title = GLabel('Which one is Karel?')\n l_title.font = 'Courier-25'\n l_title.color = 'black'\n window.add(l_title, x=260, y=60)\n l_num = GLabel('19')\n l_num.font = 'Courier-50'\n l_num.color = 'whitesmoke'\n window.add(l_num, x=37, y=242)\n l_skip = GLabel('skip')\n l_skip.font = 'Courier-20'\n l_skip.color = 'whitesmoke'\n window.add(l_skip, x=726, y=152)\n l_ans1 = GLabel('Answers')\n l_ans1.font = 'Courier-20-italic'\n l_ans1.color = 'black'\n window.add(l_ans1, x=698, y=270)\n l_ans2 = GLabel('0')\n l_ans2.font = 'Courier-50-italic'\n l_ans2.color = 'black'\n window.add(l_ans2, x=722, y=252)\n l_game_pin = 
GLabel('Game PIN: SC101')\n l_game_pin.font = 'Courier-20'\n l_game_pin.color = 'black'\n window.add(l_game_pin, x=20, y=540)\n l_1 = GPolygon()\n l_1.add_vertex((210, 360))\n l_1.add_vertex((197, 380))\n l_1.add_vertex((221, 380))\n l_1.filled = True\n l_1.color = 'whitesmoke'\n l_1.fill_color= 'whitesmoke'\n window.add(l_1)\n l_2_1 = GPolygon()\n l_2_1.add_vertex((210+380, 359))\n l_2_1.add_vertex((198+380, 370))\n l_2_1.add_vertex((221+380, 370))\n l_2_1.filled = True\n l_2_1.fill_color = 'whitesmoke'\n l_2_1.color = 'whitesmoke'\n window.add(l_2_1)\n l_2_2 = GPolygon()\n l_2_2.add_vertex((210+380, 381))\n l_2_2.add_vertex((198+380, 370))\n l_2_2.add_vertex((221+380, 370))\n l_2_2.filled = True\n l_2_2.fill_color = 'whitesmoke'\n l_2_2.color = 'whitesmoke'\n window.add(l_2_2)\n l_3 = GOval(23, 23, x=198, y=450)\n l_3.filled = True\n l_3.fill_color = 'whitesmoke'\n l_3.color = 'whitesmoke'\n window.add(l_3)\n l_4 = GRect(20, 20, x=583, y=450)\n l_4.filled = True\n l_4.fill_color = 'whitesmoke'\n l_4.color = 'whitesmoke'\n window.add(l_4)", "def drawlabels(t, t1):\r\n t.fd(250)\r\n t.pd()\r\n t.write(\"Life\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(12)\r\n t.pd()\r\n t.write(\"Exp.\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(238)\r\n t.right(90)\r\n t.fd(80)\r\n t1.pu()\r\n t1.back(50)\r\n t1.rt(90)\r\n t1.fd(250)\r\n t1.pd()\r\n t1.write(\"Year\", font=(\"Arial\", 10, \"bold\"))\r\n t1.pu()\r\n t1.back(250)\r\n t1.left(90)\r\n t1.fd(50)", "def label(self, margin):\n if self.alphaL == None or self.alphaR == None:\n self.label = \"N\"\n elif abs(self.alphaL - self.alphaR) <= margin:\n self.label = \"S\"\n elif (self.alphaL - self.alphaR) > margin:\n self.label = \"L\"\n elif -(self.alphaL - self.alphaR) > margin:\n self.label = \"R\"\n else:\n self.label = \"N\"", "def __init__(self, text, separator_line_thickness, label_type, dpi=(600, 600)):\n \n def get_text_on_label(text, label_type):\n \"\"\"Format how the text will look on the label.\n \n text - Text to be placed on the label.\n label_type - One of the types specifying the label layout.\n \"\"\"\n text_on_label = \"\".join([c for c in text if c in string.ascii_letters + string.digits])\n if label_type == 0:\n text_on_label = \"\"\n elif label_type == 1 or label_type == 2 or label_type == 4:\n text_on_label = \"\\n\".join([text_on_label[:4],\n text_on_label[4:8],\n text_on_label[8:12],\n text_on_label[12:]])\n elif label_type == 3:\n text_on_label = \"\\n\".join([\"-\".join([text_on_label[:4],\n text_on_label[4:8]]),\n \"-\".join([text_on_label[8:12],\n text_on_label[12:]])])\n else:\n text_on_label = \"\"\n return text_on_label\n \n self.label_image = None\n self.text_on_label = get_text_on_label(text, label_type)\n self.label_type = label_type\n self.separator_line_thickness = separator_line_thickness\n self.dpi = dpi", "def draw_label(self, text, event_name, num_items = 1, item = 0):\n width = self.XCOLUMNSKIP//num_items\n self.guiElements[event_name] = Draw.Label(\n text,\n self.xPos + item*width, self.yPos, width, self.YLINESKIP)\n if item + 1 == num_items:\n self.yPos -= self.YLINESKIP", "def draw_label(label_text, label_position, scene):\n\n # Custom settings for the label\n label_height = 10\n label_xoffset = 0\n label_yoffset = 50\n label_space = 20\n label_font = 'serif'\n label_text_colour = color.black\n label_line_color = color.black\n\n the_label = label(\n canvas=scene,\n pos=label_position,\n text=label_text,\n height=label_height,\n xoffset=label_xoffset,\n yoffset=label_yoffset,\n 
space=label_space,\n font=label_font,\n color=label_text_colour,\n linecolor=label_line_color\n )\n\n return the_label", "def add_labels(axes, style='(%s)', format_=None, size=12, start='A'):\n\n if format_ is None:\n def format_(x):\n return x\n\n letters = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n start_pos = letters.index(start)\n\n for ax, l in zip(*(axes, letters[start_pos:])):\n x_lo, x_hi = ax.get_xlim()\n y_lo, y_hi = ax.get_ylim()\n\n left = x_lo + 0.1 * (x_hi - x_lo)\n bottom = y_hi - 0.1 * (y_hi - y_lo)\n ax.text(left, bottom, style % format_(l), size=size)", "def letter_for(label):\n return \"ABCDEFGHIJ\"[label]", "def GridLabel(Parent,Text,Row,Column):\r\n L = Label(Parent,text=Text)\r\n L.grid(row=Row,column=Column)\r\n return L", "def create_frame_icons(self):\n self.text = \"{}\".format(self.name)\n self.y = self.startY - 10 if self.startY - 10 > 10 else self.startY + 10\n self.colorIndex = LABELS.index(self.name)", "def make_label(self, label, units):\n nice_label = self.tex_axis_label(label)\n if not (units == 'dimensionless') and \\\n (units is not None) and (not units == []):\n nice_label += ' (%s)'%self.tex_axis_label(units)\n return nice_label" ]
[ "0.71363807", "0.68898165", "0.6765323", "0.6661851", "0.65560156", "0.6492479", "0.6486989", "0.64741296", "0.644576", "0.6443592", "0.6416232", "0.63992846", "0.63034606", "0.62848526", "0.62837756", "0.6281372", "0.6280866", "0.62708366", "0.62629914", "0.6259574", "0.6256455", "0.6255242", "0.6237383", "0.6219977", "0.6190325", "0.61829484", "0.61792773", "0.61666894", "0.6137797", "0.6137411" ]
0.7144518
0
Get the ElasticSearch index or indices to query. By default, we obtain the index from the foreign table options. However, this method can be overridden to derive the index from the query quals. For example, the `timestamp` qual could be used to select one or more time-based indices.
def get_index(self, _quals):
    return self._options['index']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_index(self, *args, **dargs):\n pass", "def get_index(\n self,\n ) -> Callable[[datastore_admin.GetIndexRequest], Awaitable[index.Index]]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_index\" not in self._stubs:\n self._stubs[\"get_index\"] = self.grpc_channel.unary_unary(\n \"/google.datastore.admin.v1.DatastoreAdmin/GetIndex\",\n request_serializer=datastore_admin.GetIndexRequest.serialize,\n response_deserializer=index.Index.deserialize,\n )\n return self._stubs[\"get_index\"]", "def get_index(self, name):\n for index in self.indexes:\n if index.name == name:\n return index\n return None", "def fusion_api_get_index_association(self, uri=None, api=None, headers=None):\n return self.index_association.get(uri, api, headers)", "def get_index(self):\n return self.index", "def get_index(self):\n return self.index", "def index_set(self):\n return self._index", "def indexes(self):\n return getattr(self, '_indexes', None)", "def index(self):\n return self.container['index']", "def index_together(self):\n return self._index_together", "def get_search_index(self):\n return self.get_content.searchIndex", "def index_endpoint(self) -> Optional[str]:\n return pulumi.get(self, \"index_endpoint\")", "def index_queryset(self, using=None):\n return self.get_model().objects.all() # 确定在建立索引时有些记录被索引,这里我们简单的返回所有记录", "def getIndex(self):\n return self.index", "def get_aliased_index(client):\n try:\n result = client.conn.indices.get_alias(name=client.index)\n except elasticsearch.exceptions.NotFoundError: # no alias with that name\n return None\n if len(result) > 1:\n raise RuntimeError(\n \"We don't support managing aliases that \"\n \"point to multiple indices at the moment!\"\n )\n return list(result.keys())[0]", "def indexed(self):\n return self.properties.get('indexed', None)", "def query_indexd(self, limit=100, page=0, uploader=None, args=None):\n data, records = {}, []\n\n if uploader == None:\n index_url = \"{}/index/index/?limit={}&page={}\".format(self._endpoint, limit, page)\n else:\n index_url = \"{}/index/index/?limit={}&page={}&uploader={}\".format(self._endpoint, limit, page, uploader)\n\n if args != None:\n index_url = \"{}&{}\".format(index_url,args)\n\n try:\n response = requests.get(index_url).text\n data = json.loads(response)\n except Exception as e:\n print(\n \"\\tUnable to parse indexd response as JSON!\\n\\t\\t{} {}\".format(\n type(e), e\n )\n )\n\n if \"records\" in data:\n records = data[\"records\"]\n else:\n print(\n \"\\tNo records found in data from '{}':\\n\\t\\t{}\".format(index_url, data)\n )\n\n return records", "def getIndex(self):\n\n return self._index", "def elasticsearch_index_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"elasticsearch_index_prefix\")", "def _get_ea_index():\n ea_index_temp = {'Address': 5, 'Agency': 10, 'City': 4, 'Country': 3,\n 'Datacenter': 7, 'Division': 8, 'Interface Name': 13,\n 'Region_List': 2, 'Requester Email': 9, 'Site': 6,\n 'VLAN Description': 11, 'IPR Designation': 16}\n return ea_index_temp", "def search_indexes(self):\n return get_model_indexes(self.__class__)", "def db_index_name(self):\r\n return 'index_{}'.format(self.db_field_name)", "def index(self):\n if not isinstance(self._index, pd.core.frame.DataFrame):\n self.load()\n return self._index", "def get_index_from_alias(alias_name, index_client=None):\n index_client = index_client or 
indices_client()\n if not index_client.exists_alias(name=alias_name):\n return None\n return list(index_client.get_alias(name=alias_name).keys())[0]", "def index(self):\n return self._index", "def get_index(index_name):\n try:\n return ES.indices.get(index=[index_name])\n except NotFoundError:\n raise IndexNotFound(index_name)", "def _index(self):\n return es.index(CLUSTER_NAME, 'record', self.dict, id=self.uuid)", "def idx(self, store):\n if hasattr(store, 'index'):\n return store.index.get(self, None)\n else:\n return store.idx(self)", "def fusion_api_index_resource(self, uri=None, api=None, headers=None):\n return self.index_resource.get(uri, api, headers)", "def index(self):\n return self._index" ]
[ "0.62230533", "0.6008187", "0.58028996", "0.57998234", "0.57949215", "0.57949215", "0.57921827", "0.56789416", "0.5601105", "0.5534349", "0.552037", "0.5520204", "0.5516885", "0.55096304", "0.55079347", "0.54522127", "0.5441961", "0.5438087", "0.54296577", "0.53880614", "0.5383899", "0.53644145", "0.53597564", "0.5350471", "0.5327574", "0.53252137", "0.5316749", "0.5291654", "0.52898943", "0.5288184" ]
0.68531275
0
Converts a CoreBluetooth UUID to a Python string. If ``_uuid`` is a 16-bit UUID, it is assumed to be a Bluetooth GATT UUID (``0000xxxx-0000-1000-8000-00805f9b34fb``). Args
def cb_uuid_to_str(_uuid: CBUUID) -> str:
    _uuid = _uuid.UUIDString()
    if len(_uuid) == 4:
        return "0000{0}-0000-1000-8000-00805f9b34fb".format(_uuid.lower())
    # TODO: Evaluate if this is a necessary method...
    # elif _is_uuid_16bit_compatible(_uuid):
    #     return _uuid[4:8].lower()
    else:
        return _uuid.lower()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uuid(_uuid=uuid4):\n return str(_uuid())", "def _format_uuid(self, uuid):\n uuid_format = self.uuid_format\n uuid_list=uuid_format.split(\"-\")\n pad=len(uuid_list[-1])\n last_element=uuid.zfill(pad)\n formatted_uuid=uuid_format.replace(uuid_list[-1], last_element)\n return formatted_uuid", "def sortable_time_uuid_str(uuid):\n return flip_uuid_parts(str(uuid))", "def set_uuid(self, device):\n import uuid\n\n return str(uuid.uuid4())", "def get_uuid():\n\n x = uuid.uuid1()\n return str(x)", "def _get_uuid():\n return str(uuid.uuid4())", "def get_uuid():\n return str(uuid4())", "def get_uuid(device):\n uuids = uuid_table()\n return str(uuids[device])", "def _generate_uuid_str_if_none(given_uuid):\n\t\treturn given_uuid or uuid.uuid4().__str__()", "def get_uuid():\n\n return str(uuid.uuid4())", "def encode(uuid_):\n return base64.urlsafe_b64encode(uuid_.bytes)[:-2] # Drop '==' padding", "def uuid_(identifier: Optional[uuid.UUID]) -> Optional[str]:\n if identifier is None:\n return None\n\n return str(identifier)", "def getUUID():\n return str(uuid.uuid4())", "def to_uuid(string):\n if sys.version_info[0] == 2:\n string = string.encode('utf-8')\n \n # This the seed Ansible has chosen for their UUID's\n return str(uuid.uuid5(uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E'), string))", "def gen_uuid() -> str:\n return str(uuid4())", "def get_uuid(s):\n sha = sha256(s.encode('utf-8')).hexdigest()\n uuid = UUID(sha[:32])\n return str(uuid)", "def uuid(self, obj: typing.Any = None) -> str:\n if obj is None:\n obj = self.randomString()\n self._counter += 1\n elif isinstance(obj, bytes):\n obj = obj.decode('utf8') # To binary\n else:\n obj = '{}'.format(obj)\n\n return str(uuid.uuid5(self._namespace, obj)).lower() # I believe uuid returns a lowercase uuid always, but in case... :)", "def gen_uuid():\n return str( uuid.uuid4() )", "def gen_uuid():\n return str( uuid.uuid4() )", "def _generate_uuid():\n return str(uuid.uuid4())", "def gen_uuid():\n return str(uuid.uuid4())", "def characteristic_uuid(self) -> str:\n return self.__characteristic_uuid", "def ordered_uuid(value=None):\n if not HAVE_ORDERED_UUID:\n raise RuntimeError(\"ordered_uuid package: not found\")\n if not value:\n value = str(uuid.uuid1())\n return OrderedUUID(value)", "def _NewUUIDString ():\n if __HaveUUID:\n return uuid.uuid1().urn\n return '%s:%08.8x' % (time.strftime('%Y%m%d%H%M%S'), random.randint(0, 0xFFFFFFFF))", "def build_uuid(self):\n self._uuid = str(uuid.uuid1())\n return self._uuid", "def uuid(self, value):\n self.unique_id = UUID(str(value)).hex", "def _uuid(self):\n u = self.__uuid\n if u is None:\n u = str(uuid.uuid1())\n self._set_uuid(u)\n return u", "def convert_guid_intstr(guid):\n return str(int(guid, 16))", "def uuid_to_bytes(id):\n return uuid.UUID(id).bytes", "def generate_uuid():\n return f'{uuid.uuid1()}'" ]
[ "0.67531985", "0.6394974", "0.6309436", "0.6288315", "0.6235836", "0.6185628", "0.61492383", "0.61275625", "0.6092875", "0.60705936", "0.6040981", "0.595578", "0.59540427", "0.5948298", "0.59475523", "0.58622694", "0.5757375", "0.575162", "0.575162", "0.57498914", "0.5726065", "0.5710371", "0.5694612", "0.565608", "0.56067014", "0.55960816", "0.5583327", "0.5582306", "0.5527181", "0.55066884" ]
0.83089083
0
Instruct the light to turn on. You can skip the brightness part if your light does not support brightness control.
def turn_on(self, **kwargs):
    self._brightness = 100
    self._state = 'on'
    #self._light.brightness = kwargs.get(ATTR_BRIGHTNESS, 255)
    #self._light.turn_on()
    _LOGGER.info("turn_on() is called")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_on(self, **kwargs: Any) -> None:\n self._light.brightness = kwargs.get(ATTR_BRIGHTNESS, 255)\n self._light.turn_on()", "def set_light_on(self):\r\n self._light = \"ON\"", "async def async_turn_on(self, **kwargs: Any) -> None:\n if (brightness := kwargs.get(ATTR_BRIGHTNESS)) is not None:\n # set the brightness, which will also turn on/off light\n if brightness == 255:\n brightness = 256 # this will end up as 16 which is max\n self._device.light_brightness = int(brightness / 16)\n else:\n self._device.light_on = True", "def turn_on(self):\n GPIO.output(self.gpio, True) # turn on light", "def set_light_on(self):\n self._light = \"ON\"", "def turn_on(self, **kwargs):\n brightness_pct = 100\n if kwargs.get(ATTR_BRIGHTNESS):\n brightness_pct = \\\n brightness_to_percentage(int(kwargs.get(ATTR_BRIGHTNESS)))\n elif self._is_dimmable:\n brightness_pct = 101 # Sets the light to last known brightness.\n self._client.set_brightness(self._id, brightness_pct)", "def turn_on(self, **kwargs: Any) -> None:\n self._set_light(ON_STATE)", "def turn_on(self, **kwargs: Any) -> None:\n if self._dimmable:\n level = kwargs.get(ATTR_BRIGHTNESS, self._last_brightness)\n else:\n level = 255\n self._light.turn_on(to_futurenow_level(level))", "def turn_on(self, **kwargs: Any) -> None:\n _LOGGER.debug(\"Turn on light %s %s\", self._device.ip, kwargs)\n if not self.is_on:\n self._device.power_on = True\n\n if ATTR_BRIGHTNESS in kwargs and self.brightness != kwargs[ATTR_BRIGHTNESS]:\n self._device.brightness = kwargs[ATTR_BRIGHTNESS]\n\n if ATTR_COLOR_TEMP in kwargs and self.color_temp != kwargs[ATTR_COLOR_TEMP]:\n color_temp = mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])\n self._device.color_temperature = color_temp", "def turn_on(self, **kwargs):\n onValue = str((kwargs.get(ATTR_BRIGHTNESS, int(self._brightness))/255)*100)\n request = requests.post(self._resource,\n data=onValue,\n timeout=10)\n if (request.status_code == 200) or (request.status_code == 201):\n self._state = True\n else:\n _LOGGER.info(\"HTTP Status Code: %s\", request.status_code)\n _LOGGER.error(\"Can't turn on %s. 
Is resource/endpoint offline?\", self._resource)\n\n self.schedule_update_ha_state()", "def lightning_turnon(self):\n self.turnOn()", "def turn_on(self, **kwargs):\n _LOGGER.error(\"DALI TURN ON\")\n\n self._state = True\n\n if ATTR_BRIGHTNESS in kwargs:\n _LOGGER.error(kwargs[ATTR_BRIGHTNESS])\n\n bri = kwargs[ATTR_BRIGHTNESS]\n\n if bri == 0:\n self._state = False\n else:\n bri = int(bri / 1.5)\n _LOGGER.error(bri)\n\n\n url = self.urlx + '/dimset?bri=' + str(bri)\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n self._dimmer = kwargs[ATTR_BRIGHTNESS]\n\n else:\n url = self.urlx + '/toggle'\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n state = json_data['state']\n self._dimmer = 255\n self._state = state == 'on'", "def light_on(self, pin='D13'):\n self.light_set(pin, '1')", "def turn_on(self, **kwargs):\n default_hs = (0, 0) if self._hs_color is None else self._hs_color\n hue_sat = kwargs.get(ATTR_HS_COLOR, default_hs)\n\n default_brightness = 0 if self._brightness is None else self._brightness\n brightness = kwargs.get(ATTR_BRIGHTNESS, default_brightness)\n\n default_white_value = 255 if self._white_value is None else self._white_value\n white_value = kwargs.get(ATTR_WHITE_VALUE, default_white_value)\n\n if brightness == 0 and white_value == 0 and not kwargs:\n # If the light would be off, and no additional parameters were\n # passed, just turn the light on full brightness.\n brightness = 255\n white_value = 255\n\n rgb = color_util.color_hsv_to_RGB(*hue_sat, brightness / 255 * 100)\n\n self._light.set_color(*rgb, white_value)", "def turnLightingSystemOn():\n dislin.light('ON')", "def turn_on(self, r=None, g=None, b=None, brightness=None):\n print(\"Got request to turn on the lights on with values: (r=%s, g=%s, b=%s, brightness=%s)\" % (r, g, b, brightness))\n if r is not None:\n self.r = r\n if g is not None:\n self.g = g\n if b is not None:\n self.b = b\n if brightness is not None:\n self.brightness = brightness\n print(\"Turning on lights on with values: (r=%s, g=%s, b=%s, brightness=%s)\" % (self.r, self.g, self.b, self.brightness))\n self.led.fill(Color(self.r,self.g,self.b, self.brightness))\n\n self.led.update()\n self.client.publish(STATE_TOPIC, ON) #publish", "def lighton(update: Update, context: CallbackContext) -> None:\n if __sauna.control.getPortValue(\"Light Sensor\") == 0:\n # TODO Mit Stromstossrelais ist dieser Code richtig\n # __sauna.control.togglePortValue(\"Light Switch\")\n update.message.reply_text(\"Light is on\")\n else:\n update.message.reply_text(\"Light was already on\")\n\n __sauna.control.setPortValue(\"Light Switch\")\n val = __sauna.control.getPortValue(\"Light Switch\")\n update.message.reply_text(\"Light Switch := \" + str(val))", "def turn_on(self, **kwargs):\n if ATTR_BRIGHTNESS in kwargs:\n brightness = int(kwargs[ATTR_BRIGHTNESS] / 255 * 99)\n self._lj.activate_load_at(self._index, brightness, 0)\n else:\n self._lj.activate_load(self._index)", "def turn_on(self):\n self._interrupt_flash()\n if not self.on:\n GPIO.output(self.pin, GPIO.HIGH)\n self.on = True", "def change_light(self):\n self._light_status = not self._light_status", "def turn_on(self, **kwargs):\n if 
(CommandSwitch._switch(self._command_on) and\n not self._command_state):\n self._state = True\n self.schedule_update_ha_state()\n if ATTR_BRIGHTNESS in kwargs:\n self._brightness = kwargs[ATTR_BRIGHTNESS]\n self.schedule_update_ha_state()\n if ATTR_RGB_COLOR in kwargs:\n self._color = kwargs[ATTR_RGB_COLOR]\n self.schedule_update_ha_state()\n # White is a special case.\n if min(self._color) > 256 - RGB_BOUNDARY:\n self._color = WHITE\n self.schedule_update_ha_state()\n if ATTR_EFFECT in kwargs:\n if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP:\n self.repeating = True\n pipeline.append(COLORLOOP)\n if kwargs[ATTR_EFFECT] == EFFECT_WHITE:\n pipeline.white()\n self._color = WHITE", "def turn_on(self, **kwargs: Any) -> None:\n commands = []\n _LOGGER.debug(\"light kwargs-> %s\", kwargs)\n\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands += [{\"code\": DPCODE_LIGHT, \"value\": True}]\n else:\n commands += [{\"code\": DPCODE_SWITCH, \"value\": True}]\n\n if ATTR_BRIGHTNESS in kwargs:\n if self._work_mode().startswith(WORK_MODE_COLOUR):\n colour_data = self._get_hsv()\n v_range = self._tuya_hsv_v_range()\n colour_data[\"v\"] = int(\n self.remap(kwargs[ATTR_BRIGHTNESS], 0, 255, v_range[0], v_range[1])\n )\n commands += [\n {\"code\": self.dp_code_colour, \"value\": json.dumps(colour_data)}\n ]\n else:\n new_range = self._tuya_brightness_range()\n tuya_brightness = int(\n self.remap(\n kwargs[ATTR_BRIGHTNESS], 0, 255, new_range[0], new_range[1]\n )\n )\n commands += [{\"code\": self.dp_code_bright, \"value\": tuya_brightness}]\n\n if ATTR_HS_COLOR in kwargs:\n colour_data = self._get_hsv()\n # hsv h\n colour_data[\"h\"] = int(kwargs[ATTR_HS_COLOR][0])\n # hsv s\n ha_s = kwargs[ATTR_HS_COLOR][1]\n s_range = self._tuya_hsv_s_range()\n colour_data[\"s\"] = int(\n self.remap(\n ha_s,\n HSV_HA_SATURATION_MIN,\n HSV_HA_SATURATION_MAX,\n s_range[0],\n s_range[1],\n )\n )\n # hsv v\n ha_v = self.brightness\n v_range = self._tuya_hsv_v_range()\n colour_data[\"v\"] = int(self.remap(ha_v, 0, 255, v_range[0], v_range[1]))\n\n commands += [\n {\"code\": self.dp_code_colour, \"value\": json.dumps(colour_data)}\n ]\n if self.tuya_device.status[DPCODE_WORK_MODE] != \"colour\":\n commands += [{\"code\": DPCODE_WORK_MODE, \"value\": \"colour\"}]\n\n if ATTR_COLOR_TEMP in kwargs:\n # temp color\n new_range = self._tuya_temp_range()\n color_temp = self.remap(\n self.max_mireds - kwargs[ATTR_COLOR_TEMP] + self.min_mireds,\n self.min_mireds,\n self.max_mireds,\n new_range[0],\n new_range[1],\n )\n commands += [{\"code\": self.dp_code_temp, \"value\": int(color_temp)}]\n\n # brightness\n ha_brightness = self.brightness\n new_range = self._tuya_brightness_range()\n tuya_brightness = self.remap(\n ha_brightness, 0, 255, new_range[0], new_range[1]\n )\n commands += [{\"code\": self.dp_code_bright, \"value\": int(tuya_brightness)}]\n\n if self.tuya_device.status[DPCODE_WORK_MODE] != \"white\":\n commands += [{\"code\": DPCODE_WORK_MODE, \"value\": \"white\"}]\n\n self._send_command(commands)", "def async_turn_on(self, **kwargs):\n self._state = STATE_ON\n transition = kwargs.get(ATTR_TRANSITION, self._fade_time)\n\n # Update state from service call\n if ATTR_BRIGHTNESS in kwargs:\n self._brightness = kwargs[ATTR_BRIGHTNESS]\n\n if ATTR_HS_COLOR in kwargs:\n self._rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])\n # self._white_value = color_rgb_to_rgbw(*self._rgb)[3]\n\n if ATTR_WHITE_VALUE in kwargs:\n self._white_value = kwargs[ATTR_WHITE_VALUE]\n\n 
logging.debug(\"Setting light '%s' to %s with transition time %i\",\n self._name, repr(self.dmx_values), transition)\n asyncio.ensure_future(\n self._controller.set_channels_async(\n self._channels, self.dmx_values, transition=transition))\n self.async_schedule_update_ha_state()", "def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False", "def turn_on(self):\n self._lms.query(self._id, 'power', '1')\n self.update_ha_state()", "def _set_light(self, new_state):\n try:\n self._device.lights = new_state\n except requests.Timeout:\n _LOGGER.error(\"Time out setting %s light to %s\", self.entity_id, new_state)\n return\n\n self._light_on = new_state == ON_STATE\n self._no_updates_until = dt_util.utcnow() + SKIP_UPDATES_DELAY\n self.async_write_ha_state()", "def setLightSwitch(self, _state=False):\n if _state == True:\n render.setLight(self.lightNP)\n elif _state == False:\n render.clearLight(self.lightNP)", "def turnLightOn(ID):\n dislin.litmod(ID, 'ON')", "def turn_on(self):\n self._remote.power(1)", "async def async_turn_on(self, **kwargs) -> None:\n self._state = await self._gate.turn_on_light(self._light_id)" ]
[ "0.876063", "0.86473703", "0.86437523", "0.86226034", "0.86144036", "0.83741844", "0.8299596", "0.8281327", "0.82521445", "0.8087994", "0.8022874", "0.80080384", "0.78507304", "0.7818195", "0.77767706", "0.7775988", "0.7701322", "0.7684567", "0.767373", "0.761866", "0.7589698", "0.7510111", "0.7466082", "0.74618995", "0.74395126", "0.74091667", "0.7402338", "0.73808867", "0.734347", "0.7338064" ]
0.8921207
0
Predicts whether the faces belong to a trained class.
def face_prediction(self, frame, faces):
    predictions = FaceModel.model.predict_proba(FaceModel.emb_array)
    best_class_indices = np.argmax(predictions, axis=1)
    best_class_probabilities = predictions[
        np.arange(len(best_class_indices)), best_class_indices
    ]
    print(' prediction:')
    rec_name_lst = []
    for i in range(len(best_class_indices)):
        print(' %4d %s: %.3f' % (
            i,
            FaceModel.class_names[best_class_indices[i]],
            best_class_probabilities[i]
        ))
        accuracy = np.mean(np.equal(best_class_indices, FaceModel.labels))
        rec_name = FaceModel.class_names[best_class_indices[i]]
        if best_class_probabilities[i] < 0.7:
            rec_name = "unknown"
        rec_name_lst.append(rec_name)
    print(' Accuracy: %.3f' % accuracy)
    j = 0
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x-20, y-20), (x+w+20, y+h+20), (0, 255, 0), 4)
        cv2.putText(frame, rec_name_lst[j], (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 255, 0), 2)
        j = j + 1
    return frame
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_trained(self) -> bool:\r\n return not getattr(self._lda, \"classes_\", None) is None", "def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)", "def is_trained(self) -> bool:", "def classification(self):\n if self.video_in != None:\n ret, frame = self.video_in.get_a_frame()\n elif self.camera_in != None:\n ret, frame = self.camera_in.get_a_frame()\n if ret == True:\n # detect face\n faces = FaceModel.detect_face(self, frame)\n FaceModel.write_faces_to_file(self, frame, faces)\n status = FaceModel.face_embeddings(self, faces)\n if status == True:\n bounded_frame = self.face_prediction(frame, faces)\n # We are done with embedding and prediction.\n # We can delete the temp directory where we saved\n # the frame, so that the next frame with face\n # can be saved there\n shutil.rmtree(FaceModel.data_dir)\n os.makedirs(FaceModel.data_dir)\n return True, bounded_frame\n else:\n return True, frame\n else:\n return False, None", "def class_predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 3:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return class_predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return class_predict_3(trained_model, X_test, y_test, image_name)", "def train_classifier(train_faces, train_faces_ids):\n recognizer_lbph = cv2.face.LBPHFaceRecognizer_create()\n print('Training model in progress...')\n recognizer_lbph.train(train_faces, np.array(train_faces_ids))\n print('Saving...')\n recognizer_lbph.save('trainner.yml')\n print('Model training complete!')", "def __call__(self, pred_texture, gt_texture):\n pred_class = self.classifier.predict(pred_texture)\n gt_class = self.classifier.predict(gt_texture)\n if pred_class == gt_class:\n return 0\n else:\n return 1", "def predict(X_emb, knn_clf=None, model_path=None, distance_threshold=0.5):\n# if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:\n# raise Exception(\"Invalid image path: {}\".format(X_img_path))\n\n if knn_clf is None and model_path is None:\n raise Exception(\"Must supply knn classifier either thourgh knn_clf or model_path\")\n\n # Load a trained KNN model (if one was passed in)\n if knn_clf is None:\n with open(model_path, 'rb') as f:\n knn_clf = pickle.load(f)\n\n # Load image file and find face locations\n# X_img = face_recognition.load_image_file(X_img_path)\n# X_face_locations = face_recognition.face_locations(X_img)\n #X_face_locations = face_recognition.face_locations(X_img)\n\n # If no faces are found in the image, return an empty result.\n #if len(X_face_locations) == 0:\n #return []\n\n # Find encodings for faces in the test iamge\n #faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)\n\n # Use the KNN model to find the best matches for the test face\n faces_encodings=[ X_emb ]\n closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=10)\n print(closest_distances)\n are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(faces_encodings))]\n\n # Predict classes and remove classifications that aren't within the threshold\n #return [(pred, loc) if rec else (\"unknown\", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]\n return [(pred) if rec else (\"Unknown\") for pred, rec in 
zip(knn_clf.predict(faces_encodings), are_matches)]", "def classify_face(img, HOG_model):\r\n img = pp.resize_image(img, img_input_size)\r\n \r\n # Ensure shape matches exactly\r\n shape_delta = img_input_shape[0] - img.shape[0]\r\n if shape_delta > 0:\r\n new_row = np.random.randint(0,255,[shape_delta,img_input_shape[1],img_input_shape[2]],dtype='uint8')\r\n img = np.vstack([img, new_row])\r\n \r\n elif shape_delta < 0:\r\n img = img[:img_input_shape[0],:,:] \r\n \r\n HOG_img = HOG_extractor(img).flatten()\r\n class_pred = img_classes[HOG_model.predict_proba([HOG_img]).argmax(axis=-1)[0]]\r\n return class_pred", "def predict_only(self):", "def predict_class(self, inputs):\n if not self.trained:\n if self.verbose:\n print(\"KMeans Model Class - Predict Class Function: No trained model\")\n return -1\n\n\n return self.cluster_classes[self.model.predict(inputs)]", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)", "def predict(c, mean_ball, covariance_ball, mean_bg, covariance_bg):\n likelihood_ball = get_likelihood(c, mean_ball, covariance_ball)\n likelihood_bg = get_likelihood(c, mean_bg, covariance_bg)\n neuman_test = likelihood_ball/likelihood_bg\n return True if neuman_test >= 1 else False", "def predict(model, images):\n return model.predict_classes(images)", "def predict(self, sess, img_data):\n\n with sess.as_default():\n new_image = self.preprocess(img_data, self.input_shape)\n input_feed = self.create_input_feed(sess, new_image, img_data)\n output_fetch = self.create_output_fetch(sess)\n all_classes, all_scores, all_bboxes = sess.run(output_fetch, input_feed)\n\n return all_classes, all_scores, all_bboxes", "def svm_classification(self):\n\n if len(self.saved_gestures.keys()) <= 1:\n print(\"Not enough gestures!\")\n return None\n else:\n x = []\n y = []\n z = []\n for elem in self.current_recording:\n x.append(elem[0][0])\n y.append(elem[1][0])\n z.append(elem[2][0])\n\n gesture_fft = self.get_fft(x, y, z)\n\n if len(gesture_fft) > self.cutoff_length:\n print(\"bigger than cutoff\")\n gesture_fft = gesture_fft[:self.cutoff_length]\n elif len(gesture_fft) < self.cutoff_length:\n\n print(\"smaller than cutoff\")\n temp = np.zeros(self.cutoff_length)\n for x in range(len(gesture_fft)):\n temp[x] = gesture_fft[x]\n gesture_fft = temp\n else:\n pass\n\n return self.classifier.predict(gesture_fft)", "def predict(self, face):\r\n # Resize the face to the model input size\r\n face = resize(image=rgb2gray(face),output_shape=self.input_size)\r\n # Predict the probabilities of each emotion\r\n probabilities = self.network.predict(face[None,...,None])[0]\r\n # Take the most probable emotion\r\n max_prob = probabilities.argmax()\r\n # Take this label if the confidence is high enough, or Missing Value (None) elsewhere.\r\n emotion = EMOTIONS[max_prob] if probabilities[max_prob] > MIN_CONFIDENCE else None\r\n return emotion", "def is_training(self):\n return self.mode == \"train\"", "def is_training(self):\n return self.mode == \"train\"", "def predict(self, text):\n prediction = self.pipeline.predict([text])\n return bool(prediction[0])", "def predict(self):\n raise NotImplementedError", "def is_trained(self) -> bool:\r\n return not getattr(self._qda, \"classes_\", None) is None", "def class_predict_3(trained_model, X_test, y_test, image_name):\n # Predict test set\n try:\n test_pred = trained_model.predict_proba(X_test)\n 
except:\n test_pred = trained_model.predict(X_test)\n \n if len(test_pred.shape) == 1:\n raise Exception(\"Probabilistic prediction needed.\")\n \n # Transform y_test\n if len(y_test.shape) > 1:\n y_test = np.argmax(y_test, axis=1)\n \n classes = np.unique(y_test)\n results = np.zeros((len(classes), ))\n for class_num in classes:\n \n # Take predictions for current class\n X_pred = test_pred[y_test == class_num, :]\n \n # Number of hits\n pred_ok = (np.argmax(X_pred, axis=1) == class_num).sum()\n \n # Percentage of hits\n pred_acc = pred_ok / X_pred.shape[0]\n \n # Actualize data for plotting results\n results[class_num] = pred_acc\n \n # Write test message\n with open(OUTPUT_FILE, 'a') as f:\n f.write(\"test_acc of class {}: {:.3f}\\n\".format(class_num,\n pred_acc))\n \n # Generate accuracy plot\n plt.figure()\n plt.bar(classes, results, align='center')\n plt.xticks(classes, classes)\n plt.title('Per class test accuracy')\n plt.xlabel('class')\n plt.ylabel('accuracy')\n plt.legend()\n \n # Save test plot\n plot_file = os.path.join(OUTPUT_DIR, \"{}_test_accuracy\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')", "def predict_class(self, feature):\n return self._clf.predict(feature)", "def predict_class(self, feature):\n return self._clf.predict(feature)", "def predict(self, img):\n logger.info(\"predict() for %s\" %threading.current_thread())\n\n #detect face from the image\n face, rect = self.detect_face(img)\n\n if face is None or rect is None:\n #print(\"No face found for img \", type(img))\n return None, None, None, None\n\n if self.redis_server_password is None:\n # No training data available. Just perform detection and return\n # an error message in the subject value.\n warning = \"Training data not available. Redis password not set.\"\n subject = \"No Training Password\" # This will be displayed with the face\n confidence = 0\n logger.warning(\"%s\" %warning)\n return None, subject, confidence, rect\n\n #predict the image using our face recognizer\n label, confidence = self.face_recognizer.predict(face)\n #get name of respective label returned by face recognizer\n label_text = self.face_recognizer.getLabelInfo(label)\n logger.info(\"label=%s label_text=%s\" %(label, label_text))\n\n # print(label_text, confidence, rect)\n return img, label_text, confidence, rect", "def model_predict(img_path):\n img = open_image(img_path)\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n return pred_class", "def predict(self, features):\n scores = self.predict_proba(features)\n return self.classes[np.argmax(scores)]" ]
[ "0.6539211", "0.64435315", "0.642666", "0.64105463", "0.6291514", "0.62014234", "0.6199739", "0.61470646", "0.60917354", "0.60511786", "0.6036084", "0.6013855", "0.6013855", "0.6013855", "0.6013592", "0.6008291", "0.5985599", "0.59688056", "0.5966839", "0.5940728", "0.5940728", "0.5938922", "0.5922767", "0.59069973", "0.5899131", "0.58839864", "0.58839864", "0.5867779", "0.58631444", "0.58443844" ]
0.70823354
0
Performs all necessary work to do face classification. Returns
def classification(self): if self.video_in != None: ret, frame = self.video_in.get_a_frame() elif self.camera_in != None: ret, frame = self.camera_in.get_a_frame() if ret == True: # detect face faces = FaceModel.detect_face(self, frame) FaceModel.write_faces_to_file(self, frame, faces) status = FaceModel.face_embeddings(self, faces) if status == True: bounded_frame = self.face_prediction(frame, faces) # We are done with embedding and prediction. # We can delete the temp directory where we saved # the frame, so that the next frame with face # can be saved there shutil.rmtree(FaceModel.data_dir) os.makedirs(FaceModel.data_dir) return True, bounded_frame else: return True, frame else: return False, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classify_face(im):\r\n faces = get_encoded_faces()\r\n faces_encoded = list(faces.values())\r\n known_face_names = list(faces.keys())\r\n\r\n img = cv2.imread(im, 1)\r\n \"\"\"\r\n Resize optinal \r\n \"\"\"\r\n #img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\r\n #img = img[:,:,::-1]\r\n face_locations = face_recognition.face_locations(img)\r\n unknown_face_encodings = face_recognition.face_encodings(img, face_locations)\r\n\r\n face_names = []\r\n for face_encoding in unknown_face_encodings:\r\n # See if the face is a match for the known face(s)\r\n matches = face_recognition.compare_faces(faces_encoded, face_encoding)\r\n name = \"Unknown\"\r\n\r\n # use the known face with the smallest distance to the new face\r\n face_distances = face_recognition.face_distance(faces_encoded, face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n name = known_face_names[best_match_index]\r\n\r\n face_names.append(name)\r\n\r\n \"\"\"\r\n All the photo lables in the faces foler end with (number) so a simiple .find(\"(\") command takes the () away from\r\n the label leaving us with the full name of the person\r\n\r\n \"\"\"\r\n\r\n result = name.find('(') \r\n fullname = (name[:result])\r\n \"\"\"\r\n If face_recogntion module recognizes a face but that face is not in the faces module then \r\n it will print unknown and we print 12345678 to use it on the start attednace program \r\n\r\n \"\"\"\r\n if (name == \"Unknown\"):\r\n print(\"12345678\")\r\n else:\r\n \"\"\"\r\n f'{len(face_locayion)}-people - will return the number of people in photo taken by Nao'\r\n \"\"\"\r\n print (f'{len(face_locations)}-people')\r\n print (fullname)\r\n print(courseid)\r\n print (lateornot)\r\n c34 = fullname.find(' ')\r\n firstname = (fullname[:c34])\r\n lastname = (fullname[c34:])\r\n \"\"\"\r\n We get all the data courseid , fristname , lastname, datetime1,and late or not and submited on the website \r\n \r\n\r\n \"\"\"\r\n login_data = {\r\n\t 'Course': courseid,\r\n\t 'FirstName': firstname,\r\n\t 'LastName': lastname,\r\n\t 'Date': datetime2,\r\n\t 'Attendance': 'on',\r\n\t 'Late': latev,\r\n\t 'submitbutton': 'Submit'\r\n }\r\n if(fullname == \"Unknow\"):\r\n \tprint(\"I-dont-know-you\")\r\n else:\r\n \r\n with requests.Session() as s:\r\n \turl = \"https://rbattendance.000webhostapp.com/update.php\"\r\n \tr = s.get(url)\r\n \tsoup = BeautifulSoup(r.content, 'html5lib')\r\n \tr = s.post(url, data = login_data)\r\n \t#print(r.content)\r\n \r\n \r\n\r\n\r\n\r\n\r\n \"\"\"\r\n This for loop is reponsible for drawing on the image \r\n \"\"\"\r\n\r\n for (top, right, bottom, left), name in zip(face_locations, face_names):\r\n # Draw a box around the face\r\n cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)\r\n\r\n # Draw a label with a name below the face\r\n cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)\r\n font = cv2.FONT_HERSHEY_DUPLEX\r\n cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)\r\n\r\n\r\n # Display the resulting image\r\n \r\n \r\n while True:\r\n #cv2.imshow('Video', img)\r\n #if cv2.waitKey(1) & 0xFF == ord('q'):\r\n return face_names", "def detectFaceAndClassify(faceNet, faceMaskClassifier, testImagePath, threshold):\n # load the input test image from disk\n image = cv2.imread(testImagePath)\n # making a copy of image and finding the image spatial dimensions\n orig = image.copy()\n (h, w) = image.shape[:2]\n\n # construct a blob from the image to pass 
to the network\n # using standard weights for the face detection model for image preprocessing\n blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))\n\n # obtain the face detections by passing the blob through the network\n print(\"computing face detections...\")\n faceNet.setInput(blob)\n faceDetections = faceNet.forward()\n\n # loop over the detections to classify them and form bounding boxes and labels\n for i in range(0, faceDetections.shape[2]):\n # extract only confident detections using the confidence/probability\n # associated with the detection\n confidence = faceDetections[0, 0, i, 2]\n\n # filter out weak detections by ensuring the confidence is\n # greater than the minimum confidence 0.5 or input variable\n if confidence > threshold:\n # extract bounding box dimensions and face Region of intrest for classification\n faceROI, startX, startY, endX, endY = extractBoxAndFaceROI(image, faceDetections, itemNum=i,\n height=h, width=w)\n\n faceROI = np.expand_dims(faceROI, axis=0)\n\n # Passing the pre-processed image with classification model to check if there is a mask or not\n (mask, withoutMask) = faceMaskClassifier.predict(faceROI)[0]\n # (mask, withoutMask) = faceMaskClassifier.predict(faceROI)\n\n # find the class and associated colour to use for the bounding box and text\n label = \"Mask\" if mask > withoutMask else \"No Mask\"\n color = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\n\n # include the probability of prediction in the label of the bounding box\n label = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n\n # forming bounding box rectangle and display the label the output image frame\n cv2.putText(image, label, (startX, startY - 10),\n cv2.FONT_HERSHEY_COMPLEX, 0.45, color, 2)\n cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)\n\n # show the output image\n cv2.imshow(\"Output\", image)\n # display the image still a key is pressed, when key is pressed program is terminated\n cv2.waitKey(0)", "def classify_face(im):\n faces_death = get_encoded_faces_deaths()\n faces_arrested = get_encoded_faces_arrested()\n faces_wanted = get_encoded_faces_wanted()\n\n faces_encoded_death = list(faces_death.values())\n known_face_names_death = list(faces_death.keys())\n\n faces_encoded_arrested = list(faces_arrested.values())\n known_face_names_arrested = list(faces_arrested.keys())\n\n faces_encoded_wanted = list(faces_wanted.values())\n known_face_names_wanted = list(faces_wanted.keys())\n\n img = cv2.imread(im, 1)\n face_locations = face_recognition.face_locations(img)\n unknown_face_encodings = face_recognition.face_encodings(img,face_locations)\n face_names = []\n find_in_db(im,known_face_names_death,unknown_face_encodings,face_names,faces_encoded_death,\"unnatural_death_images/unnatural_death_images\")\n find_in_db(im,known_face_names_arrested,unknown_face_encodings,face_names,faces_encoded_arrested,\"ArrestPerson_images\")\n find_in_db(im,known_face_names_wanted,unknown_face_encodings,face_names,faces_encoded_wanted,\"wanted\")", "def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)", "def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()", "def detect_face_task(img):\n\n # paramter for detect\n # image_size = 160\n # margin = 44\n minsize = 20 # minimum size of face\n threshold = [0.6, 0.7, 0.7] # three steps's threshold\n factor = 0.709 # scale factor\n\n # caffe model\n pnet = 
caffe_model.get_pnet()\n rnet = caffe_model.get_rnet()\n onet = caffe_model.get_onet()\n\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n print('detect bounding: ', bounding_boxes)\n print('Find faces: ', bounding_boxes.shape[0])\n\n # all_faces is faces information list, include face bytes, face position\n all_faces = []\n for face_position in bounding_boxes:\n face_position = face_position.astype(int)\n print('face position: ', face_position)\n\n # each face information, include position, face image\n head_rect = face_position[:4].tolist() # numpy array to python list\n head_img = misc.toimage(img).crop(head_rect)\n head_img_io = StringIO.StringIO()\n head_img.save(head_img_io, format='JPEG')\n head_img_b64 = base64.b64encode(head_img_io.getvalue())\n\n # construct response\n face_info = {}\n face_info['rect'] = head_rect\n face_info['image'] = head_img_b64\n\n all_faces.append(face_info)\n\n return all_faces", "def face_detect(sess, net, image_name):\n\n\t# Load the demo image\n\tim_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n\tim = cv2.imread(im_file)\n\n\t# Detect all object classes and regress object bounds\n\ttimer = Timer()\n\ttimer.tic()\n\t# scores, boxes = im_detect(sess, net, im)\n\tscores, boxes, eyes, smiles = im_detect_ori(sess, net, im)\n\ttimer.toc()\n\tprint ('Detection took {:.3f}s for '\n\t\t\t'{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n\t# Visualize detections for each class\n\t# im = im[:, :, (2, 1, 0)]\n\t# fig, ax = plt.subplots(figsize=(8, 8))\n\t# ax.imshow(im, aspect='equal')\n\n\tCONF_THRESH = 0.9\n\tNMS_THRESH = 0.3\n\tfor cls_ind, cls in enumerate(CLASSES[20:]):\n\t\tcls_ind += 20 # because we skipped everything except face\n\t\tcls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n\t\tcls_scores = scores[:, cls_ind]\n\t\tdets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)\n\t\tkeep = nms(dets, NMS_THRESH)\n\t\tdets = dets[keep, :]\n\t\teye = eyes[keep, :]\n\t\tsmile= smiles[keep, :]\n\n\tinds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n\tface_num = len(inds)\n\tprint '{} faces detected!'.format(face_num)\n\tdets = dets[inds, :]\n\teye = eye[inds, 1]\n\tsmile = smile[inds, 1]\n\n\treturn dets, eye, smile", "def get_classification(self, image):\n if self.correct_gamma:\n if self.gamma == 1.0:\n self.gamma = 0.6\n elif self.gamma == 0.6:\n self.gamma = 1.0\n image = self.adjust_gamma(image, self.gamma)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_np = np.asarray(image, dtype=\"uint8\")\n image_np_expanded = np.expand_dims(image_np, axis=0)\n\n detected = False\n\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n boxes = np.squeeze(boxes)\n classes = np.squeeze(classes).astype(np.int32)\n scores = np.squeeze(scores)\n best_scores = []\n\n for idx, classID in enumerate(classes):\n if self.MODEL_NAME == 'ssdlite_mobilenet_v2_coco_2018_05_09':\n if classID == 10: # 10 is traffic light\n if scores[idx] > 0.10: #confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n else: # we tuned the model to classify only traffic lights\n if scores[idx] > 0.10: # confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n\n tl_index = TrafficLight.UNKNOWN\n if detected:\n best_scores.sort(key=lambda tup: tup[0], 
reverse=True)\n\n best_score = best_scores[0]\n rospy.logdebug(\"number of TL found %d, best score: %f, color: %f\", len(best_scores), best_score[0], best_score[2])\n nbox = boxes[best_score[1]]\n\n height = image.shape[0]\n width = image.shape[1]\n\n box = np.array([nbox[0]*height, nbox[1]*width, nbox[2]*height, nbox[3]*width]).astype(int)\n box_height = box[2] - box[0]\n box_width = box[3] - box[1]\n ratio = float(box_height)/float(box_width)\n rospy.logdebug(\"ratio: %f\", ratio)\n if ratio >= 2.0 and ratio < 3.0: #started from 2.4\n tl_cropped = image[box[0]:box[2], box[1]:box[3]]\n tl_color, tl_index = self.get_color(tl_cropped)\n #color = ['RED', 'YELLOW', 'GREEN', 'UNKNOWN']\n #tl_index = best_score[2]\n #tl_color = color[tl_index]\n #augment image with detected TLs\n cv2.rectangle(image, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_color = (255, 255, 255)\n cv2.putText(image, tl_color, (box[1], box[0]), font, 2.0, font_color, lineType=cv2.LINE_AA)\n return image, tl_index", "def classifier():\n\tprint(\"Classifying\")\n\t#initialize important variables\n\tminConfidence = 0.5\n\tthresholdValue = 0.3\n\t\n\t\"\"\"\n\tfile = request.files#['image']\n\tfile.save(\"./classifier_image.jpg\")\n\tframe = cv2.imread(\"./classifier_image.jpg\")\n\t\"\"\"\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\") \n\n\t#file = request.files['image']\n\t#file.save(\"./classifier_image.jpg\")\n\t#frame = cv2.imread(\"./classifier_image.jpg\")\n\t#file = request.json\n\t#frame = np.array(file[\"contour\"], dtype=\"uint8\")\n\t\n\t#Get Image dimensions\n\timage = cv2.copyMakeBorder(frame, 30, 30, 30, 30, cv2.BORDER_CONSTANT, value=255)\n\t(H, W) = image.shape[:2]\n\t\n\t#Get the output layers parameters\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\t\n\t#Create a blob to do a forward pass\n\tblob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\t#print(H, W)\n\tlayerOutputs = net.forward(ln)\n\tprint(type(net))\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\tfor output in layerOutputs:\n\t\tprint(\"detecting\")\n\t\t#loop over each detection\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability) of\n\t\t\t# the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > minConfidence:\n\t\t\t\t# scale the bounding box coordinates back relative to the\n\t\t\t\t# size of the image, keeping in mind that YOLO actually\n\t\t\t\t# returns the center (x, y)-coordinates of the bounding\n\t\t\t\t# box followed by the boxes' width and height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top and\n\t\t\t\t# and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates, confidences,\n\t\t\t\t# and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping bounding\n\t# boxes\n\tidxs = 
cv2.dnn.NMSBoxes(boxes, confidences, minConfidence, thresholdValue)\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\toutput = json.load(open(outputFile))\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\tprint(LABELS[classIDs[i]], output[LABELS[classIDs[i]]]+1, confidences[i])\n\t\t\toutput[LABELS[classIDs[i]]]+=1\n\t\t\n\t\tjson.dump(output, open(outputFile, \"w\"))\n\t\treturn LABELS[classIDs[i]]\n\telse:\n\t\treturn Response(status=200)", "def run(self):\n while True:\n ret, frame = self.classification()\n # valid frame\n if ret == True:\n # output the recognized face\n if self.video_out != None:\n self.video_out.display(frame)\n if self.pic_out != None:\n self.pic_out.save_frame(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n if self.video_out != None:\n cv2.destroyAllWindows()", "def face_recognition_train(self, data_dir='datasets', batch_size=32, img_height=128, img_width=128, epochs=10,\n model_path='model', pretrained=None, base_model_trainable=False):\n\n obj = train.Classifier(data_dir=data_dir, batch_size=batch_size, img_height=img_height,\n img_width=img_width, epochs=epochs, model_path=model_path, pretrained=pretrained,\n base_model_trainable=base_model_trainable)\n obj.start()", "def process_image(self):\n\n detect.main(self.nn_args)", "def get_classification(self, image):\n\n image_np_expanded = np.expand_dims(image, axis=0)\n\n # Perform network inference\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores,\n self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n if self.RUNNING_ON_CARLA == True:\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > .05:\n if classes[i] == 10:\n classname = self.category_index[classes[i]]['name']\n print(classname, scores[i])\n\n # Extract image from best bounding box and pass through light classifier\n ymin, xmin, ymax, xmax = boxes[i]\n im_height, im_width, im_depth = image.shape\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)\n tf_image_cropped = image[int(top):int(bottom), int(left):int(right), :]\n\n PILImage = Image.fromarray(tf_image_cropped)\n resized_img = PILImage.resize((85, 256), Image.ANTIALIAS)\n image_np_resized = self.load_image_into_numpy_array(resized_img)\n x = np.expand_dims(image_np_resized, axis=0)\n x = np.vstack([x])\n\n #model = load_model('tf_classifier_1.h5')\n #model.compile(loss='categorical_crossentropy',\n # optimizer='adam',\n # metrics=['accuracy'])\n classes = self.keras_model.predict_classes(x, batch_size=1)\n print(classes)\n\n if classes[0] == 0:\n self.current_light = TrafficLight.GREEN\n elif classes[0] == 2:\n self.current_light = TrafficLight.YELLOW\n else:\n self.current_light = TrafficLight.RED\n\n break\n\n else:\n # Check the detections. If it has a good score\n # then set the current light to the detected label. The\n # first one is always the best (they are returned sorted \n # in score order).\n # Note that we have trained for 14 categories, including\n # left/right arrows etc. 
Here we are only looking for \n # standard red, yellow and green light and ignore others.\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > .05:\n classname = self.category_index[classes[i]]['name']\n print(classname, scores[i])\n\n if classname == 'Green':\n self.current_light = TrafficLight.GREEN\n elif classname == 'Yellow':\n self.current_light = TrafficLight.YELLOW\n elif classname == 'Red':\n self.current_light = TrafficLight.RED\n else:\n self.current_light = TrafficLight.UNKNOWN\n\n break\n\n return self.current_light", "def update(self,image):\r\n \r\n self._faces=[]\r\n \r\n if util.isgray(image):\r\n image=cv2.equalizeHist(image)\r\n \r\n else:\r\n \r\n image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\n cv2.equalizeHist(image,image)\r\n \r\n minsize=util.widthheightdividedby(image,8)\r\n\r\n \r\n\r\n \r\n facerect=self._faceclassifier.detectMultiScale(image,\r\n self.scalefactor,\r\n self.minneighbors,\r\n self.flags,\r\n minsize)\r\n \r\n \"\"\"if facerects is not None:\r\n \r\n for facerect in facerects:\r\n face=face()\r\n \r\n face.facerect=facerect\r\n \r\n \r\n x,y,w,h=facerect\r\n \r\n # Seek an eye in the upper-left part of the face. \r\n searchRect = (x+w/7, y, w*2/7, h/2) \r\n face.leftEyeRect = self._detectOneObject( \r\n self._eyeClassifier, image, searchRect, 64) \r\n \r\n \r\n \r\n # Seek an eye in the upper-right part of the face. \r\n searchRect = (x+w*4/7, y, w*2/7, h/2) \r\n face.rightEyeRect = self._detectOneObject( \r\n self._eyeClassifier, image, searchRect, 64) \r\n \r\n \r\n \r\n # Seek a nose in the middle part of the face. \r\n searchRect = (x+w/4, y+h/4, w/2, h/2) \r\n face.noseRect = self._detectOneObject( \r\n self._noseClassifier, image, searchRect, 32) \r\n \r\n # Seek a mouth in the lower-middle part of the face. 
\r\n searchRect = (x+w/6, y+h*2/3, w*2/3, h/3) \r\n face.mouthRect = self._detectOneObject( \r\n self._mouthClassifier, image, searchRect, 16) \r\n \r\n \r\n \r\n self._faces.append(face)\r\n\r\n \r\n \r\n def _detectoneobject(self,\r\n classifier,\r\n image,\r\n rect,\r\n imagesizetominsizeratio):\r\n \r\n x ,y ,w ,h=rect\r\n \r\n minsize=util.widthheightdividedby(image,\r\n imagesizetominsizeratio)\r\n \r\n subimage=image[y:y+h,x:x+w]\r\n \r\n subrect=classifier.dectectMultiScale(subimage,\r\n self.scalefactor,\r\n self.minneighbors,\r\n self.flags,\r\n minsize)\r\n \r\n if len(subrect)==0:\r\n return None\r\n \r\n subx,suby,subw,subh=subrects[0]\r\n \r\n return (x+subx,y+suby,w+subw,h+subh)\r\n \r\n \"\"\"", "def setup_class(cls):\n super().setup_class()\n cls.detector = cls.faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n cls.headPoseEstimator = cls.faceEngine.createHeadPoseEstimator()\n cls.livenessEstimator = cls.faceEngine.createLivenessV1Estimator()\n cls.detection = cls.detector.detectOne(VLImage.load(filename=CLEAN_ONE_FACE))", "def main():\n\n inputDirectory = '/home/mr-paul/atmp/aaproject/scripts/surprised_raw'\n outputDirectory = '/home/mr-paul/atmp/aaproject/scripts/surprised_faces'\n\n # detects all faces from all images in inputDirectory and outputs\n # to outputDirectory\n FaceDetection.extractFaces(\n inputDirectory=inputDirectory, outputDirectory=outputDirectory)", "def face_prediction(self, frame, faces):\n predictions = FaceModel.model.predict_proba(FaceModel.emb_array)\n best_class_indices = np.argmax(predictions, axis=1)\n best_class_probabilities = predictions[\n np.arange(len(best_class_indices)),\n best_class_indices\n ]\n print(' prediction:')\n rec_name_lst = []\n for i in range(len(best_class_indices)):\n print(' %4d %s: %.3f' % (\n i,\n FaceModel.class_names[best_class_indices[i]],\n best_class_probabilities[i]\n )\n )\n accuracy = np.mean(np.equal(best_class_indices, FaceModel.labels))\n rec_name = FaceModel.class_names[best_class_indices[i]]\n if best_class_probabilities[i] < 0.7:\n rec_name = \"unknown\"\n rec_name_lst.append(rec_name)\n print(' Accuracy: %.3f' % accuracy)\n j = 0\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x-20, y-20),\n (x+w +20, y+h+20), (0, 255, 0), 4)\n cv2.putText(frame, rec_name_lst[j], (x, y),\n cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 255, 0), 2);\n j = j + 1\n return frame", "def detection():\n faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.3,\n minNeighbors=3,\n minSize=(30, 30)\n )\t#Haar-cascade: A Face detection algorithm\n\n area = faces[:,2] * faces[:,3]\n faces = np.c_[faces,area]\t#concatenates area values to last column of 'face' array.\n\n print('All detected faces\\n',faces)\n i,j = unravel_index(faces.argmax(), faces.shape)\t# gets the position of maximum value from 'face' array.\n print(i,j)\n print(\"Found %d Face%s!\" %(len(faces),\"s\"[len(faces)==1:]))\n\n X = faces[i,0]\n Y = faces[i,1]\n W = faces[i,2]\n H = faces[i,3]\n \n cv2.rectangle(image, (X, Y), (X + W, Y + H), (0, 255, 0), 2)\n roi_color = image[Y:Y + H, X:X + W] \n print(\"Face(largest) Extracted.\")\n cv2.imwrite('Extracted_face.jpg', roi_color)\t#Image Extraction.\n status = cv2.imwrite('Output.jpg', image)\n print(\"Image Output.jpg written to filesystem: \", status)", "def train_classifier(train_faces, train_faces_ids):\n recognizer_lbph = cv2.face.LBPHFaceRecognizer_create()\n print('Training model in progress...')\n 
recognizer_lbph.train(train_faces, np.array(train_faces_ids))\n print('Saving...')\n recognizer_lbph.save('trainner.yml')\n print('Model training complete!')", "def get_classification(self, image):\n\n\tif 'session' in locals() and session is not None:\n \t print('Close interactive session')\n session.close()\n\n time_start = time.time()\n #TODO implement light color prediction\n #image_np = self.__preprocess_image(image)\n \timage_np = image \n \n \t# Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n time0 = time.time()\n\n # Actual detection.\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n\n time1 = time.time()\n\n output = self.__postprocessing_detected_box(scores[0], classes[0])\n rospy.loginfo('Time in seconds' + str(time1-time_start)+' Result:'+self.__traffic_id_to_name(output))\n return output", "def recognize_faces(x_img,\n knn_clf=None,\n model_path=None,\n distance_threshold=0.3):\n print(\"Start recognize\")\n # Making a check\n if knn_clf is None and model_path is None:\n raise Exception(\"Must supply knn classifier either thought knn_clf or model_path\")\n\n # Load a trained KNN model (if one was passed in)\n if knn_clf is None:\n with open(model_path, 'rb') as f:\n knn_clf = pickle.load(f)\n\n # Load image file and find face locations\n x_face_locations = face_recognition.face_locations(x_img)\n # Set variable for changes on camera (if connected) check\n # x_face_locations_len = 0\n\n # If no faces are found in the image, return an empty result\n if len(x_face_locations) == 0:\n return []\n \n\n # Checking for changes on camera (if connected)\n # if len(x_face_locations) != x_face_locations_len:\n # Find encodings for faces in the test iamge\n faces_encodings = face_recognition.face_encodings(x_img, known_face_locations=x_face_locations)\n # Use the KNN model to find the best matches for the test face\n closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\n are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(x_face_locations))]\n accur_list = [1-closest_distances[0][i][0] for i in range(len(x_face_locations))]\n x_face_locations_len = len(x_face_locations)\n # Predict classes and remove classifications that aren't within the threshold\n return [(pred, loc, accur, rec) if rec else (\"unknown\", loc, 0,0) for pred, loc, accur, rec in\n zip(knn_clf.predict(faces_encodings),\n x_face_locations,\n accur_list,\n are_matches)]", "def classify_face(img, HOG_model):\r\n img = pp.resize_image(img, img_input_size)\r\n \r\n # Ensure shape matches exactly\r\n shape_delta = img_input_shape[0] - img.shape[0]\r\n if shape_delta > 0:\r\n new_row = np.random.randint(0,255,[shape_delta,img_input_shape[1],img_input_shape[2]],dtype='uint8')\r\n img = np.vstack([img, new_row])\r\n \r\n elif shape_delta < 0:\r\n img = img[:img_input_shape[0],:,:] \r\n \r\n HOG_img = HOG_extractor(img).flatten()\r\n class_pred = img_classes[HOG_model.predict_proba([HOG_img]).argmax(axis=-1)[0]]\r\n return class_pred", "def run(self):\n #parse requests\n self.bqSession.update_mex('Calculating Features...')\n log.debug('Forming Feature Requests...')\n #get rectanle gobjects for roi\n r_xml = self.bqSession.fetchxml(self.options.mexURL, view='deep')\n\n rectangles = 
r_xml.xpath('//tag[@name=\"inputs\"]/tag[@name=\"image_url\"]/gobject[@name=\"roi\"]/rectangle')\n image_xml = self.bqSession.fetchxml(self.options.image_url)\n image_url = self.bqSession.service_url('image_service',path=image_xml.attrib['resource_uniq'])\n if rectangles: #On chooses the first rectangle\n #construct operation node\n x1 = int(float(rectangles[0][0].attrib['x']))\n y1 = int(float(rectangles[0][0].attrib['y']))\n x2 = int(float(rectangles[0][1].attrib['x']))\n y2 = int(float(rectangles[0][1].attrib['y']))\n log.debug('Adding Crop: roi=%s,%s,%s,%s' % (x1, y1, x2, y2))\n image_url = self.bqSession.c.prepare_url(image_url, roi='%s,%s,%s,%s' % (x1, y1, x2, y2))\n \n try:\n feature_vectors = extract_bush_feature(self.bqSession, image_url)\n except FeatureCommError as e:\n raise BotanicamError(str(e))\n \n #parse features\n self.bqSession.update_mex('Classifying Results...')\n log.debug('Classifying Results...')\n results= []\n pca = joblib.load(os.path.join(self.model_path,'pca_model'))\n clf = joblib.load(os.path.join(self.model_path,'svm_model'))\n \n for f in feature_vectors:\n f_norm = pca.transform(f)\n results.append(int(clf.predict(f_norm)))\n \n\n class_count = np.bincount(np.array(results))\n self.class_number = np.argmax(class_count)\n self.confidence = float(class_count[self.class_number])/np.sum(class_count)\n log.debug('Found Class %s'%str(self.class_number))", "def detect_faces(self, img, return_best=False):\n if numpy.all(img != None):\n\n try:\n if not self.is_cuda_enable:\n caffe.set_mode_cpu()\n else:\n caffe.set_mode_gpu()\n caffe.set_device(cfg.GPU_ID)\n\n scores, boxes = im_detect(self.net, img)\n\n cls_ind = 1\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = numpy.hstack((cls_boxes,\n cls_scores[:, numpy.newaxis])).astype(numpy.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n\n keep = numpy.where(dets[:, 4] > CONF_THRESH)\n dets = dets[keep]\n\n if len(dets) > 0:\n if return_best:\n # dets is ordered by confidence dets[:, 4], so the first one is the best\n det = [int(dets[0, 0]), int(dets[0, 1]), int(dets[0, 2]), int(dets[0, 3]), dets[0, 4]]\n # extend detection\n extend_factor = self.face_rect_expand_factor\n width = round(det[2]-det[0])\n height = round(det[3]-det[1])\n length = (width + height)/2.0\n centrepoint = [round(det[0]) + width/2.0, round(det[1]) + height/2.0]\n det[0] = centrepoint[0] - round((1+extend_factor)*length/2.0)\n det[1] = centrepoint[1] - round((1+extend_factor)*length/2.0)\n det[2] = centrepoint[0] + round((1+extend_factor)*length/2.0)\n det[3] = centrepoint[1] + round((1+extend_factor)*length/2.0)\n ## prevent going off image\n det[0] = int(max(det[0], 0))\n det[1] = int(max(det[1], 0))\n det[2] = int(min(det[2], img.shape[1]))\n det[3] = int(min(det[3], img.shape[0]))\n return [det]\n else:\n det_list = []\n for j in range(dets.shape[0]):\n det = [int(dets[j, 0]), int(dets[j, 1]), int(dets[j, 2]), int(dets[j, 3]), dets[0, 4]]\n # extend detection\n extend_factor = self.face_rect_expand_factor\n width = round(det[2]-det[0])\n height = round(det[3]-det[1])\n length = (width + height)/2.0\n centrepoint = [round(det[0]) + width/2.0, round(det[1]) + height/2.0]\n det[0] = centrepoint[0] - round((1+extend_factor)*length/2.0)\n det[1] = centrepoint[1] - round((1+extend_factor)*length/2.0)\n det[2] = centrepoint[0] + round((1+extend_factor)*length/2.0)\n det[3] = centrepoint[1] + round((1+extend_factor)*length/2.0)\n ## prevent going off image\n det[0] = int(max(det[0], 0))\n 
det[1] = int(max(det[1], 0))\n det[2] = int(min(det[2], img.shape[1]))\n det[3] = int(min(det[3], img.shape[0]))\n det_list.append(det)\n return det_list\n else:\n return None\n\n except Exception as e:\n print ('Exception in FaceDetectorFasterRCNN: ' + str(e))\n pass\n\n return None", "def detect_face(gray):\r\n face_cascade = cv2.CascadeClassifier(classifier_file_name)\r\n faces = face_cascade.detectMultiScale(gray, scaleFactor=scale_factor,minNeighbors=min_neighbors,minSize=min_size,flags=flags)\r\n return faces", "def face_detector(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # faceCascade imports in the previously made classifier\n faceCascade = cv2.CascadeClassifier('src/face_detection/haarcascade_frontalface_default.xml')\n faces = faceCascade.detectMultiScale(\n gray, \n scaleFactor=1.2,\n minNeighbors=1, \n minSize=(100, 100)\n )\n\n return faces", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n #submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, \"submit\")\n #os.makedirs(submit_dir)\n\n # Read dataset\n img_ids = []\n dataset_dir = os.path.join(dataset_dir, subset)\n image_file = os.listdir(dataset_dir)\n #submission = []\n for img in image_file:\n if not img.startswith('.'):\n img_file = os.path.join(dataset_dir, img)\n image = skimage.io.imread(img_file)\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # Detect object\n\t\t\t\n r = model.detect([image])[0]\n # Encode image to RLE. Returns a string of multiple lines\n source_id = img.split(\".\")[0]\n #rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n #submission.append(rle)\n # Save image with masks\n visualize.display_instances(\n image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'],\n #show_bbox=False, show_mask=False,\n title=\"Predictions\")\n plt.savefig(\"{}/{}.png\".format(submit_dir, source_id))\n\n\n\t\t\n # Save to csv file", "def train():\n face_recognizer = cv2.face.LBPHFaceRecognizer_create()\n \n # Load all saved people\n people = PersonModel.select()\n\n # List of face images\n photos = []\n # List of person IDs corresponding to images in photos[]\n labels = []\n\n for person in people:\n person_dataset_path = os.path.join(Constants.PATH_DATASET, \"person_{}\".format(person.id))\n\n if not os.path.exists(person_dataset_path):\n continue\n\n # List of all images for current person\n photo_files = [os.path.join(person_dataset_path, item) for item in os.listdir(person_dataset_path)]\n person.update(photos_count=len(photo_files)).execute()\n\n # Load all photos\n for photo_file in photo_files:\n photos.append(\n np.array(Image.open(photo_file).convert(\"L\"))\n )\n \n labels.append(person.id)\n\n face_recognizer.train(photos, np.array(labels))\n\n if not face_recognizer.write(Constants.FILE_MODEL):\n return False\n\n return True", "def cluster_faces_in_video(self):\r\n\r\n logger.debug('Executing people clustering')\r\n\r\n rec_loaded = False\r\n\r\n # Try to load YAML files\r\n if os.path.exists(self.cluster_files_path):\r\n\r\n print 'Loading YAML files with clustering results'\r\n logger.debug('Loading YAML files with clustering results')\r\n\r\n self.recognized_faces = []\r\n for yaml_file in os.listdir(self.cluster_files_path):\r\n yaml_file_path = os.path.join(\r\n self.cluster_files_path, yaml_file)\r\n with 
open(yaml_file_path) as f:\r\n self.recognized_faces.append(yaml.load(f))\r\n\r\n print 'YAML files with clustering results loaded'\r\n logger.debug('YAML files with clustering results loaded')\r\n\r\n if not rec_loaded:\r\n\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n if os.path.exists(self.track_file_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n with open(self.track_file_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! No tracking results found!'\r\n logger.warning('No tracking results found!')\r\n return\r\n\r\n # Make copy of tracked faces\r\n tracking_list = list(self.tracked_faces)\r\n\r\n if ((self.params is not None) and\r\n (ce.FACE_MODELS_DIR_PATH_KEY in self.params)):\r\n if ce.NOSE_POS_FILE_PATH_KEY in self.params:\r\n nose_pos_file_path = self.params[ce.NOSE_POS_FILE_PATH_KEY]\r\n\r\n with open(nose_pos_file_path) as f:\r\n self.nose_pos_list = pk.load(f)\r\n else:\r\n # Save face models\r\n self.save_face_models(tracking_list)\r\n\r\n use_clothing_rec = c.USE_CLOTHING_RECOGNITION\r\n\r\n if ((self.params is not None) and\r\n (c.USE_CLOTHING_RECOGNITION_KEY in self.params)):\r\n use_clothing_rec = self.params[c.USE_CLOTHING_RECOGNITION_KEY]\r\n\r\n if (use_clothing_rec and\r\n ((self.params is None)\r\n or (ce.CLOTH_MODELS_DIR_PATH_KEY not in self.params))):\r\n # Save cloth models\r\n self.save_cloth_models(tracking_list)\r\n\r\n print '\\n\\n### People clustering ###\\n'\r\n logger.debug('\\n\\n### People clustering ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n self.recognized_faces = []\r\n\r\n # List of segments already analyzed and annotated\r\n ann_segments = []\r\n\r\n model = None\r\n\r\n # Iterate through tracked faces\r\n person_counter = 0\r\n segment_counter = 0\r\n tracked_faces_nr = float(len(tracking_list))\r\n\r\n for tracking_segment_dict in tracking_list:\r\n\r\n self.progress = 100 * (segment_counter / tracked_faces_nr)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n if segment_counter not in ann_segments:\r\n\r\n # Save all segments relative\r\n # to one person in person_dict\r\n person_dict = {c.PERSON_COUNTER_KEY: person_counter,\r\n c.ASSIGNED_LABEL_KEY: c.UNDEFINED_LABEL,\r\n c.ASSIGNED_TAG_KEY: c.UNDEFINED_TAG}\r\n\r\n segment_list = []\r\n\r\n segment_dict = {}\r\n\r\n segment_frame_list = tracking_segment_dict[c.FRAMES_KEY]\r\n\r\n segment_dict[c.FRAMES_KEY] = segment_frame_list\r\n\r\n segment_dict[c.ASSIGNED_TAG_KEY] = c.UNDEFINED_TAG\r\n\r\n segment_dict[c.CONFIDENCE_KEY] = 0\r\n\r\n segment_dict[c.SEGMENT_COUNTER_KEY] = segment_counter\r\n\r\n # Start of segment in milliseconds\r\n # of elapsed time in video\r\n\r\n start = tracking_segment_dict[c.SEGMENT_START_KEY]\r\n\r\n segment_dict[c.SEGMENT_START_KEY] = start\r\n\r\n # Duration of segment in milliseconds\r\n\r\n duration = tracking_segment_dict[c.SEGMENT_DURATION_KEY]\r\n\r\n segment_dict[c.SEGMENT_DURATION_KEY] = duration\r\n\r\n if c.ANN_TAG_KEY in tracking_segment_dict:\r\n segment_ann = tracking_segment_dict[c.ANN_TAG_KEY]\r\n segment_dict[c.ANN_TAG_KEY] = segment_ann\r\n\r\n segment_list.append(segment_dict)\r\n\r\n ann_segments.append(segment_counter)\r\n\r\n db_path = os.path.join(\r\n self.face_models_path, str(segment_counter))\r\n\r\n if 
os.path.isfile(db_path):\r\n\r\n model = cv2.createLBPHFaceRecognizer()\r\n\r\n model.load(db_path)\r\n\r\n if model:\r\n # Use model of this segment\r\n # to recognize faces of remaining segments\r\n\r\n ann_segments = self.search_face(ann_segments,\r\n segment_list, model,\r\n segment_counter)\r\n\r\n # Add segments to person dictionary\r\n\r\n person_dict[c.SEGMENTS_KEY] = segment_list\r\n\r\n # Save total duration of video in milliseconds\r\n\r\n tot_duration = (\r\n self.video_frames * 1000.0 / self.fps)\r\n\r\n person_dict[c.VIDEO_DURATION_KEY] = tot_duration\r\n\r\n self.recognized_faces.append(person_dict)\r\n\r\n person_counter += 1\r\n\r\n segment_counter += 1\r\n\r\n del model\r\n\r\n if not (os.path.exists(self.cluster_path)):\r\n # Create directory for people clustering\r\n os.makedirs(self.cluster_path)\r\n\r\n # Save clustering result in YAML files\r\n\r\n # Remove previous files\r\n if os.path.exists(self.cluster_files_path):\r\n shutil.rmtree(self.cluster_files_path)\r\n # Create directory for people clustering results\r\n os.makedirs(self.cluster_files_path)\r\n\r\n counter = 0\r\n for person_dict in self.recognized_faces:\r\n yaml_file_name = str(counter) + '.YAML'\r\n yaml_file_path = os.path.join(self.cluster_files_path, yaml_file_name)\r\n utils.save_YAML_file(yaml_file_path, person_dict)\r\n counter += 1\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for people clustering:', time_in_seconds, 's\\n'\r\n logger.debug('Time for people clustering:', time_in_seconds, 's\\n')\r\n\r\n self.anal_times[c.PEOPLE_CLUSTERING_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)\r\n\r\n self.calculate_medoids()", "def train_routine(training_file, output_folder):\n if output_folder[-1] != '/':\n output_folder += '/'\n\n svm_file = output_folder + 'svm.txt'\n centroid_file = output_folder + 'centroids.txt'\n ids_file = output_folder + 'ids.txt'\n\n surf = cv2.SURF(250, extended=False)\n categories = dict()\n ids = dict()\n id = 1\n features = list()\n\n print \"Extracting features\"\n for line in open(training_file):\n try:\n category, path = line.split(';')\n except:\n print \"Error: File not in proper format. 
Ensure: <category/class name>; <path to image of said category>\"\n sys.exit(0)\n path = path.strip()\n\n try:\n img = cv2.imread(path)\n #img = cv2.resize(img, (500, 500))\n except Exception as e:\n print e\n continue\n\n keypoints, descriptors = surf.detectAndCompute(img, None)\n\n if not category in categories:\n categories[category] = Category(label=category)\n ids[category] = id\n id += 1\n categories[category].add_feature(descriptors)\n\n #for category in categories:\n #f = categories[category].yield_features()\n ##features.extend(f)\n #for i in f:\n #features.extend(i)\n\n print \"Calculating centroids\"\n #np_features = numpy.array(features)\n #print \"Features: \", np_features.shape\n #centroids, labels = kmeans2(np_features, FEATURE_TYPES)\n centroids = helpers.loadObject(output_folder + 'centroids.txt')\n print centroids.shape\n\n print \"Forming bag of words\"\n X, Y = [], []\n for category in categories:\n categories[category].calc_bagofwords(centroids)\n for bow in categories[category].bagofwords:\n X.append(bow)\n Y.append(ids[category])\n print \"Fitting linear SVMs onto the bag of words\"\n lin_clf = svm.LinearSVC()\n lin_clf.fit(X, Y)\n\n helpers.saveObject(lin_clf, svm_file)\n helpers.saveObject(centroids, centroid_file)\n helpers.saveObject(ids, ids_file)" ]
[ "0.72616476", "0.70782316", "0.6882108", "0.6881206", "0.6809572", "0.6784606", "0.67753196", "0.66968185", "0.6624295", "0.65775526", "0.6573839", "0.6529528", "0.65139365", "0.6483432", "0.6458223", "0.6457295", "0.6426916", "0.6408628", "0.6396731", "0.6369119", "0.6361105", "0.63471866", "0.63393116", "0.631696", "0.63164854", "0.6306556", "0.62977576", "0.6293344", "0.6293082", "0.6292837" ]
0.71059865
1
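
The positive document and several negatives in the record above share one pattern: take the argmax of per-face class probabilities and fall back to an "unknown" label when the best probability sits below a fixed cut-off (the 0.7 value appears in the record's face_prediction negative). A minimal numpy-only sketch of that thresholding step follows; the class names, probability rows, and the threshold constant are illustrative assumptions, not values read from the dataset.

import numpy as np

# Hypothetical class names and confidence cut-off -- illustration only.
CLASS_NAMES = ["alice", "bob", "carol"]
UNKNOWN_THRESHOLD = 0.7

def label_faces(probabilities: np.ndarray) -> list:
    """Map each row of class probabilities to a name, or 'unknown'
    when the best probability falls below the threshold."""
    best_idx = probabilities.argmax(axis=1)
    best_prob = probabilities[np.arange(len(best_idx)), best_idx]
    return [
        CLASS_NAMES[i] if p >= UNKNOWN_THRESHOLD else "unknown"
        for i, p in zip(best_idx, best_prob)
    ]

if __name__ == "__main__":
    probs = np.array([[0.85, 0.10, 0.05],   # confident -> 'alice'
                      [0.40, 0.35, 0.25]])  # ambiguous -> 'unknown'
    print(label_faces(probs))  # ['alice', 'unknown']
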
Build an expression equivalent to a lookup table
def build_lookup(mapping, var='ptype', default='ptype'): if len(mapping) > 0: return 'where(%s==%s, %s, %s)' % (var, mapping[0][0], mapping[0][1], build_lookup(mapping[1:], var, default)) else: return str(default)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_lookup(mapping, var='ptype', default=0.):\n # force mapping to be a list if it wasn't already\n mapping=list(mapping)\n if len(mapping) > 0:\n return 'where(%s==%s, %s, %s)' % (var, mapping[0][0], mapping[0][1], build_lookup(mapping[1:], var, default))\n else:\n return str(default)", "def __getitem__(self, key: str) -> ir.TableExpr:\n return self.table(key)", "def __getattr__(self, key: str) -> ir.TableExpr:\n return self.table(key)", "def lookup_table():\n datapackage = {\n 'resources': [\n {\n 'schema': {\n 'fields': [\n {\n 'name': 'foo',\n 'maps_to': 'project_id'\n },\n {\n 'name': 'bar',\n 'maps_to': 'invalid_fiscal_field'\n },\n {\n 'name': 'spam',\n 'maps_to': None\n },\n {\n 'name': 'eggs',\n },\n\n ]\n\n }\n }\n ]\n }\n return build_lookup_table(datapackage)", "def build_lookup(self, field):\n lud = defaultdict(list)\n for i, r in enumerate(self.__elements__):\n lud[getattr(r, field)].append(i)\n return dict(lud)", "def build_table(\n self,\n data: Union[ScalarValue, pd.DataFrame, List[ScalarValue], Tuple[ScalarValue]],\n key_columns: Union[List[str], Tuple[str]],\n parameter_columns: Union[List[str], Tuple[str]],\n value_columns: Union[List[str], Tuple[str]],\n ) -> LookupTable:\n table = self._build_table(data, key_columns, parameter_columns, value_columns)\n self._add_constraint(\n table._call, restrict_during=[\"initialization\", \"setup\", \"post_setup\"]\n )\n return table", "def build_table(\n self,\n data: Union[ScalarValue, pd.DataFrame, List[ScalarValue], Tuple[ScalarValue]],\n key_columns: Union[List[str], Tuple[str]] = None,\n parameter_columns: Union[List[str], Tuple[str]] = None,\n value_columns: Union[List[str], Tuple[str]] = None,\n ) -> LookupTable:\n return self._manager.build_table(data, key_columns, parameter_columns, value_columns)", "def make_lookup_table(opt_func, Ms, degree):\n nu_s = np.arange(Ms // 2 + degree + 1, dtype=float) / Ms\n C = calc_C(opt_func.h, opt_func.x0, nu_s, opt_func.W)\n table = [C]\n for d in range(degree):\n C = np.diff(C, 1)\n table.append(C)\n return Lookup(opt_func.W, Ms, table, degree)", "def build_match_clause(field, string):\r\n answer = {}\r\n tmp = {}\r\n tmp[field] = string\r\n answer['match'] = tmp\r\n return answer", "def test_symbol_lookup(self):\n\n def check_lookup(symbol, expected):\n op = BaseWhereOperator.get_operator(symbol)\n self.assertEqual(op, expected)\n\n check_lookup('EQ', EqualsOperator)\n check_lookup('IN', InOperator)\n check_lookup('GT', GreaterThanOperator)\n check_lookup('GTE', GreaterThanOrEqualOperator)\n check_lookup('LT', LessThanOperator)\n check_lookup('LTE', LessThanOrEqualOperator)", "def get_prep_lookup(self, lookup_type, value):\n raise TypeError('Lookup not supported on matrix values')", "def gen_get_const_table(cls, names, p, const_p):\n s = \"// Store constant table for {p} to {const_p}\\n\".format(\n const_p = const_p, p = p\n ) \n s += \"{c} = {t}[(((({p}) + 1) * {mul}) >> 8) & 7];\\n\".format(\n c = const_p, p = p, t = names[cls.T_NAME],\n mul = cls.deBruijnMult\n )\n return s", "def test_symbol_lookup(self):\r\n\r\n def check_lookup(symbol, expected):\r\n op = BaseWhereOperator.get_operator(symbol)\r\n self.assertEqual(op, expected)\r\n\r\n check_lookup('EQ', EqualsOperator)\r\n check_lookup('IN', InOperator)\r\n check_lookup('GT', GreaterThanOperator)\r\n check_lookup('GTE', GreaterThanOrEqualOperator)\r\n check_lookup('LT', LessThanOperator)\r\n check_lookup('LTE', LessThanOrEqualOperator)", "def make_tuple_lookup(columns) -> Callable[[str, str], int]:\n\n # col is a 
hierarchical column index represented by a tuple of strings\n tuple_lookup: Dict[Tuple[str, str], int] = { \n col: i + 1 for i, col in enumerate(columns) \n }\n\n return lambda symbol, metric: tuple_lookup[(symbol, metric)]", "def oracle(t):\n for entry in t.table:\n model = {e.v: e.b for e in entry}\n t.table[entry] = getTruthVal(t.formula, model)", "def substitute_with_bindings(self,bindings):\n\n n_chars = len(self.literal)\n term = ['' for i in range(n_chars)]\n\n for i in range(n_chars):\n if self.literal[i] in bindings:\n term[i] = bindings[self.literal[i]]\n else:\n term[i] = self.literal[i]\n\n return (''.join(term))", "def build_poly_expr(query_tuple):\n print(\"query_tuple: \", query_tuple)\n expression = '0 + '\n factors = np.arange(7)\n\n for coeff, factor in zip(query_tuple, factors):\n if coeff != None:\n expression += '(' + str(np.float64(coeff)) + '*x^{}) + '.format(factor)\n\n # Remove trailing '+'\n expression = expression[:-3]\n \n # Return as a tuple.\n return (expression,)", "def lookup():", "def as_expression(self, bare_lookup, fallback=True):\n language = self.get_language()\n if language == DEFAULT_LANGUAGE:\n return F(self._localized_lookup(language, bare_lookup))\n\n if not fallback:\n i18n_lookup = self._localized_lookup(language, bare_lookup)\n return Cast(i18n_lookup, self.output_field())\n\n fallback_chain = get_fallback_chain(language)\n # First, add the current language to the list of lookups\n lookups = [self._localized_lookup(language, bare_lookup)]\n\n # Optionnally add the lookup for the per-row fallback language\n i18n_field = self.model._meta.get_field(\"i18n\")\n if i18n_field.fallback_language_field:\n lookups.append(\n self._localized_lookup(F(i18n_field.fallback_language_field), bare_lookup)\n )\n\n # and now, add the list of fallback languages to the lookup list\n for fallback_language in fallback_chain:\n lookups.append(self._localized_lookup(fallback_language, bare_lookup))\n return Coalesce(*lookups, output_field=self.output_field())", "def _assemble(self):\n setexpr = ', '.join(\n f'{name} = %({name})s'\n for name in self._valueskw\n )\n froms = 'from ' + ', '.join(self._tables) if self._tables else ''\n kw = self._kw.copy()\n wheres, wkw = self._build_where()\n kw.update(wkw)\n kw.update(self._valueskw)\n return (\n f'update {self._table} '\n f'set {setexpr} '\n f'{froms} '\n f'{wheres}'\n ), kw", "def get_expression(binary_addr, expected_value):\n\n expression = expressions[binary_addr]\n utils.check_expr(expression, expected_value)\n return expression", "def _populate_expr_impl_map(extend_context: bool) -> Dict[int, Dict[str, Callable]]:\n assert isinstance(extend_context, bool)\n # TODO: fill in more\n if extend_context:\n impl_map_0 = {\n \"count\": lambda : pl.col(_da_temp_one_column_name).cumsum(), # ugly SQL def\n \"_count\": lambda : pl.col(_da_temp_one_column_name).cumsum(), # ugly SQL def\n \"cumcount\": lambda : pl.col(_da_temp_one_column_name).cumsum(),\n \"_cumcount\": lambda : pl.col(_da_temp_one_column_name).cumsum(),\n \"row_number\": lambda : pl.col(_da_temp_one_column_name).cumsum(),\n \"_row_number\": lambda : pl.col(_da_temp_one_column_name).cumsum(),\n \"size\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"_size\": lambda : pl.col(_da_temp_one_column_name).sum(),\n }\n else:\n impl_map_0 = {\n \"count\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"_count\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"cumcount\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"_cumcount\": lambda : 
pl.col(_da_temp_one_column_name).sum(),\n \"row_number\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"_row_number\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"size\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"_size\": lambda : pl.col(_da_temp_one_column_name).sum(),\n }\n impl_map_1 = {\n \"+\": lambda x: x,\n \"-\": lambda x: 0 - x,\n \"abs\": lambda x: x.abs(),\n \"all\": lambda x: x.all(),\n \"any\": lambda x: x.any(),\n \"any_value\": lambda x: x.min(),\n \"arccos\": lambda x: x.arccos(),\n \"arccosh\": lambda x: x.arccosh(),\n \"arcsin\": lambda x: x.arcsin(),\n \"arcsinh\": lambda x: x.arcsinh(),\n \"arctan\": lambda x: x.arctan(),\n \"arctan2\": lambda x: x.arctan2(),\n \"arctanh\": lambda x: x.arctanh(),\n \"as_int64\": lambda x: x.cast(int),\n \"as_str\": lambda x: x.cast(str),\n \"base_Sunday\": lambda x: x.base_Sunday(),\n \"bfill\": lambda x: x.fill_null(strategy='backward'),\n \"ceil\": lambda x: x.ceil(),\n \"coalesce0\": lambda x: pl.coalesce(x, _build_lit(0)),\n \"cos\": lambda x: x.cos(),\n \"cosh\": lambda x: x.cosh(),\n \"count\": lambda x: pl.when(x.is_null() | x.is_nan()).then(_build_lit(0)).otherwise(_build_lit(1)).sum(), # not tested yet TODO\n \"cumcount\": lambda x: pl.when(x.is_null() | x.is_nan()).then(_build_lit(0)).otherwise(_build_lit(1)).cumsum(), # not working yet TODO\n \"cummax\": lambda x: x.cummax(),\n \"cummin\": lambda x: x.cummin(),\n \"cumprod\": lambda x: x.cumprod(),\n \"cumsum\": lambda x: x.cumsum(),\n \"datetime_to_date\": lambda x: x.datetime_to_date(),\n \"dayofmonth\": lambda x: x.dayofmonth(),\n \"dayofweek\": lambda x: x.dayofweek(),\n \"dayofyear\": lambda x: x.dayofyear(),\n \"exp\": lambda x: x.exp(),\n \"expm1\": lambda x: x.expm1(),\n \"ffill\": lambda x: x.fill_null(strategy='forward'),\n \"first\": lambda x: x.first(),\n \"floor\": lambda x: x.floor(),\n \"format_date\": lambda x: x.format_date(),\n \"format_datetime\": lambda x: x.format_datetime(),\n \"is_bad\": lambda x: x.is_null() | x.is_infinite() | x.is_nan(), # recommend only for numeric columns\n \"is_inf\": lambda x: x.is_infinite(),\n \"is_nan\": lambda x: x.is_nan(),\n \"is_null\": lambda x: x.is_null(),\n \"last\": lambda x: x.last(),\n \"log\": lambda x: x.log(),\n \"log10\": lambda x: x.log10(),\n \"log1p\": lambda x: x.log1p(),\n \"max\": lambda x: x.max(),\n \"mean\": lambda x: x.mean(),\n \"median\": lambda x: x.median(),\n \"min\": lambda x: x.min(),\n \"month\": lambda x: x.month(),\n \"nunique\": lambda x: x.n_unique(),\n \"quarter\": lambda x: x.quarter(),\n \"rank\": lambda x: x.rank(),\n \"round\": lambda x: x.round(decimals=0),\n \"shift\": lambda x: x.shift(),\n \"sign\": lambda x: x.sign(),\n \"sin\": lambda x: x.sin(),\n \"sinh\": lambda x: x.sinh(),\n \"size\": lambda x: pl.col(_da_temp_one_column_name).sum(),\n \"sqrt\": lambda x: x.sqrt(),\n \"std\": lambda x: x.std(),\n \"sum\": lambda x: x.sum(),\n \"tanh\": lambda x: x.tanh(),\n \"var\": lambda x: x.var(),\n \"weekofyear\": lambda x: x.weekofyear(),\n }\n impl_map_2 = {\n \"-\": lambda a, b: a - b,\n \"**\": lambda a, b: a ** b,\n \"/\": lambda a, b: a / b,\n \"//\": lambda a, b: a // b,\n \"%\": lambda a, b: a % b,\n \"%/%\": lambda a, b: a / b,\n \"around\": lambda a, b: a.round(b),\n \"date_diff\": lambda a, b: a.date_diff(b),\n \"is_in\": lambda a, b: a.is_in(b),\n \"mod\": lambda a, b: a % b,\n \"remainder\": lambda a, b: a % b,\n \"shift\": lambda a, b: a.shift(b),\n \"timestamp_diff\": lambda a, b: a.timestamp_diff(b),\n \"==\": lambda a, b: a == b,\n \"<=\": 
lambda a, b: a <= b,\n \"<\": lambda a, b: a < b, \n \">=\": lambda a, b: a >= b,\n \">\": lambda a, b: a > b, \n \"!=\": lambda a, b: a != b,\n \"not\": lambda x: x == False,\n \"~\": lambda x: x == False,\n \"!\": lambda x: x == False,\n # datetime parsing from https://stackoverflow.com/a/71759536/6901725\n \"parse_date\": lambda x, format : x.cast(str).str.strptime(pl.Date, fmt=format, strict=False).cast(pl.Date),\n \"parse_datetime\": lambda x, format : x.cast(str).str.strptime(pl.Datetime, fmt=format, strict=False).cast(pl.Datetime),\n }\n impl_map_3 = {\n \"if_else\": lambda a, b, c: pl.when(a.is_null()).then(pl.lit(None)).otherwise(pl.when(a).then(b).otherwise(c)),\n \"mapv\": _mapv,\n \"trimstr\": lambda a, b, c: a.trimstr(b, c),\n \"where\": lambda a, b, c: pl.when(a.is_null()).then(c).otherwise(pl.when(a).then(b).otherwise(c)),\n }\n impl_map = {\n 0: impl_map_0,\n 1: impl_map_1,\n 2: impl_map_2,\n 3: impl_map_3,\n }\n # could also key the map by grouped, partitioned, regular situation\n return impl_map", "def get_id(self, expr):\n return self.table.inv[expr]", "def col_expr_builder(self) -> Tuple[List[str], List[TableSpec], List[TableSpec]]:\n error_counter = 0\n target_db_alias = self.job_spec.target_db\n\n # SQL expressions (one for each target colum) that will form the SELECT clause of Glue job\n expressions = []\n source_table_specs: List[TableSpec] = set()\n target_table_specs: List[TableSpec] = set()\n \n for index, row in self.merge_rules.iterrows():\n merge_rule, target_table_alias, target_column = self.extract_from_row(row)\n \n if target_table_alias:\n target_table_spec = TableSpec(target_db_alias, target_table_alias)\n target_table_specs.add(target_table_spec)\n \n try:\n result = self._process_merge_rule(merge_rule, target_table_alias, target_column)\n \n if result.expression:\n translated_expression = GlueTable.translate_alias(result.expression)\n expressions.append(f\"{translated_expression} as {target_column.lower()}\") \n\n if result.join:\n join_spec = result.join\n print(f\"join_spec: {join_spec}\")\n \n self.join_map.add_spec(join_spec)\n \n if result.table_specs:\n source_table_specs.update(result.table_specs)\n \n except Exception as exc:\n # DEBUG\n print(row)\n print(exc)\n error_counter += 1\n\n if error_counter:\n print(f\"Errors encountered: {error_counter}\")\n \n return (\n expressions,\n source_table_specs,\n target_table_specs,\n )", "def _sym_constant(self, table: Mapping[int, str]) -> str:\n try:\n return table[self.sym]\n except KeyError:\n return str(self.sym)", "def _group2formula(self, elem_dict):\n formula = \"\"\n for key, value in elem_dict:\n formula += \"{}{}\".format(key, value)\n return formula", "def convert_lookup_table(g, op, block):\n\n indices = g.get_node(op.input(\"Ids\")[0])\n padding_idx = op.attr(\"padding_idx\")\n weights = g.get_node(op.input(\"W\")[0])\n if padding_idx != -1:\n if op.input(\"W\")[0] in g.get_params():\n weights = g.get_params(op.input(\"W\")[0])\n weights[padding_idx] = 0.0\n weights = _expr.const(weights)\n else:\n shape, infered = try_infer_value(shape_of(weights), g.get_params())\n if infered:\n shape = shape.tolist()\n assert not isinstance(\n shape, _expr.Expr\n ), \"Shape of weight has to be fixed for PaddlePaddle's lookup_table\"\n filters = np.ones(shape).astype(infer_type(weights).checked_type.dtype)\n filters[padding_idx] = 0.0\n filters = _expr.const(filters)\n weights = weights * filters\n out = _op.take(weights, indices.astype(\"int32\"), axis=0)\n g.add_node(op.output(\"Out\")[0], out)", "def 
_build_expression(self, exp, object_class, fields):\n if \"op\" not in exp:\n return None\n\n def autocast(o_key, value):\n \"\"\"Try to guess the type of `value` and parse it from the string.\"\"\"\n if not isinstance(o_key, (str, unicode)):\n return value\n key, _ = self.attr_name_map[object_class].get(o_key, (o_key, None))\n # handle dates\n if (\"date\" in key and \"relative\" not in key) or \\\n key in [\"end_date\", \"start_date\"]:\n if isinstance(value, datetime.date):\n return value\n try:\n month, day, year = [int(part) for part in value.split(\"/\")]\n return datetime.date(year, month, day)\n except Exception:\n raise BadQueryException(\"Field \\\"{}\\\" expects a MM/DD/YYYY date\"\n .format(o_key))\n # fallback\n return value\n\n def relevant():\n \"\"\"Filter by relevant object.\"\"\"\n query = (self.query[exp[\"ids\"][0]]\n if exp[\"object_name\"] == \"__previous__\" else exp)\n return object_class.id.in_(\n RelationshipHelper.get_ids_related_to(\n object_class.__name__,\n query[\"object_name\"],\n query[\"ids\"],\n )\n )\n\n def unknown():\n raise BadQueryException(\"Unknown operator \\\"{}\\\"\"\n .format(exp[\"op\"][\"name\"]))\n\n def with_key(key, p):\n key = key.lower()\n key, filter_by = self.attr_name_map[\n object_class].get(key, (key, None))\n if hasattr(filter_by, \"__call__\"):\n return filter_by(p)\n else:\n attr = getattr(object_class, key, None)\n if attr is None:\n raise BadQueryException(\"Bad query: object '{}' does \"\n \"not have attribute '{}'.\"\n .format(object_class.__name__, key))\n return p(attr)\n\n with_left = lambda p: with_key(exp[\"left\"], p)\n\n lift_bin = lambda f: f(self._build_expression(exp[\"left\"], object_class,\n fields),\n self._build_expression(exp[\"right\"], object_class,\n fields))\n\n def text_search():\n \"\"\"Filter by text search.\n\n The search is done only in fields listed in external `fields` var.\n \"\"\"\n existing_fields = self.attr_name_map[object_class]\n text = \"%{}%\".format(exp[\"text\"])\n p = lambda f: f.ilike(text)\n return or_(*(\n with_key(field, p)\n for field in fields\n if field in existing_fields\n ))\n\n rhs = lambda: autocast(exp[\"left\"], exp[\"right\"])\n\n ops = {\n \"AND\": lambda: lift_bin(and_),\n \"OR\": lambda: lift_bin(or_),\n \"=\": lambda: with_left(lambda l: l == rhs()),\n \"!=\": lambda: not_(with_left(\n lambda l: l == rhs())),\n \"~\": lambda: with_left(lambda l:\n l.ilike(\"%{}%\".format(rhs()))),\n \"!~\": lambda: not_(with_left(\n lambda l: l.ilike(\"%{}%\".format(rhs())))),\n \"<\": lambda: with_left(lambda l: l < rhs()),\n \">\": lambda: with_left(lambda l: l > rhs()),\n \"relevant\": relevant,\n \"text_search\": text_search\n }\n\n return ops.get(exp[\"op\"][\"name\"], unknown)()", "def _extract_lookup(self, key):\n parts = key.rsplit(\"__\", 1)\n\n if len(parts) > 1 and parts[1] in operators:\n op = parts[1]\n attribute = parts[0]\n else:\n # 'exact' is the default lookup if there was no explicit comparison op in `key`\n op = \"exact\"\n attribute = key\n\n # Construct and assign the lookup class as a filter criteria\n return attribute, self.get_lookup(op)", "def lookup(self, key):" ]
[ "0.6523732", "0.61150914", "0.5955717", "0.570927", "0.56178665", "0.5579834", "0.5579453", "0.54781777", "0.5457547", "0.5432533", "0.5425061", "0.5388716", "0.5387384", "0.5381003", "0.5378903", "0.53568715", "0.5315897", "0.52996916", "0.5293774", "0.528251", "0.5204979", "0.51445234", "0.5133434", "0.5095085", "0.5093828", "0.50843155", "0.5076597", "0.5069648", "0.5064746", "0.506319" ]
0.64584607
1
Build an expression equivalent to a lookup table
def build_lookup(mapping, var='ptype', default=0.):
    # force mapping to be a list if it wasn't already
    mapping = list(mapping)
    if len(mapping) > 0:
        return 'where(%s==%s, %s, %s)' % (var, mapping[0][0], mapping[0][1],
                                          build_lookup(mapping[1:], var, default))
    else:
        return str(default)
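A minimal usage sketch for the function above (illustrative only, not part of the original record; the particle-type codes and values are assumed):

expr = build_lookup([(1, 0.5), (2, 0.8)], var='ptype', default=0.)
print(expr)  # where(ptype==1, 0.5, where(ptype==2, 0.8, 0.0))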
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_lookup(mapping, var='ptype', default='ptype'):\n if len(mapping) > 0:\n return 'where(%s==%s, %s, %s)' % (var, mapping[0][0], mapping[0][1], build_lookup(mapping[1:], var, default))\n else:\n return str(default)", "def __getitem__(self, key: str) -> ir.TableExpr:\n return self.table(key)", "def __getattr__(self, key: str) -> ir.TableExpr:\n return self.table(key)", "def lookup_table():\n datapackage = {\n 'resources': [\n {\n 'schema': {\n 'fields': [\n {\n 'name': 'foo',\n 'maps_to': 'project_id'\n },\n {\n 'name': 'bar',\n 'maps_to': 'invalid_fiscal_field'\n },\n {\n 'name': 'spam',\n 'maps_to': None\n },\n {\n 'name': 'eggs',\n },\n\n ]\n\n }\n }\n ]\n }\n return build_lookup_table(datapackage)", "def build_lookup(self, field):\n lud = defaultdict(list)\n for i, r in enumerate(self.__elements__):\n lud[getattr(r, field)].append(i)\n return dict(lud)", "def build_table(\n self,\n data: Union[ScalarValue, pd.DataFrame, List[ScalarValue], Tuple[ScalarValue]],\n key_columns: Union[List[str], Tuple[str]],\n parameter_columns: Union[List[str], Tuple[str]],\n value_columns: Union[List[str], Tuple[str]],\n ) -> LookupTable:\n table = self._build_table(data, key_columns, parameter_columns, value_columns)\n self._add_constraint(\n table._call, restrict_during=[\"initialization\", \"setup\", \"post_setup\"]\n )\n return table", "def build_table(\n self,\n data: Union[ScalarValue, pd.DataFrame, List[ScalarValue], Tuple[ScalarValue]],\n key_columns: Union[List[str], Tuple[str]] = None,\n parameter_columns: Union[List[str], Tuple[str]] = None,\n value_columns: Union[List[str], Tuple[str]] = None,\n ) -> LookupTable:\n return self._manager.build_table(data, key_columns, parameter_columns, value_columns)", "def make_lookup_table(opt_func, Ms, degree):\n nu_s = np.arange(Ms // 2 + degree + 1, dtype=float) / Ms\n C = calc_C(opt_func.h, opt_func.x0, nu_s, opt_func.W)\n table = [C]\n for d in range(degree):\n C = np.diff(C, 1)\n table.append(C)\n return Lookup(opt_func.W, Ms, table, degree)", "def build_match_clause(field, string):\r\n answer = {}\r\n tmp = {}\r\n tmp[field] = string\r\n answer['match'] = tmp\r\n return answer", "def test_symbol_lookup(self):\n\n def check_lookup(symbol, expected):\n op = BaseWhereOperator.get_operator(symbol)\n self.assertEqual(op, expected)\n\n check_lookup('EQ', EqualsOperator)\n check_lookup('IN', InOperator)\n check_lookup('GT', GreaterThanOperator)\n check_lookup('GTE', GreaterThanOrEqualOperator)\n check_lookup('LT', LessThanOperator)\n check_lookup('LTE', LessThanOrEqualOperator)", "def get_prep_lookup(self, lookup_type, value):\n raise TypeError('Lookup not supported on matrix values')", "def gen_get_const_table(cls, names, p, const_p):\n s = \"// Store constant table for {p} to {const_p}\\n\".format(\n const_p = const_p, p = p\n ) \n s += \"{c} = {t}[(((({p}) + 1) * {mul}) >> 8) & 7];\\n\".format(\n c = const_p, p = p, t = names[cls.T_NAME],\n mul = cls.deBruijnMult\n )\n return s", "def test_symbol_lookup(self):\r\n\r\n def check_lookup(symbol, expected):\r\n op = BaseWhereOperator.get_operator(symbol)\r\n self.assertEqual(op, expected)\r\n\r\n check_lookup('EQ', EqualsOperator)\r\n check_lookup('IN', InOperator)\r\n check_lookup('GT', GreaterThanOperator)\r\n check_lookup('GTE', GreaterThanOrEqualOperator)\r\n check_lookup('LT', LessThanOperator)\r\n check_lookup('LTE', LessThanOrEqualOperator)", "def make_tuple_lookup(columns) -> Callable[[str, str], int]:\n\n # col is a hierarchical column index represented by a tuple of strings\n tuple_lookup: 
Dict[Tuple[str, str], int] = { \n col: i + 1 for i, col in enumerate(columns) \n }\n\n return lambda symbol, metric: tuple_lookup[(symbol, metric)]", "def oracle(t):\n for entry in t.table:\n model = {e.v: e.b for e in entry}\n t.table[entry] = getTruthVal(t.formula, model)", "def substitute_with_bindings(self,bindings):\n\n n_chars = len(self.literal)\n term = ['' for i in range(n_chars)]\n\n for i in range(n_chars):\n if self.literal[i] in bindings:\n term[i] = bindings[self.literal[i]]\n else:\n term[i] = self.literal[i]\n\n return (''.join(term))", "def build_poly_expr(query_tuple):\n print(\"query_tuple: \", query_tuple)\n expression = '0 + '\n factors = np.arange(7)\n\n for coeff, factor in zip(query_tuple, factors):\n if coeff != None:\n expression += '(' + str(np.float64(coeff)) + '*x^{}) + '.format(factor)\n\n # Remove trailing '+'\n expression = expression[:-3]\n \n # Return as a tuple.\n return (expression,)", "def lookup():", "def as_expression(self, bare_lookup, fallback=True):\n language = self.get_language()\n if language == DEFAULT_LANGUAGE:\n return F(self._localized_lookup(language, bare_lookup))\n\n if not fallback:\n i18n_lookup = self._localized_lookup(language, bare_lookup)\n return Cast(i18n_lookup, self.output_field())\n\n fallback_chain = get_fallback_chain(language)\n # First, add the current language to the list of lookups\n lookups = [self._localized_lookup(language, bare_lookup)]\n\n # Optionnally add the lookup for the per-row fallback language\n i18n_field = self.model._meta.get_field(\"i18n\")\n if i18n_field.fallback_language_field:\n lookups.append(\n self._localized_lookup(F(i18n_field.fallback_language_field), bare_lookup)\n )\n\n # and now, add the list of fallback languages to the lookup list\n for fallback_language in fallback_chain:\n lookups.append(self._localized_lookup(fallback_language, bare_lookup))\n return Coalesce(*lookups, output_field=self.output_field())", "def _assemble(self):\n setexpr = ', '.join(\n f'{name} = %({name})s'\n for name in self._valueskw\n )\n froms = 'from ' + ', '.join(self._tables) if self._tables else ''\n kw = self._kw.copy()\n wheres, wkw = self._build_where()\n kw.update(wkw)\n kw.update(self._valueskw)\n return (\n f'update {self._table} '\n f'set {setexpr} '\n f'{froms} '\n f'{wheres}'\n ), kw", "def get_expression(binary_addr, expected_value):\n\n expression = expressions[binary_addr]\n utils.check_expr(expression, expected_value)\n return expression", "def _populate_expr_impl_map(extend_context: bool) -> Dict[int, Dict[str, Callable]]:\n assert isinstance(extend_context, bool)\n # TODO: fill in more\n if extend_context:\n impl_map_0 = {\n \"count\": lambda : pl.col(_da_temp_one_column_name).cumsum(), # ugly SQL def\n \"_count\": lambda : pl.col(_da_temp_one_column_name).cumsum(), # ugly SQL def\n \"cumcount\": lambda : pl.col(_da_temp_one_column_name).cumsum(),\n \"_cumcount\": lambda : pl.col(_da_temp_one_column_name).cumsum(),\n \"row_number\": lambda : pl.col(_da_temp_one_column_name).cumsum(),\n \"_row_number\": lambda : pl.col(_da_temp_one_column_name).cumsum(),\n \"size\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"_size\": lambda : pl.col(_da_temp_one_column_name).sum(),\n }\n else:\n impl_map_0 = {\n \"count\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"_count\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"cumcount\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"_cumcount\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"row_number\": lambda : 
pl.col(_da_temp_one_column_name).sum(),\n \"_row_number\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"size\": lambda : pl.col(_da_temp_one_column_name).sum(),\n \"_size\": lambda : pl.col(_da_temp_one_column_name).sum(),\n }\n impl_map_1 = {\n \"+\": lambda x: x,\n \"-\": lambda x: 0 - x,\n \"abs\": lambda x: x.abs(),\n \"all\": lambda x: x.all(),\n \"any\": lambda x: x.any(),\n \"any_value\": lambda x: x.min(),\n \"arccos\": lambda x: x.arccos(),\n \"arccosh\": lambda x: x.arccosh(),\n \"arcsin\": lambda x: x.arcsin(),\n \"arcsinh\": lambda x: x.arcsinh(),\n \"arctan\": lambda x: x.arctan(),\n \"arctan2\": lambda x: x.arctan2(),\n \"arctanh\": lambda x: x.arctanh(),\n \"as_int64\": lambda x: x.cast(int),\n \"as_str\": lambda x: x.cast(str),\n \"base_Sunday\": lambda x: x.base_Sunday(),\n \"bfill\": lambda x: x.fill_null(strategy='backward'),\n \"ceil\": lambda x: x.ceil(),\n \"coalesce0\": lambda x: pl.coalesce(x, _build_lit(0)),\n \"cos\": lambda x: x.cos(),\n \"cosh\": lambda x: x.cosh(),\n \"count\": lambda x: pl.when(x.is_null() | x.is_nan()).then(_build_lit(0)).otherwise(_build_lit(1)).sum(), # not tested yet TODO\n \"cumcount\": lambda x: pl.when(x.is_null() | x.is_nan()).then(_build_lit(0)).otherwise(_build_lit(1)).cumsum(), # not working yet TODO\n \"cummax\": lambda x: x.cummax(),\n \"cummin\": lambda x: x.cummin(),\n \"cumprod\": lambda x: x.cumprod(),\n \"cumsum\": lambda x: x.cumsum(),\n \"datetime_to_date\": lambda x: x.datetime_to_date(),\n \"dayofmonth\": lambda x: x.dayofmonth(),\n \"dayofweek\": lambda x: x.dayofweek(),\n \"dayofyear\": lambda x: x.dayofyear(),\n \"exp\": lambda x: x.exp(),\n \"expm1\": lambda x: x.expm1(),\n \"ffill\": lambda x: x.fill_null(strategy='forward'),\n \"first\": lambda x: x.first(),\n \"floor\": lambda x: x.floor(),\n \"format_date\": lambda x: x.format_date(),\n \"format_datetime\": lambda x: x.format_datetime(),\n \"is_bad\": lambda x: x.is_null() | x.is_infinite() | x.is_nan(), # recommend only for numeric columns\n \"is_inf\": lambda x: x.is_infinite(),\n \"is_nan\": lambda x: x.is_nan(),\n \"is_null\": lambda x: x.is_null(),\n \"last\": lambda x: x.last(),\n \"log\": lambda x: x.log(),\n \"log10\": lambda x: x.log10(),\n \"log1p\": lambda x: x.log1p(),\n \"max\": lambda x: x.max(),\n \"mean\": lambda x: x.mean(),\n \"median\": lambda x: x.median(),\n \"min\": lambda x: x.min(),\n \"month\": lambda x: x.month(),\n \"nunique\": lambda x: x.n_unique(),\n \"quarter\": lambda x: x.quarter(),\n \"rank\": lambda x: x.rank(),\n \"round\": lambda x: x.round(decimals=0),\n \"shift\": lambda x: x.shift(),\n \"sign\": lambda x: x.sign(),\n \"sin\": lambda x: x.sin(),\n \"sinh\": lambda x: x.sinh(),\n \"size\": lambda x: pl.col(_da_temp_one_column_name).sum(),\n \"sqrt\": lambda x: x.sqrt(),\n \"std\": lambda x: x.std(),\n \"sum\": lambda x: x.sum(),\n \"tanh\": lambda x: x.tanh(),\n \"var\": lambda x: x.var(),\n \"weekofyear\": lambda x: x.weekofyear(),\n }\n impl_map_2 = {\n \"-\": lambda a, b: a - b,\n \"**\": lambda a, b: a ** b,\n \"/\": lambda a, b: a / b,\n \"//\": lambda a, b: a // b,\n \"%\": lambda a, b: a % b,\n \"%/%\": lambda a, b: a / b,\n \"around\": lambda a, b: a.round(b),\n \"date_diff\": lambda a, b: a.date_diff(b),\n \"is_in\": lambda a, b: a.is_in(b),\n \"mod\": lambda a, b: a % b,\n \"remainder\": lambda a, b: a % b,\n \"shift\": lambda a, b: a.shift(b),\n \"timestamp_diff\": lambda a, b: a.timestamp_diff(b),\n \"==\": lambda a, b: a == b,\n \"<=\": lambda a, b: a <= b,\n \"<\": lambda a, b: a < b, \n \">=\": lambda a, 
b: a >= b,\n \">\": lambda a, b: a > b, \n \"!=\": lambda a, b: a != b,\n \"not\": lambda x: x == False,\n \"~\": lambda x: x == False,\n \"!\": lambda x: x == False,\n # datetime parsing from https://stackoverflow.com/a/71759536/6901725\n \"parse_date\": lambda x, format : x.cast(str).str.strptime(pl.Date, fmt=format, strict=False).cast(pl.Date),\n \"parse_datetime\": lambda x, format : x.cast(str).str.strptime(pl.Datetime, fmt=format, strict=False).cast(pl.Datetime),\n }\n impl_map_3 = {\n \"if_else\": lambda a, b, c: pl.when(a.is_null()).then(pl.lit(None)).otherwise(pl.when(a).then(b).otherwise(c)),\n \"mapv\": _mapv,\n \"trimstr\": lambda a, b, c: a.trimstr(b, c),\n \"where\": lambda a, b, c: pl.when(a.is_null()).then(c).otherwise(pl.when(a).then(b).otherwise(c)),\n }\n impl_map = {\n 0: impl_map_0,\n 1: impl_map_1,\n 2: impl_map_2,\n 3: impl_map_3,\n }\n # could also key the map by grouped, partitioned, regular situation\n return impl_map", "def get_id(self, expr):\n return self.table.inv[expr]", "def col_expr_builder(self) -> Tuple[List[str], List[TableSpec], List[TableSpec]]:\n error_counter = 0\n target_db_alias = self.job_spec.target_db\n\n # SQL expressions (one for each target colum) that will form the SELECT clause of Glue job\n expressions = []\n source_table_specs: List[TableSpec] = set()\n target_table_specs: List[TableSpec] = set()\n \n for index, row in self.merge_rules.iterrows():\n merge_rule, target_table_alias, target_column = self.extract_from_row(row)\n \n if target_table_alias:\n target_table_spec = TableSpec(target_db_alias, target_table_alias)\n target_table_specs.add(target_table_spec)\n \n try:\n result = self._process_merge_rule(merge_rule, target_table_alias, target_column)\n \n if result.expression:\n translated_expression = GlueTable.translate_alias(result.expression)\n expressions.append(f\"{translated_expression} as {target_column.lower()}\") \n\n if result.join:\n join_spec = result.join\n print(f\"join_spec: {join_spec}\")\n \n self.join_map.add_spec(join_spec)\n \n if result.table_specs:\n source_table_specs.update(result.table_specs)\n \n except Exception as exc:\n # DEBUG\n print(row)\n print(exc)\n error_counter += 1\n\n if error_counter:\n print(f\"Errors encountered: {error_counter}\")\n \n return (\n expressions,\n source_table_specs,\n target_table_specs,\n )", "def _sym_constant(self, table: Mapping[int, str]) -> str:\n try:\n return table[self.sym]\n except KeyError:\n return str(self.sym)", "def _group2formula(self, elem_dict):\n formula = \"\"\n for key, value in elem_dict:\n formula += \"{}{}\".format(key, value)\n return formula", "def convert_lookup_table(g, op, block):\n\n indices = g.get_node(op.input(\"Ids\")[0])\n padding_idx = op.attr(\"padding_idx\")\n weights = g.get_node(op.input(\"W\")[0])\n if padding_idx != -1:\n if op.input(\"W\")[0] in g.get_params():\n weights = g.get_params(op.input(\"W\")[0])\n weights[padding_idx] = 0.0\n weights = _expr.const(weights)\n else:\n shape, infered = try_infer_value(shape_of(weights), g.get_params())\n if infered:\n shape = shape.tolist()\n assert not isinstance(\n shape, _expr.Expr\n ), \"Shape of weight has to be fixed for PaddlePaddle's lookup_table\"\n filters = np.ones(shape).astype(infer_type(weights).checked_type.dtype)\n filters[padding_idx] = 0.0\n filters = _expr.const(filters)\n weights = weights * filters\n out = _op.take(weights, indices.astype(\"int32\"), axis=0)\n g.add_node(op.output(\"Out\")[0], out)", "def _build_expression(self, exp, object_class, fields):\n if \"op\" not in 
exp:\n return None\n\n def autocast(o_key, value):\n \"\"\"Try to guess the type of `value` and parse it from the string.\"\"\"\n if not isinstance(o_key, (str, unicode)):\n return value\n key, _ = self.attr_name_map[object_class].get(o_key, (o_key, None))\n # handle dates\n if (\"date\" in key and \"relative\" not in key) or \\\n key in [\"end_date\", \"start_date\"]:\n if isinstance(value, datetime.date):\n return value\n try:\n month, day, year = [int(part) for part in value.split(\"/\")]\n return datetime.date(year, month, day)\n except Exception:\n raise BadQueryException(\"Field \\\"{}\\\" expects a MM/DD/YYYY date\"\n .format(o_key))\n # fallback\n return value\n\n def relevant():\n \"\"\"Filter by relevant object.\"\"\"\n query = (self.query[exp[\"ids\"][0]]\n if exp[\"object_name\"] == \"__previous__\" else exp)\n return object_class.id.in_(\n RelationshipHelper.get_ids_related_to(\n object_class.__name__,\n query[\"object_name\"],\n query[\"ids\"],\n )\n )\n\n def unknown():\n raise BadQueryException(\"Unknown operator \\\"{}\\\"\"\n .format(exp[\"op\"][\"name\"]))\n\n def with_key(key, p):\n key = key.lower()\n key, filter_by = self.attr_name_map[\n object_class].get(key, (key, None))\n if hasattr(filter_by, \"__call__\"):\n return filter_by(p)\n else:\n attr = getattr(object_class, key, None)\n if attr is None:\n raise BadQueryException(\"Bad query: object '{}' does \"\n \"not have attribute '{}'.\"\n .format(object_class.__name__, key))\n return p(attr)\n\n with_left = lambda p: with_key(exp[\"left\"], p)\n\n lift_bin = lambda f: f(self._build_expression(exp[\"left\"], object_class,\n fields),\n self._build_expression(exp[\"right\"], object_class,\n fields))\n\n def text_search():\n \"\"\"Filter by text search.\n\n The search is done only in fields listed in external `fields` var.\n \"\"\"\n existing_fields = self.attr_name_map[object_class]\n text = \"%{}%\".format(exp[\"text\"])\n p = lambda f: f.ilike(text)\n return or_(*(\n with_key(field, p)\n for field in fields\n if field in existing_fields\n ))\n\n rhs = lambda: autocast(exp[\"left\"], exp[\"right\"])\n\n ops = {\n \"AND\": lambda: lift_bin(and_),\n \"OR\": lambda: lift_bin(or_),\n \"=\": lambda: with_left(lambda l: l == rhs()),\n \"!=\": lambda: not_(with_left(\n lambda l: l == rhs())),\n \"~\": lambda: with_left(lambda l:\n l.ilike(\"%{}%\".format(rhs()))),\n \"!~\": lambda: not_(with_left(\n lambda l: l.ilike(\"%{}%\".format(rhs())))),\n \"<\": lambda: with_left(lambda l: l < rhs()),\n \">\": lambda: with_left(lambda l: l > rhs()),\n \"relevant\": relevant,\n \"text_search\": text_search\n }\n\n return ops.get(exp[\"op\"][\"name\"], unknown)()", "def _extract_lookup(self, key):\n parts = key.rsplit(\"__\", 1)\n\n if len(parts) > 1 and parts[1] in operators:\n op = parts[1]\n attribute = parts[0]\n else:\n # 'exact' is the default lookup if there was no explicit comparison op in `key`\n op = \"exact\"\n attribute = key\n\n # Construct and assign the lookup class as a filter criteria\n return attribute, self.get_lookup(op)", "def lookup(self, key):" ]
[ "0.64584607", "0.61150914", "0.5955717", "0.570927", "0.56178665", "0.5579834", "0.5579453", "0.54781777", "0.5457547", "0.5432533", "0.5425061", "0.5388716", "0.5387384", "0.5381003", "0.5378903", "0.53568715", "0.5315897", "0.52996916", "0.5293774", "0.528251", "0.5204979", "0.51445234", "0.5133434", "0.5095085", "0.5093828", "0.50843155", "0.5076597", "0.5069648", "0.5064746", "0.506319" ]
0.6523732
0
Returns defaultdict with script type / paths mapping, excluding given patterns and python packages.
def walkdirs(root):
    scriptype_paths = collections.defaultdict(set)
    for root, subdirs, files in os.walk(root):
        # Filter subdirs
        tmpdir = []
        for i in subdirs:
            if i.startswith(EXCLUDE_PATTERNS):
                continue
            if '__init__.py' in os.listdir(os.path.join(root, i)):
                scriptype_paths['python'].add(root)
                continue
            tmpdir.append(i)
        subdirs[:] = tmpdir

        # If files with extension exists add to right source type.
        if ext_exists('.py', files):
            scriptype_paths['python'].add(root)
        if ext_exists('.mel', files):
            scriptype_paths['mel'].add(root)
    return scriptype_paths
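A minimal usage sketch for the function above (illustrative only; it relies on the module-level EXCLUDE_PATTERNS and ext_exists, and the root path and results shown are assumed):

scripts = walkdirs('/studio/maya/scripts')
for script_type, dirs in scripts.items():
    print(script_type, sorted(dirs))
# e.g. python ['/studio/maya/scripts/tools']
#      mel    ['/studio/maya/scripts/mel']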
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_extension_to_type_map(file_types):\n extension_to_type = dict()\n for file_type in file_types:\n for file_ext in file_type['extensions']:\n if file_ext not in extension_to_type:\n extension_to_type[file_ext] = file_type\n return extension_to_type", "def run_mapping(path: str):\n config = {\n 'extensions': ['.js', '.jsx', '.ts', '.tsx', '.vue'],\n 'exceptions': [\n 'react-app-env.d.ts',\n 'reportWebVitals.ts',\n 'setupTests.ts'\n ],\n 'patterns': [\n {\n 'type': 'startswith',\n 'query': 'import',\n 'on_match': {\n 'add_to': 'imports',\n 'extraction_regex': r'import (\\w+)',\n 'other_property_patterns': [\n {\n 'property_name': 'from_path',\n 'regex': r'from\\s*?(?:\"|\\')(.*)(?:\"|\\')'\n }\n ]\n }\n },\n {\n 'type': 'startswith',\n 'query': 'import',\n 'on_match': {\n 'add_to': 'imports',\n 'extraction_regex': r'(\\w+)',\n 'preprocessing_patterns': [\n '{(.*)}'\n ],\n 'other_property_patterns': [\n {\n 'property_name': 'from_path',\n 'regex': r'from\\s*?(?:\"|\\')(.*)(?:\"|\\')'\n }\n ]\n }\n },\n {\n 'type': 'regex',\n 'query': r'import\\s*?\\(',\n 'on_match': {\n 'add_to': 'imports',\n 'extraction_regex': r'/(\\w+)\\'',\n 'other_property_patterns': [\n {\n 'property_name': 'from_path',\n 'regex': r'import\\s*?\\(\\'(.*)\\'\\)'\n }\n ]\n }\n },\n {\n 'type': 'contains',\n 'query': '}',\n 'on_match': {\n 'add_to': 'blocks',\n 'extraction_regex': r'(\\w+)',\n 'preprocessing_patterns': [\n '{(.*)}'\n ],\n 'other_property_patterns': [\n {\n 'property_name': 'closed',\n 'value': True\n }\n ]\n }\n },\n {\n 'type': 'startswith',\n 'query': 'import {',\n 'on_match': {\n 'add_to': 'blocks',\n 'extraction_regex': r'(\\w+)',\n 'preprocessing_patterns': [\n '{(.*)}'\n ],\n 'other_property_patterns': [\n {\n 'property_name': 'closed',\n 'value': False\n }\n ]\n }\n }\n ],\n 'diagrams': [\n {\n 'name': 'dependency-diagram',\n 'mapping_scheme': {\n 'data_list_to_map': 'files_by_path',\n 'data_grouping_map': 'dirs_by_path',\n 'entity_types': [\n {\n 'name': 'file_name',\n 'type': 'class',\n 'match_by': {\n 'field': 'extension',\n 'value': ['jsx', 'tsx', 'vue']\n }\n },\n {\n 'name': 'file_name',\n 'type': 'entity',\n 'match_by': {\n 'field': 'extension',\n 'value': ['js', 'ts']\n }\n }\n ],\n 'entity_name': 'name',\n 'connect_by': 'imports',\n 'connect_path': 'from_path'\n\n }\n }\n ]\n }\n print('generating mapping diagram...')\n print()\n root = path[:-1] if path.endswith('/') else path\n\n # try:\n src_data = scan(root, config)\n draw(src_data, config)\n print()\n print('generation completed!')\n # except:\n # print()\n # print('error - mapping failed')", "def parse_paths():\r\n sources = get_source_paths()\r\n results = collections.defaultdict(list)\r\n for root_dir in sources:\r\n\r\n for script_type, dirs in walkdirs(root_dir).iteritems():\r\n\r\n for d in dirs:\r\n logger.debug(d)\r\n\r\n # Add paths to environments\r\n if os.path.basename(d).lower().startswith(ICONS):\r\n results['XBMLANGPATH'].append(d)\r\n os.environ['XBMLANGPATH'] += os.pathsep + d\r\n\r\n if script_type == 'mel':\r\n results['MAYA_SCRIPT_PATH'].append(d)\r\n os.environ['MAYA_SCRIPT_PATH'] += os.pathsep + d\r\n else:\r\n results['PYTHONPATH'].append(d)\r\n site.addsitedir(d)\r\n return results", "def list_load_entries(filepath):\n try:\n rewriter = rewriter_factory(filepath)\n dynamic_deps = [dep[6:] for dep in rewriter.dependencies if dep.startswith('@rpath')]\n return {'rpaths': rewriter.rpaths, 'libraries': dynamic_deps}\n except MachoError:\n return {'rpaths': [], 'libraries': []}", "def 
find_package_data(module, path):\n files = []\n exclude = re.compile(\"\\.pyc$|~$\")\n for dirpath, dirnames, filenames in os.walk(os.path.join(module,path)):\n for filename in filenames:\n if not exclude.search(filename):\n files.append(os.path.relpath(os.path.join(dirpath,filename),module))\n return {module:files}", "def get_static_regexps():\n handlers = modules_util.module_yaml('default')['handlers']\n retval = set()\n\n for handler in handlers:\n if handler.GetHandlerType() == 'static_dir':\n retval.add('^' + handler.url + '/')\n elif handler.GetHandlerType() == 'static_files':\n retval.add('^' + handler.url + '$')\n\n return sorted(retval)", "def _find_missing_script_paths(self) -> dict:\r\n results: dict = {}\r\n\r\n for object_name, script_path in self.psc_paths.items():\r\n pex_path: str = os.path.join(self.options.output_path, object_name.replace('.psc', '.pex'))\r\n\r\n if not os.path.isfile(pex_path) and script_path not in results:\r\n object_name = script_path if not os.path.isabs(script_path) else self._calculate_object_name(script_path)\r\n results[object_name] = script_path\r\n\r\n return results", "def TYPES():\n\n if config.types_cache:\n return config.types_cache\n\n types = {\n 'actionscript': '.as .mxml',\n 'asm': '.asm .s',\n 'batch': '.bat .cmd',\n #'binary': 'Binary files, as defined by Perl's -B op (default: off)',\n 'cc': '.c .h .xs',\n 'cfmx': '.cfc .cfm .cfml',\n 'cpp': '.cpp .cc .cxx .m .hpp .hh .h .hxx',\n 'csharp': '.cs',\n 'css': '.css',\n 'elisp': '.el',\n 'erlang': '.erl',\n 'fortran': '.f .f77 .f90 .f95 .f03 .for .ftn .fpp',\n 'haskell': '.hs .lhs',\n 'hh': '.h',\n 'html': '.htm .html .shtml .xhtml',\n 'java': '.java .properties',\n 'js': '.js',\n 'jsp': '.jsp .jspx .jhtm .jhtml',\n 'lisp': '.lisp .lsp',\n 'lua': '.lua',\n 'make': 'Makefiles',\n 'mason': '.mas .mhtml .mpl .mtxt',\n 'objc': '.m .h',\n 'objcpp': '.mm .h',\n 'ocaml': '.ml .mli',\n 'parrot': '.pir .pasm .pmc .ops .pod .pg .tg',\n 'perl': '.pl .pm .pod .t',\n 'php': '.php .phpt .php3 .php4 .php5',\n 'plone': '.pt .cpt .metadata .cpy',\n 'python': '.py',\n 'ruby': '.rb .rhtml .rjs .rxml .erb',\n 'scheme': '.scm',\n 'shell': '.sh .bash .csh .tcsh .ksh .zsh',\n 'smalltalk': '.st',\n 'sql': '.sql .ctl',\n 'tcl': '.tcl .itcl .itk',\n 'tex': '.tex .cls .sty',\n 'tt': '.tt .tt2 .ttml',\n 'vb': '.bas .cls .frm .ctl .vb .resx',\n 'vim': '.vim',\n 'xml': '.xml .dtd .xslt .ent',\n 'yaml': '.yaml .yml',\n }\n\n items = {}\n for ftype, ext_list in types.iteritems():\n items[ftype] = ext_list.split()\n\n config.types_cache = items\n return items", "def _LoadPackages():\n return {module.__name__.split('.')[-1]: module for module in\n import_util.LoadModulesForPath(__path__, __name__)}", "def get_pathes(self) -> Dict[str, str]:\n\n pathes: Dict[str, str] = {}\n\n for path in self.files:\n name = path.split(\"/\")[-1].split(\".\")[0]\n pathes[name] = os.path.join(self.home_folder, path)\n return pathes", "def patterns() -> List[Dict[str, Any]]:\n patterns = [\n {\"label\": \"DRUG\", \"pattern\": \"Zithromax\", \"type\": \"fuzzy\", \"id\": \"Antibiotic\"},\n {\"label\": \"GPE\", \"pattern\": \"Mahwahe\", \"type\": \"fuzzy\"},\n {\"label\": \"GPE\", \"pattern\": \"Mahwah\", \"type\": \"fuzzy\"},\n {\n \"label\": \"NAME\",\n \"pattern\": \"Grant Andersen\",\n \"type\": \"fuzzy\",\n \"kwargs\": {\"fuzzy_func\": \"token_sort\"},\n \"id\": \"Developer\",\n },\n {\n \"label\": \"NAME\",\n \"pattern\": \"Garth Andersen\",\n \"type\": \"fuzzy\",\n \"kwargs\": {\"fuzzy_func\": \"token_sort\"},\n \"id\": \"Developer\",\n 
},\n {\n \"label\": \"STREET\",\n \"pattern\": \"street_addresses\",\n \"type\": \"regex\",\n \"kwargs\": {\"predef\": True},\n },\n {\n \"label\": \"GPE\",\n \"pattern\": \"(?i)[U](nited|\\\\.?) ?[S](tates|\\\\.?)\",\n \"type\": \"regex\",\n \"id\": \"USA\",\n },\n {\"label\": \"GPE\", \"pattern\": \"(?:USR){e<=1}\", \"type\": \"regex\", \"id\": \"USA\"},\n {\"label\": \"GPE\", \"pattern\": \"(?:USSR){d<=1, s<=1}\", \"type\": \"regex\"},\n {\n \"label\": \"BAND\",\n \"pattern\": [{\"LOWER\": {\"FREGEX\": \"(converge){e<=1}\"}}],\n \"type\": \"token\",\n },\n {\n \"label\": \"BAND\",\n \"pattern\": [\n {\"TEXT\": {\"FUZZY\": \"Protest\"}},\n {\"IS_STOP\": True},\n {\"TEXT\": {\"FUZZY\": \"Hero\"}},\n ],\n \"type\": \"token\",\n \"id\": \"Metal\",\n },\n ]\n return patterns # type: ignore", "def create_script_dict(pk_type, path, file, skip_lines, encoding):\n dict_values = pk_type.__dict__\n try:\n resources = pk_type.get_resources(file_path=path,\n skip_lines=skip_lines,\n encoding=encoding)\n except Exception as error:\n print(\"Skipped file: \", file, error)\n print(\"Remove the file from the folder and try again\")\n exit()\n dict_values.setdefault(\"resources\", []).append(resources)\n return dict_values", "def replace_paths(value):\n return {\n str: lambda: value.replace('__FILE__', sys.path[0]),\n list: lambda: [replace_paths(elt) for elt in value],\n dict: lambda: {key: replace_paths(val) for key, val in value.items()},\n OrderedDict: (lambda:\n OrderedDict((key, replace_paths(val)) for key, val in value.items()))\n }.get(type(value), lambda: value)()", "def find(cls, paths):\r\n pythons = []\r\n for path in paths:\r\n for fn in cls.expand_path(path):\r\n basefile = os.path.basename(fn)\r\n if any(matcher.match(basefile) is not None for matcher in cls.REGEXEN):\r\n try:\r\n pythons.append(cls.from_binary(fn))\r\n except Exception as e:\r\n TRACER.log('Could not identify %s: %s' % (fn, e))\r\n continue\r\n return pythons", "def find_maps(\n pattern: str, search_restriction: int = 0, paths: bool = False\n ) -> Set[MapInfo]:\n recursive = True\n\n search_dirs = {\n 0: CacheManagerSingleton.UNPROCESSED_MAPS_PARENT,\n 1: \"\",\n 2: CacheManagerSingleton.GENERATED_MAPS_PARENTS,\n }\n\n matching_filepaths = glob.glob(\n os.path.join(\n CacheManagerSingleton.CACHE_PATH,\n os.path.join(\n search_dirs[search_restriction],\n \"**\",\n pattern,\n ),\n ),\n recursive=recursive,\n )\n\n if paths:\n return matching_filepaths\n\n matches: Set[MapInfo] = set()\n for match in matching_filepaths:\n if os.path.isdir(match):\n continue\n map_info = CacheManagerSingleton.map_info_from_path(match)\n if isinstance(map_info, MapInfo):\n matches.add(map_info)\n return matches", "def jinja_files(self) -> Pattern:\n return self._parse_pattern(self.get(\"jinja_files\", None))", "def _load_paths(self):\n ts_paths_map = {}\n for path in Path(self.datadir).rglob(\"roas.*.json.gz\"):\n path = str(path)\n ts = int(path.split(\"/\")[-1].split(\".\")[2])\n ts_paths_map[ts] = path\n return ts_paths_map", "def get_available_patterns() -> list:\n path_folder = os.path.join(config.ROOT_PATH, config.FOLDER_PATTERNS)\n return [f.replace(\".cells\", \"\") for f in os.listdir(path_folder) if os.path.isfile(os.path.join(path_folder, f)) and f.endswith(\".cells\")]", "def mapped(cls, package=None):\n mapping = {}\n for extension in cls.all(package):\n signature = extension.signature()\n assert signature not in mapping, \\\n \"%s and %s have identical signatures: %r\" \\\n % (mapping[signature], extension, signature)\n 
mapping[signature] = extension\n return mapping", "def jinja_files(self) -> Pattern:\n return self._parse_pattern(self.get(\"jinja_files\", \"*.htm?|*.css\"))", "def default_file_hierarchy_dict():\n return {\n directory(\"include\"): {\n directory(\"with spaces\"): {\n file(\"with spaces.hpp\"): {\n namespace(\"with_spaces\"): {\n function(\"int\", \"value\"): parameters()\n }\n }\n }\n }\n }", "def fileTypesCallback(self):\n if self.__e5project.getProjectType() == \"Django\":\n fileTypes = {\n \"*.html\": \"FORMS\",\n \"*.htm\": \"FORMS\",\n \"*.js\": \"SOURCES\",\n \"*.pot\": \"TRANSLATIONS\",\n \"*.po\": \"TRANSLATIONS\",\n \"*.mo\": \"TRANSLATIONS\",\n \"*.py\": \"SOURCES\",\n }\n else:\n fileTypes = {}\n return fileTypes", "def _init_pathinfo():\r\n d = set()\r\n for dir in sys.path:\r\n try:\r\n if os.path.isdir(dir):\r\n dir, dircase = makepath(dir)\r\n d.add(dircase)\r\n except TypeError:\r\n continue\r\n return d", "def get_available_extensions() -> DefaultDict[str, Type]:\n all_extensions:DefaultDict[str, Type] = defaultdict(lambda:False)\n for current_class in Content.__subclasses__():\n for extension in current_class.extensions:\n all_extensions[extension] = current_class\n return all_extensions", "def path_formats():\n return set(path_loaders.keys())", "def _resolve_arguments(patterns, packages_path, search_packages_path):\n\n def _read_patterns(path):\n try:\n with open(path, \"r\") as handler:\n return set(handler.read().splitlines())\n except IOError:\n return set()\n\n ignore_patterns = set()\n\n for item in patterns:\n if os.path.isfile(item) or os.path.isabs(item):\n # This happens in 2 scenarios\n # 1. The user-given pattern is actually a path on-disk\n # 2. The user does bash process substitution (e.g.\n # `rez-batch-process report --ignore-patterns <(cat patterns.txt)`)\n #\n ignore_patterns.update(_read_patterns(item))\n else:\n ignore_patterns.add(item)\n\n if isinstance(packages_path, six.string_types):\n packages_path = packages_path.split(os.pathsep)\n\n if isinstance(search_packages_path, six.string_types):\n search_packages_path = search_packages_path.split(os.pathsep)\n\n return ignore_patterns, packages_path, search_packages_path", "def get_wildcards(inputmap, wildcard_constraints):\n d = {}\n try:\n all_wc = []\n all_files = []\n for wc, filename in inputmap:\n try:\n wc = eval(wc)\n except:\n pass\n wc = update_wildcard_constraints(wc, wildcard_constraints, {})\n all_wc.append(wc)\n if filename is None:\n continue\n if isinstance(filename, str):\n filename = [filename]\n all_files = all_files + filename\n for f in all_files:\n for wc in all_wc:\n wildcards = glob_wildcards(wc, [os.path.basename(f)])\n for k, v in wildcards._asdict().items():\n if len(v) > 0:\n d[k] = v[0]\n except:\n logger.debug(\"Failed to get wildcards for inputmap \", inputmap)\n raise\n return d", "def get_import_lines(\n strategies: Dict[ImportStrategy, Set[str]]\n) -> Dict[Optional[str], Set[str]]:\n import_tuples: List[Tuple[Optional[str], str]] = []\n\n def from_import(name: str) -> Tuple[str, str]:\n if _is_typing_type(name):\n return (\"typing\", name)\n else:\n return cast(Tuple[str, str], tuple(name.rsplit(\".\", maxsplit=1)))\n\n import_tuples.extend(\n from_import(name) for name in strategies.get(ImportStrategy.ADD_FROM, set())\n )\n\n import_tuples.extend(\n (None, name.rsplit(\".\", maxsplit=1)[0])\n for name in strategies.get(ImportStrategy.ADD_DOTTED, set())\n )\n\n imports_dict: Dict[str, Set[str]] = {}\n for left, right in import_tuples:\n imports_dict.setdefault(left, 
set()).add(right)\n\n return imports_dict", "def find_data_files(source, target, patterns):\r\n if glob.has_magic(source) or glob.has_magic(target):\r\n raise ValueError(\"Magic not allowed in src, target\")\r\n ret = {}\r\n for pattern in patterns:\r\n pattern = os.path.join(source, pattern)\r\n for filename in glob.glob(pattern):\r\n if os.path.isfile(filename):\r\n targetpath = os.path.join(target, os.path.relpath(filename,source))\r\n path = os.path.dirname(targetpath)\r\n ret.setdefault(path, []).append(filename)\r\n return sorted(ret.items())", "def _get_psc_paths(self) -> dict:\r\n object_names: dict = {}\r\n\r\n def add_object_name(p: str) -> None:\r\n object_names[p if not os.path.isabs(p) else self._calculate_object_name(p)] = p\r\n\r\n # try to populate paths with scripts from Folders and Scripts nodes\r\n if self.folders_node is not None:\r\n for path in self._get_script_paths_from_folders_node():\r\n add_object_name(path)\r\n\r\n if self.scripts_node is not None:\r\n for path in self._get_script_paths_from_scripts_node():\r\n add_object_name(path)\r\n\r\n # convert user paths to absolute paths\r\n for object_name, script_path in object_names.items():\r\n # ignore existing absolute paths\r\n if os.path.isabs(script_path) and os.path.isfile(script_path):\r\n continue\r\n\r\n # try to add existing project-relative paths\r\n test_path = os.path.join(self.project_path, script_path)\r\n if os.path.isfile(test_path):\r\n object_names[object_name] = test_path\r\n continue\r\n\r\n # try to add existing import-relative paths\r\n for import_path in self.import_paths:\r\n if not os.path.isabs(import_path):\r\n import_path = os.path.join(self.project_path, import_path)\r\n\r\n test_path = os.path.join(import_path, script_path)\r\n if os.path.isfile(test_path):\r\n object_names[object_name] = test_path\r\n break\r\n\r\n PapyrusProject.log.info(f'{len(object_names)} unique script paths resolved to absolute paths.')\r\n\r\n return object_names" ]
[ "0.5768541", "0.569013", "0.5677841", "0.55597234", "0.55592877", "0.55176985", "0.5469804", "0.5445721", "0.53891", "0.53409356", "0.527778", "0.5227336", "0.5217691", "0.5178613", "0.5168109", "0.5162053", "0.5146098", "0.5145351", "0.5128747", "0.51221925", "0.5121463", "0.5105256", "0.5096236", "0.50875235", "0.5085914", "0.50842553", "0.50784314", "0.50781107", "0.50764084", "0.507235" ]
0.6402314
0
Return valid paths from __file__ dir, PYENV and MELENV.
def get_source_paths():
    script_paths = set()
    try:
        script_paths.update(filter(None, os.environ.get(PYENV).split(os.pathsep)))
        script_paths.update(filter(None, os.environ.get(MELENV).split(os.pathsep)))
    except AttributeError:
        logger.debug('No custom environ variables set.')

    cwd = os.path.dirname(os.path.abspath(__file__))
    for each in os.listdir(cwd):
        path = os.path.join(cwd, each)
        if not os.path.isdir(path) or each.startswith(EXCLUDE_PATTERNS):
            continue
        script_paths.add(path)
    return script_paths
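A minimal usage sketch for the function above (illustrative only; PYENV and MELENV are assumed to be module-level names of environment variables holding os.pathsep-separated paths):

for path in sorted(get_source_paths()):
    print(path)
# prints any paths taken from the PYENV/MELENV environment variables plus
# the non-excluded directories that live next to this file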
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_environment_paths(basedir=None):\n basedir = (\n get_default_secrets_basedir() if basedir is None\n else Path(basedir)\n )\n results = list()\n for item in sorted(basedir.iterdir()):\n if is_valid_environment(item):\n results.append(item)\n return results", "def path_list():\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def storer_paths():\n return [dir_unchecked(), dir_checked(),\n dir_backup(), dir_tests()]", "def _init_pathinfo():\r\n d = set()\r\n for dir in sys.path:\r\n try:\r\n if os.path.isdir(dir):\r\n dir, dircase = makepath(dir)\r\n d.add(dircase)\r\n except TypeError:\r\n continue\r\n return d", "def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__", "def test_fspath(self, env: yaenv.Env):\n from os import fspath\n from filecmp import cmp\n assert fspath(env) == 'tests/.env'\n assert cmp(env, 'tests/.env')", "def get_paths():\n\n # Get repo name\n git_repo = git.Repo(__file__, search_parent_directories=True)\n repo = git_repo.git.rev_parse(\"--show-toplevel\")\n\n paths = {\"repo\": repo, \"base\":{}, \"src\":{}, \"data\":{}, \"app\":{}}\n\n for base_dir in [\"data\", \"notebooks\", \"src\", \"model\", \"logs\", \"app\"]:\n\n paths[\"base\"][base_dir] = os.path.join(repo, base_dir)\n test = paths[\"base\"][base_dir].split(base_dir)[-1]\n assert len(test) == 0\n\n for src_dir in [\"conf\", \"data\", \"notebooks\", \"tests\", \"utils\",\n \"visualize\", \"conf\", \"model\"]:\n\n src_base_dir = paths.get(\"base\").get(\"src\")\n paths[\"src\"][src_dir] = os.path.join(src_base_dir, src_dir)\n test = paths[\"src\"][src_dir].split(src_dir)[-1]\n assert len(test) == 0\n\n for data_dir in [\"raw\", \"interim\", \"processed\"]:\n\n data_base_dir = paths.get(\"base\").get(\"data\")\n paths[\"data\"][data_dir] = os.path.join(data_base_dir, data_dir)\n test = paths[\"data\"][data_dir].split(data_dir)[-1]\n assert len(test) == 0\n\n for app_dir in [\"templates\", \"static\"]:\n app_base_dir = paths.get(\"base\").get(\"app\")\n paths[\"app\"][app_dir] = os.path.join(app_base_dir, app_dir)\n\n return paths", "def removeduppaths():\r\n # This ensures that the initial path provided by the interpreter contains\r\n # only absolute pathnames, even if we're running from the build directory.\r\n L = []\r\n known_paths = set()\r\n for dir in sys.path:\r\n # Filter out duplicate paths (on case-insensitive file systems also\r\n # if they only differ in case); turn relative paths into absolute\r\n # paths.\r\n dir, dircase = makepath(dir)\r\n if not dircase in known_paths:\r\n L.append(dir)\r\n known_paths.add(dircase)\r\n sys.path[:] = L\r\n return known_paths", "def parse_paths():\r\n sources = get_source_paths()\r\n results = collections.defaultdict(list)\r\n for root_dir in sources:\r\n\r\n for script_type, dirs in walkdirs(root_dir).iteritems():\r\n\r\n for d in dirs:\r\n logger.debug(d)\r\n\r\n # Add paths to environments\r\n if os.path.basename(d).lower().startswith(ICONS):\r\n results['XBMLANGPATH'].append(d)\r\n os.environ['XBMLANGPATH'] += os.pathsep + d\r\n\r\n if script_type == 'mel':\r\n results['MAYA_SCRIPT_PATH'].append(d)\r\n os.environ['MAYA_SCRIPT_PATH'] += os.pathsep + d\r\n else:\r\n results['PYTHONPATH'].append(d)\r\n site.addsitedir(d)\r\n return results", "def pathext_list():\n return (os.environ.get(\"PATHEXT\") or \"\").split(os.pathsep)", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return 
pathSP", "def site_paths(buildout, prefixes):\n\n def is_buildout_dir(path):\n return path.startswith(buildout['eggs-directory']) or \\\n path.startswith(buildout['develop-eggs-directory'])\n\n def is_in_prefixes(path):\n return any([path.startswith(k) for k in prefixes])\n\n retval = [os.path.realpath(k) for k in site.sys.path]\n return [k for k in retval if not (is_buildout_dir(k) or is_in_prefixes(k))]", "def path(self):\n if self._path:\n return self._path\n path = os.environ[\"PATH\"].split(os.pathsep)\n path = [os.path.expanduser(x) for x in path]\n path = [os.path.abspath(x) for x in path]\n path = [x for x in path if os.path.exists(x)]\n self._path = path\n return self._path", "def filepaths(self):\n pass", "def get_possible_paths():\n yield ('mtad', get_mtad_linter_path())\n yield ('bundled', get_bundled_linter_path())", "def get_default_paths():\n DATA_ROOT = os.environ.get(\"DATA_ROOT\", \"data\")\n defaults = {\n \"TOKENIZE_DATA_DIR\": DATA_ROOT + \"/tokenize\",\n \"MWT_DATA_DIR\": DATA_ROOT + \"/mwt\",\n \"LEMMA_DATA_DIR\": DATA_ROOT + \"/lemma\",\n \"POS_DATA_DIR\": DATA_ROOT + \"/pos\",\n \"DEPPARSE_DATA_DIR\": DATA_ROOT + \"/depparse\",\n \"ETE_DATA_DIR\": DATA_ROOT + \"/ete\",\n \"NER_DATA_DIR\": DATA_ROOT + \"/ner\",\n \"CHARLM_DATA_DIR\": DATA_ROOT + \"/charlm\",\n \"SENTIMENT_DATA_DIR\": DATA_ROOT + \"/sentiment\",\n \"CONSTITUENCY_DATA_DIR\": DATA_ROOT + \"/constituency\",\n\n # Set directories to store external word vector data\n \"WORDVEC_DIR\": \"extern_data/wordvec\",\n\n # TODO: not sure what other people actually have\n # TODO: also, could make this automatically update to the latest\n \"UDBASE\": \"extern_data/ud2/ud-treebanks-v2.11\",\n \"UDBASE_GIT\": \"extern_data/ud2/git\",\n\n \"NERBASE\": \"extern_data/ner\",\n \"CONSTITUENCY_BASE\": \"extern_data/constituency\",\n \"SENTIMENT_BASE\": \"extern_data/sentiment\",\n\n # there's a stanford github, stanfordnlp/handparsed-treebank,\n # with some data for different languages\n \"HANDPARSED_DIR\": \"extern_data/handparsed-treebank\",\n\n # directory with the contents of https://nlp.stanford.edu/projects/stanza/bio/\n # on the cluster, for example, /u/nlp/software/stanza/bio_ud\n \"BIO_UD_DIR\": \"extern_data/bio\",\n\n # data root for other general input files, such as VI_VLSP\n \"EXTERN_DIR\": \"extern_data\",\n }\n\n paths = { \"DATA_ROOT\" : DATA_ROOT }\n for k, v in defaults.items():\n paths[k] = os.environ.get(k, v)\n\n return paths", "def _extract_system_path(self, script):\r\n\r\n DEFAULT_PATH = ['code']\r\n\r\n # Separate paths by :, like the system path.\r\n raw_path = script.get('system_path', '').split(\":\") + DEFAULT_PATH\r\n\r\n # find additional comma-separated modules search path\r\n path = []\r\n\r\n for dir in raw_path:\r\n if not dir:\r\n continue\r\n\r\n # path is an absolute path or a path relative to the data dir\r\n dir = os.path.join(self.capa_system.filestore.root_path, dir)\r\n # Check that we are within the filestore tree.\r\n reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)\r\n if \"..\" in reldir:\r\n log.warning(\"Ignoring Python directory outside of course: %r\", dir)\r\n continue\r\n\r\n abs_dir = os.path.normpath(dir)\r\n path.append(abs_dir)\r\n\r\n return path", "def path_defs(self):\n if self.input_xml is not None:\n self.input_xml = os.path.abspath(os.path.expandvars(self.input_xml))\n if self.pointing_file is not None:\n self.pointing_file = os.path.abspath(os.path.expandvars(self.pointing_file))\n self.output_dir = 
os.path.abspath(os.path.expandvars(self.output_dir))\n self.simdata_output_dir = os.path.abspath(os.path.expandvars(self.simdata_output_dir))\n if self.table_file is not None:\n self.table_file = os.path.abspath(os.path.expandvars(self.table_file))\n\n ensure_dir_exists(self.output_dir)\n ensure_dir_exists(self.simdata_output_dir)\n\n if self.observation_list_file is not None:\n self.observation_list_file = os.path.abspath(os.path.expandvars(self.observation_list_file))", "def _include_paths_from_environ(env_prefix=''):\n paths = os.environ.get(env_prefix + 'WSGI_AUTH_PATHS')\n if not paths:\n return []\n return paths.split(';')", "def test__find_dot_env_file__2():\n base_location = 'test'\n find_launched_location = lambda : join_paths(base_location, '__init__.py')\n is_file = lambda path : True\n \n expected_output = join_paths(base_location, '.env')\n \n find_dot_env_file_copy = FunctionType(\n find_dot_env_file.__code__,\n {**find_dot_env_file.__globals__, 'find_launched_location': find_launched_location, 'is_file': is_file},\n find_dot_env_file.__name__,\n find_dot_env_file.__defaults__,\n find_dot_env_file.__closure__,\n )\n \n output = find_dot_env_file_copy()\n vampytest.assert_instance(output, str, nullable = True)\n vampytest.assert_eq(output, expected_output)", "def _local_dir(self):\n return []", "def __get_environ_path(environ_key):\n environ_value = os.environ.get(environ_key)\n result = []\n\n if not environ_value:\n return result\n\n environ_path_list = environ_value.split(';')\n for each_path in environ_path_list:\n each_path = path.Path(each_path)\n\n if not each_path.exists():\n continue\n\n # make sure default directory first in the order\n if 'FrMaya' in each_path:\n result.insert(0, each_path)\n else:\n result.append(each_path)\n\n return result", "def get_path(self):\r\n path = [\"/bin\", \"/usr/bin\", \"/usr/local/bin\"]\r\n if \"PATH\" in os.environ:\r\n p = os.environ[\"PATH\"]\r\n if p:\r\n path = p.split(os.pathsep)\r\n return path", "def test_known_file_locations(dataset: linux.LinuxSourcesDataset):\n assert (dataset.src_tree_root / \"kernel\" / \"kexec.c\").is_file()\n assert (dataset.src_tree_root / \"kernel\" / \"smpboot.h\").is_file()", "def _real_paths(config):\n for key in ('--config', '--ffmpeg-bin', '--log', '--music-source', '--working-dir'):\n if not config[key]:\n continue\n config[key] = os.path.realpath(os.path.expanduser(config[key]))", "def path():\n # Exclude path to this script from path.\n this_file = os.path.realpath(__file__)\n this_path = os.path.dirname(this_file)\n return os.pathsep.join(p for p in sys.path if p != this_path)", "def source_paths(self):\n paths = self.config.get('static_dirs')\n if paths:\n return paths\n return [self.config.get('static_dir')]", "def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res", "def test__find_dot_env_file__1():\n base_location = 'test'\n find_launched_location = lambda : join_paths(base_location, '__init__.py')\n is_file = lambda path : False\n \n expected_output = None\n \n \n find_dot_env_file_copy = FunctionType(\n find_dot_env_file.__code__,\n {**find_dot_env_file.__globals__, 'find_launched_location': find_launched_location, 'is_file': is_file},\n find_dot_env_file.__name__,\n find_dot_env_file.__defaults__,\n find_dot_env_file.__closure__,\n )\n \n output = find_dot_env_file_copy()\n 
vampytest.assert_instance(output, str, nullable = True)\n vampytest.assert_eq(output, expected_output)" ]
[ "0.7171722", "0.6597074", "0.6507689", "0.6315025", "0.6311144", "0.6296824", "0.62492156", "0.6234693", "0.62341654", "0.61818033", "0.61814684", "0.6162761", "0.6095949", "0.60329336", "0.6010903", "0.59968036", "0.5987034", "0.59694135", "0.59489715", "0.5938563", "0.59279615", "0.59258145", "0.5923356", "0.5912038", "0.58984476", "0.58899385", "0.5876085", "0.5842718", "0.5817141", "0.580523" ]
0.7076356
1
Returns the best camera zoom given the atlas resolution
def zoom(self):
    res = np.max(self.metadata["resolution"])
    if self.atlas_name == "allen_human_500um":
        logger.debug(
            "ATLAS: setting zoom manually for human atlas, atlas needs fixing"
        )
        return 350
    else:
        return 40 / res
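The arithmetic behind the returned zoom, as a worked sketch (the 25 micron resolution is an assumed example):

res = 25          # assumed maximum atlas resolution, in microns
print(40 / res)   # 1.6 -- the zoom returned for a standard 25 um atlas
# the "allen_human_500um" atlas instead returns the hard-coded value 350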
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_zoom(input_box, z=18):\n box_tile = smopy.get_tile_box(input_box, z)\n box = smopy.correct_box(box_tile, z)\n sx, sy = smopy.get_box_size(box)\n if sx * sy >= MAXTILES:\n z = get_zoom(input_box, z - 1)\n return z", "def compute_resolution(zoom, size_px):\n # Calibration data:\n dist_in_um = 10\n dist_in_px = np.array([21.13, 19.62, 8.93])\n zooms = np.array([1.5, 3, 4.5])\n image_max_sizes = np.array([330, 610, 410])\n \n return np.mean((dist_in_um/dist_in_px) * (zoom/zooms) * (image_max_sizes/size_px))", "def _get_zoom(self) :\n \n # TODO : make it absolute zoom value : a zoom of 1 displays one data\n # pixel in one viewport pixel.\n \n return self._zoom", "def get_best_zoom_level(input_file, tile_pyramid_type):\n tile_pyramid = BufferedTilePyramid(tile_pyramid_type)\n with rasterio.open(input_file, \"r\") as src:\n xmin, ymin, xmax, ymax = reproject_geometry(\n segmentize_geometry(\n box(\n src.bounds.left, src.bounds.bottom, src.bounds.right, src.bounds.top\n ),\n get_segmentize_value(input_file, tile_pyramid),\n ),\n src_crs=src.crs,\n dst_crs=tile_pyramid.crs,\n ).bounds\n x_dif = xmax - xmin\n y_dif = ymax - ymin\n size = float(src.width + src.height)\n avg_resolution = (x_dif / float(src.width)) * (float(src.width) / size) + (\n y_dif / float(src.height)\n ) * (float(src.height) / size)\n\n for zoom in range(0, 40):\n if tile_pyramid.pixel_x_size(zoom) <= avg_resolution:\n return max([0, zoom - 1])", "def zoom(self):\n return self['zoom']", "def getZoomFactor(imageSize, maxW, maxH):\n\timageW, imageH = imageSize\n\tzoomW = float(imageW) / float(maxW)\n\tzoomH = float(imageH) / float(maxH)\n\treturn max(zoomW, zoomH)", "def max_zoom(self) -> float:\n return math.log(np.min(self.canvas_size) / REGION_DIM)", "def find_suggested_tonemap_scale(session):\n avg_film_luminance = session.GetFilm().GetFilmY()\n return (1.25 / avg_film_luminance * (118 / 255))\n\n # TODO\n # measure this all the time, show a message to the user if\n # abs(old - new) > threshold\n # so the user can set the new value with one click\n\n # imagepipeline = scene.camera.data.luxcore.imagepipeline\n # imagepipeline.tonemapper.linear_scale = suggested_linear_scale\n # imagepipeline.tonemapper.use_autolinear = False", "def get_zoom(self) -> float:\n transform = self.transform()\n cur_scale = (transform.m11(), transform.m22())\n return float(f\"{cur_scale[0] - 1.0:0.2f}\")", "def zoom(self) -> float:\n return self._zoom", "def largestResolution(resolutions):\n return resolutions[0]", "def zoom(self) -> Optional[int]:\n return self.get(\"/Zoom\", None)", "def zoom(self):\n return self.container['zoom']", "def _get_max_rupture_projection_radius(self):\n if self.max_radius: # already computed\n return self.max_radius\n\n # extract maximum magnitude\n max_mag, _rate = self.get_annual_occurrence_rates()[-1]\n for (np_prob, np) in self.nodal_plane_distribution.data:\n # compute rupture dimensions\n rup_length, rup_width = _get_rupture_dimensions(self, max_mag, np)\n # compute rupture width surface projection\n rup_width = rup_width * math.cos(math.radians(np.dip))\n # the projection radius is half of the rupture diagonal\n radius = math.sqrt(rup_length ** 2 + rup_width ** 2) / 2.0\n if radius > self.max_radius:\n self.max_radius = radius\n return self.max_radius", "def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def guess(cls, pix_x, pix_y, optical_foclen):\n return guess_camera_geometry(pix_x, pix_y, optical_foclen)", "def get_view_resized(x, y, 
z, th, sim, world_ground, resolution=360):\n\n view = get_eye_image(sim, world_ground,\n x=x, y=y, z=z,\n theta_degrees=th, extract_channel=2,\n save=False)\n\n scalefactor = resolution / sim.viewport_size[0]\n\n width = int(np.floor(sim.viewport_size[0] * scalefactor))\n height = int(np.floor(sim.viewport_size[1] * scalefactor))\n\n view_lowres = resize(view, (height, width))\n\n view_cut = view_lowres[:int(height / 2), :]\n\n return view_cut", "def _get_camera(self):\n rect = (self._dim[0], self._dim[2], self._dim[1] - self._dim[0],\n self._dim[3] - self._dim[2])\n flip = (False, type(self).__name__ == 'ImageObj', False)\n return scene.cameras.PanZoomCamera(rect=rect, flip=flip)", "def _defaultZoom(self):\n return (-1.0, 1.0, -1.0, 1.0)", "def calculate_zoom(self):\n distances = [geopy.distance.geodesic(self.centre_location, centroid).km for centroid in self.centroids]\n a = 4 / 20000\n distances = [1 + 4 - a * distance for distance in distances]\n print(min(distances))\n return min(distances)", "def zoom_to_size(self, *p):\n\t\tif self.image is None or self.allocation is None:\n\t\t\treturn\n\t\tif __debug__: print self.allocation.width, self.image.get_width()\n\t\tif __debug__: print self.allocation.width, self.image.get_width(), self.allocation.width/self.image.get_width()\n\t\tz = min(\n\t\t\tself.allocation.width/self.image.get_width(),\n\t\t\tself.allocation.height/self.image.get_height()\n\t\t\t)\n\t\tif __debug__: print \"zoom_to_size\", \"z=\", z\n\t\tself.zoom = z", "def GetNativeResolution(self, transform=None, maximum=None):\n # Get the source projection's units for a 1x1 pixel, assuming square\n # pixels.\n width, height = self.GetPixelDimensions()\n src_pixel_size = min(abs(width), abs(height))\n\n if transform is None:\n dst_pixel_size = src_pixel_size\n dst_ref = self.GetSpatialReference()\n else:\n # Transform these dimensions into the destination projection\n dst_pixel_size = transform.TransformPoint(src_pixel_size, 0)[0]\n dst_pixel_size = abs(dst_pixel_size)\n dst_ref = transform.dst_ref\n\n # We allow some floating point error between src_pixel_size and\n # dst_pixel_size based on the major circumference so that the error is\n # in the destination units\n error = max(*dst_ref.GetPixelDimensions(resolution=0)) / 128\n\n # Find the resolution where the pixels are smaller than dst_pixel_size.\n for resolution in count():\n if maximum is not None and resolution >= maximum:\n return resolution\n\n res_pixel_size = max(\n *dst_ref.GetPixelDimensions(resolution=resolution)\n )\n if (res_pixel_size - dst_pixel_size) <= error:\n return resolution\n\n # Halve error each resolution\n error /= 2", "def max_cam2world(self):\n if not hasattr(self, '_max_cam2world'):\n occnet2gaps = self.occnet2gaps\n cam2occnet = self.max_cam2occnet\n assert cam2occnet.shape[0] == 16\n assert cam2occnet.shape[1] == 4\n assert cam2occnet.shape[2] == 4\n assert occnet2gaps.shape[0] == 4\n assert occnet2gaps.shape[1] == 4\n cam2worlds = []\n for i in range(16):\n cam2worlds.append(np.matmul(occnet2gaps, cam2occnet[i, :, :]))\n self._max_cam2world = np.stack(cam2worlds)\n return self._max_cam2world", "def getResolution(self):\n return self.resolution", "def min_zoom(self) -> float:\n return math.log(np.min(self.canvas_size) / np.max(self._m.world.t_size))", "def parallel_scale(self):\n return self.camera.parallel_scale", "def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]", "def GetResolution(vDataSet):\r\n xmin,xmax,ymin,ymax,zmin,zmax = GetExtent(vDataSet)\r\n nx,ny,nz = 
vDataSet.GetSizeX(),vDataSet.GetSizeY(),vDataSet.GetSizeZ()\r\n\r\n return (xmax-xmin)/nx, (ymax-ymin)/ny, (zmax-zmin)/nz", "def closest_approach_to_camera(scene, speaker_object) -> (float, int):\n max_dist = sys.float_info.max\n at_time = scene.frame_start\n for frame in range(scene.frame_start, scene.frame_end + 1):\n scene.frame_set(frame)\n rel = speaker_object.matrix_world.to_translation() - scene.camera.matrix_world.to_translation()\n dist = norm(rel)\n\n if dist < max_dist:\n max_dist = dist\n at_time = frame\n\n return max_dist, at_time", "def get_voxel_resolution(pc, patch_size):\n\n if not pc.shape[1] == 3:\n raise Exception(\"Invalid pointcloud size, should be nx3, but is {}\".format(pc.shape))\n\n min_x = pc[:, 0].min()\n min_y = pc[:, 1].min()\n min_z = pc[:, 2].min()\n max_x = pc[:, 0].max()\n max_y = pc[:, 1].max()\n max_z = pc[:, 2].max()\n\n max_dim = max((max_x - min_x),\n (max_y - min_y),\n (max_z - min_z))\n\n voxel_resolution = (1.0 * max_dim) / patch_size\n\n return voxel_resolution" ]
[ "0.6454999", "0.64292175", "0.6390678", "0.62350464", "0.622483", "0.6209762", "0.61986816", "0.6183604", "0.6173395", "0.61674184", "0.60466546", "0.5988281", "0.5962575", "0.57636255", "0.5750779", "0.56561506", "0.56086415", "0.55965966", "0.55922043", "0.5578516", "0.5558103", "0.5511083", "0.5499946", "0.5484696", "0.54761755", "0.5472034", "0.5442193", "0.5421795", "0.5381997", "0.5351737" ]
0.7805432
0
Gets the rgb color of a region in the atlas
def _get_region_color(self, region): return [ x / 255 for x in self._get_from_structure(region, "rgb_triplet") ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_color_in_region(self, start, end):\n # Input format: (start_x, start_y), (end_x, end_y)\n start_x, start_y = start\n end_x, end_y = end\n\n # x and y are flipped\n crop_img = self.img[start_x:(end_x + 1), start_y:(end_y + 1)]\n channels = cv2.mean(crop_img)\n\n # Return BGR\n return channels[0], channels[1], channels[2]", "def rgb_color(self):\n return self._color", "def color_in_rgb(self):\n return self._color_rgb", "def get_color(im_obj):\n #im = Image.open(path, 'r')\n x, y = im_obj.size\n\n r, g, b = 0, 0, 0\n for i in xrange(x):\n for j in xrange(y):\n color_px = im_obj.getpixel((i, j))\n #print color_px\n r += color_px[0]\n g += color_px[1]\n b += color_px[2]\n\n r = r / (x * y)\n g = g / (x * y)\n b = b / (x * y)\n return (r, g, b)", "def get_red(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3]", "def rgb_color(self):\n return self._COLORS[self._mystate]", "def toRGB(self):\r\n return mage_hsv_tuple_to_rgb(self.Coords)", "def GetRGB(self, *args):\n return _XCAFDoc.XCAFDoc_Color_GetRGB(self, *args)", "def get_color(self):\n self.view.present(\n \"sheet\",\n orientations=ORIENTATIONS,\n )\n self.view.wait_modal()\n return self.rgb", "def rgb(self):\n\n return self._variable", "def rgb_2_scalar_idx(r, g, b):\n return 256 ** 2 * r + 256 * g + b", "def _get_color(self, r, g, b):\n clr = (r, g, b)\n return clr", "def get_rgb(self, r, g, b):\n\t\treturn None", "def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img", "def rgb_color(self):\n return None", "def get_color(self, coord):\n return self.board[coord[0], coord[1]]", "def getColor(self):\r\n return self.color", "def get_rgb(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_rgb()", "def rgb(self):\n return (self.r, self.g, self.b)", "def getColor(self):\n return self.color", "def to_color(self):\n return (int(self.r * 255), int(self.g * 255), int(self.b * 255))", "def getColor(self):\n return self._l[2]", "def get_color(self):\r\n return self._color", "def get_color(self):\n return self.color", "def get_color(self):\r\n return self.__color", "def color(self):\n return self.container['color']", "def get_r(r, g, b):\n\n color = Color(r, g, b)\n return color.get_r()", "def getColor(self):\n return self.__color", "def getColor(self):\n return self.__color", "def getColor(self):\n return self.__color" ]
[ "0.69011796", "0.6625769", "0.65982455", "0.65948516", "0.6562015", "0.6556168", "0.6497549", "0.63513184", "0.6319954", "0.63109565", "0.6263722", "0.6247156", "0.6227768", "0.6227081", "0.61992794", "0.6185941", "0.61626786", "0.6158018", "0.6136658", "0.6122602", "0.60735947", "0.6061321", "0.605484", "0.6048911", "0.60306156", "0.6028128", "0.6026035", "0.6018123", "0.6018123", "0.6018123" ]
0.77586186
0
Returns a plane going through a point at pos, oriented orthogonally to the vector norm and of width and height sx, sy.
def get_plane( self, pos=None, norm=None, plane=None, sx=None, sy=None, color="lightgray", alpha=0.25, **kwargs, ): axes_pairs = dict(sagittal=(0, 1), horizontal=(2, 0), frontal=(2, 1)) if pos is None: pos = self.root._mesh.centerOfMass() try: norm = norm or self.space.plane_normals[plane] except KeyError: # pragma: no cover raise ValueError( # pragma: no cover f"Could not find normals for plane {plane}. Atlas space provides these normals: {self.space.plane_normals}" # pragma: no cover ) # Get plane width and height idx_pair = ( axes_pairs[plane] if plane is not None else axes_pairs["horizontal"] ) bounds = self.root.bounds() root_bounds = [ [bounds[0], bounds[1]], [bounds[2], bounds[3]], [bounds[4], bounds[5]], ] wh = [float(np.diff(root_bounds[i])) for i in idx_pair] if sx is None: sx = wh[0] if sy is None: sy = wh[1] # return plane return Actor( Plane(pos=pos, normal=norm, sx=sx, sy=sy, c=color, alpha=alpha), name=f"Plane at {pos} norm: {norm}", br_class="plane", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def proj_to_plane(norm, d, pts):\n a = norm[0]\n b = norm[1]\n c = norm[2]\n\n p = []\n\n for i in range(len(pts)):\n x_p = pts[i][0]\n y_p = pts[i][1]\n z_p = pts[i][2]\n\n if a != 0:\n x_0 = (b * b + c * c) * x_p - a * b * y_p - a * c * z_p - a * d\n y_0 = (b * 1.0 / a) * (x_0 - x_p) + y_p\n z_0 = (c * 1.0 / a) * (x_0 - x_p) + z_p\n\n elif b != 0:\n x_0 = x_p \n y_0 = c * c * y_p - b * (d + c)\n z_0 = (c * 1.0 / b) *(y_0 - y_p) + z_p\n\n else:\n x_0 = x_p\n y_0 = y_p\n z_0 = - d * 1.0 / c\n\n p.append([x_0, y_0, z_0])\n \n return p", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts) \n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def build_coord(norm, d, pts):\n # Compute the origin as the mean point of the points, and this point has to be on the plane\n \n n = len(pts)\n x_total = 0\n y_total = 0\n z_total = 0\n \n for i in range(n):\n x_total += pts[i][0]\n y_total += pts[i][1]\n z_total += pts[i][2]\n\n x_o = x_total * 1.0 / n\n y_o = y_total * 1.0 / n\n z_o = z_total * 1.0 / n\n p_o = [x_o, y_o, z_o]\n \n # Choose p be the projection of a vector in the z-axis to the plane\n # If the plane is not perpendicular to the z-axis\n if ((norm[2] != 1) and (norm[2] != -1)): \n # Choose a point\n o_z = [x_o, y_o, z_o + 1]\n \n [[x_p, y_p, z_p]] = proj_to_plane(norm, d, [o_z])\n \n dist = np.linalg.norm([x_p - x_o, y_p - y_o, z_p - z_o])\n\n x_c = (x_p - x_o) * 1.0 / dist \n y_c = (y_p - y_o) * 1.0 / dist\n z_c = (z_p - z_o) * 1.0 / dist\n # Thus we have unit vector in x direction\n e_y = [x_c, y_c, z_c]\n #Compute the unit vector in y direction\n e_x = np.cross(e_y, norm).tolist()\n else:\n e_x = [1, 0, 0]\n e_y = [0, 1, 0]\n \n return [e_x, e_y, norm] , p_o", "def project_point_plane(point, plane):\n base, normal = plane\n normal = normalize_vector(normal)\n vector = subtract_vectors(point, base)\n snormal = scale_vector(normal, dot_vectors(vector, normal))\n return subtract_vectors(point, snormal)", "def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)", "def __pos__(self):\r\n return vec4(+self.x, +self.y, 
+self.z, +self.w)", "def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal", "def normal(self) -> Vec:\n # The three points are in clockwise order, so compute differences\n # in the clockwise direction, then cross to get the normal.\n point_1 = self.planes[1] - self.planes[0]\n point_2 = self.planes[2] - self.planes[1]\n\n return Vec.cross(point_1, point_2).norm()", "def plane(self):\n return plane(self.N, self.o)", "def perpendicular(self):\n return tuple.__new__(Vec2, (-self[1], self[0]))", "def fit_plane(xyz,z_pos=None):\n mean = np.mean(xyz,axis=0)\n xyz_c = xyz - mean[None,:]\n l,v = np.linalg.eig(xyz_c.T.dot(xyz_c))\n abc = v[:,np.argmin(l)]\n d = -np.sum(abc*mean)\n # unit-norm the plane-normal:\n abcd = np.r_[abc,d]/np.linalg.norm(abc)\n # flip the normal direction:\n if z_pos is not None:\n if np.sum(abcd[:3]*z_pos) < 0.0:\n abcd *= -1\n return abcd", "def plane_sphere(p, s):\n\n p.normalize()\n\n d = dot(s.o-p.o, p.n)\n\n if d > s.r:\n return False\n else:\n return (s.o - d*p.n, sqrt(s.r*s.r - d*d))", "def get_orthogonal_vec2d(vec):\n ortho = np.array([-vec[1], vec[0]])\n return ortho", "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def get_normal_vector_of_plane(p1, p2, p3):\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n ## print 'norm: '+str(np.linalg.norm(nvec))\n return nvec / np.linalg.norm(nvec)", "def _fit_plane_to_point_cloud(\n points_xyz: NDArrayFloat,\n) -> Tuple[float, float, float, float]:\n center_xyz: NDArrayFloat = np.mean(points_xyz, axis=0)\n out: Tuple[NDArrayFloat, NDArrayFloat, NDArrayFloat] = np.linalg.svd(\n points_xyz - center_xyz\n )\n vh = out[2]\n\n # Get the unitary normal vector\n a, b, c = float(vh[2, 0]), float(vh[2, 1]), float(vh[2, 2])\n d: float = -np.dot([a, b, c], center_xyz)\n return (a, b, c, d)", "def surface_norm(self, pt):\n\n return (pt - self.origin).normalize()", "def create_plot_plane_2d(axis=(1.0, 1.0), origin=(0.0,0.0), size=(2.0,2.0)):\n ft = ImageFont.truetype (FONT_RESOURCES_DIR+\"/courier.ttf\", 12)\n gl_font = GlFont('', ft)\n gl_font.color = [0.0, 0, 0, 1.0]\n gl_plot = PlotPlane2d(gl_font)\n gl_plot.i_axis = axis\n gl_plot.i_origin = origin\n gl_plot.o_wh = size\n gl_plot.i_axis_units = (axis[0]/10, axis[1]/10)\n\n gl_plot.prepare()\n return gl_plot", "def normalize(self):\r\n\r\n nlen = 1.0/math.sqrt(self*self)\r\n return vec4(self.x*nlen, self.y*nlen, self.z*nlen, self.w*nlen)", "def surface_norm(self, pt):\n\n return self.normal.normalize()", "def vector_perp(v):\n assert len(v) == 2\n x, y = v\n return Vector(-y, x)", "def PlaneNormalVector(h, k, l):\r\n vec = np.array([h, k, l])\r\n return vec/np.linalg.norm(vec)", "def mirror_point_to_plane(point, plane):\n assert isinstance(plane, cg3d_plane.CGPlane)\n pn, norm = plane.get_point_and_normal()\n norm.normalize()\n return point - 2.0 * ((point - pn) * norm) * norm", "def norm2(self):\n\t\treturn self.x ** 2 + self.y ** 2 + self.z ** 2", "def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)", "def add_rectangular_plane(center_loc=(0, 0, 0), point_to=(0, 0, 1), 
size=(2, 2), name=None):\n center_loc = np.array(center_loc)\n point_to = np.array(point_to)\n size = np.append(np.array(size), 0)\n\n bpy.ops.mesh.primitive_plane_add(location=center_loc)\n\n plane_obj = bpy.context.object\n\n if name is not None:\n plane_obj.name = name\n\n plane_obj.dimensions = size\n\n # Point it to target\n direction = Vector(point_to) - plane_obj.location\n # Find quaternion that rotates plane's 'Z' so that it aligns with 'direction'\n # This rotation is not unique because the rotated plane can still rotate about direction vector\n # Specifying 'Y' gives the rotation quaternion with plane's 'Y' pointing up\n rot_quat = direction.to_track_quat('Z', 'Y')\n plane_obj.rotation_euler = rot_quat.to_euler()\n\n # Scene update necessary, as matrix_world is updated lazily\n bpy.context.scene.update()\n\n return plane_obj", "def normal(axis_direction, axis_origin, point):\n # transform input into numpy arrays\n axis_direction = np.array(axis_direction, float)\n axis_origin = np.array(axis_origin, float)\n point = np.array(point, float)\n\n # vector from axis normal_origin to point\n vector = point - axis_origin\n\n # projection of vector on axis\n projection = np.dot(vector, axis_direction)*axis_direction\n\n # the normal vector from normal_origin to point\n normal_direction = vector - projection\n\n # normalized normal_direction\n normal_direction = normal_direction/np.linalg.norm(normal_direction)\n\n # opposite of the projection of vector on normal\n projection2 = - np.dot(normal_direction, vector)*normal_direction\n\n normal_origin = point + projection2\n\n return normal_direction, normal_origin", "def hyperplane(self):\n origin = (self.a+self.b+self.c)/3.\n normal = np.cross(self.a-self.b, self.a-self.c)\n return Hyperplane(origin, normal)", "def create_plane(self):\n\n # First we calculate our point increment for both the x and y values\n inc_x = (self.xmax - self.xmin)/(self.xlen - 1)\n inc_y = (self.ymax - self.ymin)/(self.ylen - 1)\n\n # This for-loop will add every x-value with every y-value, saving the values column wise\n # i.e. (-10,-10), (-10,-9), (-10.-8),...,(-10,n) for n = our y-values.\n # store these combinations into a list, and add that to our plane. \n # The nested loop will then traverse again and will get the combinations for the next x-value.\n # The loop will continue until all x-values and y-value combinations are added to our plane.\n for y in range(0, self.ylen + 1):\n temp_list = []\n for x in range(0, self.xlen + 1):\n temp_list.append(self.f((self.xmin + x*inc_x) + (self.ymin + y*inc_y)*1j))\n self.plane.append(temp_list)" ]
[ "0.6396546", "0.6396546", "0.6183506", "0.6174271", "0.6145053", "0.61160415", "0.6035465", "0.5979914", "0.59591293", "0.5913918", "0.59053445", "0.5904647", "0.5876737", "0.58654743", "0.58250505", "0.58204174", "0.57940704", "0.5756824", "0.57409114", "0.57186556", "0.5703341", "0.56912357", "0.56549335", "0.56547046", "0.56507766", "0.5639619", "0.56354505", "0.5618393", "0.5612076", "0.55957097" ]
0.68663776
0
compute z_n at previous time, i.e. z_n(t), z_n(t-1)
def compute_z_prev(n, Z_opt, device): nzx, nzy = Z_opt.shape[2], Z_opt.shape[3] # no. of channel for noise input nc_z = 3 if n == 0: # z_rand is gaussian noise z_rand = functions.generate_noise([1, nzx, nzy], device= device) z_rand = z_rand.expand(1, 3, Z_opt.shape[2], Z_opt.shape[3]) z_prev1 = 0.95 * Z_opt +0.05 * z_rand z_prev2 = Z_opt else: z_prev1 = 0.95 * Z_opt +0.05 * functions.generate_noise([nc_z, nzx, nzy], device = device) z_prev2 = Z_opt return z_prev1, z_prev2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_z_curr(Z_opt, z_prev1, z_diff, alpha):\r\n z_curr = alpha * Z_opt + (1 - alpha) * (z_prev1 + z_diff)\r\n return z_curr", "def lookback_time(self, z, z0 = 0.0):\n lt_func = np.vectorize(lambda z, z0: \n si.quad(self._lookback_integrand, z0, z, limit=1000)\n )\n t_look, err = lt_func(z, z0)\n return(t_look)", "def f(z, t):\n zout = np.zeros_like(z)\n zout[:] = [z[1], -dpdx]\n return zout", "def calc_lookback_time(z):\n\n def integrand(z):\n return 1.0 / (H_z(z) * (1.0 + z)) * (H0_inverse*H0)\n\n t, t_err = quad(integrand, 0, z)\n\n return t", "def lookback_time(self, z):\n\n # Calculate the integrand.\n def f(z1):\n return 1.0 / (self.H(z1) * (1 + z1))\n\n return _intf_0_z(f, z) / self._unit_time", "def predict(self, t : float) -> vector :\n '''@ Zstar : vector'''\n '''@ dt : float'''\n '''@ dtau : float'''\n '''@ F : array'''\n dt = t - self.t\n dtau = self._normalizeDeltaTime(dt)\n F = self.stateTransitionMatrix(self.order+1, dtau)\n Zstar = F @ self.Z;\n return Zstar;", "def heun(func, z0, time):\r\n\r\n z = np.zeros((np.size(time), np.size(z0)))\r\n z[0,:] = z0\r\n zp = np.zeros_like(z0)\r\n\r\n for i, t in enumerate(time[0:-1]):\r\n dt = time[i+1] - time[i]\r\n zp = z[i,:] + np.asarray(func(z[i,:],t))*dt # Predictor step\r\n z[i+1,:] = z[i,:] + (np.asarray(func(z[i,:],t)) + np.asarray(func(zp,t+dt)))*dt/2.0 # Corrector step\r\n\r\n return z", "def forward(self, z_t_1): \n gate = self.gate(z_t_1) # compute the gating function\n proposed_mean = self.proposed_mean(z_t_1) # compute the 'proposed mean'\n mu = (1 - gate) * self.z_to_mu(z_t_1) + gate * proposed_mean # compute the scale used to sample z_t, using the proposed mean from\n logvar = F.softplus(self.z_to_logvar(self.relu(proposed_mean))) \n epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n# z_t = mu + epsilon * torch.exp(0.5 * logvar) # [batch_sz x z_sz]\n z_t = mu + epsilon * logvar\n return z_t, mu, logvar", "def _compute_carry_and_output_fused(self, z, c_tm1):\n z0, z1, z2, z3 = z\n i = self.recurrent_activation(z0)\n f = self.recurrent_activation(z1)\n c = f * c_tm1 + i * self.activation(z2)\n o = self.recurrent_activation(z3)\n return c, o", "def time_to_n_previous(df, cols, dummy_col, generated_feature_name, params={'n':1,'fillna':np.nan}):\n n = int(params['n'])\n if n > 0:\n n = -n\n params['n'] = n\n return time_to_n_next(df, cols, dummy_col, generated_feature_name, params)", "def get_z_from_t(t):\n\n\n global z_from_t_interp\n\n if z_from_t_interp is None: initialize()\n\n return z_from_t_interp(t)", "def start(self, t : float, Z : vector) -> None:\n self.n = 0;\n self.t0 = t;\n self.t = t;\n self.Z = self._normalizeState(self._conformState(Z));", "def rk4(func, z0, time):\r\n\r\n z = np.zeros((np.size(time),np.size(z0)))\r\n z[0,:] = z0\r\n zp = np.zeros_like(z0)\r\n\r\n for i, t in enumerate(time[0:-1]):\r\n dt = time[i+1] - time[i]\r\n dt2 = dt/2.0\r\n k1 = np.asarray(func(z[i,:], t)) # predictor step 1\r\n k2 = np.asarray(func(z[i,:] + k1*dt2, t + dt2)) # predictor step 2\r\n k3 = np.asarray(func(z[i,:] + k2*dt2, t + dt2)) # predictor step 3\r\n k4 = np.asarray(func(z[i,:] + k3*dt, t + dt)) # predictor step 4\r\n z[i+1,:] = z[i,:] + dt/6.0*(k1 + 2.0*k2 + 2.0*k3 + k4) # Corrector step\r\n\r\n return z", "def lookback_time(self, z = 1.):\n H_z = self.H_massive if massive_nu_approx else self.H\n integrand = lambda x: const.Mpc_to_km/(H_z(x)*(1.+x))/const.Myr_to_s\n lookback, _ = sint.quad(integrand, 0., z)\n return lookback", "def rhs1(t, state):\n\n # Get the 
number of prey\n n = (len(state) // 2) - 1\n # Extract the prey positions\n xs = state[2:].reshape(n, 2)\n\n # Extract the predator position\n z = state[:2]\n # Compute the differences between the prey\n # positions and the predator position\n xz_difs = xs - np.expand_dims(z, 0)\n xz_len2s = np.sum(xz_difs**2, axis=1)#**(p/2)\n lens = xz_len2s**(p/2)\n pred_dxdt = c / n * np.sum(xz_difs / np.expand_dims(lens, 1), axis=0)\n\n # Compute the differences between the prey positions\n dif_mat = build_difs_matrix(xs)\n # Compute the squared lengths between prey positions\n len2_mat = np.sum(dif_mat**2, axis=2)\n # Set the diagonal to 1 to avoid dividing by zero\n len2_mat[np.diag_indices(n)] = 1\n # Compute the prey->prey effects\n m = 1 / n * (dif_mat / np.expand_dims(len2_mat, 2) - a * dif_mat)\n prey_dxdt = np.sum(m, axis=1)\n # Compute the predator->prey effects\n prey_dxdt += b * xz_difs / np.expand_dims(xz_len2s, 1)\n\n # Flatten the derivatives the way solve_ivp wants\n dxdt = np.zeros(state.shape)\n dxdt[:2] = pred_dxdt.flatten()\n dxdt[2:] = prey_dxdt.flatten()\n return dxdt", "def backward(self, z):\n return self.forward(z) * (1 - self.forward(z))", "def forward(self, z_t_1, h_t_1, c_t_1): \n gate = self.gate(h_t_1) # compute the gating function\n \n _, (h_t, c_t) = self.lstm(z_t_1.view(1, z_t_1.shape[0], z_t_1.shape[1]).contiguous(), (h_t_1, c_t_1))\n \n \n \n proposed_mean = self.proposed_mean(h_t) # compute the 'proposed mean'\n mu = (1 - gate) * self.z_to_mu(z_t_1) + gate * proposed_mean # compute the scale used to sample z_t, using the proposed mean from\n logvar = self.z_to_logvar(self.relu(proposed_mean)) \n epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n z_t = mu + epsilon * torch.exp(0.5 * logvar) # [batch_sz x z_sz]\n return z_t, mu.view(mu.shape[1], mu.shape[2]), logvar.view(logvar.shape[1], logvar.shape[2]), h_t, c_t", "def forward(self, z_t_1, h_x):\n h_combined = 0.5*(self.z_to_h(z_t_1) + h_x)# combine the rnn hidden state with a transformed version of z_t_1\n mu = self.h_to_mu(h_combined)\n logvar = self.h_to_logvar(h_combined)\n std = F.softplus(logvar) \n epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n z_t = epsilon * std + mu # [batch_sz x z_sz]\n return z_t, mu, logvar", "def new_time(self, state, time, last_value, independent_sources):\n sum1 = np.matmul(state.__active_control[1],last_value) - state.__active_control[2]\n if (abs(sum1) > self.__absolute_error):\n sum2 = np.matmul(state.__active_control[1], np.matmul(self.__A, last_value) + np.matmul(self.__B, independent_sources))\n sum3 = np.matmul(state.__active_control[1], np.matmul(self.__A**2, last_value) + np.matmul(self.__A, \\\n np.matmul(self.__B, independent_sources)))\n return time + 1.0 / (sum3 / 2 / sum2 - sum2 / sum1)\n else:\n return -1", "def get_z_delta(self, z):\n if self.z is None:\n raise UnknownCarriagePosition\n\n z_delta = z - self.z\n error = z_delta % copysign(self.stepper.MM_PER_STEP, z_delta)\n return z_delta, error", "def update(self,z_t):\n # YOUR CODE HERE\n pass", "def _init_time(self, zmin, zmax, nzbin):\n # lgz is actually lg(1+z)\n # this actually very ill-defined as we really want to pack the early time with more \n # sampling points...\n# self.lgz = np.linspace(log10(zmin+1), log10(zmax+1), num=nzbin)\n# self.lgzmax = self.lgz[-1]\n# self.lgzmin = self.lgz[0]\n# self.z = np.power(10., self.lgz) - 1.\n\n _lgzrev = np.linspace(log10(zmin+1), log10(zmax+1), num=nzbin)\n _zrev = zmin + zmax - 
(np.power(10.0, _lgzrev) - 1)\n self.z = _zrev[::-1] \n self.lgz = np.log10(self.z+1)\n self.lgzmax = self.lgz[-1]\n self.lgzmin = self.lgz[0]", "def _derZ(self, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha) * self.xInterpolators[y_pos - 1][z_pos](x)\n + alpha * self.xInterpolators[y_pos][z_pos](x)\n )\n - (\n (1 - alpha) * self.xInterpolators[y_pos - 1][z_pos - 1](x)\n + alpha * self.xInterpolators[y_pos][z_pos - 1](x)\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdz = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n dfdz[c] = (\n (\n (1 - alpha) * self.xInterpolators[i - 1][j](x[c])\n + alpha * self.xInterpolators[i][j](x[c])\n )\n - (\n (1 - alpha) * self.xInterpolators[i - 1][j - 1](x[c])\n + alpha * self.xInterpolators[i][j - 1](x[c])\n )\n ) / (self.z_list[j] - self.z_list[j - 1])\n return dfdz", "def naive_sum(z, n_times_atom):\n n_atoms, n_trials, n_times_valid = z.shape\n\n ztz = np.zeros(shape=(n_atoms, n_atoms, 2 * n_times_atom - 1))\n t0 = n_times_atom - 1\n for k0 in range(n_atoms):\n for k in range(n_atoms):\n for i in range(n_trials):\n for t in range(n_times_atom):\n if t == 0:\n ztz[k0, k, t0] += (z[k0, i] * z[k, i]).sum()\n else:\n ztz[k0, k, t0 + t] += (\n z[k0, i, :-t] * z[k, i, t:]).sum()\n ztz[k0, k, t0 - t] += (\n z[k0, i, t:] * z[k, i, :-t]).sum()\n return ztz", "def linear_backward(dZ, cache):\n pass", "def update_z(z, h, eta, a, alpha):\n pred = predictor(z, h, eta, a, alpha)\n z_updated = z + 0.5*(stuartLandau(z, a, alpha) + stuartLandau(pred, a, alpha))*h + 0.5*(noiseFunction(z) + noiseFunction(pred))*eta\n return z_updated", "def euler(func, z0, time):\r\n\r\n z = np.zeros((np.size(time), np.size(z0)))\r\n z[0,:] = z0\r\n\r\n for i in range(len(time)-1):\r\n dt = time[i+1] - time[i]\r\n z[i+1,:]=z[i,:] + np.asarray(func(z[i,:], time[i]))*dt\r\n\r\n return z", "def cubic_evolve(self,nt=1):\n #loop through time steps\n for l in range(nt):\n # temporary array\n y_temp = np.zeros(self.y.shape[0])\n # loop through array\n for i in range(self.y.shape[0]):\n # idx left to departure point\n x_dep = self.x[i]-self.u[i]*self.dt\n j = int(np.floor(x_dep/self.dx))\n # alpha\n a = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n # calculate next time step\n f = lambda x: x % self.y.shape[0] if x >= self.y.shape[0] else x\n y_temp[i] = - a * (1-a)*(2-a)/6 * self.y[f(j-1)]\n y_temp[i] += (1-a**2)*(2-a)/2 * self.y[f(j)]\n y_temp[i] += a*(1+a)*(2-a)/2 * self.y[f(j+1)]\n y_temp[i] -= a*(1-a**2)/6 * self.y[f(j+2)]\n self.y = np.copy(y_temp)\n return self.y", "def reverse_l_r(self, x_value, z_value):\r\n\t\t#~~~ x-values ~~~\r\n\t\tdiff_temp = np.diff(x_value) \t\t\t\t#step in x-values\r\n\t\tdiff_reverse = diff_temp[::-1]\t\t\t\t#reversing the step values above\r\n\t\ttemp = np.cumsum(diff_reverse)\t\t\t\t#computing cumulative sum on differences\r\n\t\tx_value = np.insert(temp, 0,0)\t\t\t\t#adding the initial 
zero-value\r\n\t\t\r\n\t\t#~~~ z-values ~~~\r\n\t\tz_value = z_value[::-1]\t\t\t\t\t\t#reversing z-values\r\n\t\t\r\n\t\treturn x_value, z_value", "def adjust_layer_temps(self):\n\n if self.layer_count == 1:\n self.t_s_0 = self.new_tsno(\n self.m_s_0,\n self.t_s_0,\n self.cc_s_0)\n self.t_s = self.t_s_0\n\n elif self.layer_count == 2:\n if self.isothermal:\n self.t_s = FREEZE\n self.t_s_l = FREEZE\n self.t_s_0 = FREEZE\n else:\n self.t_s_0 = self.new_tsno(\n self.m_s_0,\n self.t_s_0,\n self.cc_s_0)\n self.t_s_l = self.new_tsno(\n self.m_s_l,\n self.t_s_l,\n self.cc_s_l)\n self.t_s = self.new_tsno(\n self.m_s,\n self.t_s,\n self.cc_s)" ]
[ "0.6359763", "0.6280652", "0.5986535", "0.59398437", "0.58886546", "0.5796982", "0.57787", "0.57448745", "0.5621632", "0.55678546", "0.5563106", "0.55606556", "0.55116934", "0.54717535", "0.54433835", "0.544052", "0.54086614", "0.538945", "0.53769845", "0.53676015", "0.5366809", "0.53664404", "0.53105927", "0.530697", "0.53023046", "0.5287983", "0.5280066", "0.5278607", "0.5256301", "0.5244434" ]
0.6960639
0
Usage Compute all the free space on the boundary of cells in the diagram for polygonal chains P and Q and the given eps LF[(i,j)] is the free space of segment [Pi,Pi+1] from point Qj BF[(i,j)] is the free space of segment [Qj,Qj+1] from point Pi
def LF_BF(P, Q, p, q, eps, mdist, P_dist, Q_dist): LF = {} for j in range(q): for i in range(p - 1): LF.update({(i, j): free_line(Q[j], eps, P[i:i + 2], mdist[i, j], mdist[i + 1, j], P_dist[i])}) BF = {} for j in range(q - 1): for i in range(p): BF.update({(i, j): free_line(P[i], eps, Q[j:j + 2], mdist[i, j], mdist[i, j + 1], Q_dist[j])}) return LF, BF
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LR_BR(LF, BF, p, q):\n if not (LF[(0, 0)][0] <= 0 and BF[(0, 0)][0] <= 0 and LF[(p - 2, q - 1)][1] >= 1 and BF[(p - 1, q - 2)][1] >= 1):\n rep = False\n BR = {}\n LR = {}\n else:\n LR = {(0, 0): True}\n BR = {(0, 0): True}\n for i in range(1, p - 1):\n if LF[(i, 0)] != [-1, -1] and LF[(i - 1, 0)] == [0, 1]:\n LR[(i, 0)] = True\n else:\n LR[(i, 0)] = False\n for j in range(1, q - 1):\n if BF[(0, j)] != [-1, -1] and BF[(0, j - 1)] == [0, 1]:\n BR[(0, j)] = True\n else:\n BR[(0, j)] = False\n for i in range(p - 1):\n for j in range(q - 1):\n if LR[(i, j)] or BR[(i, j)]:\n if LF[(i, j + 1)] != [-1, -1]:\n LR[(i, j + 1)] = True\n else:\n LR[(i, j + 1)] = False\n if BF[(i + 1, j)] != [-1, -1]:\n BR[(i + 1, j)] = True\n else:\n BR[(i + 1, j)] = False\n else:\n LR[(i, j + 1)] = False\n BR[(i + 1, j)] = False\n rep = BR[(p - 2, q - 2)] or LR[(p - 2, q - 2)]\n return rep, LR, BR", "def dbgain_free_space(self, pt_1, pt_2):\n if (pt_1.ndim > 1) or (pt_2.ndim > 1):\n raise NotImplementedError\n dist = np.linalg.norm(pt_1 - pt_2)\n\n return self.dist_to_dbgain_free_space(\n dist,\n wavelength=self.wavelength,\n antenna_dbgain_tx=self.antenna_dbgain_tx,\n antenna_dbgain_rx=self.antenna_dbgain_rx,\n )", "def process_lim(pool_lim, area):\n\n pool_nolim = [] # No limitation\n pool_lim_n = [] # N limitation\n pool_lim_p = [] # P limitation\n # Colimitation driven by N (When the realized NPP allocation is smaller\n # thant the potential due to N but the other element is also limitant)\n pool_colim_n = []\n # Colimitation driven by P (When the realized NPP allocation is smaller\n # than the potential due to P but the other element is also limitant\n pool_colim_p = []\n # Real Colimitation = K <= 1D-6 (K is difference between P and N realized NPP allocation)\n pool_colim_np = []\n\n ndays = pool_lim.shape[1]\n npls = pool_lim.shape[0]\n\n for pls in range(npls):\n if area[pls]:\n no_lim = (pool_lim[pls, :] == 0).sum() / ndays * area[pls]\n lim_n = (np.count_nonzero(\n pool_lim[pls, :] == 1) / ndays) * area[pls]\n lim_p = (np.count_nonzero(\n pool_lim[pls, :] == 2) / ndays) * area[pls]\n colim_n = (np.count_nonzero(\n pool_lim[pls, :] == 4) / ndays) * area[pls]\n colim_p = (np.count_nonzero(\n pool_lim[pls, :] == 5) / ndays) * area[pls]\n colim_np = (np.count_nonzero(\n pool_lim[pls, :] == 6) / ndays) * area[pls]\n\n pool_nolim.append(no_lim)\n pool_lim_n.append(lim_n)\n pool_lim_p.append(lim_p)\n pool_colim_n.append(colim_n)\n pool_colim_p.append(colim_p)\n pool_colim_np.append(colim_np)\n\n return (np.sum(pool_nolim),\n np.sum(pool_lim_n),\n np.sum(pool_lim_p),\n np.sum(pool_colim_n),\n np.sum(pool_colim_p),\n np.sum(pool_colim_np))", "def compute_ghost_series(self,terms):\n\t\tp=self.p\n\t\tcomp=self.comp\n\t\tf1=self.f1\n\t\tf2=self.f2\n\n\t\tghost_coefs = [[] for i in range(terms+1)]\n\n\n\t\tk=comp;\n\t\tif k==0 or k==1:\n\t\t\tk=k+p-1\n\n\t\t## Precompute the needed dimensions\n\t\tself.f1v = [f1(k)]\n\t\tself.f2v = [f2(k)]\n\t\tk = k + p-1\n\t\twhile self.f1v[len(self.f1v)-1] <= terms+1:\n\t\t\tself.f1v += [f1(k)]\n\t\t\tself.f2v += [f2(k)]\n\t\t\tk = k + p-1\n\t\t\n\t\t## Starting at weight 2, we run through weights in the component,\n\t\t## compute the associated indices, and then record the weights at those\n\t\t## indices with appropriate multiplicities\n\n\t\tk = comp;\n\t\tif k==0 or k==1:\n\t\t\tk=k+p-1\n\t\tn = 0\n\n\t\tinds = range(self.f1v[n]+1,self.f1v[n]+self.f2v[n])\n\t\twhile (len(inds)==0 or inds[0]<=terms+1):\n\t\t\t## This loops adds the weights to the appropriate indices with the 
appropriate multiplicities\n\t\t\tfor m in range(floor((len(inds)+1)/2)):\n\t\t\t\tif m < floor(len(inds)/2):\n\t\t\t\t\tif inds[m]<=terms:\n\t\t\t\t\t\tghost_coefs[inds[m]] += [(k,m+1)]\n\t\t\t\t\tif (inds[len(inds)-1-m]<=terms):\n\t\t\t\t\t\tghost_coefs[inds[len(inds)-1-m]] += [(k,m+1)]\n\t\t\t\telse:\n\t\t\t\t\tif inds[m]<=terms:\n\t\t\t\t\t\tghost_coefs[inds[m]] += [(k,m+1)]\n\t\t\tk = k + p-1\n\t\t\tn = n + 1\n\t\t\tinds = range(self.f1v[n]+1,self.f1v[n]+self.f2v[n])\n\t\tself.series=ghost_coefs", "def bifurcation_diagram(args, Bpbmin, Bpbmax, ylim=(-1, 0.6)):\n\n xs = []\n Bpb_list = np.linspace(Bpbmin, Bpbmax, 100)\n Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl = args\n\n sol, t = calcODE(args, -1.5, -1.5, 0.5, 0.5, 0.5, 0.5, ts=4000, nt=2 ** 25)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n x0 = sol[0, :]\n n = np.array(ode(x0, t[0], *args))\n q, _ = np.linalg.qr(n[:, None], mode='complete')\n\n periods = []\n for Bpb in Bpb_list:\n args = (Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl)\n sol, t = calcODE(args, *sol[-1, :], ts=1000, nt=2 ** 15)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n for i in range(len(sol) - 1):\n x1 = sol[i]\n x2 = sol[i + 1]\n if np.sign(n @ (x2 - x0)) != np.sign(n @ (x1 - x0)):\n c1 = dist(x1, x0, n)\n c2 = dist(x2, x0, n)\n alpha = c2 / (c1 + c2)\n x_new = x1 + alpha * (x2 - x1)\n x = (x_new - x0).dot(q)\n xs.append((Bpb, x[0], x[1], x[2], x[3], x[4], x[5]))\n # if np.linalg.norm(x_new - x0) < 1e-2 and period is None:\n period = t[i] - periods[-1][-1] if len(periods) else 0\n periods.append((Bpb, period, np.linalg.norm(x_new - x0), t[i]))\n\n plt.figure(figsize=(15, 10))\n plt.scatter([i[0] for i in xs], [i[2] for i in xs], s=10)\n plt.xlabel('$B_{pb}$')\n\n # plt.ylim(ylim)\n plt.show()\n\n periods = [i for i in periods if i[1] > 0]\n\n return periods, xs", "def _add_boundaries(self):\n logger.info(\"add_boundaries: start of method\")\n\n import scipy.spatial as sptl\n import scipy.sparse as sprs\n Lx = self._Lx\n Ly = self._Ly\n Lz = self._Lz\n Np = self.num_pores()\n btype = self._btype\n boffset = 0.05\n\n #Translate internal pores to each face of domain\n poffset = np.zeros((7,3))\n poffset[[2,5],0] = [-Lx, Lx]\n poffset[[3,4],1] = [-Ly, Ly]\n poffset[[1,6],2] = [-Lz, Lz]\n pcoords = pcoords0 = self['pore.coords']\n for i in np.r_[1:7]:\n pcoords = np.concatenate((pcoords,pcoords0 + poffset[i,:]),axis=0)\n\n #Use some twisted logic to get bval list of + for boundary and - for periodic faces\n bval = [0, 1, 2, 3, 4, 5, 6]*(np.array([0, btype[2], btype[0], btype[1], btype[1], btype[0], btype[2]])*-2+1)\n ptype = np.zeros((Np,),dtype=int)\n for i in np.r_[1:7]:\n ptype = np.concatenate((ptype,np.ones((Np,),dtype=int)*bval[i]),axis=0)\n\n #pnum contains the internal ID number of the boundary pores (for connecting periodic points)\n pnum = self.pores()\n pnum = np.tile(pnum,7)\n\n Tri = sptl.Delaunay(pcoords)\n adjmat = sprs.lil_matrix((np.shape(pcoords)[0],np.shape(pcoords)[0]),dtype=int)\n for i in np.arange(0,np.shape(Tri.simplices)[0]):\n #Keep only simplices that are fully in real domain\n adjmat[Tri.simplices[i],Tri.simplices[i]] = 1\n adjmat = sprs.triu(adjmat,k=1,format=\"lil\")\n for i in np.arange(0,Np):\n #Add periodic throats to the netowrk (if any)\n tpore2 = pnum[adjmat.rows[i]][ptype[adjmat.rows[i]]<0]\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = self['throat.conns']\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n #Add boundary pores and throats to the network\n newporetyps = 
np.unique(ptype[adjmat.rows[i]][ptype[adjmat.rows[i]]>0])\n newporenums = np.r_[self.num_pores():self.num_pores()+np.size(newporetyps)]\n tpore2 = newporenums\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n self['throat.conns'] = conns\n bcoords = np.zeros((7,3),dtype=float)\n coords = self['pore.coords']\n bcoords[1,:] = [coords[i,0], coords[i,1], 0-Lz*boffset]\n bcoords[2,:] = [0-Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[3,:] = [coords[i,0], -Ly*boffset, coords[i,2]]\n bcoords[4,:] = [coords[i,0], Ly+Ly*boffset, coords[i,2]]\n bcoords[5,:] = [Lx+Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[6,:] = [coords[i,0], coords[i,1], Lz+Lz*boffset]\n newporecoords = bcoords[newporetyps,:]\n coords = np.concatenate((coords,newporecoords),axis=0)\n self['pore.coords'] = coords\n #Reset number of pores and throats (easier than tracking it)\n nums = np.r_[0:np.shape(coords)[0]]\n self['pore.numbering'] = nums\n self['pore.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n nums = np.r_[0:np.shape(conns)[0]]\n self['throat.numbering'] = nums\n self['throat.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n logger.debug(\"add_boundaries: end of method\")", "def measure_allocation_diversity_bounds_errors(self, slots_assegnation_probabilities, LP_news_pool, iter=5000):\n for tech in [\"rand_1\", \"rand_2\", \"rand_3\"]:\n max_errors_per_iter = []\n for k in range(iter):\n tmp_slots_assegnation_probabilities = []\n for elem in slots_assegnation_probabilities:\n tmp_slots_assegnation_probabilities.append(elem.copy())\n constraints_error = [0] * len(self.categories)\n promenance_per_category = [0] * len(self.categories)\n result = self.__de_randomize_LP(LP_news_pool, tmp_slots_assegnation_probabilities, tech)\n for i in range(len(result)):\n category_index = self.categories.index(result[i].news_category)\n promenance_per_category[category_index] += self.real_slot_promenances[i]\n\n for i in range(len(promenance_per_category)):\n if promenance_per_category[i] < self.B[i] * -1:\n constraints_error[i] += (self.B[i] * -1 - promenance_per_category[i]) / (self.B[i] * -1)\n\n max_errors_per_iter.append(np.mean(constraints_error))\n if tech == \"rand_1\":\n self.rand_1_errors += max_errors_per_iter\n elif tech == \"rand_2\":\n self.rand_2_errors += max_errors_per_iter\n else:\n self.rand_3_errors += max_errors_per_iter", "def make_percent_access_polygons(self):\r\n self.logger.info(\r\n f\"Processing FacilityID {self.facility_id}, FromBreak {self.from_break}, ToBreak {self.to_break}...\")\r\n self.scratch_gdb = self._create_output_gdb()\r\n selected_polygons = self._select_polygons()\r\n joined_polygons = self._join_polygons(selected_polygons)\r\n dissolved_polygons = self._dissolve_cells(joined_polygons)\r\n self.job_result[\"polygons\"] = dissolved_polygons", "def simulate_boundary(self,print_every=1000,do_F_bound=True):\n n_t = self.t_span.size\n self.n_t = n_t\n x = self.x0.copy()\n self._triangulate(x)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.x = x.copy()\n self.x_save = np.ones((n_t,int(self.n_c*self.b_extra),2))*np.nan\n self.tri_save = -np.ones((n_t,int(self.tris.shape[0]*self.b_extra),3),dtype=np.int32)\n self.generate_noise_boundary()\n if do_F_bound is True:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x,recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i,:self.tris.shape[0]] = self.tris\n 
self.get_A(self.neighbours,self.vs)\n self.get_P(self.neighbours,self.vs)\n F = self.get_F(self.neighbours,self.vs)\n # F_bend = get_F_bend(self.n_c, self.CV_matrix, self.n_C, x, self.zeta)\n F_soft = weak_repulsion_boundary(self.Cents,self.a,self.k, self.CV_matrix,self.n_c,self.n_C)\n F_bound = boundary_tension(self.Gamma_bound,self.n_C,self.n_c,self.Cents,self.CV_matrix)\n x += self.dt*(F + F_soft + self.v0*self.noise[i,:x.shape[0]] + F_bound)\n # + F_bend + F_bound\n\n self.x = x\n self.x_save[i,:x.shape[0]] = x\n else:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x, recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i, :self.tris.shape[0]] = self.tris\n self.get_A(self.neighbours, self.vs)\n self.get_P(self.neighbours, self.vs)\n F = self.get_F(self.neighbours, self.vs)\n F_soft = weak_repulsion_boundary(self.Cents, self.a, self.k, self.CV_matrix, self.n_c, self.n_C)\n x += self.dt * (F + F_soft + self.v0 * self.noise[i, :x.shape[0]])\n\n self.x = x\n self.x_save[i, :x.shape[0]] = x\n print(\"Simulation complete\")\n return self.x_save", "def get_boundary_layers(cell_cent, el, num_lyrs, bc_loc, struct_grd):\n dim = len(el)\n bound_range = np.zeros(2*dim, dtype=float)\n bound_nodes = {} #dict to store the node numbers of centroids that lie within bound_range\n if(struct_grd):\n fctr = 1\n corr = 0\n lyrs = float(num_lyrs-1)+ 0.0001\n else:\n fctr = 2\n corr = 1\n lyrs = float(num_lyrs)+ 0.0001\n\n lyrs = 1.0001*float(num_lyrs-1)\n for d in range(dim):\n bound_range[2*d] = np.min(cell_cent[:,d]) + corr*np.diff(np.unique(cell_cent[:,d])[0:2])[0] + lyrs*el[d]\n bound_range[2*d+1] = np.max(cell_cent[:,d]) - corr*np.diff(np.unique(cell_cent[:,d])[0:2])[0] - lyrs*el[d]\n\n bound_nodes[2*d] = np.where(cell_cent[:,d] <= bound_range[2*d])\n bound_nodes[(2*d+1)] = np.where(cell_cent[:,d] >= bound_range[2*d+1])\n\n #store only those key value pair that are in the bc_loc\n #this in the end returns mesh with ghost layer cells, \n #if they've been applied already\n keys = bound_nodes.keys()\n keys_temp = [kk for kk in keys]\n for kk in keys_temp:\n if kk not in bc_loc:\n bound_nodes.pop(kk, None)\n \n return bound_nodes", "def decision_problem(P, Q, p, q, eps, mdist, P_dist, Q_dist):\n LF, BF = LF_BF(P, Q, p, q, eps, mdist, P_dist, Q_dist)\n rep, _, _ = LR_BR(LF, BF, p, q)\n return rep", "def buffered_pts_to_periodic_network_parallelogram(xy, BL, PV, BBox='auto', flex_pvxy=False, check=False):\n if BBox == 'auto':\n # Assuming that BBox is centered and has width, height of LL[0], LL[1]\n BBox = 0.5 * np.array([[-PV[0, 0], -PV[1, 1] - PV[0, 1]],\n [PV[0, 0], -PV[1, 1] + PV[0, 1]],\n [PV[0, 0], PV[1, 1] + PV[0, 1]],\n [-PV[0, 0], PV[1, 1] - PV[0, 1]]])\n\n bpath = mplpath.Path(BBox)\n keep = np.where(bpath.contains_points(xy))[0]\n if check:\n plt.plot(xy[:, 0], xy[:, 1], 'b.')\n plt.plot(BBox[:, 0], BBox[:, 1], 'r.-')\n plt.title('le.buffered_pts_to_periodic_network_parallelogram BBox')\n plt.show()\n\n minX = np.min(BBox[:, 0])\n maxX = np.max(BBox[:, 0])\n # Note that minY is the minY on the LEFT of the system, not total\n # Similarly, note that minY is the maxY on the RIGHT of the system, not total\n slope = PV[0, 1] / (maxX - minX)\n minY = BBox[0, 1]\n maxY = minY + PV[1, 1] + PV[0, 1]\n\n def lowerY(x):\n return minY + slope * (x - minX)\n\n def upperY(x):\n return minY + PV[1, 1] + slope * (x - minX)\n\n PVdict = {'e': PV[0],\n 'n': PV[1],\n 'w': -PV[0],\n 's': -PV[1],\n 'ne': PV[0] + PV[1],\n 'nw': 
-PV[0] + PV[1],\n 'sw': -PV[0] - PV[1],\n 'se': PV[0] - PV[1]}\n\n # Create a kd tree of the points\n tree = scipy.spatial.KDTree(xy)\n\n # Find bonds that will be cut. For each bond, match to other particle and add pair to BL and PVxydict\n BLcut, cutIND = find_cut_bonds(BL, keep)\n\n if check:\n plt.scatter(xy[:, 0], xy[:, 1], c='g', marker='x')\n plt.scatter(xy[keep, 0], xy[keep, 1], c='b', marker='o')\n highlight_bonds(xy, BL, ax=plt.gca(), color='b', show=False)\n highlight_bonds(xy, BLcut, ax=plt.gca(), color='r', lw=1, show=False)\n xxtmp = np.hstack((BBox[:, 0], np.array(BBox[:, 0])))\n print 'xxtmp = ', xxtmp\n yytmp = np.hstack((BBox[:, 1], np.array(BBox[:, 1])))\n print 'yytmp = ', yytmp\n plt.plot(xxtmp, yytmp, 'k-', lw=2)\n plt.title('Showing bonds that are cut, btwn original and mirrored network')\n plt.show()\n plt.scatter(xy[:, 0], xy[:, 1], c='g', marker='x')\n plt.scatter(xy[keep, 0], xy[keep, 1], c='b', marker='o')\n highlight_bonds(xy, BL, ax=plt.gca(), color='b', show=False)\n highlight_bonds(xy, BLcut, ax=plt.gca(), color='r', lw=1, show=False)\n xxtmp = np.hstack((BBox[:, 0], np.array(BBox[:, 0])))\n print 'xxtmp = ', xxtmp\n yytmp = np.hstack((BBox[:, 1], np.array(BBox[:, 1])))\n print 'yytmp = ', yytmp\n plt.plot(xxtmp, yytmp, 'k-', lw=2)\n plt.title('Showing bonds that are cut, with pt #s')\n for ind in range(len(xy)):\n plt.text(xy[ind, 0] + 0.1, xy[ind, 1] - 0.1, str(ind))\n plt.show()\n\n # Prepare image to display NSWE scattered on top\n highlight_bonds(xy, BL, ax=plt.gca(), color='lightgrey', show=False)\n highlight_bonds(xy, BLcut, ax=plt.gca(), color='dimgray', lw=1, show=False)\n print 'preparing image....'\n\n # preallocate BL2add and PVs\n BL2add = np.zeros((len(BLcut), 2), dtype=int)\n PVd = {}\n kk = 0\n for bond in BLcut:\n # which endpt is outside?\n ptA = bond[0]\n ptB = bond[1]\n # mpt is short for 'mirror point', the point outside the bounding box\n if ptA not in keep:\n mpt, kpt = ptA, ptB\n else:\n mpt, kpt = ptB, ptA\n if xy[mpt, 0] < minX:\n # Mirror particle is to the left of the system (West)\n if xy[mpt, 1] < lowerY(xy[mpt, 0]):\n # Mirror particle is SW\n bPV = PVdict['sw']\n elif xy[mpt, 1] > upperY(xy[mpt, 0]):\n # Mirror particle is NW\n bPV = PVdict['nw']\n else:\n # Mirror particle is West\n bPV = PVdict['w']\n elif xy[mpt, 0] > maxX:\n # Mirror particles is the right of the system (East)\n if xy[mpt, 1] < lowerY(xy[mpt, 0]):\n # Mirror particle is SE\n bPV = PVdict['se']\n elif xy[mpt, 1] > upperY(xy[mpt, 0]):\n # Mirror particle is NE\n bPV = PVdict['ne']\n else:\n # Mirror particle is East\n bPV = PVdict['e']\n elif xy[mpt, 1] < lowerY(xy[mpt, 0]):\n # Mirror particle is South\n bPV = PVdict['s']\n else:\n # Mirror particle is North\n bPV = PVdict['n']\n\n if check:\n print 'adding pt...'\n if (bPV == PVdict['sw']).all():\n plt.scatter(xy[mpt, 0], xy[mpt, 1], c='r', edgecolor='none', zorder=9999)\n elif (bPV == PVdict['w']).all():\n plt.scatter(xy[mpt, 0], xy[mpt, 1], c='g', edgecolor='none', zorder=9999)\n elif (bPV == PVdict['nw']).all():\n plt.scatter(xy[mpt, 0], xy[mpt, 1], c='y', edgecolor='none', zorder=9999)\n elif (bPV == PVdict['n']).all():\n plt.scatter(xy[mpt, 0], xy[mpt, 1], c='b', edgecolor='none', zorder=9999)\n elif (bPV == PVdict['ne']).all():\n plt.scatter(xy[mpt, 0], xy[mpt, 1], c='c', edgecolor='none', zorder=9999)\n elif (bPV == PVdict['e']).all():\n plt.scatter(xy[mpt, 0], xy[mpt, 1], c='m', edgecolor='none', zorder=9999)\n elif (bPV == PVdict['se']).all():\n plt.scatter(xy[mpt, 0], xy[mpt, 1], c='k', 
edgecolor='none', zorder=9999)\n elif (bPV == PVdict['s']).all():\n plt.scatter(xy[mpt, 0], xy[mpt, 1], c='w', edgecolor='none', zorder=9999)\n\n # Link keep point (kpt) to the particle that resides a vector -PV away from mirror particle\n dist, ind = tree.query(xy[mpt] - bPV)\n BL2add[kk] = np.array([-kpt, -ind])\n PVd[(kpt, ind)] = bPV\n kk += 1\n\n if check:\n plt.plot(np.hstack((BBox[:, 0], np.array([BBox[0, 0]]))),\n np.hstack((BBox[:, 1], np.array([BBox[0, 1]]))), 'r-')\n plt.show()\n\n if check:\n print 'PVd = ', PVd\n xyshake = xy + 0.1 * np.random.rand(np.shape(xy)[0], np.shape(xy)[1])\n display_lattice_2D(xyshake, np.abs(BL), title=\"showing extended lattice (w/o PBCs)\")\n\n # Crop network, and add back cut bonds as periodic ones\n BL = np.vstack((BL, BL2add))\n xytrim, NL, KL, BLtrim, PVxydict = remove_pts(keep, xy, BL)\n # Adjusting BL2add to account for smaller #npts (post-cropping) is already done in remove_pts\n # Adjust PVs to account for smaller #npts (post-cropping)\n remove = np.setdiff1d(np.arange(len(xy)), keep)\n\n if check:\n print 'PVd = ', PVd\n xyshake = xy + 0.1 * np.random.rand(np.shape(xy)[0], np.shape(xy)[1])\n display_lattice_2D(xyshake, np.abs(BL), title=\"showing extended lattice with BL2add\", close=False)\n plt.scatter(xy[remove, 0], xy[remove, 1], c='c', zorder=999999)\n plt.show()\n\n # Use PVd (which included buffered pts) to make PVxydict\n PVxydict = {}\n for key in PVd:\n # adjust key to lower indices\n # count how many pts in remove are lower than key[0] and key[1], respectively\n lower0 = np.sum(remove < key[0])\n lower1 = np.sum(remove < key[1])\n newkey = (key[0] - lower0, key[1] - lower1)\n PVxydict[newkey] = PVd[key]\n # if lower0 > 0 or lower1 > 0:\n # print 'key =', key\n # print 'newkey =', newkey\n # print 'lower0 =', lower0\n # print 'lower1 =', lower1\n\n if check:\n # Plot lattice without PBCs\n display_lattice_2D(xytrim, np.abs(BLtrim), title=\"showing lattice connectivity w/o PBCs\", close=False)\n for ind in range(len(xytrim)):\n plt.text(xytrim[ind, 0], xytrim[ind, 1], str(ind))\n\n plt.show()\n print 'PVxydict = ', PVxydict\n NL, KL = BL2NLandKL(BLtrim)\n PVx, PVy = PVxydict2PVxPVy(PVxydict, NL, KL, check=check)\n print 'PVx = ', PVx\n display_lattice_2D(xytrim, BLtrim, PVxydict=PVxydict, PVx=PVx, PVy=PVy,\n title=\"showing lattice connectivity with PBCs\")\n\n return xytrim, NL, KL, BLtrim, PVxydict", "def GetBoundaryEdgesPent(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n node_arranger = np.array([\n [0,1],\n [1,2],\n [2,3],\n [3,4],\n [4,0],\n ])\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],\n self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)\n\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = 
np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges", "def extract_boundary(xy, NL, KL, BL, check=False):\n # Clear periodic bonds from KL\n pbonds = np.where(KL.ravel() < 0)[0]\n if len(pbonds) > 0:\n print 'le: Found periodic bonds in le.extract_boundary(), clearing...'\n KLr = KL.ravel()\n KLr[pbonds] = 0\n KL = KLr.reshape(np.shape(KL))\n print 'le: pbonds = ', pbonds\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles = np.where(~KL.any(axis=1))[0]\n if len(dangles) > 0:\n print 'le: extract_boundary: Removing dangling points: dangles = ', dangles\n if check:\n plt.plot(xy[:, 0], xy[:, 1], 'b.')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[dangles, 0], xy[dangles, 1], 'ro')\n plt.title('Original point indices, before removing dangles. Dangles circled in red.')\n plt.show()\n\n translate_at_end = True\n\n NP = len(xy)\n\n nondangles = np.setdiff1d(np.arange(NP), dangles)\n # Note that remove_pts can handle periodic BL\n\n if len(nondangles) == 0:\n print 'There are no particles that are not part of dangling bonds. All particles are part of the boundary.'\n return np.arange(len(xy))\n\n xy, NL, KL, BL, PVxydict = remove_pts(nondangles, xy, BL)\n\n # Remove bonds which were periodic.\n pbonds = np.where(KL.ravel() < 0)[0]\n print 'le: pbonds = ', pbonds\n if pbonds:\n print 'le: Found periodic bonds in extract_boundary(), clearing...'\n KLr = KL.ravel()\n KLr[pbonds] = 0\n KL = KLr.reshape(np.shape(KL))\n print 'le: pbonds = ', pbonds\n\n if check:\n print 'le: NL = ', NL\n display_lattice_2D(xy, BL, NL=NL, KL=KL, title='Removed points in extract_boundary()')\n\n # xy = xy[nondangles]\n # NL = NL[nondangles]\n # KL = KL[nondangles]\n\n # translation converts indices of long old xy to small new xy\n # backtrans converts indices of small, new xy to indices of long, old xy\n # .1 .0\n # .0 trans ----->\n # . 
2 <----- backtrans .1\n # .3 .2\n translation = np.arange(NP, dtype=int)\n for IND in dangles:\n translation[IND:] -= 1\n # mark the removed point by -5\n translation[IND] = -5\n\n backtrans = np.where(translation > -1)[0]\n if check:\n print 'le: backtrans = ', backtrans\n print 'le: translation = ', translation\n\n # translation = np.where()\n\n else:\n translate_at_end = False\n\n # Initialize the list of boundary indices to be larger than necessary\n bb = np.zeros(2 * len(xy), dtype=int)\n\n # Start with the rightmost point, which is guaranteed to be\n # at the convex hull and thus also at the outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 0] == max(xy[:, 0]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_boundary(): Found rightmost pt: ', rightIND\n print 'le.extract_boundary(): with neighbors: ', NL[rightIND]\n print 'le.extract_boundary(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.01)\n\n # Grab the true neighbors of this starting point\n print 'le.extract_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n neighbors = NL[rightIND, np.argwhere(KL[rightIND].ravel()).ravel()]\n print 'le.extract_boundary(): neighbors = ', neighbors\n print 'le.extract_boundary(): rightIND = ', rightIND\n\n # Compute the angles of the neighbor bonds\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[rightIND, 1], xy[neighbors, 0] - xy[rightIND, 0]).ravel(),\n 2 * np.pi)\n if check:\n print 'KL[rightIND] = ', KL[rightIND]\n print 'KL[rightIND,0] = ', KL[rightIND, 0]\n print 'KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'neighbors = ', neighbors\n print 'angles = ', angles\n\n # Take the second particle to be the one with the lowest bond angle (will be >= pi/2)\n # print ' angles==min--> ', angles==min(angles)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n\n dmyi = 1\n # as long as we haven't completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n angles, neighbors = bond_angles_wrt_bond(bb[dmyi - 1], nextIND, xy, NL, KL)\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n # plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.pause(0.01)\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 'le.extract_boundary(): Translating boundary points back into original indices...'\n # print 'boundary = ', boundary\n # print 'translation = ', translation\n # print 'backtrans = ', backtrans\n boundary = backtrans[boundary]\n\n return boundary", "def __init__(self, bounds, p=16, eps=1e-14):\n self.bounds = bounds\n self.p = p\n 
# compute the location of the collocation nodes\n nodes, weights = np.polynomial.legendre.leggauss(p)\n nodes = nodes * 0.5 + 0.5\n ranx = bounds[1] - bounds[0]\n rany = bounds[3] - bounds[2]\n if np.abs(ranx - rany) > 1e-15:\n raise Exception('For now, periodization bounds must be a square.')\n self.width = ranx\n self.weights = weights*0.5*self.width\n nodey = nodes*rany + bounds[2]\n rep = lambda x: np.repeat(x, p)\n self.node_left = np.row_stack([ rep(bounds[0]), nodey ])\n self.node_right = np.row_stack([ rep(bounds[1]), nodey ])\n nodex = nodes*ranx + bounds[0]\n self.node_bottom = np.row_stack([ nodex, rep(bounds[2]) ])\n self.node_top = np.row_stack([ nodex, rep(bounds[3]) ])\n self.check = np.column_stack([ self.node_left, self.node_right, \\\n self.node_bottom, self.node_top ])\n # get normals (not outward facing!)\n self.normal_left = np.row_stack([ rep(1.0), rep(0.0) ])\n self.normal_right = np.row_stack([ rep(1.0), rep(0.0) ])\n self.normal_bottom = np.row_stack([ rep(0.0), rep(1.0) ])\n self.normal_top = np.row_stack([ rep(0.0), rep(1.0) ])\n self.normals = np.column_stack([ self.normal_left, self.normal_right,\n self.normal_bottom, self.normal_top ])\n # generate sources\n self.n_check = 4*p\n self.n_sources = self.n_check\n self.center = [ 0.5*(self.bounds[0]+self.bounds[1]), \n 0.5*(self.bounds[2]+self.bounds[3]) ]\n radius = 0.5*np.sqrt(2)*self.width\n adj = np.log(eps)/self.n_sources\n if adj < -0.5:\n raise Exception('Increase p (or decrease eps) to guarantee convergence.')\n Radius = radius/(1 + adj)\n dd = 0.0\n Radius = 0.5*self.width*(4-np.sqrt(2)-2*dd)\n theta = np.linspace(0, 2*np.pi, self.n_check, endpoint=False)\n self.source = np.row_stack([ self.center[0] + Radius*np.cos(theta),\n self.center[1] + Radius*np.sin(theta) ])\n # generate source --> targ Stokes velocity matrix\n S2U, S2V = stokes_kernel(self.source, self.check)\n # generate source --> targ Stokes stress matrix\n S2Sxx, S2Sxy, S2Syy = stokes_kernel_stress(self.source, self.check)\n S2SNx = S2Sxx*self.normals[0][:,None] + S2Sxy*self.normals[1][:,None]\n S2SNy = S2Sxy*self.normals[0][:,None] + S2Syy*self.normals[1][:,None]\n # generate the full system that we'll have to solve\n self.MAT = np.zeros([2*self.n_check, 2*self.n_sources], dtype=float)\n self.MAT[0*p:1*p] = S2U[1*p:2*p] - S2U[0*p:1*p]\n self.MAT[1*p:2*p] = S2U[3*p:4*p] - S2U[2*p:3*p]\n self.MAT[2*p:3*p] = S2V[1*p:2*p] - S2V[0*p:1*p]\n self.MAT[3*p:4*p] = S2V[3*p:4*p] - S2V[2*p:3*p]\n self.MAT[4*p:5*p] = S2SNx[1*p:2*p] - S2SNx[0*p:1*p]\n self.MAT[5*p:6*p] = S2SNx[3*p:4*p] - S2SNx[2*p:3*p]\n self.MAT[6*p:7*p] = S2SNy[1*p:2*p] - S2SNy[0*p:1*p]\n self.MAT[7*p:8*p] = S2SNy[3*p:4*p] - S2SNy[2*p:3*p]\n self.BIG_MAT = np.zeros([2*self.n_check+2, 2*self.n_sources+2], dtype=float)\n self.BIG_MAT[:-2,:-2] = self.MAT\n self.BIG_MAT[-2,0*self.n_sources:1*self.n_sources] = 1.0\n self.BIG_MAT[-1,1*self.n_sources:2*self.n_sources] = 1.0\n self.BIG_MAT[0*self.n_sources:1*self.n_sources,-2] = 1.0\n self.BIG_MAT[1*self.n_sources:2*self.n_sources,-1] = 1.0\n # take the SVD of this matrix\n self.U, D, self.VT = np.linalg.svd(self.BIG_MAT, full_matrices=False)\n D[D < eps] = np.Inf\n self.DI = 1.0/D", "def set_bc(self, problem):\n bcs = problem.bcs\n n_bound = cfg.const['N_GHOST_CELLS']\n # Left X-b.c.\n for i in range(0, self.i_min):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[0] == 't': \n self.U[i][j][k] = self.U[self.i_min][j][k]\n elif bcs[0] == 'w':\n for num in [0, 2, 3, 4]: # 0 -> 3, 1 -> 2, i_min-1 -> 
i_min, i_min-2 -> i_min+1\n self.U[i][j][k][num] = self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n else:\n print(\"Errof field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right X-b.c.\n for i in range(self.i_max, self.i_max+n_bound):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[1] == 't':\n self.U[i][j][k] = self.U[self.i_max-1][j][k]\n elif bcs[1] == 'w':\n for num in [0, 2, 3, 4]: # i_max -> i_max-1 , i_max+1-> i_max-2\n self.U[i][j][k][num] = self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_min):\n for k in range(self.k_min, self.k_max): \n if bcs[2] == 't':\n self.U[i][j][k] = self.U[i][self.j_min][k]\n elif bcs[2] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = - self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(self.j_max, self.j_max+n_bound):\n for k in range(self.k_min, self.k_max): \n if bcs[3] == 't':\n self.U[i][j][k] = self.U[i][self.j_max-1][k]\n elif bcs[3] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = -self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(0, self.k_min): \n if bcs[4] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_min]\n elif bcs[4] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(self.k_max, self.k_max+n_bound):\n if bcs[5] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_max-1]\n elif bcs[5] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! 
Bye!\")", "def irr_boundary_as_space(self):\n p1,p2 = self.next_marks()\n \n return Mgn(self.genus - 1, self.marks.union([p1,p2])), p1, p2", "def buffered_pts_to_periodic_network(xy, BL, LL, BBox=None, check=False):\n if BBox is None or isinstance(BBox, str):\n # Assuming that BBox is centered and has width, height of LL[0], LL[1]\n BBox = 0.5 * np.array([[-LL[0], -LL[1]], [LL[0], -LL[1]], [LL[0], LL[1]], [-LL[0], LL[1]]])\n keep = np.where(np.logical_and(abs(xy[:, 0]) < LL[0] * 0.5, abs(xy[:, 1]) < LL[1] * 0.5))[0]\n else:\n bpath = mplpath.Path(BBox)\n keep = np.where(bpath.contains_points(xy))[0]\n if check:\n print 'checking that keep is not a logical ==> '\n print ' this would be bool keep = ', bpath.contains_points(xy)\n print ' and this is keep = ', keep\n\n minX = np.min(BBox[:, 0])\n maxX = np.max(BBox[:, 0])\n minY = np.min(BBox[:, 1])\n maxY = np.max(BBox[:, 1])\n PVdict = {'e': np.array([LL[0], 0.0]),\n 'n': np.array([0.0, LL[1]]),\n 'w': np.array([-LL[0], 0.0]),\n 's': np.array([0.0, -LL[1]]),\n 'ne': np.array([LL[0], LL[1]]),\n 'nw': np.array([-LL[0], LL[1]]),\n 'sw': np.array([-LL[0], -LL[1]]),\n 'se': np.array([LL[0], -LL[1]])}\n\n # Create a kd tree of the points\n tree = scipy.spatial.KDTree(xy)\n\n # Find bonds that will be cut. For each bond, match to other particle and add pair to BL and PVxydict\n BLcut, cutIND = find_cut_bonds(BL, keep)\n\n if check:\n plt.scatter(xy[:, 0], xy[:, 1], c='g', marker='x')\n plt.scatter(xy[keep, 0], xy[keep, 1], c='b', marker='o')\n highlight_bonds(xy, BL, ax=plt.gca(), color='b', show=False)\n highlight_bonds(xy, BLcut, ax=plt.gca(), color='r', lw=1, show=False)\n xxtmp = np.hstack((BBox[:, 0], np.array(BBox[:, 0])))\n print 'xxtmp = ', xxtmp\n yytmp = np.hstack((BBox[:, 1], np.array(BBox[:, 1])))\n print 'yytmp = ', yytmp\n plt.plot(xxtmp, yytmp, 'k-', lw=2)\n plt.title('Showing bonds that are cut, btwn original and mirrored network')\n plt.show()\n\n # preallocate BL2add and PVs\n BL2add = np.zeros((len(BLcut), 2), dtype=int)\n PVd = {} # = np.zeros((len(BLcut),2), dtype=float)\n kk = 0\n for bond in BLcut:\n # which endpt is outside?\n ptA = bond[0]\n ptB = bond[1]\n # mpt is short for 'mirror point', the point outside the bounding box\n if ptA not in keep:\n mpt, kpt = ptA, ptB\n else:\n mpt, kpt = ptB, ptA\n if xy[mpt, 0] < minX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SW\n PV = PVdict['sw']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NW\n PV = PVdict['nw']\n else:\n # Mirror particle is West\n PV = PVdict['w']\n elif xy[mpt, 0] > maxX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SE\n PV = PVdict['se']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NE\n PV = PVdict['ne']\n else:\n # Mirror particle is East\n PV = PVdict['e']\n elif xy[mpt, 1] < minY:\n # Mirror particle is South\n PV = PVdict['s']\n else:\n # Mirror particle is North\n PV = PVdict['n']\n\n # Get index of the particle that resides a vector -PV away from mirror particle\n dist, ind = tree.query(xy[mpt] - PV)\n BL2add[kk] = np.array([-kpt, -ind])\n PVd[(kpt, ind)] = PV\n kk += 1\n\n if check:\n print 'PVd = ', PVd\n display_lattice_2D(xy, np.abs(BL), title=\"showing extended lattice (w/o PBCs)\")\n\n # Crop network, and add back cut bonds as periodic ones\n BL = np.vstack((BL, BL2add))\n xytrim, NL, KL, BLtrim, PVxydict = remove_pts(keep, xy, BL)\n # Adjusting BL2add to account for smaller #npts (post-cropping) is already done in remove_pts\n # Adjust PVs to account for smaller #npts (post-cropping)\n remove = np.setdiff1d(np.arange(len(xy)), 
keep)\n\n # PVxydict should be correct as is, from output of remove_pts...\n PVxydict_check = {}\n for key in PVd:\n # adjust key to lower indices\n # count how many pts in remove are lower than key[0] and key[1], respectively\n lower0 = np.sum(remove < key[0])\n lower1 = np.sum(remove < key[1])\n newkey = (key[0] - lower0, key[1] - lower1)\n PVxydict_check[newkey] = PVd[key]\n print 'PVxydict = ', PVxydict\n print 'PVxydict_check = ', PVxydict_check\n if PVxydict is None:\n PVxydict = PVxydict_check\n else:\n raise RuntimeError('Are these PVxydicts the same?')\n\n if check:\n # Plot lattice without PBCs\n display_lattice_2D(xytrim, np.abs(BLtrim), title=\"showing lattice connectivity w/o PBCs\")\n display_lattice_2D(xytrim, BLtrim, PVxydict=PVxydict, title=\"showing lattice connectivity with PBCs\")\n\n return xytrim, NL, KL, BLtrim, PVxydict", "def center_of_gravity_evaluation(F_PERC, P_PERC, afg, awg, mw, ed, ui, bi):\n\n max_seg_n = np.max([np.amax(afg.fuse_seg_nb), np.amax(awg.wing_seg_nb)])\n t_nb = afg.fus_nb + awg.w_nb # Number of parts not counting symmetry\n tot_nb = afg.fuse_nb + awg.wing_nb # Number of parts counting symmetry\n segments_nb = []\n fuse_fuel_vol = 0\n pass_vol = 0\n\n for i in range(1, afg.fus_nb + 1):\n segments_nb.append(afg.fuse_seg_nb[i - 1])\n if ui.F_FUEL[i - 1]:\n fuse_fuel_vol += afg.fuse_fuel_vol[i - 1]\n if np.all(afg.cabin_seg[:, i - 1]) == 1:\n pass_vol += afg.fuse_vol[i - 1]\n else:\n pass_vol += afg.fuse_cabin_vol[i - 1]\n\n htw = 0\n x0 = 0\n s = 0\n for i in range(1, awg.w_nb + 1):\n segments_nb.append(awg.wing_seg_nb[i - 1])\n if awg.wing_sym[i - 1] != 0:\n segments_nb.append(awg.wing_seg_nb[i - 1])\n s += 1\n if awg.is_horiz[i - 1 + s]:\n if i != awg.main_wing_index:\n htw = i\n else:\n x = np.amax(awg.wing_center_seg_point[:, i + s - 1, 0])\n if x > x0:\n tw = i\n x0 = x\n\n mass_seg_i = np.zeros((max_seg_n, tot_nb))\n oem_vol = (awg.wing_tot_vol - awg.wing_fuel_vol) + (np.sum(afg.fuse_vol) - fuse_fuel_vol)\n\n # Evaluating oem density, fuel density, passenger density\n if bi.USER_EN_PLACEMENT:\n oem_par = (mw.operating_empty_mass - mw.mass_engines) / oem_vol\n en = mw.mass_engines\n else:\n oem_par = mw.operating_empty_mass / oem_vol\n en = 0\n\n mpass_par = (mw.mass_payload * (P_PERC / 100.0)) / pass_vol\n\n mfuel_par = (mw.mass_fuel_tot * (F_PERC / 100.0)) / (awg.wing_fuel_vol + fuse_fuel_vol)\n\n mtom = (\n mw.operating_empty_mass\n + mw.mass_payload * (P_PERC / 100)\n + mw.mass_fuel_tot * (F_PERC / 100)\n - en\n )\n\n # Definition of the mass of each segment\n ex = False\n wg = []\n for i in range(1, afg.fus_nb + 1):\n if ui.F_FUEL[i - 1]:\n for j in range(1, afg.fuse_seg_nb[i - 1] + 1):\n mass_seg_i[j - 1][i - 1] = (\n oem_par + (mfuel_par * ui.F_FUEL[i - 1] / 100)\n ) * afg.fuse_seg_vol[j - 1][i - 1]\n else:\n for j in range(1, afg.fuse_seg_nb[i - 1] + 1):\n if int(afg.cabin_seg[j - 1][i - 1]) == 1:\n mass_seg_i[j - 1][i - 1] = (oem_par + mpass_par) * afg.fuse_seg_vol[j - 1][\n i - 1\n ]\n else:\n mass_seg_i[j - 1][i - 1] = oem_par * afg.fuse_seg_vol[j - 1][i - 1]\n w = 0\n for i in range(afg.fus_nb + 1, t_nb + 1):\n for j in range(1, awg.wing_seg_nb[i - 1 - afg.fus_nb] + 1):\n if awg.is_horiz[i + w - 1 - afg.fus_nb]:\n mass_seg_i[j - 1][i - 1 + w] = oem_par * (\n awg.wing_seg_vol[j - 1][i - 1 - afg.fus_nb]\n - awg.wing_fuel_seg_vol[j - 1][i - 1 - afg.fus_nb]\n ) + mfuel_par * (awg.wing_fuel_seg_vol[j - 1][i - 1 - afg.fus_nb])\n else:\n mass_seg_i[j - 1][i - 1 + w] = (\n oem_par * awg.wing_seg_vol[j - 1][i - 1 - afg.fus_nb]\n )\n 
wg.append(i - afg.fus_nb)\n if awg.wing_sym[i - 1 - afg.fus_nb] != 0:\n w += 1\n mass_seg_i[:, i - 1 + w] = mass_seg_i[:, i - 2 + w]\n wg.append(i - afg.fus_nb)\n if i + w == tot_nb:\n break\n # Mass check\n while not ex:\n if abs(round(mtom, 3) - round(np.sum(mass_seg_i), 3)) < 0.0001:\n ex = True\n else:\n mass = (round(mtom, 3) - round(np.sum(mass_seg_i), 3)) / 2\n if not ed.WING_MOUNTED:\n if htw != 0:\n a = wg.index(htw)\n else:\n a = wg.index(tw)\n else:\n a = wg.index(awg.main_wing_index)\n mass_seg_i[0][afg.fuse_nb + a] = mass_seg_i[0][afg.fuse_nb + a] + mass\n if awg.is_horiz[a]:\n mass_seg_i[0][afg.fuse_nb + a + 1] = mass_seg_i[0][afg.fuse_nb + a + 1] + mass\n else:\n mass_seg_i[0][afg.fuse_nb + a] = mass_seg_i[0][afg.fuse_nb + a] + mass\n\n awg.wing_center_seg_point.resize(max_seg_n, awg.wing_nb, 3)\n afg.fuse_center_seg_point.resize(max_seg_n, afg.fuse_nb, 3)\n\n airplane_centers_segs = np.concatenate(\n (afg.fuse_center_seg_point, awg.wing_center_seg_point), 1\n )\n\n # CoG evalution\n if bi.USER_EN_PLACEMENT:\n cog_enx = np.sum(ed.EN_PLACEMENT[:, 0] * ed.en_mass)\n cog_eny = np.sum(ed.EN_PLACEMENT[:, 1] * ed.en_mass)\n cog_enz = np.sum(ed.EN_PLACEMENT[:, 2] * ed.en_mass)\n else:\n cog_enx = 0.0\n cog_eny = 0.0\n cog_enz = 0.0\n\n center_of_gravity = []\n center_of_gravity.append(\n round((np.sum(airplane_centers_segs[:, :, 0] * mass_seg_i) + cog_enx) / mtom, 3)\n )\n center_of_gravity.append(\n round((np.sum(airplane_centers_segs[:, :, 1] * mass_seg_i) + cog_eny) / mtom, 3)\n )\n center_of_gravity.append(\n round((np.sum(airplane_centers_segs[:, :, 2] * mass_seg_i) + cog_enz) / mtom, 3)\n )\n\n for i in range(1, 4):\n if abs(center_of_gravity[i - 1]) < 10 ** (-5):\n center_of_gravity[i - 1] = 0.0\n\n return (center_of_gravity, mass_seg_i, airplane_centers_segs)", "def calculateElementBoundaryCoefficients(self):\n pass", "def free_line(p, eps, s, dps1, dps2, ds):\n px = p[0]\n py = p[1]\n s1x = s[0, 0]\n s1y = s[0, 1]\n s2x = s[1, 0]\n s2y = s[1, 1]\n if s1x == s2x and s1y == s2y:\n if eucl_dist(p, s[0]) > eps:\n lf = [-1, -1]\n else:\n lf = [0, 1]\n else:\n if point_to_seg(p, s[0], s[1], dps1, dps2, ds) > eps:\n # print(\"No Intersection\")\n lf = [-1, -1]\n else:\n segl = eucl_dist(s[0], s[1])\n segl2 = segl * segl\n intersect = circle_line_intersection(px, py, s1x, s1y, s2x, s2y, eps)\n if intersect[0][0] != intersect[1][0] or intersect[0][1] != intersect[1][1]:\n i1x = intersect[0, 0]\n i1y = intersect[0, 1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n\n i2x = intersect[1, 0]\n i2y = intersect[1, 1]\n u2 = (((i2x - s1x) * (s2x - s1x)) + ((i2y - s1y) * (s2y - s1y))) / segl2\n ordered_point = sorted((0, 1, u1, u2))\n lf = ordered_point[1:3]\n else:\n if px == s1x and py == s1y:\n lf = [0, 0]\n elif px == s2x and py == s2y:\n lf = [1, 1]\n else:\n i1x = intersect[0][0]\n i1y = intersect[0][1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n if 0 <= u1 <= 1:\n lf = [u1, u1]\n else:\n lf = [-1, -1]\n return lf", "def check_boundary(self,x):\n b_cells = np.zeros(self.n_c)\n b_cells[self.n_C:] = 1\n vBC = b_cells[self.tris]\n considered_triangles = vBC.sum(axis=1) == 2\n add_extra = ((self.Angles*(1-vBC)>np.pi/2).T*considered_triangles.T).T\n if add_extra.any():\n I,J = np.nonzero(add_extra)\n for k,i in enumerate(I):\n j = J[k]\n xs = x[self.tris[i]]\n re = xs[np.mod(j-1,3)] - xs[np.mod(j+1,3)]\n re = re/np.linalg.norm(re)\n re = np.array([re[1],-re[0]])\n rpe = xs[j]\n x_new = 2*np.dot(xs[np.mod(j-1,3)]-rpe,re)*re + 
rpe\n x = np.vstack((x,x_new))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n\n C = get_C_boundary(self.n_c,self.CV_matrix)\n #\n # #Remove extra cells\n # keep_mask = C[self.n_C:, :self.n_C].sum(axis=1)>0 #I'm assuming this is the same thing. This removes all boundary centroids that are not connected to at least one real centroid.\n # if keep_mask.any():\n # c_keep = np.nonzero(keep_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n #\n\n #Remove all boundary particles not connected to exactly two other boundary particles\n remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)!=2\n if remove_mask.any():\n c_keep = np.nonzero(~remove_mask)[0]\n x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n self.Angles = tri_angles(x, self.tris)\n #\n # remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)==0\n # if remove_mask.any():\n # c_keep = np.nonzero(~remove_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n # self.Angles = tri_angles(x, self.tris)\n\n\n return x", "def apply_boundary_conditions(self):\n E = self.__mesh.get_edge_list()\n\n # Top and bottom wall Dirichlet bcs (boundary_id = 21)\n \n e21_iterator = self.__mesh.edge_iterator(21)\n\n self.edge_center_value[e21_iterator[0]:e21_iterator[1]+1] = 0.0 \n \n # Left Dirichlet bc (boundary_id = 2)\n \n e2_iterator = self.__mesh.edge_iterator(2)\n\n b = np.sin(np.pi*self.y_e[e2_iterator[0]:e2_iterator[1]+1])\n\n self.edge_center_value[e2_iterator[0]:e2_iterator[1]+1] \\\n = b\n \n # Right Neumann bc (Zero flux, boundary_id = 3)\n \n e3_iterator = self.__mesh.edge_iterator(3)\n \n for i in range(e3_iterator[0], e3_iterator[1]+1):\n LC = E[i].get_straddling_cells()\n n = LC.get_global_cell_number() - 1\n self.edge_center_value[i] = self.cell_centroid_value[n]", "def single_eval_boundary(fg_boundary, gt_boundary, bound_pix=0):\n assert np.atleast_3d(fg_boundary).shape[2] == 1\n\n from skimage.morphology import binary_dilation, disk\n\n fg_dil = binary_dilation(fg_boundary, disk(bound_pix))\n gt_dil = binary_dilation(gt_boundary, disk(bound_pix))\n\n # Get the intersection\n gt_match = gt_boundary * fg_dil\n fg_match = fg_boundary * gt_dil\n\n # Area of the intersection\n n_fg = np.sum(fg_boundary)\n n_gt = np.sum(gt_boundary)\n\n # % Compute precision and recall\n if n_fg == 0 and n_gt > 0:\n precision = 1\n recall = 0\n elif n_fg > 0 and n_gt == 0:\n precision = 0\n recall = 1\n elif n_fg == 0 and n_gt == 0:\n precision = 1\n recall = 1\n else:\n precision = np.sum(fg_match) / float(n_fg)\n recall = np.sum(gt_match) / float(n_gt)\n\n # Compute F meas\n # ure\n if precision + recall == 0:\n F = 0\n else:\n F = 2 * precision * recall / (precision + recall)\n\n return F, precision, recall", "def boundaries_free(*args):\n return _ida_hexrays.boundaries_free(*args)", "def build_constraints_boundaries(self):\n\n # Trapezoidal and Hermite-Simpson methods can't compute\n # defects at the last node contrary to pseudospectral methods\n coll_method = self.options['tr_method'] in [\n 'trapezoidal', 'hermite-simpson']\n n_nodes = self.problem.prm['n_nodes'] - \\\n 1 if coll_method else self.problem.prm['n_nodes']\n\n # Defects lower and upper boundaries\n defects_low = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n defects_upp = np.zeros(\n 
self.problem.prm['n_states'] * n_nodes)\n\n # Path lower and upper boundaries\n path_low = np.hstack([self.problem.low_bnd.path]\n * (self.problem.prm['n_nodes']))\n path_upp = np.hstack([self.problem.upp_bnd.path]\n * (self.problem.prm['n_nodes']))\n\n # Events lower and upper boundaries\n event_low = self.problem.low_bnd.event\n event_upp = self.problem.upp_bnd.event\n\n # Assembly of the lower and upper boundaries vectors\n low = np.concatenate((defects_low, path_low, event_low))\n upp = np.concatenate((defects_upp, path_upp, event_upp))\n\n return low, upp", "def RTSpace( ref_el , deg ):\n sd = ref_el.get_spatial_dimension()\n\n vec_Pkp1 = polynomial_set.ONPolynomialSet( ref_el , deg+1 , (sd,) )\n\n dimPkp1 = expansions.polynomial_dimension( ref_el , deg+1 )\n dimPk = expansions.polynomial_dimension( ref_el , deg )\n dimPkm1 = expansions.polynomial_dimension( ref_el , deg-1 )\n\n vec_Pk_indices = reduce( lambda a,b: a+b , \\\n [ list(range(i*dimPkp1,i*dimPkp1+dimPk)) \\\n for i in range(sd) ] )\n vec_Pk_from_Pkp1 = vec_Pkp1.take( vec_Pk_indices )\n\n Pkp1 = polynomial_set.ONPolynomialSet( ref_el , deg + 1 )\n PkH = Pkp1.take( list(range(dimPkm1,dimPk)) )\n\n Q = quadrature.make_quadrature( ref_el , 2 * deg + 2 )\n\n # have to work on this through \"tabulate\" interface\n # first, tabulate PkH at quadrature points\n Qpts = numpy.array( Q.get_points() )\n Qwts = numpy.array( Q.get_weights() )\n\n zero_index = tuple( [ 0 for i in range(sd) ] )\n\n PkH_at_Qpts = PkH.tabulate( Qpts )[zero_index]\n Pkp1_at_Qpts = Pkp1.tabulate( Qpts )[zero_index]\n\n PkHx_coeffs = numpy.zeros( (PkH.get_num_members() , \\\n sd, \\\n Pkp1.get_num_members()) , \"d\" )\n\n import time\n t1 = time.time()\n for i in range( PkH.get_num_members() ):\n for j in range( sd ):\n fooij = PkH_at_Qpts[i,:] * Qpts[:,j] * Qwts\n PkHx_coeffs[i,j,:] = numpy.dot( Pkp1_at_Qpts , fooij )\n\n PkHx = polynomial_set.PolynomialSet( ref_el , \\\n deg , \\\n deg + 1 , \\\n vec_Pkp1.get_expansion_set() , \\\n PkHx_coeffs , \\\n vec_Pkp1.get_dmats() )\n\n return polynomial_set.polynomial_set_union_normalized( vec_Pk_from_Pkp1 , PkHx )", "def drawValidationNeedles(self, nb=None):\r\n # productive #onButton\r\n profprint()\r\n # reset report table\r\n # print \"Draw manually segmented needles...\"\r\n # self.table =None\r\n # self.row=0\r\n widget = slicer.modules.NeedleFinderWidget\r\n if nb: widget.editNeedleTxtBox.value = nb\r\n widget.initTableView()\r\n self.deleteEvaluationNeedlesFromTable()\r\n while slicer.util.getNodes('manual-seg_'+str(widget.editNeedleTxtBox.value)) != {}:\r\n nodes = slicer.util.getNodes('manual-seg_'+str(widget.editNeedleTxtBox.value))\r\n for node in nodes.values():\r\n slicer.mrmlScene.RemoveNode(node)\r\n\r\n tableValueCtrPt = [[[999, 999, 999] for i in range(100)] for j in range(100)]\r\n modelNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationFiducialNode')\r\n nbNode = modelNodes.GetNumberOfItems()\r\n for nthNode in range(nbNode):\r\n modelNode = slicer.mrmlScene.GetNthNodeByClass(nthNode, 'vtkMRMLAnnotationFiducialNode')\r\n modelNodeName = modelNode.GetName().strip('.')\r\n modelNodeName = modelNodeName.strip('\\r')\r\n if modelNode.GetName()[0] == '.' 
and len(modelNodeName.split('-')) == 2:\r\n needleNumber = int(modelNodeName.split('-')[0])\r\n needleStep = int(modelNodeName.split('-')[1])\r\n # if modelNode.GetAttribute(\"ValidationNeedle\") == \"1\":\r\n # needleNumber = int(modelNode.GetAttribute(\"NeedleNumber\"))\r\n if needleNumber == widget.editNeedleTxtBox.value:\r\n # needleStep = int(modelNode.GetAttribute(\"NeedleStep\"))\r\n coord = [0, 0, 0]\r\n modelNode.GetFiducialCoordinates(coord)\r\n tableValueCtrPt[needleNumber][needleStep] = coord\r\n print needleNumber, needleStep, coord\r\n # print self.tableValueCtrPt[needleNumber][needleStep]\r\n\r\n for i in range(len(tableValueCtrPt)):\r\n if not all(e == [999, 999, 999] for e in tableValueCtrPt[i]):\r\n # if self.tableValueCtrPt[i][1] != [999, 999, 999]:\r\n colorVar = random.randrange(50, 100, 1) # ??? /(100.)\r\n controlPointsUnsorted = [val for val in tableValueCtrPt[i] if val != [999, 999, 999]]\r\n controlPoints = self.sortTable(controlPointsUnsorted, (2, 1, 0))\r\n # print \"Control points unsorted\", controlPointsUnsorted\r\n print \"Control points\", controlPoints\r\n self.addNeedleToScene(controlPoints, i, 'Validation')\r\n self.observeManualNeedles()\r\n else:\r\n # print i\r\n pass\r\n self.findAxialSegmentationLimitFromMarker(bForceFallback=True) #AM force the presence of the limit marker\r", "def GetBoundaryEdgesQuad(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n node_arranger = NodeArrangementQuad(p-1)[0]\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)\n\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges", "def buffered_pts_to_periodicstrip(xy, BL, LL, BBox='auto', check=False):\n if BBox == 'auto':\n # Assuming that BBox is centered and has width, height of LL[0], LL[1]\n BBox = 0.5 * np.array([[-LL[0], -LL[1]], [LL[0], -LL[1]], [LL[0], LL[1]], [-LL[0], LL[1]]])\n keep = np.where(np.logical_and(abs(xy[:, 0]) < LL[0] * 0.5, abs(xy[:, 1]) < LL[1] * 
0.5))[0]\n else:\n bpath = mplpath.Path(BBox)\n keep = np.where(bpath.contains_points(xy))[0]\n if check:\n print 'checking that keep is not a logical ==> '\n print ' this would be bool keep = ', bpath.contains_points(xy)\n print ' and this is keep = ', keep\n\n minX = np.min(BBox[:, 0])\n maxX = np.max(BBox[:, 0])\n minY = np.min(BBox[:, 1])\n maxY = np.max(BBox[:, 1])\n PVdict = {'e': np.array([LL[0], 0.0]),\n 'n': np.array([0.0, LL[1]]),\n 'w': np.array([-LL[0], 0.0]),\n 's': np.array([0.0, -LL[1]]),\n 'ne': np.array([LL[0], LL[1]]),\n 'nw': np.array([-LL[0], LL[1]]),\n 'sw': np.array([-LL[0], -LL[1]]),\n 'se': np.array([LL[0], -LL[1]])}\n\n # Create a kd tree of the points\n tree = scipy.spatial.KDTree(xy)\n\n # Find bonds that will be cut. For each bond, match to other particle and add pair to BL and PVxydict\n BLcut, cutIND = find_cut_bonds(BL, keep)\n\n if check:\n plt.scatter(xy[:, 0], xy[:, 1], c='g', marker='x')\n plt.scatter(xy[keep, 0], xy[keep, 1], c='b', marker='o')\n highlight_bonds(xy, BL, ax=plt.gca(), color='b', show=False)\n highlight_bonds(xy, BLcut, ax=plt.gca(), color='r', lw=5, alpha=0.4, show=False)\n xxtmp = np.hstack((BBox[:, 0], np.array(BBox[:, 0])))\n print 'xxtmp = ', xxtmp\n yytmp = np.hstack((BBox[:, 1], np.array(BBox[:, 1])))\n print 'yytmp = ', yytmp\n plt.plot(xxtmp, yytmp, 'k-', lw=1)\n plt.title('Showing bonds that are cut, btwn original and mirrored network')\n plt.show()\n\n # preallocate BL2add and PVs\n BL2add = np.zeros((len(BLcut), 2), dtype=int)\n PVd = {} # = np.zeros((len(BLcut),2), dtype=float)\n kk = 0\n for bond in BLcut:\n # which endpt is outside?\n ptA = bond[0]\n ptB = bond[1]\n # mpt is short for 'mirror point', the point outside the bounding box\n if ptA not in keep:\n mpt, kpt = ptA, ptB\n else:\n mpt, kpt = ptB, ptA\n\n # Assume that the bond should remain broken unless the PV is 'e' or 'w' (east or west)\n ok_stripbc = False\n if xy[mpt, 0] < minX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SW\n PV = PVdict['sw']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NW\n PV = PVdict['nw']\n else:\n # Mirror particle is West\n PV = PVdict['w']\n ok_stripbc = True\n elif xy[mpt, 0] > maxX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SE\n PV = PVdict['se']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NE\n PV = PVdict['ne']\n else:\n # Mirror particle is East\n PV = PVdict['e']\n ok_stripbc = True\n elif xy[mpt, 1] < minY:\n # Mirror particle is South\n PV = PVdict['s']\n else:\n # Mirror particle is North\n PV = PVdict['n']\n\n if ok_stripbc:\n # Get index of the particle that resides a vector -PV away from mirror particle\n dist, ind = tree.query(xy[mpt] - PV)\n if (kpt, ind) not in PVd and (ind, kpt) not in PVd:\n BL2add[kk] = np.array([-kpt, -ind])\n PVd[(kpt, ind)] = PV\n print 'adding (kpt, ind) = ', (kpt, ind)\n kk += 1\n\n BL2add = BL2add[0:kk]\n\n if check:\n print 'PVd = ', PVd\n display_lattice_2D(xy, np.abs(BL), title=\"showing extended lattice (w/o strip PBCs)\")\n\n # Crop network, and add back cut bonds as periodic ones\n BL = np.vstack((BL, BL2add))\n xytrim, NL, KL, BLtrim, PVxydict = remove_pts(keep, xy, BL)\n # Adjusting BL2add to account for smaller #npts (post-cropping) is already done in remove_pts\n # Adjust PVs to account for smaller #npts (post-cropping)\n remove = np.setdiff1d(np.arange(len(xy)), keep)\n PVxydict = {}\n for key in PVd:\n # adjust key to lower indices\n # count how many pts in remove are lower than key[0] and key[1], respectively\n lower0 = np.sum(remove < key[0])\n lower1 = np.sum(remove 
< key[1])\n newkey = (key[0] - lower0, key[1] - lower1)\n PVxydict[newkey] = PVd[key]\n\n if check:\n # Plot lattice without PBCs\n display_lattice_2D(xytrim, np.abs(BLtrim), title=\"showing lattice connectivity w/o strip PBCs\")\n display_lattice_2D(xytrim, BLtrim, PVxydict=PVxydict, title=\"showing lattice connectivity with strip PBCs\")\n\n return xytrim, NL, KL, BLtrim, PVxydict" ]
[ "0.5799362", "0.56557685", "0.56553715", "0.55753076", "0.5484209", "0.5474942", "0.541693", "0.53881", "0.5384835", "0.5301325", "0.52949774", "0.5292436", "0.528571", "0.5250551", "0.52481675", "0.519482", "0.5136062", "0.5130518", "0.51211125", "0.51081985", "0.5103744", "0.5090339", "0.5088896", "0.508147", "0.50743675", "0.5059742", "0.505068", "0.50480723", "0.501098", "0.50035316" ]
0.6871466
0
Usage: Compute all the free space that is reachable from the origin (P[0,0], Q[0,0]) on the boundary of cells in the diagram for polygonal chains P and Q, given the free spaces LF and BF. LR[(i,j)] is the free space, reachable from the origin, of segment [Pi,Pi+1] from point Qj. BR[(i,j)] is the free space, reachable from the origin, of segment [Qj,Qj+1] from point Pi.
def LR_BR(LF, BF, p, q): if not (LF[(0, 0)][0] <= 0 and BF[(0, 0)][0] <= 0 and LF[(p - 2, q - 1)][1] >= 1 and BF[(p - 1, q - 2)][1] >= 1): rep = False BR = {} LR = {} else: LR = {(0, 0): True} BR = {(0, 0): True} for i in range(1, p - 1): if LF[(i, 0)] != [-1, -1] and LF[(i - 1, 0)] == [0, 1]: LR[(i, 0)] = True else: LR[(i, 0)] = False for j in range(1, q - 1): if BF[(0, j)] != [-1, -1] and BF[(0, j - 1)] == [0, 1]: BR[(0, j)] = True else: BR[(0, j)] = False for i in range(p - 1): for j in range(q - 1): if LR[(i, j)] or BR[(i, j)]: if LF[(i, j + 1)] != [-1, -1]: LR[(i, j + 1)] = True else: LR[(i, j + 1)] = False if BF[(i + 1, j)] != [-1, -1]: BR[(i + 1, j)] = True else: BR[(i + 1, j)] = False else: LR[(i, j + 1)] = False BR[(i + 1, j)] = False rep = BR[(p - 2, q - 2)] or LR[(p - 2, q - 2)] return rep, LR, BR
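A minimal usage sketch for the LR_BR routine above (an editorial illustration, not part of the dataset row): it assumes the companion LF_BF and free_line helpers from the same module (both appear among the negatives below), uses SciPy's cdist for the point-to-point distance matrix, and feeds LR_BR with made-up sample chains P and Q to test whether the Fréchet distance is at most eps.

import numpy as np
from scipy.spatial.distance import cdist

# Hypothetical sample chains (assumption: any short 2D polylines will do here).
P = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 1.0]])
Q = np.array([[0.0, 0.5], [1.0, 0.5], [2.0, 1.5]])
p, q = len(P), len(Q)
eps = 0.8  # candidate distance to test

mdist = cdist(P, Q)                                  # pairwise point-to-point distances
P_dist = np.linalg.norm(np.diff(P, axis=0), axis=1)  # segment lengths of P
Q_dist = np.linalg.norm(np.diff(Q, axis=0), axis=1)  # segment lengths of Q

# Free space on the left/bottom boundaries of every cell, then reachability from the origin.
LF, BF = LF_BF(P, Q, p, q, eps, mdist, P_dist, Q_dist)
reachable, LR, BR = LR_BR(LF, BF, p, q)
print(reachable)  # True here indicates the Fréchet distance of P and Q is <= eps (per this decision test)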
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dbgain_free_space(self, pt_1, pt_2):\n if (pt_1.ndim > 1) or (pt_2.ndim > 1):\n raise NotImplementedError\n dist = np.linalg.norm(pt_1 - pt_2)\n\n return self.dist_to_dbgain_free_space(\n dist,\n wavelength=self.wavelength,\n antenna_dbgain_tx=self.antenna_dbgain_tx,\n antenna_dbgain_rx=self.antenna_dbgain_rx,\n )", "def dijkstras(occupancy_map,x_spacing,y_spacing,start,goal):\n ROWS, COLS = occupancy_map.shape\n #convert physical location to index in the grid\n startNode = locToIndex(start, x_spacing, y_spacing)\n startingNodeLoc = indexToLoc(startNode, x_spacing, y_spacing)\n initialcost = math.sqrt((startingNodeLoc[0] - start[0])**2 + (startingNodeLoc[1] - start[1])**2)\n goalNode = locToIndex(goal, x_spacing, y_spacing)\n \n freelist = np.where(occupancy_map == 0)\n if occupancy_map[startNode[0], startNode[1]] != 0:\n #raise ValueError(\"start : ({}, {}) invalid, is an obstacle\".format(startNode[0], startNode[1]))\n startNode = findValidNode(startNode, start, occupancy_map, x_spacing, y_spacing)\n if occupancy_map[goalNode[0], goalNode[1]] != 0:\n #raise ValueError(\"goal: ({}, {}) invalid, is an obstacle\".format(goalNode[0], goalNode[1]))\n goalNode = findValidNode(goalNode, goal, occupancy_map, x_spacing, y_spacing)\n candidate = [ [sys.float_info.max, \n i, (freelist[0][i], freelist[1][i])] for i in range(len(freelist[0]))] \n visited = set([])\n queue = PriorityQueue(candidate)\n paths = {}\n found = False\n\n #update initial cost\n queue.remove(startNode)\n queue.insert(startNode, initialcost)\n paths[startNode] = None\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, 1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, -1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, -1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n while queue.size() > 0:\n priority, current = queue.pop()\n if current == goalNode:\n found = True\n break\n #not reaching goal node yet, for each of its neighbor, update the weight\n visited.add(current)\n update(occupancy_map, ROWS, COLS, current, 0, 1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 0, -1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, -1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n \n if not found:\n raise ValueError(\"fail to find shortest path\")\n node = goalNode\n shortestpath = []\n while node is not None:\n shortestpath.append(node)\n node = paths[node]\n #shortestpath.append(startNode)\n #print (startNode)\n #print ('*', list(reversed(shortestpath)))\n #print (goalNode)\n p = list(reversed([ indexToLoc(n, x_spacing, y_spacing) for n in shortestpath]))\n #start and final position may not fall on center of the grid\n if abs(p[0][0] - start[0]) > 0.0005 or abs(p[0][1] - start[1]) > 0.0005:\n p.insert(0, [start[0][0], start[1][0]])\n if abs(p[-1][0] - goal[0]) > 0.0005 or abs(p[-1][1] - goal[1]) > 0.0005:\n p.append([goal[0][0], goal[1][0]])\n res = np.array(p)\n print (res)\n return res", "def hinf_project_pole_alloc(A, B1, B2, C1, C2, D11, D12, D21, D22, q, r, solver=cvx.SCS):\n \n assert r > 0, 'r must be positive.'\n assert np.abs(q) + r < 1, 'the 
region must be inside the unit circle.'\n \n tol = 1e-20\n n = A.shape[0]\n \n L = cvx.Variable((B2.shape[1], n))\n P = cvx.Variable((n, n))\n gamma2 = cvx.Variable()\n \n LMI1 = cvx.bmat([\n [P, A*P + B2*L, B1, np.zeros((B1.shape[0], D11.shape[0]))],\n [P*A.T + L.T * B2.T, P, np.zeros((P.shape[0], B1.shape[1])), P*C1.T + L.T*D12.T],\n [B1.T, np.zeros((B1.shape[1], P.shape[1])), np.eye(B1.shape[1]), D11.T],\n [np.zeros((C1.shape[0], B1.shape[0])), C1*P + D12*L, D11, gamma2*np.eye(D11.shape[0])]\n ])\n \n cons1 = LMI1 >> tol\n \n LMI2 = cvx.bmat([\n [-r*P, -q*P + A*P + B2*L],\n [-q*P + P*A.T + L.T*B2.T, -r*P]\n ])\n \n cons2 = LMI2 << -tol\n \n cons3 = gamma2 >= tol\n \n cons4 = P == P.T\n \n cons5 = P >> tol\n \n prob = cvx.Problem(cvx.Minimize(gamma2), constraints=[cons1, cons2, cons3, cons4, cons5])\n prob.solve(solver=solver)\n \n status = prob.status\n if not status in [cvx.OPTIMAL_INACCURATE, cvx.OPTIMAL]:\n #variable.value will be None, better trow an exception\n raise OptException(f'Problem is {status}')\n \n Hinf_norm = np.sqrt(gamma2.value)\n Pop = P.value\n Lop = L.value\n \n K = Lop.dot(np.linalg.inv(Pop))\n \n return K, Hinf_norm, Pop, status", "def irr_boundary_as_space(self):\n p1,p2 = self.next_marks()\n \n return Mgn(self.genus - 1, self.marks.union([p1,p2])), p1, p2", "def process_lim(pool_lim, area):\n\n pool_nolim = [] # No limitation\n pool_lim_n = [] # N limitation\n pool_lim_p = [] # P limitation\n # Colimitation driven by N (When the realized NPP allocation is smaller\n # thant the potential due to N but the other element is also limitant)\n pool_colim_n = []\n # Colimitation driven by P (When the realized NPP allocation is smaller\n # than the potential due to P but the other element is also limitant\n pool_colim_p = []\n # Real Colimitation = K <= 1D-6 (K is difference between P and N realized NPP allocation)\n pool_colim_np = []\n\n ndays = pool_lim.shape[1]\n npls = pool_lim.shape[0]\n\n for pls in range(npls):\n if area[pls]:\n no_lim = (pool_lim[pls, :] == 0).sum() / ndays * area[pls]\n lim_n = (np.count_nonzero(\n pool_lim[pls, :] == 1) / ndays) * area[pls]\n lim_p = (np.count_nonzero(\n pool_lim[pls, :] == 2) / ndays) * area[pls]\n colim_n = (np.count_nonzero(\n pool_lim[pls, :] == 4) / ndays) * area[pls]\n colim_p = (np.count_nonzero(\n pool_lim[pls, :] == 5) / ndays) * area[pls]\n colim_np = (np.count_nonzero(\n pool_lim[pls, :] == 6) / ndays) * area[pls]\n\n pool_nolim.append(no_lim)\n pool_lim_n.append(lim_n)\n pool_lim_p.append(lim_p)\n pool_colim_n.append(colim_n)\n pool_colim_p.append(colim_p)\n pool_colim_np.append(colim_np)\n\n return (np.sum(pool_nolim),\n np.sum(pool_lim_n),\n np.sum(pool_lim_p),\n np.sum(pool_colim_n),\n np.sum(pool_colim_p),\n np.sum(pool_colim_np))", "def LF_BF(P, Q, p, q, eps, mdist, P_dist, Q_dist):\n LF = {}\n for j in range(q):\n for i in range(p - 1):\n LF.update({(i, j): free_line(Q[j], eps, P[i:i + 2], mdist[i, j], mdist[i + 1, j], P_dist[i])})\n BF = {}\n for j in range(q - 1):\n for i in range(p):\n BF.update({(i, j): free_line(P[i], eps, Q[j:j + 2], mdist[i, j], mdist[i, j + 1], Q_dist[j])})\n return LF, BF", "def boundaries_free(*args):\n return _ida_hexrays.boundaries_free(*args)", "def free_curvature(self) -> None:\n self.n1.free = True\n self.n2.free = True", "def get_locations(nodes, tl, br):\n \n # Base cases:\n if len(nodes) == 1: # for singleton, only choice is to place in the single spot in 1x1 square\n return {nodes[0]: tl}\n if len(nodes) == 2: # for two nodes, arbitrarily chose to place the first node in 
top left\n return {nodes[0]: tl, nodes[1]: br}\n\n # Recursive case, need to create and solve subproblems:\n ret = {}\n\n num_edges = count_num_edges(nodes)\n if num_edges == 0: # for empty graphs, no need to run METIS, just assign arbitrarily\n i = 0\n for x in range(tl.x, br.x+1): \n for y in range(tl.y, br.y+1):\n if i < len(nodes):\n ret.update({nodes[i]: Point(x,y)})\n i += 1\n return ret\n\n filename = splitext(basename(sys.argv[1]))[0] + '.p.' + sys.argv[2] + '.yx.' + sys.argv[3] + '.drop.' + sys.argv[4] + '.' +\\\n '_'.join(['delete', str(tl.x), str(tl.y), str(br.x), str(br.y)]) \n\n # special case for the very first call of get_locations. For example, suppose that there are\n # 97 nodes on a 10x10 grid. Instead of dividing the 97 nodes into 2 equal partitions, we should\n # divide them into a partition of 90 nodes and a partition of 7 nodes. The former should be\n # placed on a 10x9 grid and te latter should be placed on a 1x7 grid.\n if len(nodes) < (br.x - tl.x + 1) * (br.y - tl.y + 1):\n assert tl == Point(0, 0)\n size_tl_nodes = (br.x + 1) * int(len(nodes) / (br.x + 1))\n if size_tl_nodes == len(nodes):\n ret.update(get_locations(nodes, tl=Point(0, 0), br=Point(br.x, len(nodes) / (br.x + 1) - 1)))\n return ret\n\n nodes_tl, nodes_br = partition(nodes, size_tl_nodes, filename)\n # complicated indexing here. As an example, for the 97 into 10x10 case, we want to send 90 nodes\n # to a rectangle spanned by tl=Point(0, 0) and br=Point(9, 8) and we want to send 7 nodes to a \n # rectangle spanned by tl=Point(0, 9) and br=Point(6, 9)\n ret.update(get_locations(nodes_tl, tl=Point(0, 0), br=Point(br.x, len(nodes) / (br.x + 1) - 1)))\n ret.update(get_locations(nodes_br, tl=Point(0, len(nodes) / (br.x + 1)), br=Point(len(nodes) % (br.x + 1) - 1, len(nodes) / (br.x + 1))))\n return ret\n\n if br.x - tl.x > br.y - tl.y: # if rectangle is wider than tall, split on y axis\n half = tl.x + (br.x - tl.x - 1) / 2\n size_tl_nodes = (half - tl.x + 1) * (br.y - tl.y + 1)\n else: # split on x axis\n half = tl.y + (br.y - tl.y - 1) / 2\n size_tl_nodes = (br.x - tl.x + 1) * (half - tl.y + 1)\n\n nodes_tl, nodes_br = partition(nodes, size_tl_nodes, filename)\n\n if br.x - tl.x > br.y - tl.y: # if rectangle is wider than tall, split on y axis\n ret.update(get_locations(nodes_tl, tl=tl, br=Point(half, br.y)))\n ret.update(get_locations(nodes_br, tl=Point(half + 1,tl.y), br=br))\n else: # split on x axis\n ret.update(get_locations(nodes_tl, tl=tl, br=Point(br.x, half)))\n ret.update(get_locations(nodes_br, tl=Point(tl.x, half + 1), br=br))\n\n return ret", "def create_space(num_rows, num_cols, goal=[], obstacles=[], *args):\n space = []\n for i in range (num_rows):\n space.append([])\n for i in range(num_rows):\n for j in range(num_cols):\n space[i].append([])\n \n for i in range(num_rows):\n for j in range(num_cols):\n space[i][j]=node()\n \n for i in range(num_rows):\n for j in range(num_cols):\n space[i][j].h = math.sqrt((goal[0]-i)**2 + (goal[1]-j)**2)\n space[i][j].f = 10000\n space[i][j].g = 10000\n \n for obs in obstacles:\n space[obs[0]][obs[1]].h = 1000\n \n heuristics = np.zeros((num_rows,num_cols))\n for i in range(num_rows):\n for j in range(num_cols):\n heuristics[i][j]=space[i][j].h\n \n for i in range(num_rows):\n for j in range(num_cols):\n space[i][j].cor = [i, j]\n \n return space, heuristics", "def find_direct_gap(self,rpts=5):\n # Start with a random point in the BZ.\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define 
functions to minimize\n fun1= lambda x: self.Ham_eigvals(x[0],x[1])[self.NL]-self.Ham_eigvals(x[0],x[1])[self.NL-1]\n # Optimize initial guess.\n x1up=optimize.minimize(fun1,x0up).x\n valup=fun1(x1up)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n # Also always check special points in the BZ\n x0up=[0.,(4.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n x0up=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n \n return valup,x1up", "def find_free_space(graph, position=(pos_x, pos_y)):\n # SET THE POSITION USING LIST COMPREHENSION\n position_x, position_y = position[0], position[1]\n # TRANSFORM THE POSITION TO THE PYGAME VECTOR\n position = vec(position_x, position_y)\n # IMPORT THE DEQUE TO PUT THE NODES\n frontier = deque()\n # APPEND THE FRONTIER WITH THE POSITION\n frontier.append(position)\n print(f'Frontier: {frontier}')\n # THE LIST OF VISITED NODES\n visited = []\n print(f'Visited: {visited}')\n # THE POSITION WILL BE PUT AT THE VISITED QUEUE (IS WHERE WE ARE)\n visited.append(position)\n # START OUR LOOP\n #* As long there's nodes on the frontier do\n while len(frontier) > 0:\n # THE CURRENT NODE WE WANT TO LOOK IS THE NEXT NODE\n #* Pop's the next on the queue list\n current = frontier.popleft()\n print(f'Current: {current}')\n print(graph.find_neighbors(vec(current)))\n # THE NEIGHBOORS OF THE CURRENT TILE\n for next in graph.find_neighbors(current):\n print(\"OK! 
Entered in the For LOOP\")\n # IF THE NEXT NODE IS NOT VISITED\n if next not in visited:\n # ADD THE NODE TO THE FRONTIER LIST\n frontier.append(next)\n # PUT ON THE VISITED NODES\n visited.append(next)\n # PRINT ALL THE VISITED NODES\n print(f'The Visited Nodes are:\\n{visited}')", "def RTSpace( ref_el , deg ):\n sd = ref_el.get_spatial_dimension()\n\n vec_Pkp1 = polynomial_set.ONPolynomialSet( ref_el , deg+1 , (sd,) )\n\n dimPkp1 = expansions.polynomial_dimension( ref_el , deg+1 )\n dimPk = expansions.polynomial_dimension( ref_el , deg )\n dimPkm1 = expansions.polynomial_dimension( ref_el , deg-1 )\n\n vec_Pk_indices = reduce( lambda a,b: a+b , \\\n [ list(range(i*dimPkp1,i*dimPkp1+dimPk)) \\\n for i in range(sd) ] )\n vec_Pk_from_Pkp1 = vec_Pkp1.take( vec_Pk_indices )\n\n Pkp1 = polynomial_set.ONPolynomialSet( ref_el , deg + 1 )\n PkH = Pkp1.take( list(range(dimPkm1,dimPk)) )\n\n Q = quadrature.make_quadrature( ref_el , 2 * deg + 2 )\n\n # have to work on this through \"tabulate\" interface\n # first, tabulate PkH at quadrature points\n Qpts = numpy.array( Q.get_points() )\n Qwts = numpy.array( Q.get_weights() )\n\n zero_index = tuple( [ 0 for i in range(sd) ] )\n\n PkH_at_Qpts = PkH.tabulate( Qpts )[zero_index]\n Pkp1_at_Qpts = Pkp1.tabulate( Qpts )[zero_index]\n\n PkHx_coeffs = numpy.zeros( (PkH.get_num_members() , \\\n sd, \\\n Pkp1.get_num_members()) , \"d\" )\n\n import time\n t1 = time.time()\n for i in range( PkH.get_num_members() ):\n for j in range( sd ):\n fooij = PkH_at_Qpts[i,:] * Qpts[:,j] * Qwts\n PkHx_coeffs[i,j,:] = numpy.dot( Pkp1_at_Qpts , fooij )\n\n PkHx = polynomial_set.PolynomialSet( ref_el , \\\n deg , \\\n deg + 1 , \\\n vec_Pkp1.get_expansion_set() , \\\n PkHx_coeffs , \\\n vec_Pkp1.get_dmats() )\n\n return polynomial_set.polynomial_set_union_normalized( vec_Pk_from_Pkp1 , PkHx )", "def planning(self, sx, sy, gx, gy):\n\n start_node = self.Node(self.calc_xy_index(sx, self.min_x),\n self.calc_xy_index(sy, self.min_y), 0.0, -1)\n goal_node = self.Node(self.calc_xy_index(gx, self.min_x),\n self.calc_xy_index(gy, self.min_y), 0.0, -1)\n\n open_set_A, closed_set_A = dict(), dict()\n open_set_B, closed_set_B = dict(), dict()\n open_set_A[self.calc_grid_index(start_node)] = start_node\n open_set_B[self.calc_grid_index(goal_node)] = goal_node\n\n current_A = start_node\n current_B = goal_node\n meet_point_A, meet_point_B = None, None\n\n while 1:\n if len(open_set_A) == 0:\n print(\"Open set A is empty..\")\n break\n\n if len(open_set_B) == 0:\n print(\"Open set B is empty..\")\n break\n\n c_id_A = min(\n open_set_A,\n key=lambda o: self.find_total_cost(open_set_A, o, current_B))\n\n current_A = open_set_A[c_id_A]\n\n c_id_B = min(\n open_set_B,\n key=lambda o: self.find_total_cost(open_set_B, o, current_A))\n\n current_B = open_set_B[c_id_B]\n\n # show graph\n if show_animation: # pragma: no cover\n plt.plot(self.calc_grid_position(current_A.x, self.min_x),\n self.calc_grid_position(current_A.y, self.min_y),\n \"xc\")\n plt.plot(self.calc_grid_position(current_B.x, self.min_x),\n self.calc_grid_position(current_B.y, self.min_y),\n \"xc\")\n # for stopping simulation with the esc key.\n plt.gcf().canvas.mpl_connect(\n 'key_release_event',\n lambda event: [exit(0) if event.key == 'escape' else None])\n if len(closed_set_A.keys()) % 10 == 0:\n plt.pause(0.001)\n\n if current_A.x == current_B.x and current_A.y == current_B.y:\n print(\"Found goal\")\n meet_point_A = current_A\n meet_point_B = current_B\n break\n\n # Remove the item from the open set\n del 
open_set_A[c_id_A]\n del open_set_B[c_id_B]\n\n # Add it to the closed set\n closed_set_A[c_id_A] = current_A\n closed_set_B[c_id_B] = current_B\n\n # expand_grid search grid based on motion model\n for i, _ in enumerate(self.motion):\n\n c_nodes = [self.Node(current_A.x + self.motion[i][0],\n current_A.y + self.motion[i][1],\n current_A.cost + self.motion[i][2],\n c_id_A),\n self.Node(current_B.x + self.motion[i][0],\n current_B.y + self.motion[i][1],\n current_B.cost + self.motion[i][2],\n c_id_B)]\n\n n_ids = [self.calc_grid_index(c_nodes[0]),\n self.calc_grid_index(c_nodes[1])]\n\n # If the node is not safe, do nothing\n continue_ = self.check_nodes_and_sets(c_nodes, closed_set_A,\n closed_set_B, n_ids)\n\n if not continue_[0]:\n if n_ids[0] not in open_set_A:\n # discovered a new node\n open_set_A[n_ids[0]] = c_nodes[0]\n else:\n if open_set_A[n_ids[0]].cost > c_nodes[0].cost:\n # This path is the best until now. record it\n open_set_A[n_ids[0]] = c_nodes[0]\n\n if not continue_[1]:\n if n_ids[1] not in open_set_B:\n # discovered a new node\n open_set_B[n_ids[1]] = c_nodes[1]\n else:\n if open_set_B[n_ids[1]].cost > c_nodes[1].cost:\n # This path is the best until now. record it\n open_set_B[n_ids[1]] = c_nodes[1]\n\n rx, ry = self.calc_final_bidirectional_path(\n meet_point_A, meet_point_B, closed_set_A, closed_set_B)\n\n return rx, ry", "def bk_p(g,p,r,x, counter):\n print(\"counter:\\t\", counter)\n print(\"p:\\t\", p)\n print(\"r:\\t\", r)\n print(\"x:\\t\", x)\n result = []\n pux = set(p).union(set(x))\n if len(pux) == 0:\n print(\"return r: \", r)\n return r\n else:\n pivot = list(pux)[0]\n pN = [n for n in g.neighbors(pivot)]\n p_copy = copy.deepcopy(p)\n print(\"P_COPY\",p_copy)\n print(\"P_N\",pN)\n for n in pN:\n p_copy.remove(n)\n for v in p_copy:\n print(\"v: \", v)\n vNeighbors = [a for a in g.neighbors(v)]\n print(\"vNeighbors: \\t\", vNeighbors)\n # pnnv, ruv, xnnv\n print(\"================================\")\n result.append(bk_p(g, intersection(p,vNeighbors), r+[v], intersection(x, vNeighbors), counter+1))\n print(\"================================\")\n print(\"result:\\t\", result, \"\\tv: \", v)\n p.remove(v)\n x.append(v)\n print(\"fp:\\t\", p)\n print(\"fr:\\t\", r)\n print(\"fx:\\t\", x)\n return result\n\n def bk_p2(g,r,p,x, counter=0):\n \"\"\"\n Bron-Kerbosch algorithm without pivots (implemented with python sets)\n g: an nx graph\n r: disjoint set of vertices of graph g\n p: disjoint set of vertices of graph g\n x: disjoint set of vertices of graph g\n \"\"\"\n pux = p.union(x)\n if not pux:\n print('Maximal clique found: ', r)\n\n # choose an pivot from pux\n pivot = next(iter(pux))\n neighborsP = list(g.neighbors(pivot))\n for v in p.difference(neighborsP):\n neighborsV = list(g.neighbors(v))\n bk_p(g, r.union([v]), p.intersection(neighborsV), x.intersection(neighborsV), counter+1)\n p.remove(v)\n x.add(v)", "def oracle_wer(self, ref):\n # Add start and end to ref\n ref = [NULL, SOS] + ref.split() + [EOS]\n # Most lattices contain the correct path, so check that first\n if self.in_lattice(ref):\n return (0, [(i, i) for i in ref])\n # Initialize the alignment matrix\n align_matrix = np.ones((len(ref),len(self.nodes)), 'i') * MAXINT\n # And the backpointer matrix\n bp_matrix = np.zeros((len(ref),len(self.nodes)), 'O')\n # Figure out the minimum distance to each node from the start\n # of the lattice, and construct a node to ID mapping\n nodeid = {}\n for i,u in enumerate(self.nodes):\n u.score = MAXINT\n nodeid[u] = i\n self.start.score = 1\n for u in 
self.nodes:\n for x in u.exits:\n dist = u.score + 1\n if dist < x.dest.score:\n x.dest.score = dist\n def find_pred(ii, jj):\n bestscore = MAXINT\n bestp = -1\n if len(self.nodes[jj].entries) == 0:\n return bestp\n for e in self.nodes[jj].entries:\n k = nodeid[e.src]\n if align_matrix[ii,k] < bestscore:\n bestp = k\n bestscore = align_matrix[ii,k]\n return bestp\n # Now fill in the alignment matrix\n for i, w in enumerate(ref):\n for j, u in enumerate(self.nodes):\n # Insertion = cost(w, prev(u)) + 1\n if u == self.start: # start node\n bestp = -1\n inscost = i + 2 # Distance from start of ref\n else:\n # Find best predecessor in the same reference position\n bestp = find_pred(i, j)\n inscost = align_matrix[i,bestp] + 1\n # Deletion = cost(prev(w), u) + 1\n if i == 0: # start symbol\n delcost = u.score + 1 # Distance from start of hyp\n else:\n delcost = align_matrix[i-1,j] + 1\n # Substitution = cost(prev(w), prev(u)) + (w != u)\n if i == 0 and bestp == -1: # Start node, start of ref\n subcost = int(w != u.sym)\n elif i == 0: # Start of ref\n subcost = (self.nodes[bestp].score\n + int(w != u.sym))\n elif bestp == -1: # Start node\n subcost = i - 1 + int(w != u.sym)\n else:\n # Find best predecessor in the previous reference position\n bestp = find_pred(i-1, j)\n subcost = (align_matrix[i-1,bestp]\n + int(w != u.sym))\n align_matrix[i,j] = min(subcost, inscost, delcost)\n # Now find the argmin\n if align_matrix[i,j] == subcost:\n bp_matrix[i,j] = (i-1, bestp)\n elif align_matrix[i,j] == inscost:\n bp_matrix[i,j] = (i, bestp)\n else:\n bp_matrix[i,j] = (i-1, j)\n # Find last node's index\n last = nodeid[self.end]\n # Backtrace to get an alignment\n i = len(ref)-1\n j = last\n bt = []\n while True:\n ip,jp = bp_matrix[i,j]\n if ip == i: # Insertion\n bt.append(('**INS**', '*%s*' % self.nodes[j].sym))\n elif jp == j: # Deletion\n bt.append(('*%s' % ref[i], '**DEL**'))\n else:\n if ref[i] == self.nodes[j].sym:\n bt.append((ref[i], self.nodes[j].sym))\n else:\n bt.append((ref[i], '*%s*' % self.nodes[j].sym))\n # If we consume both ref and hyp, we are done\n if ip == -1 and jp == -1:\n break\n # If we hit the beginning of the ref, fill with insertions\n if ip == -1:\n while True:\n bt.append(('**INS**', self.nodes[jp].sym))\n bestp = find_pred(i,jp)\n if bestp == -1:\n break\n jp = bestp\n break\n # If we hit the beginning of the hyp, fill with deletions\n if jp == -1:\n while ip >= 0:\n bt.append((ref[ip], '**DEL**'))\n ip = ip - 1\n break\n # Follow the pointer\n i,j = ip,jp\n bt.reverse()\n return align_matrix[len(ref)-1,last], bt", "def find_indirect_gap(self,rpts=5):\n # First find the miniumu of the upper band.\n # Start with a random point in the BZ.\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define functions to minimize\n fun1= lambda x: self.Ham_eigvals(x[0],x[1])[self.NL]\n # Optimize initial guess.\n x1up=optimize.minimize(fun1,x0up).x\n valup=fun1(x1up)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n # Also always check special points in the BZ\n x0up=[0.,(4.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n x0up=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if 
fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n \n # Repeat the same for the lower band\n x0dn=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define functions to minimize\n fun2= lambda x: -self.Ham_eigvals(x[0],x[1])[self.NL-1]\n # Optimize initial guess.\n x1dn=optimize.minimize(fun2,x0dn).x\n valdn=fun2(x1dn)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0dn=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n # Also always check special points in the BZ\n x0dn=[0.,(4.*pi/3.)/np.sqrt(3.)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n x0dn=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n \n return valup+valdn,x1up,x1dn", "def set_bc(self, problem):\n bcs = problem.bcs\n n_bound = cfg.const['N_GHOST_CELLS']\n # Left X-b.c.\n for i in range(0, self.i_min):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[0] == 't': \n self.U[i][j][k] = self.U[self.i_min][j][k]\n elif bcs[0] == 'w':\n for num in [0, 2, 3, 4]: # 0 -> 3, 1 -> 2, i_min-1 -> i_min, i_min-2 -> i_min+1\n self.U[i][j][k][num] = self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n else:\n print(\"Errof field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right X-b.c.\n for i in range(self.i_max, self.i_max+n_bound):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[1] == 't':\n self.U[i][j][k] = self.U[self.i_max-1][j][k]\n elif bcs[1] == 'w':\n for num in [0, 2, 3, 4]: # i_max -> i_max-1 , i_max+1-> i_max-2\n self.U[i][j][k][num] = self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_min):\n for k in range(self.k_min, self.k_max): \n if bcs[2] == 't':\n self.U[i][j][k] = self.U[i][self.j_min][k]\n elif bcs[2] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = - self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(self.j_max, self.j_max+n_bound):\n for k in range(self.k_min, self.k_max): \n if bcs[3] == 't':\n self.U[i][j][k] = self.U[i][self.j_max-1][k]\n elif bcs[3] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = -self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! 
Bye!\")\n # Left Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(0, self.k_min): \n if bcs[4] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_min]\n elif bcs[4] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(self.k_max, self.k_max+n_bound):\n if bcs[5] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_max-1]\n elif bcs[5] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")", "def a_star(self, mapdata, start, goal):\n\n print \"Inside A star\"\n rospy.loginfo(\"Generate path from (%d,%d) to (%d,%d)\" % (start[0], start[1], goal[0], goal[1]))\n if not PathPlanner.is_cell_walkable(mapdata, goal[0], goal[1]):\n rospy.logerr(\"not walkable goal\")\n return[]\n #calculated from goal\n frontier = PriorityQueue()\n frontier.put(start, 0)\n came_from = {}\n cost_so_far = {}\n came_from[start] = None\n cost_so_far[start] = 0\n\n while not frontier.empty():\n frontier_msg = GridCells()\n frontier_cells = []\n for e in frontier.elements:\n frontier_cells.append(PathPlanner.grid_to_world(mapdata, e[1][0], e[1][1]))\n frontier_msg.header = mapdata.header\n frontier_msg.header.stamp = rospy.get_rostime()\n frontier_msg.cell_width = mapdata.info.resolution\n frontier_msg.cell_height = mapdata.info.resolution\n frontier_msg.cells = frontier_cells\n expanded_msg = GridCells()\n expanded_cells = []\n for e in cost_so_far: \n expanded_cells.append(PathPlanner.grid_to_world(mapdata, e[0], e[1]))\n \n expanded_msg.header = mapdata.header\n expanded_msg.header.stamp = rospy.get_rostime()\n expanded_msg.cell_width = mapdata.info.resolution\n expanded_msg.cell_height = mapdata.info.resolution\n expanded_msg.cells = expanded_cells\n self.expanded_pub.publish(expanded_msg)\n rospy.sleep(0.01)\n\n current = frontier.get()\n\n #creates path\n if current == goal:\n entry = goal\n listOfCoord = []\n while entry != None:\n listOfCoord.append(entry)\n entry = came_from[entry]\n listOfCoord.reverse()\n self.expanded_pub.publish(PathPlanner.createGridcells(mapdata, listOfCoord))\n return listOfCoord\n \n for next in PathPlanner.neighbors_of_8(mapdata, current[0], current[1]):\n new_cost = cost_so_far[current] + 1 #assume cost to move each unit is 1\n if next not in cost_so_far or new_cost < cost_so_far[next]:\n cost_so_far[next] = new_cost\n priority = new_cost + PathPlanner.euclidean_distance(next[0], next[1], goal[0], goal[1])\n frontier.put(next, priority)\n came_from[next] = current\n\n \n return[]", "def red_boundaries_as_spaces(self):\n marks = set(self.marks)\n if self.n != 0:\n first_mark_list = [marks.pop()]\n \n \n p1,p2 = self.next_marks() \n \n for g1 in range(0, self.genus + 1):\n for p in subsets(marks):\n r_marks = set(first_mark_list + p)\n if 3*g1 - 3 + len(r_marks) + 1 >= 0 and 3*(self.genus-g1) - 3 + self.n - len(r_marks) + 1 >= 0:\n yield (Mgn(g1, r_marks.union([p1])), p1), (Mgn(self.genus - g1, 
marks.difference(r_marks).union([p2])), p2) \n \n else: # self.n == 0\n for g1 in range(1, floor(self.genus/2.0)+1):\n yield (Mgn(g1, [1]), 1) , (Mgn(self.genus-g1, [2]), 2)", "def A_Star(start, goal, final_occupancy_grid):\n x, y = np.mgrid[0:LENGTH:1, 0:WIDTH:1]\n pos = np.empty(x.shape + (2,))\n # x.shape = (LENGTH,WIDTH)\n # x.shape + (2,) = (LENGTH,WIDTH,2)\n pos[:, :, 0] = x\n pos[:, :, 1] = y\n # pos.shape = (1890, 2)\n pos = np.reshape(pos, (x.shape[0] * x.shape[1], 2))\n coords = list([(int(x[0]), int(x[1])) for x in pos])\n # Define the heuristic:\n # h: dictionary containing the distance to goal ignoring obstacles for all coordinates in the grid (heuristic function)\n h = np.linalg.norm(pos - goal, axis=1)\n # If axis is an integer, it specifies the axis of x along which to compute the vector norms\n # axis = 1: h.shape = 1890\n # axis = 0: h.shape = 2\n h = dict(zip(coords, h))\n\n # Check if the start and goal are within the boundaries of the map\n for point in [start, goal]:\n\n if point[0] < 0 and point[0] >= final_occupancy_grid.shape[0]:\n raise Exception('Start node/goal node is not contained in the map')\n\n if point[1] < 0 and point[1] >= final_occupancy_grid.shape[1]:\n raise Exception('Start node/goal node is not contained in the map')\n\n # check if start and goal nodes correspond to free spaces\n if final_occupancy_grid[start[0], start[1]]:\n raise Exception('Start node is not traversable')\n\n if final_occupancy_grid[goal[0], goal[1]]:\n raise Exception('Goal node is not traversable')\n\n # get the possible movements\n movements = get_movements_8n()\n\n # The set of visited nodes that need to be (re-)expanded, i.e. for which the neighbors need to be explored\n # Initially, only the start node is known.\n openSet = [start]\n\n # The set of visited nodes that no longer need to be expanded.\n closedSet = []\n\n # For node n, cameFrom[n] is the node immediately preceding it on the cheapest path from start to n currently known.\n cameFrom = dict()\n\n # For node n, gScore[n] is the cost of the cheapest path from start to n currently known.\n gScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n gScore[start] = 0\n\n # For node n, fScore[n] := gScore[n] + h(n). 
map with default value of Infinity\n fScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n fScore[start] = h[start]\n\n # while there are still elements to investigate\n while openSet != []:\n\n # the node in openSet having the lowest fScore[] value\n fScore_openSet = {key: val for (key, val) in fScore.items() if key in openSet}\n current = min(fScore_openSet, key=fScore_openSet.get)\n del fScore_openSet\n\n # If the goal is reached, reconstruct and return the obtained path\n if current == goal:\n # print(\"Path\", closedSet)\n return reconstruct_path(cameFrom, current)\n\n openSet.remove(current)\n closedSet.append(current)\n\n # for each neighbor of current:\n for dx, dy, deltacost in movements:\n\n neighbor = (current[0] + dx, current[1] + dy)\n\n # if the node is not in the map, skip\n if (neighbor[0] >= final_occupancy_grid.shape[0]) or (neighbor[1] >= final_occupancy_grid.shape[1]) or (\n neighbor[0] < 0) or (neighbor[1] < 0):\n continue\n\n # if the node is occupied, skip\n if (final_occupancy_grid[neighbor[0], neighbor[1]]):\n continue\n\n # if the has already been visited, skip\n if (neighbor in closedSet):\n continue\n # d(current,neighbor) is the weight of the edge from current to neighbor\n # tentative_gScore is the distance from start to the neighbor through current\n tentative_gScore = gScore[current] + deltacost\n\n if neighbor not in openSet:\n openSet.append(neighbor)\n\n if tentative_gScore < gScore[neighbor]:\n # This path to neighbor is better than any previous one. Record it!\n cameFrom[neighbor] = current\n gScore[neighbor] = tentative_gScore\n fScore[neighbor] = gScore[neighbor] + h[neighbor]\n\n # Open set is empty but goal was never reached\n print(\"No path found to goal\")\n return [], closedSet", "def free(self):\n self.linit = False\n self.nx = 0\n self.nz = 0\n self.nsrc = 0\n self.nrec = 0\n self.fteik2d.fteik_solver2d_free()\n return", "def _get_free_capacity(self):\n\n capacity = np.ones(len(self.grid.T)) * len(self.grid)\n capacity -= np.count_nonzero(self.grid, axis=0)\n return capacity", "def _add_boundaries(self):\n logger.info(\"add_boundaries: start of method\")\n\n import scipy.spatial as sptl\n import scipy.sparse as sprs\n Lx = self._Lx\n Ly = self._Ly\n Lz = self._Lz\n Np = self.num_pores()\n btype = self._btype\n boffset = 0.05\n\n #Translate internal pores to each face of domain\n poffset = np.zeros((7,3))\n poffset[[2,5],0] = [-Lx, Lx]\n poffset[[3,4],1] = [-Ly, Ly]\n poffset[[1,6],2] = [-Lz, Lz]\n pcoords = pcoords0 = self['pore.coords']\n for i in np.r_[1:7]:\n pcoords = np.concatenate((pcoords,pcoords0 + poffset[i,:]),axis=0)\n\n #Use some twisted logic to get bval list of + for boundary and - for periodic faces\n bval = [0, 1, 2, 3, 4, 5, 6]*(np.array([0, btype[2], btype[0], btype[1], btype[1], btype[0], btype[2]])*-2+1)\n ptype = np.zeros((Np,),dtype=int)\n for i in np.r_[1:7]:\n ptype = np.concatenate((ptype,np.ones((Np,),dtype=int)*bval[i]),axis=0)\n\n #pnum contains the internal ID number of the boundary pores (for connecting periodic points)\n pnum = self.pores()\n pnum = np.tile(pnum,7)\n\n Tri = sptl.Delaunay(pcoords)\n adjmat = sprs.lil_matrix((np.shape(pcoords)[0],np.shape(pcoords)[0]),dtype=int)\n for i in np.arange(0,np.shape(Tri.simplices)[0]):\n #Keep only simplices that are fully in real domain\n adjmat[Tri.simplices[i],Tri.simplices[i]] = 1\n adjmat = sprs.triu(adjmat,k=1,format=\"lil\")\n for i in np.arange(0,Np):\n #Add periodic throats to the netowrk (if any)\n tpore2 = 
pnum[adjmat.rows[i]][ptype[adjmat.rows[i]]<0]\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = self['throat.conns']\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n #Add boundary pores and throats to the network\n newporetyps = np.unique(ptype[adjmat.rows[i]][ptype[adjmat.rows[i]]>0])\n newporenums = np.r_[self.num_pores():self.num_pores()+np.size(newporetyps)]\n tpore2 = newporenums\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n self['throat.conns'] = conns\n bcoords = np.zeros((7,3),dtype=float)\n coords = self['pore.coords']\n bcoords[1,:] = [coords[i,0], coords[i,1], 0-Lz*boffset]\n bcoords[2,:] = [0-Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[3,:] = [coords[i,0], -Ly*boffset, coords[i,2]]\n bcoords[4,:] = [coords[i,0], Ly+Ly*boffset, coords[i,2]]\n bcoords[5,:] = [Lx+Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[6,:] = [coords[i,0], coords[i,1], Lz+Lz*boffset]\n newporecoords = bcoords[newporetyps,:]\n coords = np.concatenate((coords,newporecoords),axis=0)\n self['pore.coords'] = coords\n #Reset number of pores and throats (easier than tracking it)\n nums = np.r_[0:np.shape(coords)[0]]\n self['pore.numbering'] = nums\n self['pore.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n nums = np.r_[0:np.shape(conns)[0]]\n self['throat.numbering'] = nums\n self['throat.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n logger.debug(\"add_boundaries: end of method\")", "def preallocate_g_and_B(no_of_dofs_unconstrained, dofs_by_object, no_of_constraints_by_object):\n # Initialize indptr and indices\n indptr = [0]\n indices = list()\n\n # Generate indptr and indices values\n for dofs, no_of_constraints in zip(dofs_by_object, no_of_constraints_by_object):\n for _ in np.arange(no_of_constraints):\n indptr.append(indptr[-1] + len(dofs))\n indices.extend(dofs.tolist())\n\n # Make empty data array of correct size\n data = np.zeros(len(indices))\n\n no_of_constraint_rows = len(indptr) - 1\n B = csr_matrix((data, indices, indptr), shape=(no_of_constraint_rows, no_of_dofs_unconstrained))\n g = np.zeros(no_of_constraint_rows)\n return g, B", "def bifurcation_diagram(args, Bpbmin, Bpbmax, ylim=(-1, 0.6)):\n\n xs = []\n Bpb_list = np.linspace(Bpbmin, Bpbmax, 100)\n Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl = args\n\n sol, t = calcODE(args, -1.5, -1.5, 0.5, 0.5, 0.5, 0.5, ts=4000, nt=2 ** 25)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n x0 = sol[0, :]\n n = np.array(ode(x0, t[0], *args))\n q, _ = np.linalg.qr(n[:, None], mode='complete')\n\n periods = []\n for Bpb in Bpb_list:\n args = (Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl)\n sol, t = calcODE(args, *sol[-1, :], ts=1000, nt=2 ** 15)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n for i in range(len(sol) - 1):\n x1 = sol[i]\n x2 = sol[i + 1]\n if np.sign(n @ (x2 - x0)) != np.sign(n @ (x1 - x0)):\n c1 = dist(x1, x0, n)\n c2 = dist(x2, x0, n)\n alpha = c2 / (c1 + c2)\n x_new = x1 + alpha * (x2 - x1)\n x = (x_new - x0).dot(q)\n xs.append((Bpb, x[0], x[1], x[2], x[3], x[4], x[5]))\n # if np.linalg.norm(x_new - x0) < 1e-2 and period is None:\n period = t[i] - periods[-1][-1] if len(periods) else 0\n periods.append((Bpb, period, np.linalg.norm(x_new - x0), t[i]))\n\n plt.figure(figsize=(15, 10))\n plt.scatter([i[0] for i in xs], [i[2] for i in xs], s=10)\n plt.xlabel('$B_{pb}$')\n\n # plt.ylim(ylim)\n plt.show()\n\n periods = [i for i in periods if i[1] > 0]\n\n return periods, xs", "def __addValueConstraints(self):\n for x 
in range(self.width):\n for y in range(self.height):\n g = self.grid[(x, y)]\n self.solver.add(\n Or([g == Magnets.EMPTY, g == Magnets.PLUS, g == Magnets.MINUS]))\n if x > 0:\n left = self.grid[(x-1, y)]\n self.solver.add(Or([g != left, g == Magnets.EMPTY]))\n if y > 0:\n up = self.grid[(x, y-1)]\n self.solver.add(Or([g != up, g == Magnets.EMPTY]))", "def fill_gap_2_0(R):\n suitable_positions = [[], [], [], [], []] # stores all suitable positions for pieces respectively\n for row_idx in range(8):\n for col_idx in range(8):\n if single_pos_no_conflict_check((row_idx, col_idx), R): # iterate all vacant positions\n if single_move_feasible_q(pos0[0], (row_idx, col_idx)) \\\n and single_move_feasible_q((row_idx, col_idx), R[1][0]):\n suitable_positions[0].append((row_idx, col_idx))\n if single_move_feasible_k(pos0[1], (row_idx, col_idx)) \\\n and single_move_feasible_k((row_idx, col_idx), R[1][1]):\n suitable_positions[1].append((row_idx, col_idx))\n if single_move_feasible_r(pos0[2], (row_idx, col_idx)) \\\n and single_move_feasible_r((row_idx, col_idx), R[1][2]):\n suitable_positions[2].append((row_idx, col_idx))\n if single_move_feasible_n(pos0[3], (row_idx, col_idx)) \\\n and single_move_feasible_n((row_idx, col_idx), R[1][3]):\n suitable_positions[3].append((row_idx, col_idx))\n if single_move_feasible_b(pos0[4], (row_idx, col_idx)) \\\n and single_move_feasible_b((row_idx, col_idx), R[1][4]):\n suitable_positions[4].append((row_idx, col_idx))\n if [] in suitable_positions:\n return []\n\n result_list = []\n for pos1 in itertools.product(*suitable_positions): # iterate all combinations\n if len(set(pos1)) == 5: # eliminate those different pieces with the same position\n tmp = copy.deepcopy(R)\n tmp[0] = pos1\n result_list.append(tmp)\n return result_list", "def rectangular_periodic(m_g, n_g, len1_g=1.0, len2_g=1.0, origin_g = (0.0, 0.0)):\n\n processor = 0\n numproc = 1\n\n\n n = n_g\n m_low = -1\n m_high = m_g +1\n\n m = m_high - m_low\n\n delta1 = float(len1_g)/m_g\n delta2 = float(len2_g)/n_g\n\n len1 = len1_g*float(m)/float(m_g)\n len2 = len2_g\n origin = ( origin_g[0]+float(m_low)/float(m_g)*len1_g, origin_g[1] )\n\n #Calculate number of points\n Np = (m+1)*(n+1)\n\n class VIndex(object):\n\n def __init__(self, n,m):\n self.n = n\n self.m = m\n\n def __call__(self, i,j):\n return j+i*(self.n+1)\n\n class EIndex(object):\n\n def __init__(self, n,m):\n self.n = n\n self.m = m\n\n def __call__(self, i,j):\n return 2*(j+i*self.n)\n\n\n I = VIndex(n,m)\n E = EIndex(n,m)\n\n points = num.zeros( (Np,2), float)\n\n for i in range(m+1):\n for j in range(n+1):\n\n points[I(i,j),:] = [i*delta1 + origin[0], j*delta2 + origin[1]]\n\n #Construct 2 triangles per rectangular element and assign tags to boundary\n #Calculate number of triangles\n Nt = 2*m*n\n\n\n elements = num.zeros( (Nt,3), int)\n boundary = {}\n Idgl = []\n Idfl = []\n Idgr = []\n Idfr = []\n\n full_send_dict = {}\n ghost_recv_dict = {}\n nt = -1\n for i in range(m):\n for j in range(n):\n\n i1 = I(i,j+1)\n i2 = I(i,j)\n i3 = I(i+1,j+1)\n i4 = I(i+1,j)\n\n #Lower Element\n nt = E(i,j)\n if i == 0:\n Idgl.append(nt)\n\n if i == 1:\n Idfl.append(nt)\n\n if i == m-2:\n Idfr.append(nt)\n\n if i == m-1:\n Idgr.append(nt)\n\n if i == m-1:\n if processor == numproc-1:\n boundary[nt, 2] = 'right'\n else:\n boundary[nt, 2] = 'ghost'\n\n if j == 0:\n boundary[nt, 1] = 'bottom'\n elements[nt,:] = [i4,i3,i2]\n\n #Upper Element\n nt = E(i,j)+1\n if i == 0:\n Idgl.append(nt)\n\n if i == 1:\n Idfl.append(nt)\n\n if i == m-2:\n Idfr.append(nt)\n\n 
if i == m-1:\n Idgr.append(nt)\n\n if i == 0:\n if processor == 0:\n boundary[nt, 2] = 'left'\n else:\n boundary[nt, 2] = 'ghost'\n if j == n-1:\n boundary[nt, 1] = 'top'\n elements[nt,:] = [i1,i2,i3]\n\n Idfl.extend(Idfr)\n Idgr.extend(Idgl)\n\n Idfl = num.array(Idfl, int)\n Idgr = num.array(Idgr, int)\n\n full_send_dict[processor] = [Idfl, Idfl]\n ghost_recv_dict[processor] = [Idgr, Idgr]\n\n\n return points, elements, boundary, full_send_dict, ghost_recv_dict", "def is_planar(G):\n result=True\n bad_minor=[]\n n=len(G.nodes())\n iterazione=0\n if n>5:\n print 'N >5'\n\n for subnodes in it.combinations(G.nodes(),6):\n iterazione+=1\n print 'iterazione %d'%iterazione\n subG=G.subgraph(subnodes)\n if bipartite.is_bipartite(G):# check if the graph G has a subgraph K(3,3)\n X, Y = bipartite.sets(G)\n if len(X)==3:\n result=False\n bad_minor=subnodes\n return result,bad_minor\n iterazione=0\n if n>4 and result:\n print 'N >4'\n\n for subnodes in it.combinations(G.nodes(),5):\n print 'iterazione %d'%iterazione\n subG=G.subgraph(subnodes)\n if len(subG.edges())==10:# check if the graph G has a subgraph K(5)\n result=False\n bad_minor=subnodes\n return result,bad_minor\n\n return result,bad_minor" ]
[ "0.5692554", "0.5665216", "0.5620372", "0.5388032", "0.5333929", "0.53203285", "0.5296268", "0.5269298", "0.5259146", "0.5242636", "0.5239064", "0.52265537", "0.52158135", "0.51837415", "0.51299995", "0.5123705", "0.5116886", "0.51046026", "0.50999355", "0.50996166", "0.50863117", "0.5081786", "0.50731176", "0.50610644", "0.50434846", "0.50238264", "0.5019723", "0.5011183", "0.5008735", "0.50036186" ]
0.57159686
0
Usage: Compute all the critical values between trajectories P and Q
def compute_critical_values(P, Q, p, q, mdist, P_dist, Q_dist):
    origin = eucl_dist(P[0], Q[0])
    end = eucl_dist(P[-1], Q[-1])
    end_point = max(origin, end)
    cc = set([end_point])
    for i in range(p - 1):
        for j in range(q - 1):
            Lij = point_to_seg(Q[j], P[i], P[i + 1], mdist[i, j], mdist[i + 1, j], P_dist[i])
            if Lij > end_point:
                cc.add(Lij)
            Bij = point_to_seg(P[i], Q[j], Q[j + 1], mdist[i, j], mdist[i, j + 1], Q_dist[j])
            if Bij > end_point:
                cc.add(Bij)
    return sorted(list(cc))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cost(P, Q):\n Q = np.where(Q != 0, Q, 1e-12)\n P = np.where(P != 0, P, 1e-12)\n Div = P / Q\n C = np.sum(P * np.log(Div))\n return C", "def frechet(P, Q):\n p = len(P)\n q = len(Q)\n\n mdist = eucl_dist_traj(P, Q)\n P_dist = [eucl_dist(P[ip], P[ip + 1]) for ip in range(p - 1)]\n Q_dist = [eucl_dist(Q[iq], Q[iq + 1]) for iq in range(q - 1)]\n\n cc = compute_critical_values(P, Q, p, q, mdist, P_dist, Q_dist)\n eps = cc[0]\n while (len(cc) != 1):\n m_i = len(cc) / 2 - 1\n eps = cc[m_i]\n rep = decision_problem(P, Q, p, q, eps, mdist, P_dist, Q_dist)\n if rep:\n cc = cc[:m_i + 1]\n else:\n cc = cc[m_i + 1:]\n frech = eps\n return frech", "def algorithm_1_1(p, c, t, x):\n\n q = np.array(c, dtype=np.float64)\n\n for k in range(1, p + 1):\n for j in range(0, p - k + 1):\n q[j] = (t[j + k] - x) / (t[j + k] - t[j]) * q[j] + (x - t[j]) / (\n t[j + k] - t[j]) * q[j + 1]\n return q[0]", "def u_crit(state, sys):\n s = state[0]\n i = state[1]\n tau = scipy.interpolate.interp1d(sys.tau.s, sys.tau.i, kind = \"cubic\")\n phi = scipy.interpolate.interp1d(sys.phi.s, sys.phi.i, kind = \"cubic\")\n cc = scipy.interpolate.interp1d(sys.commutation_curve[0],\n sys.commutation_curve[1],\n kind = \"cubic\")\n if i > sys.imax:\n return sys.umax\n if s <= sys.commutation_curve[0][-1]:\n #print(\"Case 1\")\n if s < sys.sbar or i < tau(s):\n return 0\n return sys.umax\n elif s > sys.commutation_curve[0][-1] and s < sys.commutation_curve[0][0]:\n #print(\"Case 2\")\n if ((i > tau(s)) and (i < cc(s))) or (i > sys.imax):\n return sys.umax\n elif i > cc(s) and i < sys.imax:\n return 0\n else:\n return 0\n else:\n #print(\"Case 3\")\n if i > sys.imax:\n return sys.umax\n elif s > sys.sstar and i > phi(s):\n return sys.umax\n return 0", "def __call__(self, p, q, verbosity=1, warn=True):\n if self.exactly_zero: return 0.0 # shortcut for trivial case\n if self.weight == 0:\n return _np.sum(_np.abs(q - p)) / 2\n\n #Set parameter values\n self.P.value[:] = p[:]\n self.Q.value[:] = q[:]\n\n treg_factor_ok = False\n self.Treg_factor.value = self.initial_treg_factor\n while not treg_factor_ok:\n\n obj1 = self._obj(self.t_params)\n if REBUILD:\n self._rebuild_problem()\n else:\n self._build_problem()\n\n self.prob.solve(solver=remove_kicked(self.solver), verbose=(verbosity > 1),\n **default_cvxpy_args(self.solver))\n\n failed = self.T.value is None # or self.resid_tvd.value is None\n\n if not failed: # sanity check\n t_chk = self.build_transfer_mx(self.T_params.value)\n assert(_np.linalg.norm(_np.abs(self.T.value) - t_chk) < 1e-6)\n\n self.warning_msg = None\n if failed:\n if self.solver == \"SCS\":\n #raise ValueError(\"ResidualTVD: Convex optimizer failure\")\n for eps in [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]:\n if REBUILD:\n self._rebuild_problem()\n else:\n self._build_problem()\n self.prob.solve(solver=remove_kicked(self.solver), verbose=(verbosity > 1), eps=eps)\n failed = self.T.value is None # or self.resid_tvd.value is None\n\n if not failed:\n t_chk = self.build_transfer_mx(self.T_params.value)\n assert(_np.linalg.norm(self.T.value - t_chk) < 1e-6)\n\n if eps > 1e-4:\n self.warning_msg = (\"ResidualTVD: Needed to increase eps to %g.\"\n \" The resulting ResidualTVD values are less precise.\") % eps\n if warn: print(self.warning_msg)\n break\n else:\n raise ValueError(\"ResidualTVD: Convex optimizer failure\")\n else:\n raise ValueError(\"ResidualTVD: Convex optimizer failure\")\n\n #check that Treg_factor term doesn't dominate\n\n # Update: just leave this alone, since norm-penalty doesn't get reported - TODO later\n 
treg_factor_ok = True\n\n # ------------------------------------------------------------------\n #EXPERIMENTAL algorithms for updating Treg_factor ------------------\n # ------------------------------------------------------------------\n\n #resid_tvd = self._obj(self.T_params.value)\n #if resid_tvd > 10 * self.Treg_factor.value * _np.linalg.norm(self.T_params.value, 1):\n # Treg_factor_ok = True\n #else:\n # self.Treg_factor.value = resid_tvd / 10 # self.Treg_factor.value / 10\n\n #obj2 = self._obj(self.T_params.value)\n #if obj2 < obj1:\n # Treg_factor_ok = True\n #else:\n # #maybe penalty term dominated - reduce norm(tparams) penalty term\n # self.T_params.value[:] = self.t_params[:] #REVERT\n # self.T.value[:, :] = _np.sum([self.t_params[ind] * self.t_basis[ind]\n # for ind in range(self.dim)], axis=0) + _np.eye(self.n) # REVERT\n # self.Treg_factor.value = self.Treg_factor.value / 10\n # if self.Treg_factor.value > 1e-7:\n # print(\"REDUCING treg factor to: \", self.Treg_factor.value)\n # else:\n # Treg_factor_ok = True # give up!\n\n if self.Treg_factor.value != self.initial_treg_factor:\n if verbosity > 0: print(\"NOTE: Treg_factor was reduced to %g.\" % self.Treg_factor.value)\n #_warnings.warn((\"Initial Treg_factor (%g) was too large, and was reduced to %g.\"\n # \" Consider reducing the initial value to avoid repeating calculations.\")\n # % (self.initial_treg_factor, self.Treg_factor.value))\n\n obj2 = self._obj(self.T_params.value)\n if obj2 <= obj1:\n self.t_params[:] = self.T_params.value[:]\n else:\n print_revert_msg(\"ResidualTVD failed to reduce objective function (%g > %g)\", (obj2, obj1), verbosity)\n self.T_params.value[:] = self.t_params[:]\n self.T.value[:, :] = self.build_transfer_mx(self.t_params)\n\n return self._obj(self.t_params) # not self.obj.value b/c that has additional norm regularization", "def pressure_equality_func(self):\n residual = []\n for i in range(self.num_i):\n residual += [self.inl[i].p.val_SI - self.outl[i].p.val_SI]\n return residual", "def conc_vent(V, R, Q, t):\n if Q > 0:\n C = 0.21/(Q+R) * (Q+R*math.e**-((Q+R)/V*t))\n elif abs(Q) <= R:\n C = 0.21*math.e**-(R/V*t)\n elif abs(Q) > R:\n C = 0.21*(1-R/abs(Q)*(1-math.e**-(abs(Q)*t/V)))\n return C", "def compute_demand(self, p):\n \n G, h = spdiag([-1.0]*self.n), matrix(0.0, (self.n, 1))\n \n if self.type == 'quad':\n Q, r = self.data\n return solvers.qp(-Q, p-r, G, h)['x']\n\n if self.type == 'sqrt':\n def F(x=None, z=None):\n if x is None: return 0, matrix(1.0, (self.n, 1))\n u, Du, H = self.utility(x)\n f, Df = p.T*x - u, p.T - Du\n if z is None: return f, Df\n return f, Df, -z[0]*H\n return solvers.cp(F, G, h)['x']", "def T_c(I, T_amb, V, D, R_list, N_cond=1, T_range=[298,323,348], a_s=0.9, e_s=0.9, I_sun=900.0, temp_factor=1, wind_factor=1, n_iter=10):\n\n # def Q_gen(I, R):\n # w = I * I * R\n # return w\n\n # def Q_rad_in(I_sun, A_s, a_s):\n # w = I_sun * D * a_s\n # return w\n\n # def Q_conv(htcoeff, A_s, T_lin, T_amb):\n # w = htcoeff * A_s * (T_line - T_amb)\n # return w\n\n # def Q_rad_out(e_s, A_s, sigma, T_line, T_amb):\n # w = e_s * D * sigma * (T_line**4 - T_amb**4)\n # return w\n\n def reynolds(V, D, v, Mair=1.103):\n r = V * D / v\n return r\n\n def nusselt(Re, Pr):\n a = 0.62 * ( (Re) ** (1.0/2.0) ) * ( Pr ** (1.0/3.0) )\n b = (1 + (0.4/(Pr**(2.0/3.0) ) ) ) ** (1.0/4.0)\n c = (Re / 282000) ** (5.0/8.0)\n n = 0.3 + (a/b) * ( (1 + c) ** (4.0/5.0) )\n return n\n\n def air_prop(T_amb):\n # temp v k Pr\n air_prop = np.array([[200, 7.59e-6, 18.1e-3, 0.737],\n [250, 11.44e-6, 22.3e-3, 
0.720],\n [300, 15.89e-6, 26.3e-3, 0.707],\n [350, 20.92e-6, 30.0e-3, 0.700],\n [400, 26.41e-6, 33.8e-3, 0.690],\n [450, 32.39e-6, 37.3e-3, 0.686],\n [500, 38.79e-6, 40.7e-3, 0.684],\n [550, 45.57e-6, 43.9e-3, 0.683],\n [600, 52.69e-6, 46.9e-3, 0.685]])\n\n v, k, Pr = np.apply_along_axis(lambda x: np.interp(T_amb, air_prop[:,0], x),\n 0, air_prop[:,1:])\n return v, k, Pr\n\n def R_T(R_lo, R_mid, R_hi, T_line, N_cond, T_range=T_range):\n if 273 <= T_line <= 323:\n R = ((R_lo + \n ((R_lo - R_mid)/(T_range[0] - T_range[1]))\n *(T_line - T_range[0]))/N_cond)\n elif T_line > 323:\n R = ((R_mid + \n ((R_mid - R_hi)/(T_range[1] - T_range[2]))\n *(T_line - T_range[1]))/N_cond)\n else:\n R = R_lo\n print('Out of bounds')\n return R\n\n R_lo, R_mid, R_hi = R_list[0], R_list[1], R_list[2]\n temp_factor = 1\n wind_factor = 1\n sigma = 5.6703e-8 # Stefan-Boltzmann constant\n\n T_amb = T_amb*temp_factor\n V = V*wind_factor\n\n v, k, Pr = air_prop(T_amb)\n Re = reynolds(V, D, v)\n htcoeff = nusselt(Re, Pr) * k / D\n\n def T_line(T_init):\n \n R = R_T(R_lo, R_mid, R_hi, T_init, N_cond)\n print R\n\n C4 = e_s * sigma * D * math.pi\n C3 = 0.0\n C2 = 0.0\n C1 = htcoeff * D * math.pi\n C0 = - ( I ** 2 * R\n + I_sun * a_s * D\n + htcoeff * D * math.pi * T_amb\n + e_s * D * math.pi * sigma * (T_amb ** 4))\n\n return np.roots([C4, C3, C2, C1, C0])\n\n T_c = T_amb\n \n for i in range(n_iter):\n T_arr = T_line(T_c)\n T_c = np.real(T_arr[np.where((np.real(T_arr) > 0) & ~(np.iscomplex(T_arr)))]).mean()\n print T_c\n\n return T_c", "def evaluate_C_q(self, q):\n C_q_list = []\n\n\n GlobalVariables.q_i_dim[body_id]", "def get_value(self, c_puct):\n self._u = (c_puct * self._P *\n np.sqrt(self._parent._n_visits) / (1 + self._n_visits))\n return self._Q + self._u", "def get_value(self, c_puct):\n self._u = (c_puct * self._P *\n np.sqrt(self._parent._n_visits) / (1 + self._n_visits))\n return self._Q + self._u", "def get_value(self, c_puct):\n self._u = (c_puct * self._P *\n np.sqrt(self._parent._n_visits) / (1 + self._n_visits))\n return self._Q + self._u", "def get_value(self, c_puct):\n self._u = (c_puct * self._P *\n np.sqrt(self._parent._n_visits) / (1 + self._n_visits))\n return self._Q + self._u", "def get_value(self, c_puct):\n self._u = (c_puct * self._P *\n np.sqrt(self._parent._n_visits) / (1 + self._n_visits))\n return self._Q + self._u", "def Lq(self):\n if not self.isVaild():\n pass\n temp = ((self.r()**self.C)*self.Rho()) / \\\n (math.factorial(self.C)*((1 - self.Rho())**2))\n return temp*self.P0()", "def annihilation_cross_sections(self, Q):\n muon_contr = self.sigma_xx_to_a_to_ff(Q, \"mu\")\n electron_contr = self.sigma_xx_to_a_to_ff(Q, \"e\")\n pi0pipi_contr = self.sigma_xx_to_a_to_pi0pipi(Q)\n aa_contr = self.sigma_xx_to_aa(Q)\n\n total = muon_contr + electron_contr + pi0pipi_contr + aa_contr\n # pi0pipi_contr\n\n cross_secs = {\n \"mu mu\": muon_contr,\n \"e e\": electron_contr,\n \"pi0 pi pi\": pi0pipi_contr,\n \"a a\": aa_contr,\n \"total\": total,\n }\n\n return cross_secs", "def CpT(dict_, T_react): \t\t\t# T_column - name of the column in of Cp temperature in Data\n\tT = T_react\n\tif not dict_['T(Cp)']:\n\t\treturn 0, 0\n\telse:\n\t\tCpT_S_ = dict_['a']*(math.log(T/298)) + dict_['b']*math.pow(10,-3)*(T-298) - 0.5*dict_['c']*math.pow(10,6)*(math.pow(T, -2) - math.pow(298,-2)) + dict_['d']*(0.5*math.pow(10,-6)*(math.pow(T,2) - math.pow(298,2)))\n\t\t\t\t\n\t\tCpT_H_ = (dict_['a']*(T - 298) + dict_['b']*0.5*math.pow(10,-3)*(math.pow(T,2) - math.pow(298,2)) + 
dict_['c']*(math.pow(10,6)*(math.pow(298,-1) - math.pow(T, -1))) + dict_['d']*(1/3)*(math.pow(10,-6)*(math.pow(T,3) - math.pow(298,3))))\n\t\treturn CpT_S_, CpT_H_\n\t\t'''\n\telif isinstance(dict_['T(Cp)'], tuple):\t\t\t\t# This part doesn`t check! \n\t\t\"\"\"If more then one values of T(Cp) and 'a', 'b', 'c', 'd' this part calculate a sum of integrals of CpT\"\"\"\n\t\tT_start = 298\t\t\t\t\t\t\t\t\t\t# First temperature of integral calculation\n\t\tdCpT_S = []\n\t\tdCpT_H = []\n\t\tfor x in range(len(dict_['T(Cp)'])):\n\t\t\tif dict_['T(Cp)'][x] > T_react:\n\t\t\t\tT = T_react\n\t\t\telse:\n\t\t\t\tT = dict_['T(Cp)'][x]\n\t\t\t\n\t\t\tCpT_S_ = (dict_['a'][x]*math.log(T/298)) + (dict_['b'][x]*math.pow(10,-3)*(T-298)) - (0.5*dict_['c'][x]*(math.pow(T, -2) - math.pow(298,-2))) + (dict_['d'][x]*(0.5*math.pow(10,-6)*(math.pow(T,2) - math.pow(298,2))))\n\t\t\t\n\t\t\tCpT_H_ = (dict_['a'][x]*(T - 298) + (dict_['b'][x]*(0.5*math.pow(10,-3)*(math.pow(T,2))) - math.pow(298,2)) + (dict_['c'][x]*(math.pow(10,6)*(math.pow(298,-1) - math.pow(T, -1)))) + (dict_['d'][x]*(1/3*math.pow(10,-6)*(math.pow(T,3) - math.pow(298,3)))))\n\t\t\t\n\t\t\tdCpT_S.append(CpT_S_)\n\t\t\tdCpT_H.append(CpT_H_)\n\t\t\t\n\t\t\tT_start = dict_['T(Cp)'][x]\n\t\t\tif T == T_react:\n\t\t\t\treturn \t(sum(dCpT_S), sum(dCpT_H))\n\t\t'''", "def get_cape(temp,pres,dewpt,hght,startp,startt,startdp,totalcape=False): \n\n # Check units\n # Init temp is startt in C, Init dew point is stwrtdp,\n # pressure levels are in hPa \n temp = temp - 273.15 # convert temperature to celsius\n dewpt = dewpt - 273.15 # convert dewpoint to celsius\n pres = pres/100 # convert pressure to hPa\n \n \n inds = np.where( (pres < startp) ) \n tmp = pres[inds]\n del pres\n #pres = tmp[::-1]\n pres = tmp[:]\n del tmp \n startp = startp/100\n \n tmp = temp[inds]\n del temp\n #temp = tmp[::-1]\n temp = tmp[:]\n del tmp \n\n tmp = dewpt[inds]\n del dewpt\n #dewpt = tmp[::-1]\n dewpt = tmp[:]\n del tmp \n\n tmp = hght[inds]\n del hght\n #hght = tmp[::-1]\n hght = tmp[:]\n del tmp \n\n \n # Get Sub-LCL traces \n presdry,tempdry,tempiso=dry_ascent(startp,startt-degCtoK,startdp-degCtoK) \n \n\n # make lcl variables explicit\n P_lcl=presdry[-1]\n T_lcl=tempdry[-1]\n\n # Now lift a wet parcel from the intersection point\n # preswet=linspace(P_lcl,100,101)\n preswet,tempwet=moist_ascent(P_lcl,T_lcl)\n\n # tparcel is the concatenation of tempdry and \n # tempwet, and so on.\n \n tparcel=np.concatenate((tempdry,tempwet[1:]))\n pparcel=np.concatenate((presdry,preswet[1:]))\n\n # Interpolating the environmental profile onto the \n # parcel pressure coordinate\n # tempenv=interp(preswet,pres[::-1],temp[::-1])\n ## NEW, for total column:\n tempenv=interp(pparcel,pres[::-1],temp[::-1])\n\n\n # now solve for the equlibrium levels above LCL\n # (all of them, including unstable ones)\n # eqlev,stab=solve_eq(preswet[::-1],(tempwet-tempenv)[::-1])\n # NEW, for total column:\n # On second thought, we don't really want/need\n # any equilibrium levels below LCL\n # eqlev,stab=solve_eq(pparcel[::-1],(tparcel-tempenv)[::-1])\n # This is equivalent to the old statement :\n eqlev,stab=solve_eq(pparcel[pparcel<=P_lcl][::-1],\\\n (tparcel-tempenv)[pparcel<=P_lcl][::-1])\n\n aa = tparcel-tempenv\n\n # Sorting index by decreasing pressure\n I=np.argsort(eqlev)[::-1]\n eqlev=eqlev[I]; stab=stab[I]\n\n # temperatures at the equilibrium level\n # tempeq=interp(eqlev,preswet[::-1],tempenv[::-1])\n ## NEW, for total column:\n tempeq=interp(eqlev,pparcel[::-1],tparcel[::-1])\n\n # This helps 
with debugging\n # for ii,eq in enumerate(eqlev):\n # print \"%5.2f %5.2f %2d\"%(eq,tempeq[ii],stab[ii])\n\n # need environmental temperature at LCL\n tenv_lcl=interp(P_lcl,pparcel[::-1],tempenv[::-1])\n\n isstab=np.where(stab==1.,True,False)\n unstab=np.where(stab==1.,False,True) \n\n if eqlev.shape[0]==0:\n # no unstable layers in entire profile\n # because the parcel never crosses the tenv\n P_lfc=float('NaN')\n P_el=float('NaN')\n elif T_lcl>tenv_lcl:\n # check LCL to see if this is unstable\n P_lfc=P_lcl\n if totalcape:\n P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n elif eqlev.shape[0]>1:\n # Parcel is stable at LCL so LFC is the \n # first unstable equilibrium level and \n # \"EQ\" level is the first stable equilibrium \n # level\n P_lfc=eqlev[unstab][0]\n if totalcape:\n P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n else:\n # catch a problem... if there is only\n # one eqlev and it's stable (this is \n # unphysical), then it could be a vertical\n # resolution thing. This is a kind of \n # \"null\" option\n try:\n\t P_el=eqlev[isstab][0]\n P_lfc=eqlev[isstab][0]\n except:\n\t P_el=eqlev[unstab][0]\n P_lfc=eqlev[unstab][0]\t\n\t\n if np.isnan(P_lfc):\n return P_lcl,P_lfc,P_el,0,0\n\n # need to handle case where dwpt is not available \n # above a certain level for any reason. Most simplest \n # thing to do is set it to a reasonably low value; \n # this should be a conservative approach!\n \n #dwpt=dewpt.copy().soften_mask()\n [inds] = np.where(np.isnan(dewpt))\n dwpt = dewpt\n dwpt[inds] = dwpt.min()\n \n # raise ValueError\n #if dwpt[(pres>=P_el).data*(pres<P_lfc).data].mask.any():\n # print \"WARNING: substituting dwpt.min() for masked values of DWPT in this sounding\"\n #dwpt[dwpt.mask]=dwpt.min()\n # dwptenv=interp(preswet,pres[::-1],dwpt[::-1])\n # NEW:\n\n dwptenv=interp(pparcel,pres[::-1],dwpt[::-1])\n\n\n \n #if hght[(pres>=P_el).data].mask.any():\n # raise NotImplementedError, \"TODO: Implement standard atmosphere to substitute missing heights\"\n # hghtenv=interp(preswet,pres[::-1],self.soundingdata['hght'][::-1])\n # NEW:\n hghtenv=interp(pparcel,pres[::-1],hght[::-1])\n \n\n # Areas of POSITIVE Bouyancy\n # cond1=(tempwet>=tempenv)*(preswet<=P_lfc)*(preswet>P_el)\n # NEW:\n cond1=(tparcel>=tempenv)*(pparcel<=P_lfc)*(pparcel>P_el)\n # Areas of NEGATIVE Bouyancy\n # cond2=(tempwet<tempenv)*(preswet<=P_lcl)*(preswet>P_el)\n # NEW:\n if totalcape:\n cond2=(tparcel<tempenv)*(pparcel>P_el)\n else:\n cond2=(tparcel<tempenv)*(pparcel>P_lfc)\n # Do CAPE calculation\n # 1. Virtual temperature of parcel... 
remember it's saturated above LCL.\n # e_parcel=SatVap(tempwet)\n # Tv_parcel=VirtualTemp(tempwet+degCtoK,preswet*100.,e_parcel)\n # e_env=SatVap(dwptenv)\n # Tv_env=VirtualTemp(tempenv+degCtoK,preswet*100.,e_env)\n # NEW:\n e_parcel=SatVap(tparcel)\n Tv_parcel=VirtualTemp(tparcel+degCtoK,pparcel*100.,e_parcel)\n e_env=SatVap(dwptenv)\n Tv_env=VirtualTemp(tempenv+degCtoK,pparcel*100.,e_env)\n\n CAPE=trapz(9.81*(Tv_parcel[cond1]-Tv_env[cond1])/Tv_env[cond1],hghtenv[cond1])\n CIN=trapz(9.81*(Tv_parcel[cond2]-Tv_env[cond2])/Tv_env[cond2],hghtenv[cond2])\n\n return P_lcl,P_lfc,P_el,CAPE,CIN", "def computevaluefromqvalues(self, state):\n legalactions = env.getlegalactions(deepcopy(env.state_to_array(state)))\n if len(legalactions) == 0:\n return 0.0\n tmp = Counter()\n for action in legalactions:\n tmp[action] = self.getqvalue(state, action)\n return tmp[tmp.argMax()]", "def pk_iv_model(t, y, Q_pc, V_c, V_p, CL):\n q_c, q_p = y\n transfer = Q_pc * (q_c / V_c - q_p / V_p)\n dqc_dt = self.dosefunction(t) - q_c / V_c * CL - transfer\n dqp_dt = transfer\n return [dqc_dt, dqp_dt]", "def calc_expected_utility(qo_pi, C):\n n_steps = len(qo_pi)\n \n # initialise expected utility\n expected_util = 0\n\n # loop over time points and modalities\n num_modalities = len(C)\n for t in range(n_steps):\n for modality in range(num_modalities):\n lnC = spm_log_single(softmax(C[modality][:, np.newaxis]))\n expected_util += qo_pi[t][modality].dot(lnC)\n\n return expected_util", "def get_pvalue_thd(self):\n terminals_values = []\n for terminal in self.feature_tree.get_terminals():\n temp = self.get_mannwitneyu_pvalue(terminal)\n terminals_values.append(temp)\n if temp == 1:\n print('non siginificant')\n while 0 in terminals_values:\n terminals_values.remove(0)\n self.pvalue_thd = min(self.pvalue_thd,np.mean(terminals_values))\n #print('pvalue_thd',self.pvalue_thd)", "def compute_coefficients(self, nrange, irange=None, prec=10, ef=True, Qadd=0):\n # we first need an initial set of coefficients\n C = self._coeffs\n P = self._principal_part\n M = self._space\n WR = M.multiplier()\n weight = M.weight()\n if(self.prec >= prec or len(C) > 0):\n # presumable we already have good coefficients\n pass\n else:\n # Need initial set first\n print(\"Computing initial set of coefficients!\")\n self.prec = prec\n [Y, M0] = self._space.get_Y_and_M(P, weight, prec)\n Q = M0 + 10\n W = vv_harmonic_wmwf_setupV(WR, P, Y, M0, Q, weight, self._space._sym_type, verbose=self._space._verbose)\n if (0, 0) in P:\n N = self._space.set_norm()\n # N=set_norm_vv_harmonic_weak_maass_forms(WR,cusp_form=True,holomorphic=self._holomorphic)\n else:\n N = self._space.set_norm()\n # N=set_norm_vv_harmonic_weak_maass_forms(WR,cusp_form=False,holomorphic=self._holomorphic)\n C = solve_system_for_vv_harmonic_weak_Maass_waveforms(W, N, verbose=self._verbose)\n\n # endif\n # check if we have all coefficients we wanted\n maxc = max(C[list(C.keys())[0]].keys())\n if maxc >= max(nrange):\n print(\"Have all we need!\")\n pass\n else:\n # we do not have all coefficients we need\n print(\"Need to compute more!!\")\n Ns = nrange # [maxc,max(nrange)]\n if irange is not None:\n Is = irange\n else:\n Is = [min(M.D()), max(M.D())]\n\n # Try to find good Y\n # Recall that the error in the negative part is usually smaller than in the positive part\n M_minus = abs(min(self._coeffs[list(self._coeffs.keys())[0]]))\n M_plus = abs(max(self._coeffs[list(self._coeffs.keys())[0]]))\n # Assume we computed these coefficients at (almost) the highest horocycle\n Y0 = mpmath.sqrt(3) / 
mpmath.mpf(2) * mpmath.mpf(0.995)\n [err_minus, err_plus] = self.get_error_estimates(Y0, M_minus, M_plus)\n kint = mpmath.mp.mpf(1 - self._space.weight())\n print(\"original:\")\n print(\"err-={0}\".format(err_minus))\n print(\"err+={0}\".format(err_plus))\n Y0 = mpmath.mpf(0.5)\n Yin = Y0\n for j in range(5000):\n Y = Y0 * mpmath.power(mpmath.mpf(0.99), j)\n t = mpmath.pi() * 2 * Y * abs(Ns[0])\n tmp1 = mpmath.exp(t)\n err1 = err_plus * tmp1\n # print \"err+=\",err1\n tmp2 = mpmath.gammainc(kint, 2 * t)\n err2 = err_plus * mpmath.exp(-t) / tmp2\n # print \"err-=\",err2\n if(max(err1, err2) < mpmath.power(10, -prec)):\n Yin = Y\n break\n # t=max(1.0,abs(mpmath.log10(prec)-mpmath.log10(self.prec)))\n # Yin=t/mpmath.mpf(Ns[0]+Ns[1])*mpmath.mpf(2.0) ## This should be good on average\n # Yin=Yin*mpmath.mpf(0.2)\n print(\"err={0}\".format(max(err1, err2)))\n print(\"Yin={0}\".format(Yin))\n sys.stdout.flush()\n # Qadd=40\n try:\n if(ef):\n CC = vv_harmonic_wmwf_phase2_2_ef(self, Ns, Is, prec, Yin, Qadd_in=Qadd)\n else:\n CC = vv_harmonic_wmwf_phase2_2(M, P, C, Ns, Is, prec, Yin)\n for x in CC.keys():\n C[x] = CC[x]\n except KeyboardInterrupt:\n print(\"Manually stopping...\")", "def Q2C(self, q):\n\n #q = q.squeeze();\n C = np.empty((3,3));\n\tC[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);\n\tC[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));\n\tC[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));\n\n\tC[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));\n\tC[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);\n\tC[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));\n\n\tC[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));\n\tC[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));\n\tC[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);\n\n return C", "def calculateElementCoefficients(self):\n #\n #get u,grad(u), and grad(u)Xgrad(w) at the quadrature points\n #\n for cj in range(self.nc):\n self.u[cj].getValues(self.q[('v',cj)],\n self.q[('u',cj)])\n if self.q.has_key(('grad(u)',cj)):\n self.u[cj].getGradientValues(self.q[('grad(v)',cj)],\n self.q[('grad(u)',cj)])\n #\n #get functions of (t,x,u) at the quadrature points\n #\n self.coefficients.evaluate(self.timeIntegration.t,self.q)\n log(\"Coefficients on element\",level=10,data=self.q)\n #\n # time integration is handled directly in ELLAM weak approximation, don't have a hook for\n # doing that via a time integration object (could if it were a direct Lagrange Galerkin formulation I believe)\n # however, need to set time integration's m_tmp if use that anywhere\n #if self.timeTerm:\n # self.timeIntegration.calculateElementCoefficients(self.q)\n\n #todo eventually can add nonlinear potential here\n\n #cek and mwf need to go through this section to clean up, some of next two blocks could go to calcQuad\n #\n #todo need non-diagonal dependence?\n for ci in range(self.nc):\n cfemIntegrals.calculateCFLADR(self.elementEffectiveDiametersArray,\n self.q[('dm',ci,ci)],\n self.q[('df',ci,ci)],#could just be velocity\n self.q[('cfl',ci)])", "def yule_q(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = a + b + c + d\n\n if n == 0:\n return np.nan\n elif p1 == n:\n # c and d are zero\n return _div(a - b, p1)\n elif p2 == n:\n # b and d are zero\n return _div(a - c, p2)\n elif q1 == n:\n # a and b are zero\n return _div(d - c, q1)\n elif q2 == n:\n # a and c are zero\n return _div(d - b, q2)\n\n return _div(self.covar(), a * d + b * c)", "def calcMisc(t, q, u, p):\n m=dict()\n # Split state into positions and speeds (qx, qxd), uaero 
states (qxa_ua), dynamic inflow states (qxa_di)\n m['qx'], m['qxd'], m['qxa_ua'], m['qxa_di'] = split_q(q, p['Iqxs'], p['Iqxsd'], p['Iqxa_ua'], p['Iqxa_di'])\n\n # Structural states (length 3, even if not all DOFs are actice)\n m['q_full'], m['x'], m['xd'] = inflate_q(q, Iq=p['Iq'])\n\n # Orientation of the section\n m['Ux'], m['Uy'], m['theta_p'] = inputsAtTime(t, u)\n th = m['x'][2]\n m['omega'] = m['xd'][2]\n m['theta'] = th + m['theta_p'] + p['beta'] \n m['rho_x'] = (-p['x_AG']* np.sin(m['theta']) + p['y_AG']*np.cos(m['theta']) )\n m['rho_y'] = (-p['x_AG']* np.sin(m['theta']) + p['y_AG']*np.cos(m['theta']) )\n return m", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def clfqp(self,x,p):\n alp = self.alp_opt\n nu = self.nu_opt\n dt = self.dt\n n = self.n\n I = np.identity(n)\n M = self.ncm(x,p)\n nu = np.size(self.h_or_g(x,p),1)\n u = cp.Variable((nu,1))\n e = np.reshape(x,(n,1))\n fx = np.reshape(self.dynamicsf(x,p),(n,1))\n gx = self.h_or_g(x,p)\n dMdt = (nu*I-M)/dt\n constraints = [2*e.T@(fx+gx@u)+e.T@dMdt@e <= -2*alp*e.T@M@e]\n prob = cp.Problem(cp.Minimize(cp.sum_squares(u)),constraints)\n prob.solve()\n u = u.value\n u = np.ravel(u)\n return u" ]
[ "0.59205824", "0.58363307", "0.57980275", "0.5699093", "0.5682799", "0.5677158", "0.5676779", "0.5636182", "0.56267333", "0.5624366", "0.5620646", "0.5620646", "0.5620646", "0.5620646", "0.5620646", "0.5599994", "0.5571987", "0.55696845", "0.5563916", "0.55605155", "0.5541283", "0.5533253", "0.5532631", "0.5527401", "0.5521458", "0.5517854", "0.5502168", "0.54719764", "0.5461318", "0.54591864" ]
0.67792004
0
Usage: Compute the frechet distance between trajectories P and Q
def frechet(P, Q):
    p = len(P)
    q = len(Q)
    mdist = eucl_dist_traj(P, Q)
    P_dist = [eucl_dist(P[ip], P[ip + 1]) for ip in range(p - 1)]
    Q_dist = [eucl_dist(Q[iq], Q[iq + 1]) for iq in range(q - 1)]
    cc = compute_critical_values(P, Q, p, q, mdist, P_dist, Q_dist)
    eps = cc[0]
    while (len(cc) != 1):
        m_i = len(cc) / 2 - 1
        eps = cc[m_i]
        rep = decision_problem(P, Q, p, q, eps, mdist, P_dist, Q_dist)
        if rep:
            cc = cc[:m_i + 1]
        else:
            cc = cc[m_i + 1:]
    frech = eps
    return frech
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dist(p, q):\n return ((p[0] - q[0])**2 + (p[1] - q[1])**2 + (p[2] - q[2])**2)**0.5", "def dist(p,q):\n return math.sqrt((p[0] - q[0]) ** 2+(p[1] - q[1]) ** 2)", "def distance(M, p, q):\n\n return (math.sqrt(math.pow(M[p, 0] - M[q, 0],2) + math.pow(M[p, 1] - M[q, 1],2))) # obliczanie dystansu miedzy punktami", "def point_dist(p, q):\n\tif len(p) != len(q):\n\t\traise ValueError(\"Lengths don't match\", p, q)\n\treturn math.sqrt(sum((a-b)**2 for a,b in zip(p,q)))", "def distance(self,pt1,pt2):\n #productive #frequent\n if frequent: profprint()\n d = ( ( float(pt1[0]) - float(pt2[0]) )**2 + ( float(pt1[1]) - float(pt2[1]) )**2 + ( float(pt1[2]) - float(pt2[2]) )**2 )**0.5\n return d", "def distance(self, pt1, pt2):\r\n # productive #frequent\r\n if frequent: profprint()\r\n d = ((float(pt1[0]) - float(pt2[0])) ** 2 + (float(pt1[1]) - float(pt2[1])) ** 2 + (float(pt1[2]) - float(pt2[2])) ** 2) ** 0.5\r\n return d", "def qpDist(pt0, pt1):\n a = hou.Vector2(pt0.x(), pt0.y())\n b = hou.Vector2(pt1.x(), pt1.y())\n return a.distanceTo(b)", "def distance(p, q):\n return norm(np.asarray(p) - np.asarray(q))", "def KB_Dist(P,Q):\r\n \r\n \r\n K=0\r\n Epsilon=0.000001\r\n Q+=Epsilon\r\n P+=Epsilon\r\n for x in range(len(Q)):\r\n K-=P[x]*np.log(Q[x]/P[x])\r\n return K", "def AttractionForce(q,q_k):\r\n return k_p*CalculateDistance(q,q_k)", "def float_euclidean_dist(p, q):\n px, py = p[0], p[1]\n qx, qy = q[0], q[1]\n diff_x = abs(qx - px)\n diff_y = abs(qy - py)\n return float(math.sqrt((diff_x * diff_x) + (diff_y * diff_y)))", "def getDistance(self,p1,p2):\n return sum([(p1[i]-p2[i])**2 for i in range(2)])", "def _get_dist(self, p1, p2): \r\n\r\n distance = np.sqrt(\r\n (p1[0] - p2[0]) ** 2 +\r\n (p1[1] - p2[1]) ** 2 +\r\n (p1[2] - p2[2]) ** 2)\r\n\r\n return distance", "def calc_dist(self, p):\n p = np.array((p.x, p.y, p.z))\n return LA.norm(p - self.car_pos)", "def dist(self, p):\n return math.sqrt((p.x - self.x)**2 + (p.y - self.y)**2)", "def var_dist(P, Q, X=alph):\n\n dist = 0.5 * sum([abs(P[x] - Q[x]) for x in X])\n\n return dist", "def Dist(p1,p2):\n x1, y1 = p1\n x2, y2 = p2\n return (((x1-x2)*(x1-x2)) + ((y1-y2)*(y1-y2)))**0.5", "def getDistance(p1, p2):\n\tdist = la.norm(p2 - p1)\n\treturn dist", "def distance(p, q):\n if not isinstance(p, Point):\n raise TypeError(\"distance() expects 2 Points or a Point and a Line\")\n if isinstance(q, Point):\n if p.dim() != q.dim():\n raise ValueError(\"the dimensions of two points don't match\")\n return (p - q).norm()\n elif isinstance(q, Line):\n return abs(L(p)) / L.normal_vector().norm()\n else:\n raise TypeError(\"distance() expects argument 2 to be a Point or a Line\")", "def distance(p1,p2):\n import numpy as np\n x = np.sqrt(sum(np.power(p2-p1,2)))\n return(x)", "def distance(p0, p1):\n return( numpy.sqrt( (p0[0]-p1[0])**2 + \n (p0[1]-p1[1])**2 + \n (p0[2]-p1[2])**2 ) )", "def distance(p1, p2):\n\n \"\"\"\n (p1[0] - p2[0]) ** 2 + \n (p1[1] - p2[1]) ** 2 + \n \"\"\"\n sum_all = 0\n for i, v in enumerate(p1):\n diff_squared = (v - p2[i]) ** 2\n sum_all += diff_squared\n return(math.sqrt(sum_all))", "def CalculateDistance(q1, q2):\r\n return np.sqrt((q1[0] - q2[0])**2 + (q1[1] - q2[1])**2)", "def dist(p0, p1):\n return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)", "def get_distance(p1, p2):\n return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5", "def calculate(self, rxn: ComputedReaction) -> float:\n combos = chain(\n product(rxn.reactant_entries, rxn.product_entries),\n combinations(rxn.product_entries, 2),\n )\n distances = 
[\n self.cpd.shortest_domain_distance(\n combo[0].composition.reduced_formula,\n combo[1].composition.reduced_formula,\n )\n for combo in combos\n ]\n\n distance = self._mu_func(distances)\n return distance", "def compute_dist(p_1, p_2):\n return sqrt((p_2[0] - p_1[0])**2 + (p_2[1] - p_1[1])**2 +\n (p_2[2] - p_1[2])**2)", "def dist(p1,p2):\n\n return sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def test_frechet_distance_univariate(self):\n mu_x = torch.rand((1,), device=self.device)\n sigma_x = torch.rand((1, 1), device=self.device)\n\n mu_y = torch.rand((1,), device=self.device)\n sigma_y = torch.rand((1, 1), device=self.device)\n\n # Matrix square root reduces to scalar square root.\n expected = (mu_x - mu_y) ** 2 + sigma_x + sigma_y - 2 * torch.sqrt(sigma_x * sigma_y)\n expected = expected.item()\n actual = F.frechet_distance(mu_x, sigma_x, mu_y, sigma_y)\n\n self.assertEqual(expected, actual)", "def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))" ]
[ "0.7173551", "0.69895643", "0.6843418", "0.6539303", "0.65212935", "0.650371", "0.63225406", "0.62851346", "0.6272369", "0.6259774", "0.62053066", "0.62031424", "0.6179285", "0.6176652", "0.6154126", "0.6151824", "0.6131871", "0.6069721", "0.60457736", "0.6016533", "0.5989678", "0.5989123", "0.597161", "0.5962558", "0.59619457", "0.59590685", "0.5956418", "0.5951651", "0.5951047", "0.5950841" ]
0.7064292
1
Append size with the specified number of entities
def appendsize(self, numents): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appendsize(self, numents):\n self._numents += numents", "def inc_size(self):\r\n self.__length += 1", "def batch_size(self) -> int:\n ...", "def appenddictitemsize(self, key, numents):\n self._dentsvertsdata[key].appendsize(numents * self._multFactor)", "def _assign_sizes(self):", "def setDataSize(self, head,payload,eop):\n self.dataSize = len(head)+len(payload)+len(eop)", "def chunk_size(self) -> global___Expression:", "def update_size(self,\r\n entrylist=None,\r\n newsize=60):\r\n if entrylist is None:\r\n entrylist = []\r\n\r\n for i in entrylist:\r\n\r\n if str(i) in self.indexes():\r\n\r\n tempnote = self.get_note(i).change_size(newsize)\r\n self.add_note(i,note=tempnote)", "def size(self, size: int):\n\n self._size = size", "def count_objects_of_size(self, n: int, **parameters: int) -> int:", "def getBatchSize(self, context, obj):\n return 10", "def size(self, gather=True):\n raise NotImplementedError", "def size(self, size):\n self._size = size", "def set_number_of_rows(self, number_of_rows):\n self.set_value_into_input_field(self.number_of_rows_inputbox_locator, number_of_rows, True)\n global bulk_add_number_of_rows\n bulk_add_number_of_rows = int(number_of_rows)", "def __init__(self, size: int = 100):\n self.data = [None] * size\n self.size = size", "def test_batch_size(self):\n\n class A(Document):\n s = StringField()\n\n A.drop_collection()\n\n for i in range(100):\n A.objects.create(s=str(i))\n\n # test iterating over the result set\n cnt = 0\n for _ in A.objects.batch_size(10):\n cnt += 1\n assert cnt == 100\n\n # test chaining\n qs = A.objects.all()\n qs = qs.limit(10).batch_size(20).skip(91)\n cnt = 0\n for _ in qs:\n cnt += 1\n assert cnt == 9\n\n # test invalid batch size\n qs = A.objects.batch_size(-1)\n with pytest.raises(ValueError):\n list(qs)", "def build(self, block_size):", "def getBatchSize(self, context, obj):\n return 100", "def record_batch_size(self):\n return 10000", "def _resize_list(self, new_size: int):\n for _ in range((new_size + 1) - len(self)):\n self.append(0)", "def set_batch_size(self, batch_size):\n final_sz = self.full_dataset_size % batch_size\n if not self.final_batch:\n self.dataset_size = self.full_dataset_size - final_sz\n self.enqueuer.set_num_elements(self.dataset_size)\n self.batch_size = batch_size", "def add(self, batch_size=10000):\n if self.N <= batch_size:\n self.index.add(self.database)\n else:\n [self.index.add(self.database[i:i + batch_size])\n for i in tqdm(range(0, len(self.database), batch_size),\n desc='[index] add')]", "def updateSize(self, *args):\n return None", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def test_extend_len(self):\n self.table.extend([\n ['Tom', 26],\n ['Chantelle', 24],\n ['Deccy', 8],\n ])\n self.assertEqual(len(self.table), 3)", "def size(*args):", "def chunked_insert(model, items, chunk_size=150):\n # https://www.sqlite.org/limits.html#max_compound_select\n with db.atomic():\n for idx in range(0, len(items), chunk_size):\n model.insert_many(items[idx:idx+chunk_size]).execute()" ]
[ "0.71058905", "0.62483996", "0.60852516", "0.60310906", "0.6018139", "0.5965686", "0.588384", "0.5873902", "0.5820619", "0.5802095", "0.57661766", "0.57575065", "0.57453406", "0.5733978", "0.5712476", "0.57062435", "0.5704675", "0.56961715", "0.56593245", "0.56401414", "0.5638335", "0.55875313", "0.5572744", "0.5556065", "0.5556065", "0.5556065", "0.5556065", "0.55386287", "0.5523628", "0.551991" ]
0.7674417
0
Append size with the specified number of entities
def appendsize(self, numents): self._numents += numents
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appendsize(self, numents):\n pass", "def inc_size(self):\r\n self.__length += 1", "def batch_size(self) -> int:\n ...", "def appenddictitemsize(self, key, numents):\n self._dentsvertsdata[key].appendsize(numents * self._multFactor)", "def _assign_sizes(self):", "def setDataSize(self, head,payload,eop):\n self.dataSize = len(head)+len(payload)+len(eop)", "def chunk_size(self) -> global___Expression:", "def update_size(self,\r\n entrylist=None,\r\n newsize=60):\r\n if entrylist is None:\r\n entrylist = []\r\n\r\n for i in entrylist:\r\n\r\n if str(i) in self.indexes():\r\n\r\n tempnote = self.get_note(i).change_size(newsize)\r\n self.add_note(i,note=tempnote)", "def size(self, size: int):\n\n self._size = size", "def count_objects_of_size(self, n: int, **parameters: int) -> int:", "def getBatchSize(self, context, obj):\n return 10", "def size(self, gather=True):\n raise NotImplementedError", "def size(self, size):\n self._size = size", "def set_number_of_rows(self, number_of_rows):\n self.set_value_into_input_field(self.number_of_rows_inputbox_locator, number_of_rows, True)\n global bulk_add_number_of_rows\n bulk_add_number_of_rows = int(number_of_rows)", "def __init__(self, size: int = 100):\n self.data = [None] * size\n self.size = size", "def test_batch_size(self):\n\n class A(Document):\n s = StringField()\n\n A.drop_collection()\n\n for i in range(100):\n A.objects.create(s=str(i))\n\n # test iterating over the result set\n cnt = 0\n for _ in A.objects.batch_size(10):\n cnt += 1\n assert cnt == 100\n\n # test chaining\n qs = A.objects.all()\n qs = qs.limit(10).batch_size(20).skip(91)\n cnt = 0\n for _ in qs:\n cnt += 1\n assert cnt == 9\n\n # test invalid batch size\n qs = A.objects.batch_size(-1)\n with pytest.raises(ValueError):\n list(qs)", "def build(self, block_size):", "def getBatchSize(self, context, obj):\n return 100", "def record_batch_size(self):\n return 10000", "def _resize_list(self, new_size: int):\n for _ in range((new_size + 1) - len(self)):\n self.append(0)", "def set_batch_size(self, batch_size):\n final_sz = self.full_dataset_size % batch_size\n if not self.final_batch:\n self.dataset_size = self.full_dataset_size - final_sz\n self.enqueuer.set_num_elements(self.dataset_size)\n self.batch_size = batch_size", "def add(self, batch_size=10000):\n if self.N <= batch_size:\n self.index.add(self.database)\n else:\n [self.index.add(self.database[i:i + batch_size])\n for i in tqdm(range(0, len(self.database), batch_size),\n desc='[index] add')]", "def updateSize(self, *args):\n return None", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def test_extend_len(self):\n self.table.extend([\n ['Tom', 26],\n ['Chantelle', 24],\n ['Deccy', 8],\n ])\n self.assertEqual(len(self.table), 3)", "def size(*args):", "def chunked_insert(model, items, chunk_size=150):\n # https://www.sqlite.org/limits.html#max_compound_select\n with db.atomic():\n for idx in range(0, len(items), chunk_size):\n model.insert_many(items[idx:idx+chunk_size]).execute()" ]
[ "0.7674417", "0.62483996", "0.60852516", "0.60310906", "0.6018139", "0.5965686", "0.588384", "0.5873902", "0.5820619", "0.5802095", "0.57661766", "0.57575065", "0.57453406", "0.5733978", "0.5712476", "0.57062435", "0.5704675", "0.56961715", "0.56593245", "0.56401414", "0.5638335", "0.55875313", "0.5572744", "0.5556065", "0.5556065", "0.5556065", "0.5556065", "0.55386287", "0.5523628", "0.551991" ]
0.71058905
1
! Allocate memory for the vertex data channels. Allocation size is based on the information collected by client calls to appendsize().
def allocatememory(self):
    self._numvertstotal = self._numents * self._nvet
    self._cords = VertDataSingleChannel(GLDataType.FLOAT, 3, self._numvertstotal)
    self._colors = VertDataSingleChannel(GLDataType.UBYTE, 4, self._numvertstotal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _allocate_buffer_memory(self):\n for channel in self._channels_dict.values():\n if channel.enabled:\n channel.allocate(self._num_captures, self._num_samples)", "def allocatememory(self):\n\n for key, value in self._dentsvertsdata.items():\n value.allocatememory()", "def create_buffers(self):", "def add_vertices(\n graph, manufacturer, type, type_name, tags, data\n):\n # when we start adding more conections, we need to be careful with indexing\n # so here we make a note of the current index of the last vertex and the\n # number of vertices we're adding\n start_index = len(list(graph.get_vertices()))\n number_of_vertices = len(data)\n\n graph.add_vertex(number_of_vertices)\n\n # here we initate our string vertex property maps\n vprops = {\n 'manufacturer': manufacturer,\n 'type': type,\n 'name': type_name\n }\n\n # and then add these property maps as internal property maps (so they're)\n # included as part of our Graph\n for key, value in vprops.items():\n # check if property already exists\n if key in graph.vertex_properties:\n continue\n else:\n graph.vertex_properties[key] = (\n graph.new_vertex_property(\"string\")\n )\n for i in range(start_index, number_of_vertices):\n graph.vertex_properties[key][graph.vertex(i)] = value\n \n # initiate our internal property maps for the data and populate them\n for t, d in zip(tags, data.T):\n # check if property already exists\n if t in graph.vertex_properties:\n continue\n else:\n graph.vertex_properties[t] = (\n graph.new_vertex_property(\"double\")\n )\n # since these properties are scalar we can assign with arrays\n graph.vertex_properties[t].get_array()[\n start_index: number_of_vertices\n ] = d\n\n # overwrite the size - in case it didn't import properly from the pdf\n graph.vertex_properties['size'].get_array()[\n start_index: number_of_vertices\n ] = (\n graph.vertex_properties['pipe_body_inside_diameter'].get_array()[\n start_index: number_of_vertices\n ] + 2 * graph.vertex_properties['pipe_body_wall_thickness'].get_array()[\n start_index: number_of_vertices\n ]\n )\n\n # calculate and add our min and max pipe body burst pressures\n graph = add_burst_pressure_to_graph(graph)\n \n return graph", "def OnSize(self, event):\r\n size = self.GetClientSizeTuple()\r\n self.gList.SetSize(size)", "def num_vertices(self):\n return len(self)", "def size(self, gather=True):\n raise NotImplementedError", "def test_number_of_vertex_elements_in_MESH_chunk(self):\n for O in self.mod.objts.itervalues():\n for M in O.meshes.itervalues():\n self.assertEqual(M.vsize, len(M.vert))", "def __len__(self):\n return len(self._vertices)", "def __init__(self):\n self.vert_list = {}\n self.num_vertices = 0", "def graph_data_size(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size or 0)", "def __init__(self, size = 0):\n self.data = []\n self.size = size", "def __init__(self):\n self.vert_dict = {}\n # self.vert_dict = []\n self.num_vertices = 0", "def _assign_sizes(self):", "def get_size(self):\n return (\n sys.getsizeof(self.children) +\n sys.getsizeof(self.parent) +\n sys.getsizeof(self.dataset_id) +\n sys.getsizeof(self.k) +\n self.filter.get_size()\n )", "def _check_data_size(self):\n if len(self.list_data) < self.n_cluster:\n self.n_cluster = len(self.list_data)", "def __len__(self):\n return self._vertices.shape[0]", "def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]", "def allocatememory(self, key):\n self._dentsvertsdata[key].allocatememory()", "def build(self, 
block_size):", "def __init__(self, size: int = 100):\n self.data = [None] * size\n self.size = size", "def n_vertices(self):\n try: \n return self._n_vertices\n except AttributeError:\n self._n_vertices = 0\n for v in self.vertex_generator(): self._n_vertices += 1\n return self._n_vertices", "def update_dimensions(self):\n self.chunk = numpy.full((self.current_height, self.current_width), fill_value=Constants.VALUE_INITIALIZER,\n dtype=\"int16\")", "def _initialize_buffers(self) -> None:", "def allocate(self):\n raise NotImplementedError", "def set_num_channels(count):\r\n check_mixer()\r\n global _numchanneldata, _channeldata\r\n if count > _numchanneldata:\r\n _channeldata.extend([ChannelData() for i in\r\n range(count - _numchanneldata)])\r\n _numchanneldata = count\r\n sdl.Mix_AllocateChannels(count)", "def MAXMEM(self):", "def return_num_vertices(self):\n return self.__size", "def pc_output_buffers_full(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.doaesprit_sptr_pc_output_buffers_full(self, *args)", "def num_vertices(self):\n return len(self.vertices)" ]
[ "0.6268719", "0.61186135", "0.57991874", "0.560458", "0.5575091", "0.55208147", "0.54995835", "0.5477305", "0.54349387", "0.54285145", "0.5426824", "0.54087883", "0.5403441", "0.5387381", "0.5375428", "0.5373897", "0.53613126", "0.5357203", "0.5356363", "0.53444767", "0.53117406", "0.52758205", "0.5226416", "0.5221795", "0.52177936", "0.521502", "0.5189603", "0.51533556", "0.5145403", "0.5143293" ]
0.76169854
0
! Clone the instance of VertDataCollectorCoord3fColor4ub class
Overrides the base class abstract method
def clone(self):
    vdc = VertDataCollectorCoord3fColor4ub(self._enttype)
    return vdc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy(self):\n return vertex(self.x, self.y, self.z)", "def clone(self, *args):\n return _osgAnimation.VertexInfluenceMap_clone(self, *args)", "def Clone(self) -> \"itkBinaryContourImageFilterIUS3IUS3_Pointer\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIUS3IUS3_Clone(self)", "def Clone(self) -> \"itkBinaryContourImageFilterIF3IF3_Pointer\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIF3IF3_Clone(self)", "def _update_color(self):\n self._vertex_list.colors[:] = self._rgba * self._num_verts", "def make_raw_vertex_image(data1, cmap = 'hot', vmin = 0, vmax = 1, \n data2 = [], vmin2 = 0, vmax2 = 1, subject = 'fsaverage', data2D = False):\n \n # Get curvature\n curv = cortex.db.get_surfinfo(subject, type = 'curvature', recache=False)#,smooth=1)\n # Adjust curvature contrast / color. Alternately, you could work\n # with curv.data, maybe threshold it, and apply a color map. \n curv.data[curv.data>0] = .1\n curv.data[curv.data<=0] = -.1\n #curv.data = np.sign(curv.data.data) * .25\n \n curv.vmin = -1\n curv.vmax = 1\n curv.cmap = 'gray'\n \n # Create display data \n vx = cortex.Vertex(data1, subject, cmap = cmap, vmin = vmin, vmax = vmax)\n \n # Pick an arbitrary region to mask out\n # (in your case you could use np.isnan on your data in similar fashion)\n if data2D:\n data2[np.isnan(data2)] = vmin2\n norm2 = colors.Normalize(vmin2, vmax2) \n alpha = np.clip(norm2(data2), 0, 1)\n else:\n alpha = ~np.isnan(data1) #(data < 0.2) | (data > 0.4)\n alpha = alpha.astype(np.float)\n \n # Map to RGB\n vx_rgb = np.vstack([vx.raw.red.data, vx.raw.green.data, vx.raw.blue.data])\n vx_rgb[:,alpha>0] = vx_rgb[:,alpha>0] * alpha[alpha>0]\n \n curv_rgb = np.vstack([curv.raw.red.data, curv.raw.green.data, curv.raw.blue.data])\n # do this to avoid artifacts where curvature gets color of 0 valur of colormap\n curv_rgb[:,np.where((vx_rgb > 0))[-1]] = curv_rgb[:,np.where((vx_rgb > 0))[-1]] * (1-alpha)[np.where((vx_rgb > 0))[-1]]\n\n # Alpha mask\n display_data = curv_rgb + vx_rgb \n\n # Create vertex RGB object out of R, G, B channels\n vx_fin = cortex.VertexRGB(*display_data, subject, curvature_brightness = 0.4, curvature_contrast = 0.1)\n\n return vx_fin", "def __originate__(self):\n self.pos_to_num = deepcopy(self.o_pos_to_num)\n self.num_to_pos = deepcopy(self.o_num_to_pos)", "def Clone(self) -> \"itkBinaryContourImageFilterISS3ISS3_Pointer\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterISS3ISS3_Clone(self)", "def Copy(self) -> BaseVector:", "def __init__(self, w, h):\n self.w = w\n self.h = h\n self.size = self.w*self.h\n self.data = [IColor() for x in range(self.size)]\n self.temp = [IColor() for x in range(self.size)]", "def Clone(self) -> \"itkBinaryContourImageFilterIUC3IUC3_Pointer\":\n return _itkBinaryContourImageFilterPython.itkBinaryContourImageFilterIUC3IUC3_Clone(self)", "def __copy__(self):\n return self.__class__(self.m, self.n, self.data)", "def copy(self):\r\n return BasicMesh(self.gl_lists, list(self.pos),\r\n list(self.rotation), list(self.verts),\r\n self.scale, list(self.colorize))", "def copy(self, old):\n self.h = old.h\n self.L_h = old.L_h\n\n self.d = np.arange(1,self.L_h+1)\n\n self.it = old.it\n self.N_first = old.N_first\n self.la = old.la\n self.a = old.a\n self.e = np.copy(old.e)\n self.e2 = old.e2\n\n self.P = old.P\n self.alpha_g = np.copy(old.alpha_g)\n self.A = np.copy(old.A)\n self.sigma2 = old.sigma2\n self.mu = np.copy(old.mu)\n self.R = np.copy(old.R)\n\n self.b = np.copy(old.mu)\n self.w = 
np.copy(old.w)\n self.pie = np.copy(old.pie)\n self.pi = np.copy(old.pi)\n self.p = np.copy(old.p)\n\n self.mu_pad = np.copy(old.mu_pad)\n self.M_mu = np.copy(old.M_mu)\n self.R_pad = np.copy(old.R_pad)\n #self.M_R = np.copy(old.M_R)\n\n self.half_pie_var = np.copy(old.half_pie_var)\n self.half_pie_var_pad = np.copy(old.half_pie_var_pad)\n self.M_half_pie_var_pad = np.copy(old.M_half_pie_var_pad)\n self.pie_var = np.copy(old.pie_var)\n\n self.rev_A = np.copy(old.rev_A)\n\n self.LP = old.LP\n self.LP_list = old.LP_list\n self.la_list = old.la_list\n self.a_list = old.a_list\n self.sigma2_list = old.sigma2_list\n self.A_list = old.A_list", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.pixels = []\n self.r = 255\n self.g = 0\n self.b = 0\n self.pointSize = 30\n self.vr = 255\n self.vg = 200\n self.vb = 200\n self.glclear()", "def Clone(self) -> \"itkMeshSourceMUC3_Pointer\":\n return _itkMeshSourcePython.itkMeshSourceMUC3_Clone(self)", "def Clone(self) -> \"itkTransformMeshFilterMF3MF3TD33_Pointer\":\n return _itkTransformMeshFilterPython.itkTransformMeshFilterMF3MF3TD33_Clone(self)", "def copy(self):\n new = Face(np.array(self.norm[:]), self.colour[:])\n return new", "def copy(self):\n new = copy.copy(self)\n new._surf = self._surf.copy()\n return new", "def copy(self):\n new = copy.copy(self)\n new._surf = self._surf.copy()\n return new", "def Clone(self) -> \"itkMeshSourceMD3_Pointer\":\n return _itkMeshSourcePython.itkMeshSourceMD3_Clone(self)", "def Clone(self) -> \"itkMeshSourcePSUC3_Pointer\":\n return _itkMeshSourcePython.itkMeshSourcePSUC3_Clone(self)", "def Clone(self) -> \"itkSquaredDifferenceImageFilterIF3IF3IF3_Pointer\":\n return _itkSquaredDifferenceImageFilterPython.itkSquaredDifferenceImageFilterIF3IF3IF3_Clone(self)", "def __init__(self, *args, **kwargs):\n super(Dummy, self).__init__()\n \n self.affine = np.eye(4, dtype = np.float32)\n self._update_glaffine()\n \n self.vertices = np.random.random( (10,3)).astype(np.float32) * 10\n\n self.colors = np.array( [[255,255,0,255],\n [255,255,0,255],\n [0,255,0,255],\n [0,255,0,255]], dtype = np.ubyte )\n \n self.indices = np.array( [[0,1], [1,2], [5,6], [8,9]] , dtype = np.uint32).ravel()\n self.vertices = self.vertices[self.indices,:]\n self.indices = np.array( range(len(self.indices)), dtype = np.uint32)\n self.colors = self.colors.repeat(2, axis = 0)\n self.colors_ptr = self.colors.ctypes.data\n \n self.vertices_ptr = self.vertices.ctypes.data\n self.indices_ptr = self.indices.ctypes.data\n self.indices_nr = self.indices.size\n self.mode = GL_LINES\n self.type = GL_UNSIGNED_INT", "def _CopyCoords(self):\n self.ccoords = numpy.zeros((self.mol.n_atoms, const.NUMDIM))\n for i in range(self.mol.n_atoms):\n for j in range(const.NUMDIM):\n self.ccoords[i][j] = self.mol.atoms[i].coords[j]", "def Clone(self) -> \"itkTransformMeshFilterMF3MF3TF33_Pointer\":\n return _itkTransformMeshFilterPython.itkTransformMeshFilterMF3MF3TF33_Clone(self)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(pixel_point0, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.name is None:\n self.name = ''\n if self.red_u is None:\n self.red_u = []\n if self.red_v is None:\n self.red_v = []\n if self.yellow_u is None:\n self.yellow_u = []\n if self.yellow_v is None:\n self.yellow_v = []\n if self.green_u is None:\n self.green_u = []\n if self.green_v is None:\n self.green_v = []\n if self.purple_u is None:\n self.purple_u = []\n if 
self.purple_v is None:\n self.purple_v = []\n if self.orange_u is None:\n self.orange_u = []\n if self.orange_v is None:\n self.orange_v = []\n else:\n self.name = ''\n self.red_u = []\n self.red_v = []\n self.yellow_u = []\n self.yellow_v = []\n self.green_u = []\n self.green_v = []\n self.purple_u = []\n self.purple_v = []\n self.orange_u = []\n self.orange_v = []", "def copy(self):\n return self.__class__(\n self.xs.copy(), self.ys.copy(),\n self.gauge_length,\n self.sample_width,\n self.sample_thickness,\n self.name\n )", "def clone(self):", "def __init__(self,r,x_c,y_c,z_c):\n self.r = r\n self.x_c = x_c\n self.y_c = y_c\n self.z_c = z_c" ]
[ "0.56691396", "0.56231135", "0.5584617", "0.5574439", "0.55721986", "0.5510009", "0.5508986", "0.5500849", "0.5500065", "0.54964495", "0.5489479", "0.5352548", "0.53463817", "0.53232324", "0.53192043", "0.5311618", "0.53026754", "0.52971894", "0.5295959", "0.5295959", "0.52792907", "0.5279172", "0.52715635", "0.52672714", "0.52545744", "0.52523124", "0.52467626", "0.52344054", "0.52214813", "0.5216362" ]
0.7961676
0
Generate test image with random pixels and save as an image file.
def test_image(filename, x_size=350, y_size=350):
    # Create image and loop over all pixels
    im = Image.new("RGB", (x_size, y_size))
    pixels = im.load()
    for i in range(x_size):
        for j in range(y_size):
            x = remap(i, 0, x_size, -1, 1)
            y = remap(j, 0, y_size, -1, 1)
            pixels[i, j] = (random.randint(0, 255),  # Red channel
                            random.randint(0, 255),  # Green channel
                            random.randint(0, 255))  # Blue channel
    im.save(filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n im.save(filename)\n return 'saved'", "def test_image(filename, x_size=def_x_size, y_size=def_y_size):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n im.save(filename)", "def genrandimg(args) -> None:\n\n size = (int(args.x), int(args.y))\n fp = Image.new(\"RGB\", size)\n data = []\n\n if not args.c: # If color\n for i in range(size[0]*size[1]):\n r = random.choice([0x00, 0xff])\n data.append((r, r, r)) # Each RGB value is the same random value\n else: # Else black-and-white\n for i in range(size[0]*size[1]):\n r = [random.choice(range(0, 256)) for _ in range(0, 3)]\n r = (r[0], r[1], r[2]) # Choose 3 random numbers for different RGB values\n data.append(r)\n\n fp.putdata(data)\n print(\"Saving to %s...\" % args.o)\n fp.save(args.o)\n fp.close()", "def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)", "def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)", "def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)", "def new_test_image():\n warnings.warn(DeprecationWarning(\n \"new_test_image() is deprecated in favour of the get_sample_image() \"\n \"context manager.\"), stacklevel=2)\n image_name = 'test-{}.png'.format(uuid.uuid4())\n image = Image.new('RGBA', size=(50, 50), color=(256, 0, 0))\n ImageDraw.Draw(image)\n byte_io = BytesIO()\n image.save(byte_io, 'png')\n byte_io.seek(0)\n return image_name, ContentFile(byte_io.read(), image_name)", "def generate_image(filename, x_size=350, y_size=350):\n global timeflag\n timeflag = 0\n\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(13, 15)\n green_function = build_random_function(13, 15)\n blue_function = 
build_random_function(13,15)\n print \"red_function:\\t\" + str(red_function)+\"\\n\"\n print \"green_function:\\t\" + str(green_function)+\"\\n\"\n print \"blue_function:\\t\" + str(blue_function)+\"\\n\"\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y)),\n color_map(evaluate_random_function(green_function, x, y)),\n color_map(evaluate_random_function(blue_function, x, y))\n )\n\n im.save(filename)", "def create_image(path, pxcount):\n img = Image.open(path, 'r').convert('L')\n pixels = img.load()\n for i in range(pxcount):\n x = randint(0, img.size[0]-1)\n y = randint(0, img.size[0]-1)\n if pixels[x, y] == 0:\n pixels[x, y] = 255\n else:\n pixels[x, y] = 0\n return img", "def testImage():\n width = 200\n height = 200\n image = BitMap( width, height )\n \n # create a loop in order to draw some pixels\n \n for col in range(width):\n if col % 10 == 0: print 'col is', col\n for row in range(height):\n if col % 10 == 0 or row % 10 == 0:\n image.plotPoint( col, row ) \n \n # we have now looped through every image pixel\n # next, we write it out to a file\n \n image.saveFile( \"test.bmp\" )\n #changing the col and row number determines how big the grid is for the picture or how zoomed in it is. Changing the and to or just makes the grid go from dotted grid to lines.", "def generate_art(filename, x_size=350, y_size=350):\n # Functions for red, green, and blue channels - where the magic happens!\n r_lb = random.randint(6, 10)\n g_lb = random.randint(6, 10)\n b_lb = random.randint(6, 10)\n red_function = build_random_function(r_lb, r_lb+1)\n green_function = build_random_function(g_lb, g_lb+1)\n blue_function = build_random_function(b_lb, b_lb+1)\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y)),\n color_map(evaluate_random_function(green_function, x, y)),\n color_map(evaluate_random_function(blue_function, x, y))\n )\n im.save(filename+'.png')\n return 'saved'", "def create_random_data(output_path: str, num_images: int = 5) -> None:\n train_path = os.path.join(output_path, \"train\")\n class1_train_path = os.path.join(train_path, \"class1\")\n class2_train_path = os.path.join(train_path, \"class2\")\n\n val_path = os.path.join(output_path, \"val\")\n class1_val_path = os.path.join(val_path, \"class1\")\n class2_val_path = os.path.join(val_path, \"class2\")\n\n test_path = os.path.join(output_path, \"test\")\n class1_test_path = os.path.join(test_path, \"class1\")\n class2_test_path = os.path.join(test_path, \"class2\")\n\n paths = [\n class1_train_path,\n class1_val_path,\n class1_test_path,\n class2_train_path,\n class2_val_path,\n class2_test_path,\n ]\n\n for path in paths:\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n for i in range(num_images):\n pixels = numpy.random.rand(64, 64, 3) * 255\n im = Image.fromarray(pixels.astype(\"uint8\")).convert(\"RGB\")\n im.save(os.path.join(path, f\"rand_image_{i}.jpeg\"))\n\n process_images(output_path)", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = 
int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def test_save_jpg():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.jpg', 'data': [img]}\n\n assert images.save(parameters)", "def test_save_png():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.png', 'data': [img]}\n\n assert images.save(parameters)", "def generate_image(noise_list, save_path):\n check_points_path = os.path.join(save_path, 'check_points')\n output_image_path = os.path.join(save_path, 'images')\n components.create_folder(output_image_path, False)\n latest_checkpoint = tf.train.latest_checkpoint(check_points_path)\n assert latest_checkpoint is not None, \"no check points found\"\n saver = tf.train.import_meta_graph(latest_checkpoint + '.meta')\n with tf.Session() as sess:\n saver.restore(sess, latest_checkpoint)\n iterations = sess.run('saved_iterations:0')\n for i in range(len(noise_list)):\n generated_images = sess.run('generator/output_layer/tanh/during_inference:0',\n feed_dict={\"noise_for_inference:0\": noise_list[i]})\n Gan.__save_images(output_image_path, generated_images, int(np.sqrt(generated_images.shape[0])), iterations, i)", "def test_write_img(img_: Tensor, ext: str) -> None:\n with NamedTemporaryFile(\"w\") as f:\n path = f\"{f.name}{ext}\"\n write_img(img_, path)\n img = read_image(path)\n torch.testing.assert_allclose(img, img_)", "def generate_image_file(color):\n img = generate_image(color)\n img.save(IMAGE_FILE)\n return IMAGE_FILE", "def generate_images(generator_model, output_dir, epoch):\n test_image_stack = generator_model.predict(np.random.normal(size=(10, 100)))\n test_image_stack = (test_image_stack * 255)\n test_image_stack = np.squeeze(np.round(test_image_stack).astype(np.uint8))\n tiled_output = tile_images(test_image_stack)\n tiled_output = Image.fromarray(tiled_output)\n outfile = os.path.join(output_dir, 'epoch_{}.png'.format(epoch))\n tiled_output.save(outfile)", "def generate_art(filename, x_size=1920, y_size=1080):\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7,8)\n green_function = build_random_function(4,6)\n blue_function = build_random_function(3,5)\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y)),\n color_map(evaluate_random_function(green_function, x, y)),\n color_map(evaluate_random_function(blue_function, x, y))\n )\n\n im.save(filename)", "def save_image(start, stop, imgcount, label):\n text = \"\"\n imgfile = select_file(label)\n for p in range(imgcount):\n pxcnt = randint(start, stop)\n imgcurrent = create_image(imgfile, pxcnt)\n filename = \"img_train_\" + str(label) + \"_\" + str(p) + \"_\" + str(pxcnt) + \".png\"\n text += \"ctq/dataset/train/\" 
+ filename + \" \" + str(label) + \"\\n\"\n imgcurrent.save(filename)\n text_file = open(imgfile + \"_train_label.txt\", \"w\")\n text_file.write(text)\n text_file.close()", "def test_save_tif():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.tif', 'data': [img]}\n\n assert images.save(parameters)", "def generate_art(filename, x_size=350, y_size=350):\n # Functions for red, green, and blue channels - where the magic happens!\n \n red_function = build_random_function(7,15)\n green_function = build_random_function(7,15)\n blue_function = build_random_function(7,15)\n \n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y)),\n color_map(evaluate_random_function(green_function, x, y)),\n color_map(evaluate_random_function(blue_function, x, y))\n )\n\n im.save(filename)", "def generate_image(self):\n pass", "def test(train_out_dir):\n above = os.path.join(train_out_dir, '..')\n os.chdir(above)\n if not os.path.exists(\"test\"):\n os.mkdir(\"test\")\n\n for sdir in os.listdir(train_out_dir):\n cur_dir = os.path.join(train_out_dir, sdir)\n list_curr_dir = os.listdir(cur_dir)\n random.seed()\n rand_num = random.randint(0, len(list_curr_dir) - 1)\n rand_img = list_curr_dir[rand_num]\n rand_img_path = os.path.join(cur_dir, rand_img)\n dst_path = os.path.join(\"test\", sdir)\n if not os.path.exists(dst_path):\n os.mkdir(dst_path)\n\n shutil.move(rand_img_path, os.path.join(dst_path, os.path.basename(rand_img_path)))", "def mock_raw_image(x_dim=1024, y_dim=1024, num_channels=3,\n output_path=None, write_image=True):\n\n rand_shape = (x_dim, y_dim, num_channels)\n\n if num_channels != 3:\n raise NotImplementedError(\"mock_raw_image for channels != 3 not yet \"\n \"implemented.\")\n\n img = np.random.random(rand_shape)\n img = np.uint8(img*255)\n\n if write_image:\n image_obj = allen_brain.PIL_Image()\n pil_img = image_obj.fromarray(img, mode=\"RGB\")\n with tf.gfile.Open(output_path, \"w\") as f:\n pil_img.save(f, \"jpeg\")\n\n return img", "def test_image(self):\r\n self.testdata = open(TESTDATA_FILENAME).read()", "def get_rand_img():\n import urllib\n import os\n import glob\n\n pics = glob.glob('/home/cody_techngs/PycharmProjects/ProjTest/ActiveAMT/ActiveAMT_FLASK/static/images/HITs/rand*')\n nums = []\n\n for pic in pics:\n nums.append(int(pic.split('rand_img')[1].split('.')[0]))\n\n unique_num = False\n new_rand_num = 0\n\n while not unique_num:\n new_rand_num = random.randrange(1, 2000)\n if new_rand_num not in nums:\n unique_num = True\n\n img_name = 'rand_img{}.jpg'.format(new_rand_num)\n dl_location = os.getcwd() + '/ActiveAMT/ActiveAMT_FLASK/static/images/HITs/' + img_name\n url = 'https://unsplash.it/400/300/?random'\n urllib.urlretrieve(url, dl_location)\n\n return 'static/images/HITs/{}'.format(img_name)", "def sample_image(n_row, batches_done):\n # Sample noise\n z = Variable(Tensor(np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))))\n gen_imgs = decoder(z)\n save_image(\n gen_imgs.data, \"images/%d.png\" % batches_done, nrow=n_row, normalize=True\n )", "def gen_test_output(sess, logits, keep_prob, image_pl, data_folder, image_shape, samples_limit=None):\n\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n\n if samples_limit:\n image_paths = image_paths[0:samples_limit]\n\n for 
image_file in tqdm(image_paths, desc='Processing: ', unit='images', total=len(image_paths)):\n yield process_image_file(image_file, sess, logits, keep_prob, image_pl, image_shape)" ]
[ "0.7916328", "0.7276964", "0.72279763", "0.7180069", "0.7180069", "0.7180069", "0.6833925", "0.67390573", "0.6636539", "0.6577365", "0.6552666", "0.654833", "0.6493901", "0.64547956", "0.6446572", "0.6445794", "0.6444757", "0.6440958", "0.6415097", "0.6344109", "0.63346565", "0.6326465", "0.6290015", "0.6278635", "0.6267804", "0.6240479", "0.6235593", "0.6222436", "0.6220511", "0.61738044" ]
0.73138684
1
Generates computational art and save as an image file.
All args optional
complexity    base complexity (depth of recursion) for image creation
num_frames    determines how many frames will be drawn
def gen_art(complexity=7, num_frames=1, x_size=350, y_size=350):
    # Functions for red, green, and blue channels - where the magic happens!
    red_function = bld_func(complexity, complexity+2)
    green_function = bld_func(complexity, complexity+2)
    blue_function = bld_func(complexity, complexity+2)
    # Create image and loop over all pixels
    for t in range(0, num_frames+1):
        print "Generating frame %d ... Please be patient." % t
        t_val = (t-(num_frames/2.0))/(num_frames/2.0)
        im = Image.new("RGB", (x_size, y_size))
        pixels = im.load()
        for i in range(x_size):
            for j in range(y_size):
                x = remap(i, 0, x_size, -1, 1)
                y = remap(j, 0, y_size, -1, 1)
                pixels[i, j] = (
                    c_map(eval_func(red_function, x, y, t_val)),
                    c_map(eval_func(green_function, x, y, t_val)),
                    c_map(eval_func(blue_function, x, y, t_val))
                )
        im.save('frame%d.png' % t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def genFrameImages((widthPixels, heightPixels), flashColourGen, flashColourGenPipTrain, numFrames, FPS, superSamplingScale=8, BG_COLOUR=(0,0,0), TEXT_COLOUR=(255,255,255), GFX_COLOUR=(255,255,255), title=\"\", TITLE_COLOUR=(255,255,255), FRAMES_AS_FIELDS=False, frameSkipChecker=None, segments=[]):\n\n # we're going to draw a larger (super sampled) image and then scale it down\n # to get smoothing (compensating for the lack of anti-aliased drawing functions\n # in PIL)\n\n width = widthPixels * superSamplingScale\n height = heightPixels * superSamplingScale\n\n flashCols = list(flashColourGen)[0:numFrames]\n flashColsPipTrain = list(flashColourGenPipTrain)[0:numFrames]\n\n # we'll pretend we're working within a rectangle (0,0) - (160,90)\n # and use a scaling function to map to out actual dimensions\n scaler = AspectPreservingCoordinateScaler((160,90),(width,height))\n\n # load a font for text\n font = loadFont(sizePt = scaler.s(4))\n smallfont = loadFont(sizePt = scaler.s(4))\n \n # work out the segment description text, then check its size and adjust the fontsize to ensure it fits within bounding area\n if segments:\n segment_description_text = \"\\n\".join(map(lambda seg : seg[\"description\"], segments))\n tmpimg = Image.new(\"RGB\", (width, height), color=BG_COLOUR)\n tmpdraw = ImageDraw.Draw(tmpimg)\n w,h = tmpdraw.multiline_textsize(segment_description_text, font=smallfont)\n max_w, max_h = scaler.xy((140,13))\n \n shrink_factor = min(float(max_w) / w, float(max_h) / h, 1)\n smallfont = loadFont(sizePt = scaler.s(4*shrink_factor))\n \n poy = 0 # pie Y offset\n dfy = 65 # duration and FPS labels Y offset\n if segments:\n poy = -10\n dfy = 19\n\n\n\n WHITE=(255,255,255)\n BLACK=(0,0,0)\n\n if FRAMES_AS_FIELDS:\n imageName = \"field\"\n labelFps = FPS / 2\n else:\n imageName = \"frame\"\n labelFps = FPS\n\n\n for frameNum in range(0,numFrames):\n if frameSkipChecker is not None:\n shouldSkip=frameSkipChecker(frameNum)\n if shouldSkip:\n yield None\n continue\n\n timecode = frameNumToTimecode(frameNum, FPS, framesAreFields=FRAMES_AS_FIELDS)\n timeSecs = float(frameNum) / FPS\n nextTimeSecs = float(frameNum+1) / FPS # time of next frame after this\n durationTimecode = frameNumToTimecode(numFrames, FPS)\n\n # create black image and an object to let us draw on it\n img = Image.new(\"RGB\", (width, height), color=BG_COLOUR)\n draw = ImageDraw.Draw(img)\n\n # draw a flashing rectangular box on the left side\n flashColour = flashCols[frameNum]\n topLeft = scaler.xy((10, 30))\n bottomRight = scaler.xy((40, 60))\n draw.rectangle(topLeft + bottomRight, outline=None, fill=GFX_COLOUR)\n topLeft = scaler.xy((11, 31))\n bottomRight = scaler.xy((39, 59))\n draw.rectangle(topLeft + bottomRight, outline=None, fill=flashColour)\n\n # draw text label explaining to attach light sensor to the flashing box\n topLeft = scaler.xy((41, 37))\n draw.text(topLeft, \"Use light detector\", font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((41, 41))\n draw.text(topLeft, \"on centre of\", font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((41, 45))\n draw.text(topLeft, \"this box\", font=font, fill=TEXT_COLOUR)\n\n # draw text labels giving frame number, timecode and seconds covered by this frame\n topLeft = scaler.xy((10, 4))\n draw.text(topLeft, timecode, font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10, 9))\n draw.text(topLeft, \"%06d of %d %ss\" % (frameNum, numFrames, imageName), font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10, 14))\n draw.text(topLeft, u\"%08.3f \\u2264 t < %08.3f secs\" % 
(timeSecs, nextTimeSecs), font=font, fill=TEXT_COLOUR)\n\n topLeft = scaler.xy((10,dfy))\n draw.text(topLeft, \"Duration: \" + durationTimecode, font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10,dfy+5))\n draw.text(topLeft, \"%d fps\" % labelFps, font=font, fill=TEXT_COLOUR)\n\n # and more text labels, but this time right justified\n text = title\n w,h = font.getsize(text)\n topLeft = scaler.xy((150,4))\n topLeft = topLeft[0] - w, topLeft[1]\n draw.text(topLeft, text, font=font, fill=TITLE_COLOUR)\n\n # draw an outer ring segment indicating the time period covered by the current frame\n topLeft = scaler.xy((105, 20+poy))\n bottomRight = scaler.xy((155, 70+poy))\n angle1 = 360 * (frameNum % FPS) / FPS\n angle2 = 360 * ((frameNum % FPS) + 1) / FPS\n draw.pieslice(topLeft + bottomRight, start=270+angle1, end=270+angle2, outline=None, fill=GFX_COLOUR)\n\n # hollow it out to make the circle into a ring\n topLeft = scaler.xy((108, 23+poy))\n bottomRight = scaler.xy((152, 67+poy))\n draw.ellipse(topLeft + bottomRight, outline=None, fill=BG_COLOUR)\n\n\n # draw frame num ring\n topLeft = scaler.xy((110, 25+poy))\n bottomRight = scaler.xy((150, 65+poy))\n angle = 360 * (frameNum % FPS) / FPS\n if (frameNum / FPS) % 2 == 0: # if this is an even second (0-0.9, 2-2.9, 4-4.9 etc)\n draw.pieslice(topLeft + bottomRight, start=270, end=270+angle, outline=None, fill=GFX_COLOUR)\n else:\n draw.pieslice(topLeft + bottomRight, start=270+angle, end=270+360, outline=None, fill=GFX_COLOUR)\n\n # hollow it out to make the circle into a ring\n topLeft = scaler.xy((113, 28+poy))\n bottomRight = scaler.xy((147, 62+poy))\n draw.ellipse(topLeft + bottomRight, outline=None, fill=BG_COLOUR)\n \n # draw outer for segments\n if segments:\n topLeft = scaler.xy((115-0.25, 30+poy-0.25))\n bottomRight = scaler.xy((145+0.25, 60+poy+0.25))\n draw.ellipse(topLeft + bottomRight, fill=WHITE, outline=None)\n topLeft = scaler.xy((115, 30+poy))\n bottomRight = scaler.xy((145, 60+poy))\n draw.ellipse(topLeft + bottomRight, fill=BLACK, outline=None)\n\n # draw progress pie\n topLeft = scaler.xy((115, 30+poy))\n bottomRight = scaler.xy((145, 60+poy))\n angle = 360.0*frameNum/numFrames\n precise_filled_pieslice(draw, topLeft + bottomRight, start=270, end=270+angle, outline=None, fill=GFX_COLOUR)\n\n # draw segments over the pieslice\n if segments:\n for i in range(0, len(segments)):\n angle = math.radians(270 + 360.0*segments[i][\"startSecs\"]/numFrames*FPS)\n centre = scaler.xy((130,45+poy))\n armEnd = scaler.xy((130 + 15*math.cos(angle), 45+poy + 15*math.sin(angle)))\n draw.line([centre, armEnd], fill=WHITE, width=int(scaler.s(0.25)))\n \n segStartFrame = segments[i][\"startSecs\"] * FPS\n nextStartFrame = segments[(i+1) % len(segments)][\"startSecs\"] * FPS\n if nextStartFrame <= segStartFrame:\n nextStartFrame += numFrames\n midAngle = math.radians(270 + 360.0* (segStartFrame+nextStartFrame)/2/numFrames)\n w,h = font.getsize(segments[i][\"label\"])\n centre = scaler.xy((130 + 15*math.cos(midAngle)*0.7, 45+poy + 15*math.sin(midAngle)*0.7))\n topLeft = centre[0] - w/2, centre[1] - h/2\n draw.text(topLeft, segments[i][\"label\"], fill=WHITE, font=font)\n\n # draw segment long labels\n topLeft = scaler.xy((10,61))\n draw.multiline_text(topLeft, segment_description_text, fill=WHITE, font=smallfont)\n \n # draw pulse train at the bottom\n LIM=FPS\n NUM_BLOBS = 2*LIM + 1\n blobSpacing = 150.0/NUM_BLOBS\n\n for offset in range(-LIM, +LIM+1):\n left = 80+blobSpacing*(offset-0.5)\n right = 80+blobSpacing*(offset+0.5)\n\n topLeft = 
scaler.xy(( left, 80 ))\n bottomRight = scaler.xy(( right, 85 ))\n\n seqIndex = offset + frameNum\n if seqIndex >= 0 and seqIndex < numFrames:\n colour = flashColsPipTrain[seqIndex]\n draw.rectangle(topLeft + bottomRight, outline=None, fill = colour)\n\n if offset == 0:\n # draw blob above\n topLeft = scaler.xy(( left, 75 ))\n bottomRight = scaler.xy(( right, 80 ))\n draw.rectangle(topLeft + bottomRight, outline=None, fill = GFX_COLOUR)\n\n # and below\n topLeft = scaler.xy(( left, 85 ))\n bottomRight = scaler.xy(( right, 90 ))\n draw.rectangle(topLeft + bottomRight, outline=None, fill = GFX_COLOUR)\n\n # shrink the image using high quality downsampling\n try:\n scalingMode = Image.LANCZOS\n except AttributeError:\n scalingMode = Image.BICUBIC\n\n rescaledImage = img.resize((widthPixels,heightPixels), scalingMode)\n\n yield rescaledImage", "def run(self, n=1, speed=1.0, rnd=0, filename=None, start_frame=0, verbose=True, crop=None):\n if verbose and filename:\n print 'rendering %s frames as %s ... %s' % (n, (filename % start_frame), (filename % (start_frame + n - 1)))\n for k in xrange(n):\n self.z += rnd * rand(*self.z.shape)\n self.step(speed=speed)\n if filename:\n out = self.rgb_image()\n if crop:\n out = out[crop[0]:crop[1],crop[2]:crop[3],...]\n imsave(filename % (k + start_frame), out)\n if verbose:\n print n - k,\n sys.stdout.flush()", "def generate(self, size, count, channels=3):\n self.size = size\n self.h, self.w = size\n self.count = count\n self.channels = channels\n self.make_path()\n self.create_list()\n self.create_json()\n t = time.time()\n for i, (path, img, mask) in enumerate(self.gen()):\n cv2.imwrite(path, img)\n if mask:\n *p, id_ = path.split(\"/\")\n cv2.imwrite(f\"{self.save_path}{self.name}/masks/{id_}\", mask)\n if self.print:\n print(\"[Done {:6d}] [Time: {:.2f} s]\".format(i, time.time() - t))\n t = time.time()", "def generate_multi_art(filename=def_art_name, min_depth=def_min_depth, max_depth=def_max_depth,\n x_size=def_x_size, y_size=def_y_size, write_funcs=def_save,\n func_filename=def_func_name, num_images=def_num_images, index=def_index):\n for j in range(num_images):\n generate_art(filename+str(index+j), min_depth, max_depth,\n x_size, y_size, write_funcs, func_filename+str(index+j))", "def apng(self, savefile=None, show_path=False, delay=20, iterations=0):\n pngdir = self.png()\n if savefile is None:\n savefile = graphics_filename('.png')\n with open(savefile, \"wb\") as out:\n apng = APngAssembler(\n out, len(self),\n delay=delay, num_plays=iterations)\n for i in range(len(self)):\n png = os.path.join(pngdir, \"%08d.png\" % i)\n apng.add_frame(png)\n if show_path:\n print(\"Animation saved to file %s.\" % savefile)", "def produce(frame_gen: fg.FrameGenerator, fps: float,\r\n dpi: typing.Union[int, float], bitrate: typing.Union[int, float],\r\n outfile: str,\r\n settings: PerformanceSettings = None, time_per_print: float = 15.0,\r\n logger: logging.Logger = None) -> PerformanceSettings:\r\n\r\n try:\r\n mp.set_start_method('spawn')\r\n except RuntimeError:\r\n pass\r\n\r\n if settings is None:\r\n settings = PerformanceSettings()\r\n if logger is None:\r\n logger = logging.getLogger('pympanim.worker')\r\n logger.setLevel(logging.DEBUG)\r\n logging.basicConfig(\r\n format='%(asctime)s [%(filename)s:%(lineno)d] %(message)s',\r\n datefmt='%m/%d/%Y %I:%M:%S %p')\r\n\r\n ms_per_frame = 1000 / fps\r\n num_frames = int(frame_gen.duration / ms_per_frame)\r\n logger.info('Settings: %0.1f seconds; %d frames at %d fps with %d workers...',\r\n frame_gen.duration / 
1000, num_frames, fps, settings.num_workers)\r\n\r\n workers = []\r\n paused_workers = []\r\n stopping_workers = [] # closed when we process their last frame\r\n\r\n perf = imgst.ISRunningAveragePerfHandler(settings.window_size)\r\n isticher = imgst.ImageSticher(frame_gen.frame_size, dpi, bitrate, fps,\r\n outfile, settings.ooo_error)\r\n isticher.perfs.append(perf)\r\n\r\n for i in range(settings.num_workers):\r\n worker = _spawn_worker(frame_gen, ms_per_frame, i)\r\n isticher.register_queue(worker.img_queue)\r\n workers.append(worker)\r\n\r\n worker_counter = settings.num_workers\r\n\r\n for worker in workers:\r\n worker.start_sync()\r\n isticher.start()\r\n\r\n all_synced = False\r\n while not all_synced:\r\n all_synced = True\r\n for worker in workers:\r\n if not worker.check_sync():\r\n all_synced = False\r\n time.sleep(0.001)\r\n\r\n old_perf = None\r\n cur_optim = None # magical string values\r\n frame_batch_dyn_min = settings.frame_batch_min\r\n frame_batch_dyn_max = settings.frame_batch_max\r\n frame_batch_min_next_decay = float('inf')\r\n frame_batch_max_next_decay = float('inf')\r\n next_optim = time.time() + settings.perf_delay + settings.window_size\r\n next_progress = time.time() + max(settings.perf_delay + settings.window_size, time_per_print)\r\n\r\n\r\n cur_frame = 0\r\n syncing = False\r\n\r\n while cur_frame < num_frames:\r\n if not syncing:\r\n frames_per_worker_since_sync = 0\r\n for worker in workers:\r\n worker.check_ack_queue()\r\n while worker.offer(cur_frame, settings.worker_queue_size):\r\n cur_frame += 1\r\n frames_per_worker_since_sync = max(\r\n frames_per_worker_since_sync, worker.num_since_sync)\r\n if cur_frame >= num_frames:\r\n break\r\n for i in range(settings.frame_batch_amount - 1):\r\n worker.send(cur_frame)\r\n cur_frame += 1\r\n frames_per_worker_since_sync = max(\r\n frames_per_worker_since_sync, worker.num_since_sync)\r\n if cur_frame >= num_frames:\r\n break\r\n if cur_frame >= num_frames:\r\n break\r\n if cur_frame >= num_frames:\r\n break\r\n if cur_frame >= num_frames:\r\n break\r\n\r\n if frames_per_worker_since_sync > settings.frames_per_sync:\r\n for worker in workers:\r\n worker.start_sync()\r\n syncing = True\r\n else:\r\n syncing = False\r\n for worker in workers:\r\n if not worker.check_sync():\r\n syncing = True\r\n break\r\n\r\n for i in range(settings.work_per_dispatch):\r\n isticher.do_work()\r\n\r\n while len(isticher.ooo_frames) > settings.ooo_cap:\r\n isticher.do_work()\r\n\r\n for i in range(len(stopping_workers) - 1, 0, -1):\r\n worker = stopping_workers[i]\r\n if worker.check_finish() and isticher.next_frame > worker.last_frame:\r\n worker.check_sync() # cleanup just in case\r\n isticher.remove_queue(worker.img_queue)\r\n worker.close()\r\n stopping_workers.pop(i)\r\n\r\n thetime = time.time()\r\n if thetime >= next_progress:\r\n next_progress = thetime + time_per_print\r\n recpsec, procpsec = perf.mean()\r\n frames_to_proc = num_frames - isticher.next_frame\r\n time_left_sec = frames_to_proc / procpsec if procpsec > 0 else float('inf')\r\n logger.info('[%0.1f secs remaining] Generating %0.2f images/sec and ' # pylint: disable=logging-not-lazy\r\n + 'processing %0.2f images/sec', time_left_sec,\r\n recpsec, procpsec)\r\n\r\n if thetime >= next_optim:\r\n next_optim = thetime + settings.perf_delay + settings.window_size\r\n if frame_batch_min_next_decay < thetime:\r\n frame_batch_dyn_min -= 1\r\n frame_batch_min_next_decay = (\r\n float('inf') if frame_batch_dyn_min <= settings.frame_batch_min\r\n else thetime + 
settings.frame_batch_dyn_min_decay_time\r\n )\r\n if frame_batch_max_next_decay < thetime:\r\n frame_batch_dyn_max += 1\r\n frame_batch_max_next_decay = (\r\n float('inf') if frame_batch_dyn_max >= settings.frame_batch_max\r\n else thetime + settings.frame_batch_dyn_max_decay_time\r\n )\r\n\r\n recpsec, procpsec = perf.mean()\r\n if old_perf is not None and cur_optim is not None:\r\n oldrecpsec, oldprocpsec = old_perf # pylint: disable=unpacking-non-sequence, unused-variable\r\n\r\n if cur_optim == 'reduce_frame_batch_amount':\r\n relative_performance = 0 if procpsec == 0 else oldprocpsec / procpsec\r\n if relative_performance > settings.frame_batch_max_badness:\r\n # keep the change\r\n logger.debug(\r\n 'found better setting: frame_batch_amount=%d (rel performance: %0.3f)',\r\n settings.frame_batch_amount, relative_performance)\r\n frame_batch_dyn_max = settings.frame_batch_amount\r\n frame_batch_max_next_decay = (\r\n thetime + settings.frame_batch_dyn_max_decay_time\r\n )\r\n else:\r\n # revert the change\r\n # we're evil scientists so we dont report null results\r\n settings.frame_batch_amount += 1\r\n frame_batch_dyn_min = settings.frame_batch_amount\r\n frame_batch_min_next_decay = (\r\n thetime + settings.frame_batch_dyn_min_decay_time\r\n )\r\n elif cur_optim == 'increase_frame_batch_amount':\r\n relative_performance = 0 if procpsec == 0 else oldprocpsec / procpsec\r\n if relative_performance > settings.frame_batch_min_improvement:\r\n # keep the change\r\n logger.debug(\r\n 'found better setting: frame_batch_amount=%d (rel performance: %0.3f)',\r\n settings.frame_batch_amount, relative_performance)\r\n frame_batch_dyn_min = settings.frame_batch_amount\r\n frame_batch_min_next_decay = (\r\n thetime + settings.frame_batch_dyn_min_decay_time\r\n )\r\n else:\r\n # revert the change\r\n # we're evil scientists so we dont report null results\r\n settings.frame_batch_amount -= 1\r\n frame_batch_dyn_max = settings.frame_batch_amount\r\n frame_batch_max_next_decay = (\r\n thetime + settings.frame_batch_dyn_max_decay_time\r\n )\r\n else:\r\n raise RuntimeError(f'unknown cur_optim = {cur_optim}')\r\n\r\n cur_optim = None\r\n\r\n perc_rec_proc = procpsec / recpsec\r\n reason_str = (f'(processing {perc_rec_proc:.3f} images for every '\r\n + f'image generated, have {len(isticher.ooo_frames)} '\r\n + 'frames awaiting processing)')\r\n\r\n threshold_spawn, threshold_kill = (\r\n (settings.spawn_worker_threshold_low,\r\n settings.kill_worker_threshold_low)\r\n if len(isticher.ooo_frames) < settings.ooo_balance\r\n else (settings.spawn_worker_threshold_high,\r\n settings.kill_worker_threshold_high)\r\n )\r\n\r\n if (perc_rec_proc > threshold_spawn\r\n and settings.num_workers < settings.max_workers):\r\n settings.num_workers += 1\r\n if settings.frames_per_sync > settings.min_frames_per_sync:\r\n settings.frames_per_sync -= 1\r\n if paused_workers:\r\n unpaused = paused_workers.pop()\r\n workers.append(unpaused)\r\n logger.debug('Unpaused a worker %s', reason_str)\r\n else:\r\n worker = _spawn_worker(frame_gen, ms_per_frame, worker_counter)\r\n isticher.register_queue(worker.img_queue)\r\n workers.append(worker)\r\n worker_counter += 1\r\n logger.debug('Spawned a worker %s', reason_str)\r\n elif (perc_rec_proc < threshold_kill\r\n and settings.num_workers > 1):\r\n settings.num_workers -= 1\r\n if settings.frames_per_sync > settings.min_frames_per_sync:\r\n settings.frames_per_sync -= 1\r\n settings.frames_per_sync -= 1\r\n if not paused_workers:\r\n paused = workers.pop()\r\n 
paused_workers.append(paused)\r\n logger.debug('Paused a worker %s', reason_str)\r\n else:\r\n paused = workers.pop()\r\n killed = paused_workers.pop()\r\n paused_workers.append(paused)\r\n stopping_workers.append(killed)\r\n killed.start_finish()\r\n logger.debug('Killed a worker %s', reason_str)\r\n elif settings.frames_per_sync < settings.max_frames_per_sync:\r\n settings.frames_per_sync += 1\r\n\r\n want_reduce_frame_batch = perc_rec_proc < 1\r\n # if we have processed fewer than we have received it's not as\r\n # important that we optimize image generation\r\n can_reduce_frame_batch = (\r\n settings.frame_batch_amount > frame_batch_dyn_min\r\n )\r\n can_increase_frame_batch = (\r\n settings.frame_batch_amount < frame_batch_dyn_max\r\n )\r\n\r\n if ((want_reduce_frame_batch or not can_increase_frame_batch)\r\n and can_reduce_frame_batch):\r\n cur_optim = 'reduce_frame_batch_amount'\r\n settings.frame_batch_amount -= 1\r\n elif can_increase_frame_batch:\r\n cur_optim = 'increase_frame_batch_amount'\r\n settings.frame_batch_amount += 1\r\n\r\n\r\n old_perf = (recpsec, procpsec)\r\n\r\n\r\n logger.debug('Shutting down workers...')\r\n workers.extend(paused_workers)\r\n paused_workers = []\r\n for worker in workers:\r\n worker.start_finish()\r\n workers.extend(stopping_workers)\r\n stopping_workers = []\r\n\r\n all_finished = False\r\n while not all_finished:\r\n all_finished = not isticher.do_work()\r\n if not all_finished:\r\n for worker in workers:\r\n if not worker.check_finish():\r\n all_finished = False\r\n break\r\n if not all_finished:\r\n for worker in stopping_workers:\r\n if not worker.check_finish():\r\n all_finished = False\r\n break\r\n\r\n logger.debug('All workers shut down, processing remaining frames...')\r\n while isticher.next_frame < num_frames:\r\n if not isticher.do_work():\r\n time.sleep(0.001)\r\n\r\n isticher.finish()\r\n for worker in workers:\r\n worker.check_sync() # just in case we leaked one\r\n worker.close()\r\n logger.info('Finished')\r\n return settings", "def generate_art(filename=def_art_name, min_depth=def_min_depth, max_depth=def_max_depth,\n x_size=def_x_size, y_size=def_y_size, write_funcs=def_save, func_filename=def_func_name):\n # Functions for red, green, and blue channels - where the magic happens!\n name_funcs = []\n lam_funcs = []\n for i in range(3):\n functions = build_random_function(min_depth, max_depth)\n name_funcs.append(functions[0])\n lam_funcs.append(functions[1])\n\n # generate art always uses lambdas because they are faster, while regenerate\n # art requrires use of nested lists, leading to this fixed 'True' value to specify\n make_art(lam_funcs, filename, True, x_size, y_size)\n write_func(name_funcs, func_filename)", "def save_images(self, sess, epoch):\n if not os.path.exists(self._images_dir):\n os.makedirs(self._images_dir)\n\n if not os.path.exists(os.path.join(self._images_dir, 'imgs')):\n os.makedirs(os.path.join(self._images_dir, 'imgs'))\n \n names = ['inputB_', 'fakeB_depth_' , 'cycB_']\n\n with open(os.path.join(\n self._output_dir, 'epoch_' + str(epoch) + '.html'), 'w') as v_html:\n for i in range(0, self._num_imgs_to_save):\n print(\"Saving image {}/{}\".format(i, self._num_imgs_to_save))\n x1_t, name1 = self.dataset.next_batch()\n count = 0\n fake_A_temp, cyc_B_temp = sess.run([\n self.fake_images_a,\n self.cycle_images_b], \n feed_dict={self.input_b: x1_t})\n \n fakedepth = fake_A_temp[:,:,:,-1]\n tensors = [x1_t, fakedepth, cyc_B_temp]\n\n for name, tensor in zip(names, tensors):\n #print(name)\n # if name == 
'inputB_' or name == 'fakeB_depth_':\n # image_name = name1[count] + '_' + name + str(epoch) + \"_\" + str(i) + \".jpg\"\n # imsave(os.path.join(self._images_dir, 'imgs', image_name), ((tensor[0] + 1) * 127.5).astype(np.uint8))\n # else:\n image_name = name + str(epoch) + \"_\" + str(i) + \".jpg\"\n imsave(os.path.join(self._images_dir, image_name), ((tensor[0] + 1) * 127.5).astype(np.uint8))\n v_html.write(\n \"<img src=\\\"\" +\n os.path.join('imgs', image_name) + \"\\\">\"\n )\n v_html.write(\"<br>\")\n count += 1", "def genImage(self, img_num=1, mode=\"stabilization\"):\n self.Gmodel.eval()\n with torch.no_grad():\n for i in range(img_num):\n latent_z = torch.randn(1, 512, 1, 1).normal_().to(self.device)\n output = self.Gmodel(latent_z, mode)\n print(\"output size: \", output.size())\n output = torch.clamp(output, min=0, max=1)\n output = output.cpu().squeeze().numpy()\n fake_img = output.transpose(1, 2, 0)\n print(\"fake image size: \", fake_img.shape)\n plt.imshow(fake_img)\n plt.show()\n save_file = os.path.join(self.save_dir, str(self.load_resl), \"%05d.jpg\" % i)\n os.makedirs(os.path.dirname(save_file), exist_ok=True)\n plt.imsave(save_file, fake_img)", "def main():\n nb_processed = 0\n for dataset_name in DATASETS:\n print(\"-----------------\")\n print(\"Dataset: '%s'\" % (dataset_name,))\n print(\"-----------------\")\n\n dataset_dir = os.path.join(WRITE_MAIN_DIR, dataset_name)\n if not os.path.isdir(dataset_dir):\n os.makedirs(dataset_dir)\n\n dataset = Dataset([os.path.join(READ_MAIN_DIR, dataset_name)])\n print(\"Found %d images total.\" % (len(dataset.fps),))\n\n errors = []\n\n scale_height, scale_width = SCALES[dataset_name]\n target_aspect_ratio = scale_width / scale_height\n\n # iterate over every image in the current dataset,\n # augment that image N times, add cols/rows until target aspect ratio\n # is reached, resize it (e.g. 64x64), save it\n for img_idx, (image_filepath, image) in enumerate(zip(dataset.fps, dataset.get_images())):\n print(\"[%s] Image %d of %d (%.2f%%)...\" \\\n % (dataset_name, img_idx+1, len(dataset.fps),\n 100*(img_idx+1)/len(dataset.fps)))\n\n # IOErrors during loading of images result here in a None value\n if image is None:\n print(\"Error / None\")\n errors.append((\n image_filepath,\n \"Failed to load image '%s' (idx %d for dataset %s)\" \\\n % (image_filepath, img_idx, dataset_name)\n ))\n else:\n # resize too big images to smaller ones before any augmentation\n # (for performance reasons)\n height = image.shape[0]\n width = image.shape[1]\n aspect_ratio = width / height\n if width > 1000 or height > 1000:\n image = misc.imresize(image, (1000, int(1000 * aspect_ratio)))\n\n # augment image\n # converts augmented versions automatically to float32, 0-1\n augmentations = augment(image, **AUGMENTATIONS[dataset_name])\n\n # create list of original image + augmented versions\n images_aug = [image / 255.0]\n images_aug.extend(augmentations)\n\n # for each augmented version of the images:\n # resize it to target aspect ratio (e.g. 
same width and height),\n # save it\n for aug_idx, image_aug in enumerate(images_aug):\n image_aug = to_aspect_ratio_add(image_aug, target_aspect_ratio)\n filename = \"{:0>6}_{:0>3}.jpg\".format(img_idx, aug_idx)\n img_scaled = misc.imresize(image_aug, (scale_height, scale_width))\n misc.imsave(os.path.join(dataset_dir, filename), img_scaled)\n\n nb_processed += 1\n\n print(\"Processed %d images with %d errors.\" % (nb_processed, len(errors)))\n for (fp, err) in errors:\n print(\"File %s error:\" % (fp,))\n print(err)\n print(\"Finished.\")", "def generate_art_3(filename, x_size=350, y_size=350, t_size=30):\n # Functions for red, green, and blue channels - where the magic happens!\n r_lb = random.randint(1, 5)\n g_lb = random.randint(1, 10)\n b_lb = random.randint(1, 5)\n red_function = build_random_function_3(r_lb, r_lb+1)\n green_function = build_random_function_3(g_lb, g_lb+1)\n blue_function = build_random_function_3(b_lb, b_lb+1)\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for time in range(t_size):\n for i in range(x_size):\n for j in range(y_size):\n t = remap_interval(time, 0, t_size, -1, 1)\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(eval_r_func_3(red_function, x, y, t)),\n color_map(eval_r_func_3(green_function, x, y, t)),\n color_map(eval_r_func_3(blue_function, x, y, t))\n )\n str_num = '0' * (5 - len(str(time))) + str(time)\n print(str_num)\n im.save(filename + str_num + '.png')\n return 'saved'", "def gen_fps():\n global data_src ,output_dir \n logger = TaskFileLogger(\"GenFP\")\n\n h_vars = load_hydro_var()\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n for h_type,var_d in h_vars.items():\n print \"considering %s\" %h_type\n\n t_output_dir = os.path.join(output_dir,h_type)\n if not os.path.exists(t_output_dir):\n print \"creating path %s\" %t_output_dir\n os.mkdir(t_output_dir)\n logger.log(\"%s started\" %(h_type))\n\n for fname in glob.glob(data_src):\n complex_id = os.path.basename(fname).split('.')[0] \n fp_path = os.path.join(t_output_dir,complex_id + \".fp\" )\n if os.path.exists(fp_path):\n #print \"%s processed\" %complex_id\n continue\n print \"processing %s,fp saved as %s\" %(fname , fp_path )\n c = Complex(fname,hydro_dict = var_d)\n c.get_fp()\n c.write_fp_to_file(fp_path)\n\n logger.log(\"%s finished\" %(h_type))", "def generate_movie(filename, x_size=640, y_size=360, numframes=150, dpi=100):\n global timeflag\n timeflag = 1\n\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7, 9)\n green_function = build_random_function(7, 9)\n blue_function = build_random_function(7, 9)\n print \"red_function:\\t\" + str(red_function)\n print \"green_function:\\t\" + str(green_function)\n print \"blue_function:\\t\" + str(blue_function)\n\n for n in range(1, numframes+1):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n t = remap_interval(n, 0, numframes, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y, t)),\n color_map(evaluate_random_function(green_function, x, y, t)),\n color_map(evaluate_random_function(blue_function, x, y, t))\n )\n im.save(\"movie_images/\"+'%03d'%n+\".png\")\n\n os.system(\"echo 'yes'|avconv -r 24 -i 
movie_images/%03d.png -vb 20M myart.mp4\")\n\n \"\"\"fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_aspect('equal')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n im = Image.new(\"RGB\", (x_size, y_size))\n\n def update_img(n):\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7, 9)\n green_function = build_random_function(7, 9)\n blue_function = build_random_function(7, 9)\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y, n)),\n color_map(evaluate_random_function(green_function, x, y, n)),\n color_map(evaluate_random_function(blue_function, x, y, n))\n )\n im.save(\"test.png\")\n return im\n ani = animation.FuncAnimation(fig, update_img, numframes, interval=24) #TODO: FIX THIS\n writer = animation.writers['avconv'](fps=24)\n\n ani.save(filename, writer=writer, dpi=dpi)\"\"\"", "def createAnimation(start_id, anim_count, frame_count, base_sprites):\n for a in range(anim_count):\n img_batch = []\n cnd_batch = []\n\n for f in range(frame_count):\n # Attaches encodings for each frame of the animation.\n cnd_vector = np.zeros(16)\n cnd_vector[start_id + a] = 1\n img_batch.append(base_sprites[a])\n cnd_batch.append(np.append(cnd_vector, [f]))\n\n f_count = np.zeros((len(cnd_batch), 1)) # Animation's frame count.\n\n # Creates a batch of images for one animation.\n anim = animator.run(y_ap, feed_dict= {\n b_ap: img_batch,\n l_ap: cnd_batch,\n b_asize: f_count\n })\n output_anim = np.concatenate(([base_sprites[a]], anim)) # Add base image to the output animation file.\n scipy.misc.imsave(app.root_path + \"/static/images/animations/a\" + str(a + start_id) + \".png\", joinImages(output_anim))\n\n return output_anim", "def create_gif(self, number_of_images=80, duration=0.1, output_filename=\"plot.gif\"):\n if self.quick_plot is None:\n self.quick_plot = pybamm.QuickPlot(self._solution)\n\n self.quick_plot.create_gif(\n number_of_images=number_of_images,\n duration=duration,\n output_filename=output_filename,\n )", "def pnghack(filepath, width=2000, height=2000):\t#cmd.png() doesnt work with api\n cmd.set('ray_trace_frames', 1) # Frames are raytraced before saving an image.\n cmd.viewport(width, height) # Set resolution\n cmd.mpng(filepath, 1, 1) # Use batch png mode with 1 frame only\n cmd.mplay() # cmd.mpng needs the animation to 'run'", "def create_png(input_filename, output_filename_w, output_filename_s, image_width, image_height, fft_size, f_max, f_min, wavefile, palette, channel):\n \n print \"processing file %s:\\n\\t\" % input_file,\n \n audio_file = audiolab.sndfile(input_filename, 'read') #opens the wavfile; audio_file is an object now\n \n samples_per_pixel = audio_file.get_nframes() / float(image_width)\n nyquist_freq = (audio_file.get_samplerate() / 2) + 0.0\n \"\"\"\n Initializes AudioProcessor class, which does FFT analysis and spits \n out amplitudes and frequencies to the SpectrogramImage and WaveformImage \n classes below later. For a stereo wav file, this selects a single channel \n to analyze. 
We might want to analyze both channels to give more input to\n the visualizer,though.\n \"\"\"\n processor = AudioProcessor(audio_file, fft_size, channel, numpy.hanning)\n \n if wavefile==1:\n waveform = WaveformImage(image_width, image_height, palette)\n spectrogram = SpectrogramImage(image_width, image_height, fft_size, f_max, f_min, nyquist_freq, palette)\n \n for x in range(image_width):\n #shows progress\n if x % (image_width/10) == 0:\n sys.stdout.write('.')\n sys.stdout.flush()\n \n seek_point = int(x * samples_per_pixel)\n next_seek_point = int((x + 1) * samples_per_pixel)\n \n (spectral_centroid, db_spectrum) = processor.spectral_centroid(seek_point)\n \n #let's have a look at the spectral centroid and the db_spectrum\n #print \"Spectral Centroid:\" + str(spectral_centroid)\n #print \"DB Spectrum:\" + str(db_spectrum)\n \n if wavefile==1:\n #aha! The peaks and spectral centroid make up the waveform.\n #Since the spectral centroid indicates timbre (often referred to as color),\n #it's probably what colors the waveform.\n peaks = processor.peaks(seek_point, next_seek_point)\n #let's have a look at these peaks\n #print \"Peaks:\" + str(peaks)\n waveform.draw_peaks(x, peaks, spectral_centroid)\n \n spectrogram.draw_spectrum(x, db_spectrum)\n \n if wavefile==1:\n waveform.save(output_filename_w)\n spectrogram.save(output_filename_s)\n \n print \" done\"", "def main(folder, outputfile):\n parser = argument_parser()\n args = parser.parse_args()\n\n show_all = args.show_all\n verbose = args.verbose\n\n random.seed(args.rng_seed)\n\n args.files = folder\n print args.files\n\n try:\n image = Image.open(args.files[0])\n except IOError, msg:\n print >> sys.stderr, msg\n return 1\n if image.mode == 'P':\n image = image.convert('RGB')\n \n if image.size[0] > args.w:\n image = image.resize((args.w, int((float(args.w)/image.size[0]) *\n image.size[1])), Image.ANTIALIAS)\n\n if not show_all:\n def nothing(a, b):\n pass\n do_something = nothing\n elif args.saving:\n do_something = Imsave(\"saved/\" + args.files[0][:-4] + \"_\" +\n str(image.size[0]) + \"/\").save\n else:\n import im_debug\n do_something = im_debug.show\n\n if verbose:\n import time\n class Logger:\n def __init__(self):\n self.t = 0\n\n def __call__(self, m):\n t_n = time.time()\n if self.t > 0:\n print >> sys.stderr, \"\\t\" + str(t_n - self.t)\n print >> sys.stderr, m\n self.t = t_n\n logger = Logger()\n\n else:\n def logger(m):\n pass\n \n if args.manual_mode:\n import manual\n try:\n lines = manual.find_lines(image)\n except manual.UserQuitError:\n #TODO ask user to try again\n return 1\n else:\n if args.l_cache:\n filename = (\"saved/cache/\" + args.files[0][:-4] + \"_\" +\n str(image.size[0]))\n cache_dir = \"/\".join(filename.split('/')[:-1])\n if os.path.exists(filename):\n lines, l1, l2, bounds, hough = pickle.load(open(filename))\n print >> sys.stderr, \"using cached results\"\n else:\n lines, l1, l2, bounds, hough = linef.find_lines(image, do_something, logger)\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n d_file = open(filename, 'wb')\n pickle.dump((lines, l1, l2, bounds, hough), d_file)\n d_file.close()\n else:\n lines, l1, l2, bounds, hough = linef.find_lines(image, do_something, logger)\n\n grid, lines = gridf.find(lines, image.size, l1, l2, bounds, hough,\n show_all, do_something, logger)\n if show_all:\n im_g = image.copy()\n draw = ImageDraw.Draw(im_g)\n for l in grid[0] + grid[1]:\n draw.line(l, fill=(64, 255, 64), width=1)\n do_something(im_g, \"grid\", name=\"grid\")\n\n intersections = 
intrsc.b_intersects(image, lines, show_all, do_something, logger)\n board = intrsc.board(image, intersections, show_all, do_something, logger)\n\n logger(\"finished\")\n\n # TODO! refactor this mess:\n if len(args.files) == 1:\n\n if args.sgf_output:\n print board.asSGFsetPos()\n else:\n print board\n \n else:\n game = output.Game(19, board) #TODO size parameter\n #for f in args.files[1:]:\n for i, f in enumerate(args.files):\n try:\n image = Image.open(f)\n except IOError, msg:\n print >> sys.stderr, msg\n continue\n if verbose:\n print >> sys.stderr, \"Opening\", f\n if image.mode == 'P':\n image = image.convert('RGB')\n if image.size[0] > args.w:\n image = image.resize((args.w, int((float(args.w)/image.size[0]) *\n image.size[1])), Image.ANTIALIAS)\n board = intrsc.board(image, intersections, show_all, do_something, logger)\n if args.sgf_output:\n game.addMove(board)\n else:\n with open(outputfile + str(i) + \".txt\", \"w\") as f:\n f.write(str(board))\n\n if args.sgf_output:\n print game.asSGF()\n\n return 0", "def concatenate_frames(I, Stokes, AOP, DOP, path_process, k, imgs_polar): #, Min, Max, im_cos, im_sin, rho, phi):\n\n \"\"\"# Fusion\n im_fusion = np.zeros((500, 500, 5), dtype=int)\n im_fusion[:, :, 0] = Stokes[0]\n im_fusion[:, :, 1] = Stokes[1]\n im_fusion[:, :, 2] = Stokes[2]\n im_fusion[:, :, 3] = AOP\n im_fusion[:, :, 4] = DOP\n if not os.path.exists(path_process + \"Fusion/\"):\n os.mkdir(path_process + \"Fusion/\")\n np.save(path_process + \"Fusion/\" + imgs_polar[k].split(\".\")[0], im_fusion.astype(np.uint8))\"\"\"\n\n \"\"\"# RetinaNet intensities\n im_I04590 = np.zeros((500, 500, 3))\n im_I04590[:, :, 0] = I[0]\n im_I04590[:, :, 1] = I[1]\n im_I04590[:, :, 2] = I[2]\n if not os.path.exists(path_process + \"I04590/\"):\n os.mkdir(path_process + \"I04590/\")\n imageio.imwrite(path_process + \"I04590/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I04590)\n\n # Min Max total intensity\n im_min_max = np.zeros((500, 500, 3))\n im_min_max[:, :, 0] = Stokes[0]\n im_min_max[:, :, 1] = Max\n im_min_max[:, :, 2] = Min\n if not os.path.exists(path_process + \"MinMax/\"):\n os.mkdir(path_process + \"MinMax/\")\n imageio.imwrite(path_process + \"MinMax/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_min_max)\n\n # Cos Sin total intensity\n im_cos_sin = np.zeros((500, 500, 3))\n im_cos_sin[:, :, 0] = Stokes[0]\n im_cos_sin[:, :, 1] = im_cos\n im_cos_sin[:, :, 2] = im_sin\n if not os.path.exists(path_process + \"CosSin/\"):\n os.mkdir(path_process + \"CosSin/\")\n imageio.imwrite(path_process + \"CosSin/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_cos_sin)\"\"\"\n\n \"\"\"# Cos Sin total intensity\n im_cos_sin = np.zeros((500, 500, 3))\n im_cos_sin[:, :, 0] = DOP\n im_cos_sin[:, :, 1] = im_cos\n im_cos_sin[:, :, 2] = im_sin\n if not os.path.exists(path_process + \"CosSin2_s/\"):\n os.mkdir(path_process + \"CosSin2_s/\")\n imageio.imwrite(path_process + \"CosSin2_s/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_cos_sin)\"\"\"\n\n\n \"\"\"im_I045135 = np.zeros((500, 500, 3))\n im_I045135[:, :, 0] = I[0]\n im_I045135[:, :, 1] = I[3]\n im_I045135[:, :, 2] = I[1]\n if not os.path.exists(path_process + \"I013545/\"):\n os.mkdir(path_process + \"I013545/\")\n imageio.imwrite(path_process + \"I013545/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I045135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0]\n im_I090135[:, :, 1] = I[2]\n im_I090135[:, :, 2] = I[3]\n if not os.path.exists(path_process + \"I090135/\"):\n os.mkdir(path_process + \"I090135/\")\n 
imageio.imwrite(path_process + \"I090135/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I090135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[1]\n im_I4590135[:, :, 1] = I[2]\n im_I4590135[:, :, 2] = I[3]\n if not os.path.exists(path_process + \"I4590135/\"):\n os.mkdir(path_process + \"I4590135/\")\n imageio.imwrite(path_process + \"I4590135/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I4590135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0] - I[1]\n im_I090135[:, :, 1] = I[0]\n im_I090135[:, :, 2] = I[0] + I[1]\n if not os.path.exists(path_process + \"RetinaNet_Ieq1/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq1/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq1/\" + str(k) + \".png\", im_I090135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0] - I[3]\n im_I090135[:, :, 1] = I[0]\n im_I090135[:, :, 2] = I[0] + I[3]\n if not os.path.exists(path_process + \"RetinaNet_Ieq2/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq2/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq2/\" + str(k) + \".png\", im_I090135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[1] - I[2]\n im_I090135[:, :, 1] = I[1]\n im_I090135[:, :, 2] = I[1] + I[2]\n if not os.path.exists(path_process + \"RetinaNet_Ieq3/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq3/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq3/\" + str(k) + \".png\", im_I090135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0]/I[1]\n im_I090135[:, :, 1] = I[0]/I[2]\n im_I090135[:, :, 2] = I[0]/I[3]\n if not os.path.exists(path_process + \"RetinaNet_Ieq4/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq4/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq4/\" + str(k) + \".png\", im_I090135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[0]\n im_I4590135[:, :, 1] = I[0]/I[1]\n im_I4590135[:, :, 2] = I[0]/I[2]\n if not os.path.exists(path_process + \"RetinaNet_eq5/\"):\n os.mkdir(path_process + \"RetinaNet_eq5/\")\n imageio.imwrite(path_process + \"RetinaNet_eq5/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[0]\n im_I4590135[:, :, 1] = I[0] / I[2]\n im_I4590135[:, :, 2] = I[0] / I[3]\n if not os.path.exists(path_process + \"RetinaNet_eq6/\"):\n os.mkdir(path_process + \"RetinaNet_eq6/\")\n imageio.imwrite(path_process + \"RetinaNet_eq6/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[1] / I[0]\n im_I4590135[:, :, 1] = I[1] / I[2]\n im_I4590135[:, :, 2] = I[1] / I[3]\n if not os.path.exists(path_process + \"RetinaNet_eq7/\"):\n os.mkdir(path_process + \"RetinaNet_eq7/\")\n imageio.imwrite(path_process + \"RetinaNet_eq7/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[2] / I[0]\n im_I4590135[:, :, 1] = I[2] / I[1]\n im_I4590135[:, :, 2] = I[2] / I[3]\n if not os.path.exists(path_process + \"RetinaNet_eq8/\"):\n os.mkdir(path_process + \"RetinaNet_eq8/\")\n imageio.imwrite(path_process + \"RetinaNet_eq8/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[3] / I[0]\n im_I4590135[:, :, 1] = I[3] / I[1]\n im_I4590135[:, :, 2] = I[3] / I[2]\n if not os.path.exists(path_process + \"RetinaNet_eq9/\"):\n os.mkdir(path_process + \"RetinaNet_eq9/\")\n imageio.imwrite(path_process + \"RetinaNet_eq9/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = 
I[0]/I[1]\n im_I4590135[:, :, 1] = I[0] / I[2]\n im_I4590135[:, :, 2] = DOP/255\n if not os.path.exists(path_process + \"RetinaNet_eq10/\"):\n os.mkdir(path_process + \"RetinaNet_eq10/\")\n imageio.imwrite(path_process + \"RetinaNet_eq10/\" + str(k) + \".png\", im_I4590135)\"\"\"\n\n # retinaNet Stokes\n im_Stokes = np.zeros((Stokes.shape[1], Stokes.shape[2], 3))\n im_Stokes[:, :, 0] = Stokes[0]\n im_Stokes[:, :, 1] = Stokes[1]\n im_Stokes[:, :, 2] = Stokes[2]\n if not os.path.exists(path_process + \"Stokes/\"):\n os.mkdir(path_process + \"Stokes/\")\n imageio.imwrite(path_process + \"Stokes/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_Stokes)\n \"\"\"\n\n # RetinaNet Params\n im_Params = np.zeros((500, 500, 3))\n im_Params[:, :, 0] = Stokes[0]\n im_Params[:, :, 1] = AOP\n im_Params[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Params/\"):\n os.mkdir(path_process + \"Params/\")\n imageio.imwrite(path_process + \"Params/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_Params)\"\"\"\n\n \"\"\"# HSV image\n HSV = np.zeros((500, 500, 3))\n HSV[:, :, 0] = AOP / 255 * 179\n HSV[:, :, 1] = DOP\n HSV[:, :, 2] = Stokes[0]\n if not os.path.exists(path_process + \"HSV/\"):\n os.mkdir(path_process + \"HSV/\")\n imageio.imwrite(path_process + \"HSV/\" + imgs_polar[k].split(\".\")[0] + \".png\", HSV)\"\"\"\n\n \"\"\"inten = (I[0] + I[1] + I[2] + I[3]) / 2\n\n hsv = np.uint8(cv2.merge(((phi + np.pi/2)/np.pi*180,rho/np.max(rho)*255, inten/inten.max()*255)))\n if not os.path.exists(path_process + \"HSV_2/\"):\n os.mkdir(path_process + \"HSV_2/\")\n imageio.imwrite(path_process + \"HSV_2/\" + imgs_polar[k].split(\".\")[0] + \".png\", hsv)\"\"\"\n\n \"\"\"# TSV image\n TSV = np.zeros((500, 500, 3))\n TSV[:, :, 0] = AOP\n TSV[:, :, 1] = DOP\n TSV[:, :, 2] = inten / inten.max() * 255\n if not os.path.exists(path_process + \"RetinaNet_TSV/\"):\n os.mkdir(path_process + \"RetinaNet_TSV/\")\n imageio.imwrite(path_process + \"RetinaNet_TSV/\" + str(k) + \".png\", TSV)\n\n # Pauli image\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[2]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = I[0]\n if not os.path.exists(path_process + \"RetinaNet_Pauli/\"):\n os.mkdir(path_process + \"RetinaNet_Pauli/\")\n imageio.imwrite(path_process + \"RetinaNet_Pauli/\" + str(k) + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0] + I[2]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = I[0] - I[2]\n if not os.path.exists(path_process + \"Pauli2/\"):\n os.mkdir(path_process + \"Pauli2/\")\n imageio.imwrite(path_process + \"Pauli2/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0] + I[2]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = I[0] - I[2]\n if not os.path.exists(path_process + \"Pauli2_inv/\"):\n os.mkdir(path_process + \"Pauli2_inv/\")\n imageio.imwrite(path_process + \"Pauli2_inv/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = Stokes[0]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = Stokes[1]\n if not os.path.exists(path_process + \"Pauli2/\"):\n os.mkdir(path_process + \"Pauli2/\")\n imageio.imwrite(path_process + \"Pauli2/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = (I[1]+I[3])/2\n Pauli[:, :, 2] = I[2]\n if not os.path.exists(path_process + \"Sinclair/\"):\n os.mkdir(path_process + \"Sinclair/\")\n imageio.imwrite(path_process + \"Sinclair/\" + 
imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = Stokes[0]\n Pauli[:, :, 1] = I[1] + I[3]\n Pauli[:, :, 2] = Stokes[1]\n if not os.path.exists(path_process + \"Pauli/\"):\n os.mkdir(path_process + \"Pauli/\")\n imageio.imwrite(path_process + \"Pauli/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[2]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test/\"):\n os.mkdir(path_process + \"Test/\")\n imageio.imwrite(path_process + \"Test/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[1]\n Pauli[:, :, 1] = I[3]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test1/\"):\n os.mkdir(path_process + \"Test1/\")\n imageio.imwrite(path_process + \"Test1/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[3]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test2/\"):\n os.mkdir(path_process + \"Test2/\")\n imageio.imwrite(path_process + \"Test2/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[1] + I[2] - I[3]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test3/\"):\n os.mkdir(path_process + \"Test3/\")\n imageio.imwrite(path_process + \"Test3/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = (I[0]/I[1]) #/ np.amax(I[0] / I[1]) * 255\n if not os.path.exists(path_process + \"Pauli3/\"):\n os.mkdir(path_process + \"Pauli3/\")\n imageio.imwrite(path_process + \"Pauli3/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Rachel = np.zeros((500, 500, 3))\n Rachel[:, :, 0] = Stokes[0]\n Rachel[:, :, 1] = Stokes[1]\n Rachel[:, :, 2] = DOP\n if not os.path.exists(path_process + \"RetinaNet_Rachel/\"):\n os.mkdir(path_process + \"RetinaNet_Rachel/\")\n imageio.imwrite(path_process + \"RetinaNet_Rachel/\" + str(k) + \".png\", Rachel)\n\n Rachel = np.zeros((500, 500, 3))\n Rachel[:, :, 0] = I[1]\n Rachel[:, :, 1] = I[0]\n Rachel[:, :, 2] = DOP\n if not os.path.exists(path_process + \"RetinaNet_Rachel2/\"):\n os.mkdir(path_process + \"RetinaNet_Rachel2/\")\n imageio.imwrite(path_process + \"RetinaNet_Rachel2/\" + str(k) + \".png\", Rachel)\"\"\"", "def generate(options, args):\n\n size = (256,256)\n # Expect option of the form '64,40'.\n if options.size:\n size = re.findall(r'\\d+', options.size)\n if len(size) not in [1,2]:\n raise ValueError(\n 'size should be one or two numbers, separated by punctuation')\n if len(size) == 1:\n size *= 2\n assert len(size) == 2\n size = map(int, size)\n options.bitdepth = options.depth\n\n pattern = args[0]\n\n pixels = generate_image(size, options.bitdepth, pattern)\n\n writer = png.Writer(size[0], size[1],\n bitdepth=options.bitdepth,\n greyscale=True,\n alpha=False)\n writer.write_array(sys.stdout, pixels)", "def generate_gif(frames, reward, path, number=None, evaluation=False):\n for i, frame in enumerate(frames):\n frames[i] = resize(frame, (420, 320, 3),\n order=0, preserve_range=True).astype(np.uint8)\n if evaluation:\n path += '/atari-step-{}-reward-{}.gif'.format(number, reward)\n else:\n path += '/atari-play-reward-{}.gif'.format(reward)\n imageio.mimsave(path, frames, duration=1/30)", "def generate_art(filename, 
x_size=350, y_size=350):\n # Functions for red, green, and blue channels - where the magic happens!\n r_lb = random.randint(6, 10)\n g_lb = random.randint(6, 10)\n b_lb = random.randint(6, 10)\n red_function = build_random_function(r_lb, r_lb+1)\n green_function = build_random_function(g_lb, g_lb+1)\n blue_function = build_random_function(b_lb, b_lb+1)\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y)),\n color_map(evaluate_random_function(green_function, x, y)),\n color_map(evaluate_random_function(blue_function, x, y))\n )\n im.save(filename+'.png')\n return 'saved'", "def main():\n\tparser = construct_parser()\n\targs = parser.parse_args()\n\ttiles = slice(args.image, args.num_tiles, save=False)\n\tsave_tiles(tiles, prefix=get_basename(args.image), directory=args.dir,\n\t\t format=args.format)", "def run_experiment(x_loops=15, max_steps=0, display_on=True, max_fps=10,\n garden_size=8, tako_number=1, pop_max=30, max_width=1800,\n max_height=900, collect_data=True, export_all=False,\n rand_nets=False, max_gen = 505, genetic_mode=\"Plain\",\n learning_on=False, seeds=None, garden_mode=\"Diverse Static\",\n family_detection=None, family_mod=0, record_inbreeding=True,\n inbreed_lim = 1.1, hla_genes=0, binary_health=0,\n carrier_percentage=40, two_envs=False, diff_envs=False,\n migration_rate=0, phen_pref=False, filename=\"\"): \n #round width/height down to nearest multiple of 50 if need be\n if max_width % 50 != 0:\n max_width = max_width - (max_width % 50)\n if max_height % 50 != 0:\n max_height = max_height - (max_height % 50)\n\n i = 0\n #create csv files if they don't already exist\n if collect_data or export_all:\n if filename == \"\":\n filename = str(int(time.time())) + \".csv\"\n elif len(filename) < 4:\n filename = filename + \".csv\"\n elif filename[-4:] != \".csv\":\n filename = filename + \".csv\"\n\n if not os.path.exists(\"Data\"):\n os.makedirs(\"Data\")\n\n if collect_data:\n if not os.path.exists(os.path.join(\"Data\", filename)):\n with open(os.path.join(\"Data\", filename), 'a', newline='') as\\\n csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(['iteration', 'env #', 'ID', 'parent1',\n 'parent2', 'age', 'generation', '# children',\n 'mating attempts', 'accum pain',\n 'cause of death', 'timestep', 'mutations',\n 'parent_degree', 'parent_genoverlap',\n '# disorders',\n 'health a', 'health b', 'preference'])\n else:\n with open(os.path.join(\"Data\", filename), newline='') as\\\n csvfile:\n reader = csv.DictReader(csvfile)\n row = None\n for row in reader: pass\n if row != None:\n i = int(row[\"iteration\"]) + 1\n\n if export_all:\n h = make_headers()\n f = os.path.join('Data', (filename[:-4] + ' gene data.csv'))\n if not os.path.exists(f):\n with open(f, 'a') as file:\n writ = csv.writer(file)\n writ.writerow(h)\n\n tako.rand_nets = rand_nets\n tako.family_mod = family_mod\n tako.family_detection = family_detection\n gt.family_detection = family_detection\n tako.record_inbreeding = record_inbreeding\n tako.inbreed_lim = inbreed_lim\n tako.hla_genes = hla_genes\n tako.binary_health = binary_health\n tako.carrier_percentage = carrier_percentage\n tako.phen_pref = phen_pref\n gt.phen_pref = phen_pref\n \n loop_limit = x_loops\n if loop_limit < 1:\n loop_limit = 1\n\n if seeds == None:\n seeds = [None for 
i in range(x_loops)]\n\n while loop_limit > 0:\n #check if seeds is long enough\n if len(seeds) < loop_limit + i:\n for j in range(loop_limit + i - len(seeds)):\n seeds.append(seeds[j])\n if seeds[0] != None:\n tako.set_seed(seeds[i])\n g = garden_game(garden_size, tako_number, pop_max, max_width,\n max_height, display_on, max_fps, learning_on,\n genetic_mode, rand_nets, garden_mode, filename,\n export_all, family_mod, family_detection,\n two_envs, diff_envs, migration_rate,\n seeds[i])\n if display_on:\n main_window = g\n main_window.main_loop(max_steps, max_gen, display_on,\n collect_data, garden_mode, i)\n else:\n g.main_loop(max_steps, max_gen, display_on, collect_data,\n garden_mode, i)\n loop_limit -= 1\n i += 1", "def saveImage(self, fileName=\"mandelbrot.frac\"):\n # Save the image as a PNG\n if fileName == \"\":\n fileName = \"mandelbrot.frac\"\n directories = fileName.split(\"/\")\n for n in directories:\n if \".frac\" in n:\n name = n.rsplit(\".\", 1)[0]\n self.img.write(f\"{name}.png\")\n print(f\"Wrote image {name}.png\")", "def generate_report(\n in_file_size: int,\n out_file_size: int,\n in_file_path: str,\n out_file_path: str,\n in_image_size: Size,\n out_image_size: Size,\n elapsed_time: float,\n no_op: bool = False,\n) -> str:\n size_delta_bytes = out_file_size - in_file_size\n in_relative = os.path.relpath(in_file_path)\n out_relative = os.path.relpath(out_file_path)\n no_op_msg = \"**Image not saved due to -n flag; reporting only**\"\n report_title = \" Processing Summary \"\n report_end = \" End \"\n report_arrow = \"->\"\n report = []\n report.append(\n [\n \"File Name:\",\n in_relative,\n report_arrow if out_file_path is not None else \"\",\n out_relative if out_file_path is not None else \"\",\n ]\n )\n report.append(\n [\"Image Size:\", str(in_image_size), report_arrow, str(out_image_size)]\n )\n # TODO: black up arrow \\u25b2 throws UnicodeEncodeError on Windows when used with `fd -x`\n report.append(\n [\n \"File Size:\",\n humanize_bytes(in_file_size),\n report_arrow,\n f\"{humanize_bytes(out_file_size)} (▲ {(size_delta_bytes/in_file_size) * 100:2.1f}%)\",\n ]\n )\n report.append([\"Elapsed:\", f\"{elapsed_time*1000:.1f} ms\"])\n for c in report:\n for n in range(4):\n try:\n c[n] = c[n]\n except IndexError:\n c.append(\"\")\n c[2] = \"\" if c[3] == c[1] else c[2]\n c[3] = \" \" if c[3] == c[1] else c[3]\n\n padding = 2\n col0w = max([len(str(c[0])) for c in report]) + padding\n col1w = max([len(str(c[1])) for c in report]) + padding\n col2w = max([len(str(c[2])) for c in report]) + padding\n col3w = max([len(str(c[3])) for c in report]) + padding\n out = []\n out.append(\n f\"{ef.b}{report_title:{'-'}^{col0w + col1w + col2w + col3w + 1}}{rs.all}\"\n )\n if no_op:\n out.append(\n f\"{fg.li_cyan}{ef.b}{no_op_msg:^{col0w + col1w + col2w + col3w + 1}}{rs.all}\"\n )\n for line in report:\n out.append(\n f\"{line[0]:<{col0w}}{rs.all} {line[1]:{col1w}}\"\n + f\"{line[2]:{col2w}} {ef.i}{line[3]:{col3w}}{rs.all}\"\n )\n out.append(f\"{ef.b}{report_end:{'-'}^{col0w + col1w + col2w + col3w + 1}}{rs.all}\")\n return \"\\n\".join(out)", "def render_and_save():\n\n rendering_config = configuration.get_config()\n rendering_config = ml_collections.FrozenConfigDict(rendering_config)\n aspect_ratio = rendering_config.aspect_ratio\n height = rendering_config.height\n width = int(aspect_ratio * height)\n\n scene_camera = build_camera(rendering_config, aspect_ratio)\n world = build_world(rendering_config)\n\n # Render.\n logging.info(\"Tracing rays...\")\n render_image_fn = 
jax.jit(\n render.generate_image,\n static_argnames=[\"height\", \"width\", \"config\"])\n image = render_image_fn(height, width, scene_camera, world, rendering_config)\n\n image = render.correct_gamma(image, gamma=rendering_config.gamma_correction)\n\n logging.info(\"Saving to file...\")\n output.export_as_ppm(image, rendering_config.output_file)\n\n return image", "def write_frame(self, img):\n if img.shape[0] % 2 != 0:\n print(\"Warning: height is not divisible by 2! Dropping last row\")\n img = img[:-1]\n if img.shape[1] % 2 != 0:\n print(\"Warning: width is not divisible by 2! Dropping last column\")\n img = img[:, :-1]\n if self.post_processor:\n img = self.post_processor.process(img)\n if self.width is None:\n self.width = img.shape[0]\n self.height = img.shape[1]\n assert os.path.exists(self.directory)\n fn = FRAME_FN_TEMPLATE % self.frame_counter\n self.frame_fns.append(fn)\n imwrite(img, os.path.join(self.frame_directory, fn))\n self.frame_counter += 1\n if self.frame_counter % self.next_video_checkpoint == 0:\n if self.automatic_build:\n self.make_video()\n self.next_video_checkpoint *= 2", "def save_images(self, sess, epoch):\n if not os.path.exists(self._images_dir):\n os.makedirs(self._images_dir)\n\n names = ['inputA_', 'inputB_', 'fakeA_',\n 'fakeB_', 'cycA_', 'cycB_']\n\n with open(os.path.join(\n self._output_dir, 'epoch_' + str(epoch) + '.html'\n ), 'w') as v_html:\n for i in range(0, self._num_imgs_to_save):\n print(\"Saving image {}/{}\".format(i, self._num_imgs_to_save))\n inputs = sess.run(self.inputs)\n fake_A_temp, fake_B_temp, cyc_A_temp, cyc_B_temp = sess.run([\n self.fake_images_a,\n self.fake_images_b,\n self.cycle_images_a,\n self.cycle_images_b\n ], feed_dict={\n self.input_a: inputs['images_i'],\n self.input_b: inputs['images_j']\n })\n\n tensors = [inputs['images_i'], inputs['images_j'],\n fake_B_temp, fake_A_temp, cyc_A_temp, cyc_B_temp]\n\n for name, tensor in zip(names, tensors):\n image_name = name + str(epoch) + \"_\" + str(i) + \".jpg\"\n imsave(os.path.join(self._images_dir, image_name),\n ((tensor[0] + 1) * 127.5).astype(np.uint8)\n )\n v_html.write(\n \"<img src=\\\"\" +\n os.path.join('imgs', image_name) + \"\\\">\"\n )\n v_html.write(\"<br>\")", "def saveFrames(filepath, frames):\n\n for i, frame in enumerate(frames):\n image = Image.fromarray(frame)\n image.save(filepath + str(i).zfill(8) + '.png')" ]
[ "0.6099129", "0.60725677", "0.59570324", "0.57942635", "0.5732733", "0.5721138", "0.5702478", "0.5699087", "0.56475115", "0.56121796", "0.5609035", "0.56052744", "0.5589004", "0.5555326", "0.5541834", "0.5528516", "0.5511197", "0.55007917", "0.5499388", "0.54913527", "0.54791605", "0.5475442", "0.5443264", "0.54303384", "0.5371421", "0.534102", "0.53292656", "0.53282416", "0.53265744", "0.53225476" ]
0.76743954
0
Calculates and returns Y position to draw the graph or the border lines on canvas. Correct calculation is based on given sensor.
def calculate_y_pos(value, sensor):
    if GraphModel.check_value(value, sensor):
        return ((32 - int(value)) * 12.5) + 50 if sensor == 't' else 450 - (int(value) / 10 * 40)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_y_position(self):\n return self.actual_coordinates[1]", "def _get_y(self):\n return self.position.y", "def get_y_position(self): \n return self.rect.y", "def _get_y(self):\n enabled = self.num_enabled\n\n if self.heart_enabled:\n self._heart_y = 45*(self.num_enabled - enabled) + 75\n enabled -= 1\n if self.speed_enabled:\n self._speed_y = 45*(self.num_enabled - enabled) + 75\n enabled -= 1\n if self.cadence_enabled:\n self._cadence_y = 45*(self.num_enabled - enabled) + 75\n enabled -= 1\n if self.ams_enabled:\n self._ams_y = 45*(self.num_enabled - enabled) + 75\n enabled -= 1", "def y_coord(self):\n\n return self.y0 + np.arange(self.ny) * self.dy", "def get_ly(self):\r\n return self.dy * self.ny - self.oy", "def y_coords(self):\n y_coords = self.get_fre_band_arr()\n y_coords = np.insert(y_coords, 0, self.low_fre)\n return y_coords", "def xycurves_read_y(self) -> float:\n return float(self.dss_obj.XYCurvesF(ctypes.c_int32(2), ctypes.c_double(0)))", "def y(self) -> int:\n return self.data.y_centre >> 4", "def get_y_coordinate(height, rank):\n # Divided the line chart frame by MAX_RANK vertically and equally AND get y by the current rank.\n if rank > MAX_RANK:\n # Set y as the bottom frame line when the current rank is over MAX_RANK.\n y = height - GRAPH_MARGIN_SIZE\n else:\n y = (height - GRAPH_MARGIN_SIZE * 2) / MAX_RANK * rank + GRAPH_MARGIN_SIZE\n return y", "def get_pos_y(self):\n return self.__pos_y", "def y(self):\r\n return self.position.y", "def get_y(self):\n return self.coords[1]", "def getYpos(self):\n return self.y", "def findY(self):\n return self.y", "def __get_y__(self):\n return self.Direction['y']", "def getY(self):\n return self.position[1]", "def y(self):\n return self.coords[1]", "def get_virtual_y_position(self):\n x_real = (\n - 1 * (self.get_x_position() - self.get_origin_x_position()) * cos(\n self.get_origin_direction() * pi / 180\n )\n )\n y_real = (\n (self.get_y_position() - self.get_origin_y_position()) *\n sin(self.get_origin_direction() * pi / 180)\n )\n return x_real + y_real", "def y(self):\n return self.top", "def y(self):\n return self.axes[0]", "def getYCoordinate(self) -> float:\n return self.y_coord", "def _hLine(self, y):\n left, _top, width, _height = self.plot.getPlotBoundsInPixels()\n\n dataPos1 = self.plot.pixelToData(left, y, check=False)\n dataPos2 = self.plot.pixelToData(left + width, y, check=False)\n return dataPos1, dataPos2", "def GetY(self):\r\n\r\n return self._y", "def xycurves_read_y_shift(self) -> float:\n return float(self.dss_obj.XYCurvesF(ctypes.c_int32(6), ctypes.c_double(0)))", "def get_origin_y_position(self):\n return self.origin_coordinates[1]", "def y(self):\n self._sort_measurements()\n return self._distances*np.sin(self._angles)", "def get_y(self):\n return self.posY", "def getY(self):\n return self.y", "def getY(self):\n return self.y" ]
[ "0.66574275", "0.6487656", "0.6412374", "0.6384742", "0.63770056", "0.63079816", "0.63051313", "0.62791324", "0.6248244", "0.6188574", "0.61683464", "0.61539483", "0.6150818", "0.6133438", "0.61187756", "0.6089482", "0.6060128", "0.605848", "0.60405856", "0.60292876", "0.6007037", "0.60058916", "0.599354", "0.59916", "0.59851515", "0.59619784", "0.5932547", "0.59287506", "0.5908922", "0.5908922" ]
0.78490245
0
Returns mean value for values in mean_t or mean_l list based on sensor.
def calculate_mean(cls, sensor):
    try:
        if sensor == 't':
            return cls.calculate_y_pos(sum(cls.mean_t) / len(cls.mean_t), sensor)
        if sensor == 'l':
            return cls.calculate_y_pos(sum(cls.mean_l) / len(cls.mean_l), sensor)
    except ZeroDivisionError:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_t_mean_value(self, value_list):\n if not len(value_list):\n return None\n else:\n return round(statistics.mean(value_list), 1)", "def add_value_mean(cls, sensor, values, device_id):\n if values[device_id] is not None:\n if sensor == 't':\n cls.mean_t.append(int(values[device_id][sensor]))\n if sensor == 'l':\n cls.mean_l.append(int(values[device_id][sensor]))", "def action_store_mean(raw_val):\n\n if isinstance(raw_val, list):\n val_med = None\n values = []\n for val in raw_val:\n val = auto_type_convert(val)\n if isinstance(val, (int, float)):\n values.append(val)\n\n values_length = len(values)\n if values_length != 0:\n mean = sum(values)/values_length\n return mean\n else:\n return None\n else:\n return None", "def _get_u_mean(self, nodelist: List[Tuple[int, int]]) -> Optional[float]:\n meanlist = [self.u_matrix[u_node] for u_node in nodelist]\n u_mean = None\n if self.u_mean_mode_ == \"mean\":\n u_mean = np.mean(meanlist)\n elif self.u_mean_mode_ == \"median\":\n u_mean = np.median(meanlist)\n elif self.u_mean_mode_ == \"min\":\n u_mean = np.min(meanlist)\n elif self.u_mean_mode_ == \"max\":\n u_mean = np.max(meanlist)\n return u_mean", "def mean(vals):", "def get_th_mean_values(self, value_list):\n if not len(value_list):\n return None, None, None\n t, h, d = map(list, zip(*value_list))\n return (\n round(statistics.mean(t), 1),\n int(round(statistics.mean(h), 0)),\n round(statistics.mean(d), 1),\n )", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def mean(list_of_values):\r\n # Explicit float(...) to allow for Python 2 division.\r\n return sum(list_of_values) / float(len(list_of_values))", "def _getMean(self, mean):\n if isinstance(mean, str):\n if mean == 'rgb':\n R, G, B = 126.408, 122.788, 132.129 \n return np.asarray([R, G, B])\n elif mean == 'hof':\n R, G, B = 10.757, 10.832, 10.758\n return np.asarray([R, G, B])\n elif mean == 'ycbcr':\n R, G, B = 132.058, 128.828, 122.513\n return np.asarray([B, R, G])\n elif isfile(mean) and mean.endswith('.npy'):\n return np.load(mean).mean(1).mean(1)\n elif isinstance(mean, list) and len(mean) == 3:\n return mean", "def _mean(listvalue):\n\treturn sum(listvalue)/len(listvalue)", "def get_rain_mean_values(self, value_list):\n if not len(value_list):\n return None, None\n\n rate, total = map(list, zip(*value_list))\n rain = round(total[-1] - total[0], 1)\n\n # Rain can't be negative and in january many rain sensors are\n # resetted to 0 which leads to negative values\n if rain < 0:\n rain = 0.0\n return round(statistics.mean(rate)), rain", "def calculate_mean(weather_data):\n sum_value=0\n\n for value in weather_data:\n sum_value += float(value)\n \n mean = sum_value/len(weather_data)\n\n return (mean)", "def mean_sensor_id_get(sensor_id, start_date=None, end_date=None): # noqa: E501\n try:\n client = InfluxDBClient('influxdb', 8086, 'user', 'user', 'sensor')\n sensor_id = \"laptop_temperature_1\"\n str = \"\"\n if start_date is not None:\n str = f\"WHERE time > '{datetime.fromtimestamp(start_date)}'\"\n if end_date is not None:\n if len(str) > 0:\n str += \" AND \"\n else:\n str = \"WHERE \"\n str += f\"time < '{datetime.fromtimestamp(end_date)}'\"\n request = f\"SELECT mean({sensor_id}) from client1 {str} GROUP BY *;\"\n print(request)\n result = client.query(request)\n mean = list(result.get_points())[0]['mean']\n except:\n traceback.print_exc()\n return []\n return [mean]", "def get_uv_mean_value(self, value_list):\n if len(value_list):\n return int(round(statistics.mean(value_list), 0))\n else:\n return 
None", "def get_mean(numlist):\n return np.mean(numlist)", "def mean(values):\n # Write the mean() function\n mean = sum(values) / len(values)\n return mean", "def GetMean(trrecord, samplelists=[], uselength=True):\n if len(samplelists) == 0: samplelists.append(None)\n return [utils.GetMean(trrecord.GetAlleleFreqs(samplelist=sl, uselength=True)) for sl in samplelists]", "def reset_mean(cls, sensor):\n if sensor == 't':\n cls.mean_t.clear()\n return cls.mean_t == []\n if sensor == 'l':\n cls.mean_l.clear()\n return cls.mean_l == []", "def get_sol_mean_value(self, value_list):\n if len(value_list):\n return int(round(statistics.mean(value_list), 0))\n else:\n return None", "def mean(values):\n # Write the mean() function\n mean = sum(values) / len(values)\n return mean", "def find_mean(values):\n mean = sum(values) / len(values)\n return mean", "def get_wind_mean_values(self, value_list):\n if not len(value_list):\n return None, None, None, None\n\n dir, gust, avg, chill = map(list, zip(*value_list))\n return (\n int(round(statistics.mean(dir), 0)),\n round(statistics.mean(gust), 1),\n round(statistics.mean(avg), 1),\n round(statistics.mean(chill), 1),\n )", "def mean(list_of_values):\n # so don't have to worry about getting the divisor.\n # Explicit float(...) to allow for Python 2 division.\n try:\n mean = sum(list_of_values) / float(len(list_of_values))\n return mean\n except:\n return False", "def mean(self, values):\n return self.aggregate(values, \"mean\")", "def mean_value( values ):\n return sum( values ) / len( values )", "def mean(values):\r\n return sum(values) / float(len(values))", "def _get_mean(self):\n return (0.485, 0.456, 0.406)", "def average(numbers, averagetype='mean'):\n\n try:\n # Try to get the mean of the numbers\n statistics.mean(numbers)\n\n except RuntimeError:\n # Raise a warning\n raise ValueError('Unable to parse the list.')\n\n # If the lowercase version of the average type is 'mean'\n if averagetype.lower() == 'mean':\n # Return the answer\n return statistics.mean(numbers)\n\n # If the lowercase version of the average type is 'mode'\n elif averagetype.lower() == 'mode':\n # Return the answer\n return statistics.mode(numbers)\n\n # If the lowercase version of the average type is 'median'\n elif averagetype.lower() == 'median':\n # Return the answer\n return statistics.median(numbers)\n\n # If the lowercase version of the average type is 'min'\n elif averagetype.lower() == 'min':\n # Return the answer\n return min(numbers)\n\n # If the lowercase version of the average type is 'max'\n elif averagetype.lower() == 'max':\n # Return the answer\n return max(numbers)\n\n # If the lowercase version of the average type is 'range'\n elif averagetype.lower() == 'range':\n # Return the answer\n return max(numbers) - min(numbers)\n\n # Raise a warning\n raise ValueError('Invalid average type provided.')", "def Mean(data):\n return data.mean()", "def get_thb_mean_values(self, value_list):\n if not len(value_list):\n return None, None, None, None, None\n\n temp, hum, dew, baro, forecast, sealevel = map(list, zip(*value_list))\n return (\n round(statistics.mean(temp), 1),\n int(round(statistics.mean(hum), 0)),\n round(statistics.mean(dew), 1),\n round(statistics.mean(baro), 1),\n round(statistics.mean(sealevel), 1),\n )" ]
[ "0.72287035", "0.7126963", "0.69162667", "0.6880043", "0.68731856", "0.6742288", "0.6600956", "0.6521696", "0.6425413", "0.64079094", "0.6404101", "0.6390297", "0.63823843", "0.6371286", "0.6350793", "0.630408", "0.63019705", "0.62969667", "0.6281619", "0.62799853", "0.62172556", "0.62116146", "0.62102807", "0.6181874", "0.6173741", "0.6139673", "0.6137812", "0.61249393", "0.60550225", "0.60530496" ]
0.74634945
0
This method is used internally to check if the current animation needs to be skipped or not. It also checks if the number of animations that were played correspond to the number of animations that need to be played, and raises an EndSceneEarlyException if they don't correspond.
def update_skipping_status(self):
    # there is always at least one section -> no out of bounds here
    if self.file_writer.sections[-1].skip_animations:
        self.skip_animations = True
    if (
        config["from_animation_number"]
        and self.num_plays < config["from_animation_number"]
    ):
        self.skip_animations = True
    if (
        config["upto_animation_number"]
        and self.num_plays > config["upto_animation_number"]
    ):
        self.skip_animations = True
        raise EndSceneEarlyException()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def skip_next_animation(self):\n current_num_animations = len(self.queued_animations)\n while len(self.queued_animations) >= current_num_animations and len(self.queued_animations) > 0:\n self.update(100)", "def verify_ending(self):\n self._fast_forward_to_penultimate_play()\n if self.game_status.game_over:\n # Game shouldn't be over quite yet!\n self.reset()\n return False\n\n self.apply_next_event()\n game_over = self.game_status.game_over\n excess_outs = self.game_status.excess_outs\n self.reset()\n return game_over and not excess_outs", "def check_anim_layers(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n if len(pm.ls(type=\"animLayer\")) > 0:\n progress_controller.complete()\n raise PublishError(\"There should be no <b>Animation Layers</b> in the scene!!!\")\n progress_controller.complete()", "def _checkRoundOver(self):\n\n # if we already ended it doesn't matter\n if self.hasEnded():\n return\n\n if not any(player.isAlive() for player in self.teams[0].players):\n # allow continuing after wave 1\n if self._wave > 1:\n self.continueOrEndGame()\n else:\n self.endGame()", "def is_skip(self):\n\n return self.severity == AlertSeverity.TOLERABLE and self.kind == AlertKind.ABORTION", "def isFallthrough(self) -> bool:\n ...", "def hasFallthrough(self) -> bool:\n ...", "def run_no_learn(self):\n\n for agent in self.match_controller.agents:\n assert agent.get_agent_type() == Constants.AGENT_TYPE.AGENT, \"Both agents must be in inference mode\"\n\n self.current_step = 0\n self.last_observation_object = None\n\n # Reset game + map\n self.match_controller.reset(randomize_team_order=False)\n # Running\n self.match_generator = self.match_controller.run_to_next_observation()\n try:\n next(self.match_generator)\n except StopIteration:\n # The game episode is done.\n is_game_error = False\n print('Episode run finished successfully!')\n except GameStepFailedException:\n # Game step failed.\n is_game_error = True\n\n return is_game_error", "def run_no_learn(self):\n\n for agent in self.match_controller.agents:\n assert agent.get_agent_type() == Constants.AGENT_TYPE.AGENT, \"Both agents must be in inference mode\"\n\n self.current_step = 0\n self.last_observation_object = None\n\n # Reset game + map\n self.match_controller.reset(randomize_team_order=False)\n # Running\n self.match_generator = self.match_controller.run_to_next_observation()\n try:\n next(self.match_generator)\n except StopIteration:\n # The game episode is done.\n is_game_error = False\n print('Episode run finished successfully!')\n except GameStepFailedException:\n # Game step failed.\n is_game_error = True\n\n return is_game_error", "def badExitPrevMolecule(self):\n if self.molecules > 0:\n # collect list of any atoms where num departed is not expected num per molecule\n departErrors = [(atom.name, count) for atom, count in self.departed.items() if self.departed[atom] != atom.value]\n if len(departErrors) > 0:\n print(\"too many or too few atoms exited between previous and this molecule creations.\")\n print( \"Exit counts:\", departErrors)\n return False\n return True", "def _check_scene_open(self):\n return self._engine.current_file_path() is not None", "def skip_all_animations(self):\n for child in self.children:\n child.skip_all_animations()\n \n # remove unskippable animations from queue\n unskippables = [anim for anim in self.queued_animations if not anim.skippable]\n self.queued_animations = list(filter(lambda anim: anim.skippable, self.queued_animations))\n while 
len(self.queued_animations) > 0:\n self.update(100)\n self.queued_animations = unskippables", "def hasPrevFrame(self):\n self.deleteDouble()\n return (len(self.activeFrames) > 1)", "def _animation_over(self) -> bool:\n \treturn self.current_height == 0 or self.current_height == self.original_height", "def __game_is_over(self):\n return not (self.__playing and self.__bricks_total > 0 and self.__num_lives > 0)", "def _check_episode_start_condition(self):\n vehicles = self._get_available_vehicles()\n if np.sum(vehicles == 0) >= self.episode_threshold:\n return True\n else:\n return False", "def is_game_over(self):\n\n if len(self.next_pieces) == 0:\n return True", "def _continue_decoding(_current_time_step, _all_finished, *_):\n continuation_check = \\\n tf.logical_and(tf.less(_current_time_step, max_prediction_length),\n tf.logical_not(tf.reduce_all(_all_finished)))\n\n return continuation_check", "def test_step_negative_indices(self):\n _, data_directory = self._collect_episode_data(\n num_episodes=6, max_episodes_per_file=3)\n with riegeli_backend_reader.RiegeliBackendReader(\n data_directory) as data_reader:\n np.testing.assert_equal(data_reader.steps[-1],\n data_reader.steps[len(data_reader.steps) - 1])\n np.testing.assert_equal(data_reader.steps[-len(data_reader.steps)],\n data_reader.steps[0])", "def Continue():\n # adjust this to take as many steps as you need\n return warp.top.it <= 500", "def _test_for_missing_move_ids(self):\n move_id = 0\n for s in self.program.steps:\n move_id += 1\n self.assertNotEqual(len(s.playback_frames), 0,\n f\"Step {s.name} has no playbackFrames. Move Id {move_id} is missing\")", "def num_remaining_scenes(self):\n options = (len(self.valid_scene_choices), self.NUM_SCENES)\n return min(options)", "def check_early_stop(self) -> bool:\n if self.args.early_stopping_steps == -1:\n return False\n return self._steps_since_new_prefix >= self.args.early_stopping_steps", "def check_miss(self):\n if self.ball.center.x > SCREEN_WIDTH:\n # We missed!\n self.score -= SCORE_MISS\n self.ball.restart()", "def is_class_absent(self):\n self.q(css='#spinner').first.click()\n self.wait_for_element_absence('.playing', 'Animation Stopped')", "def _should_continue(self):\n # should_continue = self.iter < 20\n # self.iter += 1\n # return should_continue\n if self.iter > self.max_iter:\n return False\n elif self.prev_elbo is None:\n self.prev_elbo = self._get_elbo()\n return True\n elbo = self._get_elbo()\n improvement = (elbo - self.prev_elbo) / self.prev_elbo\n self.prev_elbo = elbo\n self.iter += 1\n return self.epsilon < improvement", "def IsSkipped(self):\n state = self.GetState()\n return state.status == TestState.SKIPPED", "def try_advance(self):\n if not self.step.toclick:\n self.step.finished = True\n return True\n return False", "def test_step_out_of_bounds_indices(self):\n _, data_directory = self._collect_episode_data(\n num_episodes=6, max_episodes_per_file=3)\n with riegeli_backend_reader.RiegeliBackendReader(\n data_directory) as data_reader:\n self.assertRaises(IndexError, operator.getitem, data_reader.steps,\n len(data_reader.steps))\n self.assertRaises(IndexError, operator.getitem, data_reader.steps,\n -len(data_reader.steps) - 1)", "def KeepAdvancingSolutionLoop(self):\n return self.step < self.nsteps" ]
[ "0.5900172", "0.55246276", "0.5460398", "0.5384421", "0.53802437", "0.5294638", "0.5283734", "0.52820474", "0.52820474", "0.5280884", "0.5170907", "0.5156991", "0.5137927", "0.5131808", "0.5128726", "0.50902385", "0.50789577", "0.5078137", "0.50726855", "0.50664717", "0.5052654", "0.50353116", "0.5006745", "0.50040644", "0.4980986", "0.49799913", "0.4955757", "0.49453402", "0.49254307", "0.49098817" ]
0.746912
0
Returns an image from the current frame. The first argument passed to image represents the mode RGB with the alpha channel A. The data we read is from the currently bound frame buffer. We pass in 'raw' as the name of the decoder, 0 and 1 args are specifically used for the decoder and represent the stride and orientation. 0 means there is no padding expected between bytes and 1 represents the orientation and means the first line of the image is the bottom line on the screen. Returns PIL.Image: The PIL image of the array.
def get_image(self) -> Image.Image:
    raw_buffer_data = self.get_raw_frame_buffer_object_data()
    image = Image.frombytes(
        "RGBA",
        self.get_pixel_shape(),
        raw_buffer_data,
        "raw",
        "RGBA",
        0,
        -1,
    )
    return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grabFrame(self):\r\n \r\n data, w, h, orientation = self.grabRawFrame()\r\n return Image.fromstring(\"RGB\", (w, h), data, \"raw\", \"BGR\", 0, orientation)", "def grabRawFrame(self):\r\n \r\n self.surface = self.capture.get_image(self.surface)\r\n width, height = self.surface.get_size()\r\n return pygame.image.tostring(self.surface, 'RGB'), width, height, 1", "def decodeFrame(self, image):\n return image", "def getFrame(self):\n s, image = self.capture.read()\n return image", "def read(self):\n\n # Obtém frame da câmera.\n status , frame = super().read()\n\n if not status: return\n\n # Obtém a imagem.\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = Image.fromarray(frame)\n \n # Se a opção de efeito espelho estiver ativa, a imagem será invertida.\n if self.__mirror:\n frame = frame.transpose(Image.FLIP_LEFT_RIGHT)\n \n return ImageTk.PhotoImage(frame) , frame.size", "def get_frame(self, frame: int) -> BaseImage:\n return self.sequence[frame]", "def get_image(self, frame):\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]", "def raw_image(self):\n return self.data16.transpose()", "def get_frame(self):\n self._serial_port.close()\n self._serial_port.open()\n\n self._request_frame()\n\n serial_data = self._serial_port.readall()\n\n frame_start_idx = serial_data.find(BEGIN_FRAME) + len(BEGIN_FRAME)\n frame_end_idx = serial_data.find(END_FRAME)\n\n print serial_data[0:frame_start_idx]\n print serial_data[frame_end_idx:]\n\n raw_frame = serial_data[frame_start_idx:frame_end_idx]\n\n np_frame = np.fromstring(raw_frame, dtype=np.uint8)\n # np_frame = np_frame.reshape((30, 30))\n\n # image = cv2.fromarray(np_frame)\n\n # return image\n return np_frame", "def readFrame(self):\n\t\tsuccess, self.frameImage = self.vidcap.read()\n\t\treturn success, self.frameImage", "def get_image(self):\n self.flush_buffer()\n _, frame = self.cam.read()\n shift_frame = self.perspective_shift(frame)\n #shift_frame = None\n return frame, shift_frame", "def read(self):\r\n\t\t# get data from camera\r\n\t\tarray = self.ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, self.pitch, copy=False)\r\n\t\t# get frame as numpy array\r\n\t\tframe = np.reshape(array,(self.height.value, self.width.value, self.bytes_per_pixel))\r\n\t\t\r\n\t\t\"\"\"\r\n\t\tcamera_matrix = np.array([\r\n\t\t\t[4.5330796457901283e+02, 0., 6.1902229288626302e+02],\r\n\t\t\t[0., 4.5369175559310276e+02, 5.1298362120979994e+02],\r\n\t\t\t[0., 0., 1.]])\r\n\t\t\r\n\t\tdist_coeffs = np.array([\r\n\t\t\t-3.1812973406286371e-01, 9.6396352148682182e-02,\r\n\t\t\t2.9601124432187590e-03, 9.7700591472463412e-04,\r\n\t\t\t-1.1929681608809075e-02\r\n\t\t])\r\n\r\n\t\tframe = cv2.undistort(frame, camera_matrix, dist_coeffs, camera_matrix)\r\n\t\t\"\"\"\r\n\r\n\t\treturn frame", "def get_image(self):\n image = np.frombuffer(self.image, dtype=np.uint8)\n return image.reshape(*self.size, self.channels)", "def get_image():\n bgr = np.frombuffer(\n stream.read_frame().get_buffer_as_uint8(), dtype=np.uint8\n ).reshape(RESOLUTIONY, RESOLUTIONX, 3)\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n return rgb", "def image_decoder(rawbytes):\n img = Image.open(BytesIO(rawbytes))\n array = np.asarray(img, dtype=np.uint8)\n return array", "def getFrame(this, error=3, **kargs):\n\t\tthis.checkInit()\n\t\t\n\t\t# Arguments\n\t\terror = kargs.get('error', 3)\n\t\tnoReset = kargs.get('noReset', False)\n\t\t\n\t\t# \"\"\"\n\t\twhile error >= 0:\n\t\t\tret, frame = 
this._CAP.read()\n\t\t\tif ret:\n\t\t\t\ta = this._BAND.x * height(frame)\n\t\t\t\tb = this._BAND.y * height(frame)\n\t\t\t\tframe = frame[a:b:this._RES,:,:]\n\t\t\t\tif this._KERNEL is not None: # On applique un flou uniquement pour lisser le bruit\n\t\t\t\t\tframe = cv2.filter2D(frame, -1, this._KERNEL)\n\t\t\t\tthis._FRAME = this.onFrameGet(frame)\n\t\t\t\tbreak #bye\n\t\t\t\n\t\t\t# On a pas eu d'image...\n\t\t\telse: error -= 1\n\t\t\t\n\t\t# On doit reset ou pas ?\n\t\tif not noReset: this.resetBin()\n\t\treturn ret", "def parse_image(self, image):\n # parse the image data into a pygame surface for display or screenshot\n # raw image is BGRA\n # if image_type is segmentation, here will convert to the pre-defined color\n image.convert(self.image_type)\n\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1] # BGR -> RGB\n self.rgb_image = array\n self.pygame_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n\n self.last_image_seconds = image.timestamp\n self.last_image_frame_num = image.frame", "async def _retrieve_frame(self, mode: BufferRetrieveMode) -> RawArray:", "def get_frame(self, index):\n filename = self.get_filename(index)\n return plt.imread(fname=filename)", "def get_raw_img(image_name):\n # IMREAD_COLOR ignores transparency (!)\n return cv2.imread(image_name, cv2.IMREAD_COLOR)", "def frombuffer(mode, size, data, decoder_name=\"raw\", *args):\r\n\r\n _check_size(size)\r\n\r\n # may pass tuple instead of argument list\r\n if len(args) == 1 and isinstance(args[0], tuple):\r\n args = args[0]\r\n\r\n if decoder_name == \"raw\":\r\n if args == ():\r\n args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6\r\n if args[0] in _MAPMODES:\r\n channels, depth = Image()._get_channels_and_depth(mode)\r\n im = np.frombuffer(data)\r\n im = im.reshape((size[1], size[0], channels))\r\n im = im.astype(depth)\r\n im_ = new(mode, (1, 1))\r\n im_._instance = im\r\n im_.readonly = 1\r\n return im_\r\n\r\n return frombytes(mode, size, data, decoder_name, args)", "def _get_single_frame(self, real_t: int, **kwargs) -> Image:\n if self._is_tiff:\n ret = self._reader.read(index=..., page=real_t, **kwargs)\n else:\n ret = self._reader.read(index=real_t, **kwargs)\n ret = ret.view(Image)\n ret.frame_no = real_t\n return ret", "def get_frame(self,t):\n\n return pyfx.util.to_array(self._img_list[t],dtype=np.uint8,\n num_channels=4)", "def initialize_image(self):\n\n # Initialize image along the input frame shape\n image = np.zeros(self.frame_shape)\n\n # Crop or expand according to the reconstruction mode\n if self.mode == 'same':\n pass\n elif self.mode == 'full':\n image = np.pad(image, self.alignment.reference_pad_vector, mode='constant')\n elif self.mode == 'valid':\n # Estimate minimum overlap\n _shifts = np.array(self.alignment.shifts)\n _crop_by = np.max(_shifts, axis=0) - np.min(_shifts, axis=0)\n image = image[_crop_by[0]:, _crop_by[1]:]\n\n return image", "def read_disp_png(file_name):\n image_object = png.Reader(filename=file_name)\n image_direct = image_object.asDirect()\n image_data = list(image_direct[2])\n (w, h) = image_direct[3]['size']\n channel = len(image_data[0]) / w\n flow = np.zeros((h, w, channel), dtype=np.uint16)\n for i in range(len(image_data)):\n for j in range(channel):\n flow[i, :, j] = image_data[i][j::channel]\n return flow[:, :, 0] / 256", "async def get(self) -> RawArray:\r\n if self.empty():\r\n return None\r\n frame = 
self.frames[self._read_index]\r\n\r\n self._read_index = (self._read_index + 1) % self.capacity()\r\n self._is_full = False\r\n\r\n return frame", "def convertFrame(self):\n try:\n height,width=self.currentFrame.shape[:2]\n img=QtGui.QImage(self.currentFrame,\n width,\n height,\n QtGui.QImage.Format_RGB888)\n img=QtGui.QPixmap.fromImage(img)\n self.previousFrame = self.currentFrame\n return img\n except:\n return None", "def frombytes(mode, size, data, decoder_name=\"raw\", *args):\r\n\r\n _check_size(size)\r\n \r\n # may pass tuple instead of argument list\r\n if len(args) == 1 and isinstance(args[0], tuple):\r\n args = args[0]\r\n\r\n if decoder_name == \"raw\" and args == ():\r\n args = mode\r\n\r\n im = new(mode, size)\r\n im.frombytes(mode, size, data, decoder_name, args)\r\n return im", "def convertFrame(self):\r\n try:\r\n height, width = self.currentFrame.shape[:2]\r\n img = QtGui.QImage(self.currentFrame,\r\n width,\r\n height,\r\n QtGui.QImage.Format_RGB888)\r\n img = QtGui.QPixmap.fromImage(img)\r\n self.previousFrame = self.currentFrame\r\n return img\r\n except:\r\n return None", "def get_image(self, frame):\n rect = pygame.Rect(0, 0, self.width, self.height)\n msec = frame * config.MS_PER_FRAME\n frame = msec // 100\n rect.y = self.height - (frame % self.height)\n return self.double.subsurface(rect)" ]
[ "0.6702811", "0.6255629", "0.62121195", "0.5816756", "0.5771386", "0.57592493", "0.56033903", "0.5568716", "0.55405825", "0.5520708", "0.54805356", "0.54444456", "0.54290974", "0.5426007", "0.5381863", "0.5361281", "0.5349747", "0.5332689", "0.5305605", "0.53024167", "0.52890664", "0.5279853", "0.5256148", "0.52420264", "0.52360904", "0.5213078", "0.5206609", "0.5200334", "0.5174259", "0.5173469" ]
0.6270685
1
Set the commitment to sha256(serialization of public key P2) Return in hex to calling function
def get_commitment(self):
    if not self.P2:
        raise PoDLEError("Cannot construct commitment, no P2 available")
    if not isinstance(self.P2, secp256k1.PublicKey):
        raise PoDLEError("Cannot construct commitment, P2 is not a pubkey")
    self.commitment = hashlib.sha256(self.P2.serialize()).digest()
    return safe_hexlify(self.commitment)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sha256(self):\n return self._sha256", "def hash(self) -> bytes:", "def printsha(self):\n print(self.sha256.hex())", "def RSA_SIGNATURE_HASH() :\n return \"SHA-256\"", "def SHA256(self) -> _n_0_t_3[_n_0_t_9]:", "def hash(self) -> str:\r\n ...", "def sha256(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sha256\")", "def sha256_hexoutput(in_str):\r\n return sha256(in_str.encode('ascii')).hexdigest()", "def commit_hash(self):\n return self._commit_hash", "def _sha256(sha256):\n if not sha256:\n sha256 = \"0\" * 64\n\n return sha256", "def test_hash_sha256(self):\n block = self.blockchain.new_block(self.proof, self.previous_hash)\n hash_ = self.blockchain.hash(block)\n\n self.assertIsInstance(hash_, str)\n self.assertEqual(hashlib.sha256(json.dumps(block, sort_keys=True).encode()).hexdigest(), hash_)", "def sha256(self):\n return sha256file(self.abspath)", "def get_hash(self, params):\n return self.sha", "def hash(self) -> bytes:\n block_string = json.dumps(self.serialize(), sort_keys=True).encode()\n return bytes.fromhex(hashlib.sha256(block_string).hexdigest())", "def _get_signature(value):\n mySha = hashlib.sha256()\n mySha.update(value)\n # print mySha.hexdigest()\n return mySha.hexdigest()", "def setHash(self):\n chash_string = str(self.code) + str(\"CAMPAIGN\") + str(self.created_at)\n chash = hashlib.sha1()\n chash.update(chash_string)\n \n self.chash = chash.hexdigest()\n self.save()", "def sha256(value):\n return hashlib.sha256(value).hexdigest()", "def prehash_message(self, timestamp, account, method, params, nonce):\n first = hashlib.sha256(py23_bytes(timestamp + account + method + params, self.ENCODING))\n return self.K + first.digest() + nonce", "def sha256(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)\n d.update(data)\n return d.digest()", "def get_key_id(self):\n jwk_data = {\n \"crv\": \"P-256\",\n \"kty\": \"EC\",\n \"x\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().x.to_bytes(32, \"big\")).decode().replace(\"=\", \"\"),\n \"y\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().y.to_bytes(32, \"big\")).decode().replace(\"=\", \"\")\n }\n jwk = json.dumps(jwk_data, separators=(',', ':'))\n return hashlib.sha256(jwk.encode()).digest()", "def fingerprint_public_key_blob(blob):\n hash = sha256(blob).digest()\n encoded = b64encode(hash).decode('UTF-8').rstrip('=')\n return 'SHA256:{}'.format(encoded)", "def hash_bytes_256(b: bytes) -> str:\n return hashlib.sha256(b).hexdigest()", "def sha256(cls, value):\n assert type(value) is str\n return int(sha256(value.encode()).hexdigest(), 16)", "def sha1(self) -> str:\n return self.data.sha1", "def sha256(self, sha256):\n\n self._sha256 = sha256", "def hex(self) -> str:\n return self.__hash.hexdigest()", "def _electrum_script_hash(script: bytes) -> str:\n bytes = bytearray(scripts.sha256(script))\n bytes.reverse()\n return bytes.hex()", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def hash(self) -> str:\n return pulumi.get(self, \"hash\")" ]
[ "0.6685336", "0.6628729", "0.65991706", "0.6530299", "0.64776933", "0.6459898", "0.6440979", "0.6411019", "0.6385267", "0.62253946", "0.6224505", "0.6205821", "0.61614484", "0.6151875", "0.61337245", "0.6097328", "0.60924083", "0.60726136", "0.60650444", "0.6061097", "0.60603863", "0.60590285", "0.60402334", "0.60160035", "0.6001646", "0.5998102", "0.59861934", "0.59641415", "0.59641415", "0.59559804" ]
0.70035625
0
Encapsulate all the data representing the proof in a dict for client functions. Data output in hex.
def reveal(self): if not all([self.u, self.P, self.P2, self.s, self.e]): raise PoDLEError("Cannot generate proof, data is missing") if not self.commitment: self.get_commitment() Phex, P2hex, shex, ehex, commit = [ safe_hexlify(x) for x in [self.P.serialize(), self.P2.serialize(), self.s, self.e, self.commitment]] return {'used': str(self.used), 'utxo': self.u, 'P': Phex, 'P2': P2hex, 'commit': commit, 'sig': shex, 'e': ehex}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def json_data(self):\n self.check_proof()\n return {\n \"vars\": [{'name': v.name, 'T': str(v.T)} for v in self.vars],\n \"proof\": sum([printer.export_proof_item(self.thy, item, unicode=True, highlight=True)\n for item in self.prf.items], []),\n \"report\": self.rpt.json_data(),\n \"method_sig\": self.get_method_sig()\n }", "def _make_data(cls, data: 'Data_ARP') -> 'dict[str, Any]': # type: ignore[override]\n return {\n 'htype': data.htype,\n 'ptype': data.ptype,\n 'hlen': data.hlen,\n 'plen': data.plen,\n 'oper': data.oper,\n 'sha': data.sha,\n 'spa': data.spa,\n 'tha': data.tha,\n 'tpa': data.tpa,\n 'payload': cls._make_payload(data),\n }", "def final_proof(self, data):\n n = data['n']\n c = data['c']\n t = data['t']\n n_s = aes_decode(n, c, t, self.aes)\n to_send = {'id': self.id, 'dest': 'confirmation', 'n': n_s}\n print(Colors.BOLD + 'N --> S: N_S' + Colors.ENDC)\n print('\\t' + Colors.BOLD + 'N_S: ' + Colors.ENDC + str(n_s))\n self.nodesocket.sendall(pickle.dumps(to_send))\n data_return = pickle.loads(self.nodesocket.recv(MAX_SIZE))\n return data_return", "def to_data(self) -> dict:\n return {'pingData': {'challenge': self.ping_challenge}}", "def to_bytes(self) -> bytes:\n proposal_info_in_dict = vars(self)\n proposal_info_in_dict[\"id\"] = bytes.hex(proposal_info_in_dict[\"id\"])\n proposal_info_in_dict[\"proposer\"] = str(proposal_info_in_dict[\"proposer\"])\n return json_dumps(proposal_info_in_dict).encode()", "def main():\n print(dumps(get_data()))\n return 0", "def raw_data() -> Dict:\n return {\"neighbourhood\":\"Buttes-Montmartre\",\"room_type\":\"Entire home/apt\",\"minimum_nights\":1.555,\"mois\":2,\"voyageurs\":2.5,\"chambres\":1,\"lits\":1,\"salle_de_bains\":1}", "def data_from_result():\n return dict(\n gene=\"BRAF\",\n strand=\"-\",\n tx_pos_range=(1802, 1921),\n alt_pos_range=(140453074, 140453193),\n alt_aln_method=\"splign\",\n tx_exon_id=780494,\n alt_exon_id=1927263\n )", "def to_dictionary(self):\n return {'pubkey': self.pubkey.to_dictionary(), 'T': self.T, 'y': self.y, 'pi': self.pi}", "def to_dict(self):\n return {\n 'type': 'fulfillment',\n 'type_id': self.TYPE_ID,\n 'bitmask': self.bitmask,\n 'public_key': self.public_key.encode(encoding='base58').decode(),\n 'signature': base58.b58encode(self.signature) if self.signature else None\n }", "def __bytes__(self):\n byteout = bytearray()\n for index in range(1, 15):\n key = \"d\" + str(index)\n if self._user_data.get(key) is not None:\n byteout.append(self._user_data[key])\n else:\n byteout.append(0x00)\n return bytes(byteout)", "def serialize(self):\n\n data = {}\n\n data[\"verified\"] = True\n\n return data", "def hex_probabilities(self):\n return {hex(key): value for key, value in self.items()}", "def dict() -> Dict[str, Pin]:", "async def verify_proof(self, proof_req: dict, proof: dict, schema: dict, claim_def: dict) -> str: # verifier\n\n return json.dumps(\n await anoncreds.verifier_verify_proof(\n json.dumps(proof_req),\n json.dumps(proof),\n json.dumps({ # schemas_json\n claim_uuid: schema for claim_uuid in proof['proofs']\n }),\n json.dumps({ # claim_defs_json\n claim_uuid: claim_def for claim_uuid in proof['proofs']\n }),\n json.dumps({}) # revoc_regs_json\n )\n )", "def getJSONData(self):\n return {\"pubkey\": self.pubkey, \"privkey\": self.privkey}", "def test_to_rich_dict(self):\n F81().to_rich_dict()\n HKY85().to_rich_dict()\n GN().to_rich_dict()\n # TODO need to assess ability to reconstruct from this", "def get_data(self):\n return {\"bcode\": self.barcode, \"prodid\": self.product_id}", "def 
data() -> str:\n return \"1721\\n979\\n366\\n299\\n675\\n1456\"", "def __repr__(self):\n # For unknown rdata just default to hex\n return binascii.hexlify(self.data).decode()", "def update(self, proof: dict) -> dict:\n proof[\"proofPurpose\"] = self.term\n return proof", "def dump(self) -> dict[Any, str]:\r\n ...", "def data(self) -> dict[str, Any]:\n raise NotImplementedError()", "def print_dict(data):\n print data", "def __dict__(self):\r\n result = {}\r\n result['block_type'] = 'register'\r\n result['prev_hash'] = base64.b64encode(self.prev_hash).decode()\r\n result['timestamp'] = self.time\r\n result['user_id'] = self.user_id\r\n result['public_key'] = base64.b64encode(self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)).decode()\r\n return result", "def serialize(self):\n return {\n 'did' : self.did,\n 'name' : self.name,\n 'passwd' : self.passwd,\n 'email' : self.email,\n 'phone' : self.phone,\n 'addr_1' : self.addr_1,\n 'addr_2' : self.addr_2,\n 'city' : self.city,\n 'state' : self.state,\n 'zip' : self.zip,\n 'grade' : self.grade,\n }", "def pro_code_dict(code=False, inverse=False, return_all=False):\n\n pro_code_dict = {\"0500\": \"Date\",\n \"0501\": \"height [> 0: top, < 0: bottom of elem.] (cm)\",\n \"0502\": \"element density (kg m-3)\",\n \"0503\": \"element temperature (degC)\",\n \"0504\": \"element ID (1)\",\n \"0506\": \"liquid water content by volume (%)\",\n \"0508\": \"dendricity (1)\",\n \"0509\": \"sphericity (1)\",\n \"0510\": \"coordination number (1)\",\n \"0511\": \"bond size (mm)\",\n \"0512\": \"grain size (mm)\",\n \"0513\": \"grain type (Swiss Code F1F2F3)\",\n \"0514\": \"grain type, grain size (mm), and density (kg m-3) of SH at surface\",\n \"0515\": \"ice volume fraction (%)\",\n \"0516\": \"air volume fraction (%)\",\n \"0517\": \"stress in (kPa)\",\n \"0518\": \"viscosity (GPa s)\",\n \"0519\": \"soil volume fraction (%)\",\n \"0520\": \"temperature gradient (K m-1)\",\n \"0521\": \"thermal conductivity (W K-1 m-1)\",\n \"0522\": \"absorbed shortwave radiation (W m-2)\",\n \"0523\": \"viscous deformation rate (1.e-6 s-1)\",\n \"0531\": \"deformation rate stability index Sdef\",\n \"0532\": \"natural stability index Sn38\",\n \"0533\": \"stability index Sk38\",\n \"0534\": \"hand hardness either (N) or index steps (1)\",\n \"0535\": \"optical equivalent grain size (mm)\",\n \"0540\": \"bulk salinity (g/kg)\",\n \"0541\": \"brine salinity (g/kg)\",\n \"0601\": \"snow shear strength (kPa)\",\n \"0602\": \"grain size difference (mm)\",\n \"0603\": \"hardness difference (1)\",\n \"0604\": \"ssi\",\n \"0605\": \"inverse texture index ITI (Mg m-4)\",\n \"0606\": \"critical cut length (m)\", }\n\n if inverse:\n inverse = {value: key for key, value in pro_code_dict.items()}\n return(inverse[code])\n if code:\n return (pro_code_dict[code])\n if return_all:\n return (pro_code_dict)", "def to_dictionary(self):\n return {'pubkey': self.pubkey.to_dictionary(), 'T': self.T,\n 'C': self.C.to_dictionary(), 'D': self.D.to_dictionary(), 'sigma': self.sigma.to_dictionary()}", "async def create_proof(\n self,\n proof_req: dict,\n schema: dict,\n claim_def: dict,\n requested_claims: dict = None) -> str: # prover\n\n if self._master_secret is None:\n raise ValueError('Master secret is not set')\n\n # TODO: support empty requested-attributes?\n # TODO: support multiple schemata? 
Tricky.\n\n proof_json = await anoncreds.prover_create_proof(\n self.wallet_handle,\n json.dumps(proof_req),\n json.dumps(requested_claims),\n json.dumps({ # schemas_json\n claim_uuid[0]: schema\n for claim_uuid in requested_claims['requested_attrs'].values()\n }),\n self._master_secret,\n json.dumps({ # claim_defs_json\n claim_uuid[0]: claim_def\n for claim_uuid in requested_claims['requested_attrs'].values()\n }),\n json.dumps({}) # revoc_regs_json\n )\n\n return proof_json", "def toData(self):\n\n lines = []\n # 1. Request and protocol version\n lines.append(self.request + \" \" + BANNER)\n # 2. Request arguments\n lines.extend(['%s: %s' % (arg, self.args[arg]) for arg in self.args])\n # 3. End of message (double CR-LF)\n data = \"\\r\\n\".join(lines) + \"\\r\\n\\r\\n\"\n # In debug mode, parse our own message to check it is well-formed\n assert checkMessage(data), \"Bad generated message: \" + data\n return data" ]
[ "0.60062224", "0.59404445", "0.58804053", "0.585302", "0.5616752", "0.5561285", "0.55145526", "0.5495601", "0.5492765", "0.54911596", "0.5489464", "0.54656714", "0.5438447", "0.5421299", "0.5415515", "0.54064167", "0.5376681", "0.53762263", "0.53696877", "0.5365422", "0.5362772", "0.53577816", "0.53486305", "0.53047717", "0.5303368", "0.53028846", "0.52971685", "0.52744544", "0.5274115", "0.5256612" ]
0.6377318
0
For an object created without a private key, check that the opened commitment verifies for at least one NUMS point as defined by the range in index_range
def verify(self, commitment, index_range): if not all([self.P, self.P2, self.s, self.e]): raise PoDLE("Verify called without sufficient data") if not self.get_commitment() == commitment: return False for J in [getNUMS(i) for i in index_range]: sig_priv = secp256k1.PrivateKey(self.s, raw=True, ctx=ctx) sG = sig_priv.pubkey sJ = J.tweak_mul(self.s) e_int = decode(self.e, 256) minus_e = encode(-e_int % N, 256, minlen=32) minus_e_P = self.P.tweak_mul(minus_e) minus_e_P2 = self.P2.tweak_mul(minus_e) KG = dummy_pub.combine([sG.public_key, minus_e_P.public_key]) KJ = dummy_pub.combine([sJ.public_key, minus_e_P2.public_key]) KGser = secp256k1.PublicKey(KG, ctx=ctx).serialize() KJser = secp256k1.PublicKey(KJ, ctx=ctx).serialize() #check 2: e =?= H(K_G || K_J || P || P2) e_check = hashlib.sha256( KGser + KJser + self.P.serialize() + self.P2.serialize()).digest() if e_check == self.e: return True #commitment fails for any NUMS in the provided range return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_validity(self):\n cnt = np.array([len(v) for v in self.t_signatures.values()])\n cnt_n = len(cnt) - self.min_bins\n idx = None\n if cnt_n < 0:\n self.valid = False\n else:\n y = [np.all(cnt[i:(i + self.min_bins)] >= self.min_neigh) for i in range(cnt_n)]\n if sum(y) <= 0:\n self.valid = False\n elif sum(y) == 1:\n self.valid = True\n idx = np.where(y)[0][0]\n else:\n # If many sequences are valid, select the one with the most letters\n self.valid = True\n w_list = [self.w_signatures[i] for i in range(len(self.w_signatures))]\n w = [sum(w_list[i:(i + self.min_bins)]) if y[i] else 0 for i in range(cnt_n)]\n idx = np.argmax(w)\n self._valid_idx = idx", "def __is_valid(self, pos):\n return 0 <= pos[0] < self._n and 0 <= pos[1] < self._n", "def _inrange(self, index):\n if len(index) != self.ndim:\n raise Exception('SparseN tensor has %d dimensions, and requires the same number of indices.'%self.ndim)\n for ii, ss in zip(index,self.shape):\n if ii < 0 or ii >= ss:\n raise Exception('Index is out of range: %d'%index)", "def check_collisions(self):", "def valid_index(self, index):\n if 0 <= index < self._list_size:\n return True\n else:\n return False", "def check_validity(self):", "def __verify_index(self, index):\n if not isinstance(index, int):\n raise TypeError(\"Index must be of type int\")\n elif index >= self.length or index < -self.length:\n raise IndexError(\"Index out of bounds\")\n return True", "def _test_out_of_range(self):\n self.cdbconf.setup('KKG')\n self.cdbconf.setConfiguration('CUSTOM_OPT')\n az, el, latitude = [radians(50)] * 3\n site_info = {'latitude': latitude}\n self.p.setup(site_info, self.source, self.device)\n self.p.setRewindingMode('AUTO')\n offset = 20\n max_limit = self.device.getMaxLimit() \n min_limit = self.device.getMinLimit()\n Pis = max_limit - offset/2\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setPosition(Pis)\n time.sleep(0.2) # Wait a bit for the setup\n max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()\n expected = Pis - max_rewinding_steps*self.device.getStep() + offset\n self.source.setAzimuth(az)\n self.source.setElevation(el)\n self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setOffset(offset)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.assertEqual(self.device.getActPosition(), expected)", "def check(self, num_objfun):\n if not self.gp:\n raise Exception(\"No number of grid points provided\")\n\n if self.nadir_p and len(self.nadir_p) != num_objfun - 1:\n raise Exception(\"Too many or too few nadir points provided\")", "def _inside_op_range(self, idx):\n\n if idx < self._parameters.op_range[0]:\n return False\n return (self._parameters.op_range[1] < 0 or\n idx <= self._parameters.op_range[1])", "def _randomValidStateIndex(self, minimum_without_terminal):\n index_lowerBound = minimum_without_terminal - 1\n # We try out an index in the acceptable range of the replay memory\n index = self._random_state.randint(index_lowerBound, self.n_elems-1) \n\n # Check if slice is valid wrt terminals\n # The selected index may correspond to a terminal transition but not \n # the previous minimum_without_terminal-1 transition\n firstTry = index\n startWrapped = False\n while True:\n i = index-1\n processed = 0\n for _ in range(minimum_without_terminal-1):\n if (i < 0 or self._terminals[i]):\n break;\n\n i -= 1\n processed += 1\n if (processed < minimum_without_terminal - 1):\n # if we stopped prematurely, shift 
slice to the left and try again\n index = i\n if (index < index_lowerBound):\n startWrapped = True\n index = self.n_elems - 1\n if (startWrapped and index <= firstTry):\n raise SliceError(\"Could not find a state with full histories\")\n else:\n # else index was ok according to terminals\n return index", "def __check(self):\n if len(self._data)!=len(self._ptbins)+1: \n raise IndexError('Pt bins mismatch')\n for ptbin in self._data:\n if len(ptbin)!=len(self._etabins)+1:\n raise IndexError('Eta bins mismatch')", "def __is_valid(self, subscript):\n return ((0,0) <= subscript and subscript < self.size)", "def _validate_indexes(self, row, col):\n if min(row, col) < 0 or max(row, col) >= self._n:\n raise IndexError(\n \"Incorrect position (%d, %d) in grid of size %d\" % (\n row, col, self._n\n )\n )", "def _check_validity(self):\n pass", "def test_cv_input_out_of_range(self):\n index = self.module.user_lookup_table_maker(b\"S|0090|3110|38399|CS\")\n self.assertEqual(index, 5001,\n msg=f\"test_LS_input_out_of_range returned and \"\n f\"index of {index} instead of 5001\")", "def _randomValidStateIndex(self, minimum_without_terminal):\n index_lowerBound = minimum_without_terminal - 1\n # We try out an index in the acceptable range of the replay memory\n # REMOVED -1 FROM UPPER BOUND (self.n_elems - 1)\n index = self._random_state.randint(index_lowerBound, self.n_elems)\n\n # Check if slice is valid wrt terminals\n # The selected index may correspond to a terminal transition but not\n # the previous minimum_without_terminal-1 transition\n firstTry = index\n startWrapped = False\n while True:\n i = index-1\n processed = 0\n for _ in range(minimum_without_terminal-1):\n if (i < 0 or self._terminals[i]):\n break;\n\n i -= 1\n processed += 1\n if (processed < minimum_without_terminal - 1):\n # if we stopped prematurely, shift slice to the left and try again\n index = i\n if (index < index_lowerBound):\n startWrapped = True\n index = self.n_elems - 1\n if (startWrapped and index <= firstTry):\n raise SliceError(\"Could not find a state with full histories\")\n else:\n # else index was ok according to terminals\n return index", "def check_bounds(self, index):\n if index < self.lower_bound or index > self.upper_bound:\n return False\n return True", "def test_block_bad_consensus(self):\n pass", "def test_out_of_bounds(self) -> None:\n\n self.assertIsInstance(self.movement.out_of_bounds(self.pop.get_person(),\n np.array([[0,1]] * 10),np.array([[0,1]] * 10)), np.ndarray)\n self.pop.persons[:,idx.speed] = 1\n self.pop.persons[:,idx.x_axis] = 1.1\n self.pop.persons[:,idx.y_axis] = 1.1\n self.pop.persons[:,idx.x_dir] = 0.5\n self.pop.persons[:,idx.y_dir] = 0.5\n\n self.assertLess(list(self.movement.out_of_bounds(self.pop.get_person(),\n np.array([[0,1]] * 10),np.array([[0,1]] * 10))[:,idx.x_dir]), [0]*10)\n self.assertLess(list(self.movement.out_of_bounds(self.pop.get_person(),\n np.array([[0,1]] * 10),np.array([[0,1]] * 10))[:,idx.x_dir]), [0]*10)\n\n self.pop.persons[:,idx.x_axis] = -0.1\n self.pop.persons[:,idx.y_axis] = -0.1\n self.pop.persons[:,idx.x_dir] = -0.5\n self.pop.persons[:,idx.y_dir] = -0.5\n self.assertGreater(list(self.movement.out_of_bounds(self.pop.get_person(),\n np.array([[0,1]] * 10),np.array([[0,1]] * 10))[:,idx.x_dir]), [0]*10)\n self.assertGreater(list(self.movement.out_of_bounds(self.pop.get_person(),\n np.array([[0,1]] * 10),np.array([[0,1]] * 10))[:,idx.x_dir]), [0]*10)", "def isRangeValid(self) -> bool:\n ...", "def _position_validity_checker(position, start, n_elements):\n 
_assert_shape(position, (MaxDimension.value(), n_elements + 1), \"position\")\n\n # Check if the start position of the rod and first entry of position array are the same\n assert_allclose(\n position[..., 0],\n start,\n atol=Tolerance.atol(),\n err_msg=str(\n \"First entry of position\" + \" (\" + str(position[..., 0]) + \" ) \"\n \" is different than start \" + \" (\" + str(start) + \" ) \"\n ),\n )", "def test_pasture_bounds():\n p = Player(\"p0\", wood=20, shape=(2, 2), rooms=[])\n pastures = [Pasture([(0, 0), (1, 0)]), Pasture([(0, 1), (1, 1)])]\n p.build_pastures(pastures)\n\n p = Player(\"p0\", wood=20, shape=(2, 1), rooms=[])\n with pytest.raises(IndexError):\n p.build_pastures(pastures)", "def _is_valid_pose(self):\n contacts = self.gc.getRobot().robot.contacts\n n_object_contacts = 0\n is_thumb_in_contact = False\n for contact in contacts:\n if contact.body1 == self.object_name:\n n_object_contacts += 1\n if contact.body2 == '_chain4_link2':\n is_thumb_in_contact = True\n elif contact.body2 == self.object_name:\n n_object_contacts += 1\n if contact.body1 == '_chain4_link2':\n is_thumb_in_contact = True\n\n is_valid = n_object_contacts >= 2\n return is_valid", "def omt_check(grade_list_idx, grade_list_i, grade_list_j):\n return grade_list_idx == (grade_list_i + grade_list_j)", "def verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()", "def _is_valid_count(self, count: int, gp: GriddedPerm) -> bool:\n return self._point_in_fuse_region(gp) + 1 == count", "def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)", "def violated(self) -> bool:\n ...", "def test_creation_incorrect_softbounds_count():\n with pytest.raises(ValueError) as __:\n value = 1\n __ = param.Integer(value=value, softbounds=[0, 10, 20])" ]
[ "0.60313314", "0.57843703", "0.5700702", "0.5683255", "0.5668263", "0.5665159", "0.56576747", "0.56090784", "0.5576112", "0.55565137", "0.54981154", "0.5470356", "0.54564524", "0.5453247", "0.5430382", "0.53956276", "0.5395109", "0.5362661", "0.5359905", "0.53594095", "0.5355234", "0.5320283", "0.52765554", "0.5276155", "0.5272891", "0.5261806", "0.5254955", "0.5252991", "0.52510065", "0.5249149" ]
0.66429484
0
Returns the public key binary representation of secp256k1 G
def getG(compressed=True): priv = "\x00"*31 + "\x01" G = secp256k1.PrivateKey(priv, ctx=ctx).pubkey.serialize(compressed) return G
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def derive_public_key(private_key):\r\n\r\n Q = int.from_bytes(private_key, byteorder='big') * BIP32_CURVE.generator\r\n xstr = Q.x().to_bytes(32, byteorder='big')\r\n parity = Q.y() & 1\r\n return (2 + parity).to_bytes(1, byteorder='big') + xstr", "def generate_ecc_public_key(private_key: EllipticCurvePrivateKeyWithSerialization) -> EllipticCurvePublicKey:\n return private_key.public_key()", "def gen_public_key(g, private, p):\n return pow(g, private, p)", "def encode_public_key(value: PublicKey) -> bytes:\n return bytes([value.algo.value]) + value.pbk", "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def get_public_compressed_curve_point(private_key):\n encoded_point = private_key.public_key().public_numbers().encode_point()\n return base64.b64encode(encoded_point)", "def get_key_id(self):\n jwk_data = {\n \"crv\": \"P-256\",\n \"kty\": \"EC\",\n \"x\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().x.to_bytes(32, \"big\")).decode().replace(\"=\", \"\"),\n \"y\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().y.to_bytes(32, \"big\")).decode().replace(\"=\", \"\")\n }\n jwk = json.dumps(jwk_data, separators=(',', ':'))\n return hashlib.sha256(jwk.encode()).digest()", "async def client_public_key(self) -> bytes:\n raise NotImplementedError", "def get_pub_key(self):\n return \"RSA {0}\".format(self._cert.get_pubkey().bits)", "def generate_hash(self):\n if not self.public_key:\n raise ValueError('Requires a public publicKey')\n return self.public_key.encode(encoding='bytes')", "def sendPublicKey(g, p, s):\r\n status = \"120 PubKey \" + str(computePublicKey(g, p, s))\r\n return status", "def genPublicKey(self):\n return pow(self.generator, self.privateKey, self.prime)", "def PublicKey(self) -> _n_9_t_1:", "def PublicKey(self) -> _n_9_t_1:", "async def server_public_key(self) -> bytes:\n raise NotImplementedError", "def parse_public_key(data: bytes) -> str:\n key_prefix = {\n b'\\x00': b'edpk',\n b'\\x01': b'sppk',\n b'\\x02': b'p2pk'\n }\n return base58_encode(data[1:], key_prefix[data[:1]]).decode()", "def generate_signing_keys():\n return SigningKey.generate(curve=SECP256k1)", "def rawPubkey(self):\n # note the first byte determines what type of address\n # and the last four are checksums\n return a2b_base58(self.pubkey)[1:-4]", "def generate_key_pair(G):\r\n\r\n global random\r\n\r\n if random == None:\r\n random = hash_drbg.HashDRBG()\r\n\r\n if G.order == None:\r\n raise RuntimeError(\"Base point must have order.\")\r\n\r\n key_size = log(ec.leftmost_bit(G.order)) / log(2)\r\n key_size = int(ceil(key_size) / 2)\r\n private_key = 1\r\n\r\n while private_key <= 1:\r\n private_key = random(key_size) #generates a random number\r\n #with twice the required bits\r\n private_key %= G.order\r\n\r\n return (private_key, G * private_key)", "def public_key(self):\n return PublicKey(self._sk.public_key().public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw))", "def public_key(self):", "def gen_public_key(n, e):\n\n # Assign key parameters\n key_params = (n, e)\n # Construct private key\n key = RSA.construct(key_params)\n\n return key.exportKey()", "def get_shared_key(public, private, p):\n s = pow(public, private, p)\n s_hex = hex(s)[2:]\n # Make the length of s_hex a multiple of 2\n if 
len(s_hex) % 2 != 0:\n s_hex = '0' + s_hex\n # Convert hex to bytes\n s_bytes = binascii.unhexlify(s_hex)\n # Hash and return the hex result\n return sha256(s_bytes).digest()", "def get_public_key(self) -> str:\n raise NotImplementedError(\"Please implement your own get_public_key() method\")", "def generate_sharedsecret_bytes(self):\n return number_to_string(\n self.generate_sharedsecret(),\n self.private_key.curve.order)", "def generate_key(self)->bytes:\n return os.urandom(32)", "def get_public_key_in_der(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public", "def generate_key():\n return unicode(hashlib.sha224(str(random.getrandbits(128))).hexdigest())", "def getPublicKey(self):\n\n return PublicKey(POW = self.get_POW().getPublicKey())", "def getPublicKey(self):\n\n return PublicKey(POW = self.get_POW().getPublicKey())" ]
[ "0.6999427", "0.69973236", "0.69607085", "0.6955401", "0.6817513", "0.6802411", "0.6732746", "0.6685661", "0.6667977", "0.654074", "0.6512128", "0.64995784", "0.6488834", "0.6488834", "0.6424222", "0.6328244", "0.6264105", "0.6260353", "0.6245178", "0.6235156", "0.61760545", "0.61738944", "0.61338454", "0.60941005", "0.60821444", "0.6058184", "0.6051219", "0.60469013", "0.6027736", "0.6027736" ]
0.7992583
0
Taking secp256k1's G as a seed, either in compressed or uncompressed form, append "index" as a byte, and append a second byte "counter" try to create a new NUMS base point from the sha256 of that bytestring. Loop counter and alternate compressed/uncompressed until finding a valid curve point. The first such point is considered as "the" NUMS base point alternative for this index value. The search process is of course deterministic/repeatable, so it's fine to just store a list of all the correct values for each index, but for transparency left in code for initialization by any user. The NUMS generator generated is returned as a secp256k1.PublicKey.
def getNUMS(index=0): assert index in range(256) nums_point = None for G in [getG(True), getG(False)]: seed = G + chr(index) for counter in range(256): seed_c = seed + chr(counter) hashed_seed = hashlib.sha256(seed_c).digest() #Every x-coord on the curve has two y-values, encoded #in compressed form with 02/03 parity byte. We just #choose the former. claimed_point = "\x02" + hashed_seed try: nums_point = secp256k1.PublicKey(claimed_point, raw=True, ctx=ctx) return nums_point except: continue assert False, "It seems inconceivable, doesn't it?" # pragma: no cover
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_podle(self, index=0):\n #TODO nonce could be rfc6979?\n k = os.urandom(32)\n J = getNUMS(index)\n KG = secp256k1.PrivateKey(k, ctx=ctx).pubkey\n KJ = J.tweak_mul(k)\n self.P2 = getP2(self.priv, J)\n self.get_commitment()\n self.e = hashlib.sha256(''.join(\n [x.serialize() for x in [KG, KJ, self.P, self.P2]])).digest()\n k_int = decode(k, 256)\n priv_int = decode(self.priv.private_key, 256)\n e_int = decode(self.e, 256)\n sig_int = (k_int + priv_int*e_int) % N\n self.s = encode(sig_int, 256, minlen=32)\n return self.reveal()", "def deterministic_k(self, z):\n k = b'\\x00' * 32\n v = b'\\x01' * 32\n if z > N:\n z -= N\n z_bytes = z.to_bytes(32, 'big')\n secret_bytes = self.secret.to_bytes(32, 'big')\n s256 = sha256\n k = hmac.new(k, v + b'\\x00' + secret_bytes + z_bytes, s256).digest()\n v = hmac.new(k, v, s256).digest()\n k = hmac.new(k, v + b'\\x01' + secret_bytes + z_bytes, s256).digest()\n v = hmac.new(k, v, s256).digest()\n while True:\n v = hmac.new(k, v, s256).digest()\n candidate = int.from_bytes(v, 'big')\n if candidate >= 1 and candidate < N:\n return candidate\n k = hmac.new(k, v + b'\\x00', s256).digest()\n v = hmac.new(k, v, s256).digest()", "def generate_index(size=20):\n return hexlify(np.random.rand(100))[:size].decode()", "def gen_small(s, n):\n\tdeg = n\n\tcoeff_vector = deg*[_sage_const_0 ]\n\tcoeff_vector[deg-_sage_const_1 ] = _sage_const_1 \n\tcoeff_vector[_sage_const_0 ] = _sage_const_1 \n\tindex_set = set({_sage_const_0 ,deg-_sage_const_1 })\n\tfor i in range(s-_sage_const_2 ):\n\t# add 1's\n\t\twhile True:\n\t\t\tindex1 = ZZ.random_element(_sage_const_1 ,deg-_sage_const_1 )\n\t\t\tif not index1 in index_set:\n\t\t\t\tcoeff_vector[index1] = _sage_const_1 \n\t\t\t\tindex_set = index_set.union({index1})\n\t\t\t\tbreak\n\t# add -1's\n\tfor i in range(s):\n\t\twhile True:\n\t\t\tindex2 = ZZ.random_element(_sage_const_1 ,deg-_sage_const_1 )\n\t\t\tif not index2 in index_set:\n\t\t\t\tcoeff_vector[index2] = -_sage_const_1 \n\t\t\t\tindex_set = index_set.union({index2})\n\t\t\t\tbreak\n\treturn coeff_vector", "def hash_to_point(self, message, salt):\r\n n = self.n\r\n if q > (1 << 16):\r\n raise ValueError(\"The modulus is too large\")\r\n\r\n k = (1 << 16) // q\r\n # Create a SHAKE object and hash the salt and message.\r\n shake = SHAKE256.new()\r\n shake.update(salt)\r\n shake.update(message)\r\n # Output pseudorandom bytes and map them to coefficients.\r\n hashed = [0 for i in range(n)]\r\n i = 0\r\n j = 0\r\n while i < n:\r\n # Takes 2 bytes, transform them in a 16 bits integer\r\n twobytes = shake.read(2)\r\n elt = (twobytes[0] << 8) + twobytes[1] # This breaks in Python 2.x\r\n # Implicit rejection sampling\r\n if elt < k * q:\r\n hashed[i] = elt % q\r\n i += 1\r\n j += 1\r\n return hashed", "def hash_to_point(self, message, salt):\r\n n = self.n\r\n if q > (1 << 16):\r\n raise ValueError(\"The modulus is too large\")\r\n\r\n k = (1 << 16) // q\r\n # Create a SHAKE object and hash the salt and message.\r\n shake = SHAKE256.new()\r\n shake.update(salt)\r\n shake.update(message)\r\n # Output pseudorandom bytes and map them to coefficients.\r\n hashed = [0 for i in range(n)]\r\n i = 0\r\n j = 0\r\n while i < n:\r\n # Takes 2 bytes, transform them in a 16 bits integer\r\n twobytes = shake.read(2)\r\n elt = (twobytes[0] << 8) + twobytes[1] # This breaks in Python 2.x\r\n # Implicit rejection sampling\r\n if elt < k * q:\r\n hashed[i] = elt % q\r\n i += 1\r\n j += 1\r\n return hashed", "def getG(compressed=True):\n priv = \"\\x00\"*31 + \"\\x01\"\n G = 
secp256k1.PrivateKey(priv, ctx=ctx).pubkey.serialize(compressed)\n return G", "def sign(self, private_key, key_idx, message):\n # Step 1. generate a deterministic key for the \"encrypt\" function\n symkey = self.gen_symkey(message)\n sig = [None] * self.n_keys\n\n # Step 2. Select an initialization (\"glue\") value at random in [0, max)\n u = random.randint(0, self.max_val)\n c = v = self.concat_hash(u, symkey)\n\n # Step 3. Choose a random X[i] for each other ring member that isn't us\n # starting from the next key in the ring, iterate over all of the keys\n # that aren't ours\n for i in (range(key_idx + 1, self.n_keys) + range(key_idx)):\n\n # choose random value for x[i]\n sig[i] = random.randint(0, self.max_val)\n\n # compute y for the random x\n e = self.g(sig[i], self.public_keys[i].e, self.public_keys[i].n)\n\n # update the v and continue along the ring\n v = self.concat_hash(v ^ e, symkey)\n\n # set c to the v you should have at the end of the ring\n if (i + 1) % self.n_keys == 0:\n c = v\n\n # Step 4. Solve for y[s], the missing, but now constrained, y value\n sig[key_idx] = self.g(v ^ u, private_key.d, private_key.n)\n return [c] + sig", "def SignatureHashLegacy(self, script, inIdx, hashtype):\n from .script import FindAndDelete, CScript, OP_CODESEPARATOR\n\n HASH_ONE = b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n\n if inIdx >= len(self.vin):\n return (HASH_ONE, \"inIdx %d out of range (%d)\" % (inIdx, len(self.vin)))\n\n # create copy as it is going to be modified with FindAndDelete(..)\n txtmp = CTransaction(self)\n\n for txin in txtmp.vin:\n txin.scriptSig = b''\n txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))\n\n if (hashtype & 0x1f) == SIGHASH_NONE:\n txtmp.vout = []\n\n for i in range(len(txtmp.vin)):\n if i != inIdx:\n txtmp.vin[i].nSequence = 0\n\n elif (hashtype & 0x1f) == SIGHASH_SINGLE:\n outIdx = inIdx\n if outIdx >= len(txtmp.vout):\n return (HASH_ONE, \"outIdx %d out of range (%d)\" % (outIdx, len(txtmp.vout)))\n\n tmp = txtmp.vout[outIdx]\n txtmp.vout = []\n for i in range(outIdx):\n txtmp.vout.append(CTxOut())\n txtmp.vout.append(tmp)\n\n for i in range(len(txtmp.vin)):\n if i != inIdx:\n txtmp.vin[i].nSequence = 0\n\n if hashtype & SIGHASH_ANYONECANPAY:\n tmp = txtmp.vin[inIdx]\n txtmp.vin = []\n txtmp.vin.append(tmp)\n\n s = txtmp.serialize()\n s += struct.pack(b\"<I\", hashtype)\n\n hash = hash256(s)\n\n return (hash, None)", "def genSeed():\n\tseed_length = int(''.join(random.SystemRandom().choice(string.digits) for _ in range(0, 3)))\n\tseed = os.urandom(seed_length)\n\thashing_algorithm = hashlib.shake_128()\n\thashing_algorithm.update(seed)\n\t# 2200 bytes from SHAKE-128 function is enough data to get 1024 coefficients\n\t# smaller than 5q, from Alkim, Ducas, Pöppelmann, Schwabe section 7:\n\tseed_hash = hashing_algorithm.digest(100)\n\treturn seed, seed_hash", "def get_initial_nonce(self):\n\n #First we will initiate the nonce with the prng.\n bit_nonce = int_to_bitstr(self.prng, 16)\n\n \"\"\" Then we generate the second part by taking only \n the last 16 bits until we have 32 bits in total. \"\"\"\n for i in range(16):\n bit_nonce += self.prng_feedback(bit_nonce[i:i+16])\n\n \"\"\" The new state of the prng will be the last 16 bits\n of the nonce, because we discarded 16 bits during the\n feedback loop. The initial nonce has 32 bits now. 
\"\"\"\n bit_prng = bit_nonce[16:]\n\n self.prng = bitstr_to_int(bit_prng)\n self.nonce = bitstr_to_int(bit_nonce)\n\n return self.nonce", "def generate_key_pair(G):\r\n\r\n global random\r\n\r\n if random == None:\r\n random = hash_drbg.HashDRBG()\r\n\r\n if G.order == None:\r\n raise RuntimeError(\"Base point must have order.\")\r\n\r\n key_size = log(ec.leftmost_bit(G.order)) / log(2)\r\n key_size = int(ceil(key_size) / 2)\r\n private_key = 1\r\n\r\n while private_key <= 1:\r\n private_key = random(key_size) #generates a random number\r\n #with twice the required bits\r\n private_key %= G.order\r\n\r\n return (private_key, G * private_key)", "def generate_number(string):\r\n return int_c(crc32(string.encode()))", "def tpm3_1_8_start_genomic():\n return \"TPM3\", \"NC_000001.11\", 154191901, 154192135, -1", "def getRandomInZ_N(N):\n n_length = N.bit_length()\n if PYCRYPTO:\n rand = number.getRandomInteger(n_length, os.urandom)\n while(rand > N):\n rand = number.getRandomInteger(n_length, os.urandom)\n return rand\n else:\n raise NotImplementedError(\"Couldn't find PyCrypto. No futher method implemented. Please install PyCrypto.\")", "def hash_gen(n):\n domain = \"abcdefghijklmnopqrstuvwxyz\"\n temp = \"\"\n for i in range(0, n):\n temp += domain[random.randrange(0, 26)]\n return temp", "def crackRsaBruteForce (e, n):\r\n p = getFirstFactor(n)\r\n q = n/p\r\n # phi = Euler Tortient\r\n phi = (p-1)*(q-1)\r\n\r\n d = 1\r\n while d < phi:\r\n # If the public key times the private key % phi = 1, then you have found\r\n # the correct private key\r\n if (e*d) % phi == 1:\r\n return d\r\n\r\n d += 1\r\n\r\n return -1", "def CNPJGenerator(amount=1,cnpjn=None):\n\n d1weight = [5,4,3,2,9,8,7,6,5,4,3,2]\n d2weight = [6] + d1weight\n\n cnpjs=set()\n\n while len(cnpjs) < amount:\n\n if not cnpjn:\n randns = [randint(0,9) for x in range(8)] + [0,0,0,randint(0,9)]\n else:\n randns = cnpjn\n\n d1,d2 = get_digits(randns,d1weight,d2weight)\n\n # transform cnpj in a string\n cnpj = (\"%s\"*14) % tuple(randns+[d1,d2])\n\n # if not exist, add in cnpjs\n if not cnpj in cnpjs:\n cnpjs.add(cnpj)\n\n cnpjs = list(cnpjs)\n if len(cnpjs) != 1:\n return cnpjs\n else:\n return cnpjs[0]", "def get_block_hash(index):\n # TODO: Require implementation\n pass", "def generate_keys(self):\n\n\t\tmin_ext = 1 << self.size_ext - 1\n\t\tmax_ext = 1 << self.size_ext\n\t\t\n\t\t\t\n\t\t# step 1 : chose random primary numbers p and q\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._p = n\n\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\twhile(n == self._p):\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\tself._q = n\n\n\t\t#step 2 : compute n = pq\n\t\tself.n = self._p * self._q\n\n\t\t#step 3 : compute phi(n)\n\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t#step 4 : chose the exponent\n\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\textension = extension + self.D\n\t\twhile (gcd(self._phi,n) != 1):\n\t\t\textension = randint(min_ext,max_ext) << math.ceil(math.log2(self.D))\n\t\tself._d = extension\n\n\t\t#step 5 : compute d (private key)\n\t\tself.e = euclide_algorithm(self._d, self._phi)[\"U\"] % self._phi\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)", "def generate_hotp(secret, counter=4):\n # https://tools.ietf.org/html/rfc4226\n msg = struct.pack('>Q', counter)\n digest = hmac.new(to_bytes(secret), msg, hashlib.sha1).digest()\n\n ob = digest[19]\n if python_version == 2:\n ob = ord(ob)\n\n pos = ob & 
15\n base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff\n token = base % 1000000\n return token", "def genus(P, E, F, V=None):\n\n return euler_characteristic(P, E, F, V)-2", "def generateRandomeTestSampleOfbLastBit(i):\n randomPlainTextArray = [random.choice('0123456789abcdef')\n for n in range(24)]\n randomPlainText = \"\".join(randomPlainTextArray)\n encryptText = randomPlainText\n randomBitsString = ''\n\n for n in trange(i):\n encryptText = a.encrypt(encryptText, const.KEY)\n randomBitsString = randomBitsString+(str(int(encryptText[23], 16) % 2))\n return randomBitsString", "def nonce():\n return random.randint(0, 4294967295)", "def nonce():\n return random.randint(0, 4294967295)", "def generate(self):\n start = self.nu_idx\n stop = start + self.score\n return self.orig_nu[start:stop]", "def generateNonce():\r\n hash = hashlib.sha1()\r\n hash.update(str(time.time()).encode('utf-8'))\r\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def iv_gen():\r\n rndiv = os.urandom(16)\r\n return rndiv", "def verify_curve(curve):\n # What follows is the implementation of the verification algorithm\n # described in \"The Elliptic Curve Digital Signature Algorithm (ECDSA)\",\n # from Certicom. There just a few difference between the original algorithm\n # and the implementation:\n #\n # * a few variable names have been changed for the sake of clarity;\n # * the document from Certicom allows arbritrary seeds with bit length\n # >= 160; here we only care about seeds that are exactly 160-bit long.\n\n if curve.seed.bit_length() > 160:\n raise VerificationFailed('seed too long')\n\n seed_bytes = curve.seed.to_bytes(length=160 // 8, byteorder='big')\n\n # Define t, s and v as specified on the document.\n t = curve.p.bit_length()\n s = (t - 1) // 160\n v = t - 160 * s\n\n # 1. Compute h = SHA-1(seed_bytes) and let c0 denote the bit string of\n # length v bits obtained by taking the v rightmost bits of h.\n h = hashlib.sha1(seed_bytes).digest()\n h = int.from_bytes(h, byteorder='big')\n\n c0 = h & ((1 << v) - 1)\n\n # 2. Let w[0] denote the bit string of length v bits obtained by setting\n # the leftmost bit of c0 to 0.\n #\n # Note: here we use 160 bit instead of v bits, as required by the document.\n # We do so to make the code easier, and because it does not make any\n # difference (see the step 6).\n w0 = c0 & ((1 << v - 1) - 1)\n w = [w0.to_bytes(length=160 // 8, byteorder='big')]\n\n # 3. Let z be the integer whose binary expansion is given by 160-bit string\n # seed_bytes.\n z = curve.seed\n\n # 4. For i from 1 to s do:\n for i in range(1, s + 1):\n # 4.1 Let s_i be 160-bit string which is the binary expansion of the\n # integer (z + i) % (2 ** g).\n z_i = ((z + i) % (2 ** 160))\n s_i = z_i.to_bytes(length=160 // 8, byteorder='big')\n\n # 4.2 Compute w_i = SHA-1(s_i).\n w_i = hashlib.sha1(s_i).digest()\n w.append(w_i)\n\n # 5. Let w be the bit string obtained by concatenating w_0,w_1,...,w_s.\n w = b''.join(w)\n\n # 6. Let c be the integer whose integer expansion is given by w.\n #\n # On step 2, we said that we used a longer bit length for the first element\n # of w. 
This is correct because the resulting c does not change: using 160\n # bits instead of v bits is equivalent to add some zeroes to the left of c.\n c = int.from_bytes(w, 'big')\n\n # If b ** 2 * c == a ** 3 (mod p) then accept; otherwise reject.\n if (curve.b * curve.b * c - curve.a * curve.a * curve.a) % curve.p != 0:\n raise VerificationFailed('curve verification failed')", "def _hash(self) -> None:\r\n # for a unit cube there are 8 possible hashes\r\n # returns the tuple of with all 8 hashes\r\n\r\n self.hashes[\"aaa\"] = P[P[P[self.xi] + self.yi] + self.zi]\r\n self.hashes[\"aab\"] = P[P[P[self.xi] + self.yi] + self._inc(self.zi)]\r\n self.hashes[\"aba\"] = P[P[P[self.xi] + self._inc(self.yi)] + self.zi]\r\n self.hashes[\"abb\"] = P[P[P[self.xi] + self._inc(self.yi)] + self._inc(self.zi)]\r\n self.hashes[\"baa\"] = P[P[P[self._inc(self.xi)] + self.yi] + self.zi]\r\n self.hashes[\"bab\"] = P[P[P[self._inc(self.xi)] + self.yi] + self._inc(self.zi)]\r\n self.hashes[\"bba\"] = P[P[P[self._inc(self.xi)] + self._inc(self.yi)] + self.zi]\r\n self.hashes[\"bbb\"] = P[P[P[self._inc(self.xi)] + self._inc(self.yi)] + self._inc(self.zi)]" ]
[ "0.588711", "0.55167645", "0.5486731", "0.5389965", "0.5368635", "0.5368635", "0.5340812", "0.5231569", "0.5220929", "0.5193509", "0.51206154", "0.5085955", "0.5046237", "0.5041022", "0.502793", "0.5023869", "0.50096273", "0.49974316", "0.49673843", "0.49673653", "0.49479038", "0.49402565", "0.49390903", "0.49318555", "0.49318555", "0.49293453", "0.49230516", "0.49216408", "0.49086583", "0.48948824" ]
0.78247774
0
Check that the algorithm produces the expected NUMS values; more a sanity check than anything since if the file is modified, all of it could be; this function is mostly for testing, but runs fast with precomputed context so can be run in user code too.
def verify_all_NUMS(write=False): nums_points = {} for i in range(256): nums_points[i] = safe_hexlify(getNUMS(i).serialize()) if write: with open("nums_basepoints.txt", "wb") as f: from pprint import pformat f.write(pformat(nums_points)) assert nums_points == precomp_NUMS, "Precomputed NUMS points are not valid!"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_converges_to_accurate_values(fake_args):\n with tempfile.TemporaryDirectory() as tmp_dir:\n fake_args.return_value = FakeArguments('orvara/tests/diagnostic_config.ini', tmp_dir)\n tt = run()[1].data\n # check params\n i = -1 # walker index.\n burn = 250 # number of burn in steps to discard\n rv_jitter = np.mean(tt['jitter'][i, burn:])\n rv_jitter_err = np.std(tt['jitter'][i, burn:])\n companion_jup_mass = np.mean(tt['msec0'][i, burn:]*1989/1.898)\n companion_mass_stderr = np.std(tt['msec0'][i, burn:]*1989/1.898)\n separation_AU = np.mean(tt['sau0'][i, burn:])\n separation_stderr = np.std(tt['sau0'][i, burn:])\n eccentricity = np.mean(tt['esino0'][i, burn:]**2 + tt['ecoso0'][i, burn:]**2)\n eccentricity_stderr = np.std(tt['esino0'][i, burn:]**2 + tt['ecoso0'][i, burn:]**2)\n inclination_deg = np.mean(tt['inc0'][i, burn:]*180/np.pi)\n inclination_err = np.std(tt['inc0'][i, burn:]*180/np.pi)\n\n expected_1_sigma_errors = [0.6282, 2.9215, 0.44668, 0.0030392, 2.3431]\n expected_values = [4.9378, 67.04218, 10.189, 0.73568, 49.89184]\n values = [rv_jitter, companion_jup_mass, separation_AU, eccentricity, inclination_deg]\n errors = [rv_jitter_err, companion_mass_stderr, separation_stderr,\n eccentricity_stderr, inclination_err]\n for value, expected, sigma in zip(values, expected_values, expected_1_sigma_errors):\n assert np.isclose(value, expected, atol=3 * sigma)\n assert np.allclose(errors, expected_1_sigma_errors, rtol=.5)", "def test_uss_num_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_uss_num(input_val)\n self.assertEqual(output_val, self.line.uss_num)", "def assert_no_error(self): \r\n Nx = self['Nx']\r\n Nt = self.m.Nt\r\n L, T = self.problem['L T'.split()]\r\n L = L/2 # only half the domain used (symmetry)\r\n x = np.linspace(0, L, Nx+1) # Mesh points in space \r\n t = np.linspace(0, T, Nt+1) # Mesh points in time\r\n \r\n for n in range(len(t)):\r\n u_e = self.problem.u_exact(x, t[n])\r\n diff = np.abs(self.f.u[n,:] - u_e).max()\r\n print 'diff:', diff\r\n tol = 1E-13\r\n assert diff < tol", "def check_correctness(self):\n\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = gt_file.readlines()\n\n # Check for inequality\n if len(out_lines) != len(gt_lines):\n return 0\n\n # Check for inequality\n for i in range(len(out_lines)):\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n\n return 1", "def testCTIcorrection(log, files, sigma=0.75, iterations=4, xcen=1900, ycen=1900, side=20):\n settings = dict(sigma=sigma, iterations=iterations)\n\n eclean = []\n e1clean = []\n e2clean = []\n R2clean = []\n eCTI = []\n e1CTI = []\n e2CTI = []\n R2CTI = []\n for file in files:\n #load no cti data\n nocti = pf.getdata(file.replace('CTI', 'nocti'))[ycen-side:ycen+side, xcen-side:xcen+side]\n #subtract background\n nocti -= 27.765714285714285\n nocti[nocti < 0.] = 0. #remove negative numbers\n\n #load CTI data\n CTI = pf.getdata(file)[ycen-side:ycen+side, xcen-side:xcen+side]\n CTI[CTI < 0.] = 0. 
#remove negative numbers\n\n sh = shape.shapeMeasurement(nocti, log, **settings)\n results = sh.measureRefinedEllipticity()\n\n eclean.append(results['ellipticity'])\n e1clean.append(results['e1'])\n e2clean.append(results['e2'])\n R2clean.append(results['R2'])\n\n sh = shape.shapeMeasurement(CTI, log, **settings)\n results = sh.measureRefinedEllipticity()\n\n eCTI.append(results['ellipticity'])\n e1CTI.append(results['e1'])\n e2CTI.append(results['e2'])\n R2CTI.append(results['R2'])\n\n results = {'eclean' : np.asarray(eclean),\n 'e1clean' : np.asarray(e1clean),\n 'e2clean' : np.asarray(e2clean),\n 'R2clean' : np.asarray(R2clean),\n 'eCTI' : np.asarray(eCTI),\n 'e1CTI' : np.asarray(e1CTI),\n 'e2CTI' : np.asarray(e2CTI),\n 'R2CTI' : np.asarray(R2CTI)}\n\n #save to a file\n fileIO.cPickleDumpDictionary(results, 'results.pk')\n\n return results", "def _check(self):\n for molname in self.options.keys():\n for key in self.options[molname].keys():\n if key in [\"Ncopies\"]:\n try:\n self.options[molname][key]=int(self.options[molname][key])\n except:\n raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))\n if key in [\"Cutoff\"]:\n try:\n self.options[molname][key]=float(self.options[molname][key])\n except:\n raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))\n if key in [\"Addon\"]: # test the addon part and convert variables\n for item in self.options[molname][key]: # Iterate over all attachments\n if item is not None:\n # attachment point\n dtypes={\"attachment\":int}\n try:\n item[\"attachment\"]=int(item[\"attachment\"])\n except:\n raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))\n # position\n #~ try:\n #~ print self.options[molname][key][\"position\"]\n #~ self.options[molname][key][\"position\"]=int(self.options[molname][key][\"position\"])\n #~ except:\n #~ raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))", "def test_measure_nondeterministic_without_sampling(self):\n shots = 2000\n circuits = ref_measure.measure_circuits_nondeterministic(allow_sampling=False)\n targets = ref_measure.measure_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def test_check(self):\n\n self.assertTrue(Naive().check(self.file_gitignore))\n self.assertTrue(Naive().check(self.file_tests))\n self.assertTrue(Naive().check(self.file_bin))\n self.assertTrue(Naive().check(self.file_py))\n self.assertTrue(Naive().check(self.file_authors))", "def invalid_files_processed(self) -> float:\n return pulumi.get(self, \"invalid_files_processed\")", "def check(self):\n self.init()\n self.calculate_output()\n self.compare_outputs_with_expects()", "def test_is_gene_continuously_amplified_wrong_input(self):\n self.assertEqual(\"Wrong input data\", is_gene_continuously_amplified(13))", "def check_parameters_for_stupid_errors( file ):\n import os\n \n # print('~~~~~~~~~~~~~~~~~~~~~ini-file~~~~~~~~~~~')\n # # read jobfile\n # with open(file) as f:\n # # loop over all lines\n # for line in f:\n # line = line.lstrip()\n # line = line.rstrip()\n # if len(line)>0:\n # if ';' in line:\n # line = line[0:line.index(\";\")]\n # if len(line)>0:\n # if '[' in line and ']' in line:\n # print(bcolors.OKBLUE + line + bcolors.ENDC)\n # else:\n # print(line)\n # 
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n\n print(\"We scan %s for stupid errors.\" % (file) )\n\n # check if the file exists, at least\n if not os.path.isfile(file):\n raise ValueError(\"Stupidest error of all: we did not find the INI file.\")\n\n wavelet = get_ini_parameter(file, 'Wavelet', 'wavelet', str, default=\"CDF40\") \n \n # since 05 Jul 2023, g is set automatically, unless we do something stupid.\n if wavelet == 'CDF20':\n g_default = 2\n elif wavelet=='CDF22':\n g_default = 3\n elif wavelet=='CDF40':\n g_default = 4\n elif wavelet=='CDF42':\n g_default = 5\n elif wavelet=='CDF44' or wavelet=='CDF62':\n g_default = 7\n else:\n g_default = 1\n \n jmax = get_ini_parameter(file, 'Blocks', 'max_treelevel', int)\n jmin = get_ini_parameter(file, 'Blocks', 'min_treelevel', int)\n adapt_mesh = get_ini_parameter(file, 'Blocks', 'adapt_tree', int)\n ceps = get_ini_parameter(file, 'Blocks', 'eps')\n bs = get_ini_parameter(file, 'Blocks', 'number_block_nodes', int, vector=True)\n g = get_ini_parameter(file, 'Blocks', 'number_ghost_nodes', int, default=g_default)\n g_rhs = get_ini_parameter(file, 'Blocks', 'number_ghost_nodes_rhs', int, default=g)\n dealias = get_ini_parameter(file, 'Blocks', 'force_maxlevel_dealiasing', int)\n dim = get_ini_parameter(file, 'Domain', 'dim', int)\n L = get_ini_parameter(file, 'Domain', 'domain_size', vector=True)\n discretization = get_ini_parameter(file, 'Discretization', 'order_discretization', str)\n time_step_method = get_ini_parameter( file, 'Time', 'time_step_method', str, default=\"RungeKuttaGeneric\")\n CFL = get_ini_parameter( file, 'Time', 'CFL', float, default=1.0)\n CFL_eta = get_ini_parameter( file, 'Time', 'CFL_eta', float, default=0.99)\n CFL_nu = get_ini_parameter( file, 'Time', 'CFL_nu', float, default=0.99*2.79/(float(dim)*np.pi**2))\n c0 = get_ini_parameter( file, 'ACM-new', 'c_0', float)\n nu = get_ini_parameter( file, 'ACM-new', 'nu', float)\n ceta = get_ini_parameter( file, 'VPM', 'C_eta', float, default=0.0)\n penalized = get_ini_parameter( file, 'VPM', 'penalization', bool, default=False)\n geometry = get_ini_parameter( file, 'VPM', 'geometry', str, default='default')\n sponged = get_ini_parameter( file, 'Sponge', 'use_sponge', bool, default=False)\n csponge = get_ini_parameter( file, 'Sponge', 'C_sponge', float, default=0.0)\n sponge_type = get_ini_parameter( file, 'Sponge', 'sponge_type', str, default='default')\n L_sponge = get_ini_parameter( file, 'Sponge', 'L_sponge', default=0.0)\n time_max = get_ini_parameter( file, 'Time', 'time_max', float)\n time_stepper = get_ini_parameter( file, 'Time', 'time_step_method', str, default=\"RungeKuttaGeneric\")\n CFL = get_ini_parameter( file, 'Time', 'CFL', float, default=0.5)\n CFL_nu = get_ini_parameter( file, 'Time', 'CFL_nu', float, default=0.99*2.79/(float(dim)*np.pi**2) )\n CFL_eta = get_ini_parameter( file, 'Time', 'CFL_eta', float, default=0.99)\n filter_type = get_ini_parameter( file, 'Discretization', 'filter_type', str, default='no_filter')\n filter_freq = get_ini_parameter( file, 'Discretization', 'filter_freq', int, default=-1)\n \n \n dx = L[0]*2**-jmax/(bs[0])\n keta = np.sqrt(ceta*nu)/dx\n \n \n print(\"======================================================================================\")\n print(\"Bs= %i g= %i g_rhs= %i dim= %i Jmax= %i L= %2.2f %s==> dx= %2.3e N_equi= %i N= %i per unit length%s\" % \n (bs[0],g,g_rhs, dim,jmax,L[0],bcolors.OKBLUE, dx, int(L[0]/dx), int(1.0/dx), bcolors.ENDC))\n print(\"equidistant grids: Jmin=%i^%i, Jmax=%i^%i\" % 
(int(bs[0]*2**jmin), dim, int(bs[0]*2**jmax), dim) )\n print(\"discretization= %s\" % (discretization))\n print(\"T_max = %2.2f CFL= %2.2f CFL_eta= %2.2f CFL_nu= %2.3f time_stepper= %s\" % (time_max, CFL, CFL_eta, CFL_nu, time_stepper))\n \n \n print(\"use_penalization= %i geometry= %s C_eta= %2.2e %s ==> K_eta = %2.2f%s\" % \n (penalized, geometry, ceta, bcolors.OKBLUE, keta, bcolors.ENDC))\n if sponged:\n print(\"use_sponge=%i type=%s C_sponge=%2.2e L_sponge=%2.2f %s==> Ntau = %2.2f%s\" % \n (sponged, sponge_type, csponge, L_sponge, bcolors.OKBLUE, L_sponge/(c0*csponge), bcolors.ENDC))\n print(\"C_0 = %2.2f delta_shock= %2.2f dx nu=%e\" % (c0, c0*ceta/dx, nu))\n print(\"C_eps = %2.2e wavelet= %s dealias=%i adapt_mesh=%i\" % (ceps, wavelet, dealias, adapt_mesh))\n \n print(\"dt_CFL= %2.3e\" % (CFL*dx/c0))\n print(\"filter_type= %s filter_freq=%i\" % (filter_type, filter_freq))\n print(\"======================================================================================\")\n \n \n if len(bs) > 1:\n bs = bs[0]\n\n if bs % 2 == 0:\n warn('The block size is bs=%i which is an EVEN number.' % (bs) )\n\n if bs < 3:\n warn('The block size is bs=%i is very small or even negative.' % (bs) )\n \n \n if (wavelet == \"CDF22\") and g<3:\n warn(\"Not enough ghost nodes for wavelet %s g=%i < 3\" % (wavelet, g) )\n if (wavelet == \"CDF42\") and g<5:\n warn(\"Not enough ghost nodes for wavelet %s g=%i < 5\" % (wavelet, g) ) \n if (wavelet == \"CDF44\" or wavelet == \"CDF62\") and g<7:\n warn(\"Not enough ghost nodes for wavelet %s g=%i < 7\" % (wavelet, g) )\n if (wavelet == \"CDF40\") and g<4:\n warn(\"Not enough ghost nodes for wavelet %s g=%i < 4\" % (wavelet, g) )\n \n \n if time_step_method == \"RungeKuttaChebychev\":\n if CFL_eta < 999:\n warn('are you sure you did not forget to adjustl CFL_eta for the RKC scheme???')\n if CFL_nu < 999:\n warn('are you sure you did not forget to adjustl CFL_nu for the RKC scheme???')\n if CFL != 0.75:\n warn('are you sure you did not forget to adjustl CFL for the RKC scheme??? often we used 0.75.') \n \n if time_step_method == \"RungeKuttaGeneric\":\n if CFL_eta > 1.0:\n warn('are you sure you did not forget to adjustl CFL_eta for the RK scheme? it may be unstable.')\n if CFL_nu > 0.99*2.79/(float(dim)*np.pi**2):\n warn('are you sure you did not forget to adjustl CFL_nu for the RK scheme? it may be unstable.')\n if CFL > 1.0:\n warn('are you sure you did not forget to adjustl CFL for the RK scheme? it may be unstable.') \n \n # if somebody modifies the standard parameter file, users have to update their\n # ini files they use. this is often forgoten and obnoxious. 
Hence, if we find\n # value sthat no longer exist, warn the user.\n if exists_ini_parameter( file, \"Blocks\", \"number_data_fields\" ) :\n warn('Found deprecated parameter: [Blocks]::number_data_fields')\n\n if exists_ini_parameter( file, \"Physics\", \"initial_cond\" ) :\n warn('Found deprecated parameter: [Physics]::initial_cond')\n\n if exists_ini_parameter( file, \"Dimensionality\", \"dim\" ) :\n warn('Found deprecated parameter: [Dimensionality]::dim')\n\n if exists_ini_parameter( file, \"DomainSize\", \"Lx\" ) :\n warn('Found deprecated parameter: [DomainSize]::Lx')\n\n if exists_ini_parameter( file, \"Time\", \"time_step_calc\" ) :\n warn('Found deprecated parameter: [Time]::time_step_calc')\n \n if exists_ini_parameter( file, \"ACM\", \"forcing\" ):\n warn('Found deprecated parameter: [ACM]::forcing')\n \n if exists_ini_parameter( file, \"ACM\", \"forcing_type\" ):\n warn('Found deprecated parameter: [ACM]::forcing_type')\n \n if exists_ini_parameter( file, \"ACM\", \"p_mean_zero\" ):\n warn('Found deprecated parameter: [ACM]::p_mean_zero')\n \n if exists_ini_parameter( file, \"ACM\", \"compute_laplacian\" ):\n warn('Found deprecated parameter: [ACM]::compute_laplacian')\n \n if exists_ini_parameter( file, \"ACM\", \"compute_nonlinearity\" ):\n warn('Found deprecated parameter: [ACM]::compute_nonlinearity')\n \n if exists_ini_parameter( file, \"Blocks\", \"adapt_mesh\" ):\n warn('Found deprecated parameter: [Blocks]::adapt_mesh ===> adapt_tree')\n \n HIT = get_ini_parameter( file, 'ACM-new', 'use_HIT_linear_forcing', bool, default=False)\n if HIT:\n print(type(HIT))\n print(HIT)\n warn('You use HIT linear forcing, which is HIGHLY EXPERIMENTAL')\n\n jmax = get_ini_parameter( file, 'Blocks', 'max_treelevel', int)\n\n if jmax > 18:\n warn('WABBIT can compute at most 18 refinement levels, you set more!')\n\n if sponged:\n # default value is TRUE so if not found, all is well\n mask_time_dependent = get_ini_parameter( file, 'VPM', 'mask_time_dependent_part', int, default=1)\n\n if mask_time_dependent != 1:\n warn(\"\"\"you use sponge, but mask_time_dependent_part=0! 
The sponge\n is treated as if it were time dependent because it does not have\n to be at the maximum refinement level.\"\"\")\n\n\n\n # loop over ini file and check that each non-commented line with a \"=\" contains the trailing semicolon \";\"\n with open(file) as f:\n # loop over all lines\n linenumber = 0\n for line in f:\n # remove trailing & leading spaces\n line = line.strip()\n linenumber += 1\n if line != \"\" :\n if line[0] != \"!\" and line[0] != \"#\" and line[0] != \";\" :\n if \"=\" in line and \";\" not in line:\n warn('It appears the line #%i does not contain the semicolon' % (linenumber) )\n\n restart = get_ini_parameter( file, 'Physics', 'read_from_files', int)\n print(\"read_from_files=%i\" %(restart))\n\n if restart == 1:\n info(\"This simulation is being resumed from file\")\n\n infiles = get_ini_parameter( file, 'Physics', 'input_files', str)\n infiles = infiles.split()\n for file in infiles:\n print(file)\n if not os.path.isfile(file):\n raise ValueError(\"CRUTIAL: read_from_files=1 but infiles NOT found!.\")\n else:\n info(\"This simulation is being started from initial condition (and not from file)\")", "def checkStats(checkmateOutput):\n\n if not os.path.isfile(checkmateOutput):\n print(\"Files %s not found\" %checkmateOutput)\n return False\n\n # Get CMS-SUS-16-032 data:\n data = np.genfromtxt(checkmateOutput,names=True,\n dtype=None,encoding=None)\n\n data = np.delete(data,np.where(data['sr'] == 'Combined'))\n ibest = np.argmax(data['rexp'])\n pt = data[ibest]\n if not pt['s']:\n ratio = 100.0\n else:\n ratio = pt['signalsumofweights']/pt['s']\n nEvts = pt['signalsumofweights']\n\n return ratio,nEvts", "def t0check(resultFile, resultType, method, period, perioderr):\n\n inputData = np.genfromtxt(resultFile, dtype=None, delimiter=', ', comments='#', encoding = None)\n if 't0' in str(inputData[0]):\n if resultType == 'LSQ':\n k = np.where(inputData[0] == 't0_lsq')[0][0]\n elif resultType == 'Mean':\n k = np.where(inputData[0] == 't0_mu')[0][0]\n elif resultType == 'Median':\n k = np.where(inputData[0] == 't0_med')[0][0]\n else:\n raise ValueError(\"Result type not recognised, please chose Mean, Median or LSQ.\")\n\n l = np.where(inputData[0] == 't0_std')[0][0]\n\n elif 't_secondary' in str(inputData[0]):\n\n if resultType == 'LSQ':\n k = np.where(inputData[0] == 't_secondary_lsq')[0][0]\n elif resultType == 'Mean':\n k = np.where(inputData[0] == 't_secondary_mu')[0][0]\n elif resultType == 'Median':\n k = np.where(inputData[0] == 't_secondary_med')[0][0]\n else:\n raise ValueError(\"Result type not recognised, please chose Mean, Median or LSQ.\")\n\n l = np.where(inputData[0] == 't_secondary_std')[0][0]\n\n t0s = [float(line[k]) for line in inputData if method in line]\n t0errs = [float(line[l]) for line in inputData if method in line]\n\n if len(t0s) == 2:\n nperiods = round((t0s[1] - t0s[0])/period)\n t0_diff = (t0s[1] - t0s[0]) - nperiods*period\n t0_diff_err = np.sqrt( ( t0errs[1] )**2 + ( t0errs[0] )**2 + (nperiods*perioderr)**2 )\n sigma_sec = t0_diff_err*60*60*24\n nsigma = t0_diff/t0_diff_err\n else:\n raise ValueError(\"Not implemented for more than 2 transits yet.\")\n\n return sigma_sec, nsigma", "def CustomMathTest(): \n \n def CheckEqual(iterator):\n return len(set(iterator)) <= 1\n \n print(\"\")\n print(\" ..Testing.. 
\")\n print(\"\")\n \n Tests = []\n\n #Setup\n c = [1,2,3,nan,3]\n c2 = ma.masked_array(c,isnan(c))\n #Python has a less-comfortable handling of missing values.\n c3 = [2,3,-1,4,0]\n \n\n print(\"Testing MeanNa...\")\n Expected = [1.0, 2.0, 3.0, 2.25, 3.0]\n Actual = MeanNa(c2)\n print(Expected)\n print(Actual)\n print(CheckEqual(Actual==Expected))\n Tests.append(CheckEqual(Actual==Expected))\n print(\"\")\n \n print(\"Testing Catch...\")\n Expected = [0,1,.5,0]\n Actual = [Catch(.4),Catch(.6),Catch(.4,.3),Catch(.4,.1)]\n print(Expected)\n print(Actual)\n print(Actual==Expected)\n Tests.append((Actual==Expected))\n print(\"\")\n \n print(\"Testing Influence...\")\n Expected = [array([ 0.88888889]), array([ 1.33333333]), array([ 1.]), array([ 1.33333333])]\n Actual = Influence(GetWeight(c2))\n print(Expected)\n print(Actual)\n Out = []\n Flag=False\n for i in range(len(Actual)): #rounding problems require an approximation\n Out.append( (Actual[i]-Expected[i])**2)\n if(sum(Out)<.000000000001):\n Flag=True\n print(Flag)\n Tests.append(Flag) \n print(\"\")\n \n print(\"Testing ReWeight...\")\n Expected = [0.08888888888888889, 0.17777777777777778, 0.26666666666666666, 0.2, 0.26666666666666666]\n Actual = ReWeight(c2)\n print(Expected)\n print(Actual)\n print(CheckEqual(Actual==Expected))\n Tests.append(CheckEqual(Actual==Expected))\n print(\"\")\n \n Votes = array([[1,1,0,0], \n [1,0,0,0],\n [1,1,0,0],\n [1,1,1,0],\n [0,0,1,1],\n [0,0,1,1]])\n \n Votes = ma.masked_array(Votes,isnan(Votes))\n \n print(\"Testing ReverseMatrix...\")\n Expected = array([[0, 0, 1, 1],\n [0, 1, 1, 1],\n [0, 0, 1, 1],\n [0, 0, 0, 1],\n [1, 1, 0, 0],\n [1, 1, 0, 0]])\n Actual = ReverseMatrix(Votes)\n print(Expected)\n print(Actual)\n Flag=False\n if(sum(Expected==Actual)==24):\n Flag=True\n print(Flag)\n Tests.append(Flag)\n print(\"\") \n \n print(\"Testing WeightedPrinComp...\")\n Expected = array([-0.81674714, -0.35969107, -0.81674714, -0.35969107, 1.17643821, 1.17643821])\n Actual = WeightedPrinComp(Votes)[1]\n Out = []\n Flag=False\n for i in range(len(Actual)): #rounding problems require an approximation\n Out.append( (Actual[i]-Expected[i])**2)\n if(sum(Out)<.000000000001):\n Flag=True \n print(Flag)\n Tests.append(Flag) \n print(\"\") \n \n print(\" *** TEST RESULTS ***\")\n print(Tests)\n print(CheckEqual(Tests))\n \n return(CheckEqual(Tests))", "def test_negative_electrode_potential_profile(self):\n np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)", "def test_ML_check_full_epem_ttx(self):\n \n self.setup_logFile_for_logger('madgraph.check_cmd')\n try:\n cmd = os.getcwd()\n self.do('import model loop_sm')\n self.do('check full -reuse e+ e- > t t~ [virt=QCD]')\n self.assertEqual(cmd, os.getcwd())\n self.assertTrue(path.isfile(pjoin(MG5DIR,'TMP_CHECK',\\\n 'SubProcesses/P0_epem_ttx/result.dat')))\n shutil.rmtree(pjoin(MG5DIR,'TMP_CHECK'))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n # Needs the loop_sm feynman model to successfully run the gauge check.\n # self.assertTrue('Gauge results' in res)\n self.assertTrue('Lorentz invariance results' in res)\n self.assertTrue('Process permutation results:' in res)\n self.assertTrue('Gauge results' in res)\n self.assertTrue('Summary: passed' in res)\n self.assertTrue('Passed' in res)\n self.assertTrue('Failed' not in res)\n self.assertTrue('1/1 failed' not in res)\n self.assertTrue('1/1 passed' in res)\n except:\n 
self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n raise\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)", "def test_non_integral_validation(self):", "def test_non_integral_validation(self):", "def test_fluxes(self):\n\n t, x_n, x_p, r_n, r_p = (\n self.t,\n self.x_n,\n self.x_p,\n self.r_n_edge,\n self.r_p_edge,\n )\n if self.model.options[\"particle\"] == \"uniform profile\":\n # Fluxes are zero everywhere since the concentration is uniform\n np.testing.assert_array_almost_equal(self.N_s_n(t, x_n, r_n), 0)\n np.testing.assert_array_almost_equal(self.N_s_p(t, x_p, r_p), 0)\n else:\n if self.operating_condition == \"discharge\":\n if self.model.options[\"particle\"] == \"quartic profile\":\n # quartic profile has a transient at the beginning where\n # the concentration \"rearranges\" giving flux of the opposite\n # sign, so ignore first three times\n np.testing.assert_array_less(0, self.N_s_n(t[3:], x_n, r_n[1:]))\n np.testing.assert_array_less(self.N_s_p(t[3:], x_p, r_p[1:]), 0)\n else:\n np.testing.assert_array_less(\n -1e-16, self.N_s_n(t[1:], x_n, r_n[1:])\n )\n np.testing.assert_array_less(self.N_s_p(t[1:], x_p, r_p[1:]), 1e-16)\n if self.operating_condition == \"charge\":\n np.testing.assert_array_less(self.N_s_n(t[1:], x_n, r_n[1:]), 1e-16)\n np.testing.assert_array_less(-1e-16, self.N_s_p(t[1:], x_p, r_p[1:]))\n if self.operating_condition == \"off\":\n np.testing.assert_array_almost_equal(self.N_s_n(t, x_n, r_n), 0)\n np.testing.assert_array_almost_equal(self.N_s_p(t, x_p, r_p), 0)\n\n np.testing.assert_array_almost_equal(0, self.N_s_n(t, x_n, r_n[0]), decimal=4)\n np.testing.assert_array_almost_equal(0, self.N_s_p(t, x_p, r_p[0]), decimal=4)", "def skip_test(n):\n return k > 0 and magic * n * k**0.5 >= t4_ref", "def test_verify_mol_input():\n # good input with extra spaces\n mol_input_dict = read_mol_input(os.path.join(TEST_DIR, \"example2_mol_input_file.txt\"))\n verify_mol_input(mol_input_dict)\n\n # check the good file\n # {'qcm': 'hf', 'basis': 'sto-3g', 'struct_input': 'C=CC=C',\n # 'struct_type': 'smiles', 'prog': 'psi4', 'charge': '0', 'multip': '1'}\n mol_input_dict = read_mol_input(os.path.join(TEST_DIR, \"example_mol_input_file.txt\"))\n verify_mol_input(mol_input_dict)\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n # struct input is spelt wrong\n mol_input_dict2 = read_mol_input(os.path.join(TEST_DIR, \"bad3_mol_input_file.txt\"))\n assert_raises(ValueError, verify_mol_input, mol_input_dict2)\n\n mol_input_dict[\"qcm\"] = \"not-a-method\"\n assert_raises(ValueError, verify_mol_input, mol_input_dict)\n mol_input_dict[\"qcm\"] = \"hf\"\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"basis\"] = \"not-a-basis\"\n assert_raises(ValueError, verify_mol_input, mol_input_dict)\n mol_input_dict[\"basis\"] = \"sto-3g\"\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"struct_input\"] = \"very-bad-smiles-string\"\n assert_raises(ValueError, verify_mol_input, mol_input_dict)\n mol_input_dict[\"struct_input\"] = \"C=CC=C\"\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"struct_type\"] = \"not-an-option\"\n assert_raises(AssertionError, verify_mol_input, mol_input_dict)\n 
mol_input_dict[\"struct_type\"] = \"smiles\"\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"prog\"] = \"unavailable-prog\"\n assert_raises(AssertionError, verify_mol_input, mol_input_dict)\n mol_input_dict[\"prog\"] = \"psi4\"\n # reset all params that are modified by verification\n mol_input_dict[\"charge\"] = '0'\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"charge\"] = \"0.34\"\n assert_raises(ValueError, verify_mol_input, mol_input_dict)\n mol_input_dict[\"charge\"] = \"0\"\n # reset all params that are modified by verification\n mol_input_dict[\"multip\"] = '1'\n\n mol_input_dict[\"multip\"] = \"-2\"\n assert_raises(AssertionError, verify_mol_input, mol_input_dict)", "def test_unit_checks():\n\n # Arrhenius-type reaction without R\n xml_filename = \"tests/test_xml_files/unit_check_arr.xml\"\n parser = XMLParser(xml_filename, convert_to_SI_units=True)\n A = parser.reaction_list[0].rate_coeffs_components['A']\n E = parser.reaction_list[0].rate_coeffs_components['E']\n assert numpy.isclose(A, 35200, atol=1e-16)\n assert numpy.isclose(E, 298737.6, atol=1e-16)\n\n # Arrhenius-type reaction with R\n xml_filename = \"tests/test_xml_files/unit_check_arr_with_R.xml\"\n parser = XMLParser(xml_filename, convert_to_SI_units=False)\n A = parser.reaction_list[0].rate_coeffs_components['A']\n E = parser.reaction_list[0].rate_coeffs_components['E']\n R = parser.reaction_list[0].rate_coeffs_components['R']\n assert numpy.isclose(A, 3.52e+10, atol=1e-16)\n assert numpy.isclose(E, 7.14e+04, atol=1e-16)\n assert numpy.isclose(R, 8.3144598, atol=1e-16)\n\n # Modified Arrhenius-type reaction without R\n xml_filename = \"tests/test_xml_files/unit_check_modarr.xml\"\n parser = XMLParser(xml_filename, convert_to_SI_units=True)\n A = parser.reaction_list[0].rate_coeffs_components['A']\n E = parser.reaction_list[0].rate_coeffs_components['E']\n b = parser.reaction_list[0].rate_coeffs_components['b']\n assert numpy.isclose(A, 35200, atol=1e-16)\n assert numpy.isclose(E, 298737.6, atol=1e-16)\n assert numpy.isclose(b, 2.7, atol=1e-16)\n\n # Modified Arrhenius-type reaction with R\n xml_filename = \"tests/test_xml_files/unit_check_modarr_with_R.xml\"\n parser = XMLParser(xml_filename, convert_to_SI_units=False)\n A = parser.reaction_list[0].rate_coeffs_components['A']\n E = parser.reaction_list[0].rate_coeffs_components['E']\n b = parser.reaction_list[0].rate_coeffs_components['b']\n R = parser.reaction_list[0].rate_coeffs_components['R']\n assert numpy.isclose(A, 3.52e+10, atol=1e-16)\n assert numpy.isclose(E, 7.14e+04, atol=1e-16)\n assert numpy.isclose(b, 2.7, atol=1e-16)\n assert numpy.isclose(R, 8.3144598, atol=1e-16)", "def check_data(raw_data, nsamples, verbose):\n if verbose:\n print 'raw_data', raw_data\n print 'raw_data.shape', raw_data.shape\n\n uc_timings, uc_run_counts = np.unique(raw_data[:,0], return_counts=True)\n if verbose:\n print 'uc_timings', uc_timings\n print 'uc_run_counts', uc_run_counts\n\n n_of_runs = set(uc_run_counts)\n if len(n_of_runs) != 1 and len(n_of_runs) != 2:\n raise AssertionError(\n 'Something bad happened!\\nn_of_runs = {0}\\nlen(n_of_runs) = '\n '{1}'.format(n_of_runs, len(n_of_runs))\n )\n\n if len(n_of_runs) == 2:\n if np.diff(list(n_of_runs))[0] != 1:\n raise AssertionError(\n 'Something bad happened!\\nn_of_runs = '\n '{0}\\nnp.diff(list(n_of_runs))[0] = '\n '{1}'.format(n_of_runs, np.diff(list(n_of_runs))[0])\n )\n n_incomplete_pulse = np.sum(uc_run_counts == 
np.max(list(n_of_runs)))\n if verbose:\n print 'n_incomplete_pulse', n_incomplete_pulse\n clean_data = raw_data[:-n_incomplete_pulse]\n else:\n clean_data = raw_data\n\n if verbose:\n print 'clean_data', clean_data\n print 'clean_data.shape', clean_data.shape\n\n return clean_data", "def check(self):\r\n self.check_probabilities()\r\n self.check_sum()", "def testInput(filename, expected):\n print(\"\\n------- Test \" + filename + \" -------\\n\")\n found = 0\n graphList = Graph.fromFile(filename)\n for graph in graphList:\n found += testOnGraph(graph)\n print(\"\\nEuler circuits expected: \" + str(expected) + \", found: \" + str(found))\n return found", "def checkProperUnpack(self):\n print(\"Checking Randomizer files\")\n\n self.folderStatus = False\n self.aiRefStatus = False\n self.ffxRefStatus = True\n self.validNewStatus = False\n self.validReplaceStatus = False\n self.originalRefMissing = 0\n\n if (os.path.isdir(\"enemyRandomizerData/\")):\n self.folderStatus = True\n\n if (os.path.isfile(\"enemyRandomizerData/airef.csv\")):\n self.aiRefStatus = True\n\n if (os.path.isfile(\"enemyRandomizerData/replacement_ref/valid_new.txt\")):\n self.validNewStatus = True\n\n if (os.path.isfile(\"enemyRandomizerData/replacement_ref/valid_replacements.txt\")):\n self.validReplaceStatus = True\n\n for iFile in [\"m10_00_00_00\", \"m10_01_00_00\", \"m10_02_00_00\", \"m11_00_00_00\", \"m12_00_00_00\", \"m12_01_00_00.ptde\", \"m12_01_00_00.remaster\", \"m12_00_00_01\", \"m13_00_00_00.remaster\", \"m13_01_00_00\", \"m13_02_00_00\", \"m14_00_00_00\", \"m14_01_00_00\", \"m15_00_00_00\", \"m15_01_00_00\", \"m16_00_00_00\", \"m17_00_00_00\", \"m18_00_00_00\", \"m18_01_00_00\"]:\n if not (os.path.isfile('enemyRandomizerData/original_enemies_ref/' + iFile + '.txt')):\n self.originalRefMissing += 1", "def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success", "def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success", "def test_standardize_volpianoN(self):\n lib = mei2volpiano.MEItoVolpiano()\n f1 = \"./resources/neume_mei/016r_reviewed.mei\"\n f2 = 
\"./resources/neume_mei/CDN-Hsmu_M2149.L4_003r.mei\"\n f3 = \"./resources/neume_mei/CDN-Hsmu_M2149.L4_003v.mei\"\n\n files = [f1, f2, f3]\n\n for i, element in enumerate(n_standard):\n func = lib.convert_mei_volpiano(files[i])\n func = lib.standardize_volpiano(func) \n self.assertTrue(func, w_standard[i])" ]
[ "0.6062466", "0.6013086", "0.58547235", "0.5534653", "0.5532871", "0.5531744", "0.55294245", "0.5479131", "0.54670936", "0.54526573", "0.543836", "0.5416768", "0.53805554", "0.53707004", "0.5367765", "0.53630483", "0.53561896", "0.533691", "0.533691", "0.53347796", "0.5329929", "0.5320993", "0.53051776", "0.5302888", "0.5298016", "0.52849007", "0.527249", "0.52692175", "0.52692175", "0.52674454" ]
0.63798946
0
Given a secp256k1.PrivateKey priv and a secp256k1.PublicKey nums_pt, an alternate
def getP2(priv, nums_pt):
    priv_raw = priv.private_key
    return nums_pt.tweak_mul(priv_raw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def derive_public_key(private_key):\r\n\r\n Q = int.from_bytes(private_key, byteorder='big') * BIP32_CURVE.generator\r\n xstr = Q.x().to_bytes(32, byteorder='big')\r\n parity = Q.y() & 1\r\n return (2 + parity).to_bytes(1, byteorder='big') + xstr", "def check_equal_rsa_pub_key(sk2_, sk_):\n pub_n = sk_.public_numbers()\n pub_n2 = sk2_.public_numbers()\n\n self.assertEqual(pub_n2.e, pub_n.e)\n self.assertEqual(pub_n2.n, pub_n.n)", "def check_equal_rsa_priv_key(sk2_priv, sk_priv):\n pri_n = sk_priv.private_numbers()\n pri_n2 = sk2_priv.private_numbers()\n\n # the library guarantees this: p is the larger factor\n self.assertTrue(pri_n.p > pri_n.q)\n\n self.assertTrue(\n pri_n2.p == pri_n.p and\n pri_n2.q == pri_n.q and\n pri_n2.d == pri_n.d and\n pri_n2.dmp1 == pri_n.dmp1 and\n pri_n2.dmq1 == pri_n.dmq1 and\n pri_n2.iqmp == pri_n.iqmp)", "def gen_public_key(g, private, p):\n return pow(g, private, p)", "def gen_key_pair():\n sk = gen_secret_key(BITCOIN.gen.n)\n pk = PublicKey.from_sk(sk)\n return sk, pk", "def publickey_unsafe(sk: bytes) -> bytes:\n h = H(sk)\n a = decodecoord(h)\n A = scalarmult_B(a)\n return encodepoint(A)", "def o_priv_to_pub(priv):\n pub = base10_multiply(G, priv)\n return '0' + str(2 + (pub[1] % 2)) + encode(pub[0], 16, 64)", "def generate_keypair(bits):\n p = generate_prime(bits // 2)\n #print(p)\n q = generate_prime(bits // 2)\n #print(q)\n n = p * q\n return PrivateKey(p, q, n), PublicKey(n)", "def PublicKey(self) -> _n_9_t_1:", "def PublicKey(self) -> _n_9_t_1:", "def test_private_public():\n\n alice_priv = ECScalar(\n bytes.fromhex(\"77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c2a\")\n )\n alice_public = ECPoint(\n bytes.fromhex(\"8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a\")\n )\n\n assert x25519_scalarmult_base(alice_priv) == alice_public\n\n bob_priv = ECScalar(\n bytes.fromhex(\"5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb\")\n )\n bob_public = ECPoint(\n bytes.fromhex(\"de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f\")\n )\n\n assert x25519_scalarmult_base(bob_priv) == bob_public\n\n k = ECPoint(bytes.fromhex(\"4a5d9d5ba4ce2de1728e3bf480350f25e07e21c947d19e3376f09b3c1e161742\"))\n\n alice_k = x25519_scalarmult(alice_priv, bob_public)\n bob_k = x25519_scalarmult(bob_priv, alice_public)\n\n assert alice_k == bob_k\n assert alice_k == k", "def generateKeys(bits=256):\n #print \"generating first prime number\"\n p = generatePrime(bits/2)\n #print \"generating second prime number\"\n q = generatePrime(bits/2)\n \n assert p != q\n #print p, \"\\n\", q\n assert gcd(p*q, (p-1)*(q-1)) == 1\n \n priv = PrivateKey(p, q)\n pub = PublicKey(p, q)\n \n priv.saveToFile()\n pub.saveToFile()\n \n return priv, pub", "def genSecret(self, privateKey, otherKey):\n if(self.checkPublicKey(otherKey) is True):\n sharedSecret = pow(otherKey, privateKey, self.prime)\n return sharedSecret\n else:\n raise Exception(\"Invalid public key.\")", "def get_shared_key(public, private, p):\n s = pow(public, private, p)\n s_hex = hex(s)[2:]\n # Make the length of s_hex a multiple of 2\n if len(s_hex) % 2 != 0:\n s_hex = '0' + s_hex\n # Convert hex to bytes\n s_bytes = binascii.unhexlify(s_hex)\n # Hash and return the hex result\n return sha256(s_bytes).digest()", "def get_public_compressed_curve_point(private_key):\n encoded_point = private_key.public_key().public_numbers().encode_point()\n return base64.b64encode(encoded_point)", "def extractPkScriptAddrs(version, pkScript, netParams):\n if version != 0:\n raise 
DecredError(\"invalid script version\")\n\n # Check for pay-to-pubkey-hash script.\n pkHash = extractPubKeyHash(pkScript)\n if pkHash:\n return PubKeyHashTy, pubKeyHashToAddrs(pkHash, netParams), 1\n\n # Check for pay-to-script-hash.\n scriptHash = extractScriptHash(pkScript)\n if scriptHash:\n return ScriptHashTy, scriptHashToAddrs(scriptHash, netParams), 1\n\n # Check for pay-to-alt-pubkey-hash script.\n data, sigType = extractPubKeyHashAltDetails(pkScript)\n if data:\n addrs = [addrlib.AddressPubKeyHash(data, netParams, sigType)]\n return PubkeyHashAltTy, addrs, 1\n\n # Check for pay-to-pubkey script.\n data = extractPubKey(pkScript)\n if data:\n pk = Curve.parsePubKey(data)\n addrs = [addrlib.AddressSecpPubKey(pk.serializeCompressed(), netParams)]\n return PubKeyTy, addrs, 1\n\n # Check for pay-to-alt-pubkey script.\n pk, sigType = extractPubKeyAltDetails(pkScript)\n if pk:\n raise NotImplementedError(\"only secp256k1 signatures are currently supported\")\n # addrs = []\n # if sigType == dcrec.STEd25519:\n # addr = addrlib.NewAddressEdwardsPubKey(pk, netParams)\n # addrs.append(addr)\n\n # elif sigType == crypto.STSchnorrSecp256k1:\n # addr = addrlib.NewAddressSecSchnorrPubKey(pk, netParams)\n # addrs.append(addr)\n\n # return PubkeyAltTy, addrs, 1\n\n # Check for multi-signature script.\n details = extractMultisigScriptDetails(version, pkScript, True)\n if details.valid:\n # Convert the public keys while skipping any that are invalid.\n addrs = []\n for encodedPK in details.pubKeys:\n pk = Curve.parsePubKey(encodedPK)\n addrs.append(addrlib.AddressSecpPubKey(pk.serializeCompressed(), netParams))\n return MultiSigTy, addrs, details.requiredSigs\n\n # Check for stake submission script. Only stake-submission-tagged\n # pay-to-pubkey-hash and pay-to-script-hash are allowed.\n pkHash = extractStakePubKeyHash(pkScript, opcode.OP_SSTX)\n if pkHash:\n return StakeSubmissionTy, pubKeyHashToAddrs(pkHash, netParams), 1\n scriptHash = extractStakeScriptHash(pkScript, opcode.OP_SSTX)\n if scriptHash:\n return StakeSubmissionTy, scriptHashToAddrs(scriptHash, netParams), 1\n\n # Check for stake generation script. Only stake-generation-tagged\n # pay-to-pubkey-hash and pay-to-script-hash are allowed.\n pkHash = extractStakePubKeyHash(pkScript, opcode.OP_SSGEN)\n if pkHash:\n return StakeGenTy, pubKeyHashToAddrs(pkHash, netParams), 1\n scriptHash = extractStakeScriptHash(pkScript, opcode.OP_SSGEN)\n if scriptHash:\n return StakeGenTy, scriptHashToAddrs(scriptHash, netParams), 1\n\n # Check for stake revocation script. Only stake-revocation-tagged\n # pay-to-pubkey-hash and pay-to-script-hash are allowed.\n pkHash = extractStakePubKeyHash(pkScript, opcode.OP_SSRTX)\n if pkHash:\n return StakeRevocationTy, pubKeyHashToAddrs(pkHash, netParams), 1\n scriptHash = extractStakeScriptHash(pkScript, opcode.OP_SSRTX)\n if scriptHash:\n return StakeRevocationTy, scriptHashToAddrs(scriptHash, netParams), 1\n\n # Check for stake change script. 
Only stake-change-tagged\n # pay-to-pubkey-hash and pay-to-script-hash are allowed.\n pkHash = extractStakePubKeyHash(pkScript, opcode.OP_SSTXCHANGE)\n if pkHash:\n return StakeSubChangeTy, pubKeyHashToAddrs(pkHash, netParams), 1\n scriptHash = extractStakeScriptHash(pkScript, opcode.OP_SSTXCHANGE)\n if scriptHash:\n return StakeSubChangeTy, scriptHashToAddrs(scriptHash, netParams), 1\n\n # Check for null data script.\n if isNullDataScript(version, pkScript):\n # Null data transactions have no addresses or required signatures.\n return NullDataTy, [], 0\n\n # Don't attempt to extract addresses or required signatures for nonstandard\n # transactions.\n return NonStandardTy, [], 0", "def public_from_private(self, private_key):", "def extractPubKeyAltDetails(script):\n # A pay-to-alt-pubkey script is of the form:\n # PUBKEY SIGTYPE OP_CHECKSIGALT\n #\n # The only two currently supported alternative signature types are ed25519\n # and schnorr + secp256k1 (with a compressed pubkey).\n #\n # OP_DATA_32 <32-byte pubkey> <1-byte ed25519 sigtype> OP_CHECKSIGALT\n # OP_DATA_33 <33-byte pubkey> <1-byte schnorr+secp sigtype> OP_CHECKSIGALT\n\n # The script can't possibly be a pay-to-alt-pubkey script if it doesn't\n # end with OP_CHECKSIGALT or have at least two small integer pushes\n # preceding it (although any reasonable pubkey will certainly be larger).\n # Fail fast to avoid more work below.\n if len(script) < 3 or script[-1] != opcode.OP_CHECKSIGALT:\n return None, 0\n\n if (\n len(script) == 35\n and script[0] == opcode.OP_DATA_32\n and isSmallInt(script[33])\n and asSmallInt(script[33]) == crypto.STEd25519\n ):\n return script[1:33], crypto.STEd25519\n\n if (\n len(script) == 36\n and script[0] == opcode.OP_DATA_33\n and isSmallInt(script[34])\n and asSmallInt(script[34]) == crypto.STSchnorrSecp256k1\n and isStrictPubKeyEncoding(script[1:34])\n ):\n return script[1:34], crypto.STSchnorrSecp256k1\n\n return None, 0", "def pubKeyHashToAddrs(pkHash, netParams):\n return [addrlib.AddressPubKeyHash(pkHash, netParams, crypto.STEcdsaSecp256k1)]", "def privtopub(private_key_bin: bytes) -> bytes:\n raw_pubkey = privatekey_to_publickey(private_key_bin)\n assert raw_pubkey.startswith(b'\\x04')\n return raw_pubkey[1:]", "def getNUMS(index=0):\n\n assert index in range(256)\n nums_point = None\n for G in [getG(True), getG(False)]:\n seed = G + chr(index)\n for counter in range(256):\n seed_c = seed + chr(counter)\n hashed_seed = hashlib.sha256(seed_c).digest()\n #Every x-coord on the curve has two y-values, encoded\n #in compressed form with 02/03 parity byte. 
We just\n #choose the former.\n claimed_point = \"\\x02\" + hashed_seed\n try:\n nums_point = secp256k1.PublicKey(claimed_point, raw=True, ctx=ctx)\n return nums_point\n except:\n continue\n assert False, \"It seems inconceivable, doesn't it?\" # pragma: no cover", "def test_privtopub(self):\n self.assertEqual(\n arithmetic.privtopub(sample_privatesigningkey),\n hexlify(sample_pubsigningkey)\n )\n self.assertEqual(\n arithmetic.privtopub(sample_privateencryptionkey),\n hexlify(sample_pubencryptionkey)\n )", "def make_final_key(prime, public, private):\n\n key = (public ** private) % prime\n return key", "def alt_stubbed_receiver() -> PublicKey:\n return PublicKey(\"J3dxNj7nDRRqRRXuEMynDG57DkZK4jYRuv3Garmb1i98\")", "def sendPublicKey(g, p, s):\r\n status = \"120 PubKey \" + str(computePublicKey(g, p, s))\r\n return status", "def private_key_to_public_key(private_key):\n\tpk = PrivateKey().fromString(bytes.fromhex(private_key))\n\treturn '04' + pk.publicKey().toString().hex().upper()", "def generate_ecc_public_key(private_key: EllipticCurvePrivateKeyWithSerialization) -> EllipticCurvePublicKey:\n return private_key.public_key()", "def _get_pubkey_from_der_private_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n privkey = serialization.load_der_private_key(filedata, None, backend=backend)\n return privkey.public_key(), None\n except Exception:\n return None, None", "def checkPublicKey(self, otherKey):\n if(otherKey > 2 and otherKey < self.prime - 1):\n if(pow(otherKey, (self.prime - 1)//2, self.prime) == 1):\n return True\n return False", "def from_public_parts(self, x: bytes, y: bytes):\n return asymmetric.ec.EllipticCurvePublicNumbers(\n int.from_bytes(x, 'big'),\n int.from_bytes(y, 'big'),\n asymmetric.ec.SECP256R1()\n ).public_key()" ]
[ "0.59067875", "0.5829378", "0.5765356", "0.5526576", "0.5433772", "0.54143405", "0.5405053", "0.53853583", "0.53798145", "0.53798145", "0.5323323", "0.53160423", "0.528461", "0.5262551", "0.5256528", "0.5253128", "0.5232929", "0.5206507", "0.52053124", "0.51826376", "0.51765466", "0.51599175", "0.5157904", "0.51577896", "0.5141709", "0.5126116", "0.51162547", "0.50973654", "0.50952107", "0.5053405" ]
0.64084935
0
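The getP2 document above multiplies a private key by an arbitrary curve point rather than the standard generator. The sketch below shows one plausible way it would be called in a PoDLE setting, pairing it with a getNUMS-style helper like the one quoted in the negatives of this row; both that helper and the python-secp256k1 usage are illustrative assumptions, not part of the dataset row.

import secp256k1

def example_podle_points():
    # Ephemeral keypair on the standard generator: P = priv*G.
    priv = secp256k1.PrivateKey()
    P = priv.pubkey
    # Alternate NUMS generator J (assumed helper, cf. getNUMS in the negatives above).
    J = getNUMS(0)
    # Second point P2 = priv*J, as computed by the document function above.
    P2 = getP2(priv, J)
    return P, P2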
To allow external functions to add PoDLE commitments that were calculated elsewhere;
def add_external_commitments(ecs):
    update_commitments(external_to_add=ecs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_commitments(commitment=None, external_to_remove=None,\n external_to_add=None):\n c = {}\n if os.path.isfile(PODLE_COMMIT_FILE):\n with open(PODLE_COMMIT_FILE, \"rb\") as f:\n try:\n c = json.loads(f.read())\n except ValueError:\n print \"the file: \" + PODLE_COMMIT_FILE + \" is not valid json.\"\n sys.exit(0)\n\n if 'used' in c:\n commitments = c['used']\n else:\n commitments = []\n if 'external' in c:\n external = c['external']\n else:\n external = {}\n if commitment:\n commitments.append(commitment)\n #remove repeats\n commitments = list(set(commitments))\n if external_to_remove:\n external = {\n k: v for k, v in external.items() if k not in external_to_remove}\n if external_to_add:\n external.update(external_to_add)\n to_write = {}\n to_write['used'] = commitments\n to_write['external'] = external\n with open(PODLE_COMMIT_FILE, \"wb\") as f:\n f.write(json.dumps(to_write, indent=4))", "def _do_commit(self):", "def addChange(change):", "def addChange(change):", "def hotfix():\n\tprint \"In a Sweat.yet !\"\n\tlocal(\"git commit -a -m\")\n\n\td_ploy()", "def changeAdded(change):", "def commit(self):", "def add_commit(repo, cfg, model, developer_gen, date):\n model, kwargs = model_note_change(model, developer_gen, date)\n msg = message_of(\n cfg, model.ticket if model.ticket is not None else \"\", \"general_commit_words\"\n )\n repo.index.commit(msg, **kwargs)\n return repo, model", "def prepare_for_commit(self):", "def add(self, bento_name, bento_version):", "def stage_changes(c):\n c.run(f\"git add -u\")", "def maybe_commit(job):", "def svn_client_commit_item3_create(apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def contract_pepo_pbc():\n pass", "def patch_repos(self):", "def exo2():", "def commit_patch_to_code_metrics_vector(commit_id, commit_patch):\n # init vars\n # bad var names, since those are not addition/removal\n nb_added_if, nb_removed_if = 0, 0\n nb_added_loop, nb_removed_loop = 0, 0\n nb_added_file, nb_removed_file = 0, 0\n nb_added_function, nb_removed_function = 0, 0\n nb_added_paren, nb_removed_paren = 0, 0\n nb_added_bool, nb_removed_bool = 0, 0\n nb_added_assignement, nb_removed_assignement = 0, 0\n nb_added_break, nb_removed_break = 0, 0\n nb_added_sizeof, nb_removed_sizeof = 0, 0\n nb_added_return, nb_removed_return = 0, 0\n nb_added_continue, nb_removed_continue = 0, 0\n nb_added_INTMAX, nb_removed_INTMAX = 0, 0\n nb_added_goto, nb_removed_goto = 0, 0\n nb_added_define, nb_removed_define = 0, 0\n nb_added_struct, nb_removed_struct = 0, 0\n nb_added_void, nb_removed_void = 0, 0\n nb_added_offset, nb_removed_offset = 0, 0\n nb_added_line, nb_removed_line = 0, 0\n # FIXME:\n kbp = 0\n kbn = 0\n\n for line in commit_patch:\n if line.startswith(\"+\"):\n nb_added_line += 1\n if \"if (\" in line:\n nb_added_if += 1\n if (\"for (\" in line) or (\"while (\" in line):\n nb_added_loop += 1\n if line.startswith('+++'):\n # a file is modified pay this patch\n nb_added_file += 1\n if any(\n i in line for i in\n [\"int\", \"static\", \"void\", \"float\", \"char\", \"char*\", \"string\"]\n ):\n if \"(\" in line and \")\" in line:\n nb_added_function += 1\n\n if (\"(\" in line) and (\")\" in line):\n # parenthesis expr detection\n nb_added_paren += 1\n if any(i in line for i in [\"||\", \"&&\", \"!\"]):\n # Boolean operator\n if \"!=\" in line:\n # FIXME: What does kbp mean ???\n kbp += 1\n nb_added_bool += 1\n nb_added_assignement += sum([1 for char in line if char == '='])\n if \"sizeof\" in line:\n nb_added_sizeof += 1\n if \"break\" in 
line:\n nb_added_break += 1\n if \"return\" in line:\n nb_added_return += 1\n if \"continue\" in line:\n nb_added_continue += 1\n if \"int max\" in line:\n nb_added_INTMAX += 1\n if \"goto\" in line:\n nb_added_goto += 1\n if \"#define\" in line:\n nb_added_define += 1\n if \"struct\" in line:\n nb_added_struct += 1\n if \"void\" in line:\n nb_added_void += 1\n if (\"offset =\" in line) or (\"offset=\" in line):\n nb_added_offset += 1\n # same thing, but for removal\n nb_removed_line += 1\n if line.startswith(\"-\"):\n if \"if (\" in line:\n nb_removed_if += 1\n if \"sizeof\" in line:\n nb_removed_sizeof += 1\n if \"break\" in line:\n nb_removed_break += 1\n if \"return\" in line:\n nb_removed_return += 1\n if \"continue\" in line:\n nb_removed_continue += 1\n if \"int max\" in line:\n nb_removed_INTMAX += 1\n if \"goto\" in line:\n nb_removed_goto += 1\n if \"#define\" in line:\n nb_removed_define += 1\n if \"struct\" in line:\n nb_removed_struct += 1\n if \"void\" in line:\n nb_removed_void += 1\n if \"offset =\" in line:\n nb_removed_offset += 1\n if (\"for (\" in line) or (\"while (\" in line):\n nb_removed_loop += 1\n if line.startswith('---'):\n nb_removed_file += 1\n if any(\n i in line for i in [\n \"int\", \"static\", \"void\", \"float\", \"char\", \"char*\",\n \"string\"\n ]\n ):\n if \"(\" in line and \")\" in line:\n nb_removed_function += 1\n if (\"(\" in line) and (\")\" in line):\n nb_removed_paren += 1\n if any(i in line for i in [\"||\", \"&&\", \"!\"]):\n if \"!=\" in line:\n # FIXME: What does kbn mean ???\n kbn = kbn + 1\n nb_removed_bool += 1\n nb_removed_assignement += sum(\n [1 for char in line if char == '=']\n )\n f1 = nb_added_if - nb_removed_if\n f2 = nb_added_loop - nb_removed_loop\n f3 = nb_added_line - nb_removed_line\n f4 = nb_added_file - nb_removed_file\n f5 = nb_added_function - nb_removed_function\n f6 = nb_added_paren - nb_removed_paren\n f7 = nb_added_bool - nb_removed_bool\n f8 = nb_added_assignement - nb_removed_assignement\n f17 = nb_added_struct - nb_removed_struct\n f19 = nb_added_void - nb_removed_void\n f21 = nb_added_offset - nb_removed_offset\n\n f9 = nb_added_if + nb_removed_if\n f10 = nb_added_loop + nb_removed_loop\n f11 = nb_added_line + nb_removed_line\n f12 = nb_added_file + nb_removed_file\n f13 = nb_added_function + nb_removed_function\n f14 = nb_added_paren + nb_removed_paren\n f15 = nb_added_bool + nb_removed_bool\n f16 = nb_added_assignement + nb_removed_assignement\n f18 = nb_added_struct + nb_removed_struct\n f20 = nb_added_void + nb_removed_void\n f22 = nb_added_offset + nb_removed_offset\n\n row = [\n commit_id,\n nb_added_line,\n nb_removed_line,\n nb_added_if,\n nb_removed_if,\n nb_added_loop,\n nb_removed_loop,\n nb_added_file,\n nb_removed_file,\n nb_added_function,\n nb_removed_function,\n nb_added_paren,\n nb_removed_paren,\n nb_added_bool,\n nb_removed_bool, # FIXME: see FIXME: kbn and kbpS\n nb_added_assignement,\n nb_removed_assignement,\n f1,\n f2,\n f3,\n f4,\n f5,\n f6,\n f7,\n f8,\n f9,\n f10,\n f11,\n f12,\n f13,\n f14,\n f15,\n f16,\n nb_added_offset,\n nb_removed_offset,\n nb_added_return,\n nb_removed_return,\n nb_added_break,\n nb_removed_break,\n nb_added_continue,\n nb_removed_continue,\n nb_added_INTMAX,\n nb_removed_INTMAX,\n nb_added_define,\n nb_removed_define,\n nb_added_struct,\n nb_removed_struct,\n nb_added_void,\n nb_removed_void,\n nb_added_offset,\n nb_removed_offset,\n f17,\n f18,\n f19,\n f20,\n f21,\n f22\n ]\n return row", "def commit(self):\n # PEP 249\n pass", "def commit(self):\n pass", "def 
commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def svn_client_commit_item_create(svn_client_commit_item3_t_item, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def mergenotes():", "def main():\n print(\"Hello World!!!\")\n print(\"Commit to DLNA\")\n print(\"Another commit\")\n print(\"Commit after PR merge\")", "def add_to_revision_data_points(new_point, revision, revision_data_points):\n if (revision not in revision_data_points):\n revision_data_points[revision] = []\n revision_data_points[revision].append(new_point)", "def svn_client_commit_item2_dup(svn_client_commit_item2_t_item, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def add(obj):", "def add_commit( self\n , cl\n , mark_number\n , parent_commit_list\n , first_branch_from_branch_id\n , first_branch_from_change_number\n , dest_branch\n , branch_name):\n with Timer(OVERALL):\n with Timer(BUILD):\n self.__append(NTR('commit refs/heads/{0}\\n').format(branch_name))\n self.__append(NTR('mark : {0}\\n').format(mark_number))\n desc_info = DescInfo.from_text(cl.description)\n committer_added = False\n if desc_info:\n for key in ('author', 'committer'):\n v = desc_info[key]\n if v:\n self.__append(NTR('{key} {fullname} {email} {time} {timezone}\\n').\n format( key = key\n , fullname = v['fullname']\n , email = v['email' ]\n , time = v['time' ]\n , timezone = v['timezone']))\n committer_added = True\n desc = desc_info.clean_desc\n\n # Convoluted logic gates but avoids duplicating code. The point\n # is that we add the best possible committer data _before_\n # adding the description.\n if not committer_added:\n if desc_info:\n # old change description that lacked detailed author info,\n # deserves a warning, but otherwise push onward even if the\n # commit checksums will likely differ from the originals\n LOG.warn('commit description did not match committer regex: @{} => {}'.\n format(cl.change, desc_info.suffix))\n timezone = self.__get_timezone_offset(cl.time)\n self.__append(NTR('committer {fullname} {email} {time} {timezone}\\n').\n format(fullname=self.__full_name_for_user(cl.user),\n email=self.__email_for_user(cl.user),\n time=cl.time,\n timezone=timezone))\n desc = cl.description\n self.__add_data(desc)\n\n self._add_commit_parent_list(parent_commit_list)\n if first_branch_from_branch_id \\\n and first_branch_from_change_number:\n self.__branch_from( dest_branch\n , cl\n , first_branch_from_branch_id\n , first_branch_from_change_number)\n self.__add_files(cl.files)\n if desc_info and desc_info.gitlinks:\n self.__add_gitlinks(desc_info.gitlinks)" ]
[ "0.6084426", "0.6054412", "0.5841496", "0.5841496", "0.5827327", "0.5769322", "0.5695153", "0.5672127", "0.55997235", "0.5597223", "0.55949086", "0.5594832", "0.5516829", "0.54900813", "0.54555416", "0.53672564", "0.53643566", "0.53592396", "0.535497", "0.535497", "0.535497", "0.535497", "0.535497", "0.53247637", "0.531596", "0.52671117", "0.52567667", "0.5237143", "0.51914454", "0.51825064" ]
0.6456808
0
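The one-line document above simply forwards a dict of externally generated commitments into update_commitments. Judging from how generate_podle (a few rows below) reads these entries back — ec['P'] and ec['reveal'][i]['P2'|'s'|'e'], keyed by a utxo string — a call could look like the following sketch; every hex value is a placeholder, not real data.

external = {
    "ab" * 32 + ":0": {                 # utxo in txid:n form (placeholder)
        "P": "02" + "11" * 32,          # commitment point, hex (placeholder)
        "reveal": {
            "0": {"P2": "03" + "22" * 32, "s": "33" * 32, "e": "44" * 32},
        },
    },
}
add_external_commitments(external)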
Optionally add the commitment commitment to the list of 'used', and optionally remove the available external commitment whose key value is the utxo in external_to_remove, persist updated entries to disk.
def update_commitments(commitment=None, external_to_remove=None,
                       external_to_add=None):
    c = {}
    if os.path.isfile(PODLE_COMMIT_FILE):
        with open(PODLE_COMMIT_FILE, "rb") as f:
            try:
                c = json.loads(f.read())
            except ValueError:
                print "the file: " + PODLE_COMMIT_FILE + " is not valid json."
                sys.exit(0)

    if 'used' in c:
        commitments = c['used']
    else:
        commitments = []
    if 'external' in c:
        external = c['external']
    else:
        external = {}
    if commitment:
        commitments.append(commitment)
        #remove repeats
        commitments = list(set(commitments))
    if external_to_remove:
        external = {
            k: v for k, v in external.items() if k not in external_to_remove}
    if external_to_add:
        external.update(external_to_add)
    to_write = {}
    to_write['used'] = commitments
    to_write['external'] = external
    with open(PODLE_COMMIT_FILE, "wb") as f:
        f.write(json.dumps(to_write, indent=4))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_external_commitments(ecs):\n update_commitments(external_to_add=ecs)", "def commit(self):\n\t\t## Loops through ALL items\n\t\tfor k in self.data.keys():\n\t\t\tfor item in self[k]:\n\n\t\t\t\t## If the object needs committing, commit it!\n\t\t\t\tif item['meta']['needs_commit']:\n\t\t\t\t\t## Create file contents as an empty string\n\t\t\t\t\tfile_contents = \"\"\n\n\t\t\t\t\t## find any other items that may share this config file\n\t\t\t\t\textra_items = self._get_items_in_file(item['meta']['filename'])\n\t\t\t\t\tif len(extra_items) > 0:\n\t\t\t\t\t\tfor commit_item in extra_items:\n\t\t\t\t\t\t\t## Ignore files that are already set to be deleted:w\n\t\t\t\t\t\t\tif commit_item['meta']['delete_me']:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t## Make sure we aren't adding this thing twice\n\t\t\t\t\t\t\tif item != commit_item:\n\t\t\t\t\t\t\t\tfile_contents += self.print_conf(commit_item)\n\n\t\t\t\t\t## This is the actual item that needs commiting\n\t\t\t\t\tif not item['meta']['delete_me']:\n\t\t\t\t\t\tfile_contents += self.print_conf(item)\n\n\t\t\t\t\t## Write the file\n\t\t\t\t\tf = open(item['meta']['filename'], 'w')\n\t\t\t\t\tf.write(file_contents)\n\t\t\t\t\tf.close()\n\n\t\t\t\t\t## Recreate the item entry without the commit flag\n\t\t\t\t\tself.data[k].remove(item)\n\t\t\t\t\titem['meta']['needs_commit'] = None\n\t\t\t\t\tself.data[k].append(item)", "def git_removed_files(self):\n\n etc_tracked = self.repo.tracked_files('etc-tmp')\n for rpath in etc_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.etc_commits.removed.rpaths.append(rpath)\n self.etc_commits.removed.commit()\n\n master_tracked = self.repo.tracked_files('master-tmp')\n for rpath in master_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.master_commits.removed.rpaths.append(rpath)\n self.master_commits.removed.commit()", "def _update_provider_details_without_commit(provider_details):\n provider_details.version += 1\n provider_details.updated_at = datetime.utcnow()\n history = ProviderDetailsHistory.from_original(provider_details)\n db.session.add(provider_details)\n db.session.add(history)", "def clean_for_commit(self):", "def add(self, transaction, markers):\n if not _enabled:\n raise util.Abort('obsolete feature is not enabled on this repo')\n new = [m for m in markers if m not in self._all]\n if new:\n f = self.sopener('obsstore', 'ab')\n try:\n # Whether the file's current position is at the begin or at\n # the end after opening a file for appending is implementation\n # defined. So we must seek to the end before calling tell(),\n # or we may get a zero offset for non-zero sized files on\n # some platforms (issue3543).\n f.seek(0, _SEEK_END)\n offset = f.tell()\n transaction.add('obsstore', offset)\n # offset == 0: new file - add the version header\n for bytes in _encodemarkers(new, offset == 0):\n f.write(bytes)\n finally:\n # XXX: f.close() == filecache invalidation == obsstore rebuilt.\n # call 'filecacheentry.refresh()' here\n f.close()\n self._load(new)\n # new marker *may* have changed several set. 
invalidate the cache.\n self.caches.clear()\n return len(new)", "def remove_contribution(project_info):\n owner = user_collection.find_one({\"_id\": project_info[\"OWNER_ID\"]})\n incoming_list = owner[\"incoming\"]\n incoming_list.remove(\n {\n \"user_id\": project_info[\"USER_ID\"],\n \"project_id\": project_info[\"PROJECT_ID\"],\n }\n )\n user_collection.find_one_and_update(\n {\"_id\": project_info[\"OWNER_ID\"]},\n {\n \"$set\": {\n \"incoming\": incoming_list,\n }\n },\n upsert=False,\n )\n user = user_collection.find_one({\"_id\": project_info[\"USER_ID\"]})\n user_outgoing = user[\"outgoing\"]\n user_outgoing.remove(project_info[\"PROJECT_ID\"])\n user_collection.find_one_and_update(\n {\"_id\": project_info[\"USER_ID\"]},\n {\n \"$set\": {\n \"outgoing\": user_outgoing,\n }\n },\n upsert=False,\n )", "def _create_utxo_block_unlocked(r,\n curr_block: Block,\n referenced_txos: Set[bytes],\n new_utxos: Mapping[bytes, bytes]) -> None:\n utxo_prev_block = r.dump(\"blockchain:utxo-block:\".encode() + curr_block.previous_hash)\n r.restore(name=\"blockchain:utxo-block:\".encode() + curr_block.current_hash,\n ttl=0,\n value=utxo_prev_block,\n replace=True)\n r.hdel(\"blockchain:utxo-block:\".encode() + curr_block.current_hash, *referenced_txos)\n r.hmset(\"blockchain:utxo-block:\".encode() + curr_block.current_hash, new_utxos)", "def load_committees(collection, committees):\n collection.remove()\n for c in committees:\n if not collection.find_one({\"committee_id\": c[\"committee_id\"]}):\n collection.insert(c)", "def potentials(self, potential_list):\n for item in potential_list:\n item.store()\n potential_list_uuids = [item.uuid for item in potential_list]\n self.set_attribute('potentials', potential_list_uuids)", "def commit(self, amount=None):\n if amount is None:\n amount = self.autocommit_amount\n\n self.autocommit_amount -= amount\n\n del self.buff[0:amount]", "def check_utxo_blacklist(commitment, persist=False):\n #TODO format error checking?\n fname = \"blacklist\"\n if jm_single().config.get(\"BLOCKCHAIN\", \"blockchain_source\") == 'regtest':\n fname += \"_\" + jm_single().nickname\n with jm_single().blacklist_file_lock:\n if os.path.isfile(fname):\n with open(fname, \"rb\") as f:\n blacklisted_commitments = [x.strip() for x in f.readlines()]\n else:\n blacklisted_commitments = []\n if commitment in blacklisted_commitments:\n return False\n elif persist:\n blacklisted_commitments += [commitment]\n with open(fname, \"wb\") as f:\n f.write('\\n'.join(blacklisted_commitments))\n f.flush()\n #If the commitment is new and we are *not* persisting, nothing to do\n #(we only add it to the list on sending io_auth, which represents actual\n #usage).\n return True", "def commit(self):\n for user_name, user in self._users.items():\n self._execute(\n \"UPDATE users \\\n SET credits = ? 
\\\n WHERE name == ?\",\n (user.credits, user_name)\n )\n for item_name, amount in user.items.items():\n self._execute(\n \"REPLACE INTO users_items VALUES (?, ?, ?)\",\n (user_name, item_name, amount)\n )", "def remove(self, egg):", "def test_heads_delitem_pass(repository: Repository) -> None:\n head, heads = repository.head, repository.heads\n heads[\"branch\"] = head.commit\n del heads[\"branch\"]\n assert \"branch\" not in heads", "def _remove_unused_assets(write_cursor: 'DBCursor') -> None:\n log.debug('Deleting unused asset ids')\n write_cursor.execute(\"\"\"\n WITH unique_assets AS (SELECT DISTINCT asset FROM(\n SELECT currency AS asset FROM timed_balances UNION\n SELECT asset1 AS asset FROM aave_events UNION\n SELECT asset2 AS asset FROM aave_events UNION\n SELECT from_asset AS asset FROM yearn_vaults_events UNION\n SELECT to_asset AS asset FROM yearn_vaults_events UNION\n SELECT asset FROM manually_tracked_balances UNION\n SELECT base_asset AS asset FROM trades UNION\n SELECT quote_asset AS asset FROM trades UNION\n SELECT fee_currency AS asset FROM trades UNION\n SELECT pl_currency AS asset FROM margin_positions UNION\n SELECT fee_currency AS asset FROM margin_positions UNION\n SELECT asset FROM asset_movements UNION\n SELECT fee_asset AS asset FROM asset_movements UNION\n SELECT asset FROM ledger_actions UNION\n SELECT rate_asset AS asset FROM ledger_actions UNION\n SELECT token0_identifier AS asset FROM amm_events UNION\n SELECT token1_identifier AS asset FROM amm_events UNION\n SELECT token AS asset FROM adex_events UNION\n SELECT pool_address_token AS asset FROM balancer_events UNION\n SELECT identifier AS asset FROM nfts UNION\n SELECT last_price_asset AS asset FROM nfts UNION\n SELECT asset from history_events\n ) WHERE asset IS NOT NULL)\n DELETE FROM assets WHERE identifier NOT IN unique_assets AND identifier IS NOT NULL\n \"\"\")", "def remove_not_added(target, xtal_list):\n all_prots = Protein.objects.filter(target_id=target)\n # make sure not to delete any of the computed set proteins (which are protected)\n computed_prots = [mol.pdb for mol in ComputedMolecule.objects.filter(pdb__target_id=target)]\n unprotected = [x for x in all_prots if x not in computed_prots]\n\n for prot in unprotected:\n # Code consists of 'directory:alternate_name' if exists (code is renamed based on the metadata)\n code_first_part = prot.code.split(\":\")[0]\n if code_first_part not in xtal_list:\n prot.delete()\n return None", "def test_remove_fellow_from_organization_success(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Valid Requestor', email='[email protected]',\n user_type='user',\n owned_organizations=['Organization_foo'])\n user.put()\n req.put()\n\n # Successful removal.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n )\n self.assertEqual(json.loads(response.body)['owned_organizations'], [])\n\n # Changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(fetched_user.owned_organizations, [])\n self.assertEqual(user.user_type, fetched_user.user_type)", "def remove():", "def push_ele(self, ele, cleanup = True):\n\t\tif cleanup and self._contains_id(ele.id):\n\t\t\tself._delete_id(ele.id)\n\t\tfiles = ele.get_completed_files()\n\t\tif len(files)>0 and not any(False == files[key] for key 
in files):\n\t\t\tself._completed.append(ele.to_obj())\n\t\t\treturn True\n\t\telse:\n\t\t\tself._failed.append(ele.to_obj())\n\t\t\treturn False", "def delete_committer(self, name: str) -> None:\n for index, committer in enumerate(self._info[\"committers\"]):\n if committer[\"name\"] == name:\n del self._info[\"committers\"][index]\n return\n raise ValueError(f\"Committer {name} is not on the committer list\")", "def svn_client_remove_from_changelists(apr_array_header_t_paths, svn_depth_t_depth, apr_array_header_t_changelists, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def on_remove(self, event, prompt=True):\n if prompt:\n msg = \"This operation will delete the data sets checked \"\n msg += \"and all the dependents.\"\n msg_box = wx.MessageDialog(None, msg, 'Warning', wx.OK|wx.CANCEL)\n if msg_box.ShowModal() != wx.ID_OK:\n return\n\n data_to_remove, theory_to_remove, _ = self.set_data_helper()\n data_key = []\n theory_key = []\n # remove data from treectrl\n for d_key, item in self.list_cb_data.iteritems():\n data_c, _, _, _, _, _, _, _ = item\n if data_c.IsChecked():\n self.tree_ctrl.Delete(data_c)\n data_key.append(d_key)\n if d_key in self.list_cb_theory.keys():\n theory_list_ctrl = self.list_cb_theory[d_key]\n theory_to_remove += theory_list_ctrl.keys()\n # Remove theory from treectrl\n for _, theory_dict in self.list_cb_theory.iteritems():\n for key, value in theory_dict.iteritems():\n item, _, _ = value\n if item.IsChecked():\n try:\n self.tree_ctrl.Delete(item)\n except:\n pass\n theory_key.append(key)\n\n # Remove data and related theory references\n for key in data_key:\n del self.list_cb_data[key]\n if key in theory_key:\n del self.list_cb_theory[key]\n # remove theory references independently of data\n for key in theory_key:\n for _, theory_dict in self.list_cb_theory.iteritems():\n if key in theory_dict:\n for key, value in theory_dict.iteritems():\n item, _, _ = value\n if item.IsChecked():\n try:\n self.tree_ctrl_theory.Delete(item)\n except:\n pass\n del theory_dict[key]\n\n self.parent.remove_data(data_id=data_to_remove,\n theory_id=theory_to_remove)\n self.enable_remove()\n self.enable_freeze()\n self.enable_remove_plot()", "def removeCommitted(self, repos):\n newTroveInfoList = [ x.getNewNameVersionFlavor() for x in\n self.iterNewTroveList() if x.getNewVersion()\n is not None ]\n present = repos.hasTroves(newTroveInfoList)\n\n for (newTroveInfo, isPresent) in present.iteritems():\n if isPresent:\n self.delNewTrove(*newTroveInfo)\n\n if self.newTroves:\n return True\n\n return False", "def update_stock_info(self, entry, item_name, item_url, item_stock, item_cost):\n self.items_list.delete(entry)\n self.items_list.insert(\n \"\", \"end\", values=(item_name, item_url, item_stock, item_cost)\n )", "def _handleMarkernoChangedDelete(self):\n \n # Get previous markerno\n # update markerno's >prev_markerno to markerno + 1\n # update of_places set markerno = markerno + 1 where territoryno = '4-1-2' and markerno is not null\n x=0\n pass", "def _remove_extra_repositories(frozen_repos, ret, **kwargs):\n repos = __salt__[\"pkg.list_repos\"](**kwargs)\n extra_repos = set(repos) - set(frozen_repos)\n for repo in extra_repos:\n try:\n __salt__[\"pkg.del_repo\"](repo, **kwargs)\n ret[\"repos\"][\"remove\"].append(repo)\n log.info(\"Removed extra repository %s\", repo)\n except Exception as e: # pylint: disable=broad-except\n msg = \"Error removing %s repository: %s\"\n log.error(msg, repo, e)\n ret[\"comment\"].append(msg % (repo, 
e))", "def upsert_asset_tags(user_id, tag_id_list, commit=True):\n db_entries = NotificationAssetTag.find_all_with(user_id = user_id)\n already_in_db = set([row.tag_id for row in db_entries])\n for tag_id in tag_id_list:\n if tag_id not in already_in_db:\n db.session.add(NotificationAssetTag(user_id = user_id, tag_id = tag_id))\n if commit:\n db.session.commit()", "def _remove_from_weakref(self, tx: BaseTransaction) -> None:\n if self._tx_weakref_disabled:\n return\n assert tx.hash is not None\n self._tx_weakref.pop(tx.hash, None)", "def delete_self_ownership(self):\n current_ownership_list = self.msg.get_ownershipList()\n self.old_ownership_list = current_ownership_list\n for comp in self.deleted_comp_list:\n if comp in current_ownership_list:\n current_ownership_list.remove(comp)\n self.logger.debug(\"After removing transfer component ownership, \\\n new ownership: %s\" % current_ownership_list)\n self.msg.set_ownershipList(current_ownership_list)" ]
[ "0.58187145", "0.5580867", "0.50022554", "0.4993998", "0.4935813", "0.49136874", "0.4909131", "0.49072638", "0.4864383", "0.48420364", "0.48227012", "0.4786407", "0.47639194", "0.4718446", "0.46703878", "0.46602783", "0.46588057", "0.465066", "0.46503636", "0.46418327", "0.46223035", "0.46160343", "0.46076676", "0.45793855", "0.4578909", "0.45736748", "0.45604727", "0.4559401", "0.45385268", "0.45340914" ]
0.7458837
0
Given a list of privkeys, try to generate a PoDLE which is not yet used more than tries times.
def generate_podle(priv_utxo_pairs, tries=1, allow_external=None):
    used_commitments, external_commitments = get_podle_commitments()
    for priv, utxo in priv_utxo_pairs:
        for i in range(tries):
            #Note that we will return the *lowest* index
            #which is still available.
            p = PoDLE(u=utxo, priv=priv)
            c = p.generate_podle(i)
            if c['commit'] in used_commitments:
                continue
            #persist for future checks
            update_commitments(commitment=c['commit'])
            return c
    if allow_external:
        filtered_external = dict(
            [(x, external_commitments[x]) for x in allow_external])
        for u, ec in filtered_external.iteritems():
            #use as many as were provided in the file, up to a max of tries
            m = min([len(ec['reveal'].keys()), tries])
            for i in [str(x) for x in range(m)]:
                p = PoDLE(u=u, P=ec['P'], P2=ec['reveal'][i]['P2'],
                          s=ec['reveal'][i]['s'], e=ec['reveal'][i]['e'])
                if p.get_commitment() not in used_commitments:
                    update_commitments(commitment=p.get_commitment())
                    return p.reveal()
            #If none of the entries in the 'reveal' list for this external
            #commitment were available, they've all been used up, so
            #remove this entry
            if m == len(ec['reveal'].keys()):
                update_commitments(external_to_remove=u)
    #Failed to find any non-used valid commitment:
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_keys(g, o):\n priv = o.random()\n pub = priv * g\n\n return (priv, pub)", "def calc_pool(players):\n players = [str(x) for x in players]\n n = len(players)\n for player in players:\n nopool = payoff_nopool(p=percentages[player])\n print(nopool)\n p = {i: percentages[key] for i, key in zip([x for x in range(2, n+1)],\n [x for x in players if x != player])}\n p[1] = percentages[player]\n pool = payoff_n_p(p=p, n=n)\n print(pool)", "def gen_pass(*, pw_length=10, use_nums=True, use_special=True,\n no_dupes=False, no_ambiguous=True):\n # Build up desired population of characters\n charset = LETTERS\n if use_nums:\n charset += NUMS\n if use_special:\n charset += SPECIALS\n if no_ambiguous:\n charset = ''.join([x for x in charset if x not in AMBIGUOUS])\n\n if no_dupes:\n x, tmp = pw_length, []\n while x > 0:\n val = ''.join(random.sample(charset, 1))\n if val not in tmp:\n tmp.append(val)\n x -= 1\n return ''.join(tmp)\n else:\n return ''.join(random.sample(charset, pw_length))", "def generate_pairs(names, emails, seed=123):\n gift_givers = names\n gift_receivers = names\n reqs_met = False\n random.seed(seed)\n count = 0\n while not reqs_met:\n count += 1\n gift_receivers = random.sample(gift_receivers, len(gift_receivers))\n reqs_met = requirements(gift_givers, gift_receivers)\n if count > 100:\n print(\n \"*\" * 70,\n \"\\nTried over 100 times... Could not find a suitable match.\"\n \"\\nExiting ... Try again with a different seed?\",\n )\n sys.exit()\n break\n return gift_givers, emails, gift_receivers", "def generateKeys(filename: str=\"monRSA\", keylength: int=10):\n minn = int(\"1\".ljust(int(keylength/2), '0'))\n maxx = int(\"9\".ljust(int(keylength/2), '9'))\n if args.verbose : print(\"min max of the possble primes :\", minn, maxx)\n pos1 = randint(minn, maxx)\n pos2 = randint(minn, maxx)\n\n if args.verbose : print(\"position of the primes chosen :\", pos1, pos2)\n p = primesieve.nth_prime(pos1)\n q = primesieve.nth_prime(pos2)\n \n # fixed values used to generate my key paire (i don't care if you hack me)\n # p = primesieve.nth_prime(97885344)\n # q = primesieve.nth_prime(85785656)\n \n # smaller primes used for testing\n # p = nth_prime(1256)\n # q = nth_prime(1478)\n \n if args.verbose : print(\"p\", p)\n if args.verbose : print(\"q\", q)\n n = p*q\n if args.verbose : print(\"n\", n)\n if args.verbose : print(\"length\", len(str(n)))\n nn = (p-1)*(q-1)\n if args.verbose : print(\"nn\",nn)\n temp = genED(nn)\n e = temp[0]\n if args.verbose : print(\"e\",e)\n d = temp[1]\n if args.verbose : print(\"d\",d)\n ed = temp[2]\n if args.verbose : print(\"ed\",ed)\n generateKeyFile(n, e, \"public\", filename)\n generateKeyFile(n, d, \"private\", filename)", "def make_random_passphrase():\n import random\n prng = random.SystemRandom()\n templates = ['aababbab', 'aabbabab', 'aabbabba', 'abaabbab', 'abababab',\n 'abababba', 'ababbaab', 'ababbaba', 'abbaabab', 'abbaabba',\n 'abbabaab', 'abbababa', 'abbabbaa', 'baababab', 'baababba',\n 'baabbaab', 'baabbaba', 'babaabab', 'babaabba', 'bababaab',\n 'babababa', 'bababbaa', 'babbaaba', 'babbabaa']\n alphabet = {'a':\"aeiou\", 'b':list(\"bcdfghjklmnprsvwxyz\") + [\"ch\",\"ph\",\"st\"]}\n for n in (1,2,3):\n template = prng.choice(templates)\n password = \"\".join([prng.choice(alphabet[c]) for c in template])\n print password.capitalize() + prng.choice(\"0123456789\"),\n return 0", "def find_rand_pairs(stu_lst, blk_lst):\n bad_pair_dict = build_blacklist(blk_lst)\n stu_set = set(stu_lst)\n rand_pairs = []\n try:\n for stu, 
invalid_pairings in bad_pair_dict.items():\n if len(stu_set) == 0:\n return rand_pairs\n if len(stu_set) == 1:\n loner = stu_set.pop()\n print(loner, \" is loner :(\")\n rand_pairs.append(loner)\n return rand_pairs\n valid_stu_set = stu_set - invalid_pairings\n valid_partner = random.sample(valid_stu_set, 1)[0]\n rand_pairs.append((stu, valid_partner))\n stu_set -= {stu, valid_partner}\n except ValueError:\n print(\"No valid pairings. Too many blacklisted.\")\n return None", "def outer_loop_lp(self, profile, missed_winners):\r\n\r\n # Initialize\r\n stats = self.Stats()\r\n\r\n wmg = profile.getWmg()\r\n known_winners = set()\r\n I = list(wmg.keys())\r\n\r\n G = nx.DiGraph()\r\n G.add_nodes_from(I)\r\n\r\n E = nx.DiGraph()\r\n E.add_nodes_from(I)\r\n for cand1, cand2 in itertools.permutations(wmg.keys(), 2):\r\n if wmg[cand1][cand2] > 0:\r\n E.add_edge(cand1, cand2, weight=wmg[cand1][cand2])\r\n\r\n # print(wmg)\r\n # self.output_graph(E)\r\n\r\n # Add any bridge edges from any tier in E\r\n # These are guaranteed to never be in a cycle, so will always be in the final graph after RP procedure\r\n Gc = G.copy()\r\n Gc.add_edges_from(E.edges())\r\n scc = [list(g.edges()) for g in nx.strongly_connected_component_subgraphs(Gc, copy=True) if len(g.edges()) != 0]\r\n bridges = set(Gc.edges()) - set(itertools.chain(*scc))\r\n G.add_edges_from(bridges)\r\n E.remove_edges_from(bridges)\r\n\r\n stats.num_initial_bridges = len(bridges)\r\n\r\n\r\n # Each node contains (G, E, T, P)\r\n # P is path, where each item is of form (G, E, K, a)\r\n # root = Node(value=(self.edges2string(G.edges(), I), self.edges2string(E.edges(), I)))\r\n root = Node(value=(G, E, [], []))\r\n stackNode = []\r\n stackNode.append(root)\r\n\r\n hashtable = set()\r\n\r\n END = self.BEGIN + self.TIMEOUT\r\n\r\n self.missed_winners = set(missed_winners)\r\n\r\n self.data = {}\r\n for w in missed_winners:\r\n self.data[w] = []\r\n\r\n while stackNode:\r\n # Pop new node to explore\r\n node = stackNode.pop()\r\n (G, E, T, P) = node.value\r\n\r\n if time.perf_counter() > END:\r\n print(\"TIMEOUT\")\r\n return sorted(known_winners), stats\r\n\r\n # Check hash\r\n hash_state = self.edges2string(G.edges(), I) + self.edges2string(E.edges(), I) + self.edges2string(T, I)\r\n if hash_state in hashtable:\r\n stats.num_hashes += 1\r\n if self.debug_mode == 3:\r\n print(\"hashed in outer hashtable\")\r\n continue\r\n hashtable.add(hash_state)\r\n\r\n stats.num_nodes += 1\r\n\r\n if self.debug_mode == 3:\r\n print(\"Popped new node: \")\r\n print(\"G:\", sorted(G.edges()))\r\n print(\"E:\", sorted(E.edges()))\r\n print(\"T:\", sorted(T))\r\n\r\n # Flag for whether expanding the current tier required finding max children\r\n f_found_max_children = 0\r\n\r\n # Continue performing RP on this state as long as tie-breaking order doesn't matter\r\n while len(E.edges()) != 0 or len(T) != 0:\r\n if self.stop_conditions(G, E, T, P, I, known_winners, stats) != -1:\r\n # Stop condition hit\r\n break\r\n\r\n if len(T) == 0:\r\n # Get a new tier\r\n (max_weight, max_edge) = max([(d['weight'], (u, v)) for (u, v, d) in E.edges(data=True)])\r\n T = [(u, v) for (u, v, d) in E.edges(data=True) if d['weight'] == max_weight]\r\n E.remove_edges_from(T)\r\n\r\n if self.debug_mode == 3:\r\n print(\"New tier =\", T)\r\n\r\n if len(T) == 1:\r\n # Tier only has one edge, just add it\r\n if self.debug_mode == 3:\r\n print(\"Only 1 edge in tier\")\r\n\r\n if nx.has_path(G, max_edge[1], max_edge[0]) is False:\r\n E.add_edges_from(T)\r\n 
P.append((self.edges2string(G.edges(), I), self.edges2string(E.edges(), I), known_winners.copy(), max_edge))\r\n E.remove_edges_from(T)\r\n G.add_edges_from(T)\r\n continue\r\n\r\n\r\n # Perform reductions every step:\r\n\r\n # Compute \"bridge edges\" which are not in any cycle\r\n Gc = G.copy()\r\n Gc.add_edges_from(T)\r\n scc = [list(g.edges()) for g in nx.strongly_connected_component_subgraphs(Gc, copy=True) if len(g.edges()) != 0]\r\n bridges = set(Gc.edges()) - set(itertools.chain(*scc))\r\n G.add_edges_from(bridges)\r\n T = list(set(T) - bridges)\r\n\r\n G_tc = nx.transitive_closure(G)\r\n\r\n # Remove \"inconsistent edges\" that cannot be added to G without causing cycle\r\n reverse_G = nx.DiGraph.reverse(G_tc)\r\n T = list(set(T) - set(reverse_G.edges()))\r\n\r\n # Remove \"redundant edges\": if there is already path from e[0] to e[1], can immediately add e\r\n redundant_edges = set()\r\n for e in T:\r\n if G_tc.has_edge(e[0], e[1]):\r\n redundant_edges.add(e)\r\n G.add_edges_from([e])\r\n stats.num_redundant_edges += len(redundant_edges)\r\n T = list(set(T) - redundant_edges)\r\n\r\n if len(T) == 0:\r\n # No need to find further children, as tier is now empty\r\n if self.debug_mode == 3:\r\n print(\"Tier empty\")\r\n continue\r\n\r\n # Used to break ties\r\n index = 0\r\n\r\n # Add each edge to stack by priority\r\n children = dict()\r\n T = sorted(T)\r\n for e in T:\r\n if not G_tc.has_edge(e[1], e[0]):\r\n f_found_max_children = 1\r\n\r\n Gc = G.copy()\r\n Gc.add_edges_from([e])\r\n Ec = E.copy()\r\n Tc = copy.deepcopy(T)\r\n Tc.remove(e)\r\n Pc = copy.deepcopy(P)\r\n\r\n EUT = E.copy()\r\n EUT.add_edges_from(T)\r\n Pc.append((self.edges2string(G.edges(), I), self.edges2string(EUT.edges(), I), known_winners.copy(), e))\r\n child_node = Node(value=(Gc,Ec,Tc,Pc))\r\n\r\n # LPwinners\r\n G_in_degree = Gc.in_degree(I)\r\n potential_winners = set([x[0] for x in G_in_degree if x[1] == 0])\r\n priority = len(potential_winners - known_winners)\r\n\r\n children[child_node] = (priority, index)\r\n index = index + 1\r\n\r\n if self.debug_mode == 3:\r\n print(\"added edge\", e)\r\n\r\n children_items = sorted(children.items(), key=lambda x: (x[1][0], x[1][1]))\r\n sorted_children = [key for key, value in children_items]\r\n stackNode += sorted_children\r\n break\r\n\r\n if len(E.edges()) == 0 and f_found_max_children == 0:\r\n # E is empty\r\n if self.debug_mode >= 2:\r\n print(\"E is empty\")\r\n self.add_winners(G, P, I, known_winners, stats)\r\n\r\n return sorted(known_winners), stats, self.data", "def pass_multi_request(message, num_words=1):\n try:\n tries = int(num_words)\n except ValueError:\n message.reply(Strings['NONSENSE'])\n return\n if (tries > 10):\n message.reply(Strings['TOO_MANY_PASSWORDS'])\n return\n if (tries < 1):\n message.reply(Strings['NONSENSE'])\n return\n for x in range(tries):\n message.reply(\"```\" + generate_password() + \"```\")", "def gen_malicious(num_per_dga=10000):\n domains = []\n labels = []\n\n # We use some arbitrary seeds to create domains with banjori\n banjori_seeds = ['somestring', 'firetruck', 'bulldozer', 'airplane', 'racecar',\n 'apartment', 'laptop', 'laptopcomp', 'malwareisbad', 'crazytrain',\n 'thepolice', 'fivemonkeys', 'hockey', 'football', 'baseball',\n 'basketball', 'trackandfield', 'fieldhockey', 'softball', 'redferrari',\n 'blackcheverolet', 'yellowelcamino', 'blueporsche', 'redfordf150',\n 'purplebmw330i', 'subarulegacy', 'hondacivic', 'toyotaprius',\n 'sidewalk', 'pavement', 'stopsign', 'trafficlight', 'turnlane',\n 'passinglane', 
'trafficjam', 'airport', 'runway', 'baggageclaim',\n 'passengerjet', 'delta1008', 'american765', 'united8765', 'southwest3456',\n 'albuquerque', 'sanfrancisco', 'sandiego', 'losangeles', 'newyork',\n 'atlanta', 'portland', 'seattle', 'washingtondc']\n\n segs_size = max(1, num_per_dga/len(banjori_seeds))\n for banjori_seed in banjori_seeds:\n domains += banjori.generate_domains(segs_size, banjori_seed)\n labels += ['banjori']*segs_size\n\n domains += corebot.generate_domains(num_per_dga)\n labels += ['corebot']*num_per_dga\n\n # Create different length domains using cryptolocker\n crypto_lengths = range(8, 32)\n segs_size = max(1, num_per_dga/len(crypto_lengths))\n for crypto_length in crypto_lengths:\n domains += cryptolocker.generate_domains(segs_size,\n seed_num=random.randint(1, 1000000),\n length=crypto_length)\n labels += ['cryptolocker']*segs_size\n\n domains += dircrypt.generate_domains(num_per_dga)\n labels += ['dircrypt']*num_per_dga\n\n # generate kraken and divide between configs\n kraken_to_gen = max(1, num_per_dga/2)\n domains += kraken.generate_domains(kraken_to_gen, datetime(2016, 1, 1), 'a', 3)\n labels += ['kraken']*kraken_to_gen\n domains += kraken.generate_domains(kraken_to_gen, datetime(2016, 1, 1), 'b', 3)\n labels += ['kraken']*kraken_to_gen\n\n # generate locky and divide between configs\n locky_gen = max(1, num_per_dga/11)\n for i in range(1, 12):\n domains += lockyv2.generate_domains(locky_gen, config=i)\n labels += ['locky']*locky_gen\n\n # Generate pyskpa domains\n domains += pykspa.generate_domains(num_per_dga, datetime(2016, 1, 1))\n labels += ['pykspa']*num_per_dga\n\n # Generate qakbot\n domains += qakbot.generate_domains(num_per_dga, tlds=[])\n labels += ['qakbot']*num_per_dga\n\n # ramdo divided over different lengths\n ramdo_lengths = range(8, 32)\n segs_size = max(1, num_per_dga/len(ramdo_lengths))\n for rammdo_length in ramdo_lengths:\n domains += ramdo.generate_domains(segs_size,\n seed_num=random.randint(1, 1000000),\n length=rammdo_length)\n labels += ['ramdo']*segs_size\n\n # ramnit\n domains += ramnit.generate_domains(num_per_dga, 0x123abc12)\n labels += ['ramnit']*num_per_dga\n\n # simda\n simda_lengths = range(8, 32)\n segs_size = max(1, num_per_dga/len(simda_lengths))\n for simda_length in range(len(simda_lengths)):\n domains += simda.generate_domains(segs_size,\n length=simda_length,\n tld=None,\n base=random.randint(2, 2**32))\n labels += ['simda']*segs_size\n\n # matsnu\n domains += matsnu.generate_domains(num_per_dga, include_tld=False)\n labels += ['matsnu']*num_per_dga\n\n # suppobox\n domains += suppobox.generate_domains(num_per_dga, include_tld=False)\n labels += ['suppobox']*num_per_dga\n\n # gozi\n domains += gozi.generate_domains(num_per_dga, include_tld=False)\n labels += ['gozi']*num_per_dga\n\n return domains, labels", "def _retry_bootstrap_candidates(self):\n if __debug__: dprint(\"unable to resolve all bootstrap addresses\", level=\"warning\")\n for counter in count(1):\n yield 1.0 if counter < 30 else 30.0\n if __debug__: dprint(\"attempt #\", counter, level=\"warning\")\n candidates = get_bootstrap_candidates(self)\n for candidate in candidates:\n if candidate is None:\n break\n else:\n if __debug__: dprint(\"resolved all bootstrap addresses\")\n self._bootstrap_candidates = dict((candidate.sock_addr, candidate) for candidate in candidates if candidate)\n break", "def pass_multi_request(message, num_words=1):\n try:\n tries = int(num_words)\n except ValueError:\n message.reply(Strings['NONSENSE'])\n return\n if (tries > 10):\n 
message.reply(Strings['TOO_MANY_PASSWORDS'])\n return\n if (tries < 1):\n message.reply(Strings['NONSENSE'])\n return\n for x in range(tries):\n message.reply(\"```\" + hf.generate_password() + \"```\")", "def keygen(cls, bits, num_shares, threshold):\n if threshold < 2:\n raise('Threshold should be at least 2, but is {}'.format(threshold))\n primes = PrimeStorage()\n ((p, p_), (q, q_)) = primes.getRandomSafePrimes(bits // 2)\n\n n = p * q\n m = p_ * q_\n\n # find secret\n d = ext_euclid(n, m)\n\n pk = PublicPaillierKey(n)\n\n # Shamir secret sharing: determine polynomial\n coeffs = [d] + [randint(0, n*m) for _ in range(threshold-1)]\n # determine shares\n shares = [eval_polynomial(coeffs, i, n*m)\n for i in range(1, num_shares + 1)]\n key_shares = [PrivateKeyShare(\n shares[i-1], i, len(shares), threshold, pk) for i in range(1, num_shares + 1)]\n\n # - v, a generator of Z^*_(n^2)\n # - verification key for each decryption party\n\n return pk, key_shares", "def extremely_stupid_naive_brute_force_crap():\n keystrokes = [l.strip() for l in open(\"keylog.txt\")]\n for i in range(1000, 10000000):\n if i % 10000 == 0:\n print i\n password = str(i)\n if all(is_subsequence(password, keys) for keys in keystrokes):\n print password\n break", "def test_check_trialpubs_nctids(self):\n pmids = {29037101, 28735855, 12214118, 28697569, 15380154, 26294005, 21539488, 23680940, 23720230, 24164735,\n 25599006, 25681666, 26086182, 21514250, 19621072, 25961184, 26384466, 24134194, 24495355, 25996285,\n 26265727, 24374288, 25771249, 28359749, 24045855, 24880197, 26640437, 26682691, 27895474, 23796946,\n 25264972, 24507770, 26305649, 25565485, 25891115, 26890759, 26867200, 27529771, 26812512, 24899709,\n 28054939, 27102361, 25344629, 24617349, 25733635, 25733639, 29141041, 25391305, 26135351, 24938711,\n 28319243, 15205295, 20858954, 25352453, 26213339, 25414047, 24334113, 19643207, 28676015, 27570766,\n 17569205, 25002849, 26690214, 18709889, 22232016, 16210710, 22122400, 19204158, 21506929, 22449789,\n 22794138, 27738491, 19641487, 9149659, 28213052, 12663275, 10374811, 17101822, 22371413, 28861684,\n 26652155, 16614482, 27624276, 28925645, 22170358, 25061569, 28980404, 26740832, 26286890, 28448083,\n 29562543, 25928696, 26253520, 26003546, 20810976}\n res = bot.check_trialpubs_nctids(29865058, '10.3233/JAD-179940')\n pmids1 = set([int(pmid) for pmid in res.pmids])\n self.assertEqual(pmids1, pmids)\n pmids = {24491689, 23741057, 15265849, 12409541, 26673558, 23616602, 21080835, 21444883, 21931078, 26984864,\n 26857383, 25131977, 23680885, 21080836, 9921604, 22433752, 21187258, 21315441, 26560249, 25286913,\n 18342224, 12598066, 20176990, 25921522, 21906250, 26874388, 20562255, 18794390, 27207191}\n res = bot.check_trialpubs_nctids(27634736, '10.1002/ejhf.638')\n pmids1 = set([int(pmid) for pmid in res.pmids])\n self.assertEqual(pmids1, pmids)", "def cracker_complete_no_dict(x):\n start = time.time()\n lista = list(x)\n cracked = []\n cycle = 1\n print(\"Cracking password without a dictionary\")\n while True:\n number = str(randomized(0, 9))\n cracked.append(number)\n if cracked == lista:\n print(\"Cycle: \", cycle)\n print(cracked)\n break\n if len(cracked) == len(lista):\n print(\"Cycle =\", cycle)\n print(cracked)\n cracked = []\n cycle += 1\n\n end = time.time()\n return (end - start, cycle)", "def __permute(l,opts):\n MAX_RAND_SIZE = 2080 \n if (len(l)/3 < MAX_RAND_SIZE): \n rd.shuffle(l)\n else:\n sys.stderr.write(\\\n\t\t\"{}:{}: Valid Random Permutation Range 
Exceeded.\"\\\n\t\t.format(opts.progname,permute.__name__))\n opts.perror+=1", "def gen_keys(lname,dsa=False):\n d = 'keys'\n if not os.path.isdir(d):\n os.mkdir(d)\n for n in lname:\n if not os.path.isfile('%s/%s.pem'%(d,n)):\n key = Crypto.PublicKey.DSA.generate(512, os.urandom) if dsa else Crypto.PublicKey.RSA.generate(1024,os.urandom)\n open('%s/%s.pem'%(d,n),'w').write(key.exportKey('PEM'))", "def generate_nice_key(self, prefix=None):\n\n result = None\n\n # Set default values\n\n length_margin = 3\n unique_tries = 10\n\n curr_goal_len = NoodleKeyService.NICE_KEY_GOAL_LENGTH\n\n prefix = prefix or self.prefix\n # Try to get key of length [goal_len] up until [goal_len + length_margin]\n for i in range(length_margin):\n\n # Try to obtain unique key maximum of [unique_tries] times\n for j in range(unique_tries):\n\n # prefix + random 128-bit number compacted to base 36 (a-z,1-9)\n candidate_nice_key = prefix + base_repr(uuid4().int, 36).lower()\n candidate_nice_key = candidate_nice_key[0:curr_goal_len] # Truncate\n\n if candidate_nice_key not in self._generated_nice_keys:\n\n self._generated_nice_keys[candidate_nice_key] = None\n\n if self._is_nice_key_unique(candidate_nice_key):\n\n # Found!\n result = candidate_nice_key\n\n break\n\n if result:\n\n break\n\n curr_goal_len += 1\n\n return result", "def skip(self):\r\n self.owning_letters=list()\r\n for _ in range(7):\r\n self.owning_letters.append(random.choice(self.letter))\r\n return self.owning_letters", "def get_password(wordlen, digitlen, words, strength):\n\n while True:\n\n try:\n w = words.pop().capitalize()\n except IndexError:\n sys.exit(\"Unable to get a sufficiently strong password\")\n\n s = np.random.choice(SPECIAL_CHARS)\n i = np.random.randint(0, 10**digitlen)\n\n comp = [w, f\"{i:0{digitlen}d}\", s, s]\n np.random.shuffle(comp)\n pw = ''.join(comp)\n\n # pw = str(f\"{s}{w}{i:0{digitlen}d}{s}\")\n stats_pw = PasswordStats(pw)\n\n if stats_pw.strength() >= strength:\n return pw, stats_pw", "def getMembersForReproduction(self, numMembers, pickProb):\n\t\tselectedMembers = []\n\t\tmembersWithErrorsModifiable = list(self.membersWithErrors)\n\t\twhile len(selectedMembers) < numMembers:\n\t\t\tindexSelected = 0\n\t\t\twhile rnd.randint(0, 100) > int(pickProb * 100) and indexSelected != len(membersWithErrorsModifiable) - 1:\n\t\t\t\tindexSelected += 1\n\t\t\tmemberWithErrorSelected = membersWithErrorsModifiable[indexSelected]\n\t\t\tif memberWithErrorSelected[0] not in selectedMembers:\n\t\t\t\tselectedMembers.append(memberWithErrorSelected[0])\n\t\t\t\tmembersWithErrorsModifiable.remove(memberWithErrorSelected)\n\t\treturn selectedMembers", "def my_ticket_attempts(lottery_win):\n\n my_ticket = []\n while len(my_ticket) < 4:\n pulled_value = choice(lottery_values)\n if pulled_value not in my_ticket:\n my_ticket.append(pulled value)\n\n return my_ticket", "def password_generator(num_users=1000, password_length=20):\n\n password_list = []\n for ind in range(num_users):\n password = random.randint(0, 2 ** password_length - 1)\n password_list.append(password)\n return password_list", "def choose_problems():\n\n problems = make_dict()\n\n res = random.sample(list(problems.items()), k = 3)\n # res2 = random.choice(list(problems.items()))\n # res3 = random.choice(list(problems.items()))\n\n res0 = res[0]\n res1 = res[1]\n res2 = res[2]\n\n return res0, res1, res2", "def find_adapted_solution(list_of_tuples, module, n_of_players_with_vote):\n\n def malus_roles_left(players_left, roles_left):\n\n \"\"\"\n Checks whether it is possible 
to deploy all the players by assinging\n a certain number of malus.\n \"\"\"\n\n # Permutations of the players still to be deployed. We do that because\n # we only want that combination of players in which ALL of them are\n # deployed\n players_perm = permutations(players_left, len(players_left))\n\n # Initialize the number of malus (just a number high enough)\n fin_malus = 10\n\n # For each permutation of players to be deployed\n for perm in players_perm:\n\n # Initialize two parameters: a counter and the number of malus for\n # this specific permutation. Counter is used to be sure all the\n # players in the permutation are checked\n count = 0\n temp_malus = 0\n\n # Make a copy of the roles to be covered so we can use it later to\n # delete roles that we are able to cover\n copy_of_adapted_roles = copy.copy(roles_left)\n\n # For each element in the permutation we select the corresponding\n # role and try to cover it\n for i in range(len(perm)):\n role_to_cover = roles_left[i]\n role_cand = perm[i][2]\n\n # If it is possible to cover it with a malus we increase the\n # number of malus and the counter and then remove the role from\n # the list of the roles still uncovered\n if role_to_cover in malus_roles[role_cand]:\n temp_malus += 1\n count += 1\n copy_of_adapted_roles.remove(role_to_cover)\n\n # If it is possible to cover it with no malus we just increase\n # the counter and delete the role\n elif (role_to_cover not in malus_roles[role_cand]\n and role_to_cover in compatible_roles[role_cand]):\n count += 1\n copy_of_adapted_roles.remove(role_to_cover)\n\n # Else we interrupt checking this permutation and go to the\n # one\n else:\n break\n\n # If we checked ALL the elements in the permutation and the number\n # of malus is lower than the previous value we store it\n if count == len(perm) and temp_malus < fin_malus:\n fin_malus = temp_malus\n\n # If this value is different from the default one it means we found a\n # solution and we return it\n if fin_malus != 10:\n return fin_malus\n else:\n return False\n\n def calculate(candidate, roles_of_module):\n\n \"\"\"\n This function applies the deploy_players function to look for the\n solution, if it exists. 
If all the players are deployed it returns\n True, otherwise False.\n \"\"\"\n\n # See find_solution for explanation on the try method\n try:\n to_deploy_list, roles_left = deploy_players(candidate,\n roles_of_module,\n 'adapted')\n\n # If the roles to deploy can be covered with a malus we return the\n # number of malus assigned\n\n if malus_roles_left(to_deploy_list, roles_left):\n return malus_roles_left(to_deploy_list, roles_left)\n else:\n return False\n\n except TypeError:\n return False\n\n ordered_lineup = order_by_role(list_of_tuples)\n\n all_comb = list(combinations(schemes[module], n_of_players_with_vote))\n\n for comb in all_comb:\n\n # Change from tuple to list and check wings\n comb = transf_wings(list(comb), module)\n\n # If a solution is found we return the number of malus\n if calculate(ordered_lineup, comb):\n return calculate(ordered_lineup, comb)\n\n return False", "def genKeys(p, q):\n # Fill in code to generate the server's public and private keys.\n # Make sure to use the Extended Euclidean algorithm...............................\n n = p * q\n phi = (p-1)*(q-1)\n #e = e_finder(n, phi)\n while True:\n e = random.randint(1, phi)\n if gcd_iter(e, phi) == 1:\n break\n d = ext_Euclid(phi, e)\n if d <0:\n d+=phi\n return n, e, d", "def make_pool(num_snp):\r\n\tc=0\r\n\tpool=[]\r\n\tfor i in xrange(0,num_snp+1):\r\n\t\ts=make_str(i, num_snp)\r\n\t\tpool+=map(\"\".join, itertools.permutations(s, num_snp))\r\n\treturn list(set(pool))", "def pwgen(length=16, ichars=string.ascii_letters+string.digits):\n return ''.join(random.choice(ichars) for i in range(length))", "def test_list_keys(self):\n with patch('iceit.crypto.gnupg.GPG') as mock_gpg:\n mock_gpg.return_value = mock_gpg\n encryptor = self.test_init()\n encryptor.list_secret_keys()\n\n mock_gpg.list_keys.assert_called_once_with(True)" ]
[ "0.5407278", "0.53765005", "0.5358425", "0.5299596", "0.52687997", "0.5203861", "0.5168442", "0.5158879", "0.50972825", "0.5045369", "0.50346786", "0.50105166", "0.4999906", "0.49660498", "0.49414423", "0.49236983", "0.4902794", "0.48773238", "0.48708338", "0.48589072", "0.48584646", "0.4855425", "0.48096266", "0.4807388", "0.47939453", "0.47915325", "0.47869954", "0.47849378", "0.4776742", "0.4776624" ]
0.6327766
0
This function counts how many words are the same between the cuisine file and menu list.
def count_same_words(cuisine_file, menu):
    cuisine_list = separate_words(cuisine_file)
    same_word_count = 0
    for i in cuisine_list:
        for j in menu:
            if i == j:
                same_word_count += 1
    return same_word_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_words(filename):", "def count_words_and_dublicates(novel):", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)", "def count_words_in_file(file_name):\n\n\treturn len(get_words_in_file(file_name))", "def count_common_words(filename):\n with open(filename,encoding='utf-8') as f:\n contents = f.read()\n common_words = contents.count('Alice')\n print(common_words)", "def countOccurrences(self, wordsToCheck):\n count = 0\n for token in self.importantTokenList():\n w = token.text\n for wtc in wordsToCheck:\n if wtc == w:\n count = count + 1\n return count", "def count_all_words(file_name):\n\n return len(separate_words(file_name))", "def get_number_of_words(self):\n filename = f'{self.path}/{self.filename}'\n # word_counter = {}\n # w_cnt = 0\n # x = 0\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n # for word in word_list:\n # w_cnt += 1\n # if word not in word_counter:\n # word_counter[word] = 1\n # else:\n # word_counter[word] = word_counter[word] + 1\n\n # for word in word_list:\n # x += 1\n # print(word, word.isalpha(), x)\n\n w_cnt = sum([a[0].isalpha() for a in word_list])\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_words', w_cnt)\n print(datetime.now(), '-', 'number_of_words for', self.filename, 'calculated =', w_cnt)\n return None", "def test_counts(self):\n lines, words, chars = analyze_text(self.filename)\n self.assertEqual(lines, 4)\n self.assertEqual(words, 8)\n self.assertEqual(chars, 36)", "def get_counts(data):\n\n word_count = {}\n syll_count = {}\n\n infile = data.corpus\n try:\n\n open_file = codecs.open(infile, 'r', encoding='utf-16')\n for line in open_file:\n line = line.lower()\n # Remove tablet indexing info and line numbers. 
Grab only text data\n line = line.split(',')\n text = clean_line(line[7])\n\n # Update the occurrences of the words in the line\n for word in text.split():\n count = word_count.setdefault(word, 0)\n word_count[word] = count + 1\n\n # Track occurrences of syllables\n update_syllable_count(word, syll_count)\n\n open_file.close()\n except IOError:\n print(\"Cannot open: \" + infile)\n\n return (word_count, syll_count)", "def get_word_count(file_name):\n\n my_file = open(file_name)\n word_count = {}\n\n for line in my_file:\n stripped_line = line.rstrip()\n line_list = stripped_line.split(' ')\n line_list = [word.lower() for word in line_list]\n\n for word in line_list:\n word_count[word] = word_count.get(word, 0) + 1\n\n for word_in_count, count in word_count.iteritems():\n print \"{} {}\".format(word_in_count, count)\n\n my_file.close()\n # return word_count", "def common_words_safe(filename, min_chars):\n wordPattern = re.compile('[a-zA-Z]{' + str(min_chars) + ',}')\n occurance = dict()\n try:\n with open(filename, 'r') as f:\n contents = f.read()\n except IOError as e:\n print \"IOError {0}: {1}\".format(e.errno, e.strerror)\n return\n words = wordPattern.finditer(contents)\n for wordMatch in words:\n word = wordMatch.group(0).lower()\n if word in occurance:\n occurance[word] += 1\n else:\n occurance[word] = 1\n return sorted(occurance.items(), key=lambda item:item[1], reverse=True)", "def alice_in_wonderland():\n from collections import Counter\n with open(\"lib/alice_in_wonderland.txt\") as f:\n #~ table = maketrans(\" \",\" \")\n #~ wordlist = f.read().lower().translate(table, punctuation).split()\n # Translate actually performs fastest here but we use list comprehension\n # because we like it.\n wordlist = [i.lower() for i in f.read().split() if i.isalpha()]\n counted_words = Counter(wordlist)\n # Sort and write our counted wordlist to a new file:\n with open(\"lib/alice_counted.txt\", \"w\") as fout:\n length = 0\n for k, v in sorted(counted_words.items()):\n if len(k) > length:\n length = len(k)\n print length\n fout.write(k + \" \" + str(v) + \"\\n\")\n\n # 3 Solutions for counting characters (not words):\n #~ import operator\n #~ from string import lowercase, punctuation\n \n # 1: Reading the file into a string, then performing dictionary comprehension.\n #~ s = f.read().lower()\n #~ # Incredibly stupid and slow because it goes through the whole string\n #~ # with each iteration. 
DO NOT DO THIS.\n #~ L = {i: s.count(i) for i in s if i in lowercase}\n #~ L_sorted = sorted(L.iteritems(), key=operator.itemgetter(0))\n #~ print L_sorted\n\n # 2: Reading the file line by line into a dictionary.\n #~ d = {}\n #~ for i in f:\n #~ i = i.lower().strip()\n #~ i = [c for c in i if c in lowercase]\n #~ for char in i:\n #~ if char in d:\n #~ d[char] += 1\n #~ else:\n #~ d[char] = 1\n #~ keys = d.keys()\n #~ keys.sort()\n #~ for i in keys:\n #~ print (i, d[i]),\n\n # 3: Using Counter\n #~ s = [i for i in f.read().lower() if i in lowercase]\n #~ d = Counter(s)\n # Long version:\n #~ keys = sorted(d.keys())\n #~ for i in keys:\n #~ print (i, d[i]),\n #~ # Concise:\n #~ for k, v in sorted(d.items()): print (k, v),", "def wordCount( aList ):\n return len( aList )", "def test_run():\r\n print(count_words(\"cat bat mat cat bat cat\", 3))\r\n print(count_words(\"betty bought a bit of butter but the butter was bitter\", 3))", "def word_count():\n word_counter = Counter()\n\n # read files and count words\n for file_path in Path(INPUTS_PATH).rglob(\"*\"):\n if file_path.is_file():\n print(f\"Processing input file: {file_path.as_posix()}\")\n word_counter += Counter(file_path.read_text().split())\n\n # write the result to OUTPUTS_PATH\n output_file = Path(OUTPUTS_PATH) / \"output.txt\"\n with output_file.open(\"w\") as f:\n for word, count in word_counter.most_common():\n f.write(f\"{word}: {count}\\n\")\n print(f\"Generated output file: {output_file.as_posix()}\")", "def count(self, word):\n pass", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def wcount(lines, topn=10):\n '''a=[]\n for line in lines:\n word = line.strip()\n a.append(word)\n def histogram(s):\n d = dict()\n for i in s:\n if i in d:\n d[i]+=1\n else:\n d[i]=1\n return d'''\n def process_line(lines,diction):\n lines = lines.replace('-',' ')\n for word in lines.split():\n word=word.strip(string.punctuation+string.whitespace)\n word.lower()\n diction[word]=diction.get(word,0)+1\n\n def process_file(lines):\n diction = {}\n process_line(lines,diction)\n return diction\n diction=process_file(lines)\n x=list(diction.values())\n x.sort()\n x.reverse()\n count = 0\n for i in range(topn):\n for key in list(diction.keys()):\n if diction[key]==x[i] and count<topn:\n print(\"%s %d\"%(key,diction[key]))\n count +=1\n del diction[key]\n pass", "def common_words_tuple(filename, min_chars):\n wordPattern = re.compile('[a-zA-Z]{' + str(min_chars) + ',}')\n occurance = dict()\n with open(filename, 'r') as f:\n contents = f.read()\n words = wordPattern.finditer(contents)\n for wordMatch in words:\n word = wordMatch.group(0).lower()\n if word in occurance:\n occurance[word] += 1\n else:\n occurance[word] = 1\n return sorted(occurance.items(), key=lambda item:item[1], reverse=True)", 
"def get_file_counts(filename):\n new_file = open(filename, \"r\")\n d = dict()\n for line in new_file: \n split_line = line.split()\n for word in split_line:\n if word in d:\n d[word] += 1\n else:\n d[word] = 1\n new_file.close()\n return d", "def word_count_2(filename):\n\n with open(filename) as file_:\n # read file and lowercase all words\n words = file_.read().lower()\n # use translate to remove punc\n words = words.translate(None, string.punctuation)\n # call counter to count on split owrds\n word_counts = Counter(words.split())\n\n # print out items using iteritems (display, doesn't creat list) \n for word, count in word_counts.iteritems():\n print \"{} {}\".format(word, count)\n\n return word_counts", "def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END", "def common_words_min(filename, min_chars):\n wordPattern = re.compile('[a-zA-Z]{' + str(min_chars) + ',}')\n occurance = dict()\n with open(filename, 'r') as f:\n contents = f.read()\n words = wordPattern.finditer(contents)\n for wordMatch in words:\n word = wordMatch.group(0).lower()\n if word in occurance:\n occurance[word] += 1\n else:\n occurance[word] = 1\n return sorted(occurance, key=occurance.get, reverse=True)", "def count_words(text):\n\n # Open a file and read the text\n with open(text) as file:\n # Split the file in to a list of words\n words = remove_punctuation(file.read()).split()\n # Create a set of unique words from the list words\n unique_words = {*words}\n\n # For each string in the new list\n for unique_word in unique_words:\n # Count the number of times the word appears\n count = words.count(unique_word)\n # Print the string and the number of times it appears.\n print(f'\"{unique_word.capitalize() }\" appears {count} times.')", "def count_word_instances_in_file(file_name, target_word):\n\n\tcount = 0\n\twords = get_words_in_file(file_name)\n\tfor word in words:\n\t\tif target_word == word:\n\t\t\tcount += 1\n\treturn count", "def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)", "def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)", "def word_count(self):\n from collections import Counter\n counts = Counter(self._replace_non_alnum().split())\n return counts", "def occurences(words):\n\n\t# Add your code here\n\treturn" ]
[ "0.690674", "0.68663776", "0.68278384", "0.6711609", "0.66950136", "0.659886", "0.6540741", "0.651223", "0.6462825", "0.63756806", "0.62130094", "0.6198834", "0.61832255", "0.6172285", "0.61688614", "0.6166723", "0.615954", "0.61456776", "0.6139662", "0.61389077", "0.6133218", "0.613204", "0.612813", "0.6119896", "0.61139876", "0.61005205", "0.60836387", "0.60836387", "0.6080871", "0.6072744" ]
0.8976418
0
Writes a dictionary of cuisines, scores per dining hall menu to a JSON file
def to_JSON(meal, list_of_cuisines, list_of_menus):
    data = {}
    for cuisine in list_of_cuisines:
        cuisine_list = separate_words(cuisines[cuisine])
        scores = {}
        for i in range(len(list_of_menus)):
            scores[menus[i]] = get_score(cuisines[cuisine], list_of_menus[i])
        data[cuisine] = scores
    with open(meal+'data.json', 'w') as f:
        json.dump(data, f)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_json(toWrite):\n with open('clubs.json', 'w+') as outfile:\n json.dump(toWrite, outfile)", "def write_to_json(dictData, fileout):\n\t# Prepare the output file\n\tfout = codecs.open(fileout, 'w', 'utf-8')\n\thwDict = prepare_hw_dict(dictData)\n\tjson.dump(hwDict, fout)\n\t# Give some summary to the user\n\tprint('JSON generated. Success!')\n\tprint('{} headwords written to JSON file.'.format(len(hwDict)))", "def save_to_json(self):\r\n file = col.defaultdict(list)\r\n data_sources = [\"http://www.gcmap.com/\",\r\n \"http://www.theodora.com/country_digraphs.html\",\r\n \"http://www.citypopulation.de/world/Agglomerations.html\",\r\n \"http://www.mongabay.com/cities_urban_01.htm\",\r\n \"http://en.wikipedia.org/wiki/Urban_agglomeration\",\r\n \"http://www.worldtimezone.com/standard.html\"]\r\n file[\"data_sources\"] = data_sources\r\n for code, city in self.vertices.items():\r\n metros = {}\r\n for key, val in vars(city).items():\r\n metros[key] = val\r\n file[\"metros\"].append(metros)\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n routes = {\"ports\": [edge.start, edge.destination], \"distance\": edge.distance}\r\n second_route = {\"ports\": [edge.destination, edge.start], \"distance\": edge.distance}\r\n if second_route not in file[\"routes\"]:\r\n file[\"routes\"].append(routes)\r\n with open('../Data/save.json', 'w') as outfile:\r\n json.dump(file, outfile, indent=4)", "def save_data():\n new_data = {}\n new_data['data sources'] = []\n new_data['metros'] = []\n new_data['routes'] = []\n for code in set_of_cities:\n city = set_of_cities[code]\n data = {}\n data['code'] = city.code\n data['name'] = city.name\n data['country'] = city.country\n data['continent'] = city.continent\n data['timezone'] = city.timezone\n data['coordinates'] = city.coordinates\n data['population'] = city.population\n data['region'] = city.region\n new_data['metros'].append(data)\n for route in set_of_route:\n code_of_cities = route.code_of_cities\n distance = route.distance\n data = {}\n data['ports'] = code_of_cities\n data['distance'] = distance\n new_data['routes'].append(data)\n\n with open(\"new_data.json\", 'wb') as outfile:\n json.dump(new_data, outfile, sort_keys=True, indent=4, separators=(',', ':'))\n\n print \"Changes saved in new_data.json\"", "def writeJSON(filename):\n if not filename.endswith('.json'):\n filename += '.json'\n with open(filename, 'w') as f:\n for x in range(numRows):\n scores = quizScores()\n types = getTypes(scores)\n row = { 'id': x,\n 'challenger': types[0], 'collaborator': types[1],\n 'communicator': types[2], 'contributor': types[3],\n 'q1': scores[0], 'q2': scores[1], 'q3': scores[2],\n 'q4': scores[3], 'q5': scores[4], 'q6': scores[5],\n 'q7': scores[6], 'q8': scores[7], 'q9': scores[8],\n 'q10': scores[9], 'q11': scores[10], 'q12': scores[11],\n 'q13': scores[12], 'q14': scores[13], 'q15': scores[14],\n 'q16': scores[15], 'q17': scores[16], 'q18': scores[17]\n }\n json.dump(row, f, sort_keys=True)", "def write_folds_to_json(self, filepath: str):\n with open(filepath, \"w\") as f:\n json.dump(\n {\n \"isH5\": self.is_h5_dataset,\n \"folds\": self.folds,\n },\n f,\n indent=4,\n )", "def update_heroes(self):\n _save_dict_to_file(self.get_heroes(), \"heroes.json\")", "def save_new_json():\n json.dump(\n seals_data,\n open(os.path.join(seals_root, 'seals.json'), 'w'),\n sort_keys=True,\n indent=4,\n )", "def update_json_file(self):\n with open(\"data/save.txt\", \"r+\") as file:\n dictionary = json.load(file)\n user = dictionary[\"Actual 
Username\"]\n dictionary[user].append(self.score)\n\n with open(\"data/save.txt\", \"w\") as file:\n json.dump(dictionary, file, indent=3, sort_keys=True)", "def write_to_json(missense_dict, frame_shift_dict, missense_name_dict, frame_shift_name_dict, person):\n json_file[person] = {\n \"missense_variant\": missense_dict,\n \"missense_HGNC_name\": missense_name_dict,\n \"frame_shift_variant\": frame_shift_dict,\n \"frame_shift_HGNC_name\": frame_shift_name_dict}", "def saveCollection(collection, data):\n a = {'menu': data}\n with open(collection, 'w') as f:\n json.dump(a, f, indent=4, sort_keys=True)", "def new_ski_json():\r\n filename_json = \"ski.json\"\r\n with open(filename_json, \"w\") as f_json: # Erstellen der Datei mit der Liste aus dict_ski().\r\n json.dump(dict_ski(), f_json)", "def write_in_json(data):\n with open('genre.json', 'w') as data_file:\n json.dump(data, data_file, indent= 4)", "def write(self, _filepath=None):\n _json_txt = json.dumps(self.json_dict, indent=2)\n self._write_json_text(_json_txt, _filepath)", "def write(self, fp, **kwds):\n json.dump(self._dict, fp)", "def create_comment_file():\n club = read_json()\n comment_dict = {}\n\n for club in clubs:\n comment_dict[club.name] = []\n\n with open('club_comments.json', 'w') as outfile:\n json.dump(comment_dict, outfile)", "def save(statistic_entries):\n with open('learn.json', 'w') as file:\n json.dump(statistic_entries, file, indent=2)", "def write(self, output):\n with open(output, 'w') as out:\n out.write(json.dumps(self, indent=4))", "def write_json(self, filename):\n data = {\n \"fleets\": json.loads(self.manager_df.to_json(orient=\"records\")),\n \"transports\": json.loads(self.transport_df.to_json(orient=\"records\")),\n \"customers\": json.loads(self.customer_df.to_json(orient=\"records\")),\n \"stations\": json.loads(self.station_df.to_json(orient=\"records\")),\n \"simulation\": json.loads(self.df_avg.to_json(orient=\"records\"))\n }\n\n with open(filename, 'w') as f:\n f.seek(0)\n json.dump(data, f, indent=4)", "def dump(self):\n course = {\n \"course_id\": self.course_id,\n \"title\": self.title,\n \"chapters\": {}\n }\n for chapter_num in self.chapters:\n chapter = self.chapters[chapter_num]\n course[\"chapters\"][chapter.num] = {\n \"name\": chapter.name,\n \"lessons\": {lesson_num: lesson_data.name for lesson_num,\n lesson_data in chapter.lessons.items()}\n }\n with open(_JSON_PATH_FORMAT % self.course_id, \"w+\") as fp:\n _logger.debug(\"Dumping the data into a JSON file so that it can \"\n \"be accessed at a later time quickly and without \"\n \"need to scrape LearnItFirst.com, saving time and \"\n \"unnecessary requests.\")\n json.dump(course, fp)", "def write(self):\r\n\r\n with open(self.filename + \".json\", mode='w') as json_file:\r\n json.dump(self.data, json_file, separators=(',', ':'))", "def _write_json(self):\n with open(self._file_path, 'w') as f:\n json.dump(self._content, f, indent=4, separators=None,\n encoding='utf-8', sort_keys=False)", "def jsonl(filepath, output):\n\n mazes = load(filepath=filepath)\n\n if output:\n with open(output, \"a\") as f:\n for maze in mazes:\n dump(maze, f)\n \n else:\n for maze in mazes:\n dump(maze, stdout)", "def main(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n\n course_dict = {}\n course_dict['course_id'] = str(os.path.split(filename.strip('/'))[-1])\n course_dict['blocks'] = build_course_map(data)\n\n filename = '%s' % course_dict['course_id']\n filepath = os.path.join('../input/', filename)\n\n with open(filepath, 
'w') as outfile:\n json.dump(course_dict, outfile, indent=4)", "def to_json(self, destination):\n\n with open(destination, \"w\") as file:\n # We open the file we are going to write.\n # Note: We always overwrite the destination.\n\n # We save the current dictionnary into a json format.\n dump(\n self.main_dictionnary,\n file,\n ensure_ascii=False,\n indent=4,\n sort_keys=True,\n )", "def create_gw_json(gw):\n gw_res = read_csv(gw)\n manager_id_dict = read_manager_id()\n\n final_dict = {}\n\n for k in manager_id_dict.keys():\n try:\n final_dict[manager_id_dict[k]] = {\n 'points': gw_res[k]['points'],\n 'name': k,\n 'rank': gw_res[k]['rank']\n }\n except KeyError:\n pass\n with open(output_json_name.format(gw=gw), 'w') as file:\n file.write(json.dumps(final_dict))", "def save_highscores(self, contents):\n\t\ttry:\n\t\t\twith open(self.filename, 'w') as f_obj:\n\t\t\t\tf_obj.write(json.dumps(contents)) #save as json\n\t\texcept FileNotFoundError:\n\t\t\tprint('File for highscores not found! Call 016 741 6243 for assistance.')", "def save_file(city_dict):\n head = {}\n metros = []\n routes = []\n \n for key in city_dict:\n save_dict = {}\n save_dict[\"code\"] = city_dict[key].code\n save_dict[\"name\"] = city_dict[key].name\n save_dict[\"country\"] = city_dict[key].country\n save_dict[\"continent\"] = city_dict[key].continent\n save_dict[\"timezone\"] = city_dict[key].timezone\n save_dict[\"coordinates\"] = city_dict[key].coordinates\n save_dict[\"population\"] = city_dict[key].population\n save_dict[\"region\"] = city_dict[key].region\n \n metros.append(save_dict)\n \n for flight in city_dict[key].flights_out:\n save_route = {}\n save_route[\"ports\"] = [key, flight[0]]\n save_route[\"distance\"] = flight[1]\n \n routes.append(save_route)\n head[\"metros\"] = metros\n head[\"routes\"] = routes\n \n saved_file = open(\"data.txt\", \"w\")\n saved_file.write(json.dumps(head))", "def scrape_teams_write_tojson() -> None:\r\n # Create a dictionary of Team objects by scraping TCS and Overbuff\r\n teams = TCS.get_teams()\r\n # Save this data to a json file named teams.json\r\n TCS.write_tojson(teams, \"teams.json\")", "def write_json(self, filename):\n with open(filename, 'a+') as f:\n f.write(json.dumps(self.weights))\n f.write(\"\\n\")" ]
[ "0.63122237", "0.612619", "0.5950973", "0.57618344", "0.5728447", "0.5712075", "0.5669831", "0.5658273", "0.56450564", "0.56368273", "0.5627659", "0.56155825", "0.5583689", "0.5558727", "0.55583084", "0.5555163", "0.55480874", "0.5523268", "0.55128515", "0.55048144", "0.54915893", "0.5480932", "0.5480252", "0.547272", "0.5459265", "0.54460424", "0.5402672", "0.5397535", "0.5391193", "0.53829366" ]
0.75506806
0
get shape key local co
def get_shapekeys_co(ob_name):
    obj = bpy.data.objects[ob_name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_key_id(self):", "def loc_key(self):\r\n key = tuple(self.loc.coord)\r\n return (key)", "def key():", "def grid_shape(self, key):\n return self.execute(b\"GRID.SHAPE\", key)", "def GetRootKey(self):", "def locate_shape(shape):", "def shape_id(self):\n return self._shape_id", "def get_key(self, state):\n pass", "def key(key):\n return key", "def get_layer(key):\n layer1 = {'gm': u'Global_Projection', 'np': u'North_Polar_Projection', 'radar': u'Sigma0_Data', 'flag': u'flag'}\n return layer1[key]", "def _key(self):\n return None", "def key(self):\n return self._key if self._key else self.factory().key", "def key_type(self) -> global___Type:", "def key_by_point( self,point ):\n lons,lats,keys = self.lons_lats_keys\n for i, key in enumerate(keys):\n if in_polygon(point = point, poly = (lons[i],lats[i])):\n return key\n return None", "def key(self):\n return key_for_name(self.name)", "def underlying_key(self):\n pass", "def shape(self):", "def shape(self):", "def key(o):\n return hypot((x - o.x), (y - o.y))", "def key(self):\n return self.name", "def key (self):\r\n return self.variable", "def tourney_key(proto_obj):\n return tourney_key_full(proto_obj.id_str)", "def get_key(self):\n return self._determine_key()", "def GetKeyByPath(self, key_path):", "def get_shape_info(self, obj):\r\n return obj.shape", "def key(self):\n return None", "def placementKey( geo):\n def diagcmp( xyA, xyB):\n \"\"\"\n Compare two positions based on x + y. If x + y is the same for the\n two, compare based on x.\n \"\"\"\n return cmp(xyA[0] + xyA[1], xyB[0] + xyB[1]) or cmp(xyA[0], xyB[0])\n\n sorted = [ tuple(geo[i]) for i in xrange(geo.shape[0]) ]\n sorted.sort( diagcmp)\n return hash(tuple(sorted))", "def shape(self):\n return self._shape", "def get_key(self):\n return self.key", "def get_key(self):\n return self.key" ]
[ "0.62794775", "0.627112", "0.6183166", "0.61344665", "0.5998332", "0.5967131", "0.5965354", "0.59295803", "0.5864717", "0.5857207", "0.5822323", "0.57988447", "0.5792031", "0.5763907", "0.57507324", "0.5741417", "0.5723869", "0.5723869", "0.57104725", "0.5698671", "0.5692661", "0.5688225", "0.5681465", "0.56641877", "0.56627584", "0.565048", "0.5638331", "0.56278735", "0.5619848", "0.5619848" ]
0.7656703
0
Gets all users having a specific treatment. Return a list of unique ids.
def get_userids(cursor, having_treatment=None):
    cursor.execute('SELECT id FROM users WHERE treatment=?', (having_treatment,))
    return cursor.fetchall()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_id_users(self):\n return self.execute(TABELLE['id_users']['select']['all'])", "def _get_user_ids(model):\n return model.objects.values_list(\"user\", flat=True).distinct(\"user\")", "def user_ids(self):\n return list(self.get_users())", "def get_users_and_id(self):\n return self.execute(TABELLE['id_users']['select']['all'])", "def get_user_id_list(self):\n user_set = set()\n for subscription in self['subscriptions'].values():\n for role_assignment in subscription['role_assignments'].values():\n if role_assignment['principal_type'] == 'User':\n user_set.add(role_assignment['principal_id'])\n return list(user_set)", "def _get_users_list(self):\n return self.users['user_id'].tolist()", "def all_users(self):\n distinct_users = list(self.client.smartsleep.attendees.distinct(\"userId\"))\n return distinct_users", "def getMyUsers(self):\n my_users = None\n if self.current_user.is_superuser or self.current_user.has_perm(\"manage_input_templates\"):\n my_users = UserCompany.objects.filter(\n company__pk=self.client_session.companyId).all()\n else:\n my_users = UserCompany.objects.filter(pk=self.client_session.userCompanyId).all()\n\n ids = []\n if my_users.count() > 0:\n ids = [str(d.id) for d in my_users]\n return \",\".join(ids)", "def user_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"user_ids\")", "def get_user_ids():\n TOTAL_USERS = 50\n return list(numpy.random.choice(\n TOTAL_USERS, random.randint(1, TOTAL_USERS), replace=False\n ))", "def getInterestedUsers():", "def getAssignedUsers(id=0):\n a = [x.user_ID for x in Assignment.query.filter(Assignment.countEvent_ID == id) ]\n return a", "def getIds(self) -> List[int]:\n return list(self.users.keys())", "def user_ids(self):\r\n raise NotImplementedError", "def users_in_group(self, group_id):\n users = []\n users = self._get(('user', 'group', str(group_id)))\n for user in users:\n if 'dreamdiary.diary.user' in user['saml_permissions']:\n users.append(user)\n return users", "def user_ids(self):\n return list(self._user_ids)", "def getResponsibleUsers():", "def get_all_users():", "def get_all_user_meter_ids(session):\n\n return [meter_id[0] for meter_id in session.query(User.meter_id).all()]", "def get_users(db, group):\n my_users = {\n user_id\n for user_id, in db(\"\"\"\n select distinct\n users.id\n from users, members\n where\n users.id = members.user_id\n and group_id = %s\n \"\"\",\n group.group_id)\n }\n return my_users", "def data_scientists_who_like(target_interest):\n return [user_id for user_id, user_interest in interests if user_interest == target_interest]", "def get_user_items(self, id):\n return self.execute(TABELLE['items']['select']['by_id'], (id,))", "def get_all_volunteers(self):\n volunteers = []\n for user in User.objects.all():\n if not OcAuth(user.id).is_admin():\n volunteers.append(user)\n return volunteers", "def test_user_id_identities_get(self):\n pass", "def select_unique_ids(self):\n utk = self.metadata\n utk_ids = []\n for gg in set(utk['gender']):\n for rg in set(utk['race']):\n for ag in set(utk['age']):\n try:\n intersection_ids = list(utk[np.logical_and(utk['gender'] == gg,\n np.logical_and(utk['race'] == rg,\n utk['age'] == ag))]['filename'])\n if len(intersection_ids) <= CAP:\n utk_ids += intersection_ids\n else:\n x = list(np.random.choice(intersection_ids, CAP, replace=False))\n utk_ids += x\n\n except:\n continue\n self.unique_ids = utk_ids\n return utk_ids", "def return_user_values(self, id_user:int) -> set:\n try:\n value_return = self.cursor.execute(f\"SELECT name_first, 
name_last, nickname FROM {table_users} WHERE id={id_user};\").fetchone()\n return value_return\n except Exception as e:\n msg = f\"We faced problems with the getting of the user; Mistake: {e}\"\n self.proceed_error(msg)\n return []", "def get_all_uid_service():\n return user_dao.get_all_uid_dao()", "def get_all_ids(self):\r\n return self.__person_repository.get_all_ids()", "def get_queryset(self):\n username = self.request.user.username\n patient = UniquePatient.objects.filter(patient__user__username=username)\n return patient", "def get_all_users(db, fake: bool):\n users = set()\n\n if fake:\n tweet_collection = db[\"fake_tweet_collection\"]\n tweet_info_collection = db[\"fake_tweet_info_coll\"]\n else:\n tweet_collection = db[\"real_tweet_collection\"]\n tweet_info_collection = db[\"real_tweet_info_coll\"]\n\n tweet_ids = set()\n for tweet in tweet_collection.find():\n tweet_ids.add(tweet[\"id\"])\n\n for tweet_info in tweet_info_collection.find():\n if tweet_info[\"id\"] in tweet_ids:\n users.update(set(tweet_info[\"tweet_likes\"]))\n\n # Get replies in all levels of reply\n users.update(get_users_involved_in_replies(tweet_info[\"tweet_replies\"]))\n # users.update(set([comment[\"user\"] for comment in tweet_info[\"tweet_replies\"]]))\n users.update(set([comment[\"user\"][\"id\"] for comment in tweet_info[\"tweet_retweets\"]]))\n\n return list(users)" ]
[ "0.6486269", "0.6268288", "0.6096328", "0.5965909", "0.5874028", "0.5859561", "0.58230615", "0.58035034", "0.5749481", "0.5737924", "0.57142645", "0.5705991", "0.57020724", "0.5667985", "0.5667205", "0.5649348", "0.5617039", "0.5612184", "0.55673", "0.5501717", "0.54955304", "0.5440738", "0.5438726", "0.5438023", "0.543511", "0.5416912", "0.5414177", "0.5405535", "0.5392177", "0.5379461" ]
0.75097686
0
Inject a new user if it does not already exist.
def inject_user(dbConn, uid, username): try: with dbConn: pars = (uid, username) print "pars: ", pars dbConn.execute( """INSERT OR IGNORE INTO users(uid, username) VALUES(?, ?)""", pars) except sqlite3.IntegrityError as e: log.error("Error in DB transaction when injecting user: ", uid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_append_existing_user(self):\n print('(' + self.test_append_existing_user.__name__+')',\n self.test_append_existing_user.__doc__)\n self.assertIsNone(self.connection.append_user(\n PATIENT_USERNAME, NEW_PATIENT))", "def add_user_to_g():\n \n if CURRENT_USER in session:\n g.user = User.query.get(session[CURRENT_USER])\n\n else:\n g.user = None", "def add_user_to_g():\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def new_user(cls, user):\r\n pass", "def add_user_to_g():\n\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def add_user_to_g():\n\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def add_user_to_g():\n\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def input_and_create_user(self):\n print(\"Please input username!\")\n new_username = input()\n new_user = user.User(new_username)\n self.users.append(new_user)", "def new_user(cls, user):\n pass", "def add_user_to_g():\n\n if CURR_USER_KEY in session:\n g.user = Employee.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def add_user_to_g():\n if \"Authorization\" in request.headers:\n token = request.headers[\"Authorization\"]\n payload = jwt.decode(token, app.config.get(\n 'SECRET_KEY'), algorithms=[\"HS256\"])\n\n if \"username\" in payload:\n g.user = User.query.filter_by(username=payload[\"username\"]).first()\n\n else:\n g.user = None", "def add_user_to_g():\n # access g in templates, g only lives for life of request\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def check_existing_users(user_name,password):\n\n\n new_user = User(user_name,password)\n\n return new_user", "def new_user():\n pass", "def test_existing_user(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {'is_new': False, 'user': user})", "def add_user(self, user):\n\t\tself.users[user.username] = user", "def add(\n new_user: schemas.UserCreate,\n db_session: Session = Depends(get_db),\n current_user: models.User = Depends(get_current_admin_user)\n):\n db_user = crud.get_by_email(db_session, new_user.email)\n\n if db_user:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail='The user with this email already exists in the system.'\n )\n\n return crud.create(db_session, new_user)", "def upsert_user(user_id, nick_name, first_name, last_name):\n if execute_select(get_user_sql, (user_id,)):\n execute_insert(update_user_sql, (nick_name, first_name, last_name, user_id))\n else:\n execute_insert(add_user_sql, (user_id, nick_name, first_name, last_name))", "def register_user():\n pass", "def test_append_user(self):\n print('(' + self.test_append_user.__name__+')',\n self.test_append_user.__doc__)\n new_username = self.connection.append_user(\n NEW_PATIENT_USERNAME, NEW_PATIENT)\n # test appended ok\n self.assertIsNotNone(new_username)\n # check appended the same user data\n self.assertEqual(new_username, NEW_PATIENT_USERNAME)\n # check the added user in db has the same data\n get_new_patient = self.connection.get_user(new_username)\n self.assertDictContainsSubset(\n NEW_PATIENT['restricted_profile'], get_new_patient['restricted_profile'])\n self.assertDictContainsSubset(\n NEW_PATIENT['public_profile'], 
get_new_patient['public_profile'])", "def addUser(User):\n # check if user already exists #\n user_exists = run('id -u %s >/dev/null 2>&1 || echo \"no\"' % (User))\n if user_exists == \"no\":\n sudo('useradd -m -c \"%s\" -s /bin/bash %s' % (agUsers[User], User))\n else:\n print \"[Info] User '%s' already exists on host '%s'\" % (User, env.host_string)", "def ensure_user_in_database():\n if 'email' in login_session:\n user_exists = session.query(exists().where(User.email == login_session['email'])).scalar()\n if not user_exists:\n user = User(\n id=login_session['userid'],\n picture=login_session['picture'],\n name=login_session['name'],\n email=login_session['email'],\n client_id=login_session['client_id']\n )\n session.add(user)\n session.commit()\n print(\"Recreated user in database\")", "def new_user(global_config, timestamped_email, id_api):\n yield id_api.create_user_if_not_exists(timestamped_email, global_config.users.default.password)", "def test_duplicate_user(self, mapp, existing_user_id):\n\n mapp.create_user(user=existing_user_id, password=1234,\n email=existing_user_id + \"@example.com\", code=409)", "def get_or_create_user(self, username):\n msg = \"get_or_create_user not implemented\"\n raise NotImplementedError(msg)", "def update_user():", "def add_user(self, username, email, password):\n\n new_user = User(username, email, password)\n new_user_details = new_user.get_details()\n for user in self.users:\n if new_user_details['email'] == user['email']:\n return 'User already exists'\n else:\n new_user_details['id'] = len(self.users)\n self.users.append(new_user_details)\n return 'Account created. You can now log in'", "def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def _load_user():\n user = session.get('user')\n\n if user is None:\n g.user = None\n else:\n g.user = user", "def newUser(self):\n user = IrcUser(hashed=True)\n self.nextId += 1\n id = self.nextId\n self.users[id] = user\n self.flush()\n user.id = id\n return user" ]
[ "0.6981035", "0.6747009", "0.6734936", "0.67245996", "0.671906", "0.671906", "0.671906", "0.6718293", "0.66659963", "0.6617214", "0.6604228", "0.6601467", "0.6455636", "0.6441589", "0.6425503", "0.63296014", "0.63053393", "0.629712", "0.6287836", "0.6258661", "0.6240081", "0.6230157", "0.62264615", "0.6170386", "0.61480856", "0.6134941", "0.6134035", "0.61326003", "0.6129529", "0.61290395" ]
0.68272775
1
Update the entry in the 'notifications' DB table for a notification message when a notification arrives.
def make_notification_update(dbConn, obj): try: with dbConn: dbConn.execute("update notifications set rcv_ts=? where msg_id=?", (obj['dt'], obj['msg_id'])) except sqlite3.IntegrityError as e: log.error("Error in DB transaction when updating notification for msg_id: ", obj['msg_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def update_notification_status(db_session: Session, notification: NotificationUpdate):\n notification_exists = await NotificaitonCRUD.find_notification_by_id(db_session, notification.id)\n if notification_exists:\n notification_exists.sent_date = notification.sent_date\n notification_exists.status_code = notification.notify_status\n updated_notification = await NotificaitonCRUD.update_notification(db_session, notification_exists)\n return updated_notification", "def update_notifications_as_seen():\n try:\n update_var = IndividualNotifications.objects.filter(notification_status=1).update(notification_status=2)\n output_json = dict(zip(['Status', 'Message', 'Payload'],\n ['Success', 'Notification was updated successfully', None]))\n return output_json\n except Exception as ex:\n output_json = dict(\n zip(['Status', 'Message', 'Payload'], ['Failure', f'Unable to update Notification Status.{ex}', None]))\n return output_json", "def set_user_notification(notification_data):\n if not notification_data:\n return\n chat_id = notification_data[\"chat_id\"]\n status = notification_data[\"status\"]\n notify_at = notification_data[\"time\"]\n restaurant_id = notification_data[\"restaurant\"].split(\"_\")[1]\n\n notify_info = {\n \"status\": \"включены\" if int(status) else \"выключены\",\n \"restaurant\": restaurant_name[restaurant_id],\n }\n\n db = sqlite3.connect(database)\n cursor = db.cursor()\n\n find_notify_record = f\"SELECT id FROM notifications WHERE chat_id={chat_id} AND restaurant_id={restaurant_id}\"\n notify_record = cursor.execute(find_notify_record)\n if len(notify_record.fetchall()):\n query_update = (\n f\"UPDATE notifications SET notify_at='{notify_at}', updated_at='{datetime.now()}', status='{status}' \"\n f\" WHERE chat_id='{chat_id}' AND restaurant_id='{restaurant_id}'\"\n )\n cursor.execute(query_update)\n else:\n query_insert = (\n f\"INSERT OR IGNORE INTO notifications ('chat_id', 'restaurant_id', 'status', 'notify_at', 'created_at', 'updated_at')\"\n f\" VALUES ('{chat_id}', '{restaurant_id}', '{status}', '{notify_at}', '{datetime.now()}', '{datetime.now()}')\"\n )\n cursor.execute(query_insert)\n\n db.commit()\n db.close()\n\n return notify_info", "def send_update_notification(item, target, name):\n\n # Check to see if anything actually changed. 
A row could be updated with the same values.\n changes = get_changes(target)\n\n # If no changes are found, then we do not need to create a notification.\n # Therefore, we check to see if there are changes before continuing.\n if changes:\n\n # Get the name of the administrator who made the change.\n administrator = \"{} {}\".format(current_user.first_name, current_user.last_name)\n\n # Format the title for the notification.\n title = \"Updated {}\".format(item)\n\n # Format the title for the notification.\n message = \"{} {} was updated by {}\".format(item, name, administrator)\n\n # Create the new notification and add to the database.\n new_notification = Notifications(title=title, message=message)\n db.session.add(new_notification)", "def update_notifications_status(request):\n input_json = request\n try:\n update_record_var = update_record(IndividualNotifications, input_json['individual_notification_id'],\n notification_status=input_json['notification_status'])\n return update_record_var\n except Exception as ex:\n output_json = dict(\n zip(['Status', 'Message', 'Payload'], ['Failure', f'Unable to update Notification Status.{ex}', None]))\n return output_json", "def handleNotification(self, notification):\n pass", "def send_notification(notification_id):\n # Lock until we update the delivery date\n notification = db.session.query(Notification).with_for_update().filter_by(id=notification_id).first()\n\n # Check if notification was already sent, and skip if yes.\n if notification.delivery_date > datetime.now(timezone.utc):\n db.session.rollback()\n return\n\n print(f'Delivered notification={notification_id}!') # Code for sending goes here.\n\n notification.set_or_update_delivery_date()\n\n db.session.commit(notification)", "async def update_cache_from_notification(self) -> List[Notification]:\n new_notifications = []\n try:\n notifications = await self.get_user_notifications()\n\n if not notifications:\n return new_notifications\n\n new_notifications = self.get_new_notifications()\n for notification in new_notifications:\n await self.__manage_notification_posts(notification)\n except Exception as e:\n if self.verbose:\n print(f\"Failed to update Weverse Cache - {e}\")\n return new_notifications", "def feed_update_success(message_data, result):\n\n feed_id = message_data['args'][0]\n feed = Feed.objects.get(pk=feed_id)\n\n feed.flagged = False\n feed.save()\n\n notification = Notification(feed=feed, owner=feed.owner, title='FeedUpdated', message=f'Feed: {feed.id}, {feed.link}, {feed.updated_at}]', is_error=False)\n notification.save()\n print(\"dramatiq callback: : feed update success\")", "def send_insert_notification(item, name):\n\n # Get the name of the administrator who made the change.\n administrator = \"{} {}\".format(current_user.first_name, current_user.last_name)\n\n # Format the title that for the notification.\n title = \"New {}\".format(item)\n\n # Format the message for the notification.\n message = \"{} {} was added by {}\".format(item, name, administrator)\n\n # Create the new notification and add to the database.\n new_notification = Notifications(title=title, message=message)\n db.session.add(new_notification)", "def notify_users_of_reminders():\n\n #Get current date into dd/mm/YYYY format.\n now = datetime.datetime.now()\n todays_date = now.strftime(\"%d/%m/%Y\")\n\n #Get current time and convert it to hh:mm.\n todays_time = now.strftime(\"%H:%M\")\n print(todays_time)\n\n #Select all notifications from the database based on that date and time.\n notifications_query = 
\"\"\"SELECT user, reminder_msg FROM reminders WHERE (date=%s AND time=%s);\"\"\"\n\n #Setup our parameters\n notifications_params = (todays_date, todays_time)\n\n #TODO: Add in cursor.\n #TODO: Run query and get reminder data.\n #TODO: Loop over returned rows, and notify users with send_message_to_irc()", "def snooze(self, update, context):\n update.message.reply_text(\"You have successfully snoozed the notifications for the day. \" \n \"To resume notifications again, use /removeSnooze.\")\n # update to the sqlite table.\n chat = update.message.chat\n self.db_manager.snooze(chat.id)\n self.logger.info(\n 'Username: %s with chat_id: %s snoozed the notifications for the day.' % (chat.username, chat.id))", "def send_notifications():\n due_notifications = Notification.query.filter(Notification.delivery_date <= datetime.now(timezone.utc))\n for notification in due_notifications:\n send_notification.delay(notification.id)", "def notifications(id):\n return core.query(schema.notify, id)", "def notify(guid, message):", "async def __manage_notification_posts(self, notification: Notification):\n notification_type = self.determine_notification_type(notification.message)\n community = self.get_community_by_id(notification.community_id)\n if notification_type == 'comment':\n artist_comments = await self.fetch_artist_comments(notification.community_id, notification.contents_id)\n if artist_comments:\n comment = artist_comments[0]\n comment.post = self.get_post_by_id(comment.post_id)\n if comment.post:\n if comment.post.artist_comments:\n comment.post.artist_comments.insert(0, comment)\n else:\n comment.post.artist_comments = [comment]\n self.all_comments[comment.id] = comment\n elif notification_type in [\"tofans\", \"post\"]:\n post = await self.create_post(community, notification.contents_id)\n if post:\n self.all_posts[post.id] = post\n elif notification_type == 'media':\n media = await self.fetch_media(community.id, notification.contents_id)\n if media:\n self.all_media[media.id] = media\n elif notification_type == 'announcement':\n announcement = await self.fetch_announcement(community.id, notification.contents_id)\n if announcement:\n self.all_announcements[announcement.id] = announcement", "def save(self, **kwargs):\n # Clean up any notifications which might have already been associated with this object\n self.notifications.all().delete()\n super(Behavior, self).save(**kwargs)\n\n my_student = self.enrollment.student\n grades = Grade.objects.filter(student=my_student)\n attendances = AttendanceRecord.objects.filter(enrollment__student=my_student)\n behavior_effors = Behavior.objects.filter(enrollment__student=my_student)\n test_scores = StandardizedTestScore.objects.filter(student=my_student)\n\n calculator = BehaviorNotificationCalculator(student=my_student,\n grades=grades,\n attendances=attendances,\n behavior_efforts=behavior_effors,\n test_scores=test_scores)\n notifications = calculator.get_notifications(self)\n\n # For a behavior notification, interested parties are the case manager as well as all system admins\n interested_parties = [admin for admin in SproutUser.objects.filter(is_superuser=True)]\n interested_parties.append(my_student.case_manager)\n for user in interested_parties:\n for notification in notifications:\n # If there are already unread behavior notifications for this user for this student, get rid of them\n Notification.objects.filter(user=user,\n unread=True,\n student=my_student,\n title=notification.title,\n category=constants.NotificationCategories.BEHAVIOR).delete()\n 
# Add the new notification\n Notification.objects.create(user=user,\n partial_link=\"/behaviors\",\n unread=True,\n category=constants.NotificationCategories.BEHAVIOR,\n content_object=self,\n **notification._asdict())", "async def send_notification(db_session: Session, notification: NotificationRequest):\n new_notification = await NotificaitonCRUD.create_notification(db_session, notification=notification)\n\n # push the email to the queue service\n await publish(payload=new_notification.id)\n\n return new_notification", "def update_notifications(self: object, body: dict) -> dict:\n # [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/UpdateNotificationsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"UpdateNotificationsV1\",\n body=body\n )", "def write(self, notification):", "def process_notification(self, context, event_type, payload):\n\n LOG.info('%s received notification - %s' %\n (self.get_canonical_name(), event_type))\n\n # We need a context that will allow us to manipulate records that are\n # flagged as managed, so we can't use the context that was provided\n # with the notification.\n elevated_context = DesignateContext(tenant=context['tenant']).elevated()\n elevated_context.all_tenants = True\n elevated_context.edit_managed_records = True\n\n # Create an object from the original context so we can use it with the\n # RPC API calls. We want this limited to the single tenant so we can\n # use it to find their domains.\n orig_context = DesignateContext(tenant=context['tenant']).elevated()\n\n # When an instance is deleted, we never get a floating IP update event,\n # we just get notified that the underlying port was deleted. In that\n # case look for it under the other key.\n if event_type.startswith('port.delete'):\n self._disassociate_port_id(context=elevated_context,\n port_id=payload['port_id'])\n\n if event_type.startswith('floatingip.'):\n # A floating IP can only be associated with a single instance at a\n # time, so the first thing we always do is remove any existing\n # association when we get an update. 
This is always safe whether\n # or not we're deleting it or reassigning it.\n if 'floatingip' in payload:\n # floatingip.update.end\n floating_ip = payload['floatingip']['floating_ip_address']\n floating_ip_id = payload['floatingip']['id']\n elif 'floatingip_id' in payload:\n # floatingip.delete.end\n floating_ip = None\n floating_ip_id = payload['floatingip_id']\n\n self._disassociate_floating_ip(context=elevated_context,\n floating_ip_id=floating_ip_id,\n )\n\n # If it turns out that the event is an update and it has a fixed ip in\n # the update, then we create the new record.\n if event_type.startswith('floatingip.update'):\n if payload['floatingip']['fixed_ip_address']:\n domain = self._pick_tenant_domain(orig_context,\n default_regex=cfg.CONF[self.name].default_regex,\n require_default_regex=cfg.CONF[self.name].require_default_regex,\n )\n if domain is None:\n LOG.info('No domains found for tenant %s(%s), ignoring Floating IP update for %s' %\n (context['tenant_name'], context['tenant_id'], floating_ip))\n else:\n LOG.debug('Using domain %s(%s) for tenant %s(%s)' %\n (domain.name, domain.id,\n context['tenant_name'], context['tenant_id']))\n\n kc = keystone_c.Client(token=context['auth_token'],\n tenant_id=context['tenant_id'],\n region_name=cfg.CONF[self.name].region_name,\n auth_url=cfg.CONF[self.name].keystone_auth_uri)\n\n port_id = payload['floatingip']['port_id']\n instance_info = self._get_instance_info(kc, port_id)\n\n extra = payload.copy()\n extra.update({'instance_name': instance_info['name'],\n 'instance_short_name': instance_info['name'].partition('.')[0],\n 'domain': domain.name})\n self._associate_floating_ip(context=elevated_context,\n domain_id=domain.id,\n extra=extra,\n floating_ip_id=floating_ip_id,\n floating_ip=floating_ip,\n port_id=port_id)", "def backfill_notification_statuses(self):\n LIMIT = 250000\n subq = \"SELECT id FROM notification_history WHERE notification_status is NULL LIMIT {}\".format(LIMIT)\n update = \"UPDATE notification_history SET notification_status = status WHERE id in ({})\".format(subq)\n result = db.session.execute(subq).fetchall()\n\n while len(result) > 0:\n db.session.execute(update)\n print('commit {} updates at {}'.format(LIMIT, datetime.utcnow()))\n db.session.commit()\n result = db.session.execute(subq).fetchall()", "def upgrade():\n connection = op.get_bind()\n connection.execute(\"DELETE FROM notifications WHERE object_id=0;\")", "def process_notifications():\n notification_processed= 0\n for notification in EventNotification.objects.filter(awaiting=True):\n if notification.action == 'active':\n # Process the notification of an element become 'active'.\n is_active= False\n try:\n is_active= notification.item.content_object.active\n except:\n pass\n if is_active:\n if send_notification(notification):\n notification.awaiting= False\n notification.save()\n notification_processed += 1\n else:\n print >> sys.stderr, '[%s] %s' % (datetime.now().isoformat(),\n AppMessage('NotificFailed').message % notification.__unicode__())\n return \"Completed processing notifications: %d sent.\" % notification_processed", "def populate_notifications_sql(request):\n input_json, output_json = request, {}\n try:\n for i in input_json['notification_id_list']:\n populate_notification_params = dict(zip(['super_notification_id', 'notification_status',\n 'profile_id', 'added_by', 'last_modified_by'],\n [i, 1, input_json['profile_id'],\n input_json['profile_id'], input_json['profile_id']]))\n serializer_var = 
serializer_save(IndividualNotificationsSerializer, populate_notification_params)\n output_json = dict(zip(['Status', 'Message', 'Payload'],\n ['Success', 'Notifications was populated successfully', None]))\n return output_json\n except Exception as ex:\n output_json = dict(\n zip(['Status', 'Message', 'Payload'], ['Failure', f'Unable to create Notification.{ex}', None]))\n return output_json", "def on_state_notification(self, data):\n\n self.channel_data.update(data)\n\n # synchronize DataManager data with processed update & entity data\n self.sync_data_update_ha()", "async def send_bulk_update_message(event_instance, message, notification_text):\n attendees = Attend.objects.filter(\n event=event_instance, status=\"attending\")\n for attendee in attendees:\n slack_id = attendee.user.slack_id\n if slack_id:\n message = generate_simple_message(message)\n slack_response = notify_user(\n message, slack_id, text=notification_text)\n\n if slack_response[\"ok\"] is False and slack_response[\"headers\"][\"Retry-After\"]:\n delay = int(slack_response[\"headers\"][\"Retry-After\"])\n logging.info(\"Rate limited. Retrying in \" + str(delay) + \" seconds\")\n sleep(delay)\n notify_user(\n message, slack_id, notification_text)\n elif not slack_response['ok']:\n logging.warning(slack_response)", "def notify(*values):\r\n data = {\"value\"+str(i+1): value for i, value in enumerate(values[:3])}\r\n\r\n response = requests.request(\"POST\", notification_url, data=data)\r\n response.raise_for_status()", "def _set_up_change_notifier(conn, table: str, actions: Set[str]):\n\n # build function to create in the database\n channel = f\"{table}_table_change\"\n func_name = f\"notify_{table}_change()\"\n func = f\"\"\"\n CREATE OR REPLACE FUNCTION {func_name}\n RETURNS TRIGGER AS $$\n BEGIN\n PERFORM pg_notify('{channel}','changed');\n RETURN NULL;\n END;\n $$ LANGUAGE plpgsql;\n \"\"\"\n\n # build triggers that will run func on each action\n triggers = \"\"\n for action in actions:\n if action.upper() in ServerSockets.DbActions:\n trigger_name = f\"{table}_notify_{action.lower()}\"\n\n triggers += f\"\"\"\n DROP TRIGGER IF EXISTS {trigger_name} ON {table};\n CREATE TRIGGER {trigger_name}\n AFTER {action} ON {table}\n FOR EACH ROW EXECUTE PROCEDURE {func_name};\n \"\"\"\n else:\n raise TypeError(\n \"All actions must be either INSERT, UPDATE or DELETE\")\n\n # insert function and respective triggers into the database\n cur = conn.cursor()\n cur.execute(func)\n if triggers:\n cur.execute(triggers)\n return channel", "def notify_users(context, content):\n text = (\n f'❗️Обновление в <a href=\"{URL_BASE + URL_NEWS}\">ЛК Директора</a>❗️\\n'\n f\"<b>{content['entry_date']}</b> {content['entry_title']}\\n\"\n f\"💾 <a href=\\\"{content['entry_doc']}\\\">Посмотреть/скачать документ(ы)</a>\"\n )\n logger.info(\"Sending notifications...\")\n logger.info(content)\n for chat_id in BOT_USERS_CHAT_ID:\n context.bot.send_message(\n chat_id=chat_id,\n text=text,\n disable_web_page_preview=BOT_DISABLE_WEB_PAGE_PREVIEW,\n )" ]
[ "0.68454736", "0.6658394", "0.6574994", "0.6457614", "0.63482887", "0.62627167", "0.6245886", "0.62132937", "0.6201244", "0.61510915", "0.6078458", "0.60640794", "0.60503453", "0.6039418", "0.59960485", "0.59960234", "0.59941435", "0.5983427", "0.59663224", "0.5915447", "0.5867319", "0.5859227", "0.5835556", "0.58038336", "0.5800979", "0.5743148", "0.5723803", "0.571942", "0.5706857", "0.5695574" ]
0.7377954
0
Get the last used uid. Corresponds to the number of current rows in the users table.
def get_last_uid(cursor): cursor.execute('SELECT count(uid) FROM users') return int(cursor.fetchall()[0][0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_last_id(cls):\n db = database.db_connection()\n cursor = db.cursor()\n sql_query = \"SELECT max(id_user) FROM user\"\n cursor.execute(sql_query)\n row = cursor.fetchone()\n cursor.close()\n return int(row[0])", "def findLastUserID():\n conn = engine.connect()\n if CheckTblNameExist(\"lineuser\"):\n result_db = conn.execute(\"select * from lineuser order by userid desc\")\n row = result_db.fetchone()\n if row is None:\n conn.close()\n return 0\n else:\n conn.close()\n return int(row.userid)\n else:\n conn.close()\n return -1", "def __getNewUserID(self):\n return db_main.getHandle().seqNextVal(\"users_user_id_seq\")", "def get_uid():\n NNModel._UID_BASE = NNModel._UID_BASE + 1\n return NNModel._UID_BASE", "def count_user():\r\n session = tables.get_session()\r\n if session is None:\r\n return 0\r\n count = 0\r\n try:\r\n user_account = UserAccount()\r\n uid = user_account.get_max_uid(session)\r\n if uid is None:\r\n return 0\r\n return uid + 1\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Count user number failed: %s', err)\r\n return count\r\n finally:\r\n session.close()\r\n return count", "def getuid(): # type: ignore\n return 0", "def find_max_uidNumber(self):\n #First, get all the users in the LDAP\n get_attrs = \"(uidNumber=*)\"\n login_filter = ['uidNumber']\n\n result_data = self.LdapSearch(get_attrs, login_filter)\n #It there is no user in LDAP yet, First LDAP user\n if result_data == []:\n max_uidnumber = self.ldapUserUidNumberMin\n #Otherwise, get the highest uidNumber\n else:\n uidNumberList = [int(r[1]['uidNumber'][0])for r in result_data]\n logger.debug(\"LDAPapi.py \\tfind_max_uidNumber \\\n uidNumberList %s \" % (uidNumberList))\n max_uidnumber = max(uidNumberList) + 1\n\n return str(max_uidnumber)", "def uid(self):\n return safeInt(self.tag(\"uid\"))", "def uid (self):\n return self.__uid", "def get_uid(self):\n value = unicode(self.id) + self.password + app.config['SECRET_KEY']\n if self.last_login:\n value += self.last_login.strftime('%Y-%m-%d %H:%M:%S')\n return hashlib.sha224(value).hexdigest()[::2]", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def getUid(self):\n return self.index", "def uid(self):\n return self._serial_number", "def uid(self):\n\n return self._uid", "def get_id(self):\n return self.uid", "def getUID(self):\r\n while 1:\r\n uid = uuid4()\r\n\r\n if uid not in self._uids:\r\n self._uids.add(uid)\r\n return uid", "def get_last_sid(cursor):\n cursor.execute('SELECT count(sid) FROM sessions')\n return int(cursor.fetchall()[0][0])", "def get_current_id(self) -> int:\n try:\n return self.cursor.execute(f\"SELECT MAX(id) FROM {table_locations};\").fetchone()\n except Exception as e:\n msg = f'We faced some problems with the getting last id value. Mistake: {e}'\n self.proceed_error(msg)\n return -1", "def UID(self):\r\n return self._uid", "def UID(self):\r\n return self._uid", "def getguid(self):\n self.guidp += 1\n return self.guidp-1", "def uid(self) -> str:\n return self._uid", "def uid(self) -> str:\n return pulumi.get(self, \"uid\")", "def user_huid(self) -> Optional[UUID]:\n return self.user.user_huid", "def read_uid(self):\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_READ_UID, (), '', 'I')", "def user_id(self):\n return text_type(hash(self.username))", "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")", "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")" ]
[ "0.71453404", "0.69029254", "0.68670946", "0.6793728", "0.67858857", "0.65834606", "0.65095794", "0.6316191", "0.6264117", "0.6212843", "0.6200426", "0.6200426", "0.6200426", "0.6198154", "0.61980546", "0.6170376", "0.61453146", "0.60790145", "0.6074715", "0.6057952", "0.5975867", "0.5975867", "0.59379774", "0.5930488", "0.59098", "0.5874496", "0.5871107", "0.5868936", "0.58469516", "0.58469516" ]
0.8011418
0
Get the last session id. Corresponds to the number of current rows in the sessions table.
def get_last_sid(cursor): cursor.execute('SELECT count(sid) FROM sessions') return int(cursor.fetchall()[0][0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_last_id(cls):\n db = database.db_connection()\n cursor = db.cursor()\n sql_query = \"SELECT max(id_user) FROM user\"\n cursor.execute(sql_query)\n row = cursor.fetchone()\n cursor.close()\n return int(row[0])", "def last(self):\n return int(self.rpc.call(MsfRpcMethod.SessionRingLast, [self.sid])['seq'])", "def get_current_id(self) -> int:\n try:\n return self.cursor.execute(f\"SELECT MAX(id) FROM {table_locations};\").fetchone()\n except Exception as e:\n msg = f'We faced some problems with the getting last id value. Mistake: {e}'\n self.proceed_error(msg)\n return -1", "def get_next_id(self):\n con = self.c._connect()\n last_id = self.c.get_last_id(con.cursor())\n con.close()\n return last_id + 1", "def last_id(self):\n rows = self.db.query(\"\"\"\n SELECT LAST_INSERT_ID() AS id\n \"\"\")\n for row in rows:\n return row['id']", "def get_session_id(self):\n return self.request_data['id']", "def getSessionId(self) -> int:\n return self.cpp.getSessionId()", "def get_current_id(self):\n\n id = self.ids[-1]\n\n if id is None:\n raise KeyError()\n\n return id", "def getSessionId(self):\n return self.sessionid", "def load_last_session_id(cfg: Config) -> Union[str, None]:\n last_session_file = cfg.project.data_dir / LAST_SESSION_FILENAME\n if not last_session_file.exists():\n return None\n with open(last_session_file, \"r\") as f:\n return f.read()", "def get_sessionid(self):\n if not self.__initialized or not self.__loggedin:\n raise NSNitroError(\"Not initialized or not logged in.\")\n\n return self.__sessionid", "def session_id(self) -> str:\n return self._session_id", "def get_session_id(self):\n raise NotImplementedError()", "def get_last_id(obj, session):\n try:\n return session.query(obj).order_by(\"-id\").first().id\n except AttributeError: # This will be thrown for no entries\n return 0", "def get_lastid(table, conn):\n s = select([table.c.id])\n result = conn.execute(s)\n allids = result.fetchall()\n idlist = []\n for idx in allids:\n if isinstance(idx.values()[0], int):\n idlist.append(idx.values()[0])\n lastid = max(idlist)\n return lastid + 1", "def current_index(self):\n job = self.client.query(\"SELECT MAX(ID) FROM {}.{};\".format(self.database_name, self.table_name))\n for row in job.result():\n if row[0] == None:\n return 1\n current_index = row[0] + 1\n return current_index", "def _get_current_session(self) -> Dict[str, Any]:\n return self._data[-1]", "def insert_get_last_id(self, sql: str) -> int:\n with self.connection.cursor() as cursor:\n self.connection.ping(reconnect=True)\n cursor.execute(sql)\n last_id = cursor.lastrowid\n self.connection.commit()\n return last_id", "def get_max_sid(self):\n session = self.DBSession()\n # first element of the first result or None if no rows present.\n # If multiple rows are returned, raises MultipleResultsFound.\n data = session.query(func.max(CurrentPropertySheet.sid)).scalar() or 0\n return data", "def getId(self):\n return self.session.request('id/')", "def findLastUserID():\n conn = engine.connect()\n if CheckTblNameExist(\"lineuser\"):\n result_db = conn.execute(\"select * from lineuser order by userid desc\")\n row = result_db.fetchone()\n if row is None:\n conn.close()\n return 0\n else:\n conn.close()\n return int(row.userid)\n else:\n conn.close()\n return -1", "def get_session_count(self):\n\t\treturn call_sdk_function('PrlUsrInfo_GetSessionCount', self.handle)", "def last_sequence_ind(self,):\n return self.last_sequence_ind_", "def session_id(self):\n return self.browser.crawlera_session", "def getSessionId(self) 
-> List[int]:\n return self.pool.getSessionId()", "def getID(self):\n global COUNT, C_LOCK\n with C_LOCK:\n COUNT += 1\n return COUNT", "def getLastId(self,table):\n\tif self.dbType==\"sqlite\":\n\t query = \"SELECT LAST_INSERT_ROWID() FROM %s LIMIT 1\"%table\n\telse:\n\t query = \"SELECT LAST_INSERT_ID() FROM %s\"%table\n\tlocaltime= \"%s \"%time.strftime(\"%H:%M:%S\",time.localtime())\n\tpid = \"%s \"%os.getpid()\n self.log.write(pid+localtime+query+'\\n')\n\t# since SQLite locks a whole table we use separate cursor to get\n\t# information while transaction still in progress\n\tcur = self.db.cursor()\n\tcur.execute(query)\n\ttup = cur.fetchone()\n\tid = tup[0]\n\tcur.close()\n# tup = self.fetchOne(query)\n\tid = tup[0]\n return id", "def get_session_key(self):\n return self.session_key", "def lastrowid(self):\n return self.database.lastrowid", "def SessionCount(self):\n if self.force_auto_sync:\n self.get('SessionCount')\n return self._SessionCount" ]
[ "0.71088165", "0.7083588", "0.7010163", "0.6716816", "0.66496617", "0.66146433", "0.65737474", "0.65153116", "0.6488121", "0.6480088", "0.64697057", "0.6468558", "0.6450978", "0.6436075", "0.62589836", "0.6256979", "0.6219457", "0.62008554", "0.61697483", "0.61649126", "0.6159422", "0.6140813", "0.61071724", "0.6104379", "0.6070233", "0.60600954", "0.6042467", "0.6040585", "0.59911364", "0.5973776" ]
0.77453893
0
Method to scroll down and up the page
def scroll_page(self): scroll_down = self.driver.find_element_by_tag_name("html") scroll_down.send_keys(Keys.END) sleep(TestData.DELAY) scroll_down.send_keys(Keys.CONTROL + Keys.HOME) sleep(TestData.DELAY) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scroll_down(self):\r\n self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\r\n sleep(self.wait)", "def scroll_half_page_up(event):\n scroll_backward(event, half=True)", "def scrollTop(self):\n\t\tself.driver.execute_script(\"window.scrollTop(0)\")", "def scroll_down():\r\n \r\n # Get scroll height.\r\n last_height = browser.execute_script(\"return document.body.scrollHeight\")\r\n \r\n while True:\r\n \r\n # Scroll down to the bottom.\r\n browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n \r\n # Wait to load the page.\r\n time.sleep(2)\r\n \r\n # Calculate new scroll height and compare with last scroll height.\r\n new_height = browser.execute_script(\"return document.body.scrollHeight\")\r\n \r\n if new_height == last_height:\r\n break\r\n \r\n last_height = new_height", "def scroll_up(self, locator):\r\n driver = self._current_application()\r\n element = self._element_find(locator, True, True)\r\n driver.execute_script(\"mobile: scroll\", {\"direction\": 'up', 'element': element.id})", "def scroll_page_down(event):\n w = _current_window_for_event(event)\n b = event.cli.current_buffer\n\n if w and w.render_info:\n # Scroll down one page.\n line_index = max(w.render_info.last_visible_line(), w.vertical_scroll + 1)\n w.vertical_scroll = line_index\n\n b.cursor_position = b.document.translate_row_col_to_index(line_index, 0)\n b.cursor_position += b.document.get_start_of_line_position(after_whitespace=True)", "def scroll_down_till_limit(driver, platform):\n # Scroll page to load whole content\n last_height = 0\n while True:\n new_height = scroll_down_page(driver)\n # if no more scrolling possible\n if new_height == last_height:\n break\n # if specified point in past reached\n if is_date_reached(driver, platform):\n break\n\n last_height = new_height\n click_button_xpath(driver, platform)\n\n return driver", "def scroll_page_up(event):\n w = _current_window_for_event(event)\n b = event.cli.current_buffer\n\n if w and w.render_info:\n # Put cursor at the first visible line. (But make sure that the cursor\n # moves at least one line up.)\n line_index = max(0, min(w.render_info.first_visible_line(),\n b.document.cursor_position_row - 1))\n\n b.cursor_position = b.document.translate_row_col_to_index(line_index, 0)\n b.cursor_position += b.document.get_start_of_line_position(after_whitespace=True)\n\n # Set the scroll offset. 
We can safely set it to zero; the Window will\n # make sure that it scrolls at least until the cursor becomes visible.\n w.vertical_scroll = 0", "def scroll_page(self, where: str, direction: ScrollEnum):\n\n element = self.find_element_by_xpath(where)\n if element:\n if direction == ScrollEnum.UP:\n element.send_keys(Keys.HOME)\n elif direction == ScrollEnum.DOWN:\n element.send_keys(Keys.END)", "def scroll_down(self, locator):\r\n driver = self._current_application()\r\n element = self._element_find(locator, True, True)\r\n driver.execute_script(\"mobile: scroll\", {\"direction\": 'down', 'element': element.id})", "def __scroll_element_into_view__(self, element):\n y = element.location['y']\n self.driver.execute_script('window.scrollTo(0, {0})'.format(y))", "def scroll_to_bottom(self):\n # NOTE: this starts scrolling from the current scroll position, not the top of the page.\n current_height = self.driver.execute_script(\n \"return document.documentElement.scrollTop\")\n while True:\n self.click_expandable_buttons()\n # Scroll down to bottom in increments of self.scroll_increment\n new_height = self.driver.execute_script(\n \"return Math.min({}, document.body.scrollHeight)\".format(current_height + self.scroll_increment))\n if (new_height == current_height):\n break\n self.driver.execute_script(\n \"window.scrollTo(0, {});\".format(new_height))\n current_height = new_height\n # Wait to load page\n time.sleep(self.scroll_pause)", "def scroll_to(self):\n self.driver.execute_script(\"arguments[0].scrollIntoView(true);\", self._element)", "def scroll_to_top_of_page(self):\n self.driver.execute_script(\"window.scrollTo(0, 0)\")", "def scroll_down(driver):\n\n # Get scroll height.\n last_height = driver.execute_script(\n \"return document.body.scrollHeight\")\n\n while True:\n\n # Scroll down to the bottom.\n driver.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load the page.\n time.sleep(2)\n\n # Calculate new scroll height and compare with last scroll height.\n new_height = driver.execute_script(\n \"return document.body.scrollHeight\")\n\n if new_height == last_height:\n\n break\n\n last_height = new_height", "def scroll(self):\r\n SCROLL_PAUSE_TIME = 2\r\n current_scrolls = 0\r\n\r\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n while True:\r\n try:\r\n if current_scrolls == total_scroll:\r\n return\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n time.sleep(SCROLL_PAUSE_TIME)\r\n\r\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n if new_height == last_height:\r\n break\r\n last_height = new_height\r\n except TimeoutException:\r\n break\r\n return", "def scroll(*args):", "def scrollUp(self, messages=1):\n self.scrollOffset -= messages\n self._recalculateCoordinates()", "def scroll():\n \n SCROLL_PAUSE_TIME = 0.5\n \n last_height = driver.execute_script(\"return window.scrollY\") \n \n tries = 0\n while True:\n down_height = last_height + 1000\n driver.execute_script(\"window.scrollTo(0,\" + str(down_height) + \")\")\n \n time.sleep(SCROLL_PAUSE_TIME)\n \n new_height = driver.execute_script(\"return window.scrollY\")\n if new_height == last_height:\n tries += 1\n if tries == 10:\n break\n else:\n tries = 0\n last_height = new_height", "def page_down(self):\n counter = self.get_entry_count_per_screen()\n while counter != 0 and self.pointer < (len(self.contents)-1):\n logging.debug(\"moved down\")\n self.pointer += 1\n counter -= 1\n self.refresh()\n 
self.reset_scrolling()\n return True", "def scroll_half_page_down(event):\n scroll_forward(event, half=True)", "def scrollUp(self):\n if self.__firstShownLine > 0:\n self.__firstShownLine -= 1\n self.__refreshContent()\n else:\n curses.beep()", "def scroll_to_bottom(self):\n while self.history.position < self.history.size:\n self.next_page()", "def scrollDown(self, messages=1):\n if self.scrollOffset < 1:\n self.scrollOffset += messages\n self._recalculateCoordinates()", "def page_up(self):\n counter = self.get_entry_count_per_screen()\n while counter != 0 and self.pointer != 0:\n logging.debug(\"moved down\")\n self.pointer -= 1\n counter -= 1\n self.refresh()\n self.reset_scrolling()\n return True", "def scroll_to(self):\n\n if self:\n pass", "def scrollBottom(self):\n\t\tself.driver.execute_script(\"window.scroll(0, document.body.scrollHeight)\")", "def scroll_to_bottom(self):\n expandable_button_selectors = [\n 'button[aria-expanded=\"false\"].pv-skills-section__additional-skills',\n 'button[aria-expanded=\"false\"].pv-profile-section__see-more-inline',\n 'button[aria-expanded=\"false\"].pv-top-card-section__summary-toggle-button',\n 'button[data-control-name=\"contact_see_more\"]'\n ]\n\n current_height = 0\n while True:\n for name in expandable_button_selectors:\n try:\n self.driver.find_element_by_css_selector(name).click()\n except:\n pass\n\n # Use JQuery to click on invisible expandable 'see more...' elements\n self.driver.execute_script(\n 'document.querySelectorAll(\".lt-line-clamp__ellipsis:not(.lt-line-clamp__ellipsis--dummy) .lt-line-clamp__more\").forEach(el => el.click())')\n\n # Scroll down to bottom\n new_height = self.driver.execute_script(\n \"return Math.min({}, document.body.scrollHeight)\".format(current_height + self.scroll_increment))\n if (new_height == current_height):\n break\n self.driver.execute_script(\n \"window.scrollTo(0, Math.min({}, document.body.scrollHeight));\".format(new_height))\n current_height = new_height\n # Wait to load page\n time.sleep(self.scroll_pause)", "def __navigate_scroll(self):\n try:\n _title = self.browser.title\n _body = self.browser.find_element_by_tag_name('body')\n\n i = 0\n while i < 3:\n _html = str(self.browser.page_source)\n _content = Content(_html, _title)\n _attrs = _content.last_divs\n\n scroll_items = []\n for _attr in _attrs:\n xpath_string = '//div'\n\n for k, v in _attr.items():\n if not v:\n xpath_string = xpath_string + \"[@\" + str(k) + \"]\"\n else:\n if isinstance(v, list):\n _vstring = [\"contains(@\" + str(k) + \", '\" + str(_v) + \"')\" for _v in v]\n vstring = \" and \".join(_vstring)\n\n xpath_string = xpath_string + \"[\" + vstring + \"]\"\n\n div = self.browser.find_elements_by_xpath(xpath_string)\n\n for d in div: scroll_items.append(d)\n\n if len(scroll_items) > 10:\n j = 0\n while j < 10:\n try:\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[j])\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n time.sleep(1)\n j += 1\n except Exception as e:\n print(e)\n j += 1\n continue\n \n else:\n for item in scroll_items:\n try:\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", item)\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n _body.send_keys(Keys.HOME)\n time.sleep(1)\n except Exception as e:\n print(e)\n continue\n\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n new_html = str(self.driver.page_source)\n new_content = Content(new_html, 
_title)\n new_attrs = new_content.last_divs\n\n i += 1\n if new_attrs == _attrs:\n break\n else:\n continue\n\n return self.browser.page_source\n\n except:\n return None", "def scroll_into_view(self, offset=None):\n\n self.driver.execute_script(\"arguments[0].scrollIntoView(true);\", self.web_element)\n if offset:\n self.driver.execute_script(\"window.scrollBy(0, {0});\".format(offset))" ]
[ "0.74910605", "0.6924257", "0.68156487", "0.681399", "0.67801416", "0.66706586", "0.66691154", "0.66658944", "0.6662427", "0.66357076", "0.660041", "0.65823764", "0.6574611", "0.6517441", "0.643081", "0.6410477", "0.6406138", "0.6405084", "0.63762605", "0.63598233", "0.63457644", "0.6338525", "0.6338348", "0.63362837", "0.6325929", "0.6306962", "0.6293838", "0.62864065", "0.62783915", "0.62551963" ]
0.71633923
1
Method to locate the bot image in the login page
def bot_image(self): return self.bot_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def locateImageOnScreen(ImageName):\n location = pyautogui.locateOnScreen(ImageName) \n try: \n for x in location:\n return location\n except:\n sys.exit('The image could not be found in the active screen. \\n'+'Stopping program.')", "def get_bot_icon(self):\n return self.bot_data_file[\"bot_icon\"]", "def image(self) -> str:\n return getattr(\n self.auth_accounts[-1], \"image\" # pylint: disable=unsubscriptable-object\n )", "def login_bot(self):\n pass", "async def _misc_IMGplumbob(self, ctx):\r\n await self.bot.say('{}, http://i.imgur.com/q8xJsJQ.gif'.format(ctx.message.author.mention))", "def get_image_url():", "def logo_image(self):\n return self.company_logo or \"upload/default_avatar.gif\"", "def work_on_the_picture(self) -> None:\n self.folder_create(self.folder_config)\n value_image_used = os.path.join(self.folder_config, entrance_bot_img_name)\n if os.path.exists(value_image_used) and os.path.isfile(value_image_used):\n return value_image_used\n a = TelegramManager()\n try:\n value_img = self.produce_request(entrance_bot_img_link)\n if value_img.status_code == 200:\n with open(value_image_used, 'wb') as new_picture:\n for chunk in value_img:\n new_picture.write(chunk)\n return value_image_used\n a.proceed_message_values('Unfortunatelly, your link to the image is not working.')\n except Exception as e:\n a.proceed_message_values(f'We faced problem with the getting requests. Mistake: {e}')\n return ''", "def logo_image(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo_image\")", "def logo_image(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo_image\")", "def image(self) -> str:\n return pulumi.get(self, \"image\")", "def get_screen():\n img_title = 'screen_' + g.client_id + '.png'\n image_path = STATIC_FILES_PATH + img_title\n if g.driver_status != WhatsAPIDriverStatus.LoggedIn:\n try:\n g.driver.get_qr(image_path)\n return send_file(image_path, mimetype='image/png')\n except Exception as err:\n pass\n g.driver.screenshot(image_path)\n return send_file(image_path, mimetype='image/png')", "def app_logo_url():\n return \"https://raw.githubusercontent.com/aiidalab/aiidalab-hello-world/master/img/logo.png\"", "def homeassistant_image(self):\n return self._data.get(HOMEASSISTANT_IMAGE)", "async def inspire(self, ctx):\n async with aiohttp.ClientSession() as session:\n async with session.get('http://inspirobot.me/api?generate=true') as response:\n if(response.status == 200):\n imgurl = await response.text()\n embed = discord.Embed(colour=discord.Colour.dark_blue())\n embed.set_image(url=imgurl)\n embed.set_footer(text='http://inspirobot.me/')\n await ctx.bot.send_message(ctx.message.channel, embed=embed)", "def _get_pic_link(self, tree, xpath_adr='/html/body/div[1]/div[2]/div[3]/div[1]/div[1]/div/div/img/@data-src'):\n try:\n return tree.xpath(xpath_adr)[0]\n except:\n print('WARNING: Could not scrape game card web address, check review xpath address')\n return np.nan", "def getimage(self):", "def findLocalImage(client, name):\n try:\n image = client.images.get(name)\n except Exception:\n return None\n return image.id", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, 
\"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")" ]
[ "0.61848575", "0.6016288", "0.5862747", "0.5774769", "0.5676973", "0.565614", "0.56560594", "0.561276", "0.556046", "0.556046", "0.5436463", "0.54205257", "0.54033643", "0.5379427", "0.5356934", "0.5349648", "0.53474325", "0.5340549", "0.5332325", "0.5332325", "0.5332325", "0.5332325", "0.5332325", "0.5332325", "0.5332325", "0.5332325", "0.5332325", "0.5332325", "0.5332325", "0.5332325" ]
0.70394737
0
Write into the corpus file.
def corpusWriter(self): with open('corpus.txt', 'w') as file: for quote in self.quotes: file.write(quote + '\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_corpus_to_file(output_file, corpus): \n \n file = open(output_file, 'w')\n for line in corpus: \n file.write(line)\n print ('Corpus has been writted in file')\n file.close()", "def save(file, corpus):\n with open(file, 'w') as f_out:\n f_out.write(corpus)", "def save_to_txt(self):\n content = self.get_corpus()\n txt_pdf = open('text_pdf.txt', 'wb')\n txt_pdf.write(content.encode('utf-8'))\n txt_pdf.close()", "def handle(self, *args, **options):\n self.stdout.write('exporting corpus to text file')\n basetext = '\\n'.join([x.text_str for x in BaseText.objects.all() if x.check_age()])\n with open(os.path.join(BASE_DIR, 'corpus.txt'), 'w') as f:\n f.write(basetext)", "def write_data_corpus(filename, documents):\n\n with open(filename, 'wb') as f:\n for statement in documents:\n enc_statement = statement.encode('utf-8')\n f.write(enc_statement + '\\n')", "def to_file(file_name: str, vocab, corpus: list):\r\n with open(file_name, mode=\"w\", encoding=\"UTF-8\") as out_file:\r\n out_file.write(\r\n \"\".join(vocab[0])\r\n + \",\".join(vocab[1:])\r\n + \"\\n\"\r\n + \"\\n\".join([format_corpus(s, vocab) for s in corpus])\r\n )", "def save_corpus(events_df, path):\n corpus = extract_corpus(events_df)\n with open(path, 'w') as f:\n for doc in corpus:\n f.write(doc + '\\n')", "def write_conll(cls, filename, writer, document_id, sentences):\n with open(filename, 'w') as fd:\n writer.write(fd, document_id, sentences)", "def saveCorpusFile(output_path, arr, format, features):\n def rowMap(x):\n if format == \"csv\":\n if features:\n x = x.split(\",\")[1]\n else:\n parts = x.split(\",\")\n parts.pop(0)\n x = \" \".join(parts)\n return x.replace(\",\", \" \")\n if format == \"tsv\":\n if features:\n x = x.split(\"\\t\")[1]\n else:\n parts = x.split(\"\\t\")\n parts.pop(0)\n x = \" \".join(parts)\n return x.replace(\"\\t\", \" \")\n\n arr_corpus = map(lambda x: rowMap(x), arr)\n with open(output_path, 'w+') as corpusfile:\n for row in arr_corpus:\n corpusfile.write(row + \"\\n\")", "def write_tok_to_file(self):\n dir_path = os.path.join(self.output_path, 'tokens')\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n for dataset_name, dataset in self.amr_corpus.items():\n f = open(os.path.join(dir_path, dataset_name + '_tok.txt'), 'w')\n for doc_name, doc in dataset.items():\n for amr_id, amr_data in doc.items():\n amr_strings = self.amr_corpus[dataset_name][doc_name][amr_id]['amr_string_triples']\n if not amr_strings:\n continue\n tok = ' '.join(self.amr_corpus[dataset_name][doc_name][amr_id]['tok'])\n f.write(tok + '\\n')\n f.close()", "def append_corpus(output):\n files = []\n output_path = output + \"/ig/\" + \"ig_corpus.txt\"\n for root, directories, filenames in os.walk(output + \"/ig/\"):\n for filename in filenames:\n files.append(os.path.join(root, filename))\n corpusfiles = filter(lambda x: \".txt\" in x, files)\n if not os.path.exists(os.path.dirname(output_path)):\n os.makedirs(os.path.dirname(output_path))\n with open(output_path, \"w+\") as corpusFile:\n for file in corpusfiles:\n fileH = open(file, \"r\")\n corpusFile.write(fileH.read())", "def write_to_file(self, papers, filename):\n\t\tpass", "def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n 
self.number_of_documents = documentsCount", "def write(self, txt):\n for fp in self.files:\n fp.write(txt)", "def write(file, text):\n with open(file, 'w') as f:\n f.write(text)", "def write_file(tweets):\n with open((folderlink + \"markov_sentences.txt\"), \"w\") as text_file:\n for tweet in tweets:\n text_file.write (tweet + '\\n')\n with file ((folderlink + \"markov_sentences.txt\"), 'r') as f:\n text = f.read()\n text_model = markovify.NewlineText(text)\n print \"model successful \\n\\n\\n\\n\"\n for i in range(5):\n print(text_model.make_short_sentence(140, tries=100))\n text_file.close()", "def write(name, keyword, domain, citation, author, description, species, version, contact, license, values, output):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n file=output,\n )", "def write_to_txt(self):\r\n file = open(self.output_path, 'w')\r\n for question_id in self.question_ids:\r\n file.write(self.questions[question_id].question_string+str(self.questions[question_id].answer)+'\\n')\r\n file.close()", "def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")", "def write(file_path, kml_str):\n\n fa.text_writer(file_path, kml_str)", "def write(filename):\n print(uc.write(filename))", "def save_corpora(self):\n\n if self.filename_for_save is not None:\n with open(self.filename_for_save, 'w', newline='', encoding=constants.load_encoding) as file:\n writer = csv.writer(file)\n\n for string in self.__corpora:\n writer.writerow([string])", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write_to(self, fp):\n fp.write(self.text)", "def write_transcription(output_directory, text):\n if not os.path.exists(f'{output_directory}/transcriptions.txt'):\n transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig')\n transfile.close()\n logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.')\n with open(f\"{output_directory}/transcriptions.txt\", \"a\", encoding='utf-8-sig') as transfile:\n transfile.write(f'{text}\\n')\n transfile.close()", "def writetofile(invertedindex, filename):\n file = open(filename + '.txt', 'w', encoding='utf-8')\n for word in invertedindex.keys():\n file.write(word)\n file.write(' : ')\n for docid in invertedindex[word][0]:\n file.write(str(docid) + ' ')\n file.write('\\n')", "def write_to_file(self, filename: str) -> None:", "def archive_corpus(self):\n total_perf_array = self.single_sequence_corpus()\n if self.verbose:\n print(total_perf_array.shape)\n data_file_name = \"TinyPerformanceCorpus.h5\"\n with h5py.File(data_file_name, 'w') as data_file:\n data_file.create_dataset('total_performances', data=total_perf_array, dtype='float32')", "def persist_corpus(self):\n subreddit = self.postman.subreddit\n corpus_coll = self.postman.corpus_write\n subreddit_query = {'subreddit':subreddit}\n\n preexisting_corpora = corpus_coll.find(subreddit_query).count()\n print 'deleting %i existing corpora for subreddit' % preexisting_corpora\n corpus_coll.delete_many(subreddit_query)\n\n result = corpus_coll.insert_one({'subreddit':subreddit, 'corpus':list(self.corpus)})\n print 'persisted corpus of length %i' % (len(self.corpus))\n\n # chaining\n return self" ]
[ "0.8130881", "0.7788724", "0.71461576", "0.70922303", "0.6882938", "0.6699375", "0.6698951", "0.65648764", "0.63408846", "0.63222456", "0.6238415", "0.6161453", "0.6103348", "0.6076733", "0.6034292", "0.6018647", "0.5968113", "0.5964516", "0.59475064", "0.5937549", "0.59292996", "0.59289265", "0.5912499", "0.5912499", "0.59066045", "0.5898725", "0.58943564", "0.58922005", "0.5886837", "0.58785796" ]
0.80795825
1
Sets/gets homogeneous external field and does not update vector potential.
def homogeneous_external_field(self):
    return self._H
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def external_field(self):\n # TODO: return curl(A) for non-homogeneous external_field\n A = self.external_vector_potential\n if A is not None:\n Ax, Ay = A\n # TODO: check expression below\n return (- np.diff(Ax, axis=1) * cfg.idy\n + np.diff(Ay, axis=0) * cfg.idx)\n else:\n return None", "def force_field():\n ff = get_native_force_field('martini22')\n nter = ff.modifications['N-ter'].copy()\n nter.name = (nter.name, )\n cter = ff.modifications['C-ter'].copy()\n cter.name = (cter.name, )\n ff.modifications['N-ter'] = nter\n ff.modifications['C-ter'] = cter\n return ff", "def set_field(self,Hext):\n self.raw_parameters[\"Hext\"] = Hext\n self.parameters = NormalizedParameters(self.raw_parameters)\n self._load()", "def field ( self , xyz ) :\n return self._ilhcbmagnet.fieldVector ( xyz )", "def set_field( self, data ):\n super( UnsteadyField1D, self ).set_field( data )\n self.history[:] = self.val[:]\n return", "def external_vector_potential(self):\n assert (self.ae is None) == (self.be is None)\n \n if self.ae is not None:\n return self.ae, self.be\n\n return None", "def on_put_field(self, ins, const, obj, value):\n pass", "def make_field(self):\n def field_func(m):\n return self.hext + field.demagnetization(m, self.Nd)\n self.field = field_func", "def PopulateCommonFieldValues(self, field, mojom_field):\n field.name = mojom_field.decl_data.short_name\n field.kind = self.KindFromMojom(mojom_field.type)\n field.attributes = self.AttributesFromMojom(mojom_field)", "def on_get_field(self, ins, const, obj):\n pass", "def handle_field(self, obj, field):\n value = field._get_val_from_obj(obj)\n if isinstance(field, GeometryField):\n self._current[field.name] = value\n else:\n super(Serializer, self).handle_field(obj, field)", "def _checked_set(self, struct, field, value):\n setattr(struct, field, value)\n self._check_field_length(struct.DESCRIPTOR.fields_by_name[field], value)", "def set_real(self, var, value):\n self.fmu.set_real(var.value_reference, value)", "def _set_model_field(self):\n self._field_value = hutils.format_json(self._memory_data)\n setattr(self._model, self._field, self._field_value)", "def on_sense_field(self, field_type, field_parameter):\n raise NotImplementedError()", "def external_irregular_vector_potential(self):\n if self._vpei is not None:\n return self._vpei.get_vec_h()\n\n return None", "def __init__(self, atomlist, atomtypes, partial_charges, lattice_vectors,\n chromophores, verbose=1, **kwds):\n import ff\n self.force_field = ff.ForceField(atomlist, atomtypes, partial_charges,\n lattice_vectors, chromophores, verbose=verbose, **kwds)", "def compute_electric_field(self):\n self.set_grid()\n rho = self.grid.distribute(self.bunch.positions)\n rho *= self.bunch.line_charge_density * 4 # unknown origin\n phi = self.solver.get_potential(rho, self.bunch.line_charge_density)\n Ex, Ey = self.grid.gradient(-phi)\n self.fields[:, 0] = self.grid.interpolate(Ex, self.bunch.positions)\n self.fields[:, 1] = self.grid.interpolate(Ey, self.bunch.positions)", "def read_field(self, fieldname):\n if fieldname in ['wind_speed', 'wind_direction']:\n # create a virtual field\n variable = Variable(\n shortname=fieldname,\n description=VIRTUALFIELD_DESCR[fieldname],\n authority=self.get_naming_authority(),\n standardname=VIRTUALFIELD_STDNAME[fieldname]\n )\n field = Field(\n variable,\n OrderedDict([('time', 1),\n ('y', self.get_dimsize('y')),\n ('x', self.get_dimsize('x'))\n ]),\n datatype=numpy.dtype(numpy.float32),\n units=VIRTUALFIELD_UNITS[fieldname]\n )\n 
field.attach_storage(self.get_field_handler(fieldname))\n else:\n field = NCFile.read_field(self, fieldname)\n return field", "def set_field( self, data ):\n self.val[:] = data[:]\n return", "def getValueFromFieldname(self,fieldname):\n if hasattr(self,fieldname): #Standard attributes.\n value = getattr(self,fieldname)\n if not isinstance(value,Cartesian3DVector):\n return value\n if fieldname == \"E\": #Interprets E as energy\n return self.getEnergy()\n momentum_direction = fieldname.replace(\"p\",\"\")\n velocity_direction = fieldname.replace(\"v\",\"\")\n if fieldname.startswith(\"p\") and momentum_direction in [\"x\",\"y\",\"z\"]:\n return getattr(self.p,momentum_direction)\n if fieldname.startswith(\"v\") and velocity_direction in [\"x\",\"y\",\"z\"]:\n return getattr(self.v,velocity_direction)\n elif fieldname in [\"x\",\"y\",\"z\"]:\n return getattr(self.x,fieldname)\n raise Exception(\"The given field, \"+fieldname+\", is not defined for the particle.\")", "def scalar_potential(field, coord_sys):\n\n # Check whether field is conservative\n if not is_conservative(field):\n raise ValueError(\"Field is not conservative\")\n if field == Vector.zero:\n return S.Zero\n # Express the field exntirely in coord_sys\n # Substitute coordinate variables also\n if not isinstance(coord_sys, CoordSys3D):\n raise TypeError(\"coord_sys must be a CoordSys3D\")\n field = express(field, coord_sys, variables=True)\n dimensions = coord_sys.base_vectors()\n scalars = coord_sys.base_scalars()\n # Calculate scalar potential function\n temp_function = integrate(field.dot(dimensions[0]), scalars[0])\n for i, dim in enumerate(dimensions[1:]):\n partial_diff = diff(temp_function, scalars[i + 1])\n partial_diff = field.dot(dim) - partial_diff\n temp_function += integrate(partial_diff, scalars[i + 1])\n return temp_function", "def VectorField(\n adata: anndata.AnnData,\n basis: Union[None, str] = None,\n layer: Union[None, str] = None,\n dims: Union[int, list, None] = None,\n genes: Union[list, None] = None,\n normalize: bool = False,\n grid_velocity: bool = False,\n grid_num: int = 50,\n velocity_key: str = \"velocity_S\",\n method: str = \"SparseVFC\",\n min_vel_corr: float = 0.6,\n restart_num: int = 5,\n restart_seed: Union[None, list] = [0, 100, 200, 300, 400],\n model_buffer_path: Union[str, None] = None,\n return_vf_object: bool = False,\n map_topography: bool = False,\n pot_curl_div: bool = False,\n cores: int = 1,\n result_key: Union[str, None] = None,\n copy: bool = False,\n **kwargs,\n) -> Union[anndata.AnnData, base_vectorfield]:\n logger = LoggerManager.gen_logger(\"dynamo-topography\")\n logger.info(\"vectorfield calculation begins...\", indent_level=1)\n logger.log_time()\n adata = copy_adata(adata) if copy else adata\n\n if basis is not None:\n logger.info(\n \"Retrieve X and V based on basis: %s. \\n \"\n \" Vector field will be learned in the %s space.\" % (basis.upper(), basis.upper())\n )\n X = adata.obsm[\"X_\" + basis].copy()\n V = adata.obsm[\"velocity_\" + basis].copy()\n\n if np.isscalar(dims):\n X, V = X[:, :dims], V[:, :dims]\n elif type(dims) is list:\n X, V = X[:, dims], V[:, dims]\n else:\n logger.info(\n \"Retrieve X and V based on `genes`, layer: %s. 
\\n \"\n \" Vector field will be learned in the gene expression space.\" % layer\n )\n valid_genes = (\n list(set(genes).intersection(adata.var.index))\n if genes is not None\n else adata.var_names[adata.var.use_for_transition]\n )\n if layer == \"X\":\n X = adata[:, valid_genes].X.copy()\n X = np.expm1(X)\n else:\n X = inverse_norm(adata, adata.layers[layer])\n\n V = adata[:, valid_genes].layers[velocity_key].copy()\n\n if sp.issparse(X):\n X, V = X.A, V.A\n\n Grid = None\n if X.shape[1] < 4 or grid_velocity:\n logger.info(\"Generating high dimensional grids and convert into a row matrix.\")\n # smart way for generating high dimensional grids and convert into a row matrix\n min_vec, max_vec = (\n X.min(0),\n X.max(0),\n )\n min_vec = min_vec - 0.01 * np.abs(max_vec - min_vec)\n max_vec = max_vec + 0.01 * np.abs(max_vec - min_vec)\n\n Grid_list = np.meshgrid(*[np.linspace(i, j, grid_num) for i, j in zip(min_vec, max_vec)])\n Grid = np.array([i.flatten() for i in Grid_list]).T\n\n if X is None:\n raise Exception(f\"X is None. Make sure you passed the correct X or {basis} dimension reduction method.\")\n elif V is None:\n raise Exception(\"V is None. Make sure you passed the correct V.\")\n\n logger.info(\"Learning vector field with method: %s.\" % (method.lower()))\n if method.lower() == \"sparsevfc\":\n vf_kwargs = {\n \"M\": None,\n \"a\": 5,\n \"beta\": None,\n \"ecr\": 1e-5,\n \"gamma\": 0.9,\n \"lambda_\": 3,\n \"minP\": 1e-5,\n \"MaxIter\": 30,\n \"theta\": 0.75,\n \"div_cur_free_kernels\": False,\n \"velocity_based_sampling\": True,\n \"sigma\": 0.8,\n \"eta\": 0.5,\n \"seed\": 0,\n }\n elif method.lower() == \"dynode\":\n try:\n from dynode.vectorfield import networkModels\n from dynode.vectorfield.samplers import VelocityDataSampler\n\n # from dynode.vectorfield.losses_weighted import MAD, BinomialChannel, WassersteinDistance, CosineDistance\n from dynode.vectorfield.losses_weighted import MSE\n from .scVectorField import dynode_vectorfield\n except ImportError:\n raise ImportError(\"You need to install the package `dynode`.\" \"install dynode via `pip install dynode`\")\n\n velocity_data_sampler = VelocityDataSampler(adata={\"X\": X, \"V\": V}, normalize_velocity=normalize)\n max_iter = 2 * 100000 * np.log(X.shape[0]) / (250 + np.log(X.shape[0]))\n\n cwd, cwt = os.getcwd(), datetime.datetime.now()\n\n if model_buffer_path is None:\n model_buffer_path = cwd + \"/\" + basis + \"_\" + str(cwt.year) + \"_\" + str(cwt.month) + \"_\" + str(cwt.day)\n main_warning(\"the buffer path saving the dynode model is in %s\" % (model_buffer_path))\n\n vf_kwargs = {\n \"model\": networkModels,\n \"sirens\": False,\n \"enforce_positivity\": False,\n \"velocity_data_sampler\": velocity_data_sampler,\n \"time_course_data_sampler\": None,\n \"network_dim\": X.shape[1],\n \"velocity_loss_function\": MSE(), # CosineDistance(), # #MSE(), MAD()\n # BinomialChannel(p=0.1, alpha=1)\n \"time_course_loss_function\": None,\n \"velocity_x_initialize\": X,\n \"time_course_x0_initialize\": None,\n \"smoothing_factor\": None,\n \"stability_factor\": None,\n \"load_model_from_buffer\": False,\n \"buffer_path\": model_buffer_path,\n \"hidden_features\": 256,\n \"hidden_layers\": 3,\n \"first_omega_0\": 30.0,\n \"hidden_omega_0\": 30.0,\n }\n train_kwargs = {\n \"max_iter\": int(max_iter),\n \"velocity_batch_size\": 50,\n \"time_course_batch_size\": 100,\n \"autoencoder_batch_size\": 50,\n \"velocity_lr\": 1e-4,\n \"velocity_x_lr\": 0,\n \"time_course_lr\": 1e-4,\n \"time_course_x0_lr\": 1e4,\n \"autoencoder_lr\": 
1e-4,\n \"velocity_sample_fraction\": 1,\n \"time_course_sample_fraction\": 1,\n \"iter_per_sample_update\": None,\n }\n else:\n raise ValueError(\"current only support two methods, SparseVFC and dynode\")\n\n vf_kwargs = update_dict(vf_kwargs, kwargs)\n\n if restart_num > 0:\n if len(restart_seed) != restart_num:\n main_warning(\n f\"the length of {restart_seed} is different from {restart_num}, \" f\"using `np.range(restart_num) * 100\"\n )\n restart_seed = np.arange(restart_num) * 100\n restart_counter, cur_vf_list, res_list = 0, [], []\n while True:\n if method.lower() == \"sparsevfc\":\n kwargs.update({\"seed\": restart_seed[restart_counter]})\n VecFld = SvcVectorfield(X, V, Grid, **vf_kwargs)\n cur_vf_dict = VecFld.train(normalize=normalize, **kwargs)\n elif method.lower() == \"dynode\":\n train_kwargs = update_dict(train_kwargs, kwargs)\n VecFld = dynode_vectorfield(X, V, Grid, **vf_kwargs)\n # {\"VecFld\": VecFld.train(**kwargs)}\n cur_vf_dict = VecFld.train(**train_kwargs)\n\n # consider refactor with .simulation.evaluation.py\n reference, prediction = (\n cur_vf_dict[\"Y\"][cur_vf_dict[\"valid_ind\"]],\n cur_vf_dict[\"V\"][cur_vf_dict[\"valid_ind\"]],\n )\n true_normalized = reference / (np.linalg.norm(reference, axis=1).reshape(-1, 1) + 1e-20)\n predict_normalized = prediction / (np.linalg.norm(prediction, axis=1).reshape(-1, 1) + 1e-20)\n res = np.mean(true_normalized * predict_normalized) * prediction.shape[1]\n\n cur_vf_list += [cur_vf_dict]\n res_list += [res]\n if res < min_vel_corr:\n restart_counter += 1\n main_info(\n f\"current cosine correlation between input velocities and learned velocities is less than \"\n f\"{min_vel_corr}. Make a {restart_counter}-th vector field reconstruction trial.\",\n indent_level=2,\n )\n else:\n vf_dict = cur_vf_dict\n break\n\n if restart_counter > restart_num - 1:\n main_warning(\n f\"Cosine correlation between input velocities and learned velocities is less than\"\n f\" {min_vel_corr} after {restart_num} trials of vector field reconstruction.\"\n )\n vf_dict = cur_vf_list[np.argmax(np.array(res_list))]\n\n break\n else:\n if method.lower() == \"sparsevfc\":\n VecFld = SvcVectorfield(X, V, Grid, **vf_kwargs)\n vf_dict = VecFld.train(normalize=normalize, **kwargs)\n elif method.lower() == \"dynode\":\n train_kwargs = update_dict(train_kwargs, kwargs)\n VecFld = dynode_vectorfield(X, V, Grid, **vf_kwargs)\n # {\"VecFld\": VecFld.train(**kwargs)}\n vf_dict = VecFld.train(**train_kwargs)\n\n if result_key is None:\n vf_key = \"VecFld\" if basis is None else \"VecFld_\" + basis\n else:\n vf_key = result_key if basis is None else result_key + \"_\" + basis\n\n vf_dict[\"method\"] = method\n if basis is not None:\n key = \"velocity_\" + basis + \"_\" + method\n X_copy_key = \"X_\" + basis + \"_\" + method\n\n logger.info_insert_adata(key, adata_attr=\"obsm\")\n logger.info_insert_adata(X_copy_key, adata_attr=\"obsm\")\n adata.obsm[key] = vf_dict[\"V\"]\n adata.obsm[X_copy_key] = vf_dict[\"X\"]\n\n vf_dict[\"dims\"] = dims\n\n logger.info_insert_adata(vf_key, adata_attr=\"uns\")\n adata.uns[vf_key] = vf_dict\n else:\n key = velocity_key + \"_\" + method\n\n logger.info_insert_adata(key, adata_attr=\"layers\")\n adata.layers[key] = sp.csr_matrix((adata.shape))\n adata.layers[key][:, valid_genes] = vf_dict[\"V\"]\n\n vf_dict[\"layer\"] = layer\n vf_dict[\"genes\"] = genes\n vf_dict[\"velocity_key\"] = velocity_key\n\n logger.info_insert_adata(vf_key, adata_attr=\"uns\")\n adata.uns[vf_key] = vf_dict\n\n if map_topography:\n tp_kwargs = {\"n\": 
25}\n tp_kwargs = update_dict(tp_kwargs, kwargs)\n\n logger.info(\"Mapping topography...\")\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n adata = topography(\n adata,\n basis=basis,\n X=X,\n layer=layer,\n dims=None,\n VecFld=vf_dict,\n **tp_kwargs,\n )\n if pot_curl_div:\n logger.info(f\"Running ddhodge to estimate vector field based pseudotime in {basis} basis...\")\n\n ddhodge(adata, basis=basis, cores=cores)\n if X.shape[1] == 2:\n logger.info(\"Computing curl...\")\n curl(adata, basis=basis)\n\n logger.info(\"Computing divergence...\")\n divergence(adata, basis=basis)\n\n control_point, inlier_prob, valid_ids = (\n \"control_point_\" + basis if basis is not None else \"control_point\",\n \"inlier_prob_\" + basis if basis is not None else \"inlier_prob\",\n vf_dict[\"valid_ind\"],\n )\n if method.lower() == \"sparsevfc\":\n logger.info_insert_adata(control_point, adata_attr=\"obs\")\n logger.info_insert_adata(inlier_prob, adata_attr=\"obs\")\n\n adata.obs[control_point], adata.obs[inlier_prob] = False, np.nan\n adata.obs.loc[adata.obs_names[vf_dict[\"ctrl_idx\"]], control_point] = True\n adata.obs.loc[adata.obs_names[valid_ids], inlier_prob] = vf_dict[\"P\"].flatten()\n\n # angles between observed velocity and that predicted by vector field across cells:\n cell_angels = np.zeros(adata.n_obs)\n for i, u, v in zip(valid_ids, V[valid_ids], vf_dict[\"V\"]):\n # fix the u, v norm == 0 in angle function\n cell_angels[i] = angle(u, v)\n\n if basis is not None:\n temp_key = \"obs_vf_angle_\" + basis\n\n logger.info_insert_adata(temp_key, adata_attr=\"obs\")\n adata.obs[temp_key] = cell_angels\n else:\n temp_key = \"obs_vf_angle\"\n logger.info_insert_adata(temp_key, adata_attr=\"obs\")\n adata.obs[temp_key] = cell_angels\n\n logger.finish_progress(\"VectorField\")\n if return_vf_object:\n return VecFld\n elif copy:\n return adata\n return None", "def make_field(self):\n uniaxial = self.u[0]*self.u[1]*self.u[2] != 0\n cubic = self.c1[0]*self.c1[1]*self.c1[2]*self.c2[0]*self.c2[1]*self.c2[2] != 0\n @nb.njit\n def field_func(m):\n heff = self.hext + field.demagnetization(m, self.Nd)\n if uniaxial:\n heff += field.uniaxial_anisotropy(m, self.u, self.hu1, self.hu2)\n if cubic:\n heff += field.cubic_anisotropy(m, self.c1, self.c2, self.c3, self.hc1, self.hc2)\n return heff\n self.field = field_func", "def set_field(self, x:int, y:int, field:Field) -> None:\r\n self.fields[x][y] = field", "def __init__(self, fieldFunction, geoObject, geoEvent):\n # TODO: restrict value pairs to geoObject\n pass", "def _set_field(self, instrument_name, parameter_name, field, value, force_update):\n if self.verbose >= 2:\n print('_set_field: %s %s: %s' % (instrument_name, parameter_name, str(value)))\n tree_widget = self._itemsdict[instrument_name][parameter_name]['widget']\n double_box = self._itemsdict[instrument_name][parameter_name]['double_box']\n\n field_index = self._fields.index(field)\n\n double_value = False\n if field_index == 0 and double_box is not None:\n double_value = True\n if not double_value:\n tree_widget.setText(field_index + 1, str(value))\n else:\n # update a float value\n try:\n update_value = np.abs(tree_widget.value() - value) > 1e-9\n except Exception as ex:\n logging.debug(ex)\n update_value = True\n if update_value or force_update:\n if not double_box.hasFocus(): # do not update when editing\n logging.debug('update %s to %s' % (parameter_name, value))\n try:\n oldstate = double_box.blockSignals(True)\n double_box.setValue(value)\n 
double_box.blockSignals(oldstate)\n except Exception as ex:\n logging.debug(ex)", "def _build_reduced_system(self, original_force_field, topology, scale_amount=None):\n # As this method deals mainly with the toolkit, we stick to\n # simtk units here.\n from openforcefield.typing.engines.smirnoff import ForceField\n\n parameter_tag = self.parameter_key.tag\n parameter_smirks = self.parameter_key.smirks\n parameter_attribute = self.parameter_key.attribute\n\n original_handler = original_force_field.get_parameter_handler(parameter_tag)\n original_parameter = original_handler.parameters[parameter_smirks]\n\n if self.use_subset_of_force_field:\n\n force_field = ForceField()\n handler = copy.deepcopy(original_force_field.get_parameter_handler(parameter_tag))\n force_field.register_parameter_handler(handler)\n\n else:\n\n force_field = copy.deepcopy(original_force_field)\n handler = force_field.get_parameter_handler(parameter_tag)\n\n parameter_index = None\n value_list = None\n\n if hasattr(original_parameter, parameter_attribute):\n parameter_value = getattr(original_parameter, parameter_attribute)\n else:\n attribute_split = re.split(r'(\\d+)', parameter_attribute)\n\n assert len(parameter_attribute) == 2\n assert hasattr(original_parameter, attribute_split[0])\n\n parameter_attribute = attribute_split[0]\n parameter_index = int(attribute_split[1]) - 1\n\n value_list = getattr(original_parameter, parameter_attribute)\n parameter_value = value_list[parameter_index]\n\n if scale_amount is not None:\n\n existing_parameter = handler.parameters[parameter_smirks]\n\n if np.isclose(parameter_value.value_in_unit(parameter_value.unit), 0.0):\n # Careful thought needs to be given to this. Consider cases such as\n # epsilon or sigma where negative values are not allowed.\n parameter_value = (scale_amount if scale_amount > 0.0 else 0.0) * parameter_value.unit\n else:\n parameter_value *= (1.0 + scale_amount)\n\n if value_list is None:\n setattr(existing_parameter, parameter_attribute, parameter_value)\n else:\n value_list[parameter_index] = parameter_value\n setattr(existing_parameter, parameter_attribute, value_list)\n\n system = force_field.create_openmm_system(topology)\n\n if not self.enable_pbc:\n disable_pbc(system)\n\n return system, parameter_value", "def set_field(coil, fieldValue, fieldGain):\n current = (fieldValue/fieldGain)*1e3 # set the current to be in milliamps\n print(current) \n coil.current(current)\n return", "def extents(self, value):\n\n self._local = value\n if self.is_attached:\n if self._local is None:\n self[\"local\"] = self._global\n else:\n self[\"local\"] = self._local\n self[\"clipping\"] = self._clipping\n self[\"transform\"] = self._transform" ]
[ "0.6045435", "0.6040067", "0.58883786", "0.5711208", "0.55493957", "0.5493443", "0.54701155", "0.53724504", "0.5348009", "0.5171922", "0.5158918", "0.5158763", "0.51249254", "0.5080409", "0.50776005", "0.5062098", "0.5047997", "0.5029041", "0.50261176", "0.5023545", "0.49867105", "0.49796137", "0.4970015", "0.49694014", "0.49634507", "0.4951223", "0.49479184", "0.49242756", "0.49068695", "0.49047053" ]
0.61421114
0
Sets self.gvpei = (self.ae, self.be) + (ai, bi). To be executed in self.external_vector_potential and self.fixed_vortices setters.
def _update_gvpei(self):
    assert (self.ae is None) == (self.be is None)
    ai, bi = None, None
    if self.fixed_vortices is not None and self.fixed_vortices._vpi is not None:
        ai, bi = self.fixed_vortices._vpi.get_vec_h()
    assert (ai is None) == (bi is None)
    vpei = None
    if self.ae is not None:
        if ai is not None:
            vpei = (self.ae + ai, self.be + bi)
        else:
            vpei = (self.ae, self.be)
    else:
        vpei = (ai, bi)
    if self._vpei is not None and vpei is None:
        self._vpei.free()
        self._vpei = None
    else:
        #TODO: easier if GArray supports like for vector storage
        shapes = [vpei[0].shape, vpei[1].shape]
        self._vpei = GArray(shape = shapes, dtype = cfg.dtype)
        self._vpei.set_vec_h(vpei[0], vpei[1])
        self._vpei.sync()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_global_problem_vf_3_gr1_bif(self):\n #0\n std_map = Epetra.Map(len(self.all_fine_vols_ic),0,self.comm)\n self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)\n self.b = Epetra.Vector(std_map)\n for volume in self.all_fine_vols_ic - set(self.neigh_wells_d):\n #1\n soma = 0.0\n soma2 = 0.0\n soma3 = 0.0\n volume_centroid = self.mesh_topo_util.get_average_position([volume])\n adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]\n lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]\n lbt_vol = lamb_w_vol + lamb_o_vol\n z_vol = self.tz - volume_centroid[2]\n soma = 0.0\n temp_glob_adj = []\n temp_k = []\n flux_gr = []\n for adj in adj_volumes:\n #2\n global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n adj_centroid = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - adj_centroid[2]\n altura = adj_centroid[2]\n direction = adj_centroid - volume_centroid\n uni = self.unitary(direction)\n kvol = np.dot(np.dot(kvol,uni),uni)\n #kvol = kvol*(lamb_w_vol + lamb_o_vol)\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n kadj = np.dot(np.dot(kadj,uni),uni)\n lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]\n lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]\n lbt_adj = lamb_w_adj + lamb_o_adj\n\n #kadj = kadj*(lamb_w_adj + lamb_o_adj)\n keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)\n keq = keq*(np.dot(self.A, uni)/float(abs(np.dot(direction, uni))))\n grad_z = (z_adj - z_vol)\n q_grad_z = grad_z*self.gama*keq\n flux_gr.append(q_grad_z)\n\n temp_glob_adj.append(self.map_vols_ic[adj])\n temp_k.append(-keq)\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n #1\n soma2 = -sum(flux_gr)\n temp_k.append(-sum(temp_k))\n temp_glob_adj.append(self.map_vols_ic[volume])\n self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)\n if volume in self.wells_n:\n #2\n index = self.wells_n.index(volume)\n # tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)\n if volume in self.wells_inj:\n #3\n self.b[self.map_vols_ic[volume]] += self.set_q[index] + soma2\n #2\n else:\n #3\n self.b[self.map_vols_ic[volume]] += -self.set_q[index] + soma2\n #1\n else:\n #2\n self.b[self.map_vols_ic[volume]] += soma2\n #0\n for volume in self.neigh_wells_d:\n #1\n soma2 = 0.0\n soma3 = 0.0\n volume_centroid = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - volume_centroid[2]\n adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]\n lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]\n lbt_vol = lamb_w_vol + lamb_o_vol\n soma = 0.0\n temp_glob_adj = []\n temp_k = []\n flux_gr = []\n for adj in adj_volumes:\n #2\n global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n adj_centroid = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - adj_centroid[2]\n altura = adj_centroid[2]\n direction = adj_centroid - volume_centroid\n uni = self.unitary(direction)\n z = uni[2]\n kvol = np.dot(np.dot(kvol,uni),uni)\n #kvol = kvol*(lamb_w_vol + 
lamb_o_vol)\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n kadj = np.dot(np.dot(kadj,uni),uni)\n lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]\n lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]\n lbt_adj = lamb_o_adj + lamb_o_adj\n #kadj = kadj*(lamb_w_adj + lamb_o_adj)\n keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)\n keq = keq*(np.dot(self.A, uni)/(abs(np.dot(direction, uni))))\n grad_z = (z_adj - z_vol)\n q_grad_z = grad_z*self.gama*keq\n flux_gr.append(q_grad_z)\n #2\n if adj in self.wells_d:\n #3\n soma = soma + keq\n index = self.wells_d.index(adj)\n self.b[self.map_vols_ic[volume]] += self.set_p[index]*(keq)\n #2\n else:\n #3\n temp_glob_adj.append(self.map_vols_ic[adj])\n temp_k.append(-keq)\n soma = soma + keq\n #2\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n #1\n soma2 = -sum(flux_gr)\n temp_k.append(soma)\n temp_glob_adj.append(self.map_vols_ic[volume])\n self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)\n if volume in self.wells_n:\n #2\n index = self.wells_n.index(volume)\n # tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)\n if volume in self.wells_inj:\n #3\n self.b[self.map_vols_ic[volume]] += self.set_q[index] + soma2\n #2\n else:\n #3\n self.b[self.map_vols_ic[volume]] += -self.set_q[index] + soma2\n #1\n else:\n #2\n self.b[self.map_vols_ic[volume]] += soma2\n #0\n self.trans_fine.FillComplete()", "def set_IVP_problem(self, *args, ncc_cutoff=1e-10, **kwargs):\n self.problem_type = 'IVP'\n self.problem = de.IVP(self.domain, variables=self.variables, ncc_cutoff=ncc_cutoff)\n self.set_equations(*args, **kwargs)", "def IterateValues(self):\n agrid = self.agrid\n self.w = self.setwage(self.K, self.N)\n self.r = self.setrate(self.K, self.N)\n self.b = self.benefit(self.N)\n\n for l in range(self.Na): \n self.c[-1][l] = agrid[l]*(1+self.r) + self.b\n self.v[-1][l] = self.util(self.c[-1][l],0)\n self.vtilde[-1] = interp1d(agrid,self.v[-1], kind='cubic')\n\n for y in range(-2,-(self.T+1),-1): # y = -2, -3,..., -60\n m0 = 0 \n for l in range(self.Na):\n # Find a bracket within which optimal a' lies\n m = max(0, m0-1)\n m0, a, b, c = self.GetBracket(y, l, m, agrid)\n # Define objective function for optimal a'\n def objfn(a1):\n v = self.value(y, agrid[l], a1)\n return -v\n # Find optimal a' using Golden Section Search\n if a == b:\n self.a[y][l] = 0\n elif b == c:\n self.a[y][l] = agrid[-1]\n else:\n result = minimize_scalar(objfn, bracket=(a,b,c), method='Golden')\n #‘Brent’,‘Bounded’,‘Golden’\n self.a[y][l] = result.x\n # Computing consumption and labor\n if y >= -self.R:\n self.c[y][l], self.n[y][l] = (1+self.r)*agrid[l] + self.b - self.a[y][l], 0\n else:\n self.c[y][l], self.n[y][l] = self.solve(agrid[l], self.a[y][l])\n self.v[y][l] = self.util(self.c[y][l],self.n[y][l]) + self.beta*self.vtilde[y+1](self.a[y][l])\n self.vtilde[y] = interp1d(agrid, self.v[y], kind='cubic')", "def __call__(self, vIGT):\r\n return self.interpolant(vIGT)", "def set_ic(self, problem, eos):\n i_min = self.i_min\n j_min = self.j_min\n k_min = self.k_min\n i_max = self.i_max \n j_max = self.j_max\n k_max = self.k_max\n if problem.type == \"RP\":\n ro_l = problem.ro_l\n ro_r = problem.ro_r\n p_l = problem.p_l\n p_r = problem.p_r\n for i in range(i_min, i_max):\n for j in range(j_min, j_max):\n for k in range(k_min, k_max):\n if problem.dir=='x':\n u_l = problem.u_l\n u_r = problem.u_r\n v_l = 0.\n w_l = 0.\n e_l = eos.gete(ro_l, p_l)\n E_l = e_l + u_l*u_l/2. 
+ v_l*v_l/2. + w_l*w_l/2.\n v_r = 0.\n w_r = 0.\n e_r = eos.gete(ro_r, p_r)\n E_r = e_r + u_r*u_r/2. + v_r*v_r/2. + w_r*w_r/2.\n if self.x_mesh[i] < problem.q_0 and math.fabs(self.x_mesh[i]-problem.q_0)>self.dx/100.:\n self.U[i][j][k] = [ro_l, ro_l*u_l, ro_l*v_l, ro_l*w_l, ro_l*E_l]\n else:\n self.U[i][j][k] = [ro_r, ro_r*u_r, ro_r*v_r, ro_r*w_r, ro_r*E_r]\n elif problem.dir == 'y':\n u_l = 0.\n v_l = problem.u_l\n w_l = 0.\n e_l = eos.gete(ro_l, p_l)\n E_l = e_l + u_l * u_l / 2. + v_l * v_l / 2. + w_l * w_l / 2.\n u_r = 0.\n v_r = problem.u_r\n w_r = 0.\n e_r = eos.gete(ro_r, p_r)\n E_r = e_r + u_r * u_r / 2. + v_r * v_r / 2. + w_r * w_r / 2.\n if self.y_mesh[j] < problem.q_0 and math.fabs(self.y_mesh[j] - problem.q_0) > self.dy / 100.:\n self.U[i][j][k] = [ro_l, ro_l * u_l, ro_l * v_l, ro_l * w_l, ro_l * E_l]\n else:\n self.U[i][j][k] = [ro_r, ro_r * u_r, ro_r * v_r, ro_r * w_r, ro_r * E_r]\n elif problem.dir == 'z':\n u_l = 0.\n v_l = 0.\n w_l = problem.u_l\n e_l = eos.gete(ro_l, p_l)\n E_l = e_l + u_l * u_l / 2. + v_l * v_l / 2. + w_l * w_l / 2.\n u_r = 0.\n v_r = 0.\n w_r = problem.u_r\n e_r = eos.gete(ro_r, p_r)\n E_r = e_r + u_r * u_r / 2. + v_r * v_r / 2. + w_r * w_r / 2.\n if self.z_mesh[k] < problem.q_0 and math.fabs(self.z_mesh[k] - problem.q_0) > self.dz / 100.:\n self.U[i][j][k] = [ro_l, ro_l * u_l, ro_l * v_l, ro_l * w_l, ro_l * E_l]\n else:\n self.U[i][j][k] = [ro_r, ro_r * u_r, ro_r * v_r, ro_r * w_r, ro_r * E_r]\n else:\n print(\"Error: CField.set_ic(): Sorry, only x-direction case can be considered. Bye!\")\n exit(-1)\n elif problem.type == \"RTI\":\n U = self.U\n ro_down = problem.ro_down\n ro_up = problem.ro_up\n u = 0.\n v = 0.\n w = 0.\n p_0 = problem.p_0\n g = problem.g\n q_0 = problem.q_0\n p = 0.\n for i in range(i_min, i_max):\n for j in range(j_min, j_max):\n for k in range(k_min, k_max):\n x = .5*self.dx + self.x_mesh[i]\n y = .5*self.dy + self.y_mesh[j]\n z = .5*self.dz + self.z_mesh[k]\n if problem.dir == 'x':\n q = x\n elif problem.dir == 'y':\n q = y\n else:\n q = z\n if q < q_0:\n ro = ro_down\n else:\n ro = ro_up\n p = p_0 + ro*g*(q - q_0)\n e = eos.gete(ro, p)\n E = e + .5*(0.*0. + 0.*0. + 0.*0.)\n self.U[i][j][k] = [ro, ro*u, ro*v, ro*w, ro*E]\n # Apply initial disturbance\n # Uncomment the variant you prefer\n # Yalinewich 2D disturbance\n PI = 3.14159\n w_0 = 0.0025\n for i in range(i_min, i_max):\n for j in range(j_min, j_max):\n for k in range(k_min, k_max):\n # x = self.dx * (.5 + self.x_mesh[i])\n # y = self.dy * (.5 + self.y_mesh[j])\n # z = self.dz * (.5 + self.z_mesh[k])\n x = .5 * self.dx + self.x_mesh[i]\n y = .5 * self.dy + self.y_mesh[j]\n z = .5 * self.dz + self.z_mesh[k]\n if problem.dir == 'x':\n self.U[i][j][k][3] = 0.\n self.U[i][j][k][1] = self.U[i][j][k][0]*w_0* \\\n (1. - math.cos(4.*PI*z)) * (1.-math.cos(4.*PI*x/3.))\n elif problem.dir == 'y':\n U[i][j][k][1] = 0.\n U[i][j][k][2] = U[i][j][k][0]*w_0*(1. - math.cos(4.*PI*x)) * (1.-math.cos(4.*PI*y/3.))\n elif problem.dir == 'z':\n self.U[i][j][k][2] = 0.\n self.U[i][j][k][3] = self.U[i][j][k][0]*w_0* \\\n (1. - math.cos(4.*PI*y)) * (1.-math.cos(4.*PI*z/3.))\n else:\n print(\"Error: CField.set_ic(): unknown problem type! Only 1d-PRs and 2d-RTIs allowed. 
Bye!\")\n exit(-1)\n return", "def addAresta(self,u,v,peso):\n self.grafo.append([u,v,peso])", "def icvv(self, icvv):\n self._icvv = icvv", "def __call__(self, vigt):\r\n return self.interpolant(vigt)", "def gis_niveau(self, gis_niveau):\n\n self._gis_niveau = gis_niveau", "def gVI(g,rBC,lBC,time,npts):\n #Important coeffcients\n global gamma\n gamma = g\n global alpha\n alpha = (gamma+1)/(gamma-1)\n global beta\n beta = (2*gamma)/(gamma-1)\n global epsilon\n epsilon = (2*gamma)/(gamma+1)\n #Boundary conditions\n global lbc\n lbc = lBC\n global rbc\n rbc = rBC\n #Time\n global t\n t = time\n #points\n global numPts\n numPts = npts\n #Speed of sound for states 1 and 5\n global cL\n cL = np.sqrt(gamma*lbc[0]/lbc[1])\n global cR\n cR = np.sqrt(gamma*rbc[0]/rbc[1])", "def update_eligs(self, *args):\n self.splitGD.update_eligs()", "def update_eligs(self, *args):\n self.splitGD.update_eligs()", "def __gia(self, *args, **kwargs):\n pass", "def gyroi(E, B, mu, Zi,pitch):\n V = ev2vi(E, mu);\n Vperp = V*np.sqrt(1-pitch);\n return mu * mp * Vperp / Zi / eV2J / B;", "def pv2ssh(lon, lat, q, hg, c, nitr=1, name_grd=''):\n def compute_avec(vec,aaa,bbb,grd):\n\n avec=np.empty(grd.np0,)\n avec[grd.vp2] = aaa[grd.vp2]*((vec[grd.vp2e]+vec[grd.vp2w]-2*vec[grd.vp2])/(grd.dx1d[grd.vp2]**2)+(vec[grd.vp2n]+vec[grd.vp2s]-2*vec[grd.vp2])/(grd.dy1d[grd.vp2]**2)) + bbb[grd.vp2]*vec[grd.vp2]\n avec[grd.vp1] = vec[grd.vp1]\n\n return avec,\n if name_grd is not None:\n if os.path.isfile(name_grd):\n with open(name_grd, 'rb') as f:\n grd = pickle.load(f)\n else:\n grd = Grid(lon,lat)\n with open(name_grd, 'wb') as f:\n pickle.dump(grd, f)\n f.close()\n else:\n grd = Grid(lon,lat)\n\n ny,nx,=np.shape(hg)\n g=grd.g\n\n\n x=hg[grd.indi,grd.indj]\n q1d=q[grd.indi,grd.indj]\n\n aaa=g/grd.f01d\n bbb=-g*grd.f01d/c**2\n ccc=+q1d\n\n aaa[grd.vp1]=0\n bbb[grd.vp1]=1\n ccc[grd.vp1]=x[grd.vp1] ##boundary condition\n\n vec=+x\n\n avec,=compute_avec(vec,aaa,bbb,grd)\n gg=avec-ccc\n p=-gg\n\n for itr in range(nitr-1):\n vec=+p\n avec,=compute_avec(vec,aaa,bbb,grd)\n tmp=np.dot(p,avec)\n\n if tmp!=0. 
: s=-np.dot(p,gg)/tmp\n else: s=1.\n\n a1=np.dot(gg,gg)\n x=x+s*p\n vec=+x\n avec,=compute_avec(vec,aaa,bbb,grd)\n gg=avec-ccc\n a2=np.dot(gg,gg)\n\n if a1!=0: beta=a2/a1\n else: beta=1.\n\n p=-gg+beta*p\n\n vec=+p\n avec,=compute_avec(vec,aaa,bbb,grd)\n val1=-np.dot(p,gg)\n val2=np.dot(p,avec)\n if (val2==0.):\n s=1.\n else:\n s=val1/val2\n\n a1=np.dot(gg,gg)\n x=x+s*p\n\n # back to 2D\n h=np.empty((ny,nx))\n h[:,:]=np.NAN\n h[grd.indi,grd.indj]=x[:]\n\n\n return h", "def handle_set_governance_variables(self, context: 'IconScoreContext', params: dict):\n # This API is available after IISS decentralization is enabled.\n if context.revision < REV_DECENTRALIZATION or self.term.sequence < 0:\n raise MethodNotFoundException(\"setGovernanceVariables is disabled\")\n\n address: 'Address' = context.tx.origin\n\n prep: 'PRep' = context.preps.get_by_address(address)\n if prep is None:\n raise InvalidParamsException(f\"P-Rep not found: {address}\")\n\n kwargs: dict = TypeConverter.convert(params, ParamType.IISS_SET_GOVERNANCE_VARIABLES)\n\n # Update incentive rep\n irep: int = kwargs[\"irep\"]\n validate_irep(context, irep, prep)\n\n # EventLog\n EventLogEmitter.emit_event_log(\n context,\n score_address=ZERO_SCORE_ADDRESS,\n event_signature=\"GovernanceVariablesSet(Address,int)\",\n arguments=[address, irep],\n indexed_args_count=1\n )\n\n # Update the changed properties of a P-Rep to stateDB\n # context.storage.prep.put_dirty_prep(context, prep)\n new_prep: 'PRep' = prep.copy()\n new_prep.set_irep(irep, context.block.height)\n context.put_dirty_prep(new_prep)", "def run_grav(self):\n\n # Solucao direta\n self.prod_w = []\n self.prod_o = []\n t0 = time.time()\n # self.set_volumes_in_primal()\n self.set_sat_in()\n self.set_lamb_2()\n self.set_global_problem_vf_3_gr1_bif()\n self.Pf = self.solve_linear_problem(self.trans_fine, self.b, len(self.all_fine_vols_ic))\n self.organize_Pf()\n del self.Pf\n self.mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf_all))\n del self.Pf_all\n self.test_conservation_fine()\n # self.store_flux_pf_gr_bif = self.create_flux_vector_pf_gr_bif_1()\n\n \"\"\"\n ################################################################\n # Solucao Multiescala\n self.calculate_restriction_op_2()\n self.calculate_prolongation_op_het()\n self.organize_op()\n self.Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(self.trilOR, self.trans_fine, self.nf_ic), self.trilOP, self.nf_ic), self.nc, self.nc)\n self.Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf_ic, self.b), self.nc)\n self.Pc = self.solve_linear_problem(self.Tc, self.Qc, self.nc)\n self.set_Pc()\n self.Pms = self.multimat_vector(self.trilOP, self.nf_ic, self.Pc)\n\n del self.trilOP\n del self.trilOR\n del self.Tc\n del self.Qc\n del self.Pc\n\n self.organize_Pms()\n del self.Pms\n self.mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms_all))\n del self.Pms_all\n self.erro()\n\n self.test_conservation_coarse_gr()\n # self.Neuman_problem_6_gr()\n # self.store_flux_pms_gr = self.create_flux_vector_pms_gr()\n ####################################################################\n \"\"\"\n\n\n\n\n\n\n\n print('acaboooou')\n self.mb.write_file('new_out_bif_gr.vtk')\n\n\n shutil.copytree(self.caminho1, self.pasta)", "def g_xy(self):\n for x in range(self.size.x):\n for y in range(self.size.y):\n yield self.p[0] + Vect(x, y)", "def update_E(self):\n self.grid.E[self.loc] += (\n self.grid.courant_number\n * self.grid.inverse_permittivity[self.loc]\n * self.phi_E\n )", "def 
set_bc(self, problem):\n bcs = problem.bcs\n n_bound = cfg.const['N_GHOST_CELLS']\n # Left X-b.c.\n for i in range(0, self.i_min):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[0] == 't': \n self.U[i][j][k] = self.U[self.i_min][j][k]\n elif bcs[0] == 'w':\n for num in [0, 2, 3, 4]: # 0 -> 3, 1 -> 2, i_min-1 -> i_min, i_min-2 -> i_min+1\n self.U[i][j][k][num] = self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n else:\n print(\"Errof field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right X-b.c.\n for i in range(self.i_max, self.i_max+n_bound):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[1] == 't':\n self.U[i][j][k] = self.U[self.i_max-1][j][k]\n elif bcs[1] == 'w':\n for num in [0, 2, 3, 4]: # i_max -> i_max-1 , i_max+1-> i_max-2\n self.U[i][j][k][num] = self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_min):\n for k in range(self.k_min, self.k_max): \n if bcs[2] == 't':\n self.U[i][j][k] = self.U[i][self.j_min][k]\n elif bcs[2] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = - self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(self.j_max, self.j_max+n_bound):\n for k in range(self.k_min, self.k_max): \n if bcs[3] == 't':\n self.U[i][j][k] = self.U[i][self.j_max-1][k]\n elif bcs[3] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = -self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(0, self.k_min): \n if bcs[4] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_min]\n elif bcs[4] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(self.k_max, self.k_max+n_bound):\n if bcs[5] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_max-1]\n elif bcs[5] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! 
Bye!\")", "def test_set_vx_to_vx_plus_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8004 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_plus_vy()\n value = v1 + v2\n if value > 0xFF:\n assert(cpu.V_register[0xF] == 1)\n assert(cpu.V_register[x] == value & 0xFF)\n else:\n assert(cpu.V_register[0xF] == 0)\n assert(cpu.V_register[x] == value)", "def on_VI_gas_set_clicked(self):\n # TODO: not implemented yet\n dispVI_gas()\n print \"GAS PARA:\", qmdz_const.VI_GAS", "def test_set_vx_to_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8000 | (x << 8) | (y << 4)\n for v in range(0x0, 0xFF):\n cpu.V_register[y] = v\n cpu.set_vx_to_vy()\n assert(cpu.V_register[x] == v)", "def set_inverter_v_and_i(self):\n # NOTE: This method could be implemented in a more efficient\n # manner, but it's more readable and more robust to use the\n # public methods the class already has.\n\n # Define function to be used with the loop helper.\n def set_v_and_i(inv):\n # Attempt to get the rated power.\n try:\n s_str = inv['rated_power']\n except KeyError:\n # No rated power. Set arbitrary V and I in.\n self.log.warning(f\"Inverter {inv['name']} does not have the \"\n \"rated_power attribute. Setting V_In=10000 \"\n \"I_In=10000.\")\n\n self._modify_item(inv, {'V_In': 10000, 'I_In': 10000})\n else:\n # We have a rated power. Set values accordingly.\n s = float(s_str) * 1.1\n # Just use 1000.\n v = 1000\n i = s / v\n\n # Modify the inverter.\n self._modify_item(inv, {'V_In': v, 'I_In': i})\n\n # Loop over the inverter objects and call the helper.\n self.loop_over_objects_helper('inverter', set_v_and_i)\n\n self.log.info('All inverters have V_In and I_In set according to '\n 'their rated power.')\n # That's it.\n return None", "def vj(vj, pol, ant) :\n s.vj(pol, vj, ant)", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. 
working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def setIInternal(self):\n # if the di vectors are defined this method populates the upper limit vector\n self.i = {}\n for label in self.di.keys():\n self.i[label] = []\n L = 0\n for l in self.di[label]:\n L += l\n self.i[label].append(L)", "def awGrid(vis,HA,uvw,image_params,obs_params,Mterms,Mterms_ij):\t\n\tStokes = image_params['Stokes']\n\t\n\n\n\tprint '--------------Gridding X pol--------------------'\n\txgrid_wt, xgrid_uv = gridOnePolAWproj(vis[0],HA,uvw,image_params,obs_params,Mterms[0],Mterms_ij[0])\n\tprint '--------------Gridding Y pol--------------------'\n\tygrid_wt, ygrid_uv = gridOnePolAWproj(vis[1],HA,uvw,image_params,obs_params,Mterms[1],Mterms_ij[1])\n\n\tN = np.shape(xgrid_wt)[0]\n\tgrid_uv = np.zeros([N, N], dtype=complex)\n\tgrid_wt = np.zeros([N, N], dtype=complex)\n\t\n\tif Stokes == 'I':\n\t\t#combine X and Y gridded vis to create the I pol gridded vis\n\t\t# I = (XX+YY)/2\n\t\tgrid_uv.real = (ygrid_uv.real + xgrid_uv.real)/2\n\t\tgrid_uv.imag = (ygrid_uv.imag + xgrid_uv.imag)/2\n\n\t\t#combine X and Y gridded wt to create the I pol gridded wt\n\t\tgrid_wt.real = (ygrid_wt.real + xgrid_wt.real)/2\n\t\tgrid_wt.imag = (ygrid_wt.imag + xgrid_wt.imag)/2\n\n\telif Stokes == 'Q':\n\t\t#combine X and Y gridded vis to create the I pol gridded vis\n\t\t# Q = (XX-YY)/2\n\t\tgrid_uv.real = (ygrid_uv.real - xgrid_uv.real)/2\n\t\tgrid_uv.imag = (ygrid_uv.imag - xgrid_uv.imag)/2\n\n\t\t#combine X and Y gridded wt to create the I pol gridded wt\n\t\tgrid_wt.real = (ygrid_wt.real - xgrid_wt.real)/2\n\t\tgrid_wt.imag = (ygrid_wt.imag - xgrid_wt.imag)/2\n\n\n\tdty_image=np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(grid_uv)))\n\tpsf_image=np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(grid_wt)))\n\n\treturn dty_image, psf_image", "def gen_parameter(self, g, ng, p):\n pass", "def g_tensor(self,gpara,gperp,zeta_a):\n gx = gperp\n gy = gperp\n gz = gpara\n\n self.gx = gx\n self.gy = gy\n self.gz = gz\n self.g_grid = np.array([[gx*gx, gx*gy, gx*gz],[gy*gx, gy*gy, gy*gz],[gz*gx, gz*gy, gz*gz]])\n 
# rotate the crystal coordinates so that I'm now in the coordinate system \n # given by the zeeman tensor's principal axes\n self.a = Ry(zeta_a) @ self.a\n self.b = Ry(zeta_a) @ self.b\n self.c = Ry(zeta_a) @ self.c" ]
[ "0.56007665", "0.5436167", "0.53549033", "0.5279826", "0.52668923", "0.5232523", "0.5191996", "0.5186861", "0.5171484", "0.5170207", "0.5155571", "0.5155571", "0.51301646", "0.50711966", "0.50634015", "0.5041026", "0.5024466", "0.50092936", "0.5004564", "0.49962527", "0.49778172", "0.4974977", "0.49696314", "0.49655512", "0.49592388", "0.4959113", "0.49272752", "0.49262372", "0.49208868", "0.49033844" ]
0.8162519
0
Sets/gets external vector potential.
def external_vector_potential(self):
    assert (self.ae is None) == (self.be is None)

    if self.ae is not None:
        return self.ae, self.be

    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def external_irregular_vector_potential(self):\n if self._vpei is not None:\n return self._vpei.get_vec_h()\n\n return None", "def potential(self) -> np.ndarray:\n if self._V is None:\n potential = -self._gp.reshape(-1, 1) * self._gp # pylint: disable=E1101\n object.__setattr__(self, \"_V\", potential)\n return self._V", "def lib_vector(self, lib_vector):\n self.logger.debug(\"In 'lib_vector' setter.\")\n\n self._lib_vector = lib_vector", "def __init__(self,vector):\n self._vector = vector", "def set(self, incoming_vector):\n self.vector = incoming_vector", "def set_voltages(): \n #0) set parameters\n from project_parameters import trapFile,multipoleControls,reg,driveFrequency,ax,az,phi,coefs\n import pickle\n with open(trapFile,'rb') as f:\n trap = pickle.load(f)\n V,X,Y,Z=trap.instance.DC,trap.instance.X,trap.instance.Y,trap.instance.Z\n tc=trap.configuration\n C = tc.multipoleControl\n el = []\n #1) check if trap_knobs has been run yet, creating multipoleControl and multipoleKernel\n if tc.trap_knobs != True:\n return 'WARNING: You must run trap_knobs first!'\n #2a) determine electrode voltages directly\n elif multipoleControls: # note plurality to contrast from attribute\n el = np.dot(C,coefs.T) # these are the electrode voltages\n #2b) determine electrode volages indirectly\n else:\n charge = tc.charge\n mass = tc.mass\n V0 = mass*(2*np.pi*frequencyRF)**2/charge\n U2 = az*V0/8\n U1 = U2+ax*V0/4\n U3 = 2*U1*np.tan(2*np.pi*(phi+tc.thetaRF)/180)\n U1p= np.sqrt(U1**2+U3**2/2)\n U4 = U1p*tc.Qrf[4]/tc.Qrf[1]\n U5 = U1p*tc.Qrf[5]/tc.Qrf[1]\n inp = np.array([E[0], E[1], E[2], U1, U2, U3, U4, U5]).T\n mCf = tc.multipoleCoefficients[1:9,:]\n el = np.dot(mCf.T,inp) # these are the electrode voltages\n el = np.real(el)\n #3) regularize if set to do so\n reg = 0\n if reg: \n C = el\n Lambda = np.linalg.lstsq(tc.multipoleKernel,C)\n Lambda=Lambda[0]\n el = el-(np.dot(tc.multipoleKernel,Lambda))\n return el", "def AsVector(self) -> ngsolve.la.BaseVector:", "def vector(self, base_ring=None):\n if (base_ring is None) or (base_ring is self._base_ring):\n return self._vector\n else:\n return vector(base_ring, self._vector)", "def __call__(self):\n return self._vector", "def setVector(self, vector):\n self.p2 = vector(self.p1)", "def setVelocity(self,v):\n if v is None:\n self.v = Cartesian3DVector()\n else:\n if isinstance(v,Cartesian3DVector):\n self.v = Cartesian3DVector(v.x,v.y,v.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect velocity vector type.\")", "def vector(self):\n return self.__vector", "def set_random_vector(self):\n self.vector = vu.create_dense_random_vector(dimension)", "def vector_potential(self, xyz):\n r = self.distance(xyz)\n f = (\n (1j * self.omega * self.mu * self.moment) / (4 * np.pi * r) *\n np.exp(-1j * self.wavenumber * r)\n )\n f = np.kron(np.ones(1, 3), np.atleast_2d(f).T)\n return self.dot_orientation(f)", "def define_vector_functions(self):\n\n # Exit if functions have already been defined.\n # A function decorator might work better here...\n if hasattr(self, 'velocity'):\n return None\n\n unsteady = self.config['formulation']['time']['unsteady']\n lagrangian = self.config['formulation']['domain'] == 'lagrangian'\n lin_elastic = self.config['material']['const_eqn'] == 'lin_elastic'\n elastic = self.config['material']['type'] == 'elastic'\n\n init = self.config['formulation']['initial_condition']\n\n # Trial and test functions\n self.test_vector = dlf.TestFunction(self.vectorSpace)\n self.trial_vector = 
dlf.TrialFunction(self.vectorSpace)\n\n if elastic and unsteady:\n if init['displacement'] is not None:\n disp = init['displacement']\n self.displacement = dlf.project(disp, self.vectorSpace)\n self.displacement0 = self.displacement.copy(deepcopy=True)\n else:\n self.displacement = dlf.Function(self.vectorSpace)\n self.displacement0 = dlf.Function(self.vectorSpace)\n self.displacement.rename(\"u\", \"displacement\")\n self.displacement0.rename(\"u0\", \"displacement\")\n\n if init['velocity'] is not None:\n vel = init['velocity']\n self.velocity = dlf.project(vel, self.vectorSpace)\n self.velocity0 = self.velocity.copy(deepcopy=True)\n else:\n self.velocity = dlf.Function(self.vectorSpace)\n self.velocity0 = dlf.Function(self.vectorSpace)\n self.velocity.rename(\"v\", \"velocity\")\n self.velocity0.rename(\"v0\", \"velocity\")\n elif unsteady: # Unsteady viscous material.\n self.displacement = 0\n self.displacement0 = 0\n\n if init['velocity'] is not None:\n vel = init['velocity']\n self.velocity = dlf.project(vel, self.vectorSpace)\n self.velocity0 = self.velocity.copy(deepcopy=True)\n else:\n self.velocity = dlf.Function(self.vectorSpace)\n self.velocity0 = dlf.Function(self.vectorSpace)\n self.velocity.rename(\"v\", \"velocity\")\n self.velocity0.rename(\"v0\", \"velocity\")\n\n # self.velocity = dlf.Function(self.vectorSpace, name=\"v\")\n # self.velocity0 = dlf.Function(self.vectorSpace, name=\"v0\")\n elif elastic: # Steady elastic material.\n if init['displacement'] is not None:\n disp = init['displacement']\n self.displacement = dlf.project(disp, self.vectorSpace)\n # self.displacement0 = self.displacement.copy(deepcopy=True)\n else:\n self.displacement = dlf.Function(self.vectorSpace)\n # self.displacement0 = dlf.Function(self.vectorSpace)\n self.displacement.rename(\"u\", \"displacement\")\n # self.displacement0.rename(\"u0\", \"displacement\")\n\n # self.displacement = dlf.Function(self.vectorSpace, name=\"u\")\n self.displacement0 = 0\n self.velocity = 0\n self.velocity0 = 0\n else: # Steady viscous material\n self.displacement = 0\n self.displacement0 = 0\n\n if init['velocity'] is not None:\n vel = init['velocity']\n self.velocity = dlf.project(vel, self.vectorSpace)\n # self.velocity0 = self.velocity.copy(deepcopy=True)\n else:\n self.velocity = dlf.Function(self.vectorSpace)\n # self.velocity0 = dlf.Function(self.vectorSpace)\n self.velocity.rename(\"v\", \"velocity\")\n # self.velocity0.rename(\"v0\", \"velocity\")\n\n # self.velocity = dlf.Function(self.vectorSpace, name=\"v\")\n self.velocity0 = 0\n\n # # Apply initial conditions if provided\n # initial_condition = self.config['formulation']['initial_condition']\n # if initial_condition['displacement'] is not None:\n # init_disp = initial_condition['displacement']\n # self.apply_initial_conditions(init_disp,\n # self.displacement,\n # self.displacement0)\n # if initial_condition['velocity'] is not None:\n # init_vel = initial_condition['velocity']\n # self.apply_initial_conditions(init_vel,\n # self.velocity,\n # self.velocity0)\n\n return None", "def vector_potential(self, xyz):\n r = self.distance(xyz)\n a = (\n (self.current * self.length) / (4*np.pi*r) *\n np.exp(-i*self.wavenumber*r)\n )\n a = np.kron(np.ones(1, 3), np.atleast_2d(a).T)\n return self.dot_orientation(a)", "def get_vector(self,term):\n return self.dict.get(term)", "def provide_vector(self, vec):\n self.vec = vec\n self.vocab_size = vec.shape[0]\n self.embedding_size = len(vec[0])", "def create_vectors(self):\n self.localStatistics = []\n 
self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model", "def get_vector(self, word):\n\n if word in self.glove.stoi:\n return self.glove.vectors[self.glove.stoi[word]]\n else:\n return None", "def set_vector(iif, dq, rq):\n\n (nbeads, natoms) = rq.shape\n natoms //= 3\n (dbeads, datoms) = dq.shape\n datoms //= 3\n\n # Check that indices make sense\n if iif.index < 0 and natoms != datoms:\n raise ValueError(\n \"Initialization tries to mix up structures with different atom numbers.\"\n )\n if iif.index >= datoms:\n raise ValueError(\n \"Cannot initialize single atom as atom index %d is larger than the number of atoms\"\n % iif.index\n )\n if iif.bead >= dbeads:\n raise ValueError(\n \"Cannot initialize single bead as bead index %d is larger than the number of beads\"\n % iif.bead\n )\n\n if iif.bead < 0: # we are initializing the path\n res = nm_rescale(nbeads, dbeads) # path rescaler\n if nbeads != dbeads:\n info(\n \" # Initialize is rescaling from %5d beads to %5d beads\"\n % (nbeads, dbeads),\n verbosity.low,\n )\n if iif.index < 0:\n dq[:] = res.b1tob2(rq)\n else: # we are initializing a specific atom\n dq[:, 3 * iif.index : 3 * (iif.index + 1)] = res.b1tob2(rq)\n else: # we are initializing a specific bead\n if iif.index < 0:\n dq[iif.bead] = rq\n else:\n dq[iif.bead, 3 * iif.index : 3 * (iif.index + 1)] = rq", "def vector(molec, dihed, nonH, energy):\n #Torison\n if dihed:\n pass\n #XYZ\n else:\n coords = ()\n if nonH:\n for atom in molec.atoms:\n coords += atom.coords\n else:\n for atom in molec.atoms:\n if atom.atomicnum > 1:\n coords += atom.coords\n #Energy\n if energy:\n coords += (molec.energy/10.0,)\n return coords", "def numpy_vector(self):\n pass", "def GlobalVector(self):\n return _hypre.HypreParVector_GlobalVector(self)", "def init_vector(self,x,dim):\n if dim == \"noise\":\n self.prior.sqrtM.init_vector(x,1)\n else:\n self.prior.init_vector(x,dim)", "def generate_vector(self,dim=0,v=None):\n vec = dl.Vector()\n self.init_vector(vec,dim)\n if v is not None:\n vec[:]=v\n return vec", "def set_vector(self):\n noise = np.random.choice([0., 1.], size=self.num_selections)\n noise = torch.tensor(noise)\n # Cast to precision and CUDA, and edit shape\n self.vector = noise.to(dtype=self.precision, device='cuda').squeeze()\n #noise = torch.full(self.num_selections, 0.05, dtype=self.precision,\n # device='cuda')\n #noise_vector = torch.zeros(self.vec_length, dtype=self.precision,\n # device='cuda')\n #noise_vector[self.choices] = noise\n #self.vector = noise_vector", "def setv(self, node, vector):\n\n self.daq.setVector(f'/{self.device_id}/{node}', vector)", "def V_potential(X,A):\n\treturn norm_matrix(A-X)", "def __init__(self, w, x, y, z):\n self.__scalar = w\n self.__vector = np.array([x, y, z])" ]
[ "0.6709609", "0.6408864", "0.63957906", "0.6121401", "0.6120507", "0.6060116", "0.6041537", "0.5978795", "0.5968136", "0.59421694", "0.5931237", "0.5890419", "0.5791883", "0.5742687", "0.57241136", "0.57071424", "0.5693986", "0.56902176", "0.5679575", "0.56609213", "0.5643648", "0.5641717", "0.55790895", "0.5570933", "0.55669415", "0.5550806", "0.55493605", "0.5537654", "0.55308414", "0.55252314" ]
0.67878634
0
Sets/gets external irregular vector potential
def external_irregular_vector_potential(self): if self._vpei is not None: return self._vpei.get_vec_h() return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def potential(self) -> np.ndarray:\n if self._V is None:\n potential = -self._gp.reshape(-1, 1) * self._gp # pylint: disable=E1101\n object.__setattr__(self, \"_V\", potential)\n return self._V", "def external_vector_potential(self):\n assert (self.ae is None) == (self.be is None)\n \n if self.ae is not None:\n return self.ae, self.be\n\n return None", "def AsVector(self) -> ngsolve.la.BaseVector:", "def set_voltages(): \n #0) set parameters\n from project_parameters import trapFile,multipoleControls,reg,driveFrequency,ax,az,phi,coefs\n import pickle\n with open(trapFile,'rb') as f:\n trap = pickle.load(f)\n V,X,Y,Z=trap.instance.DC,trap.instance.X,trap.instance.Y,trap.instance.Z\n tc=trap.configuration\n C = tc.multipoleControl\n el = []\n #1) check if trap_knobs has been run yet, creating multipoleControl and multipoleKernel\n if tc.trap_knobs != True:\n return 'WARNING: You must run trap_knobs first!'\n #2a) determine electrode voltages directly\n elif multipoleControls: # note plurality to contrast from attribute\n el = np.dot(C,coefs.T) # these are the electrode voltages\n #2b) determine electrode volages indirectly\n else:\n charge = tc.charge\n mass = tc.mass\n V0 = mass*(2*np.pi*frequencyRF)**2/charge\n U2 = az*V0/8\n U1 = U2+ax*V0/4\n U3 = 2*U1*np.tan(2*np.pi*(phi+tc.thetaRF)/180)\n U1p= np.sqrt(U1**2+U3**2/2)\n U4 = U1p*tc.Qrf[4]/tc.Qrf[1]\n U5 = U1p*tc.Qrf[5]/tc.Qrf[1]\n inp = np.array([E[0], E[1], E[2], U1, U2, U3, U4, U5]).T\n mCf = tc.multipoleCoefficients[1:9,:]\n el = np.dot(mCf.T,inp) # these are the electrode voltages\n el = np.real(el)\n #3) regularize if set to do so\n reg = 0\n if reg: \n C = el\n Lambda = np.linalg.lstsq(tc.multipoleKernel,C)\n Lambda=Lambda[0]\n el = el-(np.dot(tc.multipoleKernel,Lambda))\n return el", "def vector_potential(self, xyz):\n r = self.distance(xyz)\n a = (\n (self.current * self.length) / (4*np.pi*r) *\n np.exp(-i*self.wavenumber*r)\n )\n a = np.kron(np.ones(1, 3), np.atleast_2d(a).T)\n return self.dot_orientation(a)", "def compute_V(self, x):\n assert x.ndim == 3\n return self.potential_net(x.reshape(x.size(0), -1))", "def V_potential(X,A):\n\treturn norm_matrix(A-X)", "def vel_inicial(x): #Velocidad inicial como un vector de ceros\r\n return np.zeros_like(x)", "def vector_potential(self, xyz):\n r = self.distance(xyz)\n f = (\n (1j * self.omega * self.mu * self.moment) / (4 * np.pi * r) *\n np.exp(-1j * self.wavenumber * r)\n )\n f = np.kron(np.ones(1, 3), np.atleast_2d(f).T)\n return self.dot_orientation(f)", "def vector(molec, dihed, nonH, energy):\n #Torison\n if dihed:\n pass\n #XYZ\n else:\n coords = ()\n if nonH:\n for atom in molec.atoms:\n coords += atom.coords\n else:\n for atom in molec.atoms:\n if atom.atomicnum > 1:\n coords += atom.coords\n #Energy\n if energy:\n coords += (molec.energy/10.0,)\n return coords", "def __call__(self):\n return self._vector", "def vector(self, base_ring=None):\n if (base_ring is None) or (base_ring is self._base_ring):\n return self._vector\n else:\n return vector(base_ring, self._vector)", "def __init__(self,vector):\n self._vector = vector", "def vector(self):\n return self.__vector", "def __init__( self, u = [ 1., 0., 0. ], v = [ 0., 1., 0. ], w = [ 0., 0., 1. ], coeff = 1. 
): \n\tdirect = [ u, v, w ]\n self.coeff = coeff\n\tself.direct = [ [ i*coeff for i in j ] for j in direct ]\n self.reciprocal_updated = False\n self.lattice_parameters_updated = False\n self.volume_updated = False\n self.get_lattice_parameters( u, v, w )\n self.get_volume( u, v, w )\n self.get_reciprocal_basis( u, v, w )", "def potential(Walker):\n V = 0.0\n r_cut = 1.0e-4\n # e-e\n for i in range(Walker.Ne-1):\n for j in range(i+1,Walker.Ne):\n r = sqrt(sum((Walker.Re[i]-Walker.Re[j])**2))\n V += 1.0/max(r_cut,r)\n\n # e-Ion\n for i in range(Walker.Ne):\n for j in range(Walker.Nn):\n r = sqrt(sum((Walker.Re[i]-Walker.Rn[j])**2))\n V -= Walker.Zn[j]/max(r_cut,r)\n\n # Ion-Ion\n for i in range(Walker.Nn-1):\n for j in range(i+1,Walker.Nn):\n r = sqrt(sum((Walker.Rn[i]-Walker.Rn[j])**2))\n V += 1.0/max(r_cut,r)\n\n return V", "def V_vect(self, points):\n return self.A_conf*norm(points)*self.isOutside(points)", "def evaluation(individual):\n # Get the closest term through individual vector\n individual_term = individual\n # make circular convolution the generation virtual patent\n vectors4novelty = np.concatenate((existed_function_vector,np.array([individual_term])))\n patent_vector = lps.get_patent_vector_a(vectors4novelty)\n # Calculate Novelty Value based on patent space\n # We use the base space to test the algorithm temperorely\n novelty_value = novelty.get_novelty(patent_vector)\n # Calcualte Fesibility value based on function space\n feasibility_value = feasibility.get_feasibility_a(vectors4novelty)\n return [novelty_value,feasibility_value]", "def set(self, incoming_vector):\n self.vector = incoming_vector", "def V(self, point = -1):\n return self.solution('V', point)", "def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model", "def __size_restriction_correct_vector_vector(self):\n\n strTestName = 'Vector size lower or equal to the size of a vector (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('vRefParameter1', 'Vector ref. 
parameter')\n RxCSObject.paramType('vRefParameter1', np.ndarray)\n\n # Now, let me define a Numpy vector\n RxCSObject.paramAddMan('parameter1', 'Numpy array 1D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizLE('parameter1', 'vRefParameter1', mul=3)\n\n RxCSObject.vRefParameter1 = np.array([0, 1, 0, 4])\n RxCSObject.parameter1 = np.random.randn(9)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def get_vector(self, word):\n\n if word in self.glove.stoi:\n return self.glove.vectors[self.glove.stoi[word]]\n else:\n return None", "def get_vector(self,term):\n return self.dict.get(term)", "def define_vector_functions(self):\n\n # Exit if functions have already been defined.\n # A function decorator might work better here...\n if hasattr(self, 'velocity'):\n return None\n\n unsteady = self.config['formulation']['time']['unsteady']\n lagrangian = self.config['formulation']['domain'] == 'lagrangian'\n lin_elastic = self.config['material']['const_eqn'] == 'lin_elastic'\n elastic = self.config['material']['type'] == 'elastic'\n\n init = self.config['formulation']['initial_condition']\n\n # Trial and test functions\n self.test_vector = dlf.TestFunction(self.vectorSpace)\n self.trial_vector = dlf.TrialFunction(self.vectorSpace)\n\n if elastic and unsteady:\n if init['displacement'] is not None:\n disp = init['displacement']\n self.displacement = dlf.project(disp, self.vectorSpace)\n self.displacement0 = self.displacement.copy(deepcopy=True)\n else:\n self.displacement = dlf.Function(self.vectorSpace)\n self.displacement0 = dlf.Function(self.vectorSpace)\n self.displacement.rename(\"u\", \"displacement\")\n self.displacement0.rename(\"u0\", \"displacement\")\n\n if init['velocity'] is not None:\n vel = init['velocity']\n self.velocity = dlf.project(vel, self.vectorSpace)\n self.velocity0 = self.velocity.copy(deepcopy=True)\n else:\n self.velocity = dlf.Function(self.vectorSpace)\n self.velocity0 = dlf.Function(self.vectorSpace)\n self.velocity.rename(\"v\", \"velocity\")\n self.velocity0.rename(\"v0\", \"velocity\")\n elif unsteady: # Unsteady viscous material.\n self.displacement = 0\n self.displacement0 = 0\n\n if init['velocity'] is not None:\n vel = init['velocity']\n self.velocity = dlf.project(vel, self.vectorSpace)\n self.velocity0 = self.velocity.copy(deepcopy=True)\n else:\n self.velocity = dlf.Function(self.vectorSpace)\n self.velocity0 = dlf.Function(self.vectorSpace)\n self.velocity.rename(\"v\", \"velocity\")\n self.velocity0.rename(\"v0\", \"velocity\")\n\n # self.velocity = dlf.Function(self.vectorSpace, name=\"v\")\n # self.velocity0 = dlf.Function(self.vectorSpace, name=\"v0\")\n elif elastic: # Steady elastic material.\n if init['displacement'] is not None:\n disp = init['displacement']\n self.displacement = dlf.project(disp, self.vectorSpace)\n # self.displacement0 = self.displacement.copy(deepcopy=True)\n else:\n self.displacement = dlf.Function(self.vectorSpace)\n # self.displacement0 = dlf.Function(self.vectorSpace)\n self.displacement.rename(\"u\", \"displacement\")\n # self.displacement0.rename(\"u0\", \"displacement\")\n\n # self.displacement = dlf.Function(self.vectorSpace, name=\"u\")\n self.displacement0 = 0\n self.velocity = 0\n self.velocity0 = 0\n else: # Steady viscous material\n self.displacement = 0\n self.displacement0 = 0\n\n if init['velocity'] is not None:\n vel = init['velocity']\n self.velocity = dlf.project(vel, self.vectorSpace)\n # self.velocity0 = self.velocity.copy(deepcopy=True)\n else:\n 
self.velocity = dlf.Function(self.vectorSpace)\n # self.velocity0 = dlf.Function(self.vectorSpace)\n self.velocity.rename(\"v\", \"velocity\")\n # self.velocity0.rename(\"v0\", \"velocity\")\n\n # self.velocity = dlf.Function(self.vectorSpace, name=\"v\")\n self.velocity0 = 0\n\n # # Apply initial conditions if provided\n # initial_condition = self.config['formulation']['initial_condition']\n # if initial_condition['displacement'] is not None:\n # init_disp = initial_condition['displacement']\n # self.apply_initial_conditions(init_disp,\n # self.displacement,\n # self.displacement0)\n # if initial_condition['velocity'] is not None:\n # init_vel = initial_condition['velocity']\n # self.apply_initial_conditions(init_vel,\n # self.velocity,\n # self.velocity0)\n\n return None", "def make_forcing_vec(pot_mesh, geo_mesh, u_d, f, l, mu):\n pot_nodes = pot_mesh.get_nodes()\n num_nodes = pot_nodes.shape[0]\n\n x_c = geo_mesh.get_centroid()\n c_0 = -1. / (4. * np.pi)\n\n # make Power and Miranda supplementary flow vector\n f_s = f / (-8. * np.pi * mu) # the script F seen in Pozrikidis\n l_s = l / (-8. * np.pi * mu) # the script L seen in Pozrikidis\n v_s = np.empty(3 * num_nodes)\n for src_num in range(num_nodes):\n node = pot_nodes[src_num]\n v_s[(3 * src_num) : (3 * src_num + 3)] = np.einsum(\n \"il,l->i\", geo.stokeslet(node, x_c), f_s\n ) + np.einsum(\n \"il,l->i\", geo.rotlet(node, x_c), l_s\n )\n fv = c_0 * (u_d - v_s) # script C term from Pozrikidis\n return fv", "def __call__(self, x):\n v = vector(RDF,x)\n if v.is_zero():\n raise ValueError, \"The origin must not be a vertex.\"\n v = v/norm(v) # normalize vertices to unit sphere\n v = self.house*v # reflect so self.projection_dir is at \"north pole\"\n denom = self.height-v[self.dim-1]\n if denom.is_zero():\n raise ValueError, 'Point cannot coincide with ' \\\n 'coordinate singularity at ' + repr(x)\n return vector(RDF, [ v[i]/denom for i in range(self.dim-1) ])", "def vector(self):\n return self.q[1:4]", "def setVelocity(self,v):\n if v is None:\n self.v = Cartesian3DVector()\n else:\n if isinstance(v,Cartesian3DVector):\n self.v = Cartesian3DVector(v.x,v.y,v.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect velocity vector type.\")", "def AsVector(self) -> BaseVector:" ]
[ "0.6777151", "0.6714715", "0.6365124", "0.62756366", "0.62136114", "0.615632", "0.6140968", "0.6131208", "0.61214024", "0.6068877", "0.6030403", "0.5973165", "0.596924", "0.59545225", "0.5888215", "0.5796419", "0.5769236", "0.57671154", "0.5765479", "0.57427657", "0.57207257", "0.5719309", "0.5693157", "0.5677461", "0.5677027", "0.5674424", "0.5672109", "0.56676126", "0.5664268", "0.56614566" ]
0.711595
0
Check usage of default credentials on master node
def test_001_check_default_master_node_credential_usage(self): ip = self.config.nailgun_host ssh_client = ssh.Client(ip, self.config.master.master_node_ssh_user, self.config.master.master_node_ssh_password, timeout=self.config.master.ssh_timeout) cmd = "date" output = [] try: output = ssh_client.exec_command(cmd) LOG.debug(output) except exceptions.SSHExecCommandFailed: self.verify_response_true(len(output) == 0, 'Step 1 failed: Default credentials for ' 'ssh on master node were not changed') except exceptions.TimeoutException: self.verify_response_true(len(output) == 0, 'Step 1 failed: Default credentials for ' 'ssh on master node were not changed') except exc.SSHException: self.verify_response_true(len(output) == 0, 'Step 1 failed: Default credentials for ' 'ssh on master node were not changed') self.verify_response_true(len(output) == 0, 'Step 1 failed: Default credentials for ' 'ssh on master node were not changed')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_003_check_default_keystone_credential_usage(self):\n\n usr = self.config.master.keystone_user\n pwd = self.config.master.keystone_password\n url = 'http://{0}:5000/v2.0'.format(self.config.nailgun_host)\n\n try:\n keystone = keystoneclient(username=usr,\n password=pwd,\n auth_url=url)\n keystone.authenticate()\n except k_exceptions.Unauthorized:\n pass\n else:\n self.fail('Step 1 failed: Default credentials '\n 'for keystone on master node were not changed')", "def test_002_check_default_openstack_credential_usage(self):\n cluster_data = {\n 'password': self.config.identity.admin_password,\n 'username': self.config.identity.admin_username}\n\n default_data = {\n 'password': 'admin',\n 'username': 'admin'}\n\n self.verify_response_body_not_equal(\n exp_content=default_data,\n act_content=cluster_data,\n msg='Default credentials values are used. '\n 'We kindly recommend that you changed all defaults.',\n failed_step='1')", "def credentials(self):\n return True", "def check_credentials(self, cli_credentials, default_prompt, enable_prompt, logger):\n raise NotImplementedError(\"Class {} must implement method 'check_credentials'\".format(type(self)))", "def check_credentials(self) -> None:\n # Checks the GitHub token is defined\n configuration.get_value(ConfigurationVariable.GIT_TOKEN)", "def check_credentials():\n\n required_variables = ('OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD')\n\n logging.debug(\"checking openstack auth environment variables\")\n ok = True\n for var in required_variables:\n if not var in os.environ:\n logging.warning(\"missing required environment variable: {}\".format(var))\n ok = False\n else:\n logging.debug(\"OpenStack Auth Var: {} = {}\".format(var, os.environ[var]))\n\n return ok", "def check_auth(username, password):\n return username == os.environ['USERNAME'] and password == os.environ['PASSWORD']", "def check_auth(username, password):\n return password == os.getenv('PASSWORD')", "def check_auth():", "def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def test_getcredentials_from_env(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "def has_credentials(self):\n return self.username and self.password and self.url and self.xml_rpc", "def test_credential_default_values():\n creds = Credentials()\n assert creds.url is None\n assert creds.token is None\n assert creds.org_key is None\n assert creds.ssl_verify\n assert creds.ssl_verify_hostname\n assert creds.ssl_cert_file is None\n assert not creds.ssl_force_tls_1_2\n assert creds.proxy is None\n assert not creds.ignore_system_proxy\n assert creds.integration is None\n with pytest.raises(AttributeError):\n assert creds.notexist is None", "def check_auth(username, password):\n return username == 'daniel' and password == config['redis_auth_key']", "def login_to_system(credentials):\n return True if credentials else False", "def test_default_auth_methods(mp_config_file):\n with custom_mp_config(mp_config_file):\n check.is_in(\"env\", default_auth_methods())\n check.is_in(\"msi\", default_auth_methods())\n check.is_in(\"cli\", default_auth_methods())\n check.is_in(\"interactive\", default_auth_methods())", "def check_auth(username, password):\r\n return username == current_app.config['ADMIN_USERNAME'] \\\r\n and password == current_app.config['ADMIN_PASSWORD']", "def 
use_cred():\n prompt = \"Use Credentials? (N for Anonymous)\"\n return query_yes_no(question=prompt, default=\"no\")", "def test_getcredentials_failed_netrc(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert not server._username\n assert not server._password", "def check_auth(username, password):\n return username == current_app.config['DOC_USERNAME'] and password == current_app.config['DOC_PASSWORD']", "def check_auth(username, password):\n return (username == app.config['USERNAME'] and\n password == app.config['PASSWORD'])", "def check_auth(username, password):\n try:\n locust_username = os.environ['LOCUST_USER_NAME']\n locust_password = os.environ['LOCUST_PASSWORD']\n return username == locust_username and password == locust_password\n except:\n return True", "def check_auth(username, password):\n return username == app.config['USERNAME'] and (\n password == app.config['PASSWORD'])", "def check_get_config() -> Config:\n global config\n if config.token == DEFAULT_TOKEN:\n # try reinit, as may have ran login in another terminal/subprocess\n _config = init(config._env)\n if _config.token == DEFAULT_TOKEN:\n # still don't have a token set for the env, open up the browser\n if not _IN_PYTEST:\n f = furl(path=\"/home/\", origin=_config.server)\n webbrowser.open(url=str(f), new=2)\n raise InvalidTokenError(\n \"Please sign-up and login - if you already have then please restart your Jupyter kernel/Python instance to initialize your new token\"\n )\n return _config\n return config", "def test_getcredentials_from_netrc(netrc):\n netrc.return_value.authenticators.return_value = (USERNAME, \"\", PASSWORD)\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "def check_auth(username, password):\n return username == 'admin' and password == 'root'", "def test_environment_credentials(main_container):\n # Check for credential variables.\n # These are not required for pre-built images.\n assert (\n \"FOUNDRY_USERNAME\" in os.environ\n ), \"FOUNDRY_USERNAME was not in the environment\"\n assert (\n \"FOUNDRY_PASSWORD\" in os.environ\n ), \"FOUNDRY_PASSWORD was not in the environment\"", "def check_auth(username, password):\n return get_ct_object(username, password) is not None", "def credentials_given(self):\n return self.key and self.secret", "def credentials_work(self):\n good = True\n try:\n self.session.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good" ]
[ "0.8095686", "0.73973477", "0.68096805", "0.6725103", "0.6619466", "0.6389539", "0.6344078", "0.6290091", "0.6246063", "0.6232161", "0.61840326", "0.6176696", "0.6170216", "0.6159659", "0.611495", "0.6110858", "0.60856205", "0.6053819", "0.60522896", "0.604298", "0.6013833", "0.60124224", "0.60058", "0.59982073", "0.5989699", "0.59826565", "0.5981146", "0.59761333", "0.5953952", "0.59383583" ]
0.7976447
1
Check if default credentials for OpenStack cluster have changed
def test_002_check_default_openstack_credential_usage(self): cluster_data = { 'password': self.config.identity.admin_password, 'username': self.config.identity.admin_username} default_data = { 'password': 'admin', 'username': 'admin'} self.verify_response_body_not_equal( exp_content=default_data, act_content=cluster_data, msg='Default credentials values are used. ' 'We kindly recommend that you changed all defaults.', failed_step='1')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_003_check_default_keystone_credential_usage(self):\n\n usr = self.config.master.keystone_user\n pwd = self.config.master.keystone_password\n url = 'http://{0}:5000/v2.0'.format(self.config.nailgun_host)\n\n try:\n keystone = keystoneclient(username=usr,\n password=pwd,\n auth_url=url)\n keystone.authenticate()\n except k_exceptions.Unauthorized:\n pass\n else:\n self.fail('Step 1 failed: Default credentials '\n 'for keystone on master node were not changed')", "def test_001_check_default_master_node_credential_usage(self):\n ip = self.config.nailgun_host\n\n ssh_client = ssh.Client(ip,\n self.config.master.master_node_ssh_user,\n self.config.master.master_node_ssh_password,\n timeout=self.config.master.ssh_timeout)\n cmd = \"date\"\n output = []\n try:\n output = ssh_client.exec_command(cmd)\n LOG.debug(output)\n except exceptions.SSHExecCommandFailed:\n self.verify_response_true(len(output) == 0,\n 'Step 1 failed: Default credentials for '\n 'ssh on master node were not changed')\n except exceptions.TimeoutException:\n self.verify_response_true(len(output) == 0,\n 'Step 1 failed: Default credentials for '\n 'ssh on master node were not changed')\n except exc.SSHException:\n self.verify_response_true(len(output) == 0,\n 'Step 1 failed: Default credentials for '\n 'ssh on master node were not changed')\n\n self.verify_response_true(len(output) == 0,\n 'Step 1 failed: Default credentials for '\n 'ssh on master node were not changed')", "def credentials(self):\n return True", "def checkCredentialChange(response):\n credentials = getattr(flask.g, '_credentials', None)\n if credentials is not None:\n config = get_user_config()\n json_credentials = credentials.to_json()\n if config.credentials != json_credentials:\n config.credentials = json_credentials\n config.save()\n\n return response", "def check_credentials():\n\n required_variables = ('OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD')\n\n logging.debug(\"checking openstack auth environment variables\")\n ok = True\n for var in required_variables:\n if not var in os.environ:\n logging.warning(\"missing required environment variable: {}\".format(var))\n ok = False\n else:\n logging.debug(\"OpenStack Auth Var: {} = {}\".format(var, os.environ[var]))\n\n return ok", "def is_secured_cluster(self, services):\n return services and \"cluster-env\" in services[\"configurations\"] and\\\n \"security_enabled\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] and\\\n services[\"configurations\"][\"cluster-env\"][\"properties\"][\"security_enabled\"].lower() == \"true\"", "def check_credentials(cluster):\n es_config = cluster['es']\n es_auth = (es_config['username'], es_config['password'])\n\n _app_path = '/app/kibana'\n kbn_config = cluster['kibana']\n kbn_auth = (kbn_config['username'], kbn_config['password'])\n kbn_config['auth'] = kbn_auth\n kbn_url = f'{kbn_config[\"protocol\"]}://{kbn_config[\"url\"]}:{kbn_config[\"port\"]}'\n\n es = Elasticsearch(\n es_config['url'],\n use_ssl=True if es_config['protocol'] == 'https' else False,\n port=es_config['port'],\n verify_certs=True,\n http_auth=es_auth)\n\n try:\n if es.cluster.health():\n es_config['client'] = es\n rv = requests.head(\n f'{kbn_url}{_app_path}', auth=kbn_auth, timeout=10.0)\n except Exception as e:\n return False\n\n return rv.ok", "def credentials_given(self):\n return self.key and self.secret", "def check_credentials(self, cli_credentials, default_prompt, enable_prompt, logger):\n raise NotImplementedError(\"Class {} must implement method 
'check_credentials'\".format(type(self)))", "def test_credential_default_values():\n creds = Credentials()\n assert creds.url is None\n assert creds.token is None\n assert creds.org_key is None\n assert creds.ssl_verify\n assert creds.ssl_verify_hostname\n assert creds.ssl_cert_file is None\n assert not creds.ssl_force_tls_1_2\n assert creds.proxy is None\n assert not creds.ignore_system_proxy\n assert creds.integration is None\n with pytest.raises(AttributeError):\n assert creds.notexist is None", "def credentials_work(self):\n good = True\n try:\n self.session.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good", "def has_credentials(self):\n return self.username and self.password and self.url and self.xml_rpc", "def test_300_keystone_default_config(self):\n u.log.debug('Checking keystone config file...')\n unit = self.keystone_sentry\n conf = '/etc/keystone/keystone.conf'\n ks_ci_rel = unit.relation('identity-service',\n 'cinder:identity-service')\n my_ks_rel = self.pxc_sentry.relation('shared-db',\n 'keystone:shared-db')\n db_uri = \"mysql://{}:{}@{}/{}\".format('keystone',\n my_ks_rel['password'],\n my_ks_rel['db_host'],\n 'keystone')\n expected = {\n 'DEFAULT': {\n 'debug': 'False',\n 'admin_token': ks_ci_rel['admin_token'],\n 'use_syslog': 'False',\n 'log_config_append': '/etc/keystone/logging.conf',\n 'public_endpoint': u.valid_url, # get specific\n 'admin_endpoint': u.valid_url, # get specific\n },\n 'extra_headers': {\n 'Distribution': 'Ubuntu'\n },\n 'database': {\n 'connection': db_uri,\n 'idle_timeout': '200'\n }\n }\n\n if self._get_openstack_release() < self.trusty_mitaka:\n expected['DEFAULT']['verbose'] = 'False'\n expected['DEFAULT']['log_config'] = \\\n expected['DEFAULT']['log_config_append']\n del expected['DEFAULT']['log_config_append']\n\n if self._get_openstack_release() >= self.trusty_kilo and \\\n self._get_openstack_release() < self.trusty_mitaka:\n # Kilo and Liberty\n expected['eventlet_server'] = {\n 'admin_bind_host': '0.0.0.0',\n 'public_bind_host': '0.0.0.0',\n 'admin_port': '35347',\n 'public_port': '4990',\n }\n elif self._get_openstack_release() <= self.trusty_icehouse:\n # Juno and earlier\n expected['DEFAULT'].update({\n 'admin_port': '35347',\n 'public_port': '4990',\n 'bind_host': '0.0.0.0',\n })\n\n for section, pairs in expected.iteritems():\n ret = u.validate_config_data(unit, conf, section, pairs)\n if ret:\n message = \"keystone config error: {}\".format(ret)\n amulet.raise_status(amulet.FAIL, msg=message)", "def _check_user_entry(user):\n if \"tenant_name\" in user:\n keys = set(user.keys())\n if keys == {\"username\", \"password\", \"tenant_name\",\n \"project_domain_name\", \"user_domain_name\"}:\n if (user[\"user_domain_name\"] == \"\"\n and user[\"project_domain_name\"] == \"\"):\n # it is credentials of keystone v2 and they were created\n # --fromenv\n del user[\"user_domain_name\"]\n del user[\"project_domain_name\"]\n return True\n else:\n # it looks like keystone v3 credentials\n user[\"project_name\"] = user.pop(\"tenant_name\")\n return True", "def credentials_work(self):\n\n good = True\n try:\n self.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good", "def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def test_default_auth_methods(mp_config_file):\n with custom_mp_config(mp_config_file):\n 
check.is_in(\"env\", default_auth_methods())\n check.is_in(\"msi\", default_auth_methods())\n check.is_in(\"cli\", default_auth_methods())\n check.is_in(\"interactive\", default_auth_methods())", "def test_getcredentials_failed_netrc(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert not server._username\n assert not server._password", "def check_auth(username, password):\r\n return username == current_app.config['ADMIN_USERNAME'] \\\r\n and password == current_app.config['ADMIN_PASSWORD']", "def isSciServerComputeEnvironment():\n if os.path.isfile(\"/home/idies/keystone.token\"):\n return True\n else:\n return False", "def test_getcredentials_from_netrc(netrc):\n netrc.return_value.authenticators.return_value = (USERNAME, \"\", PASSWORD)\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "def is_directly_updatable(credentials: Credentials) -> bool:\n if credentials.base_url == QE_URL:\n return True\n\n if credentials.base_url in (QCONSOLE_URL, QE2_URL, QCONSOLE2_URL):\n if credentials.base_url == credentials.url:\n return True\n\n return False", "def check_credentials(self) -> None:\n # Checks the GitHub token is defined\n configuration.get_value(ConfigurationVariable.GIT_TOKEN)", "def check_auth(username, password):\n return get_ct_object(username, password) is not None", "def check_auth(username, password):\n return password == os.getenv('PASSWORD')", "def test_getcredentials_from_env(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "def login_to_system(credentials):\n return True if credentials else False", "def command_check_credentials():\n \n # now calling STS service with the credentials retrieved for verification\n if not aws.check_credentials():\n print(\"credential check failed. exiting program with exit code 1\")\n sys.exit(1)", "def check_auth(username, password):\n try:\n locust_username = os.environ['LOCUST_USER_NAME']\n locust_password = os.environ['LOCUST_PASSWORD']\n return username == locust_username and password == locust_password\n except:\n return True", "def check_auth(username, password):\n return username == os.environ['USERNAME'] and password == os.environ['PASSWORD']" ]
[ "0.7338063", "0.65568185", "0.6468364", "0.62571883", "0.59062266", "0.5873348", "0.585109", "0.5782438", "0.57635754", "0.57020897", "0.5653839", "0.563108", "0.5605719", "0.55638623", "0.5553499", "0.5546776", "0.5503604", "0.5487107", "0.5486545", "0.5467942", "0.5448755", "0.54438424", "0.54226017", "0.5417828", "0.54158974", "0.54145515", "0.5404713", "0.5403893", "0.5386291", "0.53856677" ]
0.7722615
0
Check usage of default credentials for keystone on master node
def test_003_check_default_keystone_credential_usage(self): usr = self.config.master.keystone_user pwd = self.config.master.keystone_password url = 'http://{0}:5000/v2.0'.format(self.config.nailgun_host) try: keystone = keystoneclient(username=usr, password=pwd, auth_url=url) keystone.authenticate() except k_exceptions.Unauthorized: pass else: self.fail('Step 1 failed: Default credentials ' 'for keystone on master node were not changed')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_002_check_default_openstack_credential_usage(self):\n cluster_data = {\n 'password': self.config.identity.admin_password,\n 'username': self.config.identity.admin_username}\n\n default_data = {\n 'password': 'admin',\n 'username': 'admin'}\n\n self.verify_response_body_not_equal(\n exp_content=default_data,\n act_content=cluster_data,\n msg='Default credentials values are used. '\n 'We kindly recommend that you changed all defaults.',\n failed_step='1')", "def test_001_check_default_master_node_credential_usage(self):\n ip = self.config.nailgun_host\n\n ssh_client = ssh.Client(ip,\n self.config.master.master_node_ssh_user,\n self.config.master.master_node_ssh_password,\n timeout=self.config.master.ssh_timeout)\n cmd = \"date\"\n output = []\n try:\n output = ssh_client.exec_command(cmd)\n LOG.debug(output)\n except exceptions.SSHExecCommandFailed:\n self.verify_response_true(len(output) == 0,\n 'Step 1 failed: Default credentials for '\n 'ssh on master node were not changed')\n except exceptions.TimeoutException:\n self.verify_response_true(len(output) == 0,\n 'Step 1 failed: Default credentials for '\n 'ssh on master node were not changed')\n except exc.SSHException:\n self.verify_response_true(len(output) == 0,\n 'Step 1 failed: Default credentials for '\n 'ssh on master node were not changed')\n\n self.verify_response_true(len(output) == 0,\n 'Step 1 failed: Default credentials for '\n 'ssh on master node were not changed')", "def test_300_keystone_default_config(self):\n u.log.debug('Checking keystone config file...')\n unit = self.keystone_sentry\n conf = '/etc/keystone/keystone.conf'\n ks_ci_rel = unit.relation('identity-service',\n 'cinder:identity-service')\n my_ks_rel = self.pxc_sentry.relation('shared-db',\n 'keystone:shared-db')\n db_uri = \"mysql://{}:{}@{}/{}\".format('keystone',\n my_ks_rel['password'],\n my_ks_rel['db_host'],\n 'keystone')\n expected = {\n 'DEFAULT': {\n 'debug': 'False',\n 'admin_token': ks_ci_rel['admin_token'],\n 'use_syslog': 'False',\n 'log_config_append': '/etc/keystone/logging.conf',\n 'public_endpoint': u.valid_url, # get specific\n 'admin_endpoint': u.valid_url, # get specific\n },\n 'extra_headers': {\n 'Distribution': 'Ubuntu'\n },\n 'database': {\n 'connection': db_uri,\n 'idle_timeout': '200'\n }\n }\n\n if self._get_openstack_release() < self.trusty_mitaka:\n expected['DEFAULT']['verbose'] = 'False'\n expected['DEFAULT']['log_config'] = \\\n expected['DEFAULT']['log_config_append']\n del expected['DEFAULT']['log_config_append']\n\n if self._get_openstack_release() >= self.trusty_kilo and \\\n self._get_openstack_release() < self.trusty_mitaka:\n # Kilo and Liberty\n expected['eventlet_server'] = {\n 'admin_bind_host': '0.0.0.0',\n 'public_bind_host': '0.0.0.0',\n 'admin_port': '35347',\n 'public_port': '4990',\n }\n elif self._get_openstack_release() <= self.trusty_icehouse:\n # Juno and earlier\n expected['DEFAULT'].update({\n 'admin_port': '35347',\n 'public_port': '4990',\n 'bind_host': '0.0.0.0',\n })\n\n for section, pairs in expected.iteritems():\n ret = u.validate_config_data(unit, conf, section, pairs)\n if ret:\n message = \"keystone config error: {}\".format(ret)\n amulet.raise_status(amulet.FAIL, msg=message)", "def credentials(self):\n return True", "def check_credentials(self) -> None:\n # Checks the GitHub token is defined\n configuration.get_value(ConfigurationVariable.GIT_TOKEN)", "def check_credentials():\n\n required_variables = ('OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD')\n\n logging.debug(\"checking openstack 
auth environment variables\")\n ok = True\n for var in required_variables:\n if not var in os.environ:\n logging.warning(\"missing required environment variable: {}\".format(var))\n ok = False\n else:\n logging.debug(\"OpenStack Auth Var: {} = {}\".format(var, os.environ[var]))\n\n return ok", "def keystonehost():\n env.cd = cd\n env.run = run\n env.hosts = settings.HOSTS['keystone']\n env.exists = exists", "def check_credentials(self, cli_credentials, default_prompt, enable_prompt, logger):\n raise NotImplementedError(\"Class {} must implement method 'check_credentials'\".format(type(self)))", "def test_credential_default_values():\n creds = Credentials()\n assert creds.url is None\n assert creds.token is None\n assert creds.org_key is None\n assert creds.ssl_verify\n assert creds.ssl_verify_hostname\n assert creds.ssl_cert_file is None\n assert not creds.ssl_force_tls_1_2\n assert creds.proxy is None\n assert not creds.ignore_system_proxy\n assert creds.integration is None\n with pytest.raises(AttributeError):\n assert creds.notexist is None", "def test_environment_credentials(main_container):\n # Check for credential variables.\n # These are not required for pre-built images.\n assert (\n \"FOUNDRY_USERNAME\" in os.environ\n ), \"FOUNDRY_USERNAME was not in the environment\"\n assert (\n \"FOUNDRY_PASSWORD\" in os.environ\n ), \"FOUNDRY_PASSWORD was not in the environment\"", "def _check_user_entry(user):\n if \"tenant_name\" in user:\n keys = set(user.keys())\n if keys == {\"username\", \"password\", \"tenant_name\",\n \"project_domain_name\", \"user_domain_name\"}:\n if (user[\"user_domain_name\"] == \"\"\n and user[\"project_domain_name\"] == \"\"):\n # it is credentials of keystone v2 and they were created\n # --fromenv\n del user[\"user_domain_name\"]\n del user[\"project_domain_name\"]\n return True\n else:\n # it looks like keystone v3 credentials\n user[\"project_name\"] = user.pop(\"tenant_name\")\n return True", "def test_getcredentials_from_env(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "def test_getcredentials_failed_netrc(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert not server._username\n assert not server._password", "def test_aiven_creds_exist(self):\n assert os.environ[\"AIVEN_API_URL\"] is not None\n assert os.environ[\"AIVEN_TOKEN\"] is not None", "def isSciServerComputeEnvironment():\n if os.path.isfile(\"/home/idies/keystone.token\"):\n return True\n else:\n return False", "def credentials_given(self):\n return self.key and self.secret", "def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def test_getcredentials_from_netrc(netrc):\n netrc.return_value.authenticators.return_value = (USERNAME, \"\", PASSWORD)\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "def check_auth():", "def validate_keystone_users(self, client):\n u.log.debug('Checking keystone users...')\n base = [\n {'name': 'demoUser',\n 'enabled': True,\n 'id': u.not_null,\n 'email': '[email protected]'},\n {'name': 'admin',\n 'enabled': True,\n 'id': u.not_null,\n 'email': 'juju@localhost'},\n {'name': 'cinder_cinderv2',\n 'enabled': True,\n 'id': u.not_null,\n 'email': u'juju@localhost'}\n ]\n expected = []\n for user_info in base:\n if self.keystone_api_version 
== 2:\n user_info['tenantId'] = u.not_null\n else:\n user_info['default_project_id'] = u.not_null\n expected.append(user_info)\n actual = client.users.list()\n ret = u.validate_user_data(expected, actual,\n api_version=self.keystone_api_version)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)", "def test_validate_credentials(self):\n pass", "def authenticate_user(self):\r\n if self.options.os_auth_strategy == 'keystone':\r\n if self.options.os_token or self.options.os_url:\r\n # Token flow auth takes priority\r\n if not self.options.os_token:\r\n raise exc.CommandError(\r\n _(\"You must provide a token via\"\r\n \" either --os-token or env[OS_TOKEN]\"))\r\n\r\n if not self.options.os_url:\r\n raise exc.CommandError(\r\n _(\"You must provide a service URL via\"\r\n \" either --os-url or env[OS_URL]\"))\r\n\r\n else:\r\n # Validate password flow auth\r\n if (not self.options.os_username\r\n and not self.options.os_user_id):\r\n raise exc.CommandError(\r\n _(\"You must provide a username or user ID via\"\r\n \" --os-username, env[OS_USERNAME] or\"\r\n \" --os-user_id, env[OS_USER_ID]\"))\r\n\r\n if not self.options.os_password:\r\n raise exc.CommandError(\r\n _(\"You must provide a password via\"\r\n \" either --os-password or env[OS_PASSWORD]\"))\r\n\r\n if (not self.options.os_tenant_name\r\n and not self.options.os_tenant_id):\r\n raise exc.CommandError(\r\n _(\"You must provide a tenant_name or tenant_id via\"\r\n \" --os-tenant-name, env[OS_TENANT_NAME]\"\r\n \" --os-tenant-id, or via env[OS_TENANT_ID]\"))\r\n\r\n if not self.options.os_auth_url:\r\n raise exc.CommandError(\r\n _(\"You must provide an auth url via\"\r\n \" either --os-auth-url or via env[OS_AUTH_URL]\"))\r\n else: # not keystone\r\n if not self.options.os_url:\r\n raise exc.CommandError(\r\n _(\"You must provide a service URL via\"\r\n \" either --os-url or env[OS_URL]\"))\r\n\r\n self.client_manager = clientmanager.ClientManager(\r\n token=self.options.os_token,\r\n url=self.options.os_url,\r\n auth_url=self.options.os_auth_url,\r\n tenant_name=self.options.os_tenant_name,\r\n tenant_id=self.options.os_tenant_id,\r\n username=self.options.os_username,\r\n user_id=self.options.os_user_id,\r\n password=self.options.os_password,\r\n region_name=self.options.os_region_name,\r\n api_version=self.api_version,\r\n auth_strategy=self.options.os_auth_strategy,\r\n service_type=self.options.service_type,\r\n endpoint_type=self.options.endpoint_type,\r\n insecure=self.options.insecure,\r\n ca_cert=self.options.os_cacert,\r\n log_credentials=True)\r\n return", "def _authenticate(self):\n cred_file = self.module.params.pop('config_file', None)\n section = self.module.params.pop('section')\n self._env_vars(cred_file=cred_file, section=section)\n\n required_vars = ['login_url', 'login_user', 'login_password']\n variables = [\n 'login_url',\n 'login_user',\n 'login_password',\n 'login_tenant_name',\n 'region',\n 'auth_version',\n 'snet'\n ]\n variables_dict = self._get_vars(variables, required=required_vars)\n\n login_url = variables_dict.pop('login_url')\n login_user = variables_dict.pop(\n 'login_user', os.getenv('OS_AUTH_URL')\n )\n login_password = variables_dict.pop(\n 'login_password', os.getenv('OS_AUTH_URL')\n )\n login_tenant_name = variables_dict.pop(\n 'login_tenant_name', os.getenv('OS_TENANT_ID')\n )\n region = variables_dict.pop('region', None)\n\n auth_version = variables_dict.pop('auth_version')\n snet = variables_dict.pop('snet', None)\n\n if snet in BOOLEANS_TRUE:\n snet = True\n else:\n snet = None\n\n if 
login_password is None:\n self.failure(\n error='Missing Password',\n rc=2,\n msg='A Password is required for authentication. Try adding'\n ' [ login_password ] to the task'\n )\n\n if login_tenant_name is None:\n login_tenant_name = ' '\n\n creds_dict = {\n 'user': login_user,\n 'key': login_password,\n 'authurl': login_url,\n 'tenant_name': login_tenant_name,\n 'os_options': {\n 'region': region\n },\n 'snet': snet,\n 'auth_version': auth_version\n }\n\n self.swift = client.Connection(**creds_dict)", "def command_check_credentials():\n \n # now calling STS service with the credentials retrieved for verification\n if not aws.check_credentials():\n print(\"credential check failed. exiting program with exit code 1\")\n sys.exit(1)", "def check_auth(username, password):\n return get_ct_object(username, password) is not None", "def check_auth(username, password):\n return username == 'daniel' and password == config['redis_auth_key']", "def check_auth(username, password):\n return username == os.environ['USERNAME'] and password == os.environ['PASSWORD']", "def check_auth(username, password):\r\n return username == current_app.config['ADMIN_USERNAME'] \\\r\n and password == current_app.config['ADMIN_PASSWORD']", "def check_auth(username, password):\n return password == os.getenv('PASSWORD')", "def test_keystone_v3(self):\n if self._get_openstack_release() >= self.xenial_queens:\n u.log.info('Skipping keystone v3 test for queens or later')\n return\n os_release = self._get_openstack_release_string()\n if CompareOpenStackReleases(os_release) < 'kilo':\n u.log.info('Skipping test, {} < kilo'.format(os_release))\n return\n u.log.info('Checking that service is configured and operate correctly '\n 'when using Keystine v3 auth...')\n if not self._set_auth_api_version('3'):\n msg = \"Unable to set auth_api_version to '3'\"\n amulet.raise_status(amulet.FAIL, msg=msg)\n return\n if self._get_openstack_release() >= self.trusty_mitaka:\n # NOTE(jamespage):\n # Re-init tests to create v3 versions of glance, swift and\n # keystone clients for mitaka or later, where glance uses\n # v3 to access backend swift services. Early v3 deployments\n # still use v2 credentials in glance for swift access.\n self._initialize_tests(api_version=3)\n self.test_302_proxy_server_config(auth_api_version='3')\n self.test_400_swift_backed_image_create()" ]
[ "0.7416696", "0.73040277", "0.66775703", "0.64706546", "0.63807833", "0.63734967", "0.6169981", "0.6136318", "0.6118", "0.6086049", "0.607734", "0.60373425", "0.60364085", "0.6034082", "0.59883475", "0.5980997", "0.5974277", "0.59671694", "0.59401226", "0.5902685", "0.5882367", "0.5877262", "0.5874383", "0.5870375", "0.58376634", "0.5831962", "0.5830922", "0.5822705", "0.5781756", "0.5760464" ]
0.8827416
0
helper function to get user.id using email
def getUserID(email): try: user = session.query(User_info).filter_by(email=email).one() return user.id except Exception as e: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_id(self, email):\n\n query = self._db.User.select(self._db.User.c.email == email)\n query = query.with_only_columns([self._db.User.c.id_, ])\n\n record = query.execute().fetchone()\n return record[0]", "def find_user_id(email: str):\n user_id = sdk.search_users(email=email)\n \n \"\"\" Customized logic block to check if an email address is associated with a Looker user\"\"\"\n if len(user_id) == 0: \n return 'There is no user associated with this email' \n else:\n return user_id[0]['id']", "def getUserID(email):\r\n try:\r\n session = DBSession()\r\n return session.query(User).filter_by(email=email).one().id\r\n except:\r\n return None", "def getUserID(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except:\n return None", "def get_user_id(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except:\n return None", "def get_info(email):\n # Get the first user where _id=email\n user = models.User.objects.raw({\"_id\": email}).first()\n return user", "def getUserID(email):\n\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except:\n return None", "def getUserID(email):\n\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except:\n return None", "def get_user_id(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except Exception as e:\n print 'No user found for ' + email + ': ' + str(e)\n return None", "def get_user_id(email: str) -> str:\n response = api.search_users(search_payload={\"keywords\": EMAIL})\n\n if not response.ok:\n print(response.data)\n sys.exit(1)\n\n for item in response.data.get(\"items\"):\n if item.get(\"email\") == EMAIL:\n return item.get(\"id\")\n else:\n return None", "def get_user_id(session, email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n\n except BaseException:\n return None", "def get_user_id_from_email(email, users):\n # find the user id that matches the email provided in user_to_add field\n for user in users:\n if user.get('email') == email:\n return user.get('userId')\n\n raise Exception('no user found with email ' + email)", "def get_user_id(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except NoResultFound:\n return -1\n except MultipleResultsFound:\n return -1", "def helper_get_by_email(user_email):\n user = heart_rate_databases_starter.models.User.objects.raw({\"_id\": user_email}).first() # Get the first user where _id=email\n return user", "def get_userid(email, name):\n user = session.query(User).filter_by(email=email).first()\n\n if user:\n return user.id\n else:\n user = User(email=email, name=name)\n session.add(user)\n session.commit()\n return user.id", "def get_id(self):\n return self.email", "def get_id(self):\n return self.email", "def lookup_user_by_email(email):\n try:\n slack_client = get_client()\n result = slack_client.users_lookupByEmail(email=email)\n id = result.data['user']['id'] # Looks like: UJ0JNCX19, tag the user in a message like <@UJ0JNCX19>\n return '<@' + id + '>'\n except:\n return email", "def user(email):\r\n return User.objects.get(email=email)", "def lookup_email(email):\n user = User.objects(email=email).first()\n return user", "def get_auth0_user_id_by_email(email):\n\n get_token = GetToken(auth0_domain)\n token = get_token.client_credentials(\n auth0_client_id,\n auth0_client_secret,\n 'https://{}/api/v2/'.format(auth0_domain)\n )\n 
mgmt_api_token = token['access_token']\n auth0_users = Auth0Users(auth0_domain, mgmt_api_token)\n query = 'email:%s' % email\n results = auth0_users.list(q=query, search_engine='v3')\n if results['users']:\n auth0_user_id = results['users'][0]['user_id']\n else:\n auth0_user_id = None\n\n return auth0_user_id", "def get_email(obj):\r\n return obj.user.email", "def lookup_user(email):\n\n if not settings.SLACK_TOKEN:\n return None\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.users_lookupByEmail(email=email)\n assert response['ok'] is True\n return response['user']['id']\n except SlackApiError as e:\n assert e.response['ok'] is False\n return None", "def get_auth0_id_of_user(email):\n return _get_auth0_id_of_user(email,\n token_redis_connection(),\n auth0_token(),\n current_app.config)", "def current_user(email):\n for user in Data.users:\n if email == user['email']:\n return user", "def get_user(id):\n pass", "def get_email(self, id_):\n\n query = self._db.User.select(self._db.User.c.id_ == id_)\n query = query.with_only_columns([self._db.User.c.email, ])\n\n record = query.execute().fetchone()\n return record[0]", "def test_get_user_by_emailuser_email_get(self):\n pass", "def get_account_id(self, email=None, username=None, email_id=None):\n if email_id is not None:\n login_type = 'email_id'\n login_value = email_id\n elif email is not None:\n login_type = 'email_id'\n login_value = self.get_email_id(email)\n elif username is not None:\n login_type = 'username'\n login_value = username\n else:\n return 0\n \n try:\n return self.sql('SELECT id FROM accounts WHERE {} = %s'.format(login_type), login_value)[0][0]\n except IndexError:\n return 0", "def check_if_user_exists(self, email):\n for user in self.users.values():\n if user['email'] == email:\n return user['id']\n else:\n return False" ]
[ "0.80991805", "0.80331326", "0.79804707", "0.7974303", "0.7971684", "0.7953753", "0.7935821", "0.7935821", "0.78868335", "0.78705585", "0.77772075", "0.7756721", "0.7755162", "0.77276444", "0.75779545", "0.7553001", "0.7553001", "0.75268793", "0.74339384", "0.73698187", "0.7310679", "0.72602355", "0.72335696", "0.72294647", "0.721393", "0.7206407", "0.71615267", "0.7159916", "0.7107587", "0.7083839" ]
0.80812913
1
Launch the instance of tensorboard given the directory and port
def launch_tb(logdir: str = None, port: str = '7900'): tb = program.TensorBoard() tb.configure(argv=[None, '--logdir', logdir, '--port', port]) url = tb.launch() print(f'======\nLaunching tensorboard,\nDirectory: {logdir}\nPort: {port}\n======\n') return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_tensorboard(self):\n python_path = sys.executable\n option = '--logdir=' + self.instance.instance_summary_folder_path\n args_ = [python_path, tensorboard_dir(), option]\n self.open_subprocess(args_=args_, subprocess_key=\"tensorboard\")", "def run_simple_server(tb_app):\n # Mute the werkzeug logging.\n base_logging.getLogger('werkzeug').setLevel(base_logging.WARNING)\n\n try:\n server = serving.make_server(FLAGS.host, FLAGS.port, tb_app, threaded=True)\n server.daemon_threads = True\n except socket.error:\n if FLAGS.port == 0:\n msg = 'TensorBoard unable to find any open port'\n else:\n msg = (\n 'TensorBoard attempted to bind to port %d, but it was already in use'\n % FLAGS.port)\n logging.error(msg)\n print(msg)\n exit(-1)\n\n port = server.socket.getsockname()[1]\n msg = 'Starting TensorBoard %s at http://%s:%d' % (tb_app.tag, FLAGS.host,\n port)\n print(msg)\n logging.info(msg)\n print('(Press CTRL+C to quit)')\n sys.stdout.flush()\n\n server.serve_forever()", "def run(port):\n run(host=config.HOST, port=port)", "def start(args_string):\n context = _get_context()\n try:\n import IPython\n import IPython.display\n except ImportError:\n IPython = None\n\n if context == _CONTEXT_NONE:\n handle = None\n print(\"Launching TensorBoard...\")\n else:\n handle = IPython.display.display(\n IPython.display.Pretty(\"Launching TensorBoard...\"),\n display_id=True,\n )\n\n def print_or_update(message):\n if handle is None:\n print(message)\n else:\n handle.update(IPython.display.Pretty(message))\n\n parsed_args = shlex.split(args_string, comments=True, posix=True)\n start_result = manager.start(parsed_args)\n\n if isinstance(start_result, manager.StartLaunched):\n _display(\n port=start_result.info.port,\n print_message=False,\n display_handle=handle,\n )\n\n elif isinstance(start_result, manager.StartReused):\n template = (\n \"Reusing TensorBoard on port {port} (pid {pid}), started {delta} ago. \"\n \"(Use '!kill {pid}' to kill it.)\"\n )\n message = template.format(\n port=start_result.info.port,\n pid=start_result.info.pid,\n delta=_time_delta_from_info(start_result.info),\n )\n print_or_update(message)\n _display(\n port=start_result.info.port,\n print_message=False,\n display_handle=None,\n )\n\n elif isinstance(start_result, manager.StartFailed):\n def format_stream(name, value):\n if value == \"\":\n return \"\"\n elif value is None:\n return \"\\n<could not read %s>\" % name\n else:\n return \"\\nContents of %s:\\n%s\" % (name, value.strip())\n message = (\n \"ERROR: Failed to launch TensorBoard (exited with %d).%s%s\" %\n (\n start_result.exit_code,\n format_stream(\"stderr\", start_result.stderr),\n format_stream(\"stdout\", start_result.stdout),\n )\n )\n print_or_update(message)\n\n elif isinstance(start_result, manager.StartTimedOut):\n message = (\n \"ERROR: Timed out waiting for TensorBoard to start. \"\n \"It may still be running as pid %d.\"\n % start_result.pid\n )\n print_or_update(message)\n\n else:\n raise TypeError(\n \"Unexpected result from `manager.start`: %r.\\n\"\n \"This is a TensorBoard bug; please report it.\"\n % start_result\n )", "def create_tensorboard_process(self):\n port = 6006\n\n for _ in range(100):\n p = subprocess.Popen(\n [\"tensorboard\", \"--logdir\", self.logdir, \"--host\", \"localhost\", \"--port\",\n str(port)],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n self.event.wait(5)\n if p.poll():\n port += 1\n else:\n return port, p\n\n raise OSError(\n 'No available ports to start TensorBoard. 
Attempted all ports between 6006 and 6105')", "def launch(config):\n \n launch_with_configs([config])", "def _tunnel(port):\n func_args = locals()\n conf = Bunch(**func_args)\n \n # Loads default config if there is one\n # and update the conf object with data\n # from it, but function args have precedence\n fname = os.path.expanduser(\"~/.nbx/aws.json\")\n fname = Path(fname)\n if fname.is_file(): \n stored = load(fname)\n for k,v in stored.items():\n if k not in conf: conf[k] = v\n \n # Check if we got everything we need to\n # connect to instance\n fail = False\n for k in [\"ip\", \"user\", \"key\", \"port\"]:\n if conf[k] is None:\n fail = True\n print(f\"Please provide --{k}\")\n \n if fail: return\n \n # We could write some environment vars\n # but we can't source them from here\n #\n # fname = os.path.expanduser(\"~/.nbx/.bash_aws\")\n # string = f\"export xaws={conf.user}@{conf.ip};\\n\"\n # dump(string, fname, format=\".txt\")\n\n # Connect to server and forward local port 8888 to remote port 8888\n # We can now connect to a remote jupyter notebook server via `http://localhost:8888/`\n cmd = f\"ssh -i {conf.key} -L {conf.port}:localhost:{conf.port} {conf.user}@{conf.ip}\"\n os.system(f'bash -c \\\"{cmd}\\\"')", "def run(self):\n self.app.run(host=\"0.0.0.0\")", "def use_tensorboard(print_dir):\n assert not torch.cuda.is_available(), 'TensorBoard not available on free GPUs on Paperspace Gradient'\n TB_process = subprocess.Popen([\"tensorboard\", f\"--logdir={print_dir.parent}\"], stdout=open(os.devnull, 'w'),\n stderr=subprocess.STDOUT) # logdir={print_dir} to show just this run\n writer = tf.SummaryWriter(print_dir / 'TensorBoard_events')\n return TB_process, writer", "def launch(**kwargs):\n\n device_config = load_device_config('anc300', kwargs['config'], logger=kwargs['logger'])\n telnet_config = device_config['telnet_config']\n\n\n anc300 = ANC300(\n host=telnet_config['host'], \n port=telnet_config['port'], \n query_delay=device_config['query_delay'], \n passwd=telnet_config['passwd'], \n limits = device_config['limits'],\n logger=kwargs['logger']\n )\n\n\n anc300_service = Service()\n anc300_service.assign_module(module=anc300)\n anc300_service.assign_logger(logger=kwargs['logger'])\n anc300_server = GenericServer(\n service=anc300_service,\n host=get_ip(),\n port=kwargs['port']\n )\n anc300_server.start()", "def run():\r\n log.debug('Starter::run()')\r\n try:\r\n # check specified port\r\n if not conf.port:\r\n raise Exception(\"Please specify port number! 
(use --port)\")\r\n Server(conf.port).run()\r\n except Exception as E:\r\n log.critical(E)", "def start_from_terminal(app):\n parser = optparse.OptionParser()\n parser.add_option(\n '-d', '--debug',\n help='enable debug mode',\n action='store_true', default=False)\n parser.add_option(\n '-p', '--port',\n help='which port to serve content on',\n type='int', default=5000)\n parser.add_option(\n '-f', '--folder',\n help=\"folder to store temporary data\",\n default=\"\")\n parser.add_option(\n '-l', '--lite',\n help='enforcing light mode',\n action='store_true', default=False)\n\n opts, args = parser.parse_args()\n\n assert opts.folder, \"a local cache folder needs to be specified\"\n global MAIN_FOLDER\n global LITE_VIEW\n MAIN_FOLDER = opts.folder\n\n port = opts.port\n logger.info(\"Running on port: {}\".format(port))\n # handle the local folders here\n logger.info(\"Local folder : {}\".format(MAIN_FOLDER))\n cache_dir = os.path.join(MAIN_FOLDER, \"local\")\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n for subdir in [\"sim\", \"tmp\"]:\n sim_dir = os.path.join(MAIN_FOLDER, subdir)\n if not os.path.exists(sim_dir):\n os.makedirs(sim_dir)\n\n if opts.lite:\n create_lite_dag_dict()\n app.config[\"LITEVIEW\"] = opts.lite\n\n if opts.debug:\n app.run(debug=True, host='0.0.0.0', port=port)\n else:\n logger.setLevel(logging.INFO)\n start_tornado(app, port)\n # app.run(debug=False, host='0.0.0.0', port=port)", "def run():\n register_component(\"press\")\n run_app(host=\"0.0.0.0\", port=8080, debug=True, workers=os.cpu_count())", "def run(*port):\n print(port)\n if port:\n port = port[0]\n else:\n port = 8000\n external_ip = '0.0.0.0:{}'.format(port)\n _manage('runserver %s' % external_ip)", "def cmd_port(args):", "def run(port):\n print \"========= SAND conformance server =============\"\n print \"-----------------------------------------------\"\n import os\n if os.environ.get('PORT') is not None:\n port = int(os.environ['PORT'])\n APP.run(port=port)", "def run():\n app.run(debug=True, port=5001)", "def run(self, host=\"0.0.0.0\", port=8080):\n self.app.run(host=host, port=port, debug=True, use_reloader=False,\n use_evalex=False)", "def run(ctx, name, path, sn=None, board=None):\n sn = util.get_device_sn(ctx, sn)\n if sn is None:\n return\n\n if board is None:\n board_name = ctx.pylon.board\n else:\n board_name = board\n\n xp.build(ctx, board=board)\n\n with ctx.cd(path):\n if not os.path.isdir(os.path.join(path, \"repos\")):\n ctx.run(\"newt -v upgrade\")\n ctx.run(\"newt create-image {app}_{board} 1.0.0\".format(app=name, board=board_name))\n\n img = \"{path}/bin/targets/{app}_{board}/app/apps/{app}/{app}.img\"\n img = img.format(path=path, app=name, board=board_name)\n\n # Flash app in first app slot\n board_const = boards.get_board_constants(board_name)\n jlink.flash(ctx, img, sn, board_const[\"flash_start_addr\"])", "def localhost():\n env.run = local\n env.cd = lcd\n env.deployment = 'local'", "def main():\n\n # TODO: more advanced argument processing\n\n # Handle port\n port = None\n if len(sys.argv) > 1:\n port_arg = sys.argv[1]\n try:\n port = int(port_arg[1:] if port_arg.startswith(':') else port_arg)\n except:\n pass\n\n try:\n serve(port=port)\n except ValueError, ex:\n # Show input error\n print 'Error:', ex", "def run(debug, threaded, host, port):\n \n HOST, PORT = host, port\n print \"running on %s:%d\" % (HOST, PORT)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)", "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def 
launch(self):", "def _display(port=None, height=None, print_message=False, display_handle=None):\n if height is None:\n height = 600\n\n if port is None:\n infos = manager.get_all()\n if not infos:\n raise ValueError(\"Can't display TensorBoard: no known instances running.\")\n else:\n info = max(manager.get_all(), key=lambda x: x.start_time)\n port = info.port\n else:\n infos = [i for i in manager.get_all() if i.port == port]\n info = (\n max(infos, key=lambda x: x.start_time)\n if infos\n else None\n )\n\n if print_message:\n if info is not None:\n message = (\n \"Selecting TensorBoard with {data_source} \"\n \"(started {delta} ago; port {port}, pid {pid}).\"\n ).format(\n data_source=manager.data_source_from_info(info),\n delta=_time_delta_from_info(info),\n port=info.port,\n pid=info.pid,\n )\n print(message)\n else:\n # The user explicitly provided a port, and we don't have any\n # additional information. There's nothing useful to say.\n pass\n\n fn = {\n _CONTEXT_COLAB: _display_colab,\n _CONTEXT_IPYTHON: _display_ipython,\n _CONTEXT_NONE: _display_cli,\n }[_get_context()]\n return fn(port=port, height=height, display_handle=display_handle)", "def launch(**kwargs):\n\n logger, loghost, logport, clients, guis, params = unpack_launcher(**kwargs)\n config = load_config(kwargs['config'], logger=logger)\n\n\n ao_client = find_client(logger, clients, 'nidaqmx')\n ai_client = find_client(logger, clients, 'nidaqmx_ai')\n\n # Instantiate Monitor script\n laser_stabilizer = LaserStabilizer(\n config=kwargs['config'],\n ao_client=ao_client,\n ai_client=ai_client\n )\n\n update_service = Service()\n update_service.assign_module(module=laser_stabilizer)\n update_service.assign_logger(logger=logger)\n update_server, update_port = create_server(update_service, logger, host=get_ip())\n logger.update_data(data={'port': update_port})\n laser_stabilizer.gui.set_network_info(port=update_port)\n update_server.start()\n\n # Run continuously\n # Note that the actual operation inside run() can be paused using the update server\n while True:\n\n laser_stabilizer.run()", "def init():\n\n @click.command()\n @click.option('--cell', required=True,\n envvar='TREADMILL_CELL',\n callback=cli.handle_context_opt,\n expose_value=False)\n @click.option('--ssh', help='SSH client to use.',\n type=click.Path(exists=True, readable=True))\n @click.argument('app')\n @click.argument('command', nargs=-1)\n def ssh(ssh, app, command):\n \"\"\"SSH into Treadmill container.\"\"\"\n if ssh is None:\n ssh = _DEFAULT_SSH\n\n if app.find('#') == -1:\n # Instance is not specified, list matching and exit.\n raise click.BadParameter('Specify full instance name: xxx#nnn')\n\n app_discovery = discovery.Discovery(context.GLOBAL.zk.conn, app, 'ssh')\n app_discovery.sync()\n\n # Restore default signal mask disabled by python spawning new thread\n # for Zk connection.\n #\n # TODO: should this be done as part of zkutils.connect?\n for sig in range(1, signal.NSIG):\n try:\n signal.signal(sig, signal.SIG_DFL)\n except OSError:\n pass\n\n # TODO: not sure how to handle mutliple instances.\n for (app, hostport) in app_discovery.items():\n _LOGGER.info('%s :: %s', app, hostport)\n if hostport:\n host, port = hostport.split(b':')\n run_ssh(host, port, ssh, list(command))\n\n return ssh", "def start_from_terminal(app):\n parser = optparse.OptionParser()\n parser.add_option(\n '-d', '--debug',\n help=\"enable debug mode\",\n action=\"store_true\", default=False)\n parser.add_option(\n '-p', '--port',\n help=\"which port to serve content on\",\n 
type='int', default=5000)\n parser.add_option(\n '-g', '--gpu',\n help=\"use gpu mode\",\n action='store_true', default=False)\n\n opts, args = parser.parse_args()\n\n if opts.debug:\n app.run(debug=True, host='0.0.0.0', port=opts.port)\n else:\n start_tornado(app, opts.port)", "def run(self):\n self.node_id = CONFIG.node_id\n self.running = Event()\n if not CONFIG.master_pub or not CONFIG.master_repl:\n print colors.yellow(\"Master IP:port is not set in config file (%s)\"\n % CONFIG._fn)\n master_pub = raw_input(\"Enter Master PUB uri (IP or IP:port):\")\n if \":\" in master_pub:\n ip, _, port = master_pub.rpartition(\":\")\n else:\n ip = master_pub\n port = 5551\n CONFIG.update(\"General\", \"master_pub\", \"%s:%s\" % (ip,\n port))\n master_repl = raw_input(\"Enter Master REPLY uri (IP or IP:port), \"\n \"hit ENTER for default(%s:5552):\" % ip)\n if not master_repl:\n port = 5552\n elif \":\" in master_repl:\n ip, _, port = master_repl.rpartition(\":\")\n else:\n ip = master_repl\n port = 5552\n CONFIG.update(\"General\", \"master_repl\", \"%s:%s\" % (ip,\n port))\n CONFIG.reload()\n\n if not validate_address(CONFIG.master_pub) or \\\n not validate_address(CONFIG.master_repl):\n LOG.error('Server IP not present in config or is not valid.\\n'\n 'Check your config')\n exit(1)\n\n if not self.node_id:\n LOG.error(\"The node id not set in config. \"\n \"Run program with config option first\")\n exit(1)\n\n self.backend = self.transport_class.from_config(\n CONFIG, **vars(self.args))\n load_plugins(CONFIG)\n self.sessions = {}\n self.matcher = Matcher(self.node_id, self.backend.meta())\n\n LOG.info(\"Starting node\")\n self.details()\n self._sig_int = signal.getsignal(signal.SIGINT)\n self._sig_term = signal.getsignal(signal.SIGTERM)\n\n if os.name == 'nt':\n # Use Ctrl+C to invoke clean on Windows\n import win32api\n win32api.SetConsoleCtrlHandler(self.clean, True)\n else:\n signal.signal(signal.SIGINT, self._handle_terminate)\n signal.signal(signal.SIGTERM, self._handle_terminate)\n\n # Invoke clean for sessions\n signal.signal(signal.SIGHUP, self.clean)\n\n if not self.backend.prepare():\n LOG.info(\"Cannot start transport backend\")\n self._handle_terminate()\n exit(1)\n\n def request_processor():\n req_queue = self.backend.consume_queue('requests',\n ident=\"DISPATCHER\")\n poller = self.backend.create_poller(req_queue)\n while not self.running.is_set():\n try:\n ready = poller.poll(200)\n if not ready:\n continue\n if req_queue in ready:\n message = req_queue.recv()[0]\n if not message:\n continue\n job = JobTarget.build(message)\n if job:\n self.target_match(job)\n except ConnectionError:\n break\n except Exception:\n continue\n req_queue.close()\n\n Thread(target=request_processor).start()\n\n self.backend.loop()\n\n LOG.info(\"Node exited\")", "def start(port, table_size, update_size, update_rate):\n app = make_app(table_size, update_size, update_rate)\n app.listen(port)\n logging.critical(\"Listening on http://localhost:{}\".format(port))\n loop = tornado.ioloop.IOLoop.current()\n loop.start()" ]
[ "0.73095095", "0.6589419", "0.6576443", "0.64658266", "0.6464839", "0.6164005", "0.613992", "0.58848023", "0.58216435", "0.57985985", "0.5750669", "0.5724207", "0.5703905", "0.56819606", "0.567644", "0.5631442", "0.5608709", "0.56047523", "0.55997616", "0.55934083", "0.5542906", "0.55144155", "0.55073684", "0.5499866", "0.5492536", "0.548476", "0.5482419", "0.5479538", "0.54782116", "0.547308" ]
0.8142196
0
Random display of 25 fonts
def lf(): return random.sample(font_list, 25)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drawtext(self, drawer):\n rand_chars = self.randchars()\n font = ImageFont.truetype(self._font_face, self._font_size)\n font_width, font_height = font.getsize(rand_chars)\n drawer.text(\n ((self._width - font_width) / 2,\n (self._height - font_height) / 2),\n rand_chars,\n font=font,\n fill=self.randcolor(0, 127)\n )\n\n return rand_chars", "def rd(text, on_color=None, attr=None,\n width=80, justify=\"center\"):\n rand_int = random.randint(1, len(font_list)+1)\n rand_color = color_dict.get(random.randint(30, 38))\n\n rand_font = font_list[rand_int]\n print(f\"Random font: {format(rand_font)}\")\n f = Figlet(\n font=rand_font, width=width,\n justify=justify\n )\n r = f.renderText(text)\n return colored(r, rand_color, on_color, attr)", "def mock_tweet():\n count = random.randint(70, 140)\n return ''.join([random.choice(string.letters) for i in xrange(count)])", "def random_text(n):\n start = random.choice(suffix_map.keys())\n for i in range(n):\n suffixes = suffix_map.get(start, None)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n random_text(n-i)\n return\n # choose a random suffix\n word = random.choice(suffixes)\n # Jodesty: *Need to learn how to format text output to fit on terminal screen\n output_words.append(word)\n # Jodesty: *what I have for now\n print word,\n start = shift(start, word)", "def generate_random_texts(n):\n assert n >= 0\n global FirstText, SecondText\n FirstText = str(\"\".join([random.choice(string.letters[:26]) for i in xrange(n)]))\n SecondText = str(\"\".join([random.choice(string.letters[:26]) for i in xrange(n)]))", "def test_generate_mine_counter_text(self):\n pg.font.init()\n font_surface = utils.generate_mine_counter_text(1)\n self.assertIsInstance(font_surface, pg.Surface)", "def test_generate_mine_text(self):\n pg.font.init()\n font_surface = utils.generate_mine_text(1)\n self.assertIsInstance(font_surface, pg.Surface)", "def generateRandomePlainText():\n randomPlainTextArray = [random.choice('0123456789abcdef')\n for n in range(24)]\n randomPlainText = \"\".join(randomPlainTextArray)\n return randomPlainText", "def generate_random_text(length):\r\n text = []\r\n for num in range(length):\r\n text.append(alphabet[random.randint(0, 25)])\r\n return(''.join(text))", "def generate_babble_text(self):\n markov_chain_output = []\n for n in range(self.number_of_sentences):\n sentence_length = random.randint(self.min_sentence_length, self.max_sentence_length)\n markov_chain_output.append(self.markov_chain.generate_sentence(sentence_length))\n\n random.shuffle(markov_chain_output)\n\n to_display = ''\n for i in markov_chain_output:\n to_display += i + '\\n'\n\n # Clears any old text in the display, then inserts the newly created text\n self.display.delete('1.0', tk.END)\n self.display.insert('1.0', to_display)", "def random_character(latin_chance=0.6):\n if random.random() < latin_chance:\n return random.choice(LATIN) + random.choice(LATIN)\n else:\n return random.choice(NON_LATIN)", "def random_text(self, n=100):\n # choose a random prefix (not weighted by frequency)\n start = random.choice(list(self.suffix_map.keys()))\n #print(\">>DEBUG | start is\", start)\n \n for i in range(n):\n #print(\">> DEBUG | i is\", n)\n suffixes = self.suffix_map.get(start, None)\n #print(\">> DEBUG | suffixes is\", suffixes)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n #print(\">> DEBUG | start isn't in map\")\n 
random_text(n-i)\n return\n\n # choose a random suffix\n word = random.choice(suffixes)\n #print(\">> DEBUG | word is\", word)\n print(word, end=' ')\n start = self.shift(start, word)", "def name_generator(size=8, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))", "def __getRandChar(self):\n return self.letterbag[random.randint(0,25)]", "def rand_string():\n out = ''\n for _ in range(24):\n out += choice(ascii_letters)\n return out", "def main(word_count=2, use_caps=False, use_leet=False, caps_percent=25, leet_percent=20):\n\n phrase = get_phrase(word_count)\n\n if use_caps:\n phrase = random_caps(phrase, caps_percent)\n\n if use_leet:\n phrase = random_characters(phrase, leet_percent)\n\n print(phrase)", "def generate_fantasy_title():\n d20 = random.randint(1, 20)\n if d20 <= 4:\n #genetive noun\n return fantasy_genetive[random.randint(0, len(fantasy_genetive) - 1)] + \" \" + fantasy_noun[random.randint(0, len(fantasy_noun) - 1)]\n elif d20 > 4 and d20 < 13: \n #The adj noun\n return \"The \" + fantasy_adj[random.randint(0, len(fantasy_adj) - 1)] + \" \" + fantasy_noun[random.randint(0, len(fantasy_noun) - 1)]\n elif d20 >= 13:\n #something of something\n return fantasy_noun[random.randint(0, len(fantasy_noun) - 1)] + \" of \" + fantasy_what_is_this[random.randint(0, len(fantasy_what_is_this) - 1)]", "def random_name(size=6):\r\n chars = string.ascii_uppercase + string.digits\r\n return 'test-' + ''.join(random.choice(chars) for x in range(size))", "def randomHelmet():\n return random.choice(HELMETS)", "def create(font_name, point):\n return pygame.font.SysFont(font_name, int(point))", "def gen_random_chars(n: int = 10) -> Text:\n if n < 1:\n raise Exception('Number of random chars to generate has to be > 0')\n\n return ''.join(choice(ascii_lowercase + '-_')\n for i in range(n))", "def create_random_text(word_count=10):\n sample_text_lst = TEXT_BASE_RUS.replace('\\n', '').split(' ')\n generate_text_lst = []\n for i in range(word_count):\n generate_text_lst.append(random.choice(sample_text_lst))\n generate_text = ' '.join(generate_text_lst)\n return generate_text", "def generate_rnd_msg() -> str:\n\n char_num = random.randint(8,20)\n i = 0\n s = \"\"\n for n in range(char_num):\n if i == char_num:\n break\n rnd_char = random.randint(0, len(string.ascii_lowercase) - 1)\n s += string.ascii_lowercase[rnd_char]\n i += 1\n\n return s", "def random_color_func(word=None, font_size=None, position=None,\n orientation=None, font_path=None, random_state=None):\n if random_state is None:\n random_state = Random()\n return \"hsl(%d, 80%%, 50%%)\" % random_state.randint(0, 255)", "def get_random(self,num):\n return ''.join(sample('abcdefghijklmnopqrstuvwxyz1234567890!',8))", "def system_font_demo(my_canvas, fonts):\n pos_y = 750\n for font in fonts:\n try:\n ttf = TTFont(font, fonts[font])\n except:\n # Skip this font\n continue\n\n pdfmetrics.registerFont(ttf)\n\n my_canvas.setFont(font, 12)\n my_canvas.drawString(30, pos_y, font)\n pos_y -= 10\n if pos_y < 40:\n my_canvas.showPage()\n pos_y = 750", "def randchars(self):\n samples = random.sample(self._charset, self._text_length)\n return ''.join(samples)", "def generateColor(text):\n random.seed(text)\n return ('#%06X' % random.randint(0,0xFFFFFF))", "def generate_fonts_doc() -> None:\n text = 'pygame menu'\n save_font_image(pygame_menu.font.FONT_8BIT, text, '_static/font_8bit.png')\n save_font_image(pygame_menu.font.FONT_BEBAS, text, '_static/font_bebas.png')\n 
save_font_image(pygame_menu.font.FONT_COMIC_NEUE, text, '_static/font_comic_neue.png')\n save_font_image(pygame_menu.font.FONT_DIGITAL, text, '_static/font_digital.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE, text, '_static/font_firacode.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_BOLD, text, '_static/font_firacode_bold.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_BOLD_ITALIC, text, '_static/font_firacode_bold_italic.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_ITALIC, text, '_static/font_firacode_italic.png')\n save_font_image(pygame_menu.font.FONT_FRANCHISE, text, '_static/font_franchise.png')\n save_font_image(pygame_menu.font.FONT_HELVETICA, text, '_static/font_helvetica.png')\n save_font_image(pygame_menu.font.FONT_MUNRO, text, '_static/font_munro.png')\n save_font_image(pygame_menu.font.FONT_NEVIS, text, '_static/font_nevis.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS, text, '_static/font_open_sans.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_BOLD, text, '_static/font_open_sans_bold.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_ITALIC, text, '_static/font_open_sans_italic.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_LIGHT, text, '_static/font_open_sans_light.png')\n save_font_image(pygame_menu.font.FONT_PT_SERIF, text, '_static/font_pt_serif.png')", "def getrandomcolor(self) -> str:\n return self.tab10[random.randint(0, len(self.tab10)-1)]" ]
[ "0.6392809", "0.6286903", "0.62738657", "0.6246008", "0.6230189", "0.62171894", "0.616977", "0.61241275", "0.609214", "0.59919924", "0.59631574", "0.59398395", "0.59008574", "0.58772904", "0.5865573", "0.58576256", "0.5825984", "0.5825982", "0.58239526", "0.58212125", "0.5809161", "0.58050096", "0.58032197", "0.57898515", "0.5784998", "0.5780204", "0.5766249", "0.575889", "0.5750898", "0.5736376" ]
0.8467218
0
An art font that generates random fonts and random colors.
def rd(text, on_color=None, attr=None, width=80, justify="center"): rand_int = random.randint(1, len(font_list)+1) rand_color = color_dict.get(random.randint(30, 38)) rand_font = font_list[rand_int] print(f"Random font: {format(rand_font)}") f = Figlet( font=rand_font, width=width, justify=justify ) r = f.renderText(text) return colored(r, rand_color, on_color, attr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lf():\n return random.sample(font_list, 25)", "def test_generate_mine_text(self):\n pg.font.init()\n font_surface = utils.generate_mine_text(1)\n self.assertIsInstance(font_surface, pg.Surface)", "def create(font_name, point):\n return pygame.font.SysFont(font_name, int(point))", "def random_color_func(word=None, font_size=None, position=None,\n orientation=None, font_path=None, random_state=None):\n if random_state is None:\n random_state = Random()\n return \"hsl(%d, 80%%, 50%%)\" % random_state.randint(0, 255)", "def generateColor(text):\n random.seed(text)\n return ('#%06X' % random.randint(0,0xFFFFFF))", "def create_text(text, font_size, bold, text_color):\n myfont = pygame.font.SysFont(\"Courier\", font_size, bold)\n surface = myfont.render(text,True,text_color)\n return surface", "def _create_font(cls, font, size):\n if font[-4:] in (\".ttf\", \".otf\"):\n return pygame.font.Font(font, size)\n else:\n return pygame.font.SysFont(font, size)", "def generate_fonts_doc() -> None:\n text = 'pygame menu'\n save_font_image(pygame_menu.font.FONT_8BIT, text, '_static/font_8bit.png')\n save_font_image(pygame_menu.font.FONT_BEBAS, text, '_static/font_bebas.png')\n save_font_image(pygame_menu.font.FONT_COMIC_NEUE, text, '_static/font_comic_neue.png')\n save_font_image(pygame_menu.font.FONT_DIGITAL, text, '_static/font_digital.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE, text, '_static/font_firacode.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_BOLD, text, '_static/font_firacode_bold.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_BOLD_ITALIC, text, '_static/font_firacode_bold_italic.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_ITALIC, text, '_static/font_firacode_italic.png')\n save_font_image(pygame_menu.font.FONT_FRANCHISE, text, '_static/font_franchise.png')\n save_font_image(pygame_menu.font.FONT_HELVETICA, text, '_static/font_helvetica.png')\n save_font_image(pygame_menu.font.FONT_MUNRO, text, '_static/font_munro.png')\n save_font_image(pygame_menu.font.FONT_NEVIS, text, '_static/font_nevis.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS, text, '_static/font_open_sans.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_BOLD, text, '_static/font_open_sans_bold.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_ITALIC, text, '_static/font_open_sans_italic.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_LIGHT, text, '_static/font_open_sans_light.png')\n save_font_image(pygame_menu.font.FONT_PT_SERIF, text, '_static/font_pt_serif.png')", "def test_generate_mine_counter_text(self):\n pg.font.init()\n font_surface = utils.generate_mine_counter_text(1)\n self.assertIsInstance(font_surface, pg.Surface)", "def drawtext(self, drawer):\n rand_chars = self.randchars()\n font = ImageFont.truetype(self._font_face, self._font_size)\n font_width, font_height = font.getsize(rand_chars)\n drawer.text(\n ((self._width - font_width) / 2,\n (self._height - font_height) / 2),\n rand_chars,\n font=font,\n fill=self.randcolor(0, 127)\n )\n\n return rand_chars", "def __init__(self, font, color=(255,255,255,255)):\r\n if not font.endswith('.png'):\r\n font += '.png'\r\n super(Pngfont, self).__init__(\"fonts/%s\" % font)\r\n self.font = font\r\n pixels = self.im.load()\r\n\r\n self.glyph_table = {}\r\n # Extract font information from top scanline of font image; create width,\r\n # height, tex_coord and vertices for each character.\r\n for v in range(95):\r\n x = (pixels[v * 2, 0][0] * 2.0) / self.ix\r\n y = ((pixels[v * 2, 0][1] + 8) * 2.0) / 
self.iy\r\n width = float(pixels[v * 2 + 1, 0][0])\r\n height = float(pixels[v * 2 + 1, 0][1])\r\n width_scale = width / self.ix\r\n height_scale = height / self.iy\r\n\r\n self.glyph_table[v] = [width, height,\r\n [(x + width_scale, y - height_scale),\r\n (x, y - height_scale),\r\n (x, y),\r\n (x + width_scale, y)],\r\n [(width, 0, 0), (0, 0, 0), (0, -height, 0), (width, -height, 0)]]\r\n\r\n alph = self.im.split()[-1] #keep alpha\r\n draw = ImageDraw.Draw(self.im)\r\n draw.rectangle((0, 1, self.ix, self.iy), fill=color)\r\n self.im.putalpha(alph)\r\n\r\n RGBs = 'RGBA' if self.alpha else 'RGB'\r\n self.image = self.im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()", "def __init__(self, font='mediumbold'):\n\tself.set_font(font)", "def generate_typeracer(text: str, output: str, fontname: str):\n # Wrap text and calculate dimensions\n lines = textwrap.wrap(text, width=56)\n height = 16 + len(lines) * 16\n\n # Load the font\n font = ImageFont.truetype(f\"./img/font/{fontname}.ttf\", 16)\n\n # Draw the text onto the image\n im = Image.new(\"RGBA\", (400, height), \"#2C2F33\")\n draw = ImageDraw.Draw(im)\n for i, line in enumerate(lines):\n draw.text((4, 4 + i * 16), line, font=font)\n\n # Save image to output file\n im.save(f\"./img/{output}\")", "def __init__(self, text, font, pos, color=(255, 255, 255)):\r\n self.pos = pos\r\n self.label = font.render(text, 1, color)", "def test_generate_timer_text(self):\n pg.font.init()\n font_surface = utils.generate_timer_text(1)\n self.assertIsInstance(font_surface, pg.Surface)", "def font(size=20, name=None):\n name = name or \"regular\"\n path = ROOT_DIR / \"wclib\" / \"assets\" / (name + \".ttf\")\n return pygame.font.Font(path, size)", "def create_font_data(self):\n font_data = FontImages()\n if not font_data.assert_data_correct():\n font_data.create_images()\n font_data.augment_data()", "def _generate_sequence_image(self, text, debug=False, font=None):\n\n # gen handwriten\n char_images = []\n default_config = {}\n\n ### remember add space to fix 13 character each line or 26 char for multiple lines\n\n # remove characters in text that not exist in etl character images\n text, missChar = self._remove_unknown_characters(text)\n # print('text after remove: ', text, missChar)\n #normalize text\n\n\n if len(text) == 0:\n text = str(random.randint(0, 100))\n\n # self.character_height = configAugment.getConfigHeightHW() # to help resize to self.height\n # self.height = self.character_height\n config = self._get_config_kata(text, default_config)\n \n\n # Calculate the average height of a character\n if self.deterministic:\n indices = {each_char: self._get_random_choice_index(each_char)\n for each_char in list(set(text))}\n else:\n indices = {}\n\n\n for _idx, each_char in enumerate(text):\n if (each_char == '・'):\n each_charChoice = '.'\n else:\n each_charChoice = each_char\n\n char_images.append(self._generate_single_image(\n each_char, config['text'][_idx], indices.get(each_charChoice, None))\n )\n\n # desired_height = max_height + 4\n # norm_img_seq = []\n # for each_img in char_images:\n # top_pad = desired_height - each_img.shape[0] - 3\n # norm_img_seq.append(np.pad(each_img, ((top_pad, 3), (0, 0)),\n # mode='constant', constant_values=self.background_value))\n\n\n\n image = np.concatenate(char_images, axis=1)\n _, width = image.shape\n paddingValue = 0\n if len(text) <= 13:\n paddingValue = max(0, self.configKata['ONE_LINE_WIDTH']- width)\n else:\n paddingValue = max(0, self.configKata['TWO_LINE_WIDTH'] - width)\n\n image = 
np.pad(image, ((0, 0), (0, paddingValue)),\n 'constant', constant_values=self.background_value)\n # cv2.imwrite('C:\\\\Users\\\\ABC\\\\Desktop\\\\deletetemp\\\\GenDataHWBB\\\\synthesizedKana\\\\test_final.png', image)\n\n\n # add padding space behind the final characters\n # image = np.pad(image, ((0, 0), (0, 10)),\n # mode='constant', constant_values=self.background_value)\n\n # image = self.augment.augment_line(image)\n\n return image, text", "def create_font(font_name, fit = True):\n font = {}\n try:\n numbers = Image.open(fonts_path + font_name + \".jpg\")\n if fit:\n numbers = images.fit_to_display(numbers, True)\n width, height = numbers.size\n font[\"d\"] = Image.open(fonts_path + \"degree.jpg\")\n font[\"d\"] = images.fit_to_display(font[\"d\"])\n font[\"p\"] = Image.open(fonts_path + \"percent.jpg\")\n font[\"p\"] = images.fit_to_display(font[\"p\"])\n font[\"m\"] = Image.open(fonts_path + \"am.jpg\")\n font[\"m\"] = images.fit_to_display(font[\"m\"], True)\n font[\"a\"] = Image.open(fonts_path + \"pm.jpg\")\n font[\"a\"] = images.fit_to_display(font[\"a\"], True)\n d_w, d_h = font[\"d\"].size\n font[\"d\"] = font[\"d\"].crop((10,0,d_w-10,d_w))\n box_width = float(width)/10 \n #Crop out each character in the provided image and save that to a dictionary\n for i in range(0, 10):\n box = [int(round(i*(box_width))), 0, int(round((i + 1)*(box_width))), height]\n #Checks if a subrectangle passes the width of the image, and shortens it if necessary\n if box[3] > width:\n box[3] = width\n \n box = tuple(box)\n font[str(i)] = numbers.crop(box) \n return font\n except IOError:\n print(\"Specified font file: %s.jpg cannot be found at: %s\" % (font_name,fonts_path))", "def __init__(self, font=None, size=24, text=\"\"):\n self.font_name = font\n self.font_size = size\n self.color_fg = Color(\"white\")\n self.color_bg = Color(\"gray20\")\n\n self._aa = True\n self._text = text\n self.font = pygame.font.Font(font, size)\n self.screen = pygame.display.get_surface()\n\n self.dirty = True\n self.image = None\n self._render()", "def generate_new_font(self, font_file_name, prepend=\"gap_\"):\n\n f = open(font_file_name)\n out_font_filename = prepend + font_file_name\n fo = open(out_font_filename, \"wb\")\n\n fo.write(f.readline())\n fo.write(f.readline())\n fo.write(f.readline())\n\n line = f.readline().split(\" \")\n out_texture_filename = prepend + line[0]\n fo.write(\"%s %s %s\\n\" % (out_texture_filename, self.w, self.h))\n texture_filename = line[0]\n texture_size = ( int(line[1]), int(line[2]) )\n self.open_images(texture_filename, texture_size[0], texture_size[1])\n for i in range(256):\n line = f.readline().split(\" \")\n # ascii, char_x, char_y, byteWidth, byteHeight, xOffset, yOffset, screenWidth, screenHeight\n if i != int(line[0]): raise ValueError, \"font loading error\"\n x, y = (int(line[1]), int(line[2]))\n w, h = (int(line[3]), int(line[4]))\n\n newpos = self.copy_char(x, y, w, h)\n line[1] = str(newpos[0])\n line[2] = str(newpos[1])\n fo.write(\" \".join(line))\n\n line = f.readline()\n fo.write(line)\n line = line.split(\" \")\n\n self.image_out.save(out_texture_filename)\n print \"wrote '%s' and '%s'\" % (out_font_filename, out_texture_filename)", "def makeText(colour, size, text, bgcolour, textSize=15):\n sx = int((len(text)+1)*textSize/2.5)\n size = (sx, size[1])\n image = pygame.Surface(size)\n image.fill(bgcolour)\n font = pygame.font.SysFont(None, textSize)\n txtSurface = font.render(text, False, colour, bgcolour)\n tx = (image.get_width() - txtSurface.get_width())/2\n 
image.blit(txtSurface, (tx, size[1]/2))\n image.convert()\n return image", "def setup(theme='DarkAmber'):\r\n sg.theme(theme)\r\n\r\n headline_font = ('Arial bold', 20)\r\n font = ('Arial', 20)\r\n warning_font = ('Arial bold', 14)\r\n button_font = ('Arial', 14)\r\n\r\n return headline_font, font, warning_font, button_font", "def create_surface_with_text(text, font_size, text_rgb, bg_rgb):\r\n font = pygame.freetype.SysFont(\"Courier\", font_size, bold=True)\r\n surface, _ = font.render(text=text, fgcolor=text_rgb, bgcolor=bg_rgb)\r\n return surface.convert_alpha()", "def create_text(text, font, colour, position):\n _text = font.render(text, False, colour)\n _text_rect = _text.get_rect()\n _text_rect.center = position # place text centered on given position\n\n return {'surface': _text, 'rect': _text_rect}", "def draw_text(self, text, font, color, surface, x, y): #use for narrative in end sequence\n text_obj = font.render(text, True, color)\n text_rect = text_obj.get_rect()\n text_rect.center = (x, y)\n surface.blit(text_obj, text_rect)", "def _text16(self, font, text, x0, y0, color=WHITE, background=BLACK):\n for char in text:\n ch = ord(char)\n if (font.FIRST <= ch < font.LAST\n and x0+font.WIDTH <= self.width\n and y0+font.HEIGHT <= self.height):\n\n each = 16\n if font.HEIGHT == 16:\n passes = 2\n size = 32\n else:\n passes = 4\n size = 64\n\n for line in range(passes):\n idx = (ch-font.FIRST)*size+(each*line)\n buffer = struct.pack(\n '>128H',\n color if font.FONT[idx] & _BIT7 else background,\n color if font.FONT[idx] & _BIT6 else background,\n color if font.FONT[idx] & _BIT5 else background,\n color if font.FONT[idx] & _BIT4 else background,\n color if font.FONT[idx] & _BIT3 else background,\n color if font.FONT[idx] & _BIT2 else background,\n color if font.FONT[idx] & _BIT1 else background,\n color if font.FONT[idx] & _BIT0 else background,\n color if font.FONT[idx+1] & _BIT7 else background,\n color if font.FONT[idx+1] & _BIT6 else background,\n color if font.FONT[idx+1] & _BIT5 else background,\n color if font.FONT[idx+1] & _BIT4 else background,\n color if font.FONT[idx+1] & _BIT3 else background,\n color if font.FONT[idx+1] & _BIT2 else background,\n color if font.FONT[idx+1] & _BIT1 else background,\n color if font.FONT[idx+1] & _BIT0 else background,\n color if font.FONT[idx+2] & _BIT7 else background,\n color if font.FONT[idx+2] & _BIT6 else background,\n color if font.FONT[idx+2] & _BIT5 else background,\n color if font.FONT[idx+2] & _BIT4 else background,\n color if font.FONT[idx+2] & _BIT3 else background,\n color if font.FONT[idx+2] & _BIT2 else background,\n color if font.FONT[idx+2] & _BIT1 else background,\n color if font.FONT[idx+2] & _BIT0 else background,\n color if font.FONT[idx+3] & _BIT7 else background,\n color if font.FONT[idx+3] & _BIT6 else background,\n color if font.FONT[idx+3] & _BIT5 else background,\n color if font.FONT[idx+3] & _BIT4 else background,\n color if font.FONT[idx+3] & _BIT3 else background,\n color if font.FONT[idx+3] & _BIT2 else background,\n color if font.FONT[idx+3] & _BIT1 else background,\n color if font.FONT[idx+3] & _BIT0 else background,\n color if font.FONT[idx+4] & _BIT7 else background,\n color if font.FONT[idx+4] & _BIT6 else background,\n color if font.FONT[idx+4] & _BIT5 else background,\n color if font.FONT[idx+4] & _BIT4 else background,\n color if font.FONT[idx+4] & _BIT3 else background,\n color if font.FONT[idx+4] & _BIT2 else background,\n color if font.FONT[idx+4] & _BIT1 else background,\n color if font.FONT[idx+4] & 
_BIT0 else background,\n color if font.FONT[idx+5] & _BIT7 else background,\n color if font.FONT[idx+5] & _BIT6 else background,\n color if font.FONT[idx+5] & _BIT5 else background,\n color if font.FONT[idx+5] & _BIT4 else background,\n color if font.FONT[idx+5] & _BIT3 else background,\n color if font.FONT[idx+5] & _BIT2 else background,\n color if font.FONT[idx+5] & _BIT1 else background,\n color if font.FONT[idx+5] & _BIT0 else background,\n color if font.FONT[idx+6] & _BIT7 else background,\n color if font.FONT[idx+6] & _BIT6 else background,\n color if font.FONT[idx+6] & _BIT5 else background,\n color if font.FONT[idx+6] & _BIT4 else background,\n color if font.FONT[idx+6] & _BIT3 else background,\n color if font.FONT[idx+6] & _BIT2 else background,\n color if font.FONT[idx+6] & _BIT1 else background,\n color if font.FONT[idx+6] & _BIT0 else background,\n color if font.FONT[idx+7] & _BIT7 else background,\n color if font.FONT[idx+7] & _BIT6 else background,\n color if font.FONT[idx+7] & _BIT5 else background,\n color if font.FONT[idx+7] & _BIT4 else background,\n color if font.FONT[idx+7] & _BIT3 else background,\n color if font.FONT[idx+7] & _BIT2 else background,\n color if font.FONT[idx+7] & _BIT1 else background,\n color if font.FONT[idx+7] & _BIT0 else background,\n color if font.FONT[idx+8] & _BIT7 else background,\n color if font.FONT[idx+8] & _BIT6 else background,\n color if font.FONT[idx+8] & _BIT5 else background,\n color if font.FONT[idx+8] & _BIT4 else background,\n color if font.FONT[idx+8] & _BIT3 else background,\n color if font.FONT[idx+8] & _BIT2 else background,\n color if font.FONT[idx+8] & _BIT1 else background,\n color if font.FONT[idx+8] & _BIT0 else background,\n color if font.FONT[idx+9] & _BIT7 else background,\n color if font.FONT[idx+9] & _BIT6 else background,\n color if font.FONT[idx+9] & _BIT5 else background,\n color if font.FONT[idx+9] & _BIT4 else background,\n color if font.FONT[idx+9] & _BIT3 else background,\n color if font.FONT[idx+9] & _BIT2 else background,\n color if font.FONT[idx+9] & _BIT1 else background,\n color if font.FONT[idx+9] & _BIT0 else background,\n color if font.FONT[idx+10] & _BIT7 else background,\n color if font.FONT[idx+10] & _BIT6 else background,\n color if font.FONT[idx+10] & _BIT5 else background,\n color if font.FONT[idx+10] & _BIT4 else background,\n color if font.FONT[idx+10] & _BIT3 else background,\n color if font.FONT[idx+10] & _BIT2 else background,\n color if font.FONT[idx+10] & _BIT1 else background,\n color if font.FONT[idx+10] & _BIT0 else background,\n color if font.FONT[idx+11] & _BIT7 else background,\n color if font.FONT[idx+11] & _BIT6 else background,\n color if font.FONT[idx+11] & _BIT5 else background,\n color if font.FONT[idx+11] & _BIT4 else background,\n color if font.FONT[idx+11] & _BIT3 else background,\n color if font.FONT[idx+11] & _BIT2 else background,\n color if font.FONT[idx+11] & _BIT1 else background,\n color if font.FONT[idx+11] & _BIT0 else background,\n color if font.FONT[idx+12] & _BIT7 else background,\n color if font.FONT[idx+12] & _BIT6 else background,\n color if font.FONT[idx+12] & _BIT5 else background,\n color if font.FONT[idx+12] & _BIT4 else background,\n color if font.FONT[idx+12] & _BIT3 else background,\n color if font.FONT[idx+12] & _BIT2 else background,\n color if font.FONT[idx+12] & _BIT1 else background,\n color if font.FONT[idx+12] & _BIT0 else background,\n color if font.FONT[idx+13] & _BIT7 else background,\n color if font.FONT[idx+13] & _BIT6 else background,\n color 
if font.FONT[idx+13] & _BIT5 else background,\n color if font.FONT[idx+13] & _BIT4 else background,\n color if font.FONT[idx+13] & _BIT3 else background,\n color if font.FONT[idx+13] & _BIT2 else background,\n color if font.FONT[idx+13] & _BIT1 else background,\n color if font.FONT[idx+13] & _BIT0 else background,\n color if font.FONT[idx+14] & _BIT7 else background,\n color if font.FONT[idx+14] & _BIT6 else background,\n color if font.FONT[idx+14] & _BIT5 else background,\n color if font.FONT[idx+14] & _BIT4 else background,\n color if font.FONT[idx+14] & _BIT3 else background,\n color if font.FONT[idx+14] & _BIT2 else background,\n color if font.FONT[idx+14] & _BIT1 else background,\n color if font.FONT[idx+14] & _BIT0 else background,\n color if font.FONT[idx+15] & _BIT7 else background,\n color if font.FONT[idx+15] & _BIT6 else background,\n color if font.FONT[idx+15] & _BIT5 else background,\n color if font.FONT[idx+15] & _BIT4 else background,\n color if font.FONT[idx+15] & _BIT3 else background,\n color if font.FONT[idx+15] & _BIT2 else background,\n color if font.FONT[idx+15] & _BIT1 else background,\n color if font.FONT[idx+15] & _BIT0 else background\n )\n self.blit_buffer(buffer, x0, y0+8*line, 16, 8)\n x0 += font.WIDTH", "def gen_captcha(text, fnt, fnt_sz, file_name, fmt='JPEG'):\n # randomly select the foreground color\n fgcolor = random.randint(0,0xffff00)\n # make the background color the opposite of fgcolor\n bgcolor = fgcolor ^ 0xffffff\n # create a font object \n font = ImageFont.truetype(fnt,fnt_sz)\n # determine dimensions of the text\n dim = font.getsize(text)\n # create a new image slightly larger that the text\n im = Image.new('RGB', (dim[0]+5,dim[1]+5), bgcolor)\n d = ImageDraw.Draw(im)\n x, y = im.size\n r = random.randint\n # draw 100 random colored boxes on the background\n for num in range(100):\n d.rectangle((r(0,x),r(0,y),r(0,x),r(0,y)),fill=r(0,0xffffff))\n # add the text to the image\n d.text((3,3), text, font=font, fill=fgcolor)\n im = im.filter(ImageFilter.EDGE_ENHANCE_MORE)\n # save the image to a file\n im.save(file_name, format=fmt)", "def create_surface_with_text(text, font_size, text_rgb, bg_rgb):\n font = pygame.freetype.SysFont(\"Courier\", font_size, bold=True)\n surface, _ = font.render(text=text, fgcolor=text_rgb, bgcolor=bg_rgb)\n return surface.convert_alpha()", "def create_surface_with_text(text, font_size, text_rgb, bg_rgb):\n font = pygame.freetype.SysFont(\"Courier\", font_size, bold=False)\n surface, _ = font.render(text=text, fgcolor=text_rgb, bgcolor=bg_rgb)\n return surface.convert_alpha()" ]
[ "0.72884387", "0.64297265", "0.64126635", "0.62243783", "0.6147591", "0.61150885", "0.60803175", "0.60634375", "0.60376585", "0.6032824", "0.60188615", "0.5972626", "0.59216464", "0.59104794", "0.5828189", "0.5799208", "0.57634753", "0.5763224", "0.57085055", "0.565248", "0.56401956", "0.5636327", "0.56173366", "0.5616793", "0.5616098", "0.56019443", "0.55986816", "0.5594569", "0.5585174", "0.55421704" ]
0.7143183
1
Gets the ith bit (zero-indexed).
def get_bit(num, i): return 1 if num & 1 << i else 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bit(num, i):\n return num & (1 << i) != 0", "def __getitem__(self, index):\n nth_int, nth_bit = divmod(index, BitArray._UNSIGNED_INT)\n return self.bits[nth_int] & (1 << nth_bit)", "def _get_bit(byte, ii):\n return (byte >> (7 - ii)) & 1", "def bit_get(val, idx):\n return (val >> idx) & 1", "def getBit(self,i):\n return self.boolVals[i]", "def bitget(x, n):\n return (x >> n) & 1", "def get_jth_bit(x,j):\n return 1 & (x >> j)", "def get_bit(reg,n_bit):\n return reg >> n_bit & 1", "def get_bit(x, k):\n\n return (x >> k) & 1", "def get_bit(byte, bit_num):\n return (byte & (1 << bit_num)) >> bit_num", "def index_of_least_significant_zero_bit(self, value):\n\n index = 1\n while (value & 1) != 0:\n value >>= 1\n index += 1\n return index", "def bit_component(x, i):\n return (x & 2**i) >> i", "def bit(self, idx: int) -> int:\n pos = self.start() + idx\n chunk = self.raw_key()[(pos // 8)]\n bit = pos % 8\n return ((1 << bit) & chunk) >> bit", "def get_bit(num, position):\n\treturn (num >> position) & 0b1", "def _get_bit(self, num, bit, mask=1):\n return (int(num) >> bit) & mask", "def __getitem__(self, n):\n return (self.num >> np.uint64(n)) & UINT64_ONE", "def get_bit(self):\n try:\n current_byte = self.contents[self.current_bit_position >> 3]\n except IndexError:\n raise EmptyStreamError(f\"Attempting read at bit position {self.current_bit_position} \"\n f\"(byte {self.current_bit_position >> 3})\")\n bit = min(1, current_byte & (1 << (7 - (self.current_bit_position % 8))))\n self.current_bit_position += 1\n return bit", "def get_integer_from(bool_array: List[bool]) -> int:\n return_value = 0\n for i in range(0, MAX_BIT_LENGTH - 1):\n return_value |= (1 << i) if bool_array[i] else 0\n return return_value", "def getbit(self, key, offset):\n key = self._encode(key)\n index, bits, mask = self._get_bits_and_offset(key, offset)\n\n if index >= len(bits):\n return 0\n\n return 1 if (bits[index] & mask) else 0", "def get_bit_position(x, k):\n\n return x & (1 << k)", "def set_bit(num, i):\n return num | (1 << i)", "def get_bit(a, bit_pos):\n return np.clip(np.bitwise_and(a, 2 ** (bit_pos-1)), 0, 1)", "def get_lowest_set_bit(x):\n\n return x & -x", "def get_lowest_unset_bit(x):\n\n return ~x & (x + 1)", "def get_first(x, get_bit_array_indices):\n\n bit_array = get_bit_array_indices(x)\n return get_lowest_set_bit(bit_array)", "def msb(n: int) -> int:\n ndx = 0\n while ( 1 < n ):\n n = ( n >> 1 )\n ndx += 1\n return ndx", "def get_least_significant_bits(x, n):\n\n return x & ones(n)", "def _lsb(self, i : int) -> int:\n\n return i & -i", "def get_bit(self, register: str, n_bit: int):\n byte = self.get_byte(register)\n return byte[::-1][n_bit]", "def _bits(num):\r\n return bin(int(num))[2:]" ]
[ "0.7826954", "0.7410046", "0.7386657", "0.73263216", "0.7222269", "0.7176076", "0.7137559", "0.71253127", "0.6973061", "0.6967496", "0.6958228", "0.69483864", "0.694268", "0.68543786", "0.6816369", "0.6688558", "0.66808105", "0.6567417", "0.6564154", "0.6555715", "0.653013", "0.65284437", "0.6439715", "0.64238584", "0.63242465", "0.6303704", "0.6255033", "0.6215915", "0.62121177", "0.6185622" ]
0.81653005
0
set the kromosom by finding the binary representation of the solution to form an 8-bit kromosom
def setKromosom(self,x,y): binx = bin(x)[2:].zfill(4) biny = bin(y)[2:].zfill(4) self.kromosom = list(binx+biny)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getKromosom(self):\n intx = int(\"\".join(self.kromosom[:4]),2)\n inty = int(\"\".join(self.kromosom[4:]),2)\n return [intx,inty]", "def generateKromosom(self):\n result = []\n # looping sebanyak panjangKromosom\n for _ in range(self.panjangKromosom):\n # generate angka random 0 atau 1\n result.append(random.randint(0, 1))\n return result", "def dekodeKromosom(self, kromosom):\n xMin, xMaks = self.batas[0]\n yMin, yMaks = self.batas[1]\n t, x, y = 0, 0, 0\n n = (self.panjangKromosom)//2\n for i in range(0, n):\n t += 2**(-(i+1))\n for i in range(0, n):\n x += kromosom[i] * 2**-(i+1)\n y += kromosom[n + i] * 2**-(i+1)\n x *= (xMaks - xMin / t)\n y *= (yMaks - yMin / t)\n x += xMin\n y += yMin\n return [x, y]", "def ask_KUMIKI():\n # TSUGITE\n TSUGITE_strings = ['ARI', 'KAMA', 'RYAKUKAMA', 'MECHIGAI', 'AIKAKI','KOSHIKAKE', 'HAKO']\n message = 'Which TSUGITE to make on legs?'\n\n TSUGITE_name = rs.GetString(message, 'ARI', TSUGITE_strings)\n\n # SIGUCHI\n SHIGUCHI_strings = ['TOME', 'IRIWA', 'SANMAIKUMI', 'AIKAKI', 'HAKO']\n message = 'Which SHIGUCHI to make at corner?'\n\n SHIGUCHI_name = rs.GetString(message, 'IRIWA', SHIGUCHI_strings)\n\n print ('TSUGITE : %s' % TSUGITE_name)\n print ('SHIGUCHI : %s' % SHIGUCHI_name)\n\n \"\"\"\n Get ofset num.\n \"\"\"\n minimum = 0\n maximum = 0.3\n\n offset = rs.GetReal(\"Put the offset num to fit KUMIKI tight. (0.0 < offset < 0.3)\",\\\n 0.15, minimum, maximum)\n\n # NOTE: offset num is not parametric number. It's always fixed.\n\n return TSUGITE_name, SHIGUCHI_name, offset", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def vloz (pole, pozice, znak):\n for k in pozice:\n pole [k[1]] [k[0]]= znak", "def mutasi(self, keturunan):\n for i in range(self.panjangKromosom):\n if random.uniform(0, 1) < self.probMutasi:\n # membalik nilai bit nya\n keturunan[0][i] = 1 - keturunan[0][i]\n keturunan[1][i] = 1 - keturunan[1][i]\n return keturunan", "def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. 
In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()", "def set_gKanal(self, mapa):\r\n noviKanal = mapa['programMjerenjaId']\r\n if self.gKanal != noviKanal:\r\n self.gKanal = noviKanal", "def update_kanda(self, kanda):\n\t\tself.subvarga = ''\n\t\tself.subvargaNum = 1\n\t\tself.varga = ''\n\t\tself.vargaNum = 1\n\t\tself.kanda = kanda\n\t\tself.kandaNum += 1", "def setLabels(self):\n #productive\n profprint()\n self.option = {0:'Ba',\n 1:'Bb',\n 2:'Bc',\n 3:'Bd',\n 4:'Be',\n 5:'Bf',\n 6:'Bg',\n 7:'Bh',\n 8:'Bi',\n 9:'Bj',\n 10:'Bk',\n 11:'Bl',\n 12:'Ca',\n 13:'Cb',\n 14:'Cc',\n 15:'Cd',\n 16:'Ce',\n 17:'Cf',\n 18:'Cg',\n 19:'Ch',\n 20:'Ci',\n 21:'Cj',\n 22:'Ck',\n 23:'Cl',\n 24:'Cm',\n 25:'Cn',\n 26:'Co',\n 27:'Cp',\n 28:'Cq',\n 29:'Cr',\n 30:'Da',\n 31:'Db',\n 32:'Dc',\n 33:'Dd',\n 34:'De',\n 35:'Df',\n 36:'Dg',\n 37:'Dh',\n 38:'Di',\n 39:'Dj',\n 40:'Ea',\n 41:'Eb',\n 42:'Ec',\n 43:'Ed',\n 44:'Ee',\n 45:'Ef',\n 46:'Eg',\n 47:'Eh',\n 48:'Aa',\n 49:'Ab',\n 50:'Ac',\n 51:'Ad',\n 52:'Ae',\n 53:'Af',\n 54:'Iu', \n 55:'Fa',\n 56:'Fb',\n 57:'Fc',\n 58:'Fd',\n 59:'Fe',\n 60:'Ff',\n 61:'Fg',\n 62:'Fh',\n 63:'--'}\n\n return self.option", "def makeBinary(self):\r\n\t\tls = 5.12 #limite superior\r\n\t\tli = -5.12 #limite inferior\r\n\t\tt = 14 # total de binarios\r\n\t\t\r\n\t\tcadena_bits = \"\"\r\n\t\tfor i in self.values:\r\n\t\t\tentero = (int) ( ( ( i - li ) * ( 2 ** t ) ) / ( ls - li ) )\r\n\t\t\t#print entero\r\n\t\t\tcadena_bits += \"{0:b}\".format(entero).zfill(14)\r\n\t\t\t\r\n\t\tself.cadenaBits = cadena_bits\r\n\t\treturn cadena_bits", "def check_BDT_simulations_slice_KS(bolo_name, analysis_type, mass):\n\n\tplt.ion()\n\n\tpop_path = \"../Analyse_\" + bolo_name + \"/Populations/Pop_for_scaling/\"\n\tBDT_path = \"/home/irfulx204/mnt/tmain/Desktop/Run308_BDT_simu_better/BDT_\" + bolo_name + \"/\" + analysis_type + \"/\"\n\n\tttrue,ftrue = PyRPl.open_ROOT_object(\"../Fond_ERA_merged/\" + bolo_name + \"_\" + analysis_type + \"_lowmass_fond.root\", \"t_merged\")\n\ttsimu, fsimu = PyRPl.open_ROOT_object(BDT_path +\"True_events/ROOT_files/\" + bolo_name + \"_true_events_tree.root\", \"t_new0\")\n\n\tprint \"true: \", ttrue.GetEntries(\"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\tprint \"simu: \", tsimu.GetEntries(\"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\n\tttrue.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist(1000,-2,15,1000,-2,15\", \"\")\n\ttsimu.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist2(1000,-2,15,1000,-2,15\", \"\")\n\n\t# 
ttrue.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\t# tsimu.Draw(\"0.5*(EIB+EID):0.5*(EC1+EC2)>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\t# ttrue.Draw(\"0.414*EIB+(1-0.414)*EID:0.574*EC1+(1-0.574)*EC2>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0 && EIA<2 && EIC<2\")\n\t# tsimu.Draw(\"0.414*EIB+(1-0.414)*EID:0.574*EC1+(1-0.574)*EC2>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\n\t# ttrue.Draw(\"EIB:EID>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0 && EIA<2 && EIC<2\")\n\t# tsimu.Draw(\"EIB:EID>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\t# ttrue.Draw(\"EC1:EC2>>hist(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0 && EIA<2 && EIC<2\")\n\t# tsimu.Draw(\"EC1:EC2>>hist2(1000,-2,15,1000,-2,15\", \"EIB>2 && EID>2 && abs(EIB-EID)<1 && abs(EIB-EC1)<4 && EC1>2.0\")\n\n\thist.SetMarkerColor(kRed)\n\thist.SetMarkerStyle(20)\n\thist2.SetMarkerStyle(20)\n\thist.Draw()\n\thist2.Draw(\"same\")\n\n\traw_input()\n\n\t#Open event files\n\tdata_types = {\"names\": (\"EC1\", \"EC2\", \"EIA\", \"EIB\", \"EIC\", \"EID\"), \"formats\": (\"f\", \"f\", \"f\", \"f\", \"f\", \"f\")}\n\n\tarr_true = np.loadtxt(pop_path + bolo_name + \"_true_events_all.txt\", delimiter=\",\", dtype=data_types)\n\tarr_simu = np.loadtxt(pop_path + bolo_name + \"_simu_events_all.txt\", delimiter=\",\", dtype=data_types)\n\n\tEI_true = 0.5*(arr_true[\"EIB\"]+arr_true[\"EID\"])\n\tEC_true = 0.5*(arr_true[\"EC1\"]+arr_true[\"EC2\"])\n\n\tEI_simu = 0.5*(arr_simu[\"EIB\"]+arr_simu[\"EID\"])\n\tEC_simu = 0.5*(arr_simu[\"EC1\"]+arr_simu[\"EC2\"])\n\n\th2Darr = TH2F(\"h2Darr\", \"h2Darr\", 1000, -2, 15, 1000, -2, 15)\n\th2Dsimu = TH2F(\"h2Dsimu\", \"h2Dsimu\", 1000, -2, 15, 1000, -2, 15)\n\n\tfor i in range(EI_true.shape[0]):\n\t\th2Darr.Fill(EC_true[i], EI_true[i])\n\tfor i in range(EI_simu.shape[0]):\n\t\th2Dsimu.Fill(EC_simu[i],EI_simu[i])\n\n\tPyRPl.process_TH2(h2Darr, X_title = \"EC\", Y_title = \"EI\", color = kRed)\n\tPyRPl.process_TH2(h2Dsimu, X_title = \"EC\", Y_title = \"EI\", color = kBlack)\n\n\th2Darr.Draw()\n\th2Dsimu.Draw(\"same\")\n\n\t#Slices on EC\n\tfor EC in range(2,15):\n\t\tl_true = np.where(np.logical_and(EC_true>EC-1 , EC_true<EC))\n\t\tl_simu = np.where(np.logical_and(EC_simu>EC-1 , EC_simu<EC))\n\n\t\tslice_EI_true = EI_true[l_true]\n\t\tslice_EI_simu = EI_simu[l_simu]\n\n\t\tprint scipy.stats.ks_2samp(slice_EI_true, slice_EI_simu),\" \", 1.36*sqrt(len(slice_EI_true) + len(slice_EI_simu))/sqrt(len(slice_EI_true) * len(slice_EI_simu))\n\n\t\ttrue_cdf = sm.distributions.ECDF(slice_EI_true)\n\t\tsimu_cdf = sm.distributions.ECDF(slice_EI_simu)\n\n\t\tx_true = np.linspace(min(slice_EI_true), max(slice_EI_true))\n\t\tx_simu = np.linspace(min(slice_EI_simu), max(slice_EI_simu))\n\t\ty_true = true_cdf(x_true)\n\t\ty_simu = simu_cdf(x_simu)\n\n\t\tplt.step(x_true, y_true, \"r\", label = \"True IonFid CDF @ EC in [\" + str(EC-1) + \",\" + str(EC) + \"]\" )\n\t\tplt.step(x_simu, y_simu, \"k\", label = \"Simu IonFid CDF @ EC in [\" + str(EC-1) + \",\" + str(EC) + \"]\")\n\t\tplt.legend(loc=\"upper left\", prop={\"size\":10})\n\n\t\tplt.show()\n\t\traw_input()\n\t\tplt.clf()\n\n\t#Slices on EI\n\tfor EI in range(1,15):\n\t\tl_true = 
np.where(np.logical_and(EI_true>EI-1 , EI_true<EI))\n\t\tl_simu = np.where(np.logical_and(EI_simu>EI-1 , EI_simu<EI))\n\n\t\tslice_EC_true = EC_true[l_true]\n\t\tslice_EC_simu = EC_simu[l_simu]\n\n\t\tprint scipy.stats.ks_2samp(slice_EC_true, slice_EC_simu),\" \", 1.36*sqrt(len(slice_EC_true) + len(slice_EC_simu))/sqrt(len(slice_EC_true) * len(slice_EC_simu))\n\n\t\ttrue_cdf = sm.distributions.ECDF(slice_EC_true)\n\t\tsimu_cdf = sm.distributions.ECDF(slice_EC_simu)\n\n\t\tx_true = np.linspace(min(slice_EC_true), max(slice_EC_true))\n\t\tx_simu = np.linspace(min(slice_EC_simu), max(slice_EC_simu))\n\t\ty_true = true_cdf(x_true)\n\t\ty_simu = simu_cdf(x_simu)\n\n\t\tplt.step(x_true, y_true, \"r\", label = \"True IonFid CDF @ EI in [\" + str(EI-1) + \",\" + str(EI) + \"]\" )\n\t\tplt.step(x_simu, y_simu, \"k\", label = \"Simu IonFid CDF @ EI in [\" + str(EI-1) + \",\" + str(EI) + \"]\")\n\t\tplt.legend(loc=\"upper left\", prop={\"size\":10})\n\n\t\tplt.show()\n\t\traw_input()\n\t\tplt.clf()", "def setLabels(self):\r\n # productive\r\n profprint()\r\n self.option = {0:'Ba',\r\n 1:'Bb',\r\n 2:'Bc',\r\n 3:'Bd',\r\n 4:'Be',\r\n 5:'Bf',\r\n 6:'Bg',\r\n 7:'Bh',\r\n 8:'Bi',\r\n 9:'Bj',\r\n 10:'Bk',\r\n 11:'Bl',\r\n 12:'Ca',\r\n 13:'Cb',\r\n 14:'Cc',\r\n 15:'Cd',\r\n 16:'Ce',\r\n 17:'Cf',\r\n 18:'Cg',\r\n 19:'Ch',\r\n 20:'Ci',\r\n 21:'Cj',\r\n 22:'Ck',\r\n 23:'Cl',\r\n 24:'Cm',\r\n 25:'Cn',\r\n 26:'Co',\r\n 27:'Cp',\r\n 28:'Cq',\r\n 29:'Cr',\r\n 30:'Da',\r\n 31:'Db',\r\n 32:'Dc',\r\n 33:'Dd',\r\n 34:'De',\r\n 35:'Df',\r\n 36:'Dg',\r\n 37:'Dh',\r\n 38:'Di',\r\n 39:'Dj',\r\n 40:'Ea',\r\n 41:'Eb',\r\n 42:'Ec',\r\n 43:'Ed',\r\n 44:'Ee',\r\n 45:'Ef',\r\n 46:'Eg',\r\n 47:'Eh',\r\n 48:'Aa',\r\n 49:'Ab',\r\n 50:'Ac',\r\n 51:'Ad',\r\n 52:'Ae',\r\n 53:'Af',\r\n 54:'Iu',\r\n 55:'Fa',\r\n 56:'Fb',\r\n 57:'Fc',\r\n 58:'Fd',\r\n 59:'Fe',\r\n 60:'Ff',\r\n 61:'Fg',\r\n 62:'Fh',\r\n 63:'--'}\r\n\r\n return self.option", "def agregar_bolsa(self, letra, cantidad):", "def get_kpoints(self,ifwrite='yes'):\n a11 = float(self.lat[2].split()[0])\n a12 = float(self.lat[2].split()[1])\n a13 = float(self.lat[2].split()[2])\n a21 = float(self.lat[3].split()[0])\n a22 = float(self.lat[3].split()[1])\n a23 = float(self.lat[3].split()[2])\n a31 = float(self.lat[4].split()[0])\n a32 = float(self.lat[4].split()[1])\n a33 = float(self.lat[4].split()[2])\n \n x0 = [a11, a12, a13]\n x1 = [a21, a22, a23]\n x2 = [a31, a32, a33]\n \n self.natom = sum(list(map(int,self.lat[6].split())))\n # Number of atoms in POSCAR/CONTCAR\n \n l0 = np.linalg.norm(x0)\n l1 = np.linalg.norm(x1)\n l2 = np.linalg.norm(x2)\n\n self.cell_norm = [l0, l1, l2]\n \n N = (l0*l1*l2*self.kppra/self.natom)**(1.0/3.0)\n \n k0 = int(N/l0)\n k1 = int(N/l1)\n k2 = int(N/l2)\n\n klist = [k0,k1,k2]\n flag = 0\n kn = klist[:]\n\n if len(set(klist)) == 1:\n if (np.prod(np.array(kn))*self.natom) < self.kppra:\n kn = [v+1 for v in kn]\n elif len(set(klist)) == 3:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 3:\n kn[klist.index(sorted(klist)[flag])] += 1\n flag += 1\n else:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 2:\n tmp = sorted(set(klist))[flag]\n tmp_ind = []\n for i in range(3):\n if klist[i] == tmp:\n tmp_ind.append(i)\n kn = [kn[i]+1 if i in tmp_ind else kn[i] for i in range(3)]\n flag += 1\n\n self.kps = kn\n \n if (np.prod(np.array(kn))*self.natom) < self.kppra:\n print(\"===== WARNING =====\")\n print(\"K-points generate method may not be appropriate!\")\n print(\"Check source code!!!!\")\n 
print(\"===================\")\n exit()\n\n #if ifwrite == 'yes':\n # self.write_output()", "def __init__(self, Nbin=10):\n \n self.Nbin = Nbin", "def mezclar_bolsa(self):", "def primos_permitidos():\r\n parejas_permitidas = [(31,23),(47,19),(7,19),(17,41),(31,7),(29,47),(37,23),(2,79),(43,17),(7,37),(5,61),\r\n (17,31),(23,19),(23,7),(11,83),(17,7),(71,3),(37,29),(7,79),(11,59),(37,3),(3,59),(13,53),(79,11),(89,3),\r\n (2,97),(23,5),(13,41),(89,2),(5,97),(89,7),(41,7),(59,7),(19,41),(31,13),(29,19),(79,5),(83,7),\r\n (83,3),(43,7),(23,17),(23,29),(3,41),(17,47),(37,13),(37,11),(53,5),(43,3),(5,83),(7,67),(89,5),\r\n (19,53),(29,17),(53,11),(11,41),(5,47),(73,13),(13,23),(47,29),(5,89),(17,23),(5,43),(71,11),(67,5),\r\n (149,3),(7,47),(19,37),(127,7),(109,7),(7,53),(67,2),(19,41),(67,11),(7,97),(3,103),(3,131),(163,2),(11,61),\r\n (113,5),(73,5),(17,7),(61,5),(97,5),(43,13),(157,5),(2,107),(71,5),(3,151),(5,29),(2,151),(137,3),\r\n (13,29),(59,11),(137,5),(47,11),(13,47),(2,197),(53,17),(239,3),(229,2),(23,37),(53,13),(11,73)]\r\n return parejas_permitidas", "def whatareks():\n\n print(\"loading data\")\n dataset = datagen.DataSet(datapath)\n dataset.load()\n\n for k, ens in enumerate(dataset.ensembles):\n\n print(\n f\"k: {k} --> b={ens.b}, N={ens.grid_shape[0]}, iternum={ens.iternum}\")", "def get_keys():\n SCALE_DICT = {\n 'major': [2,2,1,2,2,2,1],\n 'minor':[2,1,2,2,1,2,2],\n 'chrom':[1,1,1,1,1,1,1,1,1,1,1,1],\n 'ionanian':[2,2,1,2,2,2,1],\n 'dorian':[2,1,2,2,2,1,2],\n 'phrygian':[1,2,2,2,1,2,2],\n 'lydian':[2,2,2,1,2,2,1],\n 'mixolydian':[2,2,1,2,2,1,2],\n 'aeolian':[2,1,2,2,1,2,2],\n 'locrian':[1,2,2,1,2,2,2],\n 'minor_pent':[3,2,2,3,2],\n 'major_pent':[2,2,3,2,3],\n 'pent_6':[2,2,3,1,3],\n 'pent_2':[1,3,3,2,3],\n 'pent_3':[2,1,4,2,3],\n 'pent_5':[2,2,2,3,3],\n 'mixo_pent':[2,2,3,3,2],\n 'phryg_pent':[1,2,3,1,3],\n 'dim_pent':[2,1,3,1,3],\n 'blues':[3,2,1,1,3,2],\n 'harmonic_minor':[2,1,2,2,1,3,2],\n 'melodic_mimnor':[2,1,2,2,1,3,2],\n 'whole_tone':[2,2,2,2,2,2],\n 'whole_half':[2,1,2,1,2,1,2,1],\n 'half_whole':[1,2,1,2,1,2,1,2],\n 'lydian_flat7':[2,2,2,1,2,1,2]\n }\n\n return SCALE_DICT", "def tabelaOrareve():\n \n linja = 1\n kpm = \"3\"\n\n #print(f\"linja {oraret['linja1']} mberrin ne {kpm} minuta\")\n print(f\"Oraret per linjen 1 :\\n {oraret['linja1']}\\n, {oraret['linja2']}\\n, {oraret['linja3']}\\n\")", "def SetMoneda(num, simbolo=\"$\", n_decimales=2):\n #con abs, nos aseguramos que los dec. 
sea un positivo.\n n_decimales = abs(n_decimales)\n\n #se redondea a los decimales idicados.\n num = round(num, n_decimales)\n\n #se divide el entero del decimal y obtenemos los string\n num, dec = str(num).split(\".\")\n\n #si el num tiene menos decimales que los que se quieren mostrar,\n #se completan los faltantes con ceros.\n dec += \"0\" * (n_decimales - len(dec))\n\n #se invierte el num, para facilitar la adicion de comas.\n num = num[::-1]\n\n #se crea una lista con las cifras de miles como elementos.\n l = [num[pos:pos+3][::-1] for pos in range(0,50,3) if (num[pos:pos+3])]\n l.reverse()\n\n #se pasa la lista a string, uniendo sus elementos con comas.\n num = str.join(\",\", l)\n\n #si el numero es negativo, se quita una coma sobrante.\n try:\n if num[0:2] == \"-,\":\n num = \"-%s\" % num[2:]\n except IndexError:\n pass\n\n #si no se especifican decimales, se retorna un numero entero.\n if not n_decimales:\n return \"%s %s\" % (simbolo, num)\n\n return \"%s %s.%s\" % (simbolo, num, dec)", "def set_dev_hash(self,dev_hash):\r\n self.devHash = dev_hash\r\n self.names = ['project 0','project 1']\r\n self.proj0_dev = {}\r\n self.proj1_dev = {}\r\n self.proj0_total = 0\r\n self.proj1_total = 0\r\n print dev_hash\r\n\r\n for k,v in self.devHash.iteritems():\r\n dev0,dev1 = k\r\n port = int(v)\r\n self.proj0_total += port\r\n self.proj1_total += port\r\n if self.proj0_dev.has_key(dev0) == 0:\r\n self.proj0_dev[dev0] = 0\r\n self.proj0_dev[dev0] += port\r\n if self.proj1_dev.has_key(dev1) == 0:\r\n self.proj1_dev[dev1] = 0\r\n self.proj1_dev[dev1] += port\r\n self.data = []\r\n self.label = []\r\n for proj in (self.proj0_dev,self.proj1_dev):\r\n print proj\r\n data = []\r\n label = []\r\n for k,v in proj.iteritems():\r\n port = float(proj[k])\r\n pcent_port = (port * 100)/self.proj0_total\r\n data.append(pcent_port)\r\n label.append(k)\r\n self.data.append(data)\r\n self.label.append(label)\r\n\r\n print self.data\r\n print self.label", "def busca(self, k):\n x = self.getRaiz()\n while x is not None and k != x.getChave():\n if k < x.getChave():\n x = x.getEsquerdo()\n else:\n x = x.getDireito()\n return x", "def get_mapu_kanala_ID_OPIS(self):\n out = {}\n for kanal in self.sviKanali:\n out[kanal] = self.get_datastore(kanal).koncentracija.opis\n return out" ]
[ "0.5587814", "0.55320877", "0.55121857", "0.55119824", "0.5262004", "0.5262004", "0.5262004", "0.5262004", "0.5262004", "0.5259152", "0.5237551", "0.5234786", "0.50942725", "0.5071469", "0.5066532", "0.5040176", "0.5033861", "0.50290656", "0.49992993", "0.49971503", "0.49918824", "0.49858534", "0.49720895", "0.4965856", "0.4948377", "0.493162", "0.49123794", "0.4911999", "0.49113303", "0.49082094" ]
0.7294824
0
return all the URIs that directly or indirectly share keys with the given URI
def traverse_uris(uri): seen = set() uris_to_check = [uri] while len(uris_to_check) > 0: uri = uris_to_check.pop() if uri not in seen: seen.add(uri) for key in keys_for_uri[uri]: for uri2 in uris_for_key[key]: if uri2 not in seen: uris_to_check.append(uri2) return seen
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_signed_urls(urls, rse, operation='read'):\n result = {}\n for url in urls:\n try:\n endpoint, bucket_name, key_name = _get_endpoint_bucket_key(url)\n\n signed_url = None\n if operation == 'read':\n # signed_url = conn.generate_url(3600, 'GET', bucket_name, key_name, query_auth=True, force_http=False)\n bucket = _get_bucket(rse, endpoint, bucket_name)\n key = bucket.get_key(key_name)\n if key is None:\n signed_url = exception.SourceNotFound('Key %s not found on %s' % (key_name, endpoint))\n else:\n try:\n signed_url = key.generate_url(3600, 'GET', query_auth=True, merge_meta=False, force_http=False)\n except TypeError:\n # merge_meta option is not supported\n signed_url = key.generate_url(3600, 'GET', query_auth=True, force_http=False)\n else:\n conn = _get_connection(rse, endpoint)\n _get_bucket(rse, endpoint, bucket_name, operation='write')\n signed_url = conn.generate_url(3600, 'PUT', bucket_name, key_name, query_auth=True, force_http=False)\n result[url] = signed_url\n except boto.exception.S3ResponseError as e:\n if e.status in [404, 403]:\n result[url] = exception.DestinationNotAccessible(e)\n else:\n result[url] = exception.ServiceUnavailable(e)\n except exception.RucioException as e:\n result[url] = e\n except:\n result[url] = exception.RucioException(\"Failed to get signed url for %s, error: %s\" % (url, traceback.format_exc()))\n return result", "def parse_s3_uri(URIs):\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split(\"/\")\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n\n return buckets, keys", "def _recursiveURISearch(self, multidict):\r\n valueList = []\r\n keys = []\r\n\r\n for k, v in multidict.iteritems():\r\n if isinstance(v, dict):\r\n valueList += self._recursiveURISearch(v)\r\n elif k[-1] == '*':\r\n keys.append(k)\r\n\r\n for k in keys:\r\n ele = multidict.pop(k)\r\n\r\n if isinstance(ele, list):\r\n lst = [None] * len(ele)\r\n multidict[k[:-1]] = lst\r\n\r\n for i, uri in enumerate(ele):\r\n valueList.append((uri, lst, i))\r\n else:\r\n valueList.append((ele, multidict, k[:-1]))\r\n\r\n return valueList", "def iterkeyrefs(self):\n for key in self.iterkeys():\n yield ref(key)", "def get_issuer_urls_gnames(self):\n urls = ['uri:' + u for u in self.issuer_urls]\n return self.load_gnames(urls)", "def __contains__(self, uri):\n\t\treturn uri in self._uris", "def hash_entries(entries):\n d = dict()\n for e in entries:\n uri = e[\"uri\"]\n domain = re.match(\"^/view\\d*/(.*)$\", uri).group(1)\n if domain:\n visitor_id = e[\"visitor_id\"]\n if d.has_key(domain):\n store_page_entries = d[domain]\n store_page_entries.append(visitor_id)\n else:\n d[domain] = [visitor_id]\n print \"Retrieved {0} unique domains.\".format(len(d))\n return d", "def has_keys(self, key_in_pointer):\n start = self.head\n rList = []\n while start:\n if key_in_pointer in start.getMember().keys():\n rList.append(start)\n start = start.getLink()\n return rList", "def get_all_paths(self):\n seen = set()\n for v in self:\n # v in self returns all nodes in the pathgraph\n if v not in seen:\n # self [v] returns a path containing v. 
If the v does not belong to a path\n # a singleton path [v] is returned\n yield self[v]\n seen.update(self[v])", "def GetSubkeys(self):", "def _get_all_pinged_urls():\n p = data.DinghyData(redis_host)\n\n return p.get_all_pinged_urls()", "def get_matching_s3_keys(client, bucket, prefix=\"\", suffix=\"\"):\n\n for obj in get_matching_s3_objects(client, bucket, prefix, suffix):\n yield obj[\"Key\"]", "def iterkeyrefs(self):\r\n return self.data.iterkeys()", "def iterkeyrefs(self):\r\n return self.data.iterkeys()", "def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n pass", "def keyrefs(self):\n return [ref(key) for key in self.iterkeys()]", "def getAllSocialPaths(self, userID):\n visited = {}\n # use a queue\n q = []\n q.append([userID])\n # add userID as its own key and value to visited\n visited[userID] = [userID]\n\n while len(q) > 0:\n path = q.pop(0)\n curr_friend = path[-1]\n\n # for all the userID keys inside self.friendships\n for friend in self.friendships[curr_friend]:\n # add neighbor as a key, if not visited, in visited with an empty list as value\n if friend not in visited:\n visited[friend] = list()\n # break out of loop if already in visited\n else: \n continue\n \n # create a new list that holds the path from userID to friend\n friend_path = list(path)\n # add the friend onto the end of the list\n friend_path.append(friend)\n # also add path to the queue\n q.append(friend_path) \n # add path as the value to the friend\n visited[friend].extend(friend_path)\n \n return visited", "def all_photosets():\n return self._uris.values()", "def find_uuids_linked_to_item(cls, rid):\n ignored(rid)\n return []", "async def keys(self) -> Iterable[str]:", "def keyrefs(self):\r\n return self.data.keys()", "def keyrefs(self):\r\n return self.data.keys()", "def cachepath(self):\n return [self.fs.cachepath(uri) for uri in self.uri]", "def test_getting_keys(self): \n cons_hash = ConsistentHash(2) \n \n nodes = ['192.168.1.1:20000',\n '192.168.1.1:20001',\n '192.168.1.1:20002',\n '192.168.1.1:20003'] \n\n for node in nodes:\n cons_hash.add(node)\n \n self.assertEquals(len(cons_hash), 8)\n node_counts = defaultdict(int)\n for i in xrange(0,100):\n key = str(uuid.uuid4())\n node = cons_hash.get_node(key)\n \n self.assertTrue(node in nodes)\n node_counts[node] += 1\n\n self.assertTrue(cons_hash._is_consistent())", "def get_shares_for_url(url):\n return twitter_shares_for_url(url) + facebook_shares_for_url(url)", "def get_resource_urls(soup: BeautifulSoup) -> set:\n return {\n tag[TAGS_ATTR[tag.name]]\n for tag in soup.findAll(name=list(TAGS_ATTR.keys()))\n if tag.has_attr(TAGS_ATTR[tag.name])\n }", "def pending_apikey_lookups(self):\n self.pending_apikey_replies_lock.acquire()\n ks = self.pending_apikey_replies.keys()\n self.pending_apikey_replies_lock.release()\n return ks", "def copyurls(door):\n return {name: Url(url.path) for name, url in door.urls.items()}", "def _extract_bucket_key(s3_uri: str)->tuple:\n s3_regex=\"^s3://([a-z0-9.-]+)/(.*)$\"\n search =re.search(s3_regex, s3_uri)\n if search is None:\n raise Error(\"Invalid s3 uri: {}\".format(s3_uri))\n return search.groups()", "def contract_uri(\n uri: str, cmaps: Optional[List[PREFIX_MAP]] = None, strict: bool = False, shortest: bool = True\n) -> List[str]:\n if cmaps is None:\n # TODO warn if not shortest?\n curie = default_converter.compress(uri)\n if curie is not None:\n return [curie]\n elif strict:\n raise NoPrefix(uri)\n else:\n return []\n\n curies = set()\n for cmap 
in cmaps:\n for k, v in cmap.items():\n if isinstance(v, str):\n if uri.startswith(v):\n curies.add(uri.replace(v, k + \":\"))\n curies = list(curies)\n if shortest:\n if len(curies) > 1:\n le = min(len(x) for x in curies)\n curies = [x for x in curies if len(x) == le]\n if strict:\n if len(curies) == 0:\n raise NoPrefix(uri)\n if len(curies) > 1:\n raise AmbiguousPrefix(uri, curies)\n return curies" ]
[ "0.5448103", "0.5409984", "0.5319906", "0.5254115", "0.519453", "0.5190271", "0.5170817", "0.5147005", "0.5144864", "0.5075499", "0.50541604", "0.5042928", "0.50086915", "0.50086915", "0.50020605", "0.49731576", "0.49711323", "0.49554473", "0.49431983", "0.49355274", "0.49327114", "0.49327114", "0.49283648", "0.49139073", "0.49123538", "0.49119133", "0.49092877", "0.48857206", "0.48754537", "0.4872225" ]
0.7433617
0
return a sort key for the given URI, based on whether it represents the primary work in the record
def uri_sort_key(uri): if uri.startswith('http://urn.fi/URN:NBN:fi:bib:me:'): priority = int(uri[-2:]) # last two digits are 00 for the primary work, 01+ for other works mentioned else: priority = -1 # higher priority for e.g. authorized agents return (priority, uri)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _wiki_sort_key(doc):\n url = doc['url']\n return 1 if url.startswith('https://en.wikipedia') else -1", "def get_row_list_sorting_key(x):\n name, count = x\n if '_' not in name:\n return name\n s = name.split('_')\n end = s[-1]\n start = '_'.join(s[:-1])\n if utils.is_int(end):\n return (start, int(end))\n return name", "def get_row_list_sorting_key(x):\n name, count = x\n if '_' not in name:\n return name\n s = name.split('_')\n end = s[-1]\n start = '_'.join(s[:-1])\n if is_int(end):\n return (start, int(end))\n return name", "def _key_sorting(item):\n key, value = item\n if isinstance(value, Link):\n return (1, key)\n return (0, key)", "def sort_key(self):\n ...", "def get_sort_key(self) -> str:\n return self.name", "def sortKey(self):\n return 'filestore:{0}'.format(id(self.stage))", "def _get_field_sort_key(self, field):\n if not field.is_relation:\n return -1\n return 0 if field.many_to_many else 1", "def sort_by_key(request):\n return request.param", "def sort_by_key(request):\n return request.param", "def _make_sort_key(line):\n dep = line.partition('=')[0].encode('utf-8')\n return hashlib.sha1(dep).digest()", "def sortKey(self, p_str): # real signature unknown; restored from __doc__\n return QCollatorSortKey", "def key(self, sorting):\n if(sorting & Sorting.NoSorting):\n return (lambda x: 1) # All elements get the same key\n\n if(sorting & Sorting.Date):\n return (lambda x: x.date)\n\n if(sorting & Sorting.Code):\n return (lambda x: x.code)\n\n if(sorting & Sorting.User):\n return (lambda x: x.name)\n\n if(sorting & Sorting.Priviledges):\n # Not having priviledges grants \"points\": the more points the higher in the sort\n return (lambda x: (x.filters & Filters.NonSubs) + (x.filters & Filters.NonMods))\n\n if(sorting & Sorting.TimesRequested):\n return (lambda x: x.times_requested)", "def sort_key(path):\n file_end = path.rsplit(os.sep,1)[1]\n file_number = file_end.rstrip('.tif')\n return int(file_number)", "def job_sorter(self, job):\n key = natsort.natsort_keygen(key=self.job_title, alg=natsort.REAL)\n return key(job)", "def subdomain_sorting_key(hostname):\n parts = hostname.split('.')[::-1]\n if parts[-1] == 'www':\n return parts[:-1], 1\n return parts, 0", "def connection_sort_key(conn):\n\n conn_rec_state = ConnRecord.State.get(conn[\"state\"])\n if conn_rec_state is ConnRecord.State.ABANDONED:\n pfx = \"2\"\n elif conn_rec_state is ConnRecord.State.INVITATION:\n pfx = \"1\"\n else:\n pfx = \"0\"\n\n return pfx + conn[\"created_at\"]", "def _get_sort_key(self, req):\n sort_key = req.params.get('sort_key', 'created_at')\n if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS:\n _keys = ', '.join(SUPPORTED_SORT_KEYS)\n msg = _(\"Unsupported sort_key. 
Acceptable values: %s\") % (_keys,)\n raise exc.HTTPBadRequest(explanation=msg)\n return sort_key", "def sortkey(item):\n chrom, pos, ref, alt = item[0]\n if chrom.startswith('chr'):\n chrom = chrom[3:]\n if chrom.isdigit():\n chrom = int(chrom)\n return (chrom, pos, len(ref), len(alt))", "def compareByName(keyname, author):\n authentry = me.getKey(author)\n if (keyname == authentry):\n return 0\n elif (keyname > authentry):\n return 1\n else:\n return -1", "def _column_sorting_key(self, c):\n first_index = 0\n if c.startswith('hybrid'):\n first_index = 1\n elif c.startswith('solar'):\n first_index = 2\n elif c.startswith('wind'):\n first_index = 3\n elif c == MERGE_COLUMN:\n first_index = -1\n return first_index, self._hybrid_meta.columns.get_loc(c)", "def job_priority_key(self, job):\n if not self._stats.total_usage:\n fairshare = 1\n else:\n user = job.user\n effective = user.cpu_clock_used / self._stats.total_usage\n #shares_norm = user.shares # already normalized\n fairshare = 2.0 ** -(effective / user.shares)\n prio = int(fairshare * 100000) # higher value -> higher priority\n # TODO if needed change the constant to a configuration setting\n # TODO and add more components to the priority value\n return (-prio, job.submit, job.ID)", "def sortkey(style, reference, context='bibliography'):\n return(reference['title'], reference['date'])", "def _key_func_0(entry: tuple[str, str]) -> tuple[bool, str]:\n main, uri = entry\n return not main, uri # show main entries at first", "def smart_sort(item):\n try:\n return int(''.join(os.path.basename(item).split('.')[0:-1]))\n except (TypeError, ValueError, AttributeError):\n return item", "def sort_wildcard(self):\n return self.make_key(\n self._model._name,\n \"*\",\n self.name,\n )", "def job_priority_key(self, job):\n raise NotImplemented", "def order(name: str):\n if name.startswith('pred'):\n split = name.split('_')\n if len(str(split[-2])) > 10: # New file format, -2 is hash\n return int(split[-3])\n return int(split[-2])\n split = name.split('_')\n x = split[-1].split('.')[0]\n return int(x)", "def job_priority_key(self, job):\n camp, user = job.camp, job.user\n end = camp.time_left / user.shares # lower value -> higher priority\n # The `end` should be further multiplied by\n # `_stats.active_shares` / `_stats.cpu_used`.\n # However, that gives the same value for all the jobs\n # and we only need the ordering, not the absolute value.\n return (end, camp.created, user.ID, camp.ID,\n job.submit, job.ID)", "def _natural_sort_key(value):\n return map(try_int_cast, re.findall(r'(\\d+|\\D+)', value))" ]
[ "0.6385338", "0.6095066", "0.6061569", "0.5977724", "0.5976031", "0.5923008", "0.5896237", "0.58184385", "0.5716709", "0.5716709", "0.5691437", "0.56786144", "0.55717903", "0.5541411", "0.55260164", "0.54876274", "0.54588896", "0.544542", "0.5411701", "0.5399729", "0.5323552", "0.52998084", "0.52764493", "0.5273194", "0.52542675", "0.52229184", "0.5183881", "0.51783735", "0.51729965", "0.51344156" ]
0.7179098
0
return the most appropriate URI from the given set of URIs
def select_uri(uris): return sorted(uris, key=uri_sort_key)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_best(a_list, base_url, keyword=TERMS_KEYWORD):\n if not a_list:\n return None\n\n if len(a_list) == 1:\n return get_absolute_url(a_list[0], base_url)\n\n for a in a_list:\n full_url_str = get_absolute_url(a, base_url)\n full_url = URL(full_url_str)\n\n if full_url.domain != base_url.domain:\n continue\n\n if keyword == TERMS_KEYWORD:\n if \"terms of service\" in a.string.lower():\n return full_url_str\n if keyword == PRIVACY_KEYWORD:\n if \"privacy policy\" in a.string.lower():\n return full_url_str\n\n return None", "def get_single_uri(artifact_list: List[Artifact]) -> Text:\n return get_single_instance(artifact_list).uri", "def test_uris(self):\r\n invariant = [ \r\n u\"ftp://ftp.is.co.za/rfc/rfc1808.txt\",\r\n u\"http://www.ietf.org/rfc/rfc2396.txt\",\r\n u\"ldap://[2001:db8::7]/c=GB?objectClass?one\",\r\n u\"mailto:[email protected]\",\r\n u\"news:comp.infosystems.www.servers.unix\",\r\n u\"tel:+1-816-555-1212\",\r\n u\"telnet://192.0.2.16:80/\",\r\n u\"urn:oasis:names:specification:docbook:dtd:xml:4.1.2\" ]\r\n for uri in invariant:\r\n self.assertEqual(uri, iri2uri(uri))", "def get_most_surfed_page(records):\n uris = {}\n for r in records:\n if r.code != 408:\n uris[r.uri] = uris.get(r.uri, 0) + 1\n max_req = 0\n max_uri = None\n for k,v in uris.items():\n if v > max_req:\n max_req, max_uri = v, k\n print(max_req)\n return max_uri", "def _select_largest_photo(self, sizes):\n\n max_size = 0\n photo = ''\n for size in sizes:\n w = size['width']\n h = size['height']\n if w * h >= max_size:\n max_size = w * h\n photo = size['url']\n return photo", "def mergeURLS(inputs):\n urls = set()\n for i in inputs:\n # Re-raise any exceptions\n try:\n urls = urls.union(i.urls())\n except:\n raise\n return urls", "def _get_uri_reference(self):\n ref_name, ref_val = next(iter(self._choose_reference().items()))\n if ref_name == 'sha1':\n return 'sha1/%s' % ref_val\n else:\n return 'ref/%s' % ref_val", "def traverse_uris(uri):\n seen = set()\n uris_to_check = [uri]\n while len(uris_to_check) > 0: \n uri = uris_to_check.pop()\n if uri not in seen:\n seen.add(uri)\n for key in keys_for_uri[uri]:\n for uri2 in uris_for_key[key]:\n if uri2 not in seen:\n uris_to_check.append(uri2)\n \n return seen", "def uri_dispatch(uri):\n\n return uri_dispatch_map[os.path.splitext(uri)[1]]", "def first_http_uri(xia):\n\n return first_uri_matching_prefix(xia, \"http://\")", "def any_to_uri(uri_or_path):\n if os.path.splitdrive(uri_or_path)[0]:\n return path_to_file_uri(uri_or_path)\n u = urlparse(uri_or_path)\n return uri_or_path if u.scheme else path_to_file_uri(uri_or_path)", "def contract_uri(\n uri: str, cmaps: Optional[List[PREFIX_MAP]] = None, strict: bool = False, shortest: bool = True\n) -> List[str]:\n if cmaps is None:\n # TODO warn if not shortest?\n curie = default_converter.compress(uri)\n if curie is not None:\n return [curie]\n elif strict:\n raise NoPrefix(uri)\n else:\n return []\n\n curies = set()\n for cmap in cmaps:\n for k, v in cmap.items():\n if isinstance(v, str):\n if uri.startswith(v):\n curies.add(uri.replace(v, k + \":\"))\n curies = list(curies)\n if shortest:\n if len(curies) > 1:\n le = min(len(x) for x in curies)\n curies = [x for x in curies if len(x) == le]\n if strict:\n if len(curies) == 0:\n raise NoPrefix(uri)\n if len(curies) > 1:\n raise AmbiguousPrefix(uri, curies)\n return curies", "def get_urls(inputfiles):\n urls = []\n scheme_rgx = re.compile(r'^https?://')\n for ifile in inputfiles:\n urls.append(ifile.read().splitlines())\n urls = set([n for l in urls for n in 
l])\n urls = list(filter(None, urls))\n for i in range(len(urls)):\n if not scheme_rgx.match(urls[i]):\n urls[i] = 'http://' + urls[i]\n return urls", "def uri_sort_key(uri):\n if uri.startswith('http://urn.fi/URN:NBN:fi:bib:me:'):\n priority = int(uri[-2:]) # last two digits are 00 for the primary work, 01+ for other works mentioned\n else:\n priority = -1 # higher priority for e.g. authorized agents\n return (priority, uri)", "def test_multiple_gets(uris):\n\n for uri in uris:\n print('='*10 + ' Try uri : {uri} '.format(uri=uri) + '='*10)\n resp = get_api_url(uri)\n print(resp)\n try:\n pprint(resp.json())\n except Exception as e:\n print(resp.text)", "def parse_uri(uri):\r\n groups = URI.match(uri).groups()\r\n return (groups[1], groups[3], groups[4], groups[6], groups[8])", "def download_image_urls(\n urls_filename: Union[Path, str],\n synsets: List[str],\n max_concurrent: int = 50,\n rewrite: bool = False\n) -> Dict[str, Optional[List[str]]]:\n print(\"Downloading image urls.\")\n synsets_to_urls = asyncio.run(_download_image_urls(urls_filename, synsets, max_concurrent, rewrite))\n return synsets_to_urls", "def _uri(self) -> str:\n return random.SystemRandom().choice(self._uris)", "def getClassName(self, uris):\n\t\t#Iterate over the class mapping. As the uris are stored in the value they have to be checked against the given uris parameter\n\t\t#Class mapping is an ordered dictionary => Order sepcifies specificy\n\t\tfor (key, value) in list(self._classMapping.items()):\n\t\t\tif(value in uris):\n\t\t\t\treturn key\n\t\t#If nothing was found the Resource class is the right one\n\t\treturn ClassMapper.DEFAULT_CLASS", "def get_highest_preference(self, routes):\n # start highest lpref route as the first route's path\n highest_lprf_route = [routes[0]]\n # start the highest lpref as that\n # of the first route's path\n highest_lprf = int(routes[0][LPRF])\n # iterate through all routes in given list and\n # find the one with the highest local pref\n for route in routes:\n r_lprf = int(route[LPRF])\n if r_lprf > highest_lprf:\n highest_lprf = r_lprf\n highest_lprf_route = [route]\n elif r_lprf == highest_lprf:\n highest_lprf_route.append(route)\n return highest_lprf_route", "def lookup_maybe(self, digests):\n\t\tassert digests\n\t\tfor digest in digests:\n\t\t\tassert digest\n\t\t\tif '/' in digest or '=' not in digest:\n\t\t\t\traise BadDigest(_('Syntax error in digest (use ALG=VALUE, not %s)') % digest)\n\t\t\tfor store in self.stores:\n\t\t\t\tpath = store.lookup(digest)\n\t\t\t\tif path:\n\t\t\t\t\treturn path\n\t\treturn None", "async def _download_image_urls(\n urls_filename: Union[Path, str],\n synsets: List[str],\n max_concurrent: int = 50,\n rewrite: bool = False\n) -> Dict[str, Optional[List[str]]]:\n if (not rewrite) and os.path.exists(urls_filename):\n with open(urls_filename, \"r\") as f:\n return json.load(f)\n raise NotImplementedError(\"The ImageNet site was updated and there is no longer access to lists of urls by synset.\")\n semaphore = asyncio.Semaphore(max_concurrent) # pylint: disable=unreachable\n synsets_to_urls = dict(await asyncio.gather(*[_download_urls_for_synset(synset, semaphore) for synset in synsets]))\n with open(urls_filename, \"w\") as f:\n json.dump(synsets_to_urls, f)\n print(len(synsets_to_urls))\n return synsets_to_urls", "def get_best_match(self, list):\n raise NotImplementedError", "def get_origin_routes(self, routes):\n outroutes = []\n current_best = \"UNK\"\n # iterate through routes in given list updating the current best if a better\n # option is 
discovered\n for route in routes:\n if route[ORIG] == current_best:\n outroutes.append(route)\n elif (route[ORIG] == \"EGP\" and current_best != \"IGP\") or route[ORIG] == \"IGP\":\n # if the current best is worse than EGP and the current is EGP,\n # update best and start a new list\n # if the current best is worse than IGP and the current is IGP,\n # update best and start a new list\n current_best = route[ORIG]\n outroutes = [route]\n\n return outroutes", "def find_best_reference_set(points):\n\n # Group points by color\n grouped = defaultdict(list)\n for point in points:\n grouped[point.color].append(point)\n\n # Brute force search on all combinations of points with unique colors\n possibilities = product(*[grouped[key] for key in grouped])\n return min(possibilities, key=summed_distances)", "def find_best_route(all_cost, all_routes):\n cost_best_route = np.inf\n for i in range(len(all_cost)):\n if all_cost[i] < cost_best_route:\n cost_best_route = all_cost[i]\n best_route = all_routes[i]\n return cost_best_route, best_route", "def _get_best_ref(self, header_in):\n header_in = dict(header_in)\n log.verbose(\"Getting bestrefs:\", self.basename, verbosity=55)\n expr_header = utils.condition_header_keys(header_in)\n self.check_rmap_omit(expr_header) # Should bestref be omitted based on rmap_omit expr?\n self.check_rmap_relevance(expr_header) # Should bestref be set N/A based on rmap_relevance expr?\n # Some filekinds, .e.g. ACS biasfile, mutate the header\n header = self._precondition_header(self, header_in) # Execute type-specific plugin if applicable\n header = self.map_irrelevant_parkeys_to_na(header) # Execute rmap parkey_relevance conditions\n try:\n bestref = self.selector.choose(header)\n except Exception as exc:\n # Check conditions for Do Not Reprocess dataset parameters, set to NA if True\n dnr = self.dnr_check(header)\n if dnr is True:\n log.verbose(\"DNR dataset identified - setting reference to NA\", str(exc), verbosity=55)\n raise crexc.IrrelevantReferenceTypeError(\"Reference type not required for DNR dataset.\") from exc\n\n log.verbose(\"First selection failed:\", str(exc), verbosity=55)\n header = self._fallback_header(self, header_in) # Execute type-specific plugin if applicable\n try:\n if header:\n header = self.minimize_header(header)\n log.verbose(\"Fallback lookup on\", repr(header), verbosity=55)\n header = self.map_irrelevant_parkeys_to_na(header) # Execute rmap parkey_relevance conditions\n bestref = self.selector.choose(header)\n else:\n raise\n except Exception as exc:\n log.verbose(\"Fallback selection failed:\", str(exc), verbosity=55)\n if self._reffile_required in [\"YES\", \"NONE\"]:\n log.verbose(\"No match found and reference is required:\", str(exc), verbosity=55)\n raise\n else:\n log.verbose(\"No match found but reference is not required:\", str(exc), verbosity=55)\n raise crexc.IrrelevantReferenceTypeError(\"No match found and reference type is not required.\") from exc\n log.verbose(\"Found bestref\", repr(self.instrument), repr(self.filekind), \"=\", repr(bestref), verbosity=55)\n if MappingSelectionsDict.is_na_value(bestref):\n raise crexc.IrrelevantReferenceTypeError(\"Rules define this type as Not Applicable for these observation parameters.\")\n if MappingSelectionsDict.is_omit_value(bestref):\n raise crexc.OmitReferenceTypeError(\"Rules define this type to be Omitted for these observation parameters.\")\n return bestref", "def first_https_uri(xia):\n\n return first_uri_matching_prefix(xia, \"https://\")", "def 
get_most_specific_rdf_type(types):\n mapper = PyOpenWorm.CONTEXT.mapper\n most_specific_types = tuple(mapper.base_classes.values())\n for x in types:\n try:\n class_object = mapper.RDFTypeTable[x]\n if issubclass(class_object, most_specific_types):\n most_specific_types = (class_object,)\n except KeyError:\n L.warning(\n \"\"\"A Python class corresponding to the type URI \"{}\" couldn't be found.\n You may want to import the module containing the class as well as\n add additional type annotations in order to resolve your objects to\n a more precise type.\"\"\".format(x))\n return most_specific_types[0].rdf_type", "def best_match(self, u):\n u = u.decode(\"UTF8\") if isinstance(u, (bytes, bytearray)) else u\n cur_u = sub(\"\\?.*$\", \"\", u)\n result = None, None\n for base_url in self.all_conf:\n if u.startswith(base_url):\n conf = self.all_conf[base_url]\n for pat in conf[\"conf\"]:\n if match(\"^%s$\" % pat, cur_u):\n result = conf, pat\n break\n return result" ]
[ "0.5774539", "0.5353114", "0.5214836", "0.5105618", "0.5089583", "0.5086685", "0.5053703", "0.50328743", "0.50301826", "0.502398", "0.50229007", "0.50123894", "0.5010701", "0.49976727", "0.4970601", "0.4955148", "0.4953455", "0.49447128", "0.49424458", "0.49263084", "0.4909074", "0.49033087", "0.48980603", "0.48775092", "0.48692036", "0.4852576", "0.4819775", "0.4804781", "0.47933996", "0.47336063" ]
0.7494441
0
Return user details from Kakao account
def get_user_details(self, response): kaccount_email = "" kakao_account = response.get("kakao_account", "") if kakao_account: kaccount_email = kakao_account.get("email", "") properties = response.get("properties", "") nickname = properties.get("nickname") if properties else "" return { "username": nickname, "email": kaccount_email, "fullname": nickname, "first_name": nickname[1:] if nickname else "", "last_name": nickname[0] if nickname else "", }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_data(self, access_token, *args, **kwargs):\n return self.get_json(\n \"https://kapi.kakao.com/v2/user/me\",\n headers={\n \"Authorization\": f\"Bearer {access_token}\",\n \"Content_Type\": \"application/x-www-form-urlencoded;charset=utf-8\",\n },\n params={\"access_token\": access_token},\n )", "def user_info(self):\n return self.auth.get_user_by_session()", "def user_data(self, access_token, *args, **kwargs):\n data = {'method': 'users.getInfo', 'session_key': access_token}\n return mailru_api(data)[0]", "def user_info(self):\n response = self.query('user_info')\n return response", "def getUserInfo(data):\n\tusername = data[\"session_username\"]\n\tuser = Users.objects.filter(username=username).first()\n\n\tresponse = {}\n\n\tif not user:\n\t\treturn {\"Success\": False, \"Error\": \"Unable to retrieve the user information from database\"}\n\n\tresponse[\"Success\"] = True\n\tresponse[\"Username\"] = user.username\n\tresponse[\"Email\"] = user.email\n\tresponse[\"Verified\"] = user.verified\n\tresponse[\"Level\"] = user.level\n\tresponse[\"Experience\"] = user.experience\n\tresponse[\"Coins\"] = user.coins\n\tresponse[\"Preferences\"] = {\"Grid Opacity\": user.pref_grid}\n\n\treturn response", "def account_info(request):\r\n user = request.user\r\n\r\n return _api_response(request, user.safe_data())", "def get_user_details(self, response):\n name = response.get(\"name\")\n return {\n \"username\": str(response.get(\"account_id\")),\n \"email\": response.get(\"email\"),\n \"fullname\": name.get(\"display_name\"),\n \"first_name\": name.get(\"given_name\"),\n \"last_name\": name.get(\"surname\"),\n }", "def get_user_info():\n if session and session.get(\"email\") and session.get(\"display_name\"):\n email = session.get(\"email\")\n display_name = session.get(\"display_name\")\n data = dict(email=email, displayName=display_name)\n app.logger.debug(\"Success in getting log information on user: {} at email: {}\".format(display_name, email))\n return jsonify(data)\n else:\n return jsonify(dict(email=\"error\", display_name=\"Could not get info for this user\"))", "def account(request):\r\n # if auth fails, it'll raise an HTTPForbidden exception\r\n with ReqAuthorize(request):\r\n user = UserMgr.get(username=request.user.username)\r\n\r\n return {\r\n 'user': user,\r\n 'username': user.username,\r\n }", "def get_user_details():\n rv = query_db('select * from user')\n return rv[0] if rv else None", "def get_user_details(self, response):\n return {\n \"username\": response.get(\"username\"),\n \"email\": response.get(\"email\"),\n \"fullname\": response.get(\"username\"),\n }", "def get_user_info(self) -> str:\n return self._searcher.get_user_info()", "def get_user_info(uid):\r\n session = tables.get_session()\r\n account_name = ''\r\n description = ''\r\n if session is None:\r\n return account_name, description\r\n try:\r\n user_account = UserAccount()\r\n account_name = user_account.get_field_by_key(UserAccount.account_name, UserAccount.user_id, uid,\r\n session)\r\n description = user_account.get_field_by_key(UserAccount.description, UserAccount.user_id, uid,\r\n session)\r\n except SQLAlchemyError as err:\r\n LOGGER.error('User login failed: %s', err)\r\n return account_name, description\r\n finally:\r\n session.close()\r\n return account_name, description", "def get():\n return prepare_response(get_user_info())", "def get_user_details(self, response):\n token = response.get('access_token')\n headers = {\"Authorization\": \"Bearer %s\" % token}\n endpoint = self.USER_INFO_URL\n 
response = requests.get(endpoint, headers=headers)\n return {'email': response.json()['email'] or '',\n # We'll need sub, the unique ID, for get_user_id.\n 'sub': response.json()['sub']}", "def user_info(username):\n print(json.dumps(client.user_info(username)))", "def user_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time()*1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/users/me', param, self.timeout)", "def get_users_info(): \n \n data = user_obj.get_users_info()\n return data", "def get_user_details(client):\n\n try:\n return client.user(user_id='me').get(fields=['login'])\n # print(f\"The email of the user is: {me['login']}\")\n\n except Exception as e:\n print(f\"Error has occurred: {e}\")\n return None", "def user_data(self, access_token, *args, **kwargs):\n headers = {'Authorization': 'Bearer %s' % access_token}\n try:\n resp = requests.get(ASANA_USER_DETAILS_URL,\n headers=headers)\n resp.raise_for_status()\n return resp.json()['data']\n except ValueError:\n return None", "async def get_user_account(self):\n ts = tools.get_cur_timestamp_ms()\n params = {\n \"timestamp\": str(ts)\n }\n success, error = await self.request(\"GET\", \"/api/v3/account\", params, auth=True)\n return success, error", "def get_account_details(self):\n pass", "def get_user_info_by_name(self, username: str) -> dict:", "def get_user_details(self, response):\n email = response.get(\"email\")\n return {\"email\": email, \"username\": email.split(\"@\")[0]}", "def getUserInfo(UserId):\n url = f\"https://users.roblox.com/v1/users/{UserId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n displayName = j['displayName']\n name = j['name']\n uid = j['id']\n isBanned = j['isBanned']\n joinDate = j['created']\n description = j['description']\n return displayName,name,uid,isBanned,joinDate,description", "def user_info(self):\n \n return self.auth.get_user_by_session()", "async def get_user_account(self):\n uri = \"/fapi/v1/account\"\n ts = tools.get_cur_timestamp_ms()\n params = {\n \"timestamp\": str(ts)\n }\n success, error = await self.request(\"GET\", uri, params, auth=True)\n return success, error", "def userinfo(self, access_token: str) -> dict[str, Any]:\n data: dict[str, Any] = self.client.get(\n url=f\"{self.protocol}://{self.domain}/userinfo\",\n headers={\"Authorization\": f\"Bearer {access_token}\"},\n )\n return data", "def get_user_info(self, session, apikey):\n if self.is_login(session, apikey):\n session = sessions.first(session=session)\n if session is not None:\n users.find()\n user_info = users.get(session.user_id)\n del user_info.password\n return user_info\n return None", "def get_user():\n\treturn '1', 200" ]
[ "0.74495924", "0.7401827", "0.7308182", "0.7220801", "0.7169196", "0.7113641", "0.7105932", "0.70832413", "0.69508857", "0.6946847", "0.69346327", "0.6913946", "0.6912289", "0.6847098", "0.6845799", "0.68399817", "0.6829566", "0.6815086", "0.6811426", "0.6808608", "0.6788369", "0.6771193", "0.67679524", "0.67662054", "0.67654115", "0.6757998", "0.67574024", "0.67307204", "0.67081356", "0.66828436" ]
0.77409214
0
Publishes freespace (as measured by e.g. sonar).
def send_free_space(self, distance): self.client.publish('free_space', str(distance))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def usedspace(self):\n self.log.info(\"freespace\")\n nbytes = 0\n keys = list(self.downloads.keys())\n keys.sort()\n for key in keys:\n download = self.downloads[key]\n nbytes += download['size']\n self.log.info(\"returning:\" + str(nbytes))\n return nbytes", "def freespace(self):\n self.log.info(\"freespace\")\n freebytes = shutil.disk_usage(self.s3_dir).free\n self.log.info(\"returning:\" + str(freebytes))\n return freebytes", "def record_queue_size():\n statsd.gauge('rabbitmq.size', rabbitmq_queue_size())", "def check_free_space():\n subprocess.run([\"ssh\",backup_host, \"du -h\", dest ]])\n # get output", "def _free_space() -> int:\n return disk_usage(realpath('/')).free", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def get_space_used():\n fs.get_space_used()", "def ufree(verbose=False):\n import gc\n import os\n F = gc.mem_free()\n A = gc.mem_alloc()\n T = F+A\n P = '{0:.2f}%'.format(F/T*100)\n if not verbose:\n return P\n return ('Total: {} Free: {} ({})'.format(T ,F, P))", "def get_space_used():\n files = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status IN ('added', 'downloaded', 'unverified')\")\n\n total_size = 0\n for file in files:\n total_size += int(file['size'])\n return total_size", "def test_free_space_without_arguments():\n result = _run_metric('free_space')\n assert result.exit_code == 0\n assert '%' in result.output", "def leak(self, value):\n\t\t\n\t\t#Charge buffer(bucket)\n\t\tif self.buff + value < self.BUCKET_SIZE:\n\t\t\tself.buff += value\n\t\t\tself.queue_package.put(value)\n\t\t#When buffer is full - \n\t\t#started sending packages under rate via Thread\n\t\telif not self.rate_thread.isAlive():\n\t\t\tself.rate_thread.start()", "def get_space_committed():\n reserved = jobtracker.query(\"SELECT SUM(size) FROM files \" \\\n \"WHERE status IN ('downloading', 'new', \" \\\n \"'retrying', 'failed')\", \\\n fetchone=True)\n if reserved is None:\n reserved = 0\n return reserved", "def get_free_space(config, task):\n if 'host' in config:\n import paramiko\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n try:\n ssh.connect(\n config.get('host'),\n config.get('port', 22),\n config.get('user'),\n config.get('password', None),\n config.get('pkey', None),\n config.get('ssh_key_filepath'),\n timeout=5000,\n )\n except Exception as e:\n logger.error(\"Issue connecting to remote host. 
{}\", e)\n task.abort('Error with remote host.')\n if config['allotment'] != -1:\n stdin, stdout, stderr = ssh.exec_command(f\"du -s {config['path']} | cut -f 1\")\n else:\n stdin, stdout, stderr = ssh.exec_command(\n f\"df -k {config['path']} | tail -1 | tr -s ' ' | cut -d' ' -f4\"\n )\n outlines = stdout.readlines()\n resp = ''.join(outlines)\n ssh.close()\n try:\n if config['allotment'] != -1:\n free = int(config['allotment']) - ((int(resp.strip()) * 1024) / 1000000)\n else:\n free = int(resp.strip()) / 1000\n except ValueError:\n logger.error('Non-integer was returned when calculating disk usage.')\n task.abort('Error with remote host.')\n return free\n elif os.name == 'nt':\n import ctypes\n\n free_bytes = ctypes.c_ulonglong(0)\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(\n ctypes.c_wchar_p(config['path']), None, None, ctypes.pointer(free_bytes)\n )\n return free_bytes.value / (1024 * 1024)\n else:\n stats = os.statvfs(config['path'])\n return (stats.f_bavail * stats.f_frsize) / (1024 * 1024)", "def log_free_disk_space():\n cmd = 'df -h'\n p = Popen(cmd, shell=True, stdout=PIPE)\n res = p.communicate()\n if res[0]:\n res = res[0]\n else:\n res = res[1]\n logger.warning('Disk usage statisticks:')\n logger.warning(res)", "def freespace(p):\n s = os.statvfs(p)\n return (s.f_bsize * s.f_bavail) /1024", "def CacheFreeSpaceCheck(self, amount):\n self._required_cache = max(self._required_cache, amount)\n self.script.append(('apply_patch_space(%d) || abort(\"E%d: Not enough free '\n 'space on /cache to apply patches.\");') % (\n amount,\n common.ErrorCode.INSUFFICIENT_CACHE_SPACE))", "def yapasGarbageCollector(self):\r\n core.FW_conf['connection'].sendCommonMsg([0x00,0x00,0x10,0x38,0x00,0x06,0x00,0x01,0x01,0x5e,0x00,0x00]) #UI_FORCE_GARBAGE_COLLECTION_REQ\r\n if not core.FW_conf['connection'].recvMsg():\r\n raise GraniteConnectionException('Failed to receive UI_FORCE_GARBAGE_COLLECTION_RESP')", "def get_free_gb():\n mem_info = get_mem_info()\n free_gb = float(mem_info['MemAvailable'].value) / 10**6\n return free_gb", "def get_capacity():\n fs.get_capacity()", "def verifyAvailableSpace(sitemover, totalFileSize, path, error):\n\n ec = 0\n pilotErrorDiag = \"\"\n\n # skip for now: add the 5 GB + 2 GB limits for output and log files to the total input file size\n _neededSpace = totalFileSize\n tolog(\"Needed space: %d B\" % (_neededSpace))\n # get the locally available space\n _availableSpace = getLocalSpace(path)\n tolog(\"Locally available space: %d B\" % (_availableSpace))\n\n # should the file size verification be done? (not if \"mv\" is used)\n doVerification = sitemover.doFileVerifications()\n \n # are we wihin the limit?\n if (_neededSpace > _availableSpace) and doVerification:\n pilotErrorDiag = \"Not enough local space for staging input files and run the job (need %d B, but only have %d B)\" %\\\n (_neededSpace, _availableSpace)\n tolog(\"!!FAILED!!2999!! 
%s\" % (pilotErrorDiag))\n ec = error.ERR_NOLOCALSPACE\n\n return ec, pilotErrorDiag", "def MAXMEM(self):", "def output_queue_size(self):\r\n results_dirname = get_param('results_dir')\r\n filename = os.path.join(results_dirname,\r\n '%s_%s' % (get_param('file_prefix'),\r\n 'queued_tasks'))\r\n queued_tasks_file = open(filename, 'w')\r\n queued_tasks_file.write('time\\ttotal_queued_tasks\\n')\r\n for time, queued_tasks in self.enqueued_tasks:\r\n queued_tasks_file.write('%s\\t%s\\n' % (time, queued_tasks))\r\n queued_tasks_file.close()", "def postpurge(self):\n step_name = 'Post-purge'\n width = self.times[5]\n position = self.positions[5]\n print('Postpurge', width)\n self.db_poll(step_name)\n self.throttle_valve_set(position)\n time.sleep(width)\n self.settings['steps_taken'] += 1", "def online_quota(self):\r\n return self.max_contributions - self.num_tickets_total", "def verify_avail_space(self, pool, project, share, size):\n self.verify_project(pool, project)\n avail = self.get_project_stats(pool, project)\n if avail < size:\n exception_msg = (_('Error creating '\n 'share: %(share)s on '\n 'pool: %(pool)s. '\n 'Not enough space.')\n % {'share': share,\n 'pool': pool})\n raise exception.ShareBackendException(msg=exception_msg)", "def action_space_size(self) -> int:\n pass", "def get_free_space(dirname):\n return psutil.disk_usage(dirname).free", "def check_disk_free_space_reserved(self):\n if self.skip_disk_space_check:\n return True\n disk_partition_size = util.disk_partition_size(self.outfile_dir)\n free_disk_space = util.disk_partition_free(self.outfile_dir)\n free_space_factor = self.free_space_reserved_percent / 100\n free_space_reserved = disk_partition_size * free_space_factor\n if free_disk_space < free_space_reserved:\n raise OSCError(\n \"NOT_ENOUGH_SPACE\",\n {\n \"need\": util.readable_size(free_space_reserved),\n \"avail\": util.readable_size(free_disk_space),\n },\n )", "def get_available_space(self):\n return self.maxsize - len(self)", "def _publish_stats(self):\n if self._stat_publish_event is not None:\n self._stat_publish_event.cancel()\n\n topic = LOGGER(subtopic=self._publish_topic + \"/status/cpu\")\n\n points = {}\n\n for k, v in psutil.cpu_times_percent().__dict__.items():\n points['times_percent/' + k] = {'Readings': v,\n 'Units': 'double'}\n\n points['percent'] = {'Readings': psutil.cpu_percent(),\n 'Units': 'double'}\n try:\n self.vip.pubsub.publish('pubsub', topic.format(), message=points)\n\n except Exception as e:\n _log.warn(\"Failed to publish to topic {}\".format(topic.format()))\n finally:\n # The stats publisher publishes both to the local bus and the vc\n # bus the platform specific topics.\n next_update_time = self._next_update_time(\n seconds=self._stats_publish_interval)\n\n self._stats_publish_event = self.core.schedule(\n next_update_time, self._publish_stats)" ]
[ "0.61439735", "0.611649", "0.59722584", "0.59080696", "0.5887496", "0.5801403", "0.57203335", "0.55763495", "0.5572963", "0.55633366", "0.55046725", "0.54344904", "0.5423423", "0.5421534", "0.5417115", "0.5363897", "0.53438485", "0.53435826", "0.5342639", "0.5336582", "0.53083354", "0.5288352", "0.52743757", "0.52721274", "0.5250163", "0.5233155", "0.5224277", "0.522035", "0.5207898", "0.5205279" ]
0.68613786
0
Fetch data from RRD archive for given period of time.
def _fetch_data(self, rrdObject, startTime, endTime): #print rrdObject if not path.exists(rrdObject): raise Exception("File not exists: %s" % rrdObject) #print "%s - %s" % (startTime, endTime) rrd_data = None try: rrd_data = rrdtool.fetch(str(rrdObject), "AVERAGE", "--start", str(startTime), "--end", str(endTime)) except Exception as err: LOG.error("fetch exc %s | %s", err, rrdObject) return RrdData(info=rrd_data[0], additional=rrd_data[1], series=rrd_data[2])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetchOHLC(ticker,interval = \"minute\",duration=4):\r\n data = pd.DataFrame(kite.historical_data(ticker,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))\r\n data.date =data.date.map(lambda t: t.strftime('%Y-%m-%d %H:%M'))\r\n return data", "def fetch_data(self, from_date: float, to_date: float, processing_command: Optional[str] = None) -> None:\n if from_date >= to_date:\n logger.error(f\"Cannot fetch data for invalid data range, from date={from_date} and to date={to_date}\")\n return\n\n # Archiver expects timestamps to be in utc by default\n from_dt = datetime.utcfromtimestamp(from_date)\n to_dt = datetime.utcfromtimestamp(to_date)\n\n # Put the dates into the form expected by the archiver in the request url, see here for more details:\n # http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()\n from_date_str = from_dt.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")[:-3] + \"Z\"\n to_date_str = to_dt.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")[:-3] + \"Z\"\n\n base_url = os.getenv(\"PYDM_ARCHIVER_URL\")\n if base_url is None:\n logger.error(\"Environment variable: PYDM_ARCHIVER_URL must be defined to use the archiver plugin, for \"\n \"example: http://lcls-archapp.slac.stanford.edu\")\n return\n\n url_string = f\"{base_url}/retrieval/data/getData.json?{self.address}&from={from_date_str}&to={to_date_str}\"\n if processing_command:\n url_string = url_string.replace(\"pv=\", \"pv=\" + processing_command + \"(\", 1)\n url_string = url_string.replace(\"&from=\", \")&from=\", 1)\n\n request = QNetworkRequest(QUrl(url_string))\n # This get call is non-blocking, can be made in parallel with others, and when the results are ready they\n # will be delivered to the data_request_finished method below via the \"finished\" signal\n self.network_manager.get(request)", "def query_radar_data(station,product,start,\n minute_delta=0,hour_delta=0,day_delta=0):\n \n end = start+timedelta(days=day_delta, minutes=minute_delta, hours=hour_delta)\n \n print(f\"query start time:{start}\")\n print(f\"query end time:{end}\")\n rs = RadarServer('http://thredds-aws.unidata.ucar.edu/thredds/radarServer/nexrad/level2/S3/')\n query = rs.query()\n rs.validate_query(query)\n print(rs.stations[station])\n\n query.stations(station).time_range(start,end).variables(product)\n catalog = rs.get_catalog(query)\n file_station = str(catalog.datasets[0])\n file_station = file_station[0:4]\n \n file_list = list(catalog.datasets.values())\n for t in file_list: print(t)\n LatLonBox = [rs.stations[station].longitude-3,rs.stations[station].longitude+3,\n rs.stations[station].latitude-2,rs.stations[station].latitude+2]\n \n return file_list,LatLonBox", "def fetchOHLC(ticker,interval,duration):\r\n instrument = instrumentLookup(instrument_df,ticker)\r\n data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))\r\n data.set_index(\"date\",inplace=True)\r\n return data", "def download_data(origin_time, net, sta, loc, chan):\n \n dataDir_get = '/import/netapp-m-02-bay200/mseed_online/archive/'\n \n fileName = \".\".join((net, sta, \".\" + chan + \".D\",\n origin_time.strftime(\"%Y.%j\")))\n filePath = os.path.join(dataDir_get, origin_time.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName)\n o_time2 = origin_time + 86400\n fileName2 = \".\".join((net, sta, \".\" + chan + \".D\",\n o_time2.strftime(\"%Y.%j\")))\n filePath2 = os.path.join(dataDir_get, o_time2.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName2)\n\n if 
os.path.isfile(filePath):\n if origin_time.hour > 21:\n st = Stream()\n st.extend(read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600))\n st.extend(read(filePath2, \n starttime = UTCDateTime(o_time2.year, o_time2.month, \n o_time2.day, 0, 0),\n endtime = origin_time + 3 * 3600))\n st.merge(method=-1)\n else:\n st = read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600)\n else:\n print \"++++ cannot find the following file: \\n %s \\n++++\" % filePath\n\n if not st:\n raise RotationalProcessingException('Data not available for this'\n ' event...')\n st.trim(starttime=origin_time-180, endtime=origin_time+3*3600)\n\n print 'Download of', st[0].stats.station, st[0].stats.channel, \\\n 'data successful!'\n\n return st", "def fetchOHLC(ticker,interval,duration):\n instrument = instrumentLookup(instrument_df,ticker)\n data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))\n data.set_index(\"date\",inplace=True)\n return data", "def fetch(self, daterange=(datetime.now() - timedelta(1), datetime.now())):\n cursor = self.conn.cursor()\n sql = 'SELECT measure_dt, ping, download, upload FROM speedlogs ' + \\\n ' WHERE measure_dt BETWEEN ? AND ?'\n cursor.execute(sql, daterange)\n return cursor.fetchall()", "def load_mta_archived_feed(feed='gtfs', timestamp='2014-09-17-09-31'):\n import requests\n\n return requests.get(\"https://datamine-history.s3.amazonaws.com/{0}-{1}\".format(feed, timestamp))", "def fetch_rda(year, month):\n props = get_properties()\n req = requests.post(\n \"https://rda.ucar.edu/cgi-bin/login\",\n dict(\n email=props[\"rda.user\"],\n passwd=props[\"rda.password\"],\n action=\"login\",\n ),\n timeout=30,\n )\n if req.status_code != 200:\n LOG.info(\"RDA login failed with code %s\", req.status_code)\n return\n cookies = req.cookies\n\n days = [\"0109\", \"1019\"]\n lastday = (\n datetime.date(year, month, 1) + datetime.timedelta(days=35)\n ).replace(day=1) - datetime.timedelta(days=1)\n days.append(f\"20{lastday.day}\")\n for day in days:\n uri = (\n \"https://data.rda.ucar.edu/ds608.0/3HRLY/\"\n f\"{year}/NARRsfc_{year}{month:02.0f}_{day}.tar\"\n )\n req = exponential_backoff(\n requests.get, uri, timeout=30, cookies=cookies, stream=True\n )\n tmpfn = f\"{TMP}/narr.tar\"\n with open(tmpfn, \"wb\") as fh:\n for chunk in req.iter_content(chunk_size=1024):\n if chunk:\n fh.write(chunk)\n process(tmpfn)\n os.unlink(tmpfn)\n\n # Now call coop script\n subprocess.call(\n [\n \"python\",\n \"/opt/iem/scripts/climodat/narr_solarrad.py\",\n f\"{year}\",\n f\"{month}\",\n ]\n )\n subprocess.call(\n [\n \"python\",\n \"/opt/iem/scripts/iemre/merge_narr.py\",\n f\"{year}\",\n f\"{month}\",\n ]\n )", "def _fetch(self):\n self._data = self._get(self.url)\n\n if self._data['released_errata'] is not None:\n self._released_errata = Erratum(errata_id=self._data[\n 'released_errata']['id'])\n\n for errata_dict in self._data['all_errata']:\n errata = Erratum(errata_id=errata_dict['id'])\n self._all_errata.append(errata)\n\n self._signed_rpms = self._data.get('rpms_signed')\n\n for et_file in self._data['files']:\n self._files.append(et_file['path'])", "def fetch_data(t0, t1, stock_symbol):\n \n # Alpha vantage timeseries object to fetch data (value every 5 min) in pandas-format\n ts = TimeSeries(API_KEY, output_format=\"pandas\")\n data, _ = ts.get_intraday(symbol=stock_symbol, interval=\"5min\", outputsize=\"full\")\n\n # Cut current time window data\n current_data = 
data[str(t0):str(t1)]\n\n return current_data", "def testIrradianceFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'irradiance',\n orderBy = [timeCol, 'sensor_id'],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')", "def get_ooni_data(range):\n\n last_ooni_report_generated = get_sys_info(request='last_ooni_report_generated', update=True)\n\n configs = get_configs()\n bucket = 'ooni-data-eu-fra'\n \n session = boto3.Session(profile_name=configs['profile'])\n client = session.client('s3')\n \n #get date range\n now = datetime.datetime.now()\n then = now - datetime.timedelta(days=range)\n delta = datetime.timedelta(days=1)\n\n logger.debug(f\"Now: {now} Then: {then}\")\n\n engine = db.create_engine(configs['database_url'])\n connection = engine.connect()\n metadata = db.MetaData()\n\n ooni_reports = db.Table('ooni_reports', metadata, autoload=True, autoload_with=engine)\n\n file_list = []\n logger.debug(\"Getting OONI file list from S3...\")\n while then <= now:\n date_str = then.strftime('%Y%m%d')\n file_date = 'raw/' + date_str\n then += delta\n\n date_report_list = client.list_objects_v2(\n Bucket=bucket,\n Prefix=file_date\n )\n\n for s3_file in date_report_list['Contents']:\n if ('webconnectivity' in s3_file['Key']) and ('jsonl' in s3_file['Key']):\n file_list.append(s3_file['Key'])\n\n\n # Process Files\n domain_list, mirror_list = lists()\n\n matching_domain_data = {}\n for domain in domain_list:\n matching_domain_data[domain['name']] = []\n\n for file in file_list:\n file_parts = file.split('/')\n local_name = ('-').join(file_parts)\n local_file_path = configs['local_tmp'] + '/' + local_name\n\n logger.debug(f\"Downloading to: {local_file_path}\")\n with open(local_file_path, 'wb') as file_data:\n client.download_fileobj(bucket, file, file_data)\n\n data = []\n \n with gzip.open(local_file_path) as raw_file:\n line = raw_file.readline()\n json_data = json.loads(line)\n data.append(json_data)\n\n os.remove(local_file_path) \n \n for jdata in data:\n logger.debug(f\"input: {jdata['input']}\")\n domain_name = False\n for domain in domain_list:\n match = site_match(domain['name'], jdata['input'])\n if match:\n domain_name = domain['name']\n domain_id = domain['id']\n if not domain_name:\n logger.debug(\"No match.\")\n continue\n \n date_reported = datetime.datetime.strptime(jdata['measurement_start_time'], '%Y-%m-%d %H:%M:%S')\n matching_domain_data[domain_name] = {\n 'domain_id': domain_id,\n 'url_accessed': jdata['input'],\n 'country': jdata['probe_cc'],\n 'blocked': jdata['test_keys']['blocking'],\n 'dns_consistency': jdata['test_keys']['dns_consistency'],\n 'date_reported': date_reported\n } \n \n for key in jdata['test_keys']['requests']:\n for s_key in key:\n if s_key == 'failure':\n matching_domain_data[domain_name]['failure'] = key['failure']\n\n print(f\"Matching Domain Data for {domain_name}:{matching_domain_data[domain_name]}\")\n # Make report\n ooni_report_data = matching_domain_data[domain_name]\n\n insert = ooni_reports.insert().values(**ooni_report_data)\n result = connection.execute(insert)\n\n return", "def load_archive(self, from_date, to_date=None):\r\n return self.get_or_create_archive().fetch(from_date, to_date)", "def get_data(symbol_id='BTC', period_id='1DAY', request_limit=1000, tdelta=30):\n now = datetime.utcnow()\n month = timedelta(days=tdelta)\n past_month = (now - month).isoformat()\n\n parameters = 
{'symbol_id': symbol_id, 'period_id': period_id, 'time_start': past_month[:-3], 'limit':request_limit}\n response = requests.get(HISTORY_URL, params=parameters, headers=header)\n\n while response.status_code != 200:\n time.sleep(5)\n response = requests.get(HISTORY_URL, params=parameters, headers=header)\n \n data = response.json()\n \n # this is a commnet\n csv_headers = ['time_period_start', 'time_period_end', 'price_high', 'price_low', 'price_close', 'price_open', 'trades_count', \n 'volume_traded', 'time_open', 'time_close']\n\n\n with open(str(datafolder / f'{symbol_id}_{tdelta}_day.csv'), 'w', newline='') as f:\n writer = csv.DictWriter(f, csv_headers)\n writer.writeheader()\n for item in data:\n writer.writerow(item)", "def fetch(self, from_time, until_time=None):\r\n until_time = until_time or datetime.now()\r\n time_info, values = whisper.fetch(self.path,\r\n from_time.strftime('%s'),\r\n until_time.strftime('%s'))\r\n # build up a list of (timestamp, value)\r\n start_time, end_time, step = time_info\r\n current = start_time\r\n times = []\r\n while current <= end_time:\r\n times.append(current)\r\n current += step\r\n return zip(times, values)", "def get_radar_data(file_list,index=0):\n ds = file_list[index]\n data = Dataset(ds.access_urls['CdmRemote'])\n\n radar_time = ((data.time_coverage_start).replace('T',' ')).replace('Z','')\n date_time_obj = datetime.strptime(radar_time, '%Y-%m-%d %H:%M:%S')\n\n print('Date:', date_time_obj.date())\n print('Time:', date_time_obj.time())\n print('Date-time:', date_time_obj)\n title_time = \"{0:%d %b %Y %H%MZ}\".format(date_time_obj)\n file_time = \"{0:%Y_%m_%d_%H%MZ}\".format(date_time_obj)\n print(title_time,file_time)\n #print(data)\n return data, title_time, file_time", "def fetch(pair, time_period=None, interval=None):\n\n if time_period is None:\n url = f'http://platotradeinfo.silencatech.com/main/dashboard/ajaxgetetradedata'\n response = requests.get(url, params={'pair': pair})\n return response.json()['result']\n elif time_period is not None and interval is not None:\n url = 'http://platotradeinfo.silencatech.com/main/dashboard/ajaxgetetradedataforperiod'\n response = requests.get(url, params={'pair': pair,\n 'from': time_period['from'],\n 'to': time_period['to'],\n 'period': interval})\n return response.json()['data']", "def getnewdata():\n try:\n os.remove(cachepath)\n except os.error:\n pass\n tdelta = int(EPGHOURS)*60*60\n now = time.time()\n later = now + tdelta\n # 2020-03-24%2021%3A00%3A00.000%2B0000\n starttime = urllib.parse.quote(datetime.fromtimestamp(now).\n strftime('%Y-%m-%d %H:00:00.000+0000'))\n # 2020-03-25%2005%3A00%3A00.000%2B0000\n stoptime = urllib.parse.quote(datetime.fromtimestamp(later).\n strftime('%Y-%m-%d %H:00:00.000+0000'))\n url = \"http://api.pluto.tv/v2/channels?start=\" + starttime + \"&stop=\" + stoptime\n\n if debugmode:\n logging.debug(url)\n\n logging.debug(\"Using api.pluto.tv, writing %s.\", CACHEFILE)\n\n try:\n wget.download(url, out=cachepath)\n except IOError:\n logging.error(\"There was an issue downloading EPG data. 
Exiting.\")\n sys.exit()", "def data_pull_s3(self):\n year = self.month_year[0]\n month = self.month_year[1]\n self.s3 = boto3.resource('s3',aws_access_key_id=self.creds_data['key_id'],\n aws_secret_access_key=self.creds_data['key_access'])\n bucket = self.s3.Bucket('himatdata')\n home = os.getcwd()\n file_path = os.path.join(*[home, 'Trmm/', self.output_folder, year + '_' + month])\n print(file_path)\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n for obj in bucket.objects.filter(Delimiter='', Prefix='Trmm/{}{}_{}'.format(self.output_folder, year, month)):\n if obj.key.endswith('.nc4'):\n bucket.download_file(obj.key,os.path.join(os.path.join(home, obj.key)))\n logging.info(\"Done with Year Month: %s\", month_year)", "def test_fetch():\n service = WebService(TestFactory())\n query = service.parse(\n parse_qs(\n \"id=BOU&starttime=2016-06-06\"\n \"&endtime=2016-06-07&elements=H,E,Z,F&sampling_period=60\"\n \"&format=iaga2002&type=variation\"\n )\n )\n timeseries = service.fetch(query)\n assert_equal(isinstance(timeseries, Stream), True)", "def fetch(self,etime,lat,lon,timewindow=20,radius=100,limit=None):\n utctime = etime\n #get the most likely event time and ID for the event we input\n eid,gtime = self._check_catalog(etime,lat,lon,timewindow,radius)\n if eid is None:\n msg = 'Could not find this event in the GeoNet earthquake catalog. Returning.'\n raise StrongMotionFetcherException(msg)\n\n #set up the ftp url for this day and month\n #[MONTH] should be in the format mm_Mon (04_Apr, 05_May, etc.)\n neturl = GEOBASE.replace('[YEAR]',str(utctime.year))\n monthstr = utctime.strftime('%m_%b')\n neturl = neturl.replace('[MONTH]',monthstr)\n urlparts = urllib.parse.urlparse(neturl)\n ftp = ftplib.FTP(urlparts.netloc)\n ftp.login() #anonymous\n ftp.cwd(urlparts.path)\n\n \n #get the current local directory, then cd to the desired raw folder\n cwd = os.getcwd()\n os.chdir(self._rawfolder)\n self._datafiles = []\n\n #create the event folder name from the time we got above\n fname = gtime.strftime('%Y-%m-%d_%H%M%S')\n try:\n ftp.cwd(fname)\n except:\n msg = 'Could not find an FTP data folder called \"%s\". Returning.' 
% (urllib.parse.urljoin(neturl,fname))\n raise StrongMotionFetcherException(msg)\n\n try:\n #actually retrieve the data files\n volumes = []\n dirlist = ftp.nlst()\n for volume in dirlist:\n if volume.startswith('Vol'):\n ftp.cwd(volume)\n if 'data' not in ftp.nlst():\n ftp.cwd('..')\n continue\n\n ftp.cwd('data')\n flist = ftp.nlst()\n for ftpfile in flist:\n if not ftpfile.endswith('V1A'):\n continue\n localfile = os.path.join(os.getcwd(),ftpfile)\n if localfile in self._datafiles:\n continue\n self._datafiles.append(localfile)\n f = open(localfile,'wb')\n sys.stderr.write('Retrieving remote file %s...\\n' % ftpfile)\n ftp.retrbinary('RETR %s' % ftpfile,f.write)\n f.close()\n if limit is not None and len(self._datafiles) >= limit:\n break\n ftp.cwd('..')\n ftp.cwd('..')\n if limit is not None and len(self._datafiles) >= limit:\n break\n except Exception as e:\n pass\n finally:\n ftp.quit()\n os.chdir(cwd)\n return", "def test_device_readings_get_past_dates(self):\n request = self.client().get(f'/devices/{self.device_uuid}/readings/'\n f'?start={int(self.setup_time - 75)}'\n f'&end={int(self.setup_time + 25)}')\n\n self.assertEqual(len(request.json), 2)", "def get_umatched_rr_data(self, day, qry=\"\"):\n query = '''\n SELECT\n bs.\"CohortId\",\n bs.\"AppointmentId\",\n bs.\"Barcode\" as \"BloodSampleBarcode\",\n bs.\"Comments\",\n bs.\"SiteNurseEmail\",\n bs.\"CreatedAt\",\n bs.\"State\",\n mr.\"id\" as \"ManifestId\",\n mr.\"Barcode\" as \"ManifestBarcode\",\n mr.\"CohortId\" as \"ManifestCohortId\",\n mr.\"Site\",\n mr.\"Visit\",\n mr.\"Room\",\n mr.\"CollectionDateTime\",\n mr.\"Comments\" as \"ManifestComments\",\n rr.\"id\" as \"ReceiptId\",\n rr.\"Barcode\" as \"ReceiptBarcode\",\n rr.\"Clinic\",\n rr.\"DateTimeTaken\",\n rr.\"TissueSubType\",\n rr.\"ReceivedDateTime\",\n rr.\"Volume\",\n rr.\"VolumeUnit\",\n rr.\"SampleId\",\n rr.\"Comments\" as \"ReceiptComments\"\n FROM blood_sample_receiptrecords as rr\n inner join blood_sample_manifestrecords as mr on \\\n rr.\"Barcode\"=mr.\"Barcode\"\n inner join blood_sample_bloodsample as bs on \\\n bs.\"Barcode\"=mr.\"Barcode\"\n WHERE rr.\"SampleId\" not in (\n SELECT\n \"pr\".\"ParentId\"\n FROM blood_sample_processedreport as pr\n join blood_sample_receiptrecords as rr on \\\n pr.\"ParentId\"=rr.\"SampleId\"\n join blood_sample_manifestrecords as mr on \\\n rr.\"Barcode\"=mr.\"Barcode\"\n join blood_sample_bloodsample as bs on \\\n bs.\"Barcode\" = mr.\"Barcode\"\n )\n AND bs.\"State\" = '0'\n AND mr.\"CollectionDateTime\" BETWEEN '{}' AND '{}'{}\n order by bs.\"Barcode\";\n '''.format(day.replace(hour=0, minute=0, second=0,\n microsecond=0).strftime(\"%Y-%m-%d %H:%M:%S\"),\n day.replace(hour=23, minute=59, second=59,\n microsecond=0).strftime(\"%Y-%m-%d %H:%M:%S\"), qry)\n\n with connection.cursor() as cursor:\n cursor.execute(query)\n columns = [col[0] for col in cursor.description]\n data = [\n dict(zip(columns, row))\n for row in cursor.fetchall()\n ]\n return data", "def pull(self, period):\n # Compile the regex expressions we'll use to parse the title text\n self.identity_regex = re.compile(r\"(\\d{4} \\d{3} \\d{3})\\s{2,}(\\S+)\\s{2,}(\\d{3} \\d{3} \\d{3} *\\S*)\")\n self.ats_regex = re.compile(r\"ATS REFERENCE: (\\S*)\")\n self.municipality_regex = re.compile(r\"MUNICIPALITY: (.*)\")\n self.reference_regex = re.compile(r\"REFERENCE NUMBER: (.*?)\\-{80}\", re.DOTALL)\n self.payday_regex = re.compile(r\"(\\-{80}).*(\\-{80})(.*)\", re.DOTALL)\n\n # Filter the dataframe by date and retrieve each title\n df = self.journal\n df = 
df[df['Registration Date'] >= period]\n\n df.to_pickle('run/{}.journal.pkl'.format(self.runtime))\n\n click.echo('Journal constructed and saved with timestamp {}'.format(self.runtime))\n\n # Set up structure for target DataFrame\n self.dataframe = pd.DataFrame(\n columns=[\n 'linc',\n 'short_legal',\n 'title_number',\n 'ats_reference',\n 'municipality',\n 'registration',\n 'registration_date',\n 'document_type',\n 'sworn_value',\n 'consideration',\n 'condo'\n ], index=df.index\n )\n\n with click.progressbar(df.iterrows(), label='Pulling basic title data', length=len(df)) as d:\n for index, row in d:\n try:\n payload = self.retrieve_title(index)\n self.dataframe.loc[index, 'linc'] = payload['linc']\n self.dataframe.loc[index, 'short_legal'] = payload['short_legal']\n self.dataframe.loc[index, 'title_number'] = payload['title_number']\n self.dataframe.loc[index, 'ats_reference'] = payload['ats_reference']\n self.dataframe.loc[index, 'municipality'] = payload['municipality']\n self.dataframe.loc[index, 'registration'] = payload['registration']\n self.dataframe.loc[index, 'registration_date'] = payload['date']\n self.dataframe.loc[index, 'document_type'] = payload['document_type']\n self.dataframe.loc[index, 'sworn_value'] = payload['value']\n self.dataframe.loc[index, 'consideration'] = payload['consideration']\n self.dataframe.loc[index, 'condo'] = payload['condo']\n except TypeError:\n pass\n\n self.dataframe['registration_date'] = pd.to_datetime(self.dataframe['registration_date'])\n self.dataframe['sworn_value'] = self.dataframe['sworn_value'].astype(float)\n self.dataframe['consideration'] = self.dataframe['consideration'].astype(float)\n self.dataframe['condo'] = self.dataframe['condo'].fillna(False).astype(bool)\n\n self.dataframe.to_pickle('run/{}.dataframe.pkl'.format(self.runtime))\n click.echo('Dataframe constructed and saved with timestamp {}'.format(self.runtime))\n\n return self.dataframe", "def testWeatherFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'weather',\n orderBy = [timeCol],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')", "def pull(self, domain=\"data.cityofchicago.org\",dataset_id=\"crimes\",\\\n token=\"ZIgqoPrBu0rsvhRr7WfjyPOzW\",store=True, out_fname=\"pull_df.p\",\n pull_all=False):\n\n pdb.set_trace()\n client = Socrata(domain, token)\n if domain == \"data.cityofchicago.org\" and dataset_id==\"crimes\":\n self._coord1 = \"latitude\"\n self._coord2 = \"longitude\"\n self._EVENT = \"primary_type\"\n\n if pull_all:\n new_data = client.get(dataset_id)\n # pull_df = pd.DataFrame(new_data).dropna(\\\n # subset=[self._coord1, self._coord2, self._DATE, self._EVENT],\\\n # axis=1).sort_values(self._DATE)\n # NOTE: running into columns encoded in unicode not accepting subset\n # specific filtering of NaN's by column error\n # columns defined in subset aren't columns in the pulled DataFrame\n pull_df = pd.DataFrame(new_data).dropna().sort_values(self._DATE)\n self._logdf = pull_df\n else:\n self._logdf.sort_values(self._DATE)\n pull_after_date = \"'\"+str(self._logdf[self._DATE].iloc[-1]).replace(\\\n \" \", \"T\")+\"'\"\n new_data = client.get(dataset_id, where=\\\n (\"date > \"+pull_after_date))\n if domain == \"data.cityofchicago.org\" and dataset_id==\"crimes\":\n self._DATE = \"date\"\n # pull_df = pd.DataFrame(new_data).dropna(\\\n # subset=[self._coord1, self._coord2, self._DATE, self._EVENT],\\\n # 
axis=1).sort_values(self._DATE)\n pull_df = pd.DataFrame(new_data).dropna().sort_values(self._DATE)\n self._logdf = self._logdf.append(pull_df)\n\n if store:\n assert out_fname is not None, \"Out filename not specified\"\n self._logdf.to_pickle(out_fname)\n\n return", "def get_archer(self):\n\n # Format URL\n url = f'http://tropic.ssec.wisc.edu/real-time/adt/archive{self.year}/{self.id[2:4]}{self.id[1]}-list.txt'\n\n # Read in data\n a = requests.get(url).content.decode(\"utf-8\")\n content = [[c.strip() for c in b.split()] for b in a.split('\\n')]\n # data = [[dt.strptime(line[0]+'/'+line[1][:4],'%Y%b%d/%H%M'),-1*float(line[-4]),float(line[-5])] for line in content[-100:-3]]\n archer = {}\n for name in ['time', 'lat', 'lon', 'mnCldTmp']:\n archer[name] = []\n for i, line in enumerate(content):\n try:\n ndx = ('MWinit' in line[-1])\n archer['time'].append(dt.strptime(\n line[0] + '/' + line[1][:4], '%Y%b%d/%H%M'))\n archer['lat'].append(float(line[-5 - ndx]))\n archer['lon'].append(-1 * float(line[-4 - ndx]))\n archer['mnCldTmp'].append(float(line[-9 - ndx]))\n except:\n continue\n self.archer = archer\n\n return archer", "def run(self) -> list:\n logger.debug('Fetching date %s', self._day.strftime('%Y/%m/%d'))\n \n regions = [r() for r in regions_list]\n air_quality = list()\n \n # fetch air quality of each region\n for r in regions:\n r.fetch_air_quality(self._day)\n \n # gather results from all regions\n for r in regions:\n # wait until region has fetched his data\n r.wait_for_quality()\n logging.info('Fetched region:%s for day:%s', r.name, self._day)\n air_quality.append({\n 'name': r.name,\n 'provinces': [\n {'name': x.name, 'short': x.short_name, 'quality': x.quality.asdict()} \n for x in r.provinces]\n })\n\n self._fetcher.fetched_result(self._day, air_quality)", "def get_data(\n begin_date, end_date, stationid, product, datum=None, bin_num=None,\n interval=None, units='metric', time_zone='gmt'):\n # Convert dates to datetime objects so deltas can be calculated\n begin_datetime = parse_known_date_formats(begin_date)\n end_datetime = parse_known_date_formats(end_date)\n delta = end_datetime - begin_datetime\n\n # If the length of our data request is less or equal to 31 days,\n # we can pull the data from API in one request\n if delta.days <= 31:\n data_url = build_query_url(\n begin_datetime.strftime(\"%Y%m%d %H:%M\"),\n end_datetime.strftime(\"%Y%m%d %H:%M\"),\n stationid, product, datum, bin_num, interval, units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is less than 365 days\n # AND the product is hourly_height or high_low, we can pull data directly\n # from the API in one request\n elif delta.days <= 365 and (\n product == 'hourly_height' or product == 'high_low'):\n data_url = build_query_url(\n begin_date, end_date, stationid, product, datum, bin_num, interval,\n units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is greater than 365 days\n # AND the product is hourly_height or high_low, we need to load data from\n # the API in365 day blocks.\n elif product == 'hourly_height' or product == 'high_low':\n # Find the number of 365 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_365day_blocks = int(math.floor(delta.days / 365))\n\n df = pd.DataFrame([]) # Empty dataframe for data from API requests\n\n # Loop through in 365 day blocks,\n # adjust the 
begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n for i in range(num_365day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 365))\n end_datetime_loop = begin_datetime_loop + timedelta(days=365)\n\n # If end_datetime_loop of the current 365 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build url for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_365day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # If the length of the user specified data request is greater than 31 days\n # for any other products, we need to load data from the API in 31 day\n # blocks\n else:\n # Find the number of 31 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_31day_blocks = int(math.floor(delta.days / 31))\n\n df = pd.DataFrame([]) # Empty dataframe for data from API requests\n\n # Loop through in 31 day blocks,\n # adjust the begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n for i in range(num_31day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 31))\n end_datetime_loop = begin_datetime_loop + timedelta(days=31)\n\n # If end_datetime_loop of the current 31 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build URL for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_31day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # Rename output dataframe columns based on requested product\n # and convert to useable data types\n if product == 'water_level':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'q': 'QC', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'QC', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'hourly_height':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'high_low':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'ty': 'high_low',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Separate to high and low dataframes\n df_HH = df[df['high_low'] == \"HH\"].copy()\n df_HH.rename(columns={'date_time': 'date_time_HH',\n 'water_level': 'HH_water_level'},\n 
inplace=True)\n\n df_H = df[df['high_low'] == \"H \"].copy()\n df_H.rename(columns={'date_time': 'date_time_H',\n 'water_level': 'H_water_level'},\n inplace=True)\n\n df_L = df[df['high_low'].str.contains(\"L \")].copy()\n df_L.rename(columns={'date_time': 'date_time_L',\n 'water_level': 'L_water_level'},\n inplace=True)\n\n df_LL = df[df['high_low'].str.contains(\"LL\")].copy()\n df_LL.rename(columns={'date_time': 'date_time_LL',\n 'water_level': 'LL_water_level'},\n inplace=True)\n\n # Extract dates (without time) for each entry\n dates_HH = [x.date() for x in pd.to_datetime(df_HH['date_time_HH'])]\n dates_H = [x.date() for x in pd.to_datetime(df_H['date_time_H'])]\n dates_L = [x.date() for x in pd.to_datetime(df_L['date_time_L'])]\n dates_LL = [x.date() for x in pd.to_datetime(df_LL['date_time_LL'])]\n\n # Set indices to datetime\n df_HH['date_time'] = dates_HH\n df_HH.index = df_HH['date_time']\n df_H['date_time'] = dates_H\n df_H.index = df_H['date_time']\n df_L['date_time'] = dates_L\n df_L.index = df_L['date_time']\n df_LL['date_time'] = dates_LL\n df_LL.index = df_LL['date_time']\n\n # Remove flags and combine to single dataframe\n df_HH = df_HH.drop(\n columns=['flags', 'high_low'])\n df_H = df_H.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_L = df_L.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_LL = df_LL.drop(columns=['flags', 'high_low',\n 'date_time'])\n\n # Keep only one instance per date (based on max/min)\n maxes = df_HH.groupby(df_HH.index).HH_water_level.transform(max)\n df_HH = df_HH.loc[df_HH.HH_water_level == maxes]\n maxes = df_H.groupby(df_H.index).H_water_level.transform(max)\n df_H = df_H.loc[df_H.H_water_level == maxes]\n mins = df_L.groupby(df_L.index).L_water_level.transform(max)\n df_L = df_L.loc[df_L.L_water_level == mins]\n mins = df_LL.groupby(df_LL.index).LL_water_level.transform(max)\n df_LL = df_LL.loc[df_LL.LL_water_level == mins]\n\n df = df_HH.join(df_H, how='outer')\n df = df.join(df_L, how='outer')\n df = df.join(df_LL, how='outer')\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(\n ['date_time', 'date_time_HH', 'date_time_H', 'date_time_L',\n 'date_time_LL'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df.index)\n df['date_time_HH'] = pd.to_datetime(df['date_time_HH'])\n df['date_time_H'] = pd.to_datetime(df['date_time_H'])\n df['date_time_L'] = pd.to_datetime(df['date_time_L'])\n df['date_time_LL'] = pd.to_datetime(df['date_time_LL'])\n\n elif product == 'predictions':\n if interval == 'h':\n # Rename columns for clarity\n df.rename(columns={'t': 'date_time', 'v': 'predicted_wl'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n\n elif interval == 'hilo':\n # Rename columns for clarity\n df.rename(columns={'t': 'date_time', 'v': 'predicted_wl',\n 'type': 'hi_lo'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'hi_lo'])\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'currents':\n # Rename columns for clarity\n df.rename(columns={'b': 'bin', 'd': 'direction',\n 's': 'speed', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time 
strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'wind':\n # Rename columns for clarity\n df.rename(columns={'d': 'dir', 'dr': 'compass',\n 'f': 'flags', 'g': 'gust_spd',\n 's': 'spd', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags', 'compass'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_pressure':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_press'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'water_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'water_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n # Set datetime to index (for use in resampling)\n df.index = df['date_time']\n df = df.drop(columns=['date_time'])\n\n # Handle hourly requests for water_level and currents data\n if (product == 'water_level') | (product == 'currents') & (\n interval == 'h'):\n df = df.resample('H').first() # Only return the hourly data\n\n return df" ]
[ "0.59084827", "0.5897862", "0.58881354", "0.5841368", "0.5826922", "0.58209974", "0.5776042", "0.5727331", "0.56761867", "0.56529486", "0.56008005", "0.5581093", "0.5486558", "0.54823905", "0.54436505", "0.5440745", "0.5436888", "0.5407445", "0.53838426", "0.5374633", "0.5373649", "0.53530437", "0.53300345", "0.53162104", "0.5310356", "0.5287134", "0.5284008", "0.5274258", "0.52734876", "0.5272862" ]
0.6730135
0
Download genotype data and save the output in the .data dir
def download_genotype_data(): print("downloading genotype data") download_from_url(PSAM_PATH, dst=f"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam", desc="downloading psam") download_from_url(PVAR_PATH, dst=f"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst", desc="downloading pvar") download_from_url(PGEN_PATH, dst=f"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst", desc="downloading pgen") decompress_genotype_file(f"{MERGED_GENOTYPE_FILE}.pvar") decompress_genotype_file(f"{MERGED_GENOTYPE_FILE}.pgen")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_data(self):\n # Command to get the download data\n pass", "def download_proteome(proteome_id, data_dir, domain=\"Eukaryota\"):\n base = (\"ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/\"\n \"knowledgebase/reference_proteomes\")\n\n url = [base, domain, proteome_id + \".fasta.gz\"]\n outfile = os.path.join(data_dir, proteome_id + \".fasta\")\n\n with closing(request.urlopen(url)) as remote_handle:\n with open(remote_handle, \"rb\") as remote_file:\n mem_file = io.BytesIO(remote_file.read())\n\n with open(outfile, \"w\") as out, gzip.open(mem_file) as gz:\n outfile.write(gz.read())\n\n return outfile", "def download_data():\n urllib.request.urlretrieve('http://cs.iit.edu/~culotta/cs579/a1/edges.txt.gz', 'edges.txt.gz')", "def download_data():\n # Download Unihan meta data for radical-stroke analysis\n os.system(' mkdir Unihan')\n os.system(' curl -O http://unicode.org/Public/UCD/latest/ucd/Unihan.zip')\n os.system(' apt-get -y install unzip')\n os.system(' unzip Unihan.zip -d Unihan/')\n os.system(' rm Unihan.zip')\n\n data_path = 'Unihan/Unihan_RadicalStrokeCounts.txt'\n assert(os.path.isfile(data_path))\n\n return data_path", "def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()", "def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)", "def download_all_data(self) -> None:\n print(\"Download in progress.\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_CHARACTERS\"], \"nextcloud\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_FRAGMENTS\"], \"nextcloud\")\n self.download_data(os.environ[\"HABBAKUK_URL\"], \"generic_url\")\n print(\"Download complete!\")", "def gatherData(data,neat,gen,iter_i,hyp,savePop=False):\n data.gatherData(neat.pop, neat.species)\n\n if savePop is True: # Get a sample pop to play with in notebooks\n global fileName\n pref = output_dir + '/iter_{}'.format(iter_i) + '/gen_' + str(gen).zfill(4)\n import pickle\n with open(pref+'.obj', 'wb') as fp:\n pickle.dump(neat.pop,fp)\n\n return data", "def download(data_type, gs_aoi, main_dir):\n # Get URLs for tiles covered by a polygon:\n # ----------------------------------------\n tiles = get_tile_names(gs_aoi)\n print('Found {} products'.format(len(tiles['tile_names'])))\n\n # Make sure temporary folder for download exists:\n # 
-----------------------------------------------\n dwn_dir = join(main_dir, data_type)\n if not exists(dwn_dir):\n makedirs(dwn_dir)\n\n # Proceed to download:\n # --------------------\n if data_type == 'DTM':\n # DOWNLOAD DTM FILES & UNZIP:\n # ---------------------------\n print('\\nDownloading DTM files:')\n for num, name in enumerate(tiles['dtm_url']):\n print('{} of {}'.format(num+1, len(tiles['dtm_url'])))\n dwn_stat, file_name = download_file(name, dwn_dir)\n print('File {}.'.format(dwn_stat))\n extract_zip(join(dwn_dir, file_name))\n # Delete ZIP file after extraction\n remove(join(dwn_dir, file_name))\n \n # Finished downloading:\n # ---------------------\n out_msg = 'Finished downloading DTM files!'\n \n elif data_type == 'LAZ':\n # DOWNLOAD LAZ FILES:\n # -------------------\n print('\\nDownloading LAZ files:')\n for num, name in enumerate(tiles['laz_url']):\n print('{} of {}'.format(num+1, len(tiles['laz_url'])))\n dwn_stat, _ = download_file(name, dwn_dir)\n print('File {}.'.format(dwn_stat))\n \n # Finished downloading:\n # ---------------------\n out_msg = 'Finished downloading LAZ files!'\n \n else:\n dwn_dir = None\n out_msg = 'Unexpected data_type'\n \n # Output dictionary:\n # ------------------\n out = {'out_msg': out_msg,\n 'out_dir': dwn_dir}\n \n return out", "def download_dataset(self):\n raise NotImplementedError", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()", "def download_glove ():\n # Get the URL ...\n print(\"Downloading https://nlp.stanford.edu/data/glove.6B.zip ...\")\n res = requests.get(\"https://nlp.stanford.edu/data/glove.6B.zip\", stream=True)\n if res.status_code != 200:\n print(\"Could not download the 6B GloVe Dataset! The server responded with code \" + res.status_code + \".\")\n sys.exit(1)\n\n # ... and write it to file\n fp = open(\"data/glove.6B.zip\", \"wb\")\n total_length = int(res.headers.get('content-length'))\n # Thanks again to the internet for this beautiful piece of code <3\n for chunk in tqdm.tqdm(res.iter_content(chunk_size=1024), unit=\"KB\", total=ceil(total_length/1024) + 1):\n if chunk:\n fp.write(chunk)\n fp.flush()\n fp.close()\n print(\"ZIP-file downloaded! 
Extracting ...\")\n with ZipFile(\"data/glove.6B.zip\", \"r\") as zf:\n files = zf.namelist()\n print(\"Members in archive:\")\n print(\"\\n\".join(files))\n\n for file in files:\n if file.endswith(\"glove.6B.300d.txt\"):\n print(\"Extracting member \" + file + \" from archive ...\")\n zf.extract(file)\n break\n \n # Remove the zip file again\n os.remove(\"data/glove.6B.zip\")\n print(\"Successfully extracted GloVe embeddings (300 dimensions) to data directory.\")\n print(\"You can now train the classifier using the GloVe embeddings.\")", "def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)", "def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file", "def download_data():\n url = 'https://www.dropbox.com/s/8oehplrobcgi9cq/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()", "def download_data(self, url: str, source_type: str) -> None:\n r = None # request\n\n # download data from nextcloud\n if source_type == \"nextcloud\":\n token = url\n r = requests.get(\n os.environ[\"NC_WEBDAV_URL\"], auth=(token, os.environ[\"NC_PASSWORD\"])\n )\n\n # download data from generic URLs\n if source_type == \"generic_url\":\n s = requests.Session()\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0\"\n }\n s.headers.update(headers)\n r = s.get(url)\n\n f_name = None # file name\n\n if \"content-disposition\" in r.headers.keys():\n d = r.headers[\"content-disposition\"]\n f_name = re.findall('filename=\"(.+)\"', d)[0]\n else:\n f_name = url.split(\"/\")[-1]\n\n # save file\n try:\n with open(Path(os.environ[\"DATA_PATH\"]) / f_name, \"wb\") as f:\n for chunk in r.iter_content(self.chunk_size):\n f.write(chunk)\n except OSError:\n print(f\"Error: {list(Path(os.environ['DATA_PATH']).iterdir())}\")", "def download_data():\n url = 'https://www.dropbox.com/s/xk4glpk61q3qrg2/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()", "def download_models_and_data():\n\n for file in DATA_FILES:\n download_file(file[\"url\"], file[\"path\"])", "def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")", "def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", 
\"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", \"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")", "def download(data_type, gs_aoi, main_dir, local_rep=True):\n # Get URLs for tiles covered by a polygon:\n tiles = get_tile_names(gs_aoi, data_type)\n print(f'Found {len(tiles)} products')\n\n # Make sure temporary folder for download exists:\n dwn_dir = join(main_dir, data_type)\n if not exists(dwn_dir):\n makedirs(dwn_dir)\n\n if local_rep:\n # Copy DTM files from local repository:\n print('\\nCopying DTM files:')\n for num, name in enumerate(tiles):\n print('{} of {}'.format(num+1, len(tiles)))\n dwn_stat, _ = copy_local(name, dwn_dir)\n print('File {}.'.format(dwn_stat))\n out_msg = 'Finished copying DTM files!'\n else:\n # Download DTM files:\n print(f\"\\nDownloading {data_type} files:\")\n for num, name in enumerate(tiles):\n print('{} of {}'.format(num+1, len(tiles)))\n dwn_stat, _ = download_file(name, dwn_dir)\n print('File {}.'.format(dwn_stat))\n if data_type == \"DTM\":\n # Convert to Geotiff\n print(\"Converting to GeoTIFF...\")\n result = asc_to_gtif(dwn_dir)\n print(result)\n out_msg = \"Finished downloading DTM files!\"\n\n # Output dictionary:\n out = {'out_msg': out_msg,\n 'out_dir': dwn_dir}\n\n return out", "def _get_data(self):\n try:\n \n with open('auto-mpg.data.txt', 'w') as data_file:\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n logger.debug(f'response code from url: 200')\n self.response_code = 200\n for line in r.iter_lines():\n data_file.write(line.decode() + '\\n')\n else:\n self.response_code = r.status_code\n logger.info(f'{url} returned status code {r.status_code}')\n except Exception as e:\n logger.info(f'Unexpected error writing to file {str(e)}. 
Exiting.')\n sys.exit()", "def fetch_data():\n log = logging.getLogger(__name__)\n log.info('Checking data files...')\n if not os.path.isfile('CGN.txt'):\n params_cgn = {\n 'institute.code': ['NLD037'],\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n cgn = GenesysParser(params_cgn)\n cgn.fetch2json('CGN.txt')\n log.info('CGN data has been saved.')\n else:\n log.info('CGN data file already exists.')\n\n if not os.path.isfile('USDA.txt'):\n params_usda = {\n 'institute.code': usda_all,\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n usda = GenesysParser(params_usda)\n usda.fetch2json('USDA.txt')\n log.info('USDA data has been saved.')\n else:\n log.info('USDA data file already exists.')", "def download(self):\n cloud_path = f\"gs://{const.GCS_BUCKET}/{self.GCS_PATH}\"\n # download label file\n label_zip = download_file_from_gcs(\n cloud_path, self.root, self.LABEL_ZIP\n )\n with zipfile.ZipFile(label_zip, \"r\") as zip_dir:\n zip_dir.extractall(self.root)\n\n # download tfexamples for a dataset split\n tfexamples_zip = download_file_from_gcs(\n cloud_path, self.root, self.SPLITS_ZIP.get(self.split)\n )\n with zipfile.ZipFile(tfexamples_zip, \"r\") as zip_dir:\n zip_dir.extractall(self.root)", "def download_coco(): \n file_type = '.zip'\n img_to_download = ['val','test','train']\n ann_to_download = ['annotations_trainval','image_info_test']\n base_url_images = 'http://images.cocodataset.org/zips/'\n base_url_ann = 'http://images.cocodataset.org/annotations/'\n\n\n click.echo(click.style(f\"\\n DOWNLOAD ANNOTATIONS \\n\", bg='green', bold=True, fg='white'))\n for ann in ann_to_download:\n\n ## build Urls\n ann_url = base_url_ann + ann + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\nDownloading of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be downloaded')\n\n zip_filename_location = save_zip_from_url(ann_url,cfg.PATH_ANNOTATIONS)\n #zip_filename_location = \"/home/kamgo-gpu/Schreibtisch/stuff_annotations_trainval2017.zip\"\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n\n click.echo(click.style(f'\\n Extraction of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be extracted and the zip-file will be deleted')\n\n # Extract zip to annotation directory\n Extract_zip_file(zip_filename_location,cfg.PATH_ANNOTATIONS)\n\n click.echo(click.style(f\"\\n DOWNLOAD IMAGES \\n\", bg='green', bold=True, fg='white'))\n for dataset in img_to_download:\n ## build Urls\n dataset_img_url = base_url_images + dataset + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\n Downloading of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be downloaded')\n\n zip_filename_location = save_zip_from_url(dataset_img_url,cfg.PATH_IMAGES)\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n click.echo(click.style(f'\\n Extraction of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be extracted and the zip-File will be deleted')\n\n # set complet Path to save images\n Extract_zip_file(zip_filename_location,cfg.PATH_IMAGES)\n\n click.echo(click.style(f'\\n Download and extraction termined successfull {dataset} ...\\n', bg='green', bold=True, fg='white'))", "def download_mnist (data='training'):\n assert data in ['training', 'testing']\n \n if data == 'training':\n images_url = 
'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'\n labels_url = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz'\n else:\n images_url = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz'\n labels_url = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'\n \n (images_fn_gz, _) = urllib.urlretrieve ('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz')\n (labels_fn_gz, _) = urllib.urlretrieve ('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz')\n return (images_fn_gz, labels_fn_gz)", "def download(path):\n\n # Check if directory exists\n if not os.path.isdir(path + \"birdvox_dcase_20k\"):\n print(\"Creating birdvox_dcase_20k Directory\")\n os.mkdir(path + \"birdvox_dcase_20k\")\n base = \"https://zenodo.org/record/1208080/files/\"\n filename = \"BirdVox-DCASE-20k.zip\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n url = base + filename + \"?download=1\"\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)\n url = \"https://ndownloader.figshare.com/files/10853300\"\n filename = \"data_labels.csv\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)", "def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")", "def _download(self, path):\n self.logger.info('Getting Million Song Dataset...')\n self.logger.info('Downloading Echo Nest Taste Subprofile train data...')\n base_url = 'http://millionsongdataset.com/sites/default/files/challenge/'\n\n download_dataset(\n base_url + 'train_triplets.txt.zip',\n join(self.data_folder, 'train.zip')\n )\n rename(join(self.data_folder, 'train'), path)\n\n self.logger.info('Downloading evaluation data for MSD Challenge...')\n download_dataset(\n base_url + 'EvalDataYear1MSDWebsite.zip',\n join(path, 'eval.zip')\n )\n rename(\n join(path, 'EvalDataYear1MSDWebsite'),\n join(path, 'evaluation')\n )\n\n self.logger.info('Downloading list of matching errors...')\n url = 'http://millionsongdataset.com/sites/default/files/tasteprofile/sid_mismatches.txt'\n download_url(url, join(path, 'sid_mismatches.txt'))" ]
[ "0.67733765", "0.6754389", "0.65670013", "0.6480931", "0.64402175", "0.64136404", "0.61601907", "0.6154601", "0.6146112", "0.6115417", "0.60394645", "0.60286486", "0.60172874", "0.59958476", "0.59882545", "0.5964745", "0.59604585", "0.5958659", "0.59532046", "0.5930072", "0.5918282", "0.5917775", "0.5904624", "0.5902819", "0.5896541", "0.5886692", "0.58668774", "0.5854503", "0.5846777", "0.5835924" ]
0.87439203
0
create merged genotype file from psam pvar and pgen
def create_merged_genotype_file(snps_file_path): print("creating merged genotype file") plink_runner = Plink2DockerRunner() shutil.copyfile(snps_file_path, f"{GENOTYPE_DATA_PATH}/{SNP_LIST_FILE_NAME}") plink_runner(f"./plink2 --pfile {IMAGE_SHARE_FOLDER_PATH}/{GENOTYPE_DATA_FOLDER}/{MERGED_GENOTYPE_FILE} vzs " f"--extract {IMAGE_SHARE_FOLDER_PATH}/{GENOTYPE_DATA_FOLDER}/{SNP_LIST_FILE_NAME} --export vcf " f"--out {IMAGE_SHARE_FOLDER_PATH}/{GENOTYPE_DATA_FOLDER}/{MERGED_GENOTYPE_FILE}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_gmpe_data_file(indata_dir, tmpdir,\n gmpe_file, gmpe_label_file,\n gmpe_group_name):\n # Find SRC file\n basedir = os.path.join(indata_dir, os.listdir(indata_dir)[0])\n src_file = glob.glob(\"%s%s*.src\" % (basedir, os.sep))\n if not len(src_file):\n print \"Unable to find SRC file!\"\n sys.exit(1)\n src_file = src_file[0]\n # Now parse SRC file\n src_keys = parse_src_file(src_file)\n\n # Find station list\n stl_file = glob.glob(\"%s%s*.stl\" % (basedir, os.sep))\n if len(stl_file) != 1:\n print \"Unable to find STL file!\"\n sys.exit(1)\n stl_file = stl_file[0]\n # Parse station list\n slo = StationList(stl_file)\n site_list = slo.getStationList()\n\n # Write ri50 files\n rrups = []\n for site in site_list:\n output_file = os.path.join(tmpdir, \"%s.ri50\" % (site.scode))\n calculate_gmpe(src_keys, site, output_file, rrups, gmpe_group_name)\n mean_rrup = numpy.mean(rrups)\n\n # Get periods\n gmpe_group = gmpe_config.GMPES[gmpe_group_name]\n \n # Write label file\n out_labels = open(gmpe_label_file, 'w')\n # Write labels\n labels = \",\".join(gmpe_group[\"labels\"])\n out_labels.write(\"%s\\n\" % (labels))\n # Done\n out_labels.close()\n\n # Open output file, write header\n outfile = open(gmpe_file, 'w')\n # Add header for the GMPE column\n outfile.write(\"0\")\n for period in gmpe_group[\"periods\"]:\n outfile.write(\",%10.5f\" % period)\n outfile.write(\"\\n\")\n\n # Get number of GMPEs that we have\n number_of_gmpes = len(gmpe_group[\"models\"])\n\n # Get list of stations to process\n stations = sorted(glob.glob(\"%s%s*.ri50\" % (tmpdir, os.sep)))\n for station in stations:\n # Start empty\n gmpe_ri50 = []\n \n input_file = open(station, 'r')\n for line in input_file:\n line = line.strip()\n # Skip comments\n if line.startswith(\"#\"):\n continue\n pieces = [float(item) for item in line.split()]\n # Initialize gmpe_ri50 structure\n if not gmpe_ri50:\n for item in pieces[1:]:\n gmpe_ri50.append([])\n for item, dst in zip(pieces[1:], gmpe_ri50):\n dst.append(item)\n # Done with input file\n input_file.close()\n # Read all values\n for i in range(0, len(gmpe_ri50)):\n outfile.write(\"%d\" % (i + 1))\n for item in gmpe_ri50[i]:\n outfile.write(\",%10.6f\" % (item))\n outfile.write(\"\\n\")\n\n # All done, close output file\n outfile.close()\n\n return (src_keys['magnitude'], mean_rrup, number_of_gmpes)", "def download_genotype_data():\n print(\"downloading genotype data\")\n download_from_url(PSAM_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam\", desc=\"downloading psam\")\n download_from_url(PVAR_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst\",\n desc=\"downloading pvar\")\n download_from_url(PGEN_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst\",\n desc=\"downloading pgen\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pvar\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pgen\")", "def write_psts(self,prefix,existing_jco=None,noptmax=None):\n self.log(\"writing realized pest control files\")\n # get a copy of the pest control file\n pst = self.pst.get(par_names=self.pst.par_names,obs_names=self.pst.obs_names)\n\n if noptmax is not None:\n pst.control_data.noptmax = noptmax\n pst.control_data.noptmax = noptmax\n\n if existing_jco is not None:\n pst.pestpp_options[\"BASE_JACOBIAN\"] = existing_jco\n\n # set the indices\n pst.parameter_data.index = pst.parameter_data.parnme\n pst.observation_data.index = pst.observation_data.obsnme\n\n if self.parensemble.istransformed:\n par_en = 
self.parensemble._back_transform(inplace=False)\n else:\n par_en = self.parensemble\n\n for i in range(self.num_reals):\n pst_name = prefix + \"{0:d}.pst\".format(i)\n self.log(\"writing realized pest control file \" + pst_name)\n pst.parameter_data.loc[par_en.columns,\"parval1\"] = par_en.iloc[i, :].T\n\n # reset the regularization\n #if pst.control_data.pestmode == \"regularization\":\n #pst.zero_order_tikhonov(parbounds=True)\n #zero_order_tikhonov(pst,parbounds=True)\n # add the obs noise realization if needed\n if self.obsensemble.shape[0] == self.num_reals:\n pst.observation_data.loc[self.obsensemble.columns,\"obsval\"] = \\\n self.obsensemble.iloc[i, :].T\n\n # write\n pst.write(pst_name)\n self.log(\"writing realized pest control file \" + pst_name)\n self.log(\"writing realized pest control files\")", "def file_creator(title_list):\n for file_name in title_list: #title names are retrieved out of genID.txt\n with open (\"nuc_variant_calls/\"+file_name.strip()+\".var\",'w') as x:\n x.write(\"Feature type\\tAlignment length\\tIdentical nucleotides\\tIndel count\\n\") #Table headers.", "def make_smi_and_gyspum_params(gen_smiles_file, folder_path,\n gypsum_output_folder_path, max_variance,\n gypsum_thoroughness, min_ph, max_ph,\n pka_precision):\n list_of_gypsum_params = []\n\n with open(gen_smiles_file) as smiles_file:\n for line in smiles_file:\n if line == \"\\n\":\n continue\n line = line.replace(\"\\n\", \"\")\n line = line.replace(\" \", \"\\t\")\n parts = line.split(\"\\t\") # split line into parts separated by 4-spaces\n if len(parts) == 0 or len(parts) == 1:\n print(parts)\n smile = parts[0]\n # ligand_name example\n # (Gen_30_Cross_639427+Gen_31_Cross_717928)Gen_34_Cross_709666 But\n # bash doesn't like + or () for file names so we will abridge\n # lig_name_short name for above example becomes\n # Gen_34_Cross_709666 if ligand is from the source files we wont\n # split the name\n\n ligand_name = parts[1]\n if len(ligand_name.split(\")\")) == 2:\n lig_name_short = ligand_name.split(\")\")[1]\n elif len(ligand_name.split(\")\")) == 1:\n lig_name_short = ligand_name\n else:\n printout = \"Ligand name failed to abridge. Smiles may be \\\n named in improper format please separate with _ \\\n or camelcase. Our formatting is: \\\n (Gen_2_Cross_631+Gen_3_Cross_744)Gen_4_Cross_702 \\\n which reads as Gen_34_Cross_702 (aka ligand 702) \\\n was produced by crossover using ligands: \\\n Gen_2_Cross_631 and Gen_3_Cross_744. 
\\\n This will abridge to Gen_4_Cross_702 for saving \\\n files.\\nThe failed ligand name was \\\n {}\".format(ligand_name)\n\n print(printout)\n raise Exception(printout)\n\n smi_line = \"{}\\t{}\".format(smile, lig_name_short)\n\n smi_path = \"{}{}.smi\".format(folder_path, lig_name_short)\n\n # make .smi file\n with open(smi_path, \"w\") as smi_file:\n smi_file.write(smi_line)\n\n # Make .json file\n gypsum_params = {\n \"source\": smi_path,\n \"output_folder\": gypsum_output_folder_path,\n \"num_processors\": 1,\n \"job_manager\": \"serial\",\n \"use_durrant_lab_filters\": True,\n \"max_variants_per_compound\": max_variance,\n \"thoroughness\": gypsum_thoroughness,\n \"separate_output_files\": True,\n \"add_pdb_output\": False,\n \"add_html_output\": False,\n \"min_ph\": min_ph,\n \"max_ph\": max_ph,\n \"pka_precision\": pka_precision,\n \"skip_optimize_geometry\": False,\n \"skip_alternate_ring_conformations\": False,\n \"skip_adding_hydrogen\": False,\n \"skip_making_tautomers\": False,\n \"skip_enumerate_chiral_mol\": False,\n \"skip_enumerate_double_bonds\": False,\n \"let_tautomers_change_chirality\": False,\n \"2d_output_only\": False,\n \"cache_prerun\": False,\n \"test\": False,\n }\n\n list_of_gypsum_params.append(gypsum_params)\n\n return list_of_gypsum_params", "def genotype(args) :\n from genotyper import genotype_samples\n genotype_samples(args)", "def generatePositivePHASLoci(options,whole_mapped_data,phase,cycle):\n out_filename=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\".positive_phase_loci\"\n fhw=open(out_filename,\"w\")\n for chromosome in sorted(whole_mapped_data):\n filename=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest.concentrated\"\n try:\n fhr=open(filename,\"r\")\n except FileNotFoundError:\n continue\n flag_reg=1000\n window_start,window_end=0,0\n for line in fhr:\n \"\"\"pvalue=float(line.strip().split()[-1])\n if pvalue>=options.pvalue_cutoff:continue\"\"\"\n register,start,end=map(int,line.strip().split()[:3])\n if register==flag_reg:\n if window_end>start:\n window_end=end\n else:\n fhw.write(chromosome+\"\\t\"+str(window_start)+\"\\t\"+str(window_end)+\"\\n\")\n window_start=start\n window_end=end\n else:\n if flag_reg!=1000:\n fhw.write(chromosome+\"\\t\"+str(window_start)+\"\\t\"+str(window_end)+\"\\n\")\n window_start=start\n window_end=end\n flag_reg=register\n fhr.close()\n fhw.write(chromosome+\"\\t\"+str(window_start)+\"\\t\"+str(window_end)+\"\\n\")\n fhw.close()", "def generatePhasingScore(options,phase,cycle):\n score,readcount,readseq=readDataForPhasingScoreComputation(options,phase)\n phased_loci_filename=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\".positive_phase_loci\"\n final_phase_loci=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\".phasing_score_phase_loci\"\n fhr=open(phased_loci_filename,\"r\")\n out4=open(final_phase_loci,\"w\")\n for line in fhr:\n chromosome,ss,ee=line.strip().split()\n ss=int(ss)\n ee=int(ee)\n #correct=list(range(ss,ee+1,phase))\n phasing_score_filename=options.output_directory_per_run+\"/\"+str(phase)+\"_\"+str(chromosome)+\"_\"+str(ss)+\"_\"+str(ee)+\".phasing_score\"\n abundance_score_filename=options.output_directory_per_run+\"/\"+str(phase)+\"_\"+str(chromosome)+\"_\"+str(ss)+\"_\"+str(ee)+\".abundance\"\n out=open(phasing_score_filename,\"w\")\n 
out2=open(abundance_score_filename,\"w\")\n score_count={}\n for site in range(ss,ee+1):\n start=site-(phase*4)\n end=site+(phase*5)-1\n max_within_site,max_within_count,all_scores=0,0,0\n for cor in range(start,end+1):\n if cor not in score[chromosome]:continue\n all_scores+=score[chromosome][cor]\n for i in readcount[chromosome][cor]:\n if max_within_count<readcount[chromosome][cor][i]:\n max_within_site=cor\n max_within_count=readcount[chromosome][cor][i]\n all_scores-=max_within_count\n P,k=0,0\n s=start\n while s<end:\n if s not in score[chromosome]:\n s+=phase\n continue\n if score[chromosome][s]!=0:\n P+=score[chromosome][s]\n k+=1\n if s == max_within_site:\n P-=max_within_count \n s+=phase\n U=all_scores-P\n \n #if U<0: continue\n if k>=3:\n #print(P,U,k)\n phas_score=math.log((1+(10*(P/(1+U))))**(k-2))\n \"\"\"if phas_score>max and site in correct:\n max=phas_score\"\"\"\n else:\n phas_score=0\n out.write(str(site)+\"\\t\"+str(phas_score)+\"\\n\")\n out4.write(chromosome+\"\\t\"+str(site)+\"\\t\"+str(phas_score)+\"\\n\")\n if chromosome not in score_count:\n score_count[chromosome]={}\n if site not in score_count[chromosome]:\n score_count[chromosome][site]=phas_score\n if site in readcount[chromosome] and '+' in readcount[chromosome][site] and readcount[chromosome][site]['+']!=0:\n out2.write(str(site)+\"\\t\"+str(readcount[chromosome][site]['+'])+\"\\n\")\n if site in readcount[chromosome] and '-' in readcount[chromosome][site] and readcount[chromosome][site]['-']!=0:\n out2.write(str(site)+\"\\t-\"+str(readcount[chromosome][site]['-'])+\"\\n\")\n out.close()\n out2.close()\n \n #out4.write(chromosome+\"\\t\"+str(ss)+\"\\t\"+str(ee)+\"\\t\"+str(phas_score)+\"\\n\")\n out4.close()", "def main():\n\n # Accept up to three command-line arguments\n input_terms = \"<input_GO_terms_file>\"\n input_annotations = \"<input_gene_associations_file>\"\n output_filename = \"<output_filename>\"\n\n\n # The first two arguments are required GO terms file ending with .obo\n # and gene association GAF file ending with .gaf\n if len(sys.argv) < 3:\n sys.exit(\"Please provide required GO terms .obo file and gene \" +\n \"assocatiion .gaf file.\")\n elif not sys.argv[1].endswith(\".obo\"):\n sys.exit(\"Please provide a GO terms .obo file.\")\n elif not sys.argv[2].endswith(\".gaf\"):\n sys.exit(\"Please provide a gene association .gaf file.\")\n else:\n input_terms = sys.argv[1]\n input_annotations = sys.argv[2]\n\n\n # Check if the provided import .obo or .gaf files exist\n if not input_terms:\n sys.exit(input_terms + \" not found. Check the file path and try again.\")\n elif not input_annotations:\n sys.exit(input_annotations + \" not found. 
Check the file path and try again.\")\n elif len(sys.argv) == 3:\n output_filename = \"results.tsv\"\n sys.stdout = open(\"results.tsv\", \"w\")\n elif len(sys.argv) == 4:\n output_filename = sys.argv[3] + \".tsv\"\n sys.stdout = open(output_filename, \"w\")\n\n\n # parse id and is_valeus and make a go_dict\n split_input_terms = split_terms(input_terms)\n go_dict = {}\n for record in split_input_terms:\n (go_id, is_a) = parse_go_term(record)\n key_go_dict = \"\".join(go_id)\n go_dict[key_go_dict] = is_a\n\n\n # Export an annotation gene information to tsv format into the output file\n gene_association_map = map_protein_to_go(input_annotations)\n for protein, go_ids in sorted(gene_association_map.items()):\n print(protein, end=\"\")\n\n for go_id in sorted(go_ids):\n parent_go_ids = find_parent_terms(go_id, go_dict)\n\n count = 0\n for parent_go_id in sorted(parent_go_ids):\n\n if count == 0:\n print(\"\\t\", go_id, \"\\t\", parent_go_id)\n count += 1\n else:\n print(\"\\t\", parent_go_id, sep=\"\\t\")\n\n sys.stdout.close()", "def gos_files_creation(annotation_file, go_namespace_studied):\n go_ontology = pronto.Ontology('http://purl.obolibrary.org/obo/go/go-basic.obo')\n\n # For each GO terms look to the namespaces associated with them.\n go_namespaces = {}\n for go_term in go_ontology:\n go_namespaces[go_term.id] = go_term.other['namespace'][0]\n\n # For each GO terms look if there is an alternative ID fo them.\n go_alt_ids = {}\n for go_term in go_ontology:\n if 'alt_id' in go_term.other:\n for go_alt in go_term.other['alt_id']:\n go_alt_ids[go_alt] = go_term.id\n\n # Genome file with genes associated with GO terms.\n df = pa.read_csv(annotation_file, sep='\\t', header=None)\n df.columns = ['Gene_Name', 'GOs']\n df.replace(np.nan, '', inplace=True)\n\n gos_in_df = []\n for gos in df['GOs']:\n for go in gos.split(','):\n if go not in gos_in_df:\n gos_in_df.append(go)\n\n df.set_index('Gene_Name', inplace=True)\n\n gene_gos = []\n for gene, row in df.iterrows():\n for go in row['GOs'].split(','):\n gene_gos.append((go, gene))\n\n dic_go_genes = {}\n for go in tqdm(gos_in_df):\n genes = []\n for gene_go in gene_gos:\n if go != '' and go not in go_namespaces:\n go = go_alt_ids[go]\n if gene_go[0] == go and go != '' and go_namespaces[go] == go_namespace_studied:\n genes.append(gene_go[1])\n if go != '':\n dic_go_genes[go] = genes\n\n print(len(dic_go_genes))\n\n delete_keys = []\n for go in dic_go_genes:\n if len(dic_go_genes[go]) < 4:\n delete_keys.append(go)\n\n for key in delete_keys:\n del dic_go_genes[key]\n print(len(dic_go_genes))\n\n df_go = pa.DataFrame.from_dict(dic_go_genes, orient='index')\n df_go.insert(0, 'Description', 'GO_terms')\n\n df_go.to_csv('go_gene.gmt', sep='\\t', header=False)\n\n df.reset_index(inplace=True)\n df_query_go = pa.concat([pa.Series(row['Gene_Name'], row['GOs'].split(','))\n for _, row in df.iterrows()]).reset_index()\n df_query_go.columns = ['GOs', 'Gene_Name']\n df_query_go = df_query_go[['Gene_Name', 'GOs']]\n df_query_go.to_csv('query_go.tsv', sep='\\t', index=False)", "def writeProteins( self ):\n\n self.logger.info( 'writeProteins: START' )\n\n proteinsDestination = self.openInsertFile( 'proteinsInsert.psql' )\n accessionsDestination = self.openInsertFile( 'accessionsInsert.psql' )\n\n proteins = {}\n\n totalOfSequences = self.reader.getTotalOfSequences()\n\n self.logger.info( 'writeProteins: total of sequences: ' + str(totalOfSequences) + '.' 
)\n\n files = self.reader.getPepFiles()\n\n self.logger.info( 'writeProteins: total of sequence files: ' + str(len(files)) + '.' )\n\n # For log purposes only!\n counter = 0\n\n for pepFile in files:\n f = self.reader.openPepFile( pepFile )\n\n positions = self.reader.getPepEntriesPositions()\n\n # Just for the log system.\n fileName = self.afs.getFileName( pepFile ) \n self.logger.info( 'writeProteins: writing file: ' + str(fileName) + '.' )\n self.logger.info( 'writeProteins: file: ' + str(fileName) + ' have : ' + str(len(positions)) + ' entries.' )\n # END of log stuff.\n\n for position in positions:\n\n # Only log how long it's taking to run.\n # By thousands.\n counter += 1\n if ( counter % 100000 ) == 0:\n self.logger.info( 'writeProtein: step: ' + str(counter) + '.')\n # END log step.\n\n\n entry = self.reader.getPepParsedEntry( position )\n\n # Sometimes there's 'pep' files without related organism. It happens in KEGG database.\n # We skip completely sequences without related organism.\n if not entry.organism.code in self.importerOrganism.organismsInserted:\n self.logger.info( 'writeProteins: ORGANISM NOT FOUND: ' + entry.organism.code )\n\n # Skip the 'pep' file completely.\n break\n\n else:\n organismId = self.importerOrganism.organismsInserted[ entry.organism.code ]\n\n self.logger.info( 'writeProteins: writing entry : ' + str(entry.identification) + '.' )\n\n #self.writeProteinsFile( proteinsDestination, entry.identification, entry.fullFastaHeader, entry.description, organismId, entry.sequence )\n proteinInserted = self.writeFile( proteinsDestination, 'proteins', [ str(entry.identification), str(entry.fullFastaHeader), str(entry.description), str(organismId), str(entry.sequence) ] )\n self.proteinsInserted[ entry.identification ] = proteinInserted\n\n accessionInserted = self.writeFile( accessionsDestination, 'accessions', [ str(entry.identification) ] )\n self.accessionsInserted[ entry.identification ] = accessionInserted \n #self.writeAccessionsFile( accessionsDestination, entry.identification )\n\n\n self.logger.info( 'writeProteins: DONE' )", "def write_output(pfam,\n indices_from_pfam_id,\n uniprot):\n pfam_starts, pfam_ends, pfam_sequences = pfam\n uniprot_ids, uniprot_sequences = uniprot\n\n logging.info('Writing output file %s...', FLAGS.output_file)\n\n n_pfam_entries_found = 0\n n_sequence_mismatches = 0\n n_repeats = 0\n n_start_mismatches = 0\n with tf.io.gfile.GFile(FLAGS.output_file, 'w') as f:\n f.write(','.join(OUTPUT_FIELDS) + '\\n')\n for uniprot_id, uniprot_sequence in zip(uniprot_ids, uniprot_sequences):\n for idx in indices_from_pfam_id[uniprot_id]:\n pfam_start, pfam_end = pfam_starts[idx], pfam_ends[idx]\n pfam_sequence = pfam_sequences[idx]\n\n uniprot_starts = find_all(uniprot_sequence, pfam_sequence)\n\n n_pfam_entries_found += 1\n if uniprot_starts:\n n_repeats += len(uniprot_starts) > 1\n n_start_mismatches += pfam_start not in uniprot_starts\n else:\n n_sequence_mismatches += 1\n\n pfam_id = f'{uniprot_id}/{pfam_start}-{pfam_end}'\n uniprot_starts = ';'.join([str(i) for i in uniprot_starts])\n fields = [pfam_id, uniprot_starts, uniprot_sequence]\n f.write(','.join(fields) + '\\n')\n\n logging.info('Finished writing %d entries to output file.',\n n_pfam_entries_found)\n\n logging.info('%d / %d Pfam-A seed entries have mismatching sequences.',\n n_sequence_mismatches, n_pfam_entries_found)\n logging.info('%d / %d Pfam-A seed entries have repeats.',\n n_repeats, n_pfam_entries_found)\n logging.info('%d / %d Pfam-A seed entries have mismatching 
starts.',\n n_start_mismatches, n_pfam_entries_found)", "def create_grp_file(data, model_name, gp_var, outputModelFilesDirectory):\n\n dimx = None\n dimy = None\n if len(data.shape) == 1:\n dimy = 1\n dimx = data.shape[0]\n else:\n dimx, dimy = data.shape\n data = np.ones(dimx)\n\n if not (gp_var == None):\n i = 1\n for key in sorted(gp_var.keys()):\n\n for index in gp_var[key]:\n data[index] = i\n\n i += 1\n\n\n f = open(os.path.join(outputModelFilesDirectory, model_name + '.grp'), 'w')\n\n print >>f, '/NumWaves\\t1'\n print >>f, '/NumPoints\\t%d\\n' %dimx\n print >>f, '/Matrix'\n np.savetxt(f, data, fmt='%d', delimiter='\\t')\n\n f.close()", "def write_agp(self, agp_fn, ref_fn, add_suffix_to_unplaced=False):\n used_components = set()\n used_edges = set()\n obj_header_idx = -1\n\n agp = AGPFile(agp_fn, \"w\")\n agp.add_pragma()\n agp.add_comment(\"# AGP created by RagTag {}\".format(get_ragtag_version()))\n\n while True:\n # Find a starting node\n from_node = None\n to_node = None\n cur_ref = None\n for u, v in sorted(self.edges):\n if (u, v) not in used_edges:\n u_base = u[:-2]\n\n u_degree = 0\n if u_base + \"_b\" in self.nodes:\n u_degree += self.graph.degree[u_base + \"_b\"]\n if u_base + \"_e\" in self.nodes:\n u_degree += self.graph.degree[u_base + \"_e\"]\n\n assert u_degree in {2, 4}\n\n # Check if we have found a starting target sequence\n if u_degree == 2:\n cur_ref = u_base\n from_node = u\n to_node = v\n used_edges.add((u, v))\n used_edges.add((v, u))\n break\n\n # If we haven't found a new starting target sequence, we are done\n if from_node is None:\n break\n\n # Initialize this object\n obj_header_idx += 1\n obj_header = \"scf\" + \"{0:08}\".format(obj_header_idx)\n obj_pos = 0\n obj_pid = 1\n\n # Process the first target sequence\n cur_ref_len = self.component_lens[cur_ref]\n cur_ref_strand = \"+\"\n if from_node.endswith(\"_b\"):\n cur_ref_strand = \"-\"\n agp.add_seq_line(obj_header, obj_pos+1, obj_pos+cur_ref_len, obj_pid, \"W\", cur_ref, 1, cur_ref_len, cur_ref_strand)\n obj_pos += cur_ref_len\n obj_pid += 1\n used_components.add(cur_ref)\n\n # Process the remaining sequences.\n next_edge_exists = True\n while next_edge_exists:\n # Process the patch\n patch_aln = self.graph[from_node][to_node][\"alignment\"]\n patch_query = patch_aln.query\n patch_strand = \"+\"\n if patch_aln.strand:\n patch_strand = \"-\"\n\n patch_len = patch_aln.their_query_start - patch_aln.my_query_end\n if patch_len > 0:\n if patch_aln.is_gap:\n agp.add_gap_line(obj_header, obj_pos+1, obj_pos+patch_len, obj_pid, \"N\", patch_len, \"scaffold\", \"yes\", \"align_genus\")\n else:\n agp.add_seq_line(obj_header, obj_pos+1, obj_pos+patch_len, obj_pid, \"W\", patch_query, patch_aln.my_query_end+1, patch_aln.their_query_start, patch_strand)\n used_components.add(patch_query)\n obj_pos += patch_len\n obj_pid += 1\n\n # Next, process the reference sequence\n comp_start = min(0, patch_len)\n cur_ref = to_node[:-2]\n cur_ref_len = self.component_lens[cur_ref]\n cur_ref_strand = \"+\"\n if to_node.endswith(\"_e\"):\n cur_ref_strand = \"-\"\n agp.add_seq_line(obj_header, obj_pos+1, obj_pos+(cur_ref_len + comp_start), obj_pid, \"W\", cur_ref, 1+(-1*comp_start), cur_ref_len, cur_ref_strand)\n obj_pos += cur_ref_len + comp_start\n obj_pid += 1\n used_components.add(cur_ref)\n\n # Look for the next edge\n from_node = to_node[:-2] + \"_b\"\n if to_node.endswith(\"_b\"):\n from_node = to_node[:-2] + \"_e\"\n\n if from_node in self.graph.nodes:\n next_nodes = set(self.graph[from_node])\n assert len(next_nodes) == 
1\n to_node = next_nodes.pop()\n used_edges.add((from_node, to_node))\n used_edges.add((to_node, from_node))\n else:\n next_edge_exists = False\n\n # Write unplaced reference sequences\n fai = pysam.FastaFile(ref_fn)\n all_ref_seqs = set(fai.references)\n fai.close()\n remaining_components = all_ref_seqs - used_components\n for c in sorted(remaining_components):\n agp.add_seq_line(\n c + \"_RagTag\" * add_suffix_to_unplaced,\n \"1\",\n str(self.component_lens[c]),\n \"1\",\n \"W\",\n c,\n \"1\",\n str(self.component_lens[c]),\n \"+\"\n )\n\n agp.write()", "def load_gen_data(gen_file, sample_file):\n\n gen_data=open(gen_file, \"r\")\n sample_data=open(sample_file, \"r\")\n\n sample_names=[]\n\n # First two lines are headers\n sample_data.next()\n sample_data.next()\n\n for line in sample_data:\n sample_names.append(line.split(\" \")[0])\n \n snp_names=[]\n snp_pos=[]\n genotype_data=[]\n\n for line in gen_data:\n data=line[:-1] # Remove \\n from the end of the line\n data=data.rstrip().split(\" \")\n snp_names.append(data[1])\n snp_pos.append(int(data[2]))\n gt = data[5:]\n if not len(gt)==3*len(sample_names): \n print gt\n raise Exception(\"Bad data line: %d samples and %d entries\" % (len(sample_names), len(gt)) )\n gt = [x.index(max(x)) for x in zip(gt[1::3],gt[2::3], gt[3::3])]\n genotype_data.append(gt)\n\n return {\"sample_names\":sample_names, \"snp_names\":snp_names, \"snp_pos\":snp_pos, \"genotype_data\":genotype_data}", "def output_phased_data(phasing, sample_names, snp_names, options):\n things_to_output=[]\n things_to_output.append( (\"la\", \"local_ancestry\", parents_to_string))\n if options.get(\"best_parents\", None): things_to_output.append( (\"bp\", \"best_parents\", parents_to_string) )\n \n # Output phased data\n for suffix, tag, format_func in things_to_output:\n\n if(options.get(\"gzip\", None)):\n file_name = options[\"out\"]+\".\"+suffix+\".txt.gz\"\n out_file = gzip.open(file_name, \"w\")\n else:\n file_name = options[\"out\"]+\".\"+suffix+\".txt\"\n out_file = open(file_name, \"w\")\n \n #out_file.write( \"\\t\".join([\"POS\"]+sample_names) + \"\\n\" )\n for i in range(len(phasing[sample_names[0]][tag])):\n #out_file.write( \"\\t\".join([snp_names[i]]+[format_func(phasing[s][tag][i]) for s in sample_names] ) + \"\\n\")\n out_file.write( \" \".join([format_func(phasing[s][tag][i]) for s in sample_names] ) + \"\\n\")\n\n out_file.close()", "def create_file_empty_particles( self, fullpath, iteration,\n time, dt, select_nglobal_dict=None ):\n # Create the file (can be done by one proc or in parallel)\n f = self.open_file( fullpath,\n parallel_open=self.write_metadata_parallel )\n\n # Setup the different layers of the openPMD file\n # (f is None if this processor does not participate is writing data)\n if f is not None:\n\n # Setup the attributes of the top level of the file\n self.setup_openpmd_file( f, iteration, time, dt )\n # Setup the meshes group (contains all the particles)\n f.attrs[\"particlesPath\"] = np.string_(\"particles/\")\n particle_path = \"/data/%d/particles/\" %iteration\n particle_grp = f.require_group(particle_path)\n # Loop through all particle species\n for species_name in sorted(self.species_dict.keys()):\n species = self.species_dict[species_name]\n\n # Check the number of particles to write\n if select_nglobal_dict is not None:\n N = select_nglobal_dict[species_name]\n else:\n N = None\n\n # Create and setup the h5py.Group species_grp\n species_path = particle_path+\"%s/\" %(species_name)\n species_grp = f.require_group( species_path )\n 
self.setup_openpmd_species_group( species_grp, species, N=N )\n\n # Loop over the different quantities that should be written\n # and setup the corresponding datasets\n for particle_var in self.particle_data:\n\n # Vector quantities\n if particle_var in [\"position\", \"momentum\", \"E\", \"B\"]:\n # Setup the dataset\n quantity_path=species_path+ \"%s/\" %particle_var\n quantity_grp = f.require_group(quantity_path)\n for coord in [\"x\",\"y\",\"z\"]:\n # Create the dataset (fixed size or appendable)\n if N is not None:\n dset = quantity_grp.create_dataset(\n coord, (N,), dtype='f8')\n else:\n dset = quantity_grp.create_dataset(\n coord, (0,), maxshape=(None,), dtype='f8')\n self.setup_openpmd_species_component( dset )\n self.setup_openpmd_species_record( quantity_grp,\n particle_var)\n\n # Scalar quantity\n elif particle_var in [\"weighting\", \"id\", \"t\"]:\n # Choose the type of the output\n if particle_var == \"id\":\n dtype = 'uint64'\n else:\n dtype = 'f8'\n # Create the dataset (fixed size or appendable)\n if N is not None:\n dset = species_grp.create_dataset(\n particle_var, (N,), dtype=dtype )\n else:\n dset = species_grp.create_dataset( particle_var,\n (0,), maxshape=(None,), dtype=dtype)\n self.setup_openpmd_species_component( dset )\n self.setup_openpmd_species_record( dset, particle_var )\n\n # Unknown field\n else:\n raise ValueError(\n \"Invalid string in particletypes: %s\" %particle_var)\n\n # Close the file\n f.close()", "def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):\r\n suffix = '' # for *s after the p-value\r\n try:\r\n x = prob.shape\r\n prob = prob[0]\r\n except:\r\n pass\r\n if prob < 0.001: suffix = ' ***'\r\n elif prob < 0.01: suffix = ' **'\r\n elif prob < 0.05: suffix = ' *'\r\n title = [['Name','N','Mean','SD','Min','Max']]\r\n lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],\r\n [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]\r\n if type(fname)<>StringType or len(fname)==0:\r\n print\r\n print statname\r\n print\r\n pstats.printcc(lofl)\r\n print\r\n try:\r\n if stat.shape == ():\r\n stat = stat[0]\r\n if prob.shape == ():\r\n prob = prob[0]\r\n except:\r\n pass\r\n print 'Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix\r\n print\r\n else:\r\n file = open(fname,writemode)\r\n file.write('\\n'+statname+'\\n\\n')\r\n file.close()\r\n writecc(lofl,fname,'a')\r\n file = open(fname,'a')\r\n try:\r\n if stat.shape == ():\r\n stat = stat[0]\r\n if prob.shape == ():\r\n prob = prob[0]\r\n except:\r\n pass\r\n file.write(pstats.list2string(['\\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\\n\\n']))\r\n file.close()\r\n return None", "def to_psf_file(self, psf_path) -> None:\n with open(psf_path, \"w\", encoding=\"utf-8\") as psf_file:\n psf_file.write(self.to_psf_block())", "def giveMotevoParamFile(genome, wmlen, inter_dir, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior, bgorder, bgprior):\n\n ##UFE_models from genome_dict are not used anymore\n #UFEmodel_hg19 is UFE model for mammal species\n genome_dict = {}\n genome_dict['hg19'] = ['((((hg19:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau6:0.186713,(equCab2:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom5:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_hg19']\n genome_dict['hg18'] = 
['((((hg18:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau3:0.186713,(equCab1:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom4:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFE_mammals']\n #genome_dict['dm3'] = ['((((((dm3:0.059,droSim1:0.075):0.041,(droYak2:0.104,droEre2:0.107):0.054):0.120,droAna3:0.377):0.072,dp4:0.397):0.061,droWil1:0.536):0.020,((droVir3:0.196,droMoj3:0.255):0.073,droGri2:0.291):0.337);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_dm3']\n genome_dict['dm3'] = ['((((((dm3:0.059,droSim1:0.075):0.041,(droYak2:0.104,droEre2:0.107):0.054):0.120,droAna3:0.377):0.072,dp4:0.397):0.061,droWil1:0.536):0.020,((droVir3:0.196,droMoj3:0.255):0.073,droGri2:0.291):0.337);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/dm3UFEparallel/UFEmodel_dm3']\n genome_dict['mm9'] = ['((((hg19:0.032973,rheMac2:0.057695):0.09821,mm9:0.352605):0.020666,(bosTau7:0.186713,(equCab2:0.107726,canFam2:0.150374):0.010431):0.032764):0.156024,monDom5:0.425899);', '/import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/MotEvo_v1.0/UFEmodels/UFEmodel_mm9']\n\n\n sitefilepath = os.path.join(inter_dir, 'sites_' + tag)\n priorfilepath = os.path.join(inter_dir, 'priors_' + tag)\n loglikfile = os.path.join(inter_dir, 'loglik_' + tag)\n\n\n print '\\nCreate motevo parameter file %s' %tag\n print 'aligned', aligned\n if aligned:\n motevo_params = '\\n'.join(['refspecies %s' %genome,\n 'TREE %s' %genome_dict[genome][0],\n 'Mode TFBS',\n 'EMprior %s' %emprior,\n 'priordiff %s' %0.05,\n 'UFEwmprior %s' %200,\n 'UFEwmfile %s' %ufemodel_path,\n 'UFEwmlen %s' %wmlen,\n 'UFEprint %s' %0,\n 'markovorderBG %s' %bgorder,\n 'bgprior %s' %bgprior,\n 'bg A %s' %ATfreq,\n 'bg T %s' %ATfreq,\n 'bg G %s' %GCfreq,\n 'bg C %s' %GCfreq,\n 'restrictparses %s' %0,\n 'sitefile %s' %sitefilepath,\n 'priorfile %s' %priorfilepath,\n 'printsiteals %s' %0,\n 'minposterior %f' %0.0,\n 'loglikfile %s' %loglikfile])\n else:\n motevo_params = '\\n'.join(['refspecies %s' %genome,\n 'TREE (%s: 1)' %genome,\n 'Mode TFBS',\n 'EMprior %s' %emprior,\n 'priordiff %s' %0.05,\n 'markovorderBG %s' %bgorder,\n 'bgprior %s' %bgprior,\n 'bg A %s' %ATfreq,\n 'bg T %s' %ATfreq,\n 'bg G %s' %GCfreq,\n 'bg C %s' %GCfreq,\n 'restrictparses %s' %0,\n 'sitefile %s' %sitefilepath,\n 'priorfile %s' %priorfilepath,\n 'printsiteals %s' %0,\n 'minposterior %f' %0.0,\n 'loglikfile %s' %loglikfile]) \n\n params_path = os.path.join(inter_dir, 'motevo_TFBS_params_' + tag)\n pf = open(params_path, 'w')\n pf.write(motevo_params)\n return (params_path, sitefilepath, priorfilepath, loglikfile)", "def generate_pileup(contig, site, bam_file, ref_file, vcf_file, output_dir):\n # create the vcf handler\n vcf_handler = modules.vcf_handler.VCFFileProcessor(vcf_file)\n # generate dictionary of the region\n vcf_handler.populate_dictionary(contig, site, hom_filter=False)\n\n # create ref and bam files handler\n ref_handler = modules.ref_handler.RefFileProcessor(ref_file)\n bam_handler = modules.bam_handler_mpileup.BamProcessor(bam_file)\n\n # create a summary file\n smry = open(output_dir + \"summary\" + '_' + contig + site.replace(':', '_').replace('-', '_') + \".csv\", 'w')\n\n # get the vcf dictionary of that region\n vcf_dict = vcf_handler.get_variant_dictionary()\n\n # get the odds of selecting a homozygous case\n total_hom, total_het, total_homalt = 
vcf_handler.get_genotype_counts()\n odds_of_generating_hom_case = get_odds_for_hom(total_hom, total_het, total_homalt)\n\n # keep count of how many images of each type is generated\n total_generated_hom, total_generated_het, total_generated_hom_alt = 0, 0, 0\n\n for pos in vcf_dict.keys():\n for rec in vcf_dict[pos]:\n\n # if genotype is SNP then generate image\n if rec.genotype_class == 'SNP':\n alt = '.'\n if rec.type == 'Hom':\n pileup_str = bam_handler.get_pileup_of_a_site(contig, rec.pos-1).split(' ')[1]\n ref_at_pos = ref_handler.get_ref_of_region(contig, \":\" + str(rec.pos) + \"-\" + str(rec.pos))\n alt, mismatches = get_alts_in_hom_pileup(pileup_str, ref_at_pos)\n if mismatches == 0:\n continue\n\n if rec.type == 'Hom' and numpy.random.uniform(0, 1) > odds_of_generating_hom_case:\n continue\n elif rec.type == 'Hom':\n rec.alt = alt\n\n total_generated_hom += 1 if rec.type == 'Hom' else 0\n total_generated_het += 1 if rec.type == 'Het' else 0\n total_generated_hom_alt += 1 if rec.type == 'Hom_alt' else 0\n\n # get pileup columns from bam file\n pileup_columns = bam_handler.get_pileupcolumns_aligned_to_a_site(contig, pos-1)\n # create the pileup processor object\n pileup_object = modules.pileup_creator.PileupProcessor(ref_handler, pileup_columns, contig, pos-1,\n rec.type, rec.alt)\n # create the image\n image_array, array_shape = pileup_object.create_image_test(pos-1, image_height=299, image_width=299,\n ref_band=5, alt=rec.alt)\n # file name for the image and save the image\n file_name = contig + \"_\" + str(rec.pos)\n pileup_object.save_image_as_png(image_array, output_dir, file_name)\n\n # label of the image and save the image\n label = get_label(rec.type)\n smry.write(os.path.abspath(output_dir + file_name) + \".png,\" + str(label) + ',' + ','.join(\n map(str, array_shape)) + '\\n')\n\n # report progress\n if (total_generated_hom_alt+total_generated_hom+total_generated_het) % 100 == 0:\n total = (total_generated_hom_alt+total_generated_hom+total_generated_het)\n sys.stderr.write(str(total) + ' variants processed in region ' + str(contig) + str(site) + \"\\n\")\n\n # print some stats\n sys.stderr.write('IN REGION: ' + str(contig) + ' ' + site + '\\n')\n sys.stderr.write('TOTAL IN RECORDS:\\n' + 'HOM\\t' + 'HET\\t' + 'HOM_ALT\\t' + '\\n')\n sys.stderr.write(str(total_hom) + '\\t' + str(total_het) + '\\t' + str(total_homalt) + '\\n')\n\n sys.stderr.write('TOTAL GENERATED:\\n' + 'HOM\\t' + 'HET\\t' + 'HOM_ALT' + '\\n')\n sys.stderr.write(str(total_generated_hom) + '\\t' + str(total_generated_het) + '\\t'\n + str(total_generated_hom_alt) + '\\n')", "def Param_gen_and_write(self):\n SF_start_params = self.source_df.groupby(\n \"FinalID\").apply(self.start_params)\n SF_start_params.reset_index(level=0, inplace=True)\n self.source_df = pd.merge(\n self.source_df,\n SF_start_params,\n on=\"FinalID\")\n self.source_df.to_csv(\"../Data/Biotraits_with_start_params.csv\")", "def calculate_genotype_probabilities(self):\n for name, member in self.members.items():\n member.genotype_probabilities = self.genotype_probabilities_of(name)", "def read_write_protein_files(dir_path, heme_files):\n for i in number_of_files:\n# seqs = {}\n input_files = (dir_path + heme_files[i])\n f = open(input_files)\n count = 0\n# output_file = (dir_path + heme_files[i] + \".txt\")\n# g = open(output_file, \"x\")\n with open(input_files) as f:\n for line in f:\n if line.startswith('>'):\n name = line[1:].rstrip('\\n')\n count = count + 1\n seqs =[]\n else: # sequence, not header\n seqs[name] = seqs[name] + line\n# 
sequences += line[:-1]\n# output_file = open(\"out_\" + str(count) + \"_.txt\", \"a\")\n# output_file.write(str(len(sequences)))\n print(\"Number of proteins read:\" + count)\n f.close", "def stampaGTFEsIn(dictTranscript, dictGenes, dictInput, fileOut, geneNames):\n\n\tstringaGTF \t\t\t\t= \t\t'%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'\t\t\t\t\t# Formato della riga da stampare nel file\n\texonF\t\t\t\t\t= \t\t'exon_number \"%d\"'\t\t\t\t\t\t\t# Formato della stringa di tipo exon (True)\n\tintronF\t\t\t\t\t=\t\t'intron_number \"%d\"'\t\t\t\t\t\t# Formato della stringa di tipo intron (False)\n\t\n\t# Indici all'interno del dizionario dei transcript\n\t#\n\tidx_transcriptName = 0\n\tidx_geneID = 1\n\t\n\t# Indici all'interno del dizionari dei geni\n\t#\n\tidx_geneName = 0\n\tidx_cromosoma = 1\n\n\t# Indici all'interno del dizionario degli introni e degli esoni\n\t#\n\tidx_start = 0\n\tidx_end = 1\n\tidx_tipo = 2\t\n\n\t# Tipo di regioni\n\tesone = True\n\tintrone = False\n\n\n\t# Apertura e preparazione dei file da scrivere (un file gtf con\n\t# esoni/introni per ogni gene e uno totale con tutte le regioni per tutti\n\t# i geni passati dall'utente\n\t#\t\n\tfiles = {}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\n\tfor gene in geneNames:\t\t\t\t\t\t\t\t\t\t\t\t \n\t\tcod = geneNames[gene]\n\t\t# Avendo tanti geni, ad ogni nome di gene si associa la relativa\n\t\t# cartella del gene corrente tra quelli passati dall'utente\n\t\t#\n\t\tif not path.exists(cartella % cod):\n\t\t\tsystem('mkdir ' + cartella % cod)\n\t\tfiles[gene] = open(str(cartella % cod + fileOut), 'w')\n\t\t\n\t# File contenente le regioni esoniche/introniche di tutti i geni\n\t# passati dall'utente (serve per mappare le reads)\n\t#\n\tfileGtf = open(str(fileOut), 'w')\t\t\t\t\t\t\t \n\n\tfor transcriptID in dictInput:\n\t\tgeneID \t\t\t= dictTranscript[transcriptID][idx_geneID]\n\t\tcromosoma\t\t= dictGenes[geneID][idx_cromosoma]\n\t\tgeneName\t\t= dictGenes[geneID][idx_geneName]\n\t\ttranscriptName \t= dictTranscript[transcriptID][idx_transcriptName]\n\t\t# Inizializzazione del numero di esone/introne da stampare nel file\n\t\t#\n\t\tnrEs \t\t\t= 1\n\t\tnrIn \t\t\t= 1\n\t\t\n\t\tfor i in range(0, len(dictInput[transcriptID][idx_start])):\n\t\t\tstart\t\t= dictInput[transcriptID][idx_start][i]\n\t\t\tend\t\t\t= dictInput[transcriptID][idx_end][i]\n\t\t\ttipo\t\t= dictInput[transcriptID][idx_tipo][i]\n\n\t\t\tif tipo == esone:\n\t\t\t\tregione = exonF % (nrEs)\t\t\t\t\t\t\t\t\t\t# Stampa della stringa in formato exon\n\t\t\t\tnrEs += 1\n\t\t\telse:\n\t\t\t\tregione = intronF % (nrIn)\t\t\t\t\t\t\t\t\t\t# Stampa della stringa in formato intron\n\t\t\t\tnrIn += 1\n\t\t\t\t\n\t\t\tstrGtf = stringaGTF % (cromosoma, str(start), str(end), regione,\t\t\n\t\t\t\t\t\t\t\t geneName, transcriptName)\t\t\t\t\t# Creazione della riga del file\n\t\t\t\n\t\t\tif geneName in geneNames:\t\t\t\t\t\t\t\t\t\t\t# Se il gene presenta regioni introniche..\n\t\t\t\tfiles[geneName].write(strGtf)\t\t\t\t\t\t\t\t\t# ..si stampa il file gtf relativo alle proprie..\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ..regioni introniche nella propria cartella\n\n\t\t\tfileGtf.write(strGtf)\n\t\t\t\t\n\tif geneNames:\n\t\tfor gene in files:\n\t\t\tfiles[gene].close()\n\n\tfileGtf.close()", "def analyse_genes(hpo_graph, hpo_by_proband, probands_by_gene, output_path, iterations, score_type):\n \n check_terms_in_graph(hpo_graph, hpo_by_proband)\n \n # Sometimes output_path is actually sys.stdout, other times it is a path.\n try:\n output = open(output_path, \"w\")\n except TypeError:\n 
output = output_path\n \n output.write(\"hgnc\\thpo_similarity_p_value\\n\")\n \n for gene in sorted(probands_by_gene):\n probands = probands_by_gene[gene]\n \n p_value = None\n if len(probands) > 1:\n p_value = test_similarity(hpo_graph, hpo_by_proband, probands, iterations, score_type)\n \n if p_value is None:\n continue\n \n output.write(\"{0}\\t{1}\\n\".format(gene, p_value))\n \n output.close()", "def _from_ppc_gen(net, ppc):\n n_gen = ppc[\"gen\"].shape[0]\n\n # if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array\n if len(ppc[\"gen\"].shape) == 1:\n ppc[\"gen\"] = np.array(ppc[\"gen\"], ndmin=2)\n\n bus_pos = _get_bus_pos(ppc, ppc[\"gen\"][:, GEN_BUS])\n\n # determine which gen should considered as ext_grid, gen or sgen\n is_ext_grid, is_gen, is_sgen = _gen_to_which(ppc, bus_pos=bus_pos)\n\n # take VG of the last gen of each bus\n vg_bus_lookup = pd.DataFrame({\"vg\": ppc[\"gen\"][:, VG], \"bus\": bus_pos})\n # vg_bus_lookup = vg_bus_lookup.drop_duplicates(subset=[\"bus\"], keep=\"last\").set_index(\"bus\")[\"vg\"]\n vg_bus_lookup = vg_bus_lookup.drop_duplicates(subset=[\"bus\"]).set_index(\"bus\")[\"vg\"]\n\n # create ext_grid\n idx_eg = list()\n for i in np.arange(n_gen, dtype=int)[is_ext_grid]:\n idx_eg.append(create_ext_grid(\n net, bus=bus_pos[i], vm_pu=vg_bus_lookup.at[bus_pos[i]],\n va_degree=ppc['bus'][bus_pos[i], VA],\n in_service=(ppc['gen'][i, GEN_STATUS] > 0).astype(bool),\n max_p_mw=ppc['gen'][i, PMAX], min_p_mw=ppc['gen'][i, PMIN],\n max_q_mvar=ppc['gen'][i, QMAX], min_q_mvar=ppc['gen'][i, QMIN]))\n\n # create gen\n idx_gen = create_gens(\n net, buses=bus_pos[is_gen], vm_pu=vg_bus_lookup.loc[bus_pos[is_gen]].values,\n p_mw=ppc['gen'][is_gen, PG], sn_mva=ppc['gen'][is_gen, MBASE],\n in_service=(ppc['gen'][is_gen, GEN_STATUS] > 0), controllable=True,\n max_p_mw=ppc['gen'][is_gen, PMAX], min_p_mw=ppc['gen'][is_gen, PMIN],\n max_q_mvar=ppc['gen'][is_gen, QMAX], min_q_mvar=ppc['gen'][is_gen, QMIN])\n\n # create sgen\n idx_sgen = create_sgens(\n net, buses=bus_pos[is_sgen], p_mw=ppc['gen'][is_sgen, PG],\n q_mvar=ppc['gen'][is_sgen, QG], sn_mva=ppc['gen'][is_sgen, MBASE], type=\"\",\n in_service=(ppc['gen'][is_sgen, GEN_STATUS] > 0),\n max_p_mw=ppc['gen'][is_sgen, PMAX], min_p_mw=ppc['gen'][is_sgen, PMIN],\n max_q_mvar=ppc['gen'][is_sgen, QMAX], min_q_mvar=ppc['gen'][is_sgen, QMIN],\n controllable=True)\n\n neg_p_gens = np.arange(n_gen, dtype=int)[(ppc['gen'][:, PG] < 0) & (is_gen | is_sgen)]\n neg_p_lim_false = np.arange(n_gen, dtype=int)[ppc['gen'][:, PMIN] > ppc['gen'][:, PMAX]]\n neg_q_lim_false = np.arange(n_gen, dtype=int)[ppc['gen'][:, QMIN] > ppc['gen'][:, QMAX]]\n if len(neg_p_gens):\n logger.info(f'These gen have PG < 0 and are not converted to ext_grid: {neg_p_gens}.')\n if len(neg_p_lim_false):\n logger.info(f'These gen have PMIN > PMAX: {neg_p_lim_false}.')\n if len(neg_q_lim_false):\n logger.info(f'These gen have QMIN > QMAX: {neg_q_lim_false}.')\n\n # unused data of ppc: Vg (partwise: in ext_grid and gen), mBase, Pc1, Pc2, Qc1min, Qc1max,\n # Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30,ramp_q, apf\n\n # gen_lookup\n gen_lookup = pd.DataFrame({\n 'element': np.r_[idx_eg, idx_gen, idx_sgen],\n 'element_type': [\"ext_grid\"]*sum(is_ext_grid) + [\"gen\"]*sum(is_gen) + [\"sgen\"]*sum(is_sgen)\n })\n return gen_lookup", "def writeGromacsTopolFiles(self, amb2gmx = False):\n\n self.printMess(\"Writing GROMACS files\\n\")\n\n self.setAtomType4Gromacs()\n\n self.writeGroFile()\n\n self.writeGromacsTop(amb2gmx = amb2gmx)\n\n 
self.writeMdpFiles()", "def MakePmapProgram(MaterialInfoList,OutputPath,GasType,GasAtomType,SpecialPairList,GasAtomDictionary,\r\n MaterialAtomDictionary,GridSpacingP,HEPCP,CutOff,Nodes,TaskSuffix,TorqueSetting,MuSiCSetting):\r\n\r\n def MakeAtomAtomFile(PmapOutputPath,MaterialInfo,GasAtomType,SpecialPairList,GasAtomDictionary,MaterialAtomDictionary,CutOff):\r\n\r\n with open('%s/atom_atom_file' % (PmapOutputPath), 'w') as AtomAtomFile:\r\n\r\n AtomAtomFile.write('-'.center(80, '-'))\r\n AtomAtomFile.write('\\n')\r\n\r\n for i in range(len(MaterialInfo[5])):\r\n for j in range(len(MaterialInfo[5])):\r\n if i <= j:\r\n AtomAtomFile.write('%-10s%-10sOFF\\n' % (MaterialInfo[5][i], MaterialInfo[5][j]))\r\n\r\n for k in range(len(GasAtomType)):\r\n for l in range(len(GasAtomType)):\r\n if k <= l:\r\n Key=False\r\n for SpecialPair in SpecialPairList:\r\n if GasAtomType[k] in SpecialPair[0] and GasAtomType[l] in SpecialPair[0] and GasAtomType[k]!=GasAtomType[l]:\r\n Key=True\r\n if Key==False:\r\n num1 = GasAtomDictionary.get(GasAtomType[k])\r\n num2 = GasAtomDictionary.get(GasAtomType[l])\r\n sig1 = str('%.3f' % ((float(num1[0]) + float(num2[0])) / 2))\r\n eps1 = str('%.3f' % ((float(num1[1]) * float(num2[1])) ** 0.5))\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%[email protected]\\n%-10s%-10s%-10sHICUT@%[email protected]\\n'%(GasAtomType[k],GasAtomType[l],'LJ',sig1,eps1,CutOff,GasAtomType[k],GasAtomType[l],'WFCOUL',CutOff))\r\n\r\n for h in range(len(GasAtomType)):\r\n for g in range(len(MaterialInfo[5])):\r\n Key = False\r\n for SpecialPair in SpecialPairList:\r\n if GasAtomType[h] in SpecialPair[0] and MaterialInfo[5][g] in SpecialPair[0]:\r\n Key = True\r\n if Key==False:\r\n num3 = GasAtomDictionary.get(GasAtomType[h])\r\n num4 = MaterialAtomDictionary.get(MaterialInfo[5][g])\r\n sig2 = str('%.3f' % ((float(num3[0]) + float(num4[0])) / 2))\r\n eps2 = str('%.3f' % ((float(num3[1]) * float(num4[1])) ** 0.5))\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%[email protected]\\n%-10s%-10s%-10sHICUT@%[email protected]\\n'%(GasAtomType[h],MaterialInfo[5][g],'LJ',sig2,eps2,CutOff,GasAtomType[h],MaterialInfo[5][g],'WFCOUL',CutOff))\r\n\r\n for m in SpecialPairList:\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%[email protected]\\n%-10s%-10s%-10sHICUT@%[email protected]\\n'%(m[0][0],m[0][1],'LJ',m[1][0],m[1][1],CutOff,m[0][0],m[0][1],'WFCOUL',CutOff))\r\n\r\n AtomAtomFile.write('-'.center(80, '-'))\r\n\r\n def MakeIntramolecularFile(PmapOutputPath,MaterialInfo,GasAtomType,GasAtomDictionary):\r\n\r\n with open('%s/intramolecular_file' % (PmapOutputPath), 'w') as IntraFile:\r\n IntraFile.write('Intra: %s'%(MaterialInfo[7]))\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0]!='0':\r\n IntraFile.write('\\nIntra: %s'%(i))\r\n\r\n def MakeMoleMolePmapFile(PmapOutputPath,MaterialInfo,GasAtomType,GasAtomDictionary):\r\n\r\n with open('%s/mole_mole_pmap_file' % (PmapOutputPath), 'w') as MoleMolePmap:\r\n MoleMolePmap.write('''%s %s NCOUL OFF\r\n%s %s COUL OFF\\n\\n'''%(MaterialInfo[7],MaterialInfo[7],MaterialInfo[7],MaterialInfo[7]))\r\n\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0]!='0':\r\n MoleMolePmap.write('''%s %s NCOUL OFF\r\n%s %s COUL OFF\r\n\r\n%s %s NCOUL BASIC LJ FAST\r\n%s %s COUL OFF\\n\\n''' % (i, i, i, i, i,MaterialInfo[7], i, MaterialInfo[7]))\r\n\r\n def 
MakePmapMaker(PmapOutputPath,MaterialInfo,GasAtomType,GridSpacingP,HEPCP,GasAtomDictionary):\r\n\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0]!='0':\r\n with open('%s/pmap_maker_%s_in_%s.ctr'%(PmapOutputPath,i,MaterialInfo[7]), 'w') as PmapMaker:\r\n PmapMaker.write('''------ General Information ------------------------------------------\r\n%s molecule in %s\r\n1 # No. of iterations\r\n1 # No. of steps between writes to output/log file\r\n2 # No. of steps between writes to crash file\r\n2 # No. of steps between writes to config. file\r\n1 # Start numbering simulations from .\r\n30728 # Iseed\r\n1 # specifies contents of config file\r\n%s_in_%s.res # Restart File to write to\r\n%s_in_%s.con # Configuration File\r\n\r\n------ Atomic Types --------------------------------------------------\r\n%s # number of atomic types\r\n\r\n%s\r\n%s.atm'''%(i,MaterialInfo[7],i,MaterialInfo[7],i,MaterialInfo[7],len(MaterialInfo[5])+1,i,i))\r\n\r\n for j in MaterialInfo[5]:\r\n PmapMaker.write('\\n\\n%s\\n%s.atm' % (j,j))\r\n\r\n PmapMaker.write('''\\n------ Molecule Types -------------------------------------------------\r\n2\r\n\r\n%s\r\n%s.mol\r\n\r\n%s\r\n%s.mol\r\n------ Simulation Cell Information ------------------------------------\r\n%s # Fundamental cell file\r\n%s # No. of unit cells in x, y, z direction\r\n1, 1, 1 # (1 = Periodic) in x, y, z\r\n------ Forcefield Information -------------------------------------------\r\nBASIC\r\nMOL\r\natom_atom_file # atom-atom interaction file\r\nmole_mole_pmap_file # sorbate-sorbate interaction file\r\nintramolecular_file # intramolecular interaction file/specification\r\n------ Mapmaker Information -----------------------------------------------\r\n1 # Number of maps to make\r\n\r\n%s # Sorbent to map\r\n%s # Sorbate to probe map with\r\nNCOUL LJ # Interaction type to map\r\n%s # Approxiamte grid spacing (Ang)\r\n%s # High end potential cutoff (kJ/mol)\r\n%s_in_%s.pmap # Map filename or AUTO\r\n------ Configuration Initialization -------------------------------------\r\n%s # Sorbate_Type\r\nMOLECULE NULL\r\n%s # Sorbate_Type\r\nFIXED NULL''' % (i, i,MaterialInfo[7],MaterialInfo[7],MaterialInfo[7],', '.join(MaterialInfo[4]),MaterialInfo[7],i,GridSpacingP,HEPCP,i,MaterialInfo[7],i,MaterialInfo[7]))\r\n\r\n def MakeTorqueFile(PmapOutputPath,Nodes,TaskSuffix,TorqueSetting,MuSiCSetting,GasAtomType,GasAtomDictionary,MaterialInfo,OutputPath):\r\n\r\n Node = random.choice(Nodes)\r\n\r\n with open('%s/run_pmapmaker.pbs' % (PmapOutputPath), 'w') as Torque:\r\n Torque.write('''#!/bin/bash\r\n#PBS -l nodes=%s\r\n#PBS -N MuSiC_pmap.%s\r\n#PBS -o music_pmap_jobs.out\r\n#PBS -j oe\r\n\r\n#\r\n# The number of processors you desire is indicated by replacing\r\n# <nproc> above.\r\n#\r\n\r\n#\r\n# GROMACS path and arguments to mdrun :\r\n#\r\ncd $PBS_O_WORKDIR\r\n\r\n# =============== Environment Setting ============================ #\\n''' % (Node, TaskSuffix))\r\n\r\n for i in TorqueSetting:\r\n Torque.write('%s' % (i))\r\n\r\n Torque.write('''# =============== Don't Change Above Setting ===================== #\r\n\r\necho \"============The computed nodes============\"\r\ncp -f $PBS_NODEFILE NODE.txt\r\necho \"User: \" $USER\r\ncat $PBS_NODEFILE\r\necho \"Job ID: \" $PBS_JOBID\r\necho \"Job Cookie: \" $PBS_JOBCOOKIE\r\necho \"Using executable: \" `which mpirun`\r\necho `date`\r\necho \"============Finished setting==============\"\r\n\r\n# =========== Setting Jobs ============================ #\\n''')\r\n\r\n for j 
in MuSiCSetting:\r\n Torque.write('%s' % (j))\r\n\r\n Torque.write('''export ATOMSDIR=%s\r\n export MOLSDIR=%s\r\n export PMAPDIR=%s\r\n export EMAPDIR=%s\r\n export SMAPDIR=%s''' % (os.path.join(OutputPath, 'Atoms'), os.path.join(OutputPath, 'Mols'),\r\n os.path.join(OutputPath, 'Maps'), os.path.join(OutputPath, 'Maps'),\r\n os.path.join(OutputPath, 'Maps')))\r\n\r\n Torque.write('''# =========== Setting Jobs ============================ #\r\n\r\n# +++++++++++++++ Start Computing +++++++++++++++++++++ #\r\n\r\nTIME_DIR=$(date '+%Y-%m-%d_%H-%M-%S')\r\nTIME_DIR=\"${USER}_jobs_${TIME_DIR}_${PBS_JOBID}\"\r\nif [ -d /utmp ]; then\r\n TEMP_DIR=/utmp/${USER}/${TIME_DIR}\r\nelse\r\n TEMP_DIR=/temp/${USER}/${TIME_DIR}\r\nfi\r\nmkdir -p ${TEMP_DIR}\r\ncp -rf * ${TEMP_DIR}\r\ncd ${TEMP_DIR}\r\nrm -f music_pmap_jobs.out\r\necho \"The temp direcotry: \" ${TEMP_DIR}\r\necho \"============Finished setting==============\"\r\n\r\necho \"+++++++++++++ Run MuSic ++++++++++++++++++++++++++++\"\\n''')\r\n\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0] != '0':\r\n Torque.write('music_mapmaker pmap_maker_%s_in_%s.ctr > pmap_maker_%s_in_%s.txt\\necho `date`\\n'%(i,MaterialInfo[7],i,MaterialInfo[7]))\r\n\r\n Torque.write('''echo \"+++++++++++++ Finish MuSic +++++++++++++++++++++++++\"\r\n\r\ncd $PBS_O_WORKDIR\r\ncp -rf ${TEMP_DIR}/* .\r\nrm -rf ${TEMP_DIR}\r\n\r\n\r\necho \"All files were copied back!\"\r\necho \"The work direcotry: \" $PBS_O_WORKDIR\r\necho `date`\r\necho \"============Finished Job ==============\"''')\r\n\r\n def main():\r\n\r\n for MaterialInfo in MaterialInfoList:\r\n if MaterialInfo[6]==True:\r\n PmapOutputPath='%s/%s/%s/%s'%(OutputPath,'MakePmap','_'.join(GasType),MaterialInfo[7])\r\n if os.path.exists(PmapOutputPath):\r\n pass\r\n else:\r\n os.makedirs(PmapOutputPath)\r\n\r\n MakeAtomAtomFile(PmapOutputPath,MaterialInfo,GasAtomType,SpecialPairList,GasAtomDictionary,MaterialAtomDictionary,CutOff)\r\n MakeMoleMolePmapFile(PmapOutputPath, MaterialInfo, GasAtomType,GasAtomDictionary)\r\n MakePmapMaker(PmapOutputPath,MaterialInfo,GasAtomType,GridSpacingP,HEPCP,GasAtomDictionary)\r\n MakeIntramolecularFile(PmapOutputPath, MaterialInfo, GasAtomType,GasAtomDictionary)\r\n MakeTorqueFile(PmapOutputPath,Nodes,TaskSuffix,TorqueSetting,MuSiCSetting,GasAtomType,GasAtomDictionary,MaterialInfo,OutputPath)\r\n\r\n if __name__ == '__main__':\r\n main()", "def write_psf(self):\n # **********************************\n # **********************************\n # psf writer (start)\n # **********************************\n # **********************************\n\n print(\"******************************\")\n print(\"\")\n print(\n \"The charmm X-plor format psf writer (the write_psf function) is running\"\n )\n\n date_time = datetime.datetime.today()\n\n print(\n \"write_psf: forcefield_selection = {}, residues = {}\".format(\n self.forcefield_selection, self.residues\n )\n )\n\n print(\"******************************\")\n print(\"\")\n\n if self.structure_box_1:\n list_of_structures = [\n self.structure_box_0_ff,\n self.structure_box_1_ff,\n ]\n list_of_file_names = [self.filename_box_0, self.filename_box_1]\n stuct_only = [self.structure_box_0_ff, self.structure_box_1_ff]\n else:\n list_of_structures = [self.structure_box_0_ff]\n list_of_file_names = [self.filename_box_0]\n stuct_only = [self.structure_box_0_ff]\n\n for q in range(0, len(list_of_structures)):\n stuct_iteration = list_of_structures[q]\n file_name_iteration = list_of_file_names[q]\n output 
= str(file_name_iteration) + \".psf\"\n stuct_only_iteration = stuct_only[q]\n # Lammps syntax depends on the functional form\n # Infer functional form based on the properties of the stuct_iteration\n if self.detect_forcefield_style:\n # Check for angles\n if len(stuct_iteration.urey_bradleys) > 0:\n print(\n \"Warning: Urey bradley terms detected. GOMC does no support the Urey-Bradley terms\"\n )\n warn(\n \"warning: Urey bradley terms detected. \"\n \"GOMC does no support the Urey-Bradley terms\"\n )\n use_urey_bradleys = True\n else:\n print(\"No urey bradley terms detected\")\n use_urey_bradleys = False\n\n # Check for dihedrals\n if len(stuct_iteration.rb_torsions) > 0:\n print(\n \"RB Torsions detected, will converted to CHARMM Dihedrals\"\n )\n use_rb_torsions = True\n dihedrals_list = stuct_iteration.rb_torsions\n dihedrals = [\n [\n dihedral.atom1.idx + 1,\n dihedral.atom2.idx + 1,\n dihedral.atom3.idx + 1,\n dihedral.atom4.idx + 1,\n ]\n for dihedral in stuct_iteration.rb_torsions\n ]\n else:\n use_rb_torsions = False\n\n if len(stuct_iteration.dihedrals) > 0:\n print(\n \"Charmm dihedrals detected, so CHARMM Dihedrals will remain\"\n )\n use_dihedrals = True\n dihedrals_list = stuct_iteration.dihedrals\n dihedrals = [\n [\n dihedral.atom1.idx + 1,\n dihedral.atom2.idx + 1,\n dihedral.atom3.idx + 1,\n dihedral.atom4.idx + 1,\n ]\n for dihedral in stuct_iteration.dihedrals\n ]\n else:\n use_dihedrals = False\n if (use_rb_torsions is False) and (use_dihedrals is False):\n dihedrals_list = []\n dihedrals = []\n if use_rb_torsions and use_dihedrals:\n warn(\n \"Multiple dihedral styles detected, check your \"\n \"Forcefield XML and structure files\"\n )\n\n # Check for impropers\n for dihedral in stuct_iteration.dihedrals:\n if dihedral.improper:\n warn(\n \"ERROR: Amber-style impropers are currently not supported in GOMC\"\n )\n\n impropers_list = stuct_iteration.impropers\n impropers = [\n [\n improper.atom1.idx + 1,\n improper.atom2.idx + 1,\n improper.atom3.idx + 1,\n improper.atom4.idx + 1,\n ]\n for improper in stuct_iteration.impropers\n ]\n\n no_atoms = len(stuct_iteration.atoms)\n no_bonds = len(stuct_iteration.bonds)\n no_angles = len(stuct_iteration.angles)\n\n no_dihedrals = len(dihedrals)\n no_impropers = len(impropers)\n\n no_donors = len(stuct_iteration.donors)\n no_acceptors = len(stuct_iteration.acceptors)\n no_groups = len(stuct_iteration.groups)\n\n # psf printing (start)\n\n residue_data_list = []\n residue_names_list = []\n for k, atom in enumerate(stuct_only_iteration.atoms):\n residue_data_list.append(str(atom.residue))\n residue_names_list.append(atom.residue.name)\n\n unique_residue_data_dict = {}\n unique_residue_data_list = []\n residue_data_name_list = []\n\n for m, residue in enumerate(stuct_only_iteration.residues):\n unique_residue_data_list.append(\n str(stuct_only_iteration.residues[m])\n )\n unique_residue_data_dict.update(\n {unique_residue_data_list[m]: m + 1}\n )\n residue_data_name_list.append(\n stuct_only_iteration.residues[m].name\n )\n\n res_no_chain_iter_corrected = []\n residue_id_list = []\n residue_id_adder_fixed_struct_wo_bonds = 0\n for f, PSF_atom_iteration_0 in enumerate(\n stuct_only_iteration.atoms\n ):\n if f > 0:\n if (\n PSF_atom_iteration_0.residue.chain\n == previous_residue_chain\n and len(PSF_atom_iteration_0.bonds) == 0\n ):\n residue_id_adder_fixed_struct_wo_bonds += 1\n\n previous_residue_chain = PSF_atom_iteration_0.residue.chain\n\n residue_id_int = int(\n unique_residue_data_dict[residue_data_list[f]]\n + 
residue_id_adder_fixed_struct_wo_bonds\n )\n res_id_adder = int(\n (residue_id_int % self.max_residue_no) % self.max_residue_no\n )\n if int(res_id_adder) == 0:\n res_no_iteration_corrected = int(self.max_residue_no)\n else:\n res_no_iteration_corrected = res_id_adder\n\n res_no_chain_iter_corrected.append(res_no_iteration_corrected)\n residue_id_list.append(residue_id_int)\n\n output_write = genopen(output, \"w\")\n\n first_indent = \"%8s\"\n psf_formating = (\n \"%8s %-4s %-4s %-4s %-4s %4s %10.6f %13.4f\" + 11 * \" \"\n )\n\n output_write.write(\"PSF \")\n output_write.write(\"\\n\\n\")\n\n no_of_remarks = 3\n output_write.write(first_indent % no_of_remarks + \" !NTITLE\\n\")\n output_write.write(\n \" REMARKS this file \"\n + file_name_iteration\n + \" - created by MoSDeF-GOMC using the\"\n + \"\\n\"\n )\n output_write.write(\n \" REMARKS parameters from the \"\n + str(self.forcefield_selection)\n + \" force field via MoSDef\\n\"\n )\n output_write.write(\n \" REMARKS created on \" + str(date_time) + \"\\n\\n\\n\"\n )\n\n # This converts the atom name in the GOMC psf and pdb files to unique atom names\n print(\n \"bead_to_atom_name_dict = {}\".format(\n self.bead_to_atom_name_dict\n )\n )\n [\n unique_individual_atom_names_dict,\n individual_atom_names_list,\n missing_bead_to_atom_name,\n ] = unique_atom_naming(\n stuct_only_iteration,\n residue_id_list,\n residue_names_list,\n bead_to_atom_name_dict=self.bead_to_atom_name_dict,\n )\n\n if None in [\n unique_individual_atom_names_dict,\n individual_atom_names_list,\n missing_bead_to_atom_name,\n ]:\n self.input_error = True\n print_error_message = (\n \"ERROR: The unique_atom_naming function failed while \"\n \"running the charmm_writer function. Ensure the proper inputs are \"\n \"in the bead_to_atom_name_dict.\"\n )\n raise ValueError(print_error_message)\n\n # ATOMS: Calculate the atom data\n # psf_formating is conducted for the for CHARMM format (i.e., atom types are base 52, letters only)\n output_write.write(first_indent % no_atoms + \" !NATOM\\n\")\n for i_atom, PSF_atom_iteration_1 in enumerate(\n stuct_iteration.atoms\n ):\n segment_id = PSF_atom_iteration_1.residue.segid or \"SYS\"\n atom_type_iter = base10_to_base52_alph(\n self.atom_types_to_index_value_dict[\n PSF_atom_iteration_1.type\n + \"_\"\n + PSF_atom_iteration_1.residue.name\n ]\n )\n\n atom_lines_iteration = psf_formating % (\n i_atom + 1,\n segment_id,\n res_no_chain_iter_corrected[i_atom],\n str(residue_names_list[i_atom])[: self.max_resname_char],\n individual_atom_names_list[i_atom],\n atom_type_iter,\n PSF_atom_iteration_1.charge,\n PSF_atom_iteration_1.mass,\n )\n\n output_write.write(\"%s\\n\" % atom_lines_iteration)\n\n output_write.write(\"\\n\")\n\n # BONDS: Calculate the bonding data\n output_write.write(first_indent % no_bonds + \" !NBOND: bonds\\n\")\n for i_bond, PSF_bond_iteration_1 in enumerate(\n stuct_iteration.bonds\n ):\n output_write.write(\n (first_indent * 2)\n % (\n PSF_bond_iteration_1.atom1.idx + 1,\n PSF_bond_iteration_1.atom2.idx + 1,\n )\n )\n\n if (i_bond + 1) % 4 == 0:\n output_write.write(\"\\n\")\n\n if no_bonds % 4 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_bonds == 0:\n output_write.write(\"\\n\")\n\n # ANGLES: Calculate the angle data\n output_write.write(first_indent % no_angles + \" !NTHETA: angles\\n\")\n for i_angle, angle_iteration in enumerate(stuct_iteration.angles):\n output_write.write(\n (first_indent * 3)\n % (\n angle_iteration.atom1.idx + 1,\n angle_iteration.atom2.idx + 
1,\n angle_iteration.atom3.idx + 1,\n )\n )\n\n if (i_angle + 1) % 3 == 0:\n output_write.write(\"\\n\")\n\n if no_angles % 3 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_angles == 0:\n output_write.write(\"\\n\")\n\n # DIHEDRALS: Calculate the dihedral data\n output_write.write(\n first_indent % no_dihedrals + \" !NPHI: dihedrals\\n\"\n )\n for i_dihedral, dihedral_iter in enumerate(dihedrals_list):\n (\n dihedral_atom_1,\n dihedral_atom_2,\n dihedral_atom_3,\n dihedral_atom_4,\n ) = (\n dihedral_iter.atom1,\n dihedral_iter.atom2,\n dihedral_iter.atom3,\n dihedral_iter.atom4,\n )\n\n output_write.write(\n (first_indent * 4)\n % (\n dihedral_atom_1.idx + 1,\n dihedral_atom_2.idx + 1,\n dihedral_atom_3.idx + 1,\n dihedral_atom_4.idx + 1,\n )\n )\n\n if (i_dihedral + 1) % 2 == 0:\n output_write.write(\"\\n\")\n\n if no_dihedrals % 2 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_dihedrals == 0:\n output_write.write(\"\\n\")\n\n # IMPROPERS: Calculate the improper data\n output_write.write(\n first_indent % no_impropers + \" !NIMPHI: impropers\\n\"\n )\n for i_improper, improper_iter in enumerate(impropers_list):\n (\n improper_atom_1,\n improper_atom_2,\n improper_atom_3,\n improper_atom_4,\n ) = (\n improper_iter.atom1,\n improper_iter.atom2,\n improper_iter.atom3,\n improper_iter.atom4,\n )\n\n output_write.write(\n (first_indent * 4)\n % (\n improper_atom_1.idx + 1,\n improper_atom_2.idx + 1,\n improper_atom_3.idx + 1,\n improper_atom_4.idx + 1,\n )\n )\n\n if (i_improper + 1) % 2 == 0:\n output_write.write(\"\\n\")\n\n if no_impropers % 2 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_impropers == 0:\n output_write.write(\"\\n\")\n\n # DONOR: calculate the donor data\n output_write.write(first_indent % no_donors + \" !NDON: donors\\n\")\n for donor_i, donor_iter in enumerate(stuct_iteration.donors):\n output_write.write(\n (first_indent * 2)\n % (donor_iter.atom1.idx + 1, donor_iter.atom2.idx + 1)\n )\n if (donor_i + 1) % 4 == 0:\n output_write.write(\"\\n\")\n\n if no_donors % 4 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_donors == 0:\n output_write.write(\"\\n\")\n\n # ACCEPTOR: calculate the acceptor data\n output_write.write(\n first_indent % no_acceptors + \" !NACC: acceptors\\n\"\n )\n for acceptor_i, acceptor_iter in enumerate(\n stuct_iteration.acceptors\n ):\n output_write.write(\n (first_indent * 2)\n % (acceptor_iter.atom1.idx + 1, acceptor_iter.atom2.idx + 1)\n )\n if (acceptor_i + 1) % 4 == 0:\n output_write.write(\"\\n\")\n\n if no_acceptors % 4 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_acceptors == 0:\n output_write.write(\"\\n\")\n\n # NNB: calculate the NNB data\n output_write.write(first_indent % 0 + \" !NNB\\n\\n\")\n for nbb_i, atoms_iter in enumerate(stuct_iteration.atoms):\n output_write.write(first_indent % 0)\n if (nbb_i + 1) % 8 == 0:\n output_write.write(\"\\n\")\n\n if no_atoms % 8 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_atoms == 0:\n output_write.write(\"\\n\")\n\n # GROUP: calculate the group data\n try:\n group_data = stuct_iteration.groups.nst2\n except AttributeError:\n group_data = 0\n output_write.write(\n (first_indent * 2) % (no_groups or 1, group_data) + \" !NGRP \\n\"\n )\n if stuct_iteration.groups is True:\n for group_i, group_iter in enumerate(stuct_iteration.groups):\n output_write.write(\n 
(first_indent * 3)\n % (\n group_iter.atom.idx,\n group_iter.type,\n group_iter.move,\n )\n )\n if (group_i + 1) % 3 == 0:\n output_write.write(\"\\n\")\n\n if no_groups % 3 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_groups == 0:\n output_write.write(\"\\n\")\n\n else:\n structure_abs_charge_value = abs(\n sum(\n atom_charge_iter.charge\n for atom_charge_iter in stuct_iteration.atoms\n )\n )\n if structure_abs_charge_value < 1.0e-4:\n group_type = 1\n else:\n group_type = 2\n output_write.write((first_indent * 3) % (0, group_type, 0))\n output_write.write(\"\\n\")\n\n output_write.write(\"\\n\")\n output_write.close()\n # **********************************\n # **********************************\n # psf writer (end)\n # **********************************\n # **********************************" ]
[ "0.5435754", "0.52857256", "0.52641356", "0.5249349", "0.5163392", "0.5138307", "0.5133329", "0.5126286", "0.51097524", "0.51070285", "0.5083355", "0.50709164", "0.50644994", "0.50576615", "0.5056252", "0.5051618", "0.50503314", "0.50489813", "0.5036032", "0.5025411", "0.5020264", "0.50122327", "0.49709666", "0.49699536", "0.49346018", "0.4932389", "0.4921897", "0.4919648", "0.49164006", "0.4865996" ]
0.72425526
0
To initialise the vectors, their size, and randomly allocated centroids.
def initialize(self):
    self.SIZE = self.vectors.shape[0]
    # todo can use max distance to allocation farthest apart points
    self.centroids = self.vectors[[random.randint(1, self.SIZE) for x in range(self.K)], :]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_init(self, train_data):\n\n centroids=np.zeros((self.n_clusters_, train_data.shape[1]))\n for c in range(self.n_clusters_):\n for f in range(train_data.shape[1]):\n centroids[c,f]=random.uniform(min(train_data[:,f]), max(train_data[:,f]))\n\n return centroids", "def __init__(self, vectors):\n \n self.vectors = vectors\n self.centroid = self.calcCentroid()", "def _init_centroid(self, seed: int):\n random.seed(seed)\n self.centroid_info = dict()\n self.cluster_result = dict()\n self.centroid_stable_flag = dict()\n for key_index, chosen_value in enumerate(\n random.sample(self.list_data, self.n_cluster)):\n self.centroid_info.setdefault(\"c\" + str(key_index), float(chosen_value))\n self.cluster_result.setdefault(\"c\" + str(key_index), list())\n self.centroid_stable_flag.setdefault(\"c\" + str(key_index), False)", "def init_centroids(self, data_points):\n # print(\"Init centroid\")\n # return list(map(lambda x: x[1], random.sample(labelled_data, self.k)))\n\n # Project the data: this step will take several seconds\n\n centroids_scaled = self.naive_sharding(data_points, self.k)\n return list(centroids_scaled)\n\n #sample = np.random.permutation(len(labelled_data))[:self.k]\n\n #return list(map(lambda x: labelled_data[x][1], sample))", "def _initiate_random_centroids(all_features, vocab_size):\n centroids = []\n # 1) Genereate points for initial centroids\n\n min_feat = np.ones(all_features[0].size)*np.inf\n max_feat = np.zeros(all_features[0].size)\n\n for a in all_features:\n for p in range(len(a)):\n if a[p] < min_feat[p]:\n min_feat[p] = a[p]\n else:\n if a[p] > max_feat[p]:\n max_feat[p] = a[p]\n\n\n for _ in range(vocab_size):\n random_vector = np.multiply(np.random.rand(1, all_features[0].size),\n max_feat-min_feat) + min_feat\n centroids.append(random_vector.flatten())\n\n return np.array(centroids)", "def initialize_centers(data, k):\n x_data_min = min(p[0] for p in data)\n x_data_max = max(p[0] for p in data)\n y_data_min = min(p[1] for p in data)\n y_data_max = max(p[1] for p in data)\n\n return generate_random_data(\n k,\n x_data_min,\n x_data_max,\n y_data_min,\n y_data_max\n )", "def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center", "def init_centroids(self, points , k):\n centroids = points.copy()\n numpy.random.shuffle(centroids)\n return centroids[0:k,:]", "def makeCluster(self):\n for i in range(self.k):\n #vector of length total users, pick random number 1-5\n self.centroids.append(np.random.uniform(low=1,high=5,size=len(self.user)))\n memberList = []\n self.membership.append(memberList)\n self.centroids = np.round(self.centroids)\n\n for movie in self.dictionary.keys():\n #Finds the index of the closest centroid\n closest = np.argmin(self.calculateDistance(self.dictionary[movie]))\n newVector = []\n newVector.append(movie)\n #Add the movie to the list of members of the closest centroid\n self.membership[closest].append(newVector)\n self.recalculateCentroid(self.membership[closest], closest)", "def initClusters(self):\n if len(self.labelList) != len(self.pointList):\n \traise ValueError(\"Label List and Point List not the same length!\")\n for i in range(len(self.labelList)):\n self.centroids[self.labelList[i]] = self.pointList[i]\n self.pointcounts[self.labelList[i]] = 1", "def generate_initial_centroids(k, data):\n 
centroids = []\n used_indexes = []\n while len(centroids) < k:\n random_index = random.randint(0, len(data) - 1)\n if random_index not in used_indexes:\n centroids.append(data[random_index])\n used_indexes.append(random_index)\n return centroids", "def initialize(img):\n w, h, _ = img.shape\n for c in current_cluster_centers:\n x = np.random.randint(w)\n y = np.random.randint(h)\n c[:] = img[x, y]", "def set_random_vector(self):\n self.vector = vu.create_dense_random_vector(dimension)", "def init_centroids(X,K):\n c = random.sample(list(X),K)\n return c", "def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return", "def fill_vectors(self):\n # use random numbers for generating plot data:\n random.seed(9) # fix the seed for testing\n for index in range(self.npoints):\n self.vector_x.append(index) # x coordinates\n for y in range(self.ncurves):\n self.vector_y[y].append(random.uniform(0,8))", "def __init__(self, init_centers):\n\n assert len(init_centers.shape) == 2, f\"init_centers should be a KxD matrix. Got: {init_centers.shape}\"\n (self.K, self.D) = init_centers.shape\n assert self.K > 1, f\"There must be at least 2 clusters. Got: {self.K}\"\n\n # Shape: K x D\n self.centers = np.copy(init_centers)", "def init_cluster_centroids(x, number_of_clusters):\n return x[np.random.choice(x.shape[0], number_of_clusters, replace=False), :]", "def generate_centers(self):\n\t\tcenters = []\n\t\tsize = self.config.image_size\n\t\tfor i in range(self.config.num_obj):\n\t\t\tflag = True\n\t\t\twhile flag:\n\t\t\t\tc = np.random.randint(int(size * 0.05), int(size * 0.95), 2)\n\t\t\t\tflag = False\n\t\t\t\tfor center in centers:\n\t\t\t\t\tif (abs(center[0] - c[0]) <= 0.1 * size) or (abs(center[1] - c[1]) <= 0.1 *size):\n\t\t\t\t\t\tflag = False\n\t\t\tcenters.append(c)\n\t\t\t\t\n\t\treturn centers", "def randCent(data,k):\n index = set()\n while len(index) != k:\n index.add(random.randint(0, data.shape[0]))\n index = list(index)\n centroids = data[index]\n return centroids", "def __initialise_smart(self, X, args):\n\t\tcentroids = np.zeros((self.K,self.D))\n\t\tif X.shape[0] > 10*self.K:\n\t\t\tdata = X[:10*self.K,:]\n\t\telse:\n\t\t\tdata = X\n\t\tN = data.shape[0]\n\n\t\t\t#choosing centroids\n\t\t\t#points are chosen from dataset with farhtest point clustering\n\t\tran_index = np.random.choice(N)\n\t\tcentroids[0,:] = data[ran_index]\n\n\t\tfor k in range(1,self.K):\n\t\t\tdistances = np.zeros((N,k)) #(N,K)\n\t\t\tfor k_prime in range(k):\n\t\t\t\tdistances[:,k_prime] = np.sum(np.square(data - centroids[k_prime,:]), axis =1) #(N,K')\n\t\t\tdistances = np.min(distances, axis = 1) #(N,)\n\t\t\tdistances /= np.sum(distances) #normalizing distances to make it a prob vector\n\t\t\tnext_cl_arg = np.random.choice(range(data.shape[0]), p = distances) #chosen argument for the next cluster center\n\t\t\tcentroids[k,:] = data[next_cl_arg,:]\n\n\t\tvar = np.var(X, axis = 0) #(D,)\n\n\t\t\t#computing initial responsibilities\n\t\tr_0 = np.zeros((X.shape[0],self.K))\n\t\tfor k in range(self.K):\n\t\t\tr_0[:,k] = np.sum(np.divide(np.square(X - centroids[k,:]), var), axis = 1) + 1e-5\n\t\tr_0 = np.divide(r_0.T, np.sum(r_0,axis=1)).T\n\n\t\tself.gating.fit(X,r_0, *args)\n\n\t\treturn r_0", "def initialize(X, k):\n\n if not isinstance(X, np.ndarray) or X.ndim != 2:\n return None\n\n # n: number of dada points\n # d: dimension of each data point\n n, d = X.shape\n # print(X.shape)\n # print(X)\n\n if not isinstance(k, int) or k <= 0 or k > n:\n return None\n\n # Sample k centroids from a 
random.uniform distribution;\n # output is an array of coordinates\n C = np.random.uniform(low=np.min(X, axis=0),\n high=np.max(X, axis=0),\n size=(k, d))\n return C", "def initialize_centroids(X, K):\n idx = np.random.choice(X.shape[0], K, replace = False)\n centroids = X[idx,:]\n return centroids", "def get_random_centroids(data, k) :\r\n centroids = []\r\n columns = np.size(data, axis=1)\r\n ranges = []\r\n for i in range(columns) :\r\n ranges.append([np.min(data[:,i]), np.max(data[:,i])])\r\n \r\n for i in range(k) :\r\n centroid = []\r\n for span in ranges :\r\n centroid.append(np.random.uniform(span[0], span[1]))\r\n centroids.append(centroid)\r\n \r\n return np.matrix(centroids)", "def _init_centroid(self, data):\n\n\t\tcentroids = data[:self._k]\n\t\tx_assignee = data\n\n\t\treturn centroids, x_assignee", "def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model", "def _assign_vectors_to_nearest_centroid(all_features, centroid):\n #TODO: sprawdz co lepiej dziala\n new_centroid_coor = np.zeros([len(centroid), all_features[0].size])\n #new_centroid_coor = centroid\n new_centroid_counter = np.zeros(len(centroid))\n\n dist = pdist(centroid, all_features)\n #min_dist = dist.min(axis=0)\n min_dist_index = dist.argmin(axis=0)\n\n for x in range(len(min_dist_index)):\n id = min_dist_index[x]\n new_centroid_coor[id] = np.add(new_centroid_coor[id],\n all_features[x])\n new_centroid_counter[id] += 1\n\n new_centroid_coor_out = []\n for i in range(len(new_centroid_coor)):\n if new_centroid_counter[i] == 0:\n new_centroid_coor_out.append(centroid[i])\n else:\n new_centroid_coor_out.append(np.divide(new_centroid_coor[i],new_centroid_counter[i]))\n\n return np.array(new_centroid_coor_out), new_centroid_counter", "def initialize_pos(img: np.ndarray):\n\n h, w = img.shape[0:2]\n\n for cluster in range(numclusters):\n i = np.random.randint(h) # row index\n j = np.random.randint(w) # col index\n current_cluster_centers[cluster, 0, :] = img[i, j, :]\n\n print(\"Current clusters:\\n\", current_cluster_centers)", "def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters", "def initial_vector(self):\n\n return asarray([np.random.uniform(l, u) for l, u in self.bounds])" ]
[ "0.7167922", "0.70244765", "0.6918472", "0.6858616", "0.68306804", "0.67281", "0.6684833", "0.65676075", "0.6543686", "0.64338136", "0.6385544", "0.6385496", "0.6346486", "0.63083196", "0.6302265", "0.62757456", "0.618771", "0.6186494", "0.6182182", "0.6167982", "0.6166685", "0.6155842", "0.6103725", "0.6086321", "0.6048287", "0.60398144", "0.6036393", "0.6007134", "0.5994832", "0.59944856" ]
0.83943045
0
The action controls the robot using mocaps. Specifically, bodies on the robot (for example the gripper wrist) are controlled with mocap bodies. In this case the action is the desired difference in position and orientation (quaternion), in world coordinates, of the target body. The mocap is positioned relative to the target body according to the delta, and the MuJoCo equality constraint optimizer tries to center the welded body on the mocap.
def mocap_set_action(self, action): # @Melissa: Action = 3DOF Cartesian Position Delta + Quaternion if self.sim.model.nmocap > 0: action, _ = np.split(action, (self.sim.model.nmocap * 7, )) action = action.reshape(self.sim.model.nmocap, 7) pos_delta = action[:, :3] quat_delta = action[:, 3:] self.reset_mocap2body_xpos() self.sim.data.mocap_pos[:] = self.sim.data.mocap_pos + pos_delta self.sim.data.mocap_quat[:] = self.sim.data.mocap_quat + quat_delta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mocap_set_action(sim, action, mocap_low, mocap_high, ee_name): \n if sim.model.nmocap > 0:\n action, _ = np.split(action, (sim.model.nmocap * 7, ))\n action = action.reshape(sim.model.nmocap, 7)\n\n pos_delta = action[:, :3]\n quat_delta = action[:, 3:]\n\n if np.count_nonzero(pos_delta) == 0:\n return \n\n reset_mocap2body_xpos(sim)\n mocap_pose = sim.data.mocap_pos + pos_delta\n mocap_pose = np.clip(\n mocap_pose,\n mocap_low,\n mocap_high,\n )\n\n sim.data.set_mocap_pos('mocap', mocap_pose)\n # sim.data.mocap_pos[:] = mocap_pose\n\n # print(sim.data.mocap_pos)\n # print(sim.data.mocap_quat)\n # print(sim.get_state())\n # print(mocap_low, mocap_high)\n\n # we do not control mocap quaternion in the experiment.\n # sim.data.mocap_quat[:] = sim.data.mocap_quat + quat_delta", "def mact(circuit, q_controls, q_target, ancilla):\n circuit.x(q_controls)\n circuit.mct(q_controls, q_target[0], ancilla)\n circuit.x(q_controls)\n circuit.barrier()", "def _do_mc_action(self):\n goal = self._current_mc_goal\n self._position_control_client.send_goal(\n goal,\n done_cb = self._motion_control_callback\n )", "def perform_action(self, car, action):\n action[0]=action[0]*10+20\n action[1]=action[1]*0.5\n p.setJointMotorControl2(car, 3, p.POSITION_CONTROL, targetPosition =action[1],force = self.maxForce)\n for i in [0,1]:\n p.setJointMotorControl2(car, i, p.VELOCITY_CONTROL, targetVelocity =action[0],force = self.maxForce)\n p.setJointMotorControl2(car, 7, p.VELOCITY_CONTROL, targetVelocity =action[0]*7,force = self.maxForce)\n pos1, ori1 = p.getBasePositionAndOrientation(car)\n lin, ang = p.getBaseVelocity(car)\n '''\n if(pos1[0]<-self.max_dist_x):\n p.resetBasePositionAndOrientation(car, [pos1[0]+2*self.max_dist_x,pos1[1],pos1[2]], ori1)\n vel = p.resetBaseVelocity(car, lin)\n if(pos1[0]>self.max_dist_x):\n p.resetBasePositionAndOrientation(car, [pos1[0]-2*self.max_dist_x,pos1[1],pos1[2]], ori1)\n vel = p.resetBaseVelocity(car, lin)\n if(pos1[1]<-self.max_dist_y):\n p.resetBasePositionAndOrientation(car, [pos1[0],pos1[1]+2*self.max_dist_y,pos1[2]], ori1)\n vel = p.resetBaseVelocity(car, lin)\n if(pos1[1]>self.max_dist_y):\n p.resetBasePositionAndOrientation(car, [pos1[0],pos1[1]-2*self.max_dist_y,pos1[2]], ori1)\n vel = p.resetBaseVelocity(car, lin)\n '''", "def apply_action(self, action):\n robot_state = self.get_state('turtlebot3_waffle_pi','world')\n robot_x = robot_state.pose.position.x\n robot_y = robot_state.pose.position.y\n # Set the distance moved in an action such that it is at least as large as the\n # minimum distance that would let a robot in the middle of the goal go to either side\n #self.move_dist = max(((C.GOAL_TOP + C.GOAL_BOTTOM) / 2) / C.NUM_POS_SENDS, 0.5)\n if action == Learn.MOVE_LEFT:\n print(\"Move left\")\n self.set_robot(robot_x, robot_y+self.move_dist)\n elif action == Learn.MOVE_RIGHT:\n print(\"Move right\")\n self.set_robot(robot_x, robot_y-self.move_dist)\n else:\n print(\"Stay put\")", "def set_actuator(self, action):\n deltav = action[0]\n vt = np.clip(self.vt + deltav, -self.maxV, self.maxV)\n self.vt = vt\n p.setJointMotorControl2(bodyUniqueId=self.botId,\n jointIndex=0,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=vt)\n p.setJointMotorControl2(bodyUniqueId=self.botId,\n jointIndex=1,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=-vt)", "def _step(self, action: np.ndarray):\n # TODO: How do deal with goal changing?\n denormalize = False if self.use_raw_actions else True\n current_pos = self.sim.data.mocap_pos.copy()\n meanval = 
(self.mocap_pos_clip_upper + self.mocap_pos_clip_lower)/2.0\n rng = (self.mocap_pos_clip_upper - self.mocap_pos_clip_lower)/2.0\n new_pos = action[:3]*rng + meanval #current_pos + action[:3]*self.range\n # new_pos = current_pos + action[:3]*self.range\n new_pos = np.clip(new_pos, self.mocap_pos_clip_lower, self.mocap_pos_clip_upper)\n self.sim.data.mocap_pos[:] = new_pos.copy()\n self.robot.step({\n 'gripper': action[-2:]\n }, denormalize)", "def _set_action(self, action):\n action = np.asarray(action)\n action = np.clip(action, self.action_space.low, self.action_space.high)\n ctrl = self.robot.denormalize_position_control(\n position_control=action, relative_action=self.constants.relative_action,\n )\n self.robot.set_position_control(ctrl)", "def apply_action(self, action):\n real_action = self.policy_action_to_robot_action(action)\n p.setGravity(0, 0, 0)\n p.resetBaseVelocity(\n self.robot_ids[0], real_action[:3], real_action[3:])", "def step_simulation(self, action):\n # target = np.zeros(6)\n # a = np.copy(action)\n # for i in range(6):\n # target[i] = a[i] + ref_pos[i + 3]\n\n target = action * 1.5\n # target = action + ref_pos[3:9]\n\n joint_angle_4, joint_velocity_4 = self.get_joint_angle_and_velocity(4)\n joint_angle_7, joint_velocity_7 = self.get_joint_angle_and_velocity(7)\n self.joint_history.append(np.asarray([joint_angle_4, joint_velocity_4, joint_angle_7, joint_velocity_7]))\n\n joint_angles = self.robot_skeleton.q[3:]\n joint_velocities = self.robot_skeleton.dq[3:]\n\n tau = np.zeros(self.robot_skeleton.ndofs) # torque to apply at each simulation clock\n tau[3:] = self.P * (target - joint_angles) - self.D * joint_velocities\n tau = np.clip(tau, -150 * self.volume_scaling, 150 * self.volume_scaling)\n self.tau_history.append(tau)\n # print(tau)\n self.do_simulation(tau, 1)", "def apply_action(self, physics, action, random_state):\n del random_state\n physics.bind(self.actuators).ctrl = action", "def movement(self, action):\r\n\r\n #if its moving horizontally only can move vertically in the next move\r\n if self.velocities[1] == 0:\r\n if action == 0 :\r\n self.velocities[0] = 0\r\n self.velocities[1] = -1\r\n if action == 1 :\r\n self.velocities[0] = 0\r\n self.velocities[1] = 1\r\n\r\n #if its moving vertically only can move horizontally in the next move\r\n if self.velocities[0] == 0:\r\n if action == 2 :\r\n self.velocities[0] = -1\r\n self.velocities[1] = 0\r\n if action == 3 :\r\n self.velocities[0] = 1\r\n self.velocities[1] = 0\r\n \r\n self.displacement()", "def step(self, action=[], simulation=False, m1=0, m2=0):\n\n # receive m1 and m2 if using it for the Uvirobot_model simulation\n if not simulation:\n m1, m2 = self._dediscretize_action(action)\n\n if not self.differential_car: # Ackerman model. 
Cambiado == por Not.\n # m1 = orientation m2= engine\n\n wm1 = (16.257 * (m1 - 180) / 75) + np.random.uniform(-0.3, 0.3, 1)[0]\n\n # the negative sign is because it turns to the left with PWM 0-127\n # and for us turning to the left is positive w_ang\n wm2 = - self.alpha_ack * (m2 - 128) / 127 + np.random.uniform(-0.3, 0.3, 1)[0]\n\n self.v_linear = wm1*self.r_ack*np.cos(wm2)\n self.w_ang = -(wm1*self.r_ack*np.cos(wm2)*np.tan(wm2))/self.l_ack\n\n else: # differential model\n # PWM to rads conversion\n wm1 = (25 * (m1 - 145) / 110) + np.random.uniform(-1, 1, 1)[0]\n wm2 = (25 * (m2 - 145) / 110) + np.random.uniform(-1, 1, 1)[0]\n\n\n # Calculate linear and angular velocity\n self.v_linear = (wm2 + wm1) * (self.r / 2)\n\n # wm1 - wm2 because m1 is the engine of the right\n # changed old ecuation because it was wrong and divided /3.35 to make it like the wrong ecuation that worked\n\n if not self.discrete_input:\n self.w_ang = (wm1 - wm2) * (self.r / self.rho)\n else:\n self.w_ang = (wm1 - wm2) * (2*self.r / self.rho)\n\n # Calculate position and theta\n self.x = self.x + self.v_linear * math.cos(self.theta) * self.time\n self.y = self.y + self.v_linear * math.sin(self.theta) * self.time\n self.theta = self.theta + self.w_ang * self.time\n\n # to set theta between [0,2pi]\n if self.theta > 2*math.pi:\n self.theta = self.theta-2*math.pi\n elif self.theta < 0:\n self.theta = self.theta+2*math.pi\n\n # return the state if i´m using it for the uvirobot_model simulation\n if simulation:\n return self.x, self.y, self.theta\n\n # add noise to position and theta\n # self.x_noise = self.x + np.random.normal(self.mu, self.sigmaxy, 1)\n # self.y_noise = self.y + np.random.normal(self.mu, self.sigmaxy, 1)\n # self.theta_noise = self.theta + np.random.normal(self.mu,\n # self.sigmaangle, 1)\n\n # Calculate the distance to the closest point in trajectory,\n # depending on distance, delta theta (ugv to trajectory) and distance\n # covered in this step\n self._distance_next()\n self._calc_zone()\n self._calc_delta_theta()\n self._distance_covered()\n # I want to know how far it went to give reward each 50 points\n\n # Calculate done and reward\n # Only want this end for open circuit\n if self.index == (len(self.x_trajectory) - 1) and not self.closed:\n done = 1\n reward = 20\n\n elif (self.x > self.max_x) or (self.x < -self.max_x) or \\\n (self.y < -self.max_y) or (self.y > self.max_y):\n done = 1\n # It had a reward of -10 but doesnt make sense cause the car doesnt\n # know where it is\n reward = 0\n\n elif self.steps >= self.max_steps:\n done = 1\n # Reward of -10 if its open circuit, for closed circuit reward = 0\n # because it wouldnt make sense to punish because it is infinite\n if self.closed:\n reward = 0\n else:\n reward = -50\n\n # elif math.fabs(self.delta_theta) > math.pi/2:\n # done = 1\n # reward = -10\n\n elif self.zone_reward == 3:\n done = 1\n if self.discrete_input:\n reward = -100\n else:\n reward = -10\n\n else:\n done = 0\n # I removed Christians rewards\n reward = -1 * BETA_DIST * math.fabs(self.distance) + \\\n BETA_GAP * self.gap\n\n if (self.index//50) > self.farthest:\n self.farthest = self.index//50\n reward += 5\n#\n # Number of iterations in a episode\n self.steps += 1\n\n if self.discrete_input:\n # discretize state for the agent to control\n\n discrete_distance, discrete_delta_theta \\\n = self._discretize_agent_state(self.distance, self.delta_theta)\n\n self.agent_state = np.array([discrete_distance,\n discrete_delta_theta])\n else:\n # self.agent_state has to be a matrix to be 
accepted by keras\n self.agent_state = np.array([self.distance, self.delta_theta])\n\n # self.norm_distance=(self.distance+0.071)/(0.071*2)\n # self.norm_delta_theta=(self.delta_theta+np.pi)/(2*np.pi)\n\n # Create state (x,y,theta)\n self.state = [self.x, self.y, self.theta]\n # print(self.state,self.sign)\n\n return self.state, self.agent_state, reward, done", "def execute_action(self, action, orientation=None):\n print action\n if not orientation:\n self.move_cartesian_frame_linear_interpolation(tfx.pose(self.cur_position_translation(np.array(action) * self.scale), np.array(self.get_current_cartesian_position().orientation)), 0.1)\n else:\n self.move_cartesian_frame_linear_interpolation(tfx.pose(self.cur_position_translation(np.array(action) * self.scale), np.array(orientation)), 0.1)", "def before_step(self, action, physics):\n # # Support legacy internal code.\n\n physics.named.data.xfrc_applied[:,:3]=np.zeros((3,))\n\n if self._random_location and not self._maxq:\n index = self._current_loc\n else:\n one_hot = action[:4]\n index = np.argmax(one_hot)\n action = action[4:]\n\n goal_position = action * 0.05\n corner_action = CORNER_INDEX_ACTION[index]\n corner_geom = CORNER_INDEX_POSITION[index]\n\n\n # apply consecutive force to move the point to the target position\n position = goal_position + physics.named.data.geom_xpos[corner_geom]\n dist = position - physics.named.data.geom_xpos[corner_geom]\n\n loop = 0\n while np.linalg.norm(dist) > 0.025:\n loop += 1\n if loop > 40:\n break\n physics.named.data.xfrc_applied[corner_action, :3] = dist * 20\n physics.step()\n self.after_step(physics)\n dist = position - physics.named.data.geom_xpos[corner_geom]\n\n if self._random_location and not self._maxq:\n self._current_loc = self._generate_loc()", "def action(self, action):\n low = self.action_space.low\n high = self.action_space.high\n\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n\n action = action * scale_factor + reloc_factor\n action = np.clip(action, low, high)\n\n return action", "def _motion_control_callback(self, state, result):\n if len(self._mc_goals) > 0:\n self._current_mc_goal = self._mc_goals.pop(0)\n self._do_mc_action()\n else :\n self._current_mc_goal = None", "def step(self, action):\n\n input_1 = self._make_input(action[:7], self.env._right_hand_quat)\n if self.env.mujoco_robot.name == \"sawyer\":\n velocities = self.controller.get_control(**input_1)\n low_action = np.concatenate([velocities, action[7:]])\n elif self.env.mujoco_robot.name == \"baxter\":\n input_2 = self._make_input(action[7:14], self.env._left_hand_quat)\n velocities = self.controller.get_control(input_1, input_2)\n low_action = np.concatenate([velocities, action[14:]])\n else:\n raise Exception(\n \"Only Sawyer and Baxter robot environments are supported for IK \"\n \"control currently.\"\n )\n\n # keep trying to reach the target in a closed-loop\n for i in range(self.action_repeat):\n ret = self.env.step(low_action)\n if i + 1 < self.action_repeat:\n velocities = self.controller.get_control()\n if self.env.mujoco_robot.name == \"sawyer\":\n low_action = np.concatenate([velocities, action[7:]])\n elif self.env.mujoco_robot.name == \"baxter\":\n low_action = np.concatenate([velocities, action[14:]])\n else:\n raise Exception(\n \"Only Sawyer and Baxter robot environments are supported for IK \"\n \"control currently.\"\n )\n\n return ret", "def move(o, action):\n # if action not in Act: raise...?\n { Act.Down : lambda: o.applyGravity(),\n Act.Left : lambda: 
o._tryShift(o.block,Point(-1,0)),\n Act.Right : lambda: o._tryShift(o.block,Point( 1,0)),\n Act.Drop : lambda: o._setBlock(o.shadowBlock),\n Act.Hold : lambda: o._Hold(),\n Act.RotCW : lambda: o._Rotate(clockwise),\n Act.RotCCW: lambda: o._Rotate(counterClockwise),\n }[action]()", "def step(self, action):\n # print(\"############################\")\n # print(\"action: {}\".format(action))\n\n self.movement_complete.data = False\n\n # 1) Read last joint positions by getting the observation before acting\n old_observation = self.get_obs()\n\n # 2) Get the new joint positions according to chosen action (actions here are the joint increments)\n if self._joint_increment is None:\n next_action_position = action\n else:\n next_action_position = self.get_action_to_position(action, old_observation[1:7])\n\n # 3) Move to position and wait for moveit to complete the execution\n self.publisher_to_moveit_object.pub_joints_to_moveit(next_action_position)\n # rospy.wait_for_message(\"/pickbot/movement_complete\", Bool)\n while not self.movement_complete.data:\n pass\n\n start_ros_time = rospy.Time.now()\n while True:\n # Check collision:\n # invalid_collision = self.get_collisions()\n # if invalid_collision:\n # print(\">>>>>>>>>> Collision: RESET <<<<<<<<<<<<<<<\")\n # observation = self.get_obs()\n # reward = UMath.compute_reward(observation, -200, True)\n # observation = self.get_obs()\n # print(\"Test Joint: {}\".format(np.around(observation[1:7], decimals=3)))\n # return U.get_state(observation), reward, True, {}\n\n elapsed_time = rospy.Time.now() - start_ros_time\n if np.isclose(next_action_position, self.joints_state.position, rtol=0.0, atol=0.01).all():\n break\n elif elapsed_time > rospy.Duration(2): # time out\n break\n # time.sleep(s\n\n \"\"\"\n #execute action as long as the current position is close to the target position and there is no invalid collision and time spend in the while loop is below 1.2 seconds to avoid beeing stuck touching the object and not beeing able to go to the desired position \n time1=time.time()\n while np.linalg.norm(np.asarray(self.joints_state.position)-np.asarray(next_action_position))>0.1 and self.get_collisions()==False and time.time()-time1<0.1: \n rospy.loginfo(\"Not yet reached target position and no collision\")\n \"\"\"\n # 4) Get new observation and update min_distance after performing the action\n new_observation = self.get_obs()\n if new_observation[0] < self.min_distace:\n self.min_distace = new_observation[0]\n # print(\"observ: {}\".format( np.around(new_observation[1:7], decimals=3)))\n\n # 5) Convert Observations into state\n state = U.get_state(new_observation)\n\n # 6) Check if its done, calculate done_reward\n done, done_reward, invalid_contact = self.is_done(new_observation)\n\n # 7) Calculate reward based on Observatin and done_reward and update the accumulated Episode Reward\n reward = UMath.compute_reward(new_observation, done_reward, invalid_contact)\n\n ### TEST ###\n if done:\n joint_pos = self.joints_state.position\n print(\"Joint in step (done): {}\".format(np.around(joint_pos, decimals=3)))\n ### END of TEST ###\n\n self.accumulated_episode_reward += reward\n\n self.episode_steps += 1\n\n return state, reward, done, {}", "def step(self, action, update=True):\n if self.centralized_planning:\n agent_states = [human.get_full_state() for human in self.humans]\n if self.robot.visible:\n agent_states.append(self.robot.get_full_state())\n human_actions = self.centralized_planner.predict(agent_states)[:-1]\n else:\n human_actions = 
self.centralized_planner.predict(agent_states)\n else:\n human_actions = []\n for human in self.humans:\n ob = self.compute_observation_for(human)\n human_actions.append(human.act(ob))\n\n # collision detection\n dmin = float('inf')\n collision = False\n for i, human in enumerate(self.humans):\n px = human.px - self.robot.px\n py = human.py - self.robot.py\n if self.robot.kinematics == 'holonomic':\n vx = human.vx - action.vx\n vy = human.vy - action.vy\n else:\n vx = human.vx - action.v * np.cos(action.r + self.robot.theta)\n vy = human.vy - action.v * np.sin(action.r + self.robot.theta)\n ex = px + vx * self.time_step\n ey = py + vy * self.time_step\n # closest distance between boundaries of two agents\n closest_dist = point_to_segment_dist(px, py, ex, ey, 0, 0) - human.radius - self.robot.radius\n if closest_dist < 0:\n collision = True\n logging.debug(\"Collision: distance between robot and p{} is {:.2E} at time {:.2E}\".format(human.id, closest_dist, self.global_time))\n break\n elif closest_dist < dmin:\n dmin = closest_dist\n\n # collision detection between humans\n human_num = len(self.humans)\n for i in range(human_num):\n for j in range(i + 1, human_num):\n dx = self.humans[i].px - self.humans[j].px\n dy = self.humans[i].py - self.humans[j].py\n dist = (dx ** 2 + dy ** 2) ** (1 / 2) - self.humans[i].radius - self.humans[j].radius\n if dist < 0:\n # detect collision but don't take humans' collision into account\n logging.debug('Collision happens between humans in step()')\n\n # check if reaching the goal\n end_position = np.array(self.robot.compute_position(action, self.time_step))\n reaching_goal = norm(end_position - np.array(self.robot.get_goal_position())) < self.robot.radius\n\n if self.global_time >= self.time_limit - 1:\n reward = 0\n done = True\n info = Timeout()\n elif collision:\n reward = self.collision_penalty\n done = True\n info = Collision()\n elif reaching_goal:\n reward = self.success_reward\n done = True\n info = ReachGoal()\n elif dmin < self.discomfort_dist:\n # adjust the reward based on FPS\n reward = (dmin - self.discomfort_dist) * self.discomfort_penalty_factor * self.time_step\n done = False\n info = Discomfort(dmin)\n else:\n reward = 0\n done = False\n info = Nothing()\n\n if update:\n # store state, action value and attention weights\n if hasattr(self.robot.policy, 'action_values'):\n self.action_values.append(self.robot.policy.action_values)\n if hasattr(self.robot.policy, 'get_attention_weights'):\n self.attention_weights.append(self.robot.policy.get_attention_weights())\n if hasattr(self.robot.policy, 'get_matrix_A'):\n self.As.append(self.robot.policy.get_matrix_A())\n if hasattr(self.robot.policy, 'get_feat'):\n self.feats.append(self.robot.policy.get_feat())\n if hasattr(self.robot.policy, 'get_X'):\n self.Xs.append(self.robot.policy.get_X())\n if hasattr(self.robot.policy, 'traj'):\n self.trajs.append(self.robot.policy.get_traj())\n\n # update all agents\n self.robot.step(action)\n for human, action in zip(self.humans, human_actions):\n human.step(action)\n if self.nonstop_human and human.reached_destination():\n self.generate_human(human)\n\n self.global_time += self.time_step\n self.states.append([self.robot.get_full_state(), [human.get_full_state() for human in self.humans],\n [human.id for human in self.humans]])\n self.robot_actions.append(action)\n self.rewards.append(reward)\n\n # compute the observation\n if self.robot.sensor == 'coordinates':\n ob = self.compute_observation_for(self.robot)\n elif self.robot.sensor == 'RGB':\n raise 
NotImplementedError\n else:\n if self.robot.sensor == 'coordinates':\n ob = [human.get_next_observable_state(action) for human, action in zip(self.humans, human_actions)]\n elif self.robot.sensor == 'RGB':\n raise NotImplementedError\n\n return ob, reward, done, info", "def step(self, action):\n force = self.force_mag if action else -self.force_mag\n costheta = math.cos(self.theta)\n sintheta = math.sin(self.theta)\n temp = (\n force + self.polemass_length * self.theta_dot ** 2 * sintheta\n ) / self.total_mass\n thetaacc = (self.gravity * sintheta - costheta * temp) / (\n self.length\n * (4.0 / 3.0 - self.masspole * costheta * costheta / self.total_mass)\n )\n xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\n self.x += self.tau * self.x_dot\n self.x_dot += self.tau * xacc\n self.theta += self.tau * self.theta_dot\n self.theta_dot += self.tau * thetaacc\n\n return self.state", "def move(self, action): # Good\n if action == 0:\n dx, dy = 0, 1\n elif action == 1:\n dx, dy = 1, 0\n elif action == 2:\n dx, dy = 0, -1\n elif action == 3:\n dx, dy = -1, 0\n else:\n dx, dy = 0, 0\n\n # Check for max speed\n if ((self.vel_x + dx)**2 + (self.vel_y + dy)**2) \\\n <= self.max_speed_sq:\n self.x_vel += dx\n self.y_vel += dy\n\n self.prev_pos = self.center\n super(Player, self).move()", "def _set_action(self, action):\n\n rospy.logdebug(\"Start Set Action ==>\"+str(action))\n # We convert the actions to speed movements to send to the parent class of Parrot\n linear_speed_vector = Vector3()\n angular_speed = 0.0\n\n if action == 0: # FORWARDS\n linear_speed_vector.x = self.linear_forward_speed\n self.last_action = \"FORWARDS\"\n elif action == 1: # BACKWARDS\n linear_speed_vector.x = -1*self.linear_forward_speed\n self.last_action = \"BACKWARDS\"\n elif action == 2: # STRAFE_LEFT\n linear_speed_vector.y = self.linear_forward_speed\n self.last_action = \"STRAFE_LEFT\"\n elif action == 3: # STRAFE_RIGHT\n linear_speed_vector.y = -1*self.linear_forward_speed\n self.last_action = \"STRAFE_RIGHT\"\n elif action == 4: # UP\n linear_speed_vector.z = self.linear_forward_speed\n self.last_action = \"UP\"\n elif action == 5: # DOWN\n linear_speed_vector.z = -1*self.linear_forward_speed\n self.last_action = \"DOWN\"\n\n # We tell drone the linear and angular speed to set to execute\n self.move_base(linear_speed_vector,\n angular_speed,\n epsilon=0.05,\n update_rate=10)\n\n rospy.logdebug(\"END Set Action ==>\"+str(action))", "def move(self, agent, action):\n\t\tpass", "def test_mmp_active_inference(self):\n\n num_obs = [3, 2]\n num_states = [4, 3]\n num_control = [1, 3]\n A = random_A_matrix(num_obs, num_states)\n B = random_B_matrix(num_states, num_control)\n\n C = obj_array_zeros(num_obs)\n C[1][0] = 1.0 \n C[1][1] = -2.0 \n\n agent = Agent(A=A, B=B, C=C, control_fac_idx=[1], inference_algo=\"MMP\", policy_len=2, inference_horizon=3)\n\n T = 10\n\n for t in range(T):\n\n o = [np.random.randint(num_ob) for num_ob in num_obs] # just randomly generate observations at each timestep, no generative process\n qx = agent.infer_states(o)\n agent.infer_policies()\n action = agent.sample_action()\n \n print(agent.prev_actions)\n print(agent.prev_obs)", "def reflect_vec(pos, action): \n normal_hat = pos/np.linalg.norm(pos)\n bounce = action - 2*np.dot(action, normal_hat.T)*normal_hat\n \n return bounce", "def _apply_action_bounds(self, action: np.ndarray,\n config: RobotGroupConfig) -> np.ndarray:\n if config.control_mode == ControlMode.JOINT_POSITION:\n # Apply position bounds.\n if 
config.qpos_range is not None:\n action = np.clip(action, config.qpos_range[:, 0],\n config.qpos_range[:, 1])\n\n # Apply velocity bounds.\n # NOTE: This uses the current simulation state to get the current\n # position. For hardware, this expects the hardware to update the\n # simulation state.\n if (config.qpos_indices is not None\n and config.qvel_range is not None):\n # Calculate the desired velocity using the current position.\n cur_pos = self.sim_scene.data.qpos[config.qpos_indices]\n desired_vel = (\n (action - cur_pos) / self.sim_scene.step_duration)\n # Clip with the velocity bounds.\n desired_vel = np.clip(desired_vel, config.qvel_range[:, 0],\n config.qvel_range[:, 1])\n action = cur_pos + desired_vel * self.sim_scene.step_duration\n\n elif config.control_mode == ControlMode.JOINT_VELOCITY:\n # Apply velocity bounds.\n if config.qvel_range is not None:\n action = np.clip(action, config.qvel_range[:, 0],\n config.qvel_range[:, 1])\n\n return action", "def action_to_spawn(self):\n self.scene.center_on_spawn()", "def remove_mass(self, cm_target, m_target, iT_target_ar, align):\n iT_target = tensor(*iT_target_ar)\n cm = self.cm\n iT = self.iT\n m = self.m\n cm_target_aligned = align @ cm_target\n cm_target_aligned = cm_target_aligned + self.dimension\n iT_target_aligned = align @ iT_target @ align.T\n res = mass_combine(-m, m_target, cm, cm_target_aligned, iT, -iT_target_aligned)\n (self.m, self.cm, self.iT) = res" ]
[ "0.6857181", "0.6473455", "0.6208231", "0.61743957", "0.6151875", "0.6096956", "0.58196455", "0.58098626", "0.57758635", "0.57689625", "0.5463688", "0.5459588", "0.542433", "0.5395272", "0.52858716", "0.526483", "0.5258025", "0.525683", "0.52174217", "0.52063286", "0.5194508", "0.5155291", "0.51544", "0.5124981", "0.511416", "0.5108254", "0.5104426", "0.50891626", "0.5077091", "0.5074244" ]
0.7424087
0
Resets the mocap welds that we use for actuation.
def reset_mocap_welds(self): if self.sim.model.nmocap > 0 and self.sim.model.eq_data is not None: for i in range(self.sim.model.eq_data.shape[0]): if self.sim.model.eq_type[i] == mujoco_py.const.EQ_WELD: self.sim.model.eq_data[i, :] = np.array( [0., 0., 0., 1., 0., 0., 0.]) self.sim.forward()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_mocap_welds(sim):\n if sim.model.nmocap > 0 and sim.model.eq_data is not None:\n for i in range(sim.model.eq_data.shape[0]):\n if sim.model.eq_type[i] == mujoco_py.const.EQ_WELD:\n sim.model.eq_data[i, :] = np.array(\n [0., 0., 0., 1., 0., 0., 0.])\n sim.forward()", "def reset_wm(self):\n\n self.plan = []\n self.hist = []", "def reset(self):\r\n # TODO: have reset flag such that it forces all the bottom changes\r\n self.pwm_freq = self._default[\"pwm_freq\"]\r\n self.gate_logic = self._default[\"gate_logic\"]\r\n self.max_pwm = self._default[\"max_pwm\"]\r\n self.lase_on_power_up = self._default[\"lase_on_power_up\"]\r\n\r\n self.mode = self._default[\"mode\"]\r\n self.lase = self._default[\"lase\"]\r\n self.percent = self._default[\"percent\"] # in percent\r", "def reset_cams(self):\n self.pseye.reset_cams()", "def _reset(self) -> None:\n\n self._reset_slots()\n self._paused = False\n self.latest_action = {}\n self.latest_message = [] #clear a list in python\n self.latest_bot_utterance = BotUttered.empty()\n self.followup_action = ACTION_LISTEN_NAME\n self.active_loop = {}", "def reset(self):\r\n # reset Wheel encoders\r\n self.start_time = time.time()\r\n [left_start, right_start] = self.Roomba.Query(43, 44)\r\n self.Motion.reset(left_start, right_start)\r\n # reset bumper\r\n self.bumper.reset()\r\n\r\n #reset grid world data\r\n self.action=[0.0,0.0]\r\n self.grid_state= [0,0,0]\r\n self.real_state = [0.0, 0.0, 0.0]\r\n self.trans_model = None\r\n pass", "def reset(self):\n self._weights.clear()", "def _reset(self):\n self._interface.set('fw_wp_en', 'off')", "def reset(self):\n self.desc.put(self.desc.pvname.split(\".\")[0])\n self.scan.put(\"Passive\")\n self.calc.put(\"0\")\n self.prec.put(\"5\")\n self.dold.put(0)\n self.doln.put(\"\")\n self.dopt.put(\"Use VAL\")\n self.flnk.put(\"0\")\n self.odly.put(0)\n self.oopt.put(\"Every Time\")\n self.outn.put(\"\")\n for letter in self.channels.read_attrs:\n channel = self.channels.__getattr__(letter)\n channel.reset()", "def reset(self):\n if hasattr(self, \"W\"):\n del self.W\n if hasattr(self, \"T\"):\n del self.T\n if hasattr(self, \"P\"):\n del self.P", "def reset(self):\n self.velocity_controller.reset()\n self.yaw_filter.reset()", "def reset(self):\n for i in range(0, len(self.current_state)):\n self.current_state[i] = 0\n\n for i in range(0, len(self.weights)):\n self.weights[i] = 0", "def reset(self):\n self.creature.reset()\n self.current_world = copy.copy(self.init_world_rewards)\n self.draw_board()\n self.last_move = None", "def reset_game(self):\n self.ships_left = self.settings.ship_limit\n self.fleets_left = self.settings.fleet_waves\n self.target_miss = self.settings.target_max_miss\n self.reset_level()", "def resetSkills(self):\r\n \"\"\" Reset the default attributes \"\"\"\r\n self.player['level'] = 1\r\n self.player['xp'] = 0\r\n self.player['credits'] = int(startCredits)\r\n self.player['popup'] = int(popupStatus)\r\n self.player['name'] = self.player.name\r\n self.player['lastconnected'] = int(time.time())\r\n\r\n \r\n \"\"\" Iterate through the skills list then set each skill to 0 \"\"\"\r\n for skill in skills:\r\n self.player[skill.name] = 0\r\n\r\n \"\"\" Slay the player \"\"\"\r\n es.server.queuecmd(\"damage %s %s\" % (self.userid, es.getplayerprop(self.userid, \"CBasePlayer.m_iHealth\")))\r\n \r\n \"\"\" Notify the user \"\"\"\r\n tell(self.userid, 'info deleted')", "def resetDefences(self):\n self.currentAP = self.maxAP\n self.currentSP = self.maxSP", "def reset_fight():\n global FIGHT\n global 
BEARSTRENGTHVAL\n global MOVEMENT\n global ACTION\n global VALID_MOVE\n global FIGHTMOVES\n global GRIZZLY_BEAR\n\n FIGHT = False\n #ENEMY_LIST[ZERO_BASE_PLYR_POS] = len(ENEMY_LIST) # Last item is always None\n ENEMY_LIST[ZERO_BASE_PLYR_POS] = 4\n FIGHTMOVES = 0\n VALID_MOVE = True\n BEARSTRENGTHVAL = 100\n MOVEMENT = False\n ACTION = True\n GRIZZLY_BEAR = False\n process_context(\"None\")", "def reset(self):\n weight = self.module.weight.data\n self.sensitivity_in = torch.zeros(weight.shape[1]).to(weight.device)\n self._features = torch.Tensor()\n self._current_batch = 1", "def reset(self):\r\n\t\tself.player_selected_actions = np.zeros((self.num_actions,), int)\r\n\t\tself.player_reward = np.zeros((self.num_timesteps,))\r\n\t\tself.player_optimum = np.zeros_like(self.player_reward, dtype=int)", "def reset(self):\n self.params.resetParams()", "def resetSim(self):\n self.powers = []", "def __mode_reset(self):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tval.reset_restart()", "def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []", "def reset(self):\n self.ai.reset()", "def kill_all(self):\n self.settings['lights_on'] = 12\n self.settings['lights_off'] = 12\n self.settings['overhead_level'] = 0\n self.settings['soil_1'] = 0\n self.settings['soil_2'] = 0\n self.settings['soil_3'] = 0\n self.settings['soil_4'] = 0\n self.scale_overhead_level.set(self.settings['overhead_level'])\n self.scale_smc1.set(self.settings['soil_1'])\n self.scale_smc2.set(self.settings['soil_2'])\n self.scale_smc3.set(self.settings['soil_3'])\n self.scale_smc4.set(self.settings['soil_4'])\n self.active_changes = True # (flag) Once changes are retrieved, we assume that they will be sent to the controller", "def reset_variables(self) -> None:\n self.attributs = {}\n self.data = []", "def reset_bot() :\r\n\r\n open_list.clear()\r\n closed_list.clear()", "def reset(self):\n self.rst.value(0) # RST on\n self.sleep_us(100) # reset impulse has to be >100 ns and <100 ms\n self.rst.value(1) # RST off\n # Defaults after reset:\n self.power = self.POWER_DOWN\n self.addressing = self.ADDRESSING_HORIZ\n self.instr = self.INSTR_BASIC\n self.display_mode = self.DISPLAY_BLANK\n self.temp_coeff = self.TEMP_COEFF_0\n self.bias = self.BIAS_1_11\n self.voltage = 3060", "def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()", "def reset(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].reset()" ]
[ "0.72712475", "0.6722829", "0.6504008", "0.6271008", "0.62297726", "0.6208428", "0.6196257", "0.6191569", "0.61721665", "0.6122709", "0.607302", "0.60728467", "0.606244", "0.6059849", "0.6052459", "0.60498244", "0.60480326", "0.6044585", "0.6020971", "0.6006914", "0.6000386", "0.5994785", "0.5973946", "0.59689575", "0.5955908", "0.5951558", "0.5944302", "0.59269047", "0.59199965", "0.5906978" ]
0.7608869
0
Resets the position and orientation of the mocap bodies to the same values as the bodies they're welded to.
def reset_mocap2body_xpos(self): if (self.sim.model.eq_type is None or self.sim.model.eq_obj1id is None or self.sim.model.eq_obj2id is None): return for eq_type, obj1_id, obj2_id in zip(self.sim.model.eq_type, self.sim.model.eq_obj1id, self.sim.model.eq_obj2id): if eq_type != mujoco_py.const.EQ_WELD: continue mocap_id = self.sim.model.body_mocapid[obj1_id] if mocap_id != -1: # obj1 is the mocap, obj2 is the welded body body_idx = obj2_id else: # obj2 is the mocap, obj1 is the welded body mocap_id = self.sim.model.body_mocapid[obj2_id] body_idx = obj1_id assert (mocap_id != -1) self.sim.data.mocap_pos[mocap_id][:] = self.sim.data.body_xpos[body_idx] self.sim.data.mocap_quat[mocap_id][:] = self.sim.data.body_xquat[body_idx]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_mocap2body_xpos(sim):\n\n if (sim.model.eq_type is None or\n sim.model.eq_obj1id is None or\n sim.model.eq_obj2id is None):\n return\n for eq_type, obj1_id, obj2_id in zip(sim.model.eq_type,\n sim.model.eq_obj1id,\n sim.model.eq_obj2id):\n if eq_type != mujoco_py.const.EQ_WELD:\n continue\n\n mocap_id = sim.model.body_mocapid[obj1_id]\n if mocap_id != -1:\n # obj1 is the mocap, obj2 is the welded body\n body_idx = obj2_id\n else:\n # obj2 is the mocap, obj1 is the welded body\n mocap_id = sim.model.body_mocapid[obj2_id]\n body_idx = obj1_id\n\n assert (mocap_id != -1)\n sim.data.mocap_pos[mocap_id][:] = sim.data.body_xpos[body_idx]\n # sim.data.mocap_quat[mocap_id][:] = sim.data.body_xquat[body_idx]", "def ResetPos(self):\n for idx in range(self.unFixJL):\n self._p.resetJointState(self.uid, idx,\n self.InitInfo[\"JPos\"][idx],\n self.InitInfo[\"JVel\"][idx])", "def reset_world(self):\n print(\"Resetting world\")\n\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print(\"Done\")", "def reset(self):\n self.position = self.initial_position\n self.velocity = [0, 0, 0]", "def reset(self):\r\n self.body = [[int(self.x_pos/2), int(self.y_pos/2)]] # initial snake starts at center of screen\r\n self.direction = \"UP\"\r\n self.length = 1\r\n self.alive = True\r\n self.speed = 10", "def clear_bodies(self):\n self.send_command('clearbodies')", "def reset(self):\n p.resetBasePositionAndOrientation(self.pybullet_id, self.initial_position, (0., 0., 0., 1.))", "def reset(self):\n p.resetBasePositionAndOrientation(self.pybullet_id, self.initial_position, (0., 0., 0., 1.))", "def reset(self):\n p.resetBasePositionAndOrientation(self.pybullet_id, self.initial_position, (0., 0., 0., 1.))", "def reset(self):\n # print(\"Joint (reset): {}\".format(np.around(self.joints_state.position, decimals=3)))\n init_joint_pos = [1.5, -1.2, 1.4, -1.87, -1.57, 0]\n self.publisher_to_moveit_object.set_joints(init_joint_pos)\n\n # print(\">>>>>>>>>>>>>>>>>>> RESET: waiting for the movement to complete\")\n # rospy.wait_for_message(\"/pickbot/movement_complete\", Bool)\n while not self.movement_complete.data:\n pass\n # print(\">>>>>>>>>>>>>>>>>>> RESET: Waiting complete\")\n\n start_ros_time = rospy.Time.now()\n while True:\n # Check collision:\n # invalid_collision = self.get_collisions()\n # if invalid_collision:\n # print(\">>>>>>>>>> Collision: RESET <<<<<<<<<<<<<<<\")\n # observation = self.get_obs()\n # reward = UMath.compute_reward(observation, -200, True)\n # observation = self.get_obs()\n # print(\"Test Joint: {}\".format(np.around(observation[1:7], decimals=3)))\n # return U.get_state(observation), reward, True, {}\n\n elapsed_time = rospy.Time.now() - start_ros_time\n if np.isclose(init_joint_pos, self.joints_state.position, rtol=0.0, atol=0.01).all():\n break\n elif elapsed_time > rospy.Duration(2): # time out\n break\n\n self.set_target_object(random_object=self._random_object, random_position=self._random_position)\n self._check_all_systems_ready()\n\n with open('contact_1_force.yml', 'w') as yaml_file:\n yaml.dump(0.0, yaml_file, default_flow_style=False)\n with open('contact_2_force.yml', 'w') as yaml_file:\n yaml.dump(0.0, yaml_file, default_flow_style=False)\n with open('collision.yml', 'w') as 
yaml_file:\n yaml.dump(False, yaml_file, default_flow_style=False)\n observation = self.get_obs()\n self.object_position = observation[9:12]\n\n # print(\"Joint (after): {}\".format(np.around(observation[1:7], decimals=3)))\n\n # get maximum distance to the object to calculate reward\n self.max_distance, _ = U.get_distance_gripper_to_object()\n self.min_distace = self.max_distance\n state = U.get_state(observation)\n self._update_episode()\n return state", "def pull():\r\n\t\tglobal bodies, counter\r\n\r\n\t\tfor n, b in bodies:\r\n\t\t\tl = list(b.getPosition())\r\n\t\t\tscalp (l, -1000 / length(l))\r\n\t\t\tb.addForce(l)\r\n\t\t\tif counter%60 == 0:\r\n\t\t\t\tb.addForce((0, 10000, 0))", "def set_body_frame_position_vectors(pa):\n nb = pa.nb[0]\n # loop over all the bodies\n for i in range(nb):\n fltr = np.where(pa.body_id == i)[0]\n cm_i = pa.cm[3 * i:3 * i + 3]\n R_i = pa.R[9 * i:9 * i + 9]\n for j in fltr:\n dx = pa.x[j] - cm_i[0]\n dy = pa.y[j] - cm_i[1]\n dz = pa.z[j] - cm_i[2]\n\n pa.dx0[j] = (R_i[0] * dx + R_i[3] * dy + R_i[6] * dz)\n pa.dy0[j] = (R_i[1] * dx + R_i[4] * dy + R_i[7] * dz)\n pa.dz0[j] = (R_i[2] * dx + R_i[5] * dy + R_i[8] * dz)", "def reset(self):\n urdf=os.path.join(os.environ[\"YUMI_PUSH_MODELS\"],\"robot_hand.urdf\")\n self._model = self._world.add_model(\n model_path=urdf,\n position=[-10.0, -10.0, 0.0],\n orientation=[0.0, 0.0, 0.0, 1.0],\n is_robot=True)\n self._model.set_dynamics(mass=self._config.get(\"act_mass\", 10.0),\n lateralFriction=0,spinningFriction=10,rollingFriction=10,\n linearDamping=0,angularDamping=0)", "def _reset_internal(self):\n super()._reset_internal()\n\n # Reset all object positions using initializer sampler if we're not directly loading from an xml\n if not self.deterministic_reset:\n\n # Sample from the placement initializer for all objects\n obj_pos, obj_quat = self.placement_initializer.sample()\n\n # Loop through all objects and reset their positions\n for i, (obj_name, _) in enumerate(self.mujoco_objects.items()):\n self.sim.data.set_joint_qpos(obj_name, np.concatenate([np.array(obj_pos[i]), np.array(obj_quat[i])]))", "def reset(self):\r\n # reset Wheel encoders\r\n self.start_time = time.time()\r\n [left_start, right_start] = self.Roomba.Query(43, 44)\r\n self.Motion.reset(left_start, right_start)\r\n # reset bumper\r\n self.bumper.reset()\r\n\r\n #reset grid world data\r\n self.action=[0.0,0.0]\r\n self.grid_state= [0,0,0]\r\n self.real_state = [0.0, 0.0, 0.0]\r\n self.trans_model = None\r\n pass", "def reset_desired_frames(self):\n self.kin.frames = self.root\n self.kin.active_joint_names = self.get_actuated_joint_names()", "def reset(self):\n self.x_pos1 = 0\n self.x_pos2 = self.x_pos1 + self.width\n self.y_pos = self.offset_y\n self.velocity = self.origin_velocity", "def reset_mocap_welds(self):\n if self.sim.model.nmocap > 0 and self.sim.model.eq_data is not None:\n for i in range(self.sim.model.eq_data.shape[0]):\n if self.sim.model.eq_type[i] == mujoco_py.const.EQ_WELD:\n self.sim.model.eq_data[i, :] = np.array(\n [0., 0., 0., 1., 0., 0., 0.])\n self.sim.forward()", "def resetForces(self):\n for atom in range(0, self.numAtoms):\n self.atoms[atom].fx = 0\n self.atoms[atom].fy = 0\n self.atoms[atom].fz = 0\n self.atoms[atom].pot = 0", "def reset(self):\n self.position = np.zeros(self.ndegres)\n self.velocity = np.zeros(self.ndegres)\n self.state = np.zeros(2*self.ndegres)\n self.flag = 0\n self.h_ref = np.array([self.ref for _ in range(self.horizon)])\n self.action = np.zeros(self.ACTION_DIM) \n self.h_action = 
np.zeros(self.ACTION_DIM*self.horizon)", "def reset_position(self):\n self.set_position(copy.deepcopy(self.ab_pos))", "def reset(self):\n self._position = TwoDV(0.0, 0.0)\n self._orient = TNavigator.START_ORIENTATION[self._mode]", "def setAllZero(self):\n self.robot.set_joint([0,0,0,0,0])\n self.robot.save_config()", "def reset_object_pose(self):\n roll = np.random.rand() * np.pi * 2\n pitch = np.random.rand() * np.pi * 2\n yaw = np.random.rand() * np.pi * 2\n pybullet.resetBasePositionAndOrientation(\n self.object_id,\n [0, 0, 0],\n pybullet.getQuaternionFromEuler([roll, pitch, yaw]))\n pos, rot = pybullet.getBasePositionAndOrientation(self.object_id)\n self.object_coords = coordinates.Coordinates(\n pos=pos, rot=coordinates.math.xyzw2wxyz(rot))", "def reset_shapes():\n if bpy.context.object.modeling_cloth:\n ob = bpy.context.object\n else: \n ob = extra_data['last_object']\n\n if ob.data.shape_keys == None:\n ob.shape_key_add('Basis') \n if 'modeling cloth source key' not in ob.data.shape_keys.key_blocks:\n ob.shape_key_add('modeling cloth source key') \n if 'modeling cloth key' not in ob.data.shape_keys.key_blocks:\n ob.shape_key_add('modeling cloth key') \n ob.data.shape_keys.key_blocks['modeling cloth key'].value=1\n \n keys = ob.data.shape_keys.key_blocks\n count = len(ob.data.vertices)\n co = np.zeros(count * 3, dtype=np.float32)\n keys['modeling cloth source key'].data.foreach_get('co', co)\n keys['modeling cloth key'].data.foreach_set('co', co)\n\n data[ob.name].vel *= 0\n \n ob.data.shape_keys.key_blocks['modeling cloth key'].mute = True\n ob.data.shape_keys.key_blocks['modeling cloth key'].mute = False", "def reset(self):\n self.world.reset()\n self.ref_state = self.ref_root_state\n # self.continue_from_now_by_phase(random() if self.rsi else 0.)\n self.skel.set_positions(self.ref_state.angles)\n # self.skel.set_positions(self.ref_motion.get_q(self.phase_frame))\n # dq = self.ref_motion.get_dq_dart(self.phase_frame)\n # self.skel.set_velocities(dq)\n self.skel.set_velocities(np.zeros(self.skel.ndofs))\n\n return self.state()", "def reset(self):\n self.vrp = np.matrix([0.5, 0.5, 1])\n self.vpn = np.matrix([0, 0, -1])\n self.vup = np.matrix([0, 1, 0])\n self.u = np.matrix([-1, 0, 0])\n self.extent = [1., 1., 1.]\n self.screen = [400., 400.]\n self.offset = [20., 20.]", "def _reset_wheel(self):\n [j.reset_dynamic_object() for j in self.wheels]\n\n p = [[-pi / 4, 0, 0], [pi / 4, 0, pi], [-pi / 4, 0, 0], [pi / 4, 0, pi]]\n\n for i in range(self.num_wheels):\n self.joints_slipping[i].set_position([0, 0, 0],\n relative_to=self.joints[i],\n reset_dynamics=False)\n self.joints_slipping[i].set_orientation(p[i],\n relative_to=self.joints[i],\n reset_dynamics=False)\n self.wheels[i].set_position([0, 0, 0], relative_to=self.joints[i],\n reset_dynamics=False)\n self.wheels[i].set_orientation([0, 0, 0],\n relative_to=self.joints[i],\n reset_dynamics=False)", "def reset(self):\n log.debug(\"RESET\")\n self.ref_pos_x = -1\n self.ref_pos_y = -1\n self.ref_pos_z = -1\n self.pos_x = -1\n self.pos_y = -1\n self.pos_z = -1\n self.yaw = 0\n self.throw_ongoing = False", "def reset(self):\n self.velocity_controller.reset()\n self.yaw_filter.reset()" ]
[ "0.6893635", "0.6499374", "0.6466375", "0.63432604", "0.63174415", "0.6309813", "0.6250218", "0.6250218", "0.6250218", "0.6227424", "0.5993534", "0.59932613", "0.5980894", "0.5974436", "0.5955571", "0.59437466", "0.59390795", "0.5894605", "0.58935535", "0.5870204", "0.5842682", "0.5828666", "0.5820798", "0.57952726", "0.5756398", "0.57539296", "0.5726474", "0.57253486", "0.5707295", "0.570537" ]
0.7228065
0
Initializes the territory selection phase; runs until all of the territories in the game world are selected.
def init_territory_selection_phase(self): phase_name = "Territory Selection Phase!\n\n" selected_territories = 0 while selected_territories < len(self.world.territories): for i, player in enumerate(self.players): complain = "" selected_territory = None while True: clear_output() self.world.show_territories() try: selected_territory = ' '.join([x.capitalize() for x in input( f"{phase_name}{complain}{player.color} player's Turn\nType in the name of one of the territories displayed, choose wisely!:\n").split()]) # updates territory owner # updates player's owned territories and troops if next(x["Owner"] for x in self.world.territories if x["Name"] == selected_territory) == None: self.world.update_territory_data( selected_territory, player.color) self.players[i].add_territory(selected_territory) self.players[i].remove_troops(1) break else: complain = "Territory has an owner already!\n" except: complain = "Not a valid territory!\n" pass selected_territories += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_initial_state(self):\n # collect the ids of vehicles in the network\n self.ids = self.vehicles.get_ids()\n self.controlled_ids = self.vehicles.get_controlled_ids()\n self.sumo_ids = self.vehicles.get_sumo_ids()\n self.rl_ids = self.vehicles.get_rl_ids()\n\n # dictionary of initial observations used while resetting vehicles after\n # each rollout\n self.initial_observations = dict.fromkeys(self.ids)\n\n # create the list of colors used to different between different types of\n # vehicles visually on sumo's gui\n #TODO: Get these colors working!\n # self.colors = {(255,0,0), (0,255,0),(0,0,255),(255,255,255)}\n self.colors = {}\n key_index = 1\n color_choice = np.random.choice(len(COLORS))\n for i in range(self.vehicles.num_types):\n self.colors[self.vehicles.types[i]] = \\\n COLORS[(color_choice + key_index) % len(COLORS)]\n key_index += 1\n\n for veh_id in self.ids:\n # set the colors of the vehicles based on their unique types\n veh_type = self.vehicles.get_state(veh_id, \"type\")\n self.traci_connection.vehicle.setColor(veh_id,\n self.colors[veh_type])\n\n # add the initial states to the vehicles class\n self.vehicles.set_edge(\n veh_id, self.traci_connection.vehicle.getRoadID(veh_id))\n self.vehicles.set_position(\n veh_id, self.traci_connection.vehicle.getLanePosition(veh_id))\n self.vehicles.set_lane(\n veh_id, self.traci_connection.vehicle.getLaneIndex(veh_id))\n self.vehicles.set_speed(\n veh_id, self.traci_connection.vehicle.getSpeed(veh_id))\n self.vehicles.set_route(\n veh_id, self.available_routes[self.vehicles.get_edge(veh_id)])\n self.vehicles.set_absolute_position(\n veh_id, self.get_x_by_id(veh_id))\n # the time step of the last lane change is always present in\n # the environment,but only used by sub-classes that apply lane\n # changing\n self.vehicles.set_state(veh_id, \"last_lc\",\n -1 * self.lane_change_duration)\n # some constant vehicle parameters\n self.vehicles.set_state(\n veh_id, \"length\",\n self.traci_connection.vehicle.getLength(veh_id))\n self.vehicles.set_state(veh_id, \"max_speed\", self.max_speed)\n\n # import initial state data to initial_observations dict\n self.initial_observations[veh_id] = dict()\n self.initial_observations[veh_id][\"type\"] = veh_type\n self.initial_observations[veh_id][\"edge\"] = \\\n self.traci_connection.vehicle.getRoadID(veh_id)\n self.initial_observations[veh_id][\"position\"] = \\\n self.traci_connection.vehicle.getLanePosition(veh_id)\n self.initial_observations[veh_id][\"lane\"] = \\\n self.traci_connection.vehicle.getLaneIndex(veh_id)\n self.initial_observations[veh_id][\"speed\"] = \\\n self.traci_connection.vehicle.getSpeed(veh_id)\n self.initial_observations[veh_id][\"route\"] = \\\n self.available_routes[self.initial_observations[veh_id][\"edge\"]]\n self.initial_observations[veh_id][\"absolute_position\"] = \\\n self.get_x_by_id(veh_id)\n\n # set speed mode\n self.set_speed_mode(veh_id)\n\n # set lane change mode\n self.set_lane_change_mode(veh_id)\n\n # save the initial state. 
This is used in the _reset function\n #\n route_id = \"route\" + self.initial_observations[veh_id][\"edge\"]\n pos = self.traci_connection.vehicle.getPosition(veh_id)\n\n self.initial_state[veh_id] = \\\n (self.initial_observations[veh_id][\"type\"], route_id,\n self.initial_observations[veh_id][\"lane\"],\n self.initial_observations[veh_id][\"position\"],\n self.initial_observations[veh_id][\"speed\"], pos)\n\n # collect list of sorted vehicle ids\n self.sorted_ids, self.sorted_extra_data = self.sort_by_position()\n\n # collect headway, leader id, and follower id data\n for veh_id in self.ids:\n headway = self.traci_connection.vehicle.getLeader(veh_id, 2000)\n if headway is None:\n self.vehicles.set_leader(veh_id, None)\n self.vehicles.set_headway(veh_id, 9e9)\n else:\n self.vehicles.set_leader(veh_id, headway[0])\n self.vehicles.set_headway(veh_id, headway[1])\n self.vehicles.set_follower(headway[0], veh_id)\n\n # contains the last lc before the current step\n self.prev_last_lc = dict()\n for veh_id in self.ids:\n self.prev_last_lc[veh_id] = self.vehicles.get_state(veh_id,\n \"last_lc\")\n\n # subscribe the requested states for traci-related speedups\n for veh_id in self.ids:\n self.traci_connection.vehicle.subscribe(\n veh_id, [tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION,\n tc.VAR_ROAD_ID, tc.VAR_SPEED])\n self.traci_connection.vehicle.subscribeLeader(veh_id, 2000)", "def ROOMSELECTION_LOOP():\n pass", "def __init__(self):\n \"\"\" action_ space : pick up location , Drop location\n state_space : location , time (hours) , day\n state_init : random pick from the state_space \"\"\"\n self.action_space = [(i,j) for i in range(m) for j in range(m) if i!=j or i==0]\n # Total states (Xi Tj Dk)\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n # random Initialize of state (location, hours, day)\n self.state_init = random.choice(self.state_space)\n # Start the first round\n self.reset()", "def populate(self):\n\n NUM_COUNTRIES = 2 # random.randint(1, 4)\n\n # find a suitable hex\n with Timer(\"Creating initial data\", debug=self.debug):\n\n for i in range(NUM_COUNTRIES):\n country, provinces, pops = create_country(self, self.map)\n country.determine_tax_policy()\n self.countries.append(country)", "def init_game(self):\n self.view.carregar_jogadores_possiveis(self._possible_players_list())\n self.view.put_view_in_main_loop()", "def setup_mode_selector(self):\n # read modes from json file\n # TODO use more simple ways to read\n modes_reader = file(self.modes_file)\n self.init_modes = json.load(modes_reader)\n\n # set selector\n self.modes_names = self.init_modes.keys()\n self.modes_names.insert(0, \"Set by hand\")\n self.modes_selector = Pmw.ComboBox(\n self.toolbar,\n label_text = 'Modes selector',\n labelpos = 'nw',\n selectioncommand = self.prepare_world,\n scrolledlist_items = self.modes_names,\n )\n self.modes_selector.grid(row = 0, column = 0, sticky = tk.W)\n first = self.modes_names[0]\n self.modes_selector.selectitem(first)\n self.prepare_world(first)", "def final_init(self, **kwargs):\n # Loading hierarchical settings and creating initial routine\n self.create_initial_routine(load_parameters=False)\n if self.autorun:\n # FIXME: if the init does not finish the object does not exist and\n # the routine results are not accessible\n try:\n self.run()\n self.post_run()\n except:\n log.error(\n \"Autorun failed to fully run, concluded routine steps \"\n \"are stored in the routine_steps attribute.\",\n exc_info=True,\n )", "def init_population(self):\n pass", 
"def _prepare_first_step(self):\n if self.townhalls:\n self._game_info.player_start_location = self.townhalls.first.position\n self._game_info.map_ramps, self._game_info.vision_blockers = self._game_info._find_ramps_and_vision_blockers()", "def initializeTamagochi():\n\n global tamagochiState # Do not delete this part\n global nutrition # or this part\n\n pass\n\n nutrition = 100 #Full in beginning ", "def initialize_region(self):\n self.new_region_name = \"\"\n self.map.regions.create_new_region()", "def prepare_world(self, mode_name):\n self.world_alive = False\n self.world_setable = True\n if (self.init_modes.has_key(mode_name)):\n mode = self.init_modes[mode_name]\n self.world_status.init_status(mode)\n\n self.init_world = self.world_status.now.copy()\n if (not (len(self.world) == 0)):\n for row in range(self.cell_row):\n for col in range(self.cell_col):\n item_id = self.world[row, col]\n if (self.world_status.now[row, col]):\n self.canvas.itemconfig(item_id,\n fill = self.color_alive)\n else:\n self.canvas.itemconfig(item_id,\n fill = self.color_dead)", "def init_game_setting(self):\n self.state.state_counter_while_testing += 1", "def update_selection(self, game, obj):\n self.clear()\n if obj:\n if obj in game.towers:\n self.selected_tower = obj\n self.update_selection_tower(self.selected_tower)\n elif obj in game.monsters:\n self.selected_monster = obj\n self.update_selection_monster(self.selected_monster)\n elif obj in self.monsters_images:\n self.selected_monster = obj\n self.update_selection_monster(self.selected_monster, False)", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def initialize(self):\r\n state_name = self.state\r\n\r\n state_name = state_name.lower()\r\n\r\n response = requests.get(\"https://cdn-api.co-vin.in/api/v2/admin/location/states\") \r\n\r\n if response.ok:\r\n\r\n df = pd.DataFrame(json.loads(response.text)[\"states\"]) \r\n\r\n state = process.extractOne(state_name, df[\"state_name\"].tolist()) # fuzzy match to get best state match \r\n\r\n self.state_id = df.loc[df.state_name == state[0],[\"state_id\"]].values[0][0] \r\n self.load_districts()", "def initialize_states(self, time_zero):\n self.asu_tank_states[time_zero] = []\n for idx, row in self.data.tanks.iterrows():\n state_row = []\n asu_id = int(row[['asu_id']])\n n = int(row[['n']])\n current_shift = 2 - time_zero % 2\n\n state_row.append(asu_id)\n state_row.append(n)\n state_row.append(self.data.tank_sku[asu_id, n]) # sku\n state_row.append(time_zero) # shift\n state_row.append(self.init_states[asu_id, n]) # volume\n state_row.append(float(row[['capacity_min']]))\n state_row.append(float(row[['capacity']]))\n state_row.append(0 if state_row[self.output_column_keys['volume']] >= state_row[self.output_column_keys['death_vol']] else 1)\n day_to_death = self.calculate_time_to_death(time_zero,\n state_row[self.output_column_keys['volume']] + self.data.volumes_to_add.get(\n (asu_id, n, time_zero + 1), 0),\n state_row[self.output_column_keys['asu_id']],\n state_row[self.output_column_keys['n']],\n state_row[self.output_column_keys['death_vol']])\n state_row.append(day_to_death) # days_to_death\n state_row.append(0) # consumption\n state_row.append(0) # delivery\n state_row.append(0) # added_load\n next_shift_closed = .5 if self.data.asu_work_shift[asu_id][current_shift] == 0 else 0\n state_row.append(day_to_death - .5 * (self.data.trip_duration(asu_id) // 
self.data.parameters.shift_size) - next_shift_closed -\n 0.25 * ((self.data.trip_duration(asu_id) % self.data.parameters.shift_size) / self.data.parameters.shift_size)) # days_to_death_drive\n\n self.asu_tank_states[time_zero].append(state_row)", "def setUpClass(cls):\n cls.use_temp_region()\n cls.runModule(\"g.region\", raster=\"elev_state_500m\")", "def sync_territories(self):\n for territory_state in self.territory.all():\n territory_state.sync()", "def _init_world(self):\n self.world.restricted_world = {\n 'not_road': [],\n 'cross_road': [],\n }\n for polygon in self._data_loader.data.get_polygons(0):\n polygon_name = polygon['label']\n polygon_points = polygon['points']\n if polygon_name in {'not_road', 'cross_road'}:\n self.world.restricted_world[polygon_name].append(geometry.Polygon(\n self._data_loader.convertIMG2PLAY(polygon_points)\n ))", "def initialize(self):\n self._setup_simulation_from_parameters()\n if \"orrb\" in self.constants.observation_providers:\n self._reset()\n self._goal = self._next_goal()\n self.update_goal_info()\n\n self.observer = self._build_observer()", "def initiate(self):\n self._load_parameters()\n self._initiate_region_dict()\n self._initiate_parameter_dict()\n self.initiated = True", "def _initialize_trees(self):", "def _team_init(self):\r\n\t\tfor team_type, team_info in self._teams.items():\r\n\t\t\tteam_info.team_type = team_type\r\n\t\t\tteam_info.maze_pos_finder = \\\r\n\t\t\t\tself._maze_manager.get_finder_by_name(team_type.__str__())", "def _load_map(self):\n map = choice(self.environment_template)\n environment = Environment(map.name, map.desc, map.habitable, self.level)\n\n # Display map description\n description = environment.description.format(noise=environment.monster_ctrl.monsters[0].noise)\n description = \"\\n\".join(wrap(description, width=80, fix_sentence_endings=True, initial_indent=\" \",\n subsequent_indent=\" \", break_long_words=False))\n print(\"\\n\", description, \"\\n\")\n input(\"Press any key to continue...\")\n\n initiative_monster = \"Monster has\" if environment.monster_ctrl.monster_count == 1 else \"Monsters have\"\n first_attacker = \"Hero has\" if environment.initiative.value == 0 else initiative_monster\n\n while environment.monster_ctrl.monster_count > 0:\n display_battle(self.hero, environment, first_attacker)\n decision = get_user_input([1, 2, 3])\n if decision == 1:\n self._duels(environment)\n\n elif decision == 2:\n self._show_bag()\n\n else:\n if random() < self.hero.health * .1:\n print(\"[+] Successfully ran away!\")\n input(\"Press any key to continue...\")\n return\n else:\n print(\"[!] Bummer, you failed to run away. 
You loss two dice rolls on your next attack.\")\n input(\"Press any key to continue...\")\n self.hero.dice_count -= 2\n self._duels(environment)\n\n self.level += 1\n display_no_combat_start(self.hero, environment)\n\n decision = 0\n # Keep iterating until user decides to move on\n while decision != 1:\n if environment.has_loot:\n decision = get_user_input([1, 2, 3, -1])\n else:\n decision = get_user_input([1, 2, -1])\n\n if decision == -1:\n self._quit()\n elif decision == 2:\n self._show_bag()\n display_no_combat_start(self.hero, environment)\n elif decision == 3:\n print(\"[+] Looted\")\n for loot in environment.loot_room():\n self.hero.set_loot(loot)\n display_no_combat_start(self.hero, environment)\n else:\n return", "def __init__(self):\n self.action_space = [(0,0)] + list(permutations([i for i in range(m)], 2))\n self.state_space = [(X,T,D) for X in range(m) for T in range(t) for D in range(d)]\n self.state_init = random.choice(self.state_space)\n\n # Start the first round\n self.reset()", "def force_load(self):\n for selection in self.selections.normal_values():\n selection.force_load()", "def _choose_regions(self, display_regions=False):\n dstl = Load_DSTL()\n if self.class_type == 1:\n # Select regions where there are buildings (with red roofs)\n test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n train_image, train_mask = dstl.extract_region_pos(1900, 3100, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(950, 1450, cutout_size=[200, 200], object_class=self.class_type)\n elif self.class_type == 5:\n train_image, train_mask = dstl.extract_region_pos(1150, 2150, cutout_size=[400, 400], object_class=self.class_type)\n test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(1900, 1950, cutout_size=[400, 400], object_class=self.class_type)\n else:\n pass\n self.images = {'train': train_image, 'cv': cv_image, 'test': test_image}\n self.masks = {'train': train_mask, 'cv': cv_mask, 'test': test_mask}\n if display_regions:\n for key in self.images.keys():\n display_three_band(self.images[key], self.masks[key], colors='green', title='{:} region'.format(key))", "def _finish_init(self):\n\n # This is usually done in set_other(), but we already set it as part of\n # the constructor.\n self.this_branch.fetch(self.other_branch,\n last_revision=self.other_basis)", "def __init__(self):\n self.opening_scene = DungeonGate()\n # this list define the order of scenes in the corridor\n self.corridor_scenes = [GuardsRoom(), Cell(), Armory(), EmptyRoom(), Dormitory()]\n shuffle(self.corridor_scenes)\n self.explored_scenes = {\n \"GuardsRoom\": \"unexplored\",\n \"Cell\": \"unexplored\",\n \"Dormitory\": \"unexplored\",\n \"Armory\": \"unexplored\",\n \"EmptyRoom\": \"unexplored\",\n \"DungeonGate\": \"unexplored\"\n }" ]
[ "0.54485637", "0.5442429", "0.54404324", "0.53849924", "0.5252147", "0.52514434", "0.5197759", "0.5194349", "0.5169358", "0.51636755", "0.5162915", "0.51623625", "0.5112647", "0.5089078", "0.50820476", "0.50782955", "0.50541395", "0.50344735", "0.5020178", "0.5017316", "0.49985644", "0.49871936", "0.4986844", "0.49786645", "0.49641648", "0.49616927", "0.49541858", "0.49323788", "0.49278367", "0.49154595" ]
0.7551169
0
finds the epipolar lines in two images given a set of point correspondences
def find_epilines(imgLeft, imgRight, ptsLeft, ptsRight, F): color = [] for i in range(ptsLeft.shape[0]): color.append(tuple(np.random.randint(0, 255, 3).tolist())) print(color) # Find epilines corresponding to points in right image (right image) linesLeft = cv2.computeCorrespondEpilines(ptsRight.reshape(-1, 1, 2), 2, F) linesLeft = linesLeft.reshape(-1, 3) # Draw its lines on left image img5, img6 = drawlines(imgLeft, imgRight, linesLeft, ptsLeft, ptsRight, color) # Find epilines corresponding to points in left image (left image) linesRight = cv2.computeCorrespondEpilines(ptsLeft.reshape(-1, 1, 2), 1, F) linesRight = linesRight.reshape(-1, 3) # Draw its lines on right image img3, img4 = drawlines(imgRight, imgLeft, linesRight, ptsRight, ptsLeft, color) plt.subplot(121), plt.imshow(img5) plt.subplot(122), plt.imshow(img3) plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize_epipolar_lines(self, img1, img2, p1, p2, E, save_path):\n # get fundamental matrix\n F, mask_fdm = cv2.findFundamentalMat(p1, p2, cv2.RANSAC)\n p1_selected = p1[mask_fdm.ravel() == 1]\n p2_selected = p2[mask_fdm.ravel() == 1]\n\n # draw lines\n lines1 = cv2.computeCorrespondEpilines(\n p2_selected.reshape(-1, 1, 2), 2, F).reshape(-1, 3)\n img5, _ = self.drawlines(\n img1, img2, lines1, p1_selected, p2_selected, 100)\n\n lines2 = cv2.computeCorrespondEpilines(\n p1_selected.reshape(-1, 1, 2), 1, F).reshape(-1, 3)\n img3, _ = self.drawlines(\n img2, img1, lines2, p2_selected, p1_selected, 100)\n canvas = np.concatenate((img5, img3), axis=1)\n cv2.imwrite(save_path, canvas)", "def intersectConics(E1, E2):\n\n P = np.array([])\n r1 = matrix_rank(E1)\n r2 = matrix_rank(E2)\n \n if(r1==3 and r2==3):\n P = completeIntersection(E1,E2) \n else:\n if (r2 < 3): #E2 is degenerate\n defE = E2\n fullE = E1\n else:\n defE = E1 #E1 is degenerate\n fullE = E2\n m, l = decomposeDegenerateConic(defE)\n P1 = intersectConicLine(fullE,m)\n P2 = intersectConicLine(fullE,l)\n P = np.array([P1, P2])\n points_x = []\n points_y = []\n for i in range(2):\n P1 = P[i]\n if(P1.size!=0):\n for j in range(P1.shape[0]):\n points_x.append(P1[j,0]/P1[j,2])\n points_y.append(P1[j,1]/P1[j,2])\n return points_x, points_y", "def drawlines(img1,img2,lines,pts1,pts2):\n r, c = img1.shape[:2]\n if len(img1.shape)==2: img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2BGR)\n if len(img2.shape)==2: img1 = cv2.cvtColor(img2,cv2.COLOR_GRAY2BGR)\n for r, pt1, pt2 in zip(lines, pts1, pts2):\n color = tuple(np.random.randint(0,255,3).tolist())\n x0, y0 = map(int, [0, -r[2]/r[1] ])\n x1, y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])\n img1 = cv2.line(img1, (x0,y0), (x1,y1), color, 5)\n img1 = cv2.circle(img1,tuple(pt1),20,color,-1)\n img2 = cv2.circle(img2,tuple(pt2),20,color,-1)\n # for\n return img1, img2", "def drawlines(img1, img2, lines, pts1, pts2, color):\n r, c = img1.shape\n\n img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)\n img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)\n for r, pt1, pt2, co in zip(lines, pts1, pts2, color):\n x0, y0 = map(int, [0, -r[2] / r[1] ])\n x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1] ])\n img1 = cv2.line(img1, (x0, y0), (x1, y1), co, 1)\n img1 = cv2.circle(img1, tuple(pt1), 5, co, -1)\n img2 = cv2.circle(img2, tuple(pt2), 5, co, -1)\n return img1, img2", "def plot_epilines(img1, img2, matches, epip_tup, fundamental, name, plot_f=False):\r\n # Source of heatmap plotting code for displaying the fundamental matrix:\r\n # https://matplotlib.org/3.1.1/gallery/images_contours_and_fields/image_annotated_heatmap.html\r\n\r\n fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(50, 15)) if plot_f \\\r\n else plt.subplots(nrows=1, ncols=2, figsize=(40, 11))\r\n fig.suptitle(\"Epilines ({})\".format(name))\r\n ax[0].imshow(img1)\r\n ax[0].set_title(\"Left Image\")\r\n ax[1].imshow(img2)\r\n ax[1].set_title(\"Right Image\")\r\n\r\n colour_list = ['r', 'g', 'b', 'c', 'm', 'y']\r\n e_l, e_r = epip_tup\r\n\r\n for p_l, p_r in matches:\r\n colour = random.randint(0, len(colour_list) - 1)\r\n ax[0].plot((e_l[0], p_l[0]), (e_l[1], p_l[1]), marker='o', ls='-', c=colour_list[colour])\r\n ax[1].plot((e_r[0], p_r[0]), (e_r[1], p_r[1]), marker='o', ls='-', c=colour_list[colour])\r\n\r\n if plot_f:\r\n ax[2].imshow(fundamental)\r\n ax[2].set_title(\"Fundamental Matrix\")\r\n for i in range(len(fundamental)):\r\n for j in range(len(fundamental)):\r\n ax[2].text(j, i, round(fundamental[i, j], 5), ha=\"center\", va=\"center\", 
color=\"w\")\r\n\r\n plt.show()", "def road_lines():\n cv2.polylines(frame_1, [pts_1], True, yellow_color)\n cv2.polylines(frame_2, [pts_2], True, yellow_color)", "def draw_matches(im1, im2, im1_pts, im2_pts, inlier_mask=None):\n height1, width1 = im1.shape[:2]\n height2, width2 = im2.shape[:2]\n canvas_height = max(height1, height2)\n canvas_width = width1 + width2\n\n canvas = np.zeros((canvas_height, canvas_width, 3), im1.dtype)\n canvas[:height1, :width1, :] = im1\n canvas[:height2, width1:width1+width2, :] = im2\n\n im2_pts_adj = im2_pts.copy()\n im2_pts_adj[:, 0] += width1\n\n if inlier_mask is None:\n inlier_mask = np.ones(im1_pts.shape[0], dtype=np.bool)\n\n # Converts all to integer for plotting\n im1_pts = im1_pts.astype(np.int32)\n im2_pts_adj = im2_pts_adj.astype(np.int32)\n\n # Draw points\n all_pts = np.concatenate([im1_pts, im2_pts_adj], axis=0)\n for pt in all_pts:\n cv2.circle(canvas, (pt[0], pt[1]), 4, _COLOR_BLUE, 2)\n\n # Draw lines\n for i in range(im1_pts.shape[0]):\n pt1 = tuple(im1_pts[i, :])\n pt2 = tuple(im2_pts_adj[i, :])\n color = _COLOR_GREEN if inlier_mask[i] else _COLOR_RED\n cv2.line(canvas, pt1, pt2, color, 2)\n\n return canvas", "def correspondence_points(img1, img2, tag='c'):\n if len(img1.shape) == 3:\n img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n if len(img2.shape) == 3:\n img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n detector = cv2.SURF(800)\n norm = cv2.NORM_L2\n flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)\n kp1, desc1 = detector.detectAndCompute(img1, None)\n kp2, desc2 = detector.detectAndCompute(img2, None)\n raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2\n p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)\n\n if len(p1) >= 4:\n H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)\n print '%d / %d inliers/matched' % (np.sum(status), len(status))\n status = status.reshape(-1) # flatten\n p1 = p1[status == 1]\n p2 = p2[status == 1]\n kp_pairs = [kp_pairs[i] for i in range(len(kp_pairs)) if status[i] == 1]\n else:\n # Just depend on the thresholding for filtering matches\n p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches, ratio=0.3)\n\n draw_correspondence_points(img1, img2, kp_pairs, tag=tag)\n return p1, p2, kp_pairs", "def draw_matches(img1, kp1, img2, kp2, matches):\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n out = np.zeros((max([rows1, rows2]), cols1 + cols2, 3), dtype='uint8')\n out = img2.copy()\n # Place the first image to the left\n # out[:rows1,:cols1,:] = np.dstack([img1, img1, img1])\n\n # Place the next image to the right of it\n # out[:rows2,cols1:cols1+cols2,:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have between both images\n # draw circles, then connect a line between them\n for mat in matches:\n # Get the matching keypoints for each of the images\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n (x1, y1) = kp1[img1_idx].pt\n (x2, y2) = kp2[img2_idx].pt\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour blue\n # thickness = 1\n # cv2.circle(out, (int(x1),int(y1)), 4, (255, 0, 0), 1)\n cv2.circle(out, (int(x2) + cols1, int(y2)), 4, (255, 0, 0), 1)\n\n # Draw a line in between the two points\n # thickness = 1\n # colour blue\n # cv2.line(out, 
(int(x1),int(y1)), (int(x2)+cols1,int(y2)), (255, 0, 0), 1)\n return out", "def drawMatches(orbimg1, kp1, orbimg2, kp2, matches):\r\n\r\n # Create a new output image that concatenates the two images together\r\n # (a.k.a) a montage\r\n rows1 = orbimg1.shape[0]\r\n cols1 = orbimg1.shape[1]\r\n rows2 = orbimg2.shape[0]\r\n cols2 = orbimg2.shape[1]\r\n\r\n # Create the output image\r\n # The rows of the output are the largest between the two images\r\n # and the columns are simply the sum of the two together\r\n # The intent is to make this a colour image, so make this 3 channels\r\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')\r\n\r\n # Place the first image to the left\r\n out[:rows1,:cols1] = np.dstack([orbimg1, orbimg1, orbimg1])\r\n\r\n # Place the next image to the right of it\r\n out[:rows2,cols1:] = np.dstack([orbimg2, orbimg2, orbimg2])\r\n\r\n # For each pair of points we have between both images\r\n # draw circles, then connect a line between them\r\n for mat in matches:\r\n\r\n # Get the matching keypoints for each of the images\r\n orbimg1_idx = mat.queryIdx\r\n orbimg2_idx = mat.trainIdx\r\n\r\n # x - columns\r\n # y - rows\r\n (x1,y1) = kp1[orbimg1_idx].pt\r\n (x2,y2) = kp2[orbimg2_idx].pt\r\n\r\n # Draw a small circle at both co-ordinates\r\n # radius 4\r\n # colour blue\r\n # thickness = 1\r\n cv2.circle(out, (int(x1),int(y1)), 4, (255, 0, 0), 1) \r\n cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (255, 0, 0), 1)\r\n\r\n # Draw a line in between the two points\r\n # thickness = 1\r\n # colour blue\r\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (255,0,0), 1)\r\n\r\n\r\n # Show the image\r\n #cv2.imshow('Matched Features', out)\r\n #cv2.waitKey(0)\r\n #cv2.destroyWindow('Matched Features')\r\n\r\n # Also return the image if you'd like a copy\r\n return out", "def find_matching_points(img1, img2, max_pix_movement=50, normalize=True, show=False):\n\n # Initiate ORB detector\n orb = cv2.ORB_create()\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(img1, None)\n kp2, des2 = orb.detectAndCompute(img2, None)\n\n # create BFMatcher object\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # Match descriptors.\n matches = bf.match(des1,des2)\n # Sort them in the order of their distance.\n matches = sorted(matches, key = lambda x:x.distance)\n # Draw first 10 matches.\n if show:\n img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:500], None,flags=2)\n plt.imshow(img3),plt.show()\n # Get the matching keypoints for each of the images\n\n list_kp1 = []\n list_kp2 = []\n for mat in matches:\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n list_kp1.append(kp1[img1_idx].pt)\n list_kp2.append(kp2[img2_idx].pt)\n\n n_kp1, n_kp2 = np.float32(list_kp1), np.float32(list_kp2)\n n_kp1 /= np.asarray([img1.shape[1], img1.shape[0]], np.float32)\n n_kp2 /= np.asarray([img2.shape[1], img2.shape[0]], np.float32)\n n_kp1 = n_kp1 * 2. - 1.\n n_kp2 = n_kp2 * 2. 
- 1.\n\n return np.int32(list_kp1), np.int32(list_kp2), n_kp1, n_kp2", "def line_a_b(img_rows: int, img_cols: int, point_a: Point2D, point_b: Point2D):\n rr, cc = draw.line(point_a.row, point_a.col, point_b.row, point_b.col)\n rr_s, rr_e = constrain_interval(rr, img_rows)\n cc_s, cc_e = constrain_interval(cc, img_cols)\n start = max(rr_s, cc_s)\n end = min(rr_e, cc_e)\n if end != -1:\n rr = rr[start:end]\n cc = cc[start:end]\n line_pixels = [Point2D(rr[i], cc[i]) for i in range(len(cc))]\n return line_pixels", "def draw_matches(img1, kp1, img2, kp2, matches, color=None):\n # We're drawing them side by side. Get dimensions accordingly.\n # Handle both color and grayscale images.\n if len(img1.shape) == 3:\n new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1], img1.shape[2])\n elif len(img1.shape) == 2:\n new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1])\n new_img = np.zeros(new_shape, type(img1.flat[0]))\n # Place images onto the new image.\n new_img[0:img1.shape[0], 0:img1.shape[1]] = img1\n new_img[0:img2.shape[0], img1.shape[1]:img1.shape[1] + img2.shape[1]] = img2\n\n # Draw lines between matches. Make sure to offset kp coords in second image appropriately.\n r = 15\n thickness = 2\n if color:\n c = color\n md = 0\n for m in matches:\n if m.distance > md:\n md = m.distance\n for m in matches:\n # Generate random color for RGB/BGR and grayscale images as needed.\n if not color:\n c = np.random.randint(0, 256, 3) if len(img1.shape) == 3 else np.random.randint(0, 256)\n # So the keypoint locs are stored as a tuple of floats. cv2.line(), like most other things,\n # wants locs as a tuple of ints.\n c = [int(c[0]), int(c[1]), int(c[2])]\n #c = int(255*m.distance/md)\n print(c)\n #c = [255,255,255]\n\n end1 = tuple(np.round(kp1[m.trainIdx].pt).astype(int))\n end2 = tuple(np.round(kp2[m.queryIdx].pt).astype(int) + np.array([img1.shape[1], 0]))\n cv2.line(new_img, end1, end2, c, thickness)\n cv2.circle(new_img, end1, r, c, thickness)\n cv2.circle(new_img, end2, r, c, thickness)\n\n return new_img", "def get_intersect_points(line1, line2):\n intersect_points = matrix.matrix_sol([line1, line2])\n return intersect_points", "def points_on_lines(hyperplanes):\n intersections = []\n for row in hyperplanes:\n intersections.append(an_intersection(row[:-1], -row[-1]))\n return np.array(intersections)", "def draw_matches(img1, kp1, img2, kp2, matches, inliers, ignore_indexes, filter_by_dist=True, color=None):\n # We're drawing them side by side. Get dimensions accordingly.\n # Handle both color and grayscale images.\n\n\n\n img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2RGB)\n img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2RGB)\n if len(img1.shape) == 3:\n new_shape = (img1.shape[0] + img2.shape[0], max(img1.shape[1], img2.shape[1]), img1.shape[2])\n elif len(img1.shape) == 2:\n new_shape = (img1.shape[0] + img2.shape[0], max(img1.shape[1], img2.shape[1]))\n new_img = np.zeros(new_shape, type(img1.flat[0])) \n # Place images onto the new image.\n new_img[0:img1.shape[0],0:img1.shape[1]] = img1\n new_img[img1.shape[0]:img1.shape[0]+img2.shape[0],0:img1.shape[1]] = img2\n \n # Draw lines between matches. 
Make sure to offset kp coords in second image appropriately.\n r = 1\n thickness = 1\n if color:\n c = color\n\n # print(new_img.shape)\n distances = []\n for m in matches:\n distances.append(m.distance)\n \n dist_threshold = min(distances) * 2\n # print(dist_threshold)\n \n for i, m in enumerate(matches):\n if inliers:\n if not i in inliers:\n continue\n if ignore_indexes:\n if i in ignore_indexes:\n continue\n if filter_by_dist:\n if m.distance > 50:\n continue\n \n # Generate random color for RGB/BGR and grayscale images as needed.\n if not color: \n c = tuple(np.random.randint(0,256,3)) if len(img1.shape) == 3 else np.random.randint(0,256)\n c = ( int (c [ 0 ]), int (c [ 1 ]), int (c [ 2 ])) \n \n # So the keypoint locs are stored as a tuple of floats. cv2.line(), like most other things,\n # wants locs as a tuple of ints.\n\n try:\n end1 = tuple(np.round(kp1[m.queryIdx].pt).astype(int))\n end2 = tuple(np.round(kp2[m.trainIdx].pt).astype(int) + np.array([ 0, img1.shape[0]]))\n cv2.line(new_img, end1, end2, c, thickness)\n cv2.circle(new_img, end1, r, c, thickness)\n cv2.circle(new_img, end2, r, c, thickness)\n except:\n continue\n \n\n return new_img", "def drawMatches(img1, kp1, img2, kp2, matches):\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')\n\n # Place the first image to the left\n out[:rows1,:cols1] = np.dstack([img1, img1, img1])\n\n # Place the next image to the right of it\n out[:rows2,cols1:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have between both images\n # draw circles, then connect a line between them\n for mat in matches:\n\n # Get the matching keypoints for each of the images\n img1_idx = mat.queryIdx\n img2_idx = des_dict[mat.trainIdx][1]\n\n # x - columns\n # y - rows\n (x1,y1) = kp1[img1_idx].pt\n (x2,y2) = kp2[img2_idx].pt\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour green\n # thickness = 1\n cv2.circle(out, (int(x1),int(y1)), 4, (0, 255, 0)) \n cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (0, 255, 0))\n\n # Draw a line in between the two points\n # thickness = 1\n # colour green\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (0, 255, 0), 1)\n\n\n # Show the image\n #cv2.imshow('Matched Features', out)\n #cv2.waitKey(0)\n #cv2.destroyWindow('Matched Features')\n\n # Also return the image if you'd like a copy\n return out", "def draw_arrows(img, p1, p2, color):\n for i in range(p1.shape[0]):\n x = tuple(p1[i].ravel())\n y = tuple(p2[i].ravel())\n img = cv2.arrowedLine(img, x, y, color, thickness=3)\n return img", "def drawMatches(img1, kp1, img2, kp2, matches):\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')\n\n # Place the first image to the left\n out[:rows1,:cols1] = np.dstack([img1, img1, img1])\n\n # Place the next image to the right of it\n out[:rows2,cols1:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have between both images\n # draw circles, then connect a line between them\n for mat in matches:\n\n # Get the matching keypoints for each of the images\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n (x1,y1) = 
kp1[img1_idx].pt\n (x2,y2) = kp2[img2_idx].pt\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour blue\n # thickness = 1\n cv2.circle(out, (int(x1),int(y1)), 4, (255, 0, 0), 1) \n cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (255, 0, 0), 1)\n\n # Draw a line in between the two points\n # thickness = 1\n # colour blue\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (255, 0, 0), 1)\n\n\n # Show the image\n\n # Also return the image if you'd like a copy\n return out", "def get_intersection_points(lines, debug_img=None):\n\n # Convert [a,b,c,d] to [(a,b), (b,c), (c,d), (d,a)]\n line_pairs = list(zip(lines, lines[1:]+lines[:1]))\n\n corners = [get_intersection_point(*p) for p in line_pairs]\n\n if debug_img is not None:\n int_corners = np.array(corners, np.int32)\n draw_corners(debug_img, int_corners, (0, 255, 0))\n\n return corners", "def draw_match(img1, p1, img2, p2, mask=None, H=None):\n h1, w1 = img1.shape[:2]\n h2, w2 = img2.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)\n vis[:h1, :w1] = img1\n vis[:h2, w1:w1 + w2] = img2\n vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)\n\n if H is not None:\n corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])\n corners = np.int32(\n cv2.perspectiveTransform(\n corners.reshape(1, -1, 2), H).reshape(-1, 2) \\\n + (w1, 0))\n cv2.polylines(vis, [corners], True, (255, 255, 255))\n\n if mask is None:\n mask = np.ones(len(p1), np.bool_)\n\n green = (63, 255, 0)\n red = (0, 0, 255)\n for (x1, y1), (x2, y2), inlier in zip(np.int32(p1), np.int32(p2), mask):\n col = [red, green][inlier]\n if inlier:\n cv2.line(vis, (x1, y1), (x2 + w1, y2), col)\n cv2.circle(vis, (x1, y1), 4, col, 2)\n cv2.circle(vis, (x2 + w1, y2), 4, col, 2)\n else:\n r = 2\n thickness = 3\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), col, thickness)\n cv2.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), col, thickness)\n cv2.line(vis, (x2 + w1 - r, y2 - r), (x2 + w1 + r, y2 + r), col, thickness)\n cv2.line(vis, (x2 + w1 - r, y2 + r), (x2 + w1 + r, y2 - r), col, thickness)\n return vis", "def plot_images(img_A, img_B, match_A, match_B, name=\"p1\", method=\"greedy\"):\n (hA, wA) = img_A.shape[:2]\n (hB, wB) = img_B.shape[:2]\n vis = np.zeros((max(hA, hB), wA + wB, 3), dtype=\"uint8\")\n vis[0:hA, 0:wA] = img_A\n vis[0:hB, wA:] = img_B\n\n # loop over the matches\n for ptA, ptB in zip(match_A, match_B):\n ptB = ptB + np.array([wA, 0])\n cv2.line(vis, tuple(ptA), tuple(ptB), (0, 255, 0), 1)\n\n cv2.imwrite(\"output/sift_\" + method + \"_\" + name + \".png\", vis)\n cv2.imshow(\"Keypoint matching\", vis)\n cv2.waitKey(0)", "def _get_intersections_in_frame_parameter_space(lines,img_width,img_height):\n pts = []\n for i in range(len(lines)):\n for j in range(i+1,len(lines)): # Compare each point\n x,y = _get_intersection_parameter_space(lines[i],lines[j]) # Get intercetions\n if x < img_width and x >= 0 and y < img_height and y >= 0: # Intercetion is within frame bounds\n pts.append((x,y))\n return pts", "def drawMatches(image_1, image_1_keypoints, image_2, image_2_keypoints, matches):\n # Compute number of channels.\n num_channels = 1\n if len(image_1.shape) == 3:\n num_channels = image_1.shape[2]\n # Separation between images.\n margin = 10\n # Create an array that will fit both images (with a margin of 10 to separate\n # the two images)\n joined_image = np.zeros((max(image_1.shape[0], image_2.shape[0]),\n image_1.shape[1] + image_2.shape[1] + margin,\n 3))\n if num_channels == 1:\n for channel_idx in range(3):\n joined_image[:image_1.shape[0],\n 
:image_1.shape[1],\n channel_idx] = image_1\n joined_image[:image_2.shape[0],\n image_1.shape[1] + margin:,\n channel_idx] = image_2\n else:\n joined_image[:image_1.shape[0], :image_1.shape[1]] = image_1\n joined_image[:image_2.shape[0], image_1.shape[1] + margin:] = image_2\n\n for match in matches:\n image_1_point = (int(image_1_keypoints[match.queryIdx].pt[0]),\n int(image_1_keypoints[match.queryIdx].pt[1]))\n image_2_point = (int(image_2_keypoints[match.trainIdx].pt[0] + \\\n image_1.shape[1] + margin),\n int(image_2_keypoints[match.trainIdx].pt[1]))\n\n cv2.circle(joined_image, image_1_point, 5, (0, 0, 255), thickness = -1)\n cv2.circle(joined_image, image_2_point, 5, (0, 255, 0), thickness = -1)\n cv2.line(joined_image, image_1_point, image_2_point, (255, 0, 0), \\\n thickness = 3)\n return joined_image", "def intersectingEdges(self, x1y1, x2y2, points):\n x1, y1 = x1y1\n x2, y2 = x2y2\n for i in range(4):\n x3, y3 = points[i]\n x4, y4 = points[(i + 1) % 4]\n denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)\n nua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)\n nub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)\n if denom == 0:\n # This covers two cases:\n # nua == nub == 0: Coincident\n # otherwise: Parallel\n continue\n ua, ub = nua / denom, nub / denom\n if 0 <= ua <= 1 and 0 <= ub <= 1:\n x = x1 + ua * (x2 - x1)\n y = y1 + ua * (y2 - y1)\n m = QPointF((x3 + x4) / 2, (y3 + y4) / 2)\n d = distance(m - QPointF(x2, y2))\n yield d, i, (x, y)", "def get_intersect_lines(self, p10, p11, p20, p21):\n t = (p20 - p10) / (p11 - p10 - p21 + p20)\n return p10 + t * (p11 - p10)", "def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P", "def detect_lane_lines(image_edges, image_orig):\n \n HOUGH_RHO = 1 # distance resolution in pixels of the Hough grid\n HOUGH_THETA = np.pi/180 # angular resolution in radians of the Hough grid\n HOUGH_THRESH = 15 # minimum number of votes (intersections in Hough grid cell) <15,20>\n HOUGH_MIN_LEN = 40 # minimum number of pixels making up a line <40,100>\n HOUGH_MAX_GAP = 100 # maximum gap in pixels between connectable line segments <100,250>\n \n LINE_MIN_ANGLE = 20 # degrees\n \n image_wk = np.copy(image_orig) # working copy\n \n # Run Hough transform on edge-detected image\n raw_lines = cv2.HoughLinesP(image_edges, HOUGH_RHO, HOUGH_THETA, HOUGH_THRESH, np.array([]),\n HOUGH_MIN_LEN, HOUGH_MAX_GAP)\n \n # Group lines by left/right angle and side of center line\n left_lines = []\n right_lines = []\n x_center = np.int32((image_wk.shape[1]/2))\n for line in raw_lines:\n for x1, y1, x2, y2 in line:\n theta = np.arctan((y2-y1)/(x2-x1)) /np.pi*180\n \n if (theta < -LINE_MIN_ANGLE) and (x1 < x_center) and (x2 < x_center):\n left_lines.append(line)\n \n elif (theta > LINE_MIN_ANGLE) and (x1 > x_center) and (x2 > x_center):\n right_lines.append(line)\n \n # Draw raw left/right lines on road image\n draw_lines(image_wk, left_lines, (255,0,255), 2)\n draw_lines(image_wk, right_lines, (0,255,0), 2)\n \n # Output road image with drawn raw lines and lists of left/right line coordinates\n return (image_wk, left_lines, right_lines)", "def match3(img1, img2, coordinates1, coordinates2, PATCH_SIZE, threshold=0.7):\n\n\t#creating patches for all points from img1 and img2\n\tcoord1_patches = [make_patch(coordinate, PATCH_SIZE, img1) for coordinate in coordinates1]\n\tcoord2_patches = 
[make_patch(coordinate, PATCH_SIZE, img2) for coordinate in coordinates2]\n\n\t# creating a matrix with dissimilarity measures for all pairs\n\tall_matches = np.zeros((len(coordinates1), len(coordinates2)))\n\n\tfor (x, y), _ in np.ndenumerate(all_matches):\n\t\tall_matches[x,y] = count_difference(coord1_patches[x], coord2_patches[y])\n\n\t#looking for best left-to-right and right-to-left matches\n\tmatches = []\n\t#left-to-right\n\tfor i, coord1 in enumerate(coordinates1):\n\t\tbest_ltr_match = np.argmin(all_matches[i, :]) #best left-to-right match for coord1\n\t\tbest_rtl_match = np.argmin(all_matches[:, best_ltr_match]) #best match for a best match\n\t\tif (i == best_rtl_match): #hurray, there is a super match\n\n\t\t\tmatches.append([coord1, coordinates2[best_ltr_match], all_matches[i, best_ltr_match]])\n\t\n\treturn matches", "def line_intersection(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s10_x = p1_x - p0_x\n s10_y = p1_y - p0_y\n s32_x = p3_x - p2_x\n s32_y = p3_y - p2_y\n\n denom = s10_x * s32_y - s32_x * s10_y\n if denom == 0.0:\n return None # Collinear\n denomPositive = denom > 0\n\n s02_x = p0_x - p2_x\n s02_y = p0_y - p2_y\n s_numer = s10_x * s02_y - s10_y * s02_x\n if (s_numer < 0) == denomPositive:\n return None # No collision\n\n t_numer = s32_x * s02_y - s32_y * s02_x\n if (t_numer < 0) == denomPositive:\n return None # No collision\n\n if (s_numer > denom) == denomPositive or (t_numer > denom) == denomPositive:\n return 0 # No collision\n \n # Collision detected\n t = t_numer / denom\n i_x = p0_x + (t * s10_x)\n i_y = p0_y + (t * s10_y)\n\n return i_x, i_y" ]
[ "0.70122874", "0.64854825", "0.6441403", "0.63546497", "0.6317812", "0.6274026", "0.6253232", "0.6238574", "0.61882734", "0.6186069", "0.6170266", "0.61325264", "0.6126266", "0.610908", "0.6090316", "0.6057525", "0.60160977", "0.6001683", "0.6000284", "0.5994642", "0.5993322", "0.5964865", "0.5930779", "0.5900136", "0.58776677", "0.58758855", "0.58597136", "0.58454436", "0.5805643", "0.5774967" ]
0.7567746
0
Estimate the p_observations_given_state matrix for a set of observations. If observations is a list/array of length N, returns an array of shape (N, S), where element [t, s] is the probability of the observation at time t assuming the system was in fact in state s.
def __call__(self, observations): observations = numpy.asarray(observations) if self.continuous: state_probabilities = [kde(observations) for kde in self.state_distributions] else: state_probabilities = [hist[observations] for hist in self.state_distributions] return numpy.transpose(state_probabilities)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def observation_from_state(self, state):\n state_index = self.latent_variable_markov_chain.index_dict[state]\n return np.random.choice(self.observation_states,\n p=self.emission_probabilities[state_index, :])", "def viterbi(p_observations_given_state, p_transition, p_initial):\n p_observations_given_state = numpy.asarray(p_observations_given_state)\n p_transition = numpy.asarray(p_transition)\n p_initial = numpy.asarray(p_initial)\n N, S = p_observations_given_state.shape\n assert p_transition.shape in {(S, S), (N-1, S, S)}\n if p_transition.shape == (S, S):\n p_transition = numpy.array([p_transition for i in range(N-1)])\n assert numpy.allclose(numpy.sum(p_transition, axis=2), 1)\n assert p_initial.shape == (S,)\n assert numpy.allclose(numpy.sum(p_initial), 1)\n\n # convert all probabilities to log probabilities so we can sum instead of\n # multiplying, which better controls numerical error.\n err = numpy.seterr(divide='ignore') # allow log(0) to go to -inf, as desired\n lp_observations_given_state = numpy.log(p_observations_given_state)\n lp_transition = numpy.log(p_transition)\n lp_initial = numpy.log(p_initial)\n numpy.seterr(**err)\n\n states = numpy.arange(S)\n # path[i] always contains the maximum likelihood sequence of states ending at state i\n path = [[i] for i in states]\n # lp_state contains the current log probability of being in the state given the sequence\n # of observations thus far considered.\n lp_state = lp_observations_given_state[0] + lp_initial\n\n for lp_obs, lp_trans in zip(lp_observations_given_state[1:], lp_transition):\n # For each observation after the first timepoint, construct an (S, S)\n # shape array where [si, sj] contains the log probability of going from\n # state si to state sj between time t and t+1.\n # Assume we know for each state si prob(si at time t), the probability\n # of being in that state at that time, then we can calculate the probability\n # of being in any given state sj at time t+1:\n # prob(transition from si at time t to sj at time t+1) = prob(si at t) *\n # prob(si->sj between t and t+1) *\n # prob(observation at t+1 given state sj)\n # prob(j at time t+1) = max_i(prob(i at time t -> j at time t+1))\n #\n # Thus we merely need to keep updating our estimates for the probability\n # of being in each state at each time, and keep a list of the path that\n # lead to each state.\n #\n # The actual code in use is 100% equivalent to the code below; however it\n # is rather more efficient.\n #\n # lp_transition_t = numpy.zeros((s, s), dtype=float)\n # new_path = []\n # lp_state = []\n # for s_to in states:\n # best_from_lp = -numpy.inf\n # for s_from in states:\n # lp_transition_t[s_from, s_to] = lp_state[s_from] + lp_trans[s_from, s_to] + lp_obs[s_to]\n # if lp_transition_t[s_from, s_to] > best_from_lp:\n # best_from = s_from\n # best_from_lp = lp_transition_t[s_from, s_to]\n # lp_state.append(best_from_lp)\n # new_path.append(path[best_from] + [s_to])\n # path = new_path\n lp_transition_t = lp_state[:,numpy.newaxis] + lp_trans + lp_obs[numpy.newaxis,:]\n best_from = numpy.argmax(lp_transition_t, axis=0)\n path = [path[s_from]+[s_to] for s_to, s_from in enumerate(best_from)]\n lp_state = lp_transition_t[best_from, states]\n last_state = numpy.argmax(lp_state)\n return numpy.array(path[last_state])", "def get_state_observed_values(self):\n obsState = numpy.zeros(self.get_num_variables())\n i = 0\n for v in self.variables:\n obsState[i] = v.read_value_in_fmu(self.fmu)\n i += 1\n return obsState", "def _generate_sample_from_state(self, 
state):\n\n res = []\n for e in range(self.n_emissions):\n cdf = np.cumsum(self.B[e][state, :])\n res.append((cdf > np.random.rand()).argmax())\n return np.asarray(res)", "def predict(\n self,\n observation: Union[np.ndarray, Mapping[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n if state is None:\n timesteps = np.zeros(len(observation), dtype=int)\n else:\n assert len(state) == 1\n timesteps = state[0]\n assert len(timesteps) == len(observation), \"timestep and obs batch size differ\"\n\n if episode_start is not None:\n timesteps[episode_start] = 0\n\n actions: List[int] = []\n for obs, t in zip(observation, timesteps):\n assert self.observation_space.contains(obs), \"illegal state\"\n dist = self.pi[t, obs, :]\n if deterministic:\n actions.append(int(dist.argmax()))\n else:\n actions.append(self.rng.choice(len(dist), p=dist))\n\n timesteps += 1 # increment timestep\n state = (timesteps,)\n return np.array(actions), state", "def initial_probabilities_from_trajectories(n_states, trajectories):\n p = np.zeros(n_states)\n\n for t in trajectories:\n p[t.transitions()[0][0]] += 1.0\n\n return p / len(trajectories)", "def estimate_hmm_params(state_sequences, pseudocount=1, moving=True, time_sigma=1):\n state_sequences = numpy.asarray(state_sequences)\n n, t = state_sequences.shape\n s = state_sequences.max() + 1 # number of states\n initial_counts = numpy.bincount(state_sequences[:,0], minlength=s) + pseudocount\n p_initial = initial_counts / (n + s*pseudocount)\n p_transition = []\n for i in range(t-1):\n from_states = state_sequences[:, i]\n to_states = state_sequences[:, i+1]\n p_trans = []\n for from_s in range(s):\n from_mask = (from_states == from_s)\n tos = to_states[from_mask]\n p_trans.append(numpy.bincount(tos, minlength=s))\n p_transition.append(p_trans)\n p_transition = numpy.array(p_transition) # shape (n-1, s, s)\n if not moving:\n p_transition = p_transition.sum(axis=0) # shape (s, s)\n p_transition += pseudocount\n denom = p_transition.sum(axis=-1) # shape (n-1, s) or (s,)\n denom[denom == 0] = 1 # avoid 0/0 cases. 
Just set them to probability = 0 by converting to 0/1\n p_transition = p_transition / denom[...,numpy.newaxis]\n if moving and time_sigma:\n p_transition = ndimage.gaussian_filter1d(p_transition, time_sigma, axis=0, mode='nearest')\n return p_initial, p_transition", "def get_params_from_seq(self, X, state_sequence): # TODO remove forward-looking params and slice X accordingly for X.ndim == 1\n\n # Slice data\n if X.ndim == 1: # Makes function compatible on higher dimensions\n X = X[(self.window_len - 1): -self.window_len]\n elif X.ndim > 1:\n X = X[:, 0]\n\n # group by states\n diff = np.diff(state_sequence)\n df_states = pd.DataFrame({'state_seq': state_sequence,\n 'X': X,\n 'state_sojourns': np.append([False], diff == 0),\n 'state_changes': np.append([False], diff != 0)})\n\n state_groupby = df_states.groupby('state_seq')\n\n # Transition probabilities\n # TODO only works for a 2-state HMM\n self.tpm = np.diag(state_groupby['state_sojourns'].sum())\n state_changes = state_groupby['state_changes'].sum()\n self.tpm[0, 1] = state_changes[0]\n self.tpm[1, 0] = state_changes[1]\n self.tpm = self.tpm / self.tpm.sum(axis=1).reshape(-1, 1) # make rows sum to 1\n\n # init dist and stationary dist\n self.start_proba = np.zeros(self.n_states)\n self.start_proba[state_sequence[0]] = 1.\n self.stationary_dist = self.get_stationary_dist(tpm=self.tpm)\n\n # Conditional distributions\n self.mu = state_groupby['X'].mean().values.T # transform mean back into 1darray\n self.std = state_groupby['X'].std(ddof=1).values.T", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n probs = np.ones(len(states))/2\n return probs", "def predict_proba(states):\r\n # convert states, compute logits, use softmax to get probability\r\n predicted = agent(torch.Tensor(states))\r\n probs = F.softmax(predicted).data.numpy()\r\n return probs", "def get_expected_states(qs, B, policy):\n n_steps = policy.shape[0]\n n_factors = policy.shape[1]\n\n # initialise posterior predictive density as a list of beliefs over time, including current posterior beliefs about hidden states as the first element\n qs_pi = [qs] + [utils.obj_array(n_factors) for t in range(n_steps)]\n \n # get expected states over time\n for t in range(n_steps):\n for control_factor, action in enumerate(policy[t,:]):\n qs_pi[t+1][control_factor] = B[control_factor][:,:,int(action)].dot(qs_pi[t][control_factor])\n\n return qs_pi[1:]", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. 
\n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)", "def calc_probabilities_one(states, T):\n return np.exp(-beta(T) * states) / calc_partition_function_one(states, T)", "def predict_proba(self, states):\n states = Variable(torch.FloatTensor(states))\n probas = F.softmax(self.network.forward(states))\n return probas.data.numpy()", "def stateOccupationProbabilityGeneration(self):\n self.L = zeros((self.noOfEmmittingStates, self.T))\n\n for j in range(self.noOfEmmittingStates):\n for t in range(self.T):\n self.L[j,t] = (self.alpha[j+1, t+1] * self.beta[j+1, t+1]) / self.observationLikelihood", "def init_start_prob(n_states):\n start_prob_est = np.random.rand(n_states, 1)\n start_prob_est /= np.sum(start_prob_est, 0)\n assert np.isclose(np.sum(start_prob_est, 0), 1.)\n return start_prob_est", "def get_log_prob(self, states, actions):\n dist, _ = self.get_dist_and_mode(states)\n log_probs = dist.log_prob(actions)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return log_probs", "def compute_qvalues(self, state):\n return self.model.predict([np.expand_dims(item, 0) for item in state])[0]", "def compute_probability_of_state(state):\n p = compute_log_probability_of_text(state[\"text\"], state[\"char_to_ix\"], \n state[\"frequency_statistics\"], state[\"transition_matrix\"])\n \n return p", "def observationsMatchingBatchDim(self):\n ret = []\n for inp in range(len(self._observations)):\n all_obs = self._observations[inp].getSlice(0)\n processed = all_obs\n # If we have more than 1 observation per state\n if self._batch_dimensions[inp][0] > 1 and len(all_obs) > 0:\n obs_per_state = self._batch_dimensions[inp][0]\n processed = np.zeros((len(all_obs), obs_per_state, ) + all_obs.shape[1:])\n # for every observation, we create a state\n for i in range(all_obs.shape[0]):\n state = np.zeros((obs_per_state,) + all_obs.shape[1:])\n # everything before state_start_idx is all_obs[0]\n state_start_idx = 0\n\n # start index in all_obs\n start_idx = i - obs_per_state\n\n # if we're in the first obs_per_state observations, we need to fill the first\n # -start_idx elements with all_obs[0]\n if start_idx < 0:\n n_to_fill = -start_idx\n state[0:n_to_fill] = np.repeat(all_obs[0][None, :, :], n_to_fill, axis=0)\n\n # start of where to fill the rest\n state_start_idx = n_to_fill\n\n # new start_idx for\n start_idx = 0\n state[state_start_idx:] = all_obs[start_idx+1:i+1]\n processed[i] = state\n\n ret.append(processed)\n return ret", "def probability(problem, train_ixs, obs_labels, selected_ixs, batch_size, **kwargs):\n points = problem['points']\n model = problem['model']\n\n test_X = points[selected_ixs]\n\n p_x = model.predict_proba(test_X)\n\n return p_x[:,1].reshape(-1)", "def call(self, states):\n dist, mode = self.get_dist_and_mode(states)\n samples = dist.sample()\n log_probs = dist.log_prob(samples)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return mode, samples, log_probs", "def BWMeansEstimate(self):\n for i in range(self.noOfEmmittingStates):\n self.outputProbabilities[i,0] = ((self.L[i,:] *\n self.observationSequence).sum() /\n self.L[i,:].sum())", "def 
probability(self, state, parentstates):\n if not isinstance(state, int):\n raise TypeError(f\"Expected state to be of type int; got type {type(state)}.\")\n if not isinstance(parentstates, dict):\n raise TypeError(f\"Expected parentstates to be of type dict; got type {type(parentstates)}.\")\n if state >= self.no_states:\n raise ValueError(f\"Recieved state={state}; this variable's last state is {self.no_states - 1}.\")\n if state < 0:\n raise ValueError(f\"Recieved state={state}; state cannot be negative.\")\n\n table_index = 0\n for variable in self.parents:\n if variable not in parentstates:\n raise ValueError(f\"Variable {variable} does not have a defined value in parentstates.\")\n #TODO sjekk opp feil her, tydeligvis være variable istedenfor .name\n var_index = self.parents.index(variable)\n table_index += parentstates[variable] * np.prod(self.no_parent_states[:var_index])\n\n return self.table[state, int(table_index)]", "def p(self, i, j, time, observation):\n trans = self.transition_map\n em = self.emission_map\n states = self.states\n O = observation\n\n num = self.alpha(i,time,O)*trans[i][j]*em[i][O[time]]*self.beta(j,time,O)\n denom = sum(self.alpha(m,time,O)*trans[m][n]*em[m][O[time]]*self.beta(n,time+1,O) for n in states for m in states)\n return num / denom", "def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n if not deterministic and np.random.rand() < self.exploration_rate:\n if self.policy.is_vectorized_observation(observation):\n if isinstance(observation, dict):\n n_batch = observation[next(iter(observation.keys()))].shape[0]\n else:\n n_batch = observation.shape[0]\n action = np.array([self.action_space.sample() for _ in range(n_batch)])\n else:\n action = np.array(self.action_space.sample())\n else:\n action, state = self.policy.predict(observation, state, episode_start, deterministic)\n return action, state", "def calc_obs(self, states, covs=None):\n\n if covs is None:\n return states @ self.hx[0].T + self.hx[1]\n\n var = np.diagonal(covs, axis1=1, axis2=2)\n std = np.sqrt(var)\n iv95 = np.stack((states - 1.96*std, states, states + 1.96*std))\n\n obs = (self.hx[0] @ states.T).T + self.hx[1]\n std_obs = (self.hx[0] @ std.T).T\n iv95_obs = np.stack((obs - 1.96*std_obs, obs, obs + 1.96*std_obs))\n\n return iv95_obs, iv95", "def sample_HMM(parameters, T, seed=None):\n\n K = parameters[\"num_states\"]\n pi_0 = parameters[\"init_prob\"]\n A = parameters[\"trans_matrix\"]\n\n D = parameters[\"obs_dim\"]\n mean = parameters[\"mean\"]\n cov = parameters[\"cov\"]\n\n np.random.seed(seed)\n\n # create empty numpy arrays to store samples\n states = np.empty(T, np.int32)\n obs = np.empty((T, D), np.float32)\n\n for t in range(T):\n if t == 0:\n # sample the first state from initial distribution\n states[t] = np.random.choice(K, p=pi_0)\n else:\n # get the next state based on transition matrix (the row\n # corresponding to the previous state)\n states[t] = np.random.choice(K, p=A[states[t - 1]])\n\n # sample observation from the corresponding Gaussian distribution\n obs[t] = np.random.multivariate_normal(\n mean[states[t]], cov[states[t]])\n\n return states, obs", "def o_func(self, state, covs=None, pars=None):\n\n if pars is not None:\n\n obs = []\n for sti, par in zip(state, pars):\n self.set_par(par, get_hx_only=True)\n ob = sti[:, :self.dimp] @ self.hx[0].T + \\\n 
sti[:, self.dimp:] @ self.hx[1].T + self.hx[2]\n obs.append(ob)\n\n return np.array(obs)\n\n try:\n obs = state[..., :self.dimp] @ self.hx[0].T + \\\n state[..., self.dimp:] @ self.hx[1].T + self.hx[2]\n except ValueError as e:\n raise ValueError(\n str(e) + ' you probably want to use the filter with `reduced_form=False`.')\n\n if np.ndim(state) <= 1:\n data = self.data.index if hasattr(self, 'data') else None\n obs = pd.DataFrame(obs, index=data, columns=self.observables)\n\n if covs is None:\n return obs\n\n var = np.diagonal(covs, axis1=1, axis2=2)\n std = np.sqrt(var)\n iv95 = np.stack((state - 1.96*std, state, state + 1.96*std))\n\n std_obs = (np.hstack((self.hx[0], self.hx[1])) @ std.T).T\n iv95_obs = np.stack((obs - 1.96*std_obs, obs, obs + 1.96*std_obs))\n\n return iv95_obs, iv95", "def make_state_appliable(state):\n size = len(state)\n st_appl = np.zeros((size, size), dtype=complex)\n for p1 in range(size):\n for p2 in range(size):\n st_appl[p1, p2] = state[p1, p2] * sqrt(factorial(p1) * factorial(p2))\n return st_appl" ]
[ "0.6838239", "0.58764845", "0.5864366", "0.5794693", "0.57128555", "0.57071984", "0.5683988", "0.55933166", "0.55871", "0.5572618", "0.552575", "0.55044127", "0.5496997", "0.5407656", "0.53901947", "0.5342929", "0.5342017", "0.52807003", "0.52772355", "0.5257948", "0.5251679", "0.5241031", "0.52252173", "0.5216976", "0.521627", "0.520629", "0.5187249", "0.5186513", "0.5184122", "0.51822406" ]
0.6578636
1
Given a set of state sequences, estimate the initial and transition probabilities for each state (i.e. the p_initial and p_transition matrices needed for HMM inference).
def estimate_hmm_params(state_sequences, pseudocount=1, moving=True, time_sigma=1): state_sequences = numpy.asarray(state_sequences) n, t = state_sequences.shape s = state_sequences.max() + 1 # number of states initial_counts = numpy.bincount(state_sequences[:,0], minlength=s) + pseudocount p_initial = initial_counts / (n + s*pseudocount) p_transition = [] for i in range(t-1): from_states = state_sequences[:, i] to_states = state_sequences[:, i+1] p_trans = [] for from_s in range(s): from_mask = (from_states == from_s) tos = to_states[from_mask] p_trans.append(numpy.bincount(tos, minlength=s)) p_transition.append(p_trans) p_transition = numpy.array(p_transition) # shape (n-1, s, s) if not moving: p_transition = p_transition.sum(axis=0) # shape (s, s) p_transition += pseudocount denom = p_transition.sum(axis=-1) # shape (n-1, s) or (s,) denom[denom == 0] = 1 # avoid 0/0 cases. Just set them to probability = 0 by converting to 0/1 p_transition = p_transition / denom[...,numpy.newaxis] if moving and time_sigma: p_transition = ndimage.gaussian_filter1d(p_transition, time_sigma, axis=0, mode='nearest') return p_initial, p_transition
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initial_probabilities_from_trajectories(n_states, trajectories):\n p = np.zeros(n_states)\n\n for t in trajectories:\n p[t.transitions()[0][0]] += 1.0\n\n return p / len(trajectories)", "def init_start_prob(n_states):\n start_prob_est = np.random.rand(n_states, 1)\n start_prob_est /= np.sum(start_prob_est, 0)\n assert np.isclose(np.sum(start_prob_est, 0), 1.)\n return start_prob_est", "def _updateInitialProbabilities(self): \n N = self.N\n K = self.K\n\n for i in range(1,self.K+1):\n s = 0\n updated_prob = 0\n for n in range(1,self.N+1):\n s = s+1\n updated_prob = updated_prob + self.posterior_state_trellis[n][(1,i)]\n self.state_initial_prob[i] = (updated_prob/s)", "def viterbi(self, hmm, initial, emissions):\n probabilities = hmm.emission(emissions[0]) * initial\n stack = []\n \n for emission in emissions[5:]:\n trans_probabilities = hmm.transition_probabilities * np.row_stack(probabilities) #Matrix for transition probabilities\n max_col_ixs = np.argmax(trans_probabilities, axis=0)\n probabilities = hmm.emission(emission) * trans_probabilities[max_col_ixs, np.arange(hmm.num_states)] #Probabilities\n stack.append(max_col_ixs) #Store the axis and the data in the stack\n state_seq = [np.argmax(probabilities)] #Store the resulted probabilities\n\n while stack:\n max_col_ixs = stack.pop() #Take out the top data store in stack\n state_seq.append(max_col_ixs[state_seq[-1]])\n state_seq.reverse()\n return state_seq", "def viterbi(p_observations_given_state, p_transition, p_initial):\n p_observations_given_state = numpy.asarray(p_observations_given_state)\n p_transition = numpy.asarray(p_transition)\n p_initial = numpy.asarray(p_initial)\n N, S = p_observations_given_state.shape\n assert p_transition.shape in {(S, S), (N-1, S, S)}\n if p_transition.shape == (S, S):\n p_transition = numpy.array([p_transition for i in range(N-1)])\n assert numpy.allclose(numpy.sum(p_transition, axis=2), 1)\n assert p_initial.shape == (S,)\n assert numpy.allclose(numpy.sum(p_initial), 1)\n\n # convert all probabilities to log probabilities so we can sum instead of\n # multiplying, which better controls numerical error.\n err = numpy.seterr(divide='ignore') # allow log(0) to go to -inf, as desired\n lp_observations_given_state = numpy.log(p_observations_given_state)\n lp_transition = numpy.log(p_transition)\n lp_initial = numpy.log(p_initial)\n numpy.seterr(**err)\n\n states = numpy.arange(S)\n # path[i] always contains the maximum likelihood sequence of states ending at state i\n path = [[i] for i in states]\n # lp_state contains the current log probability of being in the state given the sequence\n # of observations thus far considered.\n lp_state = lp_observations_given_state[0] + lp_initial\n\n for lp_obs, lp_trans in zip(lp_observations_given_state[1:], lp_transition):\n # For each observation after the first timepoint, construct an (S, S)\n # shape array where [si, sj] contains the log probability of going from\n # state si to state sj between time t and t+1.\n # Assume we know for each state si prob(si at time t), the probability\n # of being in that state at that time, then we can calculate the probability\n # of being in any given state sj at time t+1:\n # prob(transition from si at time t to sj at time t+1) = prob(si at t) *\n # prob(si->sj between t and t+1) *\n # prob(observation at t+1 given state sj)\n # prob(j at time t+1) = max_i(prob(i at time t -> j at time t+1))\n #\n # Thus we merely need to keep updating our estimates for the probability\n # of being in each state at each time, and keep a 
list of the path that\n # lead to each state.\n #\n # The actual code in use is 100% equivalent to the code below; however it\n # is rather more efficient.\n #\n # lp_transition_t = numpy.zeros((s, s), dtype=float)\n # new_path = []\n # lp_state = []\n # for s_to in states:\n # best_from_lp = -numpy.inf\n # for s_from in states:\n # lp_transition_t[s_from, s_to] = lp_state[s_from] + lp_trans[s_from, s_to] + lp_obs[s_to]\n # if lp_transition_t[s_from, s_to] > best_from_lp:\n # best_from = s_from\n # best_from_lp = lp_transition_t[s_from, s_to]\n # lp_state.append(best_from_lp)\n # new_path.append(path[best_from] + [s_to])\n # path = new_path\n lp_transition_t = lp_state[:,numpy.newaxis] + lp_trans + lp_obs[numpy.newaxis,:]\n best_from = numpy.argmax(lp_transition_t, axis=0)\n path = [path[s_from]+[s_to] for s_to, s_from in enumerate(best_from)]\n lp_state = lp_transition_t[best_from, states]\n last_state = numpy.argmax(lp_state)\n return numpy.array(path[last_state])", "def __init__(self,numstates,alphabet,fixed=True,pi_values={},transition_map={},emission_map={}):\n self.states = frozenset(range(numstates))\n self.alphabet = frozenset(alphabet)\n\n assert (len(self.alphabet) != 0)\n\n #Initialize the pi values\n #start the probability mass at 1, and reduce it for every element of pi_values\n mass = 1\n numvalues = numstates\n self.pi_values = {}\n for (k,v) in pi_values.items():\n self.pi_values[k] = v\n mass -= v\n numvalues -= 1\n if numvalues > 0:\n #assign the remaining mass evenly\n if fixed:\n p = mass/numvalues\n for s in self.states.difference(pi_values.keys()):\n self.pi_values[s] = p\n #If the probability distribution is not fixed, distribute the remaining mass randomly\n else:\n d = {}\n for s in self.states.difference(pi_values.keys()):\n d[s] = random()\n normalize(d)\n for s in d:\n self.pi_values[s] = mass*d[s]\n\n #Initialize the transition matrix\n self.transition_map = {}\n for (s1,d) in transition_map.items():\n self.transition_map[s1] = {}\n for s2 in d:\n self.transition_map[s1][s2] = d[s2]\n #As with pi_values, we compute the reserve probability mass, but we must do so on a state by state basis\n for s1 in self.states:\n if s1 not in transition_map:\n self.transition_map[s1] = {}\n mass = 1\n numvalues = numstates\n for s2 in self.states:\n if s2 in self.transition_map[s1]:\n mass -= self.transition_map[s1][s2]\n numvalues -= 1\n if numvalues > 0:\n #and assign that remaining mass evenly\n if fixed:\n p = mass / numvalues\n for s2 in self.states:\n if s2 not in self.transition_map[s1]:\n self.transition_map[s1][s2] = p\n #If the probability distribution is not fixed, distribute the remaining mass randomly\n else:\n d = {}\n for s2 in self.states:\n if s2 not in self.transition_map[s1]:\n d[s2] = random()\n normalize(d)\n for s2 in d:\n self.transition_map[s1][s2] = mass*d[s2]\n\n #Initialize the emission map\n self.emission_map = {}\n for s in self.states:\n #If the state has nothing specified, it takes on the reasonable default\n if s not in emission_map:\n #assign equal probability to each letter in each state\n if fixed:\n p = 1/len(self.alphabet)\n self.emission_map[s] = { l:p for l in self.alphabet }\n #If the probability distribution is not fixed, distribute the remaining mass randomly\n else:\n d = { k:random() for k in self.alphabet }\n normalize(d)\n self.emission_map[s] = {}\n for k in d:\n self.emission_map[s][k] = mass*d[k]\n\n else:\n mass = 1\n numvalues = len(self.alphabet)\n state_map = emission_map[s]\n self.emission_map[s] = {}\n #Write all of the 
values that we have into the map\n for l in state_map:\n v = state_map[l]\n self.emission_map[s][l] = v\n mass -= v\n numvalues -= 1\n #Assign the remainder probability\n if numvalues > 0:\n if fixed:\n p = mass / numvalues\n for l in self.alphabet.difference(state_map.keys()):\n self.emission_map[s][l] = p\n #If the probability distribution is not fixed, distribute the remaining mass randomly\n else:\n d = { k:random() for k in self.alphabet.difference(state_map.keys()) }\n normalize(d)\n for k in d:\n self.emission_map[s][k] = mass*d[k]\n\n self.current_state = select_from_probability_dict(random(),self.pi_values)", "def get_params_from_seq(self, X, state_sequence): # TODO remove forward-looking params and slice X accordingly for X.ndim == 1\n\n # Slice data\n if X.ndim == 1: # Makes function compatible on higher dimensions\n X = X[(self.window_len - 1): -self.window_len]\n elif X.ndim > 1:\n X = X[:, 0]\n\n # group by states\n diff = np.diff(state_sequence)\n df_states = pd.DataFrame({'state_seq': state_sequence,\n 'X': X,\n 'state_sojourns': np.append([False], diff == 0),\n 'state_changes': np.append([False], diff != 0)})\n\n state_groupby = df_states.groupby('state_seq')\n\n # Transition probabilities\n # TODO only works for a 2-state HMM\n self.tpm = np.diag(state_groupby['state_sojourns'].sum())\n state_changes = state_groupby['state_changes'].sum()\n self.tpm[0, 1] = state_changes[0]\n self.tpm[1, 0] = state_changes[1]\n self.tpm = self.tpm / self.tpm.sum(axis=1).reshape(-1, 1) # make rows sum to 1\n\n # init dist and stationary dist\n self.start_proba = np.zeros(self.n_states)\n self.start_proba[state_sequence[0]] = 1.\n self.stationary_dist = self.get_stationary_dist(tpm=self.tpm)\n\n # Conditional distributions\n self.mu = state_groupby['X'].mean().values.T # transform mean back into 1darray\n self.std = state_groupby['X'].std(ddof=1).values.T", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n probs = np.ones(len(states))/2\n return probs", "def get_matrix(self):\n matrix = np.zeros([len(self.states), len(self.states)])\n starting_states = []\n transitions = []\n\n for chords in self.training_data:\n states = []\n is_starting_state = True\n chunks = [chords[x:x+self.order] for x in range(0,\n len(chords), self.order)]\n for chunk in chunks:\n chunk_string = \" \".join(chunk)\n if is_starting_state:\n starting_states.append(chunk_string)\n is_starting_state = False\n states.append(chunk_string)\n\n for i in range(0, len(states)):\n if i < (len(states)) - 1:\n transitions.append([states[i], states[i + 1]])\n else:\n transitions.append([states[i]])\n\n self.starting_probabilities = np.zeros([len(self.states)])\n\n for transition in transitions:\n for row, row_contents in enumerate(self.transitions):\n for col, _ in enumerate(row_contents):\n if transition == self.transitions[row][col]:\n matrix[row][col] += 1\n\n for i, state in enumerate(self.states):\n for j, possible_state in enumerate(starting_states):\n if state == possible_state:\n self.starting_probabilities[j] += 1\n\n num = sum(self.starting_probabilities)\n for i, prob in enumerate(self.starting_probabilities):\n self.starting_probabilities[i] = prob / num\n\n for m in range(len(matrix)):\n num = sum(matrix[m])\n if int(num) is not 0:\n for i in range(len(matrix[m])):\n matrix[m][i] = (matrix[m][i] / num)\n else:\n matrix[m] = self.starting_probabilities\n return matrix", "def random_start_probs(self) -> np.ndarray:\n return self.random_state.dirichlet(np.ones(self.n_states), size=1).flatten()", "def 
priorProb(self, state):\n actions = []\n for i in range(0, 10):\n actions.append(((i, i+1), random.uniform(0, 1))) \n \n return actions", "def hmmestimate(seq,states):\r\n numStates = max(states) \r\n seqLen = len(seq) \r\n tr = np.zeros((numStates,numStates))\r\n\r\n # count up the transitions from the state path\r\n for count in range(seqLen-1):\r\n tr[states[count]-1,states[count+1]-1] += 1\r\n\r\n trRowSum = np.array(np.sum(tr,1))\r\n\r\n #% if we don't have any values then report zeros instead of NaNs.\r\n trRowSum[trRowSum == 0] = -np.inf\r\n\r\n #% normalize to give frequency estimate.\r\n tr = tr/np.tile(trRowSum[:,None],numStates)\r\n\r\n return tr", "def uniform_start_probs(self) -> np.ndarray:\n return np.ones(self.n_states) / self.n_states", "def _starting_prob(self, s):\n return self._starting_state_distribution.pdf(s)", "def get_initial_parameters(token_segs):\r\n estems = {} # tracks the average probability of each root\r\n esuffix = {} # tracks the average probability of each suffix\r\n etrans = {} # tracks the average probability of each (transition, feature) pair\r\n eftrans = {} # tracks the average probability of each feature (interface between stem and suffix)\r\n\r\n # collect the probabilities of each object, to be normalized (divided by their totals) later\r\n for ts_list in token_segs:\r\n avg_prob = 1.0 / len(ts_list)\r\n for ts in ts_list:\r\n root = ts.root\r\n rand_val = 1.0\r\n if root in estems:\r\n estems[root] += rand_val * avg_prob\r\n else: estems[root] = rand_val * avg_prob\r\n\r\n suffix = ts.suffix\r\n if suffix in esuffix:\r\n esuffix[suffix] += rand_val * avg_prob\r\n else: esuffix[suffix] = rand_val * avg_prob\r\n\r\n trans = ts.trans\r\n ftrans = feature(root, suffix)\r\n if (trans, ftrans) in etrans:\r\n etrans[(trans, ftrans)] += rand_val * avg_prob\r\n else: etrans[(trans, ftrans)] = rand_val * avg_prob\r\n\r\n if ftrans in eftrans:\r\n eftrans[ftrans] += rand_val * avg_prob\r\n else: eftrans[ftrans] = rand_val * avg_prob\r\n\r\n # divide by the totals\r\n probstems = estems\r\n probsum = sum(probstems.values())\r\n for stem in probstems:\r\n probstems[stem] /= probsum\r\n\r\n probsuffix = esuffix\r\n probsum = sum(probsuffix.values())\r\n for suffix in probsuffix:\r\n probsuffix[suffix] /= probsum\r\n\r\n probtrans = etrans\r\n for trans, ftrans in probtrans:\r\n probtrans[(trans, ftrans)] /= eftrans[ftrans]\r\n\r\n return probstems, probsuffix, probtrans", "def prob_given_state(self, start=1, end=len(self.T)): # , start, end):\n\n # for state_index in range(len(self.tagset)):\n # self.alpha[1][state_index] = 0\n\n raise NotImplementedError", "def learn_hmm(dict_path = wordlist, training_inputs = inputs_path,\n training_outputs = outputs_path):\n init_counts()\n words = open ( dict_path, 'r' )\n states = set(['word_start'])\n trans = {'word_start' : {}}\n observations = tuple ( punctuation + ' ' + digits + ascii_lowercase)\n \n # Compute states and state transition probabilities\n for w in words:\n w = w.lower()\n w = w[:-1] # remove EOL char\n for i in range( len(w) ): \n new = w[:i+1]\n if new not in states:\n states.add(new)\n trans[new] = {}\n if i == 0:\n trans['word_start'][new] = eta * prefix_rel_freq(w[:i+1],'')\n else:\n prev = w[:i]\n trans[prev][new] = eta * prefix_rel_freq(w[:i+1],w[:i])\n if i == len(w) - 1: # last character in a word\n trans[new]['word_start'] = word_rel_freq(w,w[:i])\n\n for state in trans:\n trans[state][state] = 1 - eta\n states = list(states)\n num_states = len(states)\n num_obs = len(observations)\n\n # 
Compute observation emission probabilities via MLE\n observed_chars = (char.lower()\n for line in open(training_inputs)\n for char in line[:-1])\n true_chars = (char.lower()\n for line in open(training_outputs)\n for char in line[:-1])\n paired = itertools.izip(observed_chars,true_chars)\n\n def c_to_i(s):\n if s == 'word_start':\n return len(ascii_lowercase)\n else:\n return ascii_lowercase.index(s)\n\n def c_from_i(i):\n if i == len(ascii_lowercase):\n return 'word_start'\n else:\n return ascii_lowercase[i]\n\n def to_index(letter,ob):\n return c_to_i(letter) * num_obs + observations.index(ob) \n def from_index(i):\n char_index = i / num_obs\n ob_index = i % num_obs\n return (c_from_i(char_index),observations[ob_index])\n\n # Construct linear programming problem for cvxopt\n P = matrix(numpy.zeros( (27 * num_obs,27 * num_obs) ),tc='d')\n q = matrix(numpy.zeros(27 * num_obs),tc='d')\n G = matrix(numpy.diag([-1] * (27 * num_obs)),tc='d')\n h = matrix(numpy.zeros(27 * num_obs),tc='d')\n A = numpy.zeros( (27, 27*num_obs) )\n b = matrix(numpy.ones(27),tc='d')\n # construct q\n for o,a in paired:\n if o not in observations: continue\n if a == '-':\n q[to_index(last_a,o)] += 1\n elif a != ' ':\n if a not in ascii_lowercase: continue\n q[to_index(a,o)] += 1\n last_a = a\n else:\n q[to_index('word_start',o)] += 1\n last_a = 'word_start'\n q = -q # Invert since we want maximum not minimum\n\n # construct A\n for i in range(27):\n for k in range(num_obs):\n A[i][i * num_obs + k] = 1\n A = matrix(A,tc='d')\n\n # Solve linear program\n sol = list(solvers.qp(P,q,G,h,A,b)['x'])\n\n # Convert solution into dictionary of emission probabilities\n emission_probs = dict( [(s,{}) for s in states] )\n for s in emission_probs.keys():\n for o in observations:\n if s != 'word_start':\n emission_probs[s][o] = sol[to_index(s[-1],o)]\n else:\n emission_probs[s][o] = sol[to_index(s,o)]\n\n return (tuple(states), observations, trans, emission_probs)", "def make_transition_probs(self):\n n = len(self.speakers) # TODO why this line ???\n transitions = np.random.randint(5, size=(n, n)) + 1\n transitions += transitions.transpose()\n for i in range(0, math.floor(n / 2)):\n s1 = np.random.randint(n)\n s2 = np.random.randint(n)\n transitions[s1][s2] += 10\n transitions[s2][s1] += 8\n return(transitions)", "def get_transition_prob(self, state, action, next_state):\n return self.get_next_states(state, action).get(next_state, 0.0)", "def estimate_transition_params(symbol_symbol_counts, symbol_counts):\n\n transition_probabilities = {}\n for symbol1 in symbol_symbol_counts:\n transition_probabilities[symbol1] = {}\n for symbol2 in symbol_symbol_counts[symbol1]:\n if symbol_counts[symbol1] == 0:\n transition_probabilities[symbol1][symbol2] = 0\n else:\n transition_probabilities[symbol1][symbol2] = float(symbol_symbol_counts[symbol1][symbol2])/symbol_counts[symbol1]\n\n return transition_probabilities", "def _transition_probability(self, s, a, s1):\n unreachable_states = [4, # F with prod_score == 4\n 5] # M with prod_score == 0\n\n if s1 in unreachable_states:\n return 0\n else:\n return 1 / (self.n_states - len(unreachable_states))", "def calc_probabilities_one(states, T):\n return np.exp(-beta(T) * states) / calc_partition_function_one(states, T)", "def predict_proba(states):\r\n # convert states, compute logits, use softmax to get probability\r\n predicted = agent(torch.Tensor(states))\r\n probs = F.softmax(predicted).data.numpy()\r\n return probs", "def predict_next_state_gt(self, states, actions):\n # TODO: write your code 
here\n\n # return [self.env.get_nxt_state(states[i], actions) for i in range(self.num_particles)]\n return np.array([[self.env.get_nxt_state(states[j][i], actions[j]) for i in range(self.num_particles)] for j in range(self.popsize)])", "def predict(self, states, actions):\n obs = normalize(states,self.normalization['observations'][0],self.normalization['observations'][1])\n #delta = normalize(delta,normalization['delta'])\n acs = normalize(actions,self.normalization['actions'][0],self.normalization['actions'][1])\n done=False\n start=0;end=0\n test_count=len(states)\n #print(test_count)\n prediction=self.sess.run(self.delta_prediction, feed_dict={self.sy_ob:obs, self.sy_ac:acs })\n \n \n return denormalize(prediction,self.normalization['delta'][0],self.normalization['delta'][1]) + states", "def shannon_parry_markov_chain(self):\n from sage.modules.free_module_element import vector\n if not self.is_deterministic():\n raise NotImplementedError(\"Automaton must be deterministic.\")\n if not self.digraph().is_aperiodic():\n raise NotImplementedError(\"Automaton must be aperiodic.\")\n if not self.digraph().is_strongly_connected():\n raise NotImplementedError(\"Automaton must be strongly connected.\")\n if not all(s.is_final for s in self.iter_states()):\n raise NotImplementedError(\"All states must be final.\")\n from sage.rings.integer_ring import ZZ\n M = self.adjacency_matrix().change_ring(ZZ)\n states = {state: i for i, state in enumerate(self.iter_states())}\n w_all = sorted(M.eigenvectors_right(),\n key=lambda x: abs(x[0]),\n reverse=True)\n w = w_all[0][1][0]\n mu = w_all[0][0]\n u_all = sorted(M.eigenvectors_left(),\n key=lambda x: abs(x[0]),\n reverse=True)\n u = u_all[0][1][0]\n u = 1/(u*w) * u\n final = vector(int(s.is_final) for s in self.iter_states())\n ff = u*final\n\n assert u*w == 1\n P = Transducer(initial_states=[s.label() for s in self.iter_initial_states()],\n final_states=[s.label() for s in self.iter_final_states()],\n on_duplicate_transition=duplicate_transition_add_input)\n for t in self.iter_transitions():\n P.add_transition(t.from_state.label(),\n t.to_state.label(),\n w[states[t.to_state]]/w[states[t.from_state]]/mu,\n t.word_in)\n for s in self.iter_states():\n P.state(s.label()).color = 1/(w[states[s]] * ff)\n P.state(s.label()).initial_probability = w[states[s]] * u[states[s]]\n return P", "def pathProb(self, path):\n # Establish initial state distribution.\n estState = []\n for s in range(self.P):\n estState.append(self.initial(path[0][0], s))\n logProb = 0\n for step in range(1, len(path)):\n # Calculate a softmax probability that the agent uses each alpha\n # vector, then sort by action.\n lastF = path[step-1][0]\n lastP = path[step-1][1]\n thisF = path[step][0]\n thisP = path[step][1]\n\n # These are log probs.\n actionProbs = [0.0]*self.A\n totalWeight = float('-inf')\n maxScore = float('-inf')\n for action in range(self.A):\n score = self.valueLookAhead(lastF, estState, action)\n maxScore = max(score, maxScore)\n actionProbs[action] = self.tau * score\n totalWeight = logAdd(totalWeight, self.tau * score)\n # Tally up the probability that the agent goes to the correct state.\n pTrans = 0\n actionTable = {}\n for action in range(self.A):\n nextSTable = self.trans(lastF, lastP)[action]\n if not (thisF, thisP) in nextSTable:\n continue\n pThisAction = nextSTable[(thisF, thisP)] * \\\n math.exp(actionProbs[action] - totalWeight)\n actionTable[action] = pThisAction\n pTrans += pThisAction\n if pTrans == 0:\n return float('-inf')\n logProb += math.log(pTrans)\n\n 
# Choose which action we are taking.\n for action in actionTable:\n actionTable[action] /= pTrans\n thisAction = randomSample(actionTable) #random!\n\n # Update the agent's guess of the hidden states.\n nextEstState = [0.0]*self.P\n thisObs = randomSample(self.obs(lastF, lastP)) #random!\n for guessP in range(self.P):\n # What is the probability we are in state guessP?\n pGuessP = estState[guessP] * self.obs(lastF, guessP)[thisObs]\n # Given that we are in state guessP, what is the probability that\n # we move to each new state in P?\n newStates = self.trans(lastF, guessP)[thisAction]\n for newState, prob in newStates.iteritems():\n if newState[0] == thisF:\n nextEstState[newState[1]] += pGuessP * prob\n # Normalize nextEstState.\n estState = [i/sum(nextEstState) for i in nextEstState]\n return logProb", "def train(self, instance_list):\n \"\"\"Observation probabilities b_t=c(o_t=x,q_t=y)/c(q_t=y)\n Transition probabilities a_t=c(q_t-1=i,q_t=j)/c(q_t-1=i)\n Based on the empirical counts from _collect_counts, I compute probabilities for each word being emitted in given state and for each state-to-state transition, including START->state.\n <UNK> is used to account for unseen features in the training set.\n \"\"\"\n # Get labels and final V (replacing rare words with <UNK>) for the training data\n self.get_labels(instance_list)\n self.get_rare_words(instance_list)\n self.get_V(instance_list)\n\n # Get maps of label and indices:\n for i in xrange(len(self.labels)):\n self.label2index[self.labels[i]] = i\n self.index2label.append(self.labels[i])\n\n # transition probabilities: matrix labels x labels\n self.transition_matrix = numpy.zeros((len(self.labels)+1,len(self.labels))) #a\n # observation probabilities: matrix of V x labels\n self.emission_matrix = numpy.zeros((len(self.V),len(self.labels))) #b\n self.transition_count_table = numpy.zeros((len(self.labels)+1,len(self.labels)))\n self.feature_count_table = numpy.zeros((len(self.V),len(self.labels)))\n self._collect_counts(instance_list)\n #TODO: estimate the parameters from the count tables\n for instance in instance_list:\n index = 0\n for t in instance.data:\n index = instance.data.index(t)\n if t in self.V:\n self.emission_matrix[self.V.index(t)][self.labels.index(instance.label[index])] = self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])]/self.feature_count_table[:,self.labels.index(instance.label[index])].sum()\n else:\n self.emission_matrix[self.V.index('<UNK>')][self.labels.index(instance.label[index])] = self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])]/self.feature_count_table[:,self.labels.index(instance.label[index])].sum()\n\n if index > 0:\n self.transition_matrix[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] = self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])]/self.transition_count_table[self.labels.index(instance.label[index-1]), :].sum()\n else:\n self.transition_matrix[len(self.labels)][self.labels.index(instance.label[index])] = self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])]/self.transition_count_table[len(self.labels), :].sum()", "def __init__(self, transition_probs, rewards, initial_state=None):\n self._check_param_consistency(transition_probs, rewards)\n self._transition_probs = transition_probs\n self._rewards = rewards\n self._initial_state = initial_state\n self.n_states = len(transition_probs)\n 
self.reset()", "def __init__(self, num_states, observation_states, prior_probabilities,\n transition_matrix, emission_probabilities):\n # As latent variables form a Markov chain, we can use\n # use the previous defined MarkovChain class to create it\n self.latent_variable_markov_chain = MarkovChain(\n transition_matrix=transition_matrix,\n states=['z{index}'.format(index=index) for index in\n range(num_states)],\n )\n self.observation_states = observation_states\n self.prior_probabilities = np.atleast_1d(prior_probabilities)\n self.transition_matrix = np.atleast_2d(transition_matrix)\n self.emission_probabilities = np.atleast_2d(emission_probabilities)" ]
[ "0.6737312", "0.6540201", "0.643304", "0.64260924", "0.6414045", "0.6372045", "0.6327534", "0.63086444", "0.62992555", "0.62833136", "0.6242119", "0.61996263", "0.61793613", "0.6153507", "0.6136968", "0.6071576", "0.6045001", "0.6036908", "0.6011966", "0.5985723", "0.5974724", "0.5929086", "0.59289473", "0.59241855", "0.5917426", "0.59067976", "0.59027004", "0.587162", "0.5860927", "0.583734" ]
0.7366758
0
The bit position getter.
def bit_pos(self): return self.byte_ptr * 8 + self.bit_ptr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_position(self):\n\n return (self._fileobj.tell() - self._pos) * 8 - self._bits", "def get_position(self) -> Tuple[int]:\n return self.position.copy()", "def __getpos__(self, num):\n return self.num_to_pos[num]", "def get_bit(num, position):\n\treturn (num >> position) & 0b1", "def get_bit(self):\n try:\n current_byte = self.contents[self.current_bit_position >> 3]\n except IndexError:\n raise EmptyStreamError(f\"Attempting read at bit position {self.current_bit_position} \"\n f\"(byte {self.current_bit_position >> 3})\")\n bit = min(1, current_byte & (1 << (7 - (self.current_bit_position % 8))))\n self.current_bit_position += 1\n return bit", "def get_bit_position(x, k):\n\n return x & (1 << k)", "def byteIndex(self):\n return self.offset", "def __getitem__(self, pos):\n row, column = pos\n if row <= self.n_rows-1 and column <= self.n_columns-1:\n return self.bits[row][column]\n else:\n return False", "def __int__(self):\n\n return self.bitflags", "def access_bits(self):\n return self.unpack_dword(0xC) & 0xFF", "def get_position(self) -> typing.Tuple[int, int]:\n raise NotImplementedError", "def position(self) -> int:\n return self.__pos", "def getPosition(self):\n c = self.connection.getChannel(self.chanNamePrefix % 'position')\n\n return c.read()", "def offset(self):\n return self.unpack_dword(0x0)", "def get_pos(self) -> tuple:\n return self.pos", "def _get_pos(self):\n return self._pos", "def position(self):\n return self._position", "def position(self):\n return (self.__position)", "def get_bit_positions(bit_mask):\n\tbit_positions = []\n\t# find bit positions of enabled bits in mask\n\tfor i in range(16):\n\t\tif (bit_mask & (1 << i)) != 0:\n\t\t\tbit_positions.append(i)\n\treturn bit_positions", "def get_pos(self):\n return self.pos", "def bit(self, idx: int) -> int:\n pos = self.start() + idx\n chunk = self.raw_key()[(pos // 8)]\n bit = pos % 8\n return ((1 << bit) & chunk) >> bit", "def get_pos(self):\n\n return self._pos", "def get_pos(self):\r\n return self.pos", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def bit_pos(self, bits):\n\n if bits > len(self):\n raise BitReaderError('bit_pos(%s) is out of boundary', bits)\n\n self.byte_ptr, self.bit_ptr = divmod(bits, 8)", "def getPosition(self):\n if self.connection is not None:\n c = self.connection.getChannel(self.chanNamePrefix % 'position')\n\n return c.read()", "def position(self) -> int:\n return self._position", "def position(self) -> int:\n return self._position" ]
[ "0.70577097", "0.68061477", "0.67975134", "0.6695327", "0.66924286", "0.6679596", "0.66726327", "0.6656201", "0.661305", "0.6570994", "0.65080434", "0.64964867", "0.64851624", "0.6482087", "0.6471589", "0.64665365", "0.646452", "0.6454277", "0.6441795", "0.64201766", "0.6404283", "0.63956153", "0.63927287", "0.63801146", "0.63801146", "0.63801146", "0.6378664", "0.6351728", "0.6341693", "0.6341693" ]
0.8436633
0
The bit position setter.
def bit_pos(self, bits): if bits > len(self): raise BitReaderError('bit_pos(%s) is out of boundary', bits) self.byte_ptr, self.bit_ptr = divmod(bits, 8)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bit_pos(self):\n\n return self.byte_ptr * 8 + self.bit_ptr", "def initializeOffsets(self, bitPosition: int, _value: int) -> int:\n\n return bitPosition + self.bitSizeOf()", "def initializeOffsets(self, bitPosition: int, _value: int) -> int:\n\n return bitPosition + self.bitSizeOf()", "def __setitem__(self, pos, is_on):\n row, column = pos\n self.bits[row][column] = is_on", "def __setitem__(self, n, bit):\n self.num ^= (np.uint64(-bit) ^ self.num) & (UINT64_ONE << np.uint64(n))", "def initializeOffsets(bitPosition: int, _value: bool) -> int:\n\n return bitPosition + BoolArrayTraits.bitSizeOf()", "def initializeOffsets(bitPosition: int, value: typing.Any) -> int:\n\n return value.initializeOffsets(bitPosition)", "def set_position(self, position):\n raise NotImplementedError()", "def position(self, pos: int):\n self.__pos = pos", "def setbit(self, key, offset, value):\n key = self._encode(key)\n index, bits, mask = self._get_bits_and_offset(key, offset)\n\n if index >= len(bits):\n bits.extend(b\"\\x00\" * (index + 1 - len(bits)))\n\n prev_val = 1 if (bits[index] & mask) else 0\n\n if value:\n bits[index] |= mask\n else:\n bits[index] &= ~mask\n\n self.redis[key] = bytes(bits)\n\n return prev_val", "def set_position(self, position):\n self.position = tuple(position)", "def initializeOffsets(bitPosition: int, value: int) -> int:\n\n return bitPosition + VarIntArrayTraits.bitSizeOf(bitPosition, value)", "def set_pos(self, p: tuple) -> None:\n self.pos = p", "def initializeOffsets(bitPosition: int, value: int) -> int:\n\n return bitPosition + VarUIntArrayTraits.bitSizeOf(bitPosition, value)", "def initializeOffsets(self, bitPosition: int) -> int:\n\n endBitPosition = bitPosition\n size = len(self._rawArray)\n if self._isAuto:\n endBitPosition += getBitSizeOfVarSize(size)\n\n for index in range(size):\n if self._setOffsetMethod is not None:\n endBitPosition = alignTo(8, endBitPosition)\n self._setOffsetMethod(index, endBitPosition)\n endBitPosition = self._arrayTraits.initializeOffsets(endBitPosition, self._rawArray[index])\n\n return endBitPosition", "def initializeOffsets(bitPosition: int, value: BitBuffer) -> int:\n\n return bitPosition + BitBufferArrayTraits.bitSizeOf(bitPosition, value)", "def set_bit(self, index_of_byte, index_of_bit, new_value):\n if index_of_bit >= self.binary_size:\n print(\"You tried to modify a byte at %d index. This cannot be done. 
The maximum index is %d.\"%(index_of_bit, self.binary_size - 1))\n else:\n new_value = str(new_value)\n byte = self.binary_array[index_of_byte]\n new_byte = byte[0:index_of_bit] + new_value\n if index_of_bit < self.binary_size - 1: # you aren't changing the final bit in the byte\n new_byte += byte[index_of_bit + 1:]\n #apply changes\n self.binary_array[index_of_byte] = new_byte", "def initializeOffsets(bitPosition: int, value: int) -> int:\n\n return bitPosition + VarInt16ArrayTraits.bitSizeOf(bitPosition, value)", "def set_pos(self, x):\n self._pos = x", "def set_position(self, pos, debug=False):\n pos = max(pos, 0)\n pos = min(pos, 1)\n posrange = pos * self.range\n pos = posrange + self.min\n if debug:\n print('Setting Dynamixel {} with posrange {} to position {}'.format(self.id, posrange, pos))\n self.motor.set_position(int(pos))", "def initializeOffsets(bitPosition: int, value: int) -> int:\n\n return bitPosition + VarSizeArrayTraits.bitSizeOf(bitPosition, value)", "def initializeOffsets(bitPosition: int, value: int) -> int:\n\n return bitPosition + VarUInt16ArrayTraits.bitSizeOf(bitPosition, value)", "def initializeOffsets(bitPosition: int, value: int) -> int:\n\n return bitPosition + VarInt64ArrayTraits.bitSizeOf(bitPosition, value)", "def initializeOffsets(bitPosition: int, value: int) -> int:\n\n return bitPosition + VarInt32ArrayTraits.bitSizeOf(bitPosition, value)", "def initializeOffsets(bitPosition: int, value: int) -> int:\n\n return bitPosition + VarUInt64ArrayTraits.bitSizeOf(bitPosition, value)", "def initializeOffsets(bitPosition: int, value: int) -> int:\n\n return bitPosition + VarUInt32ArrayTraits.bitSizeOf(bitPosition, value)", "def set_position(self, idx, pos):\n if self.EMULATOR_MODE:\n return\n if idx >= self.nleaflets or idx < 0:\n raise IndexError('index specified is out of bounds')\n self._fserial.write(self.MAGIC_BYTES + bytes([idx]) + pos.to_bytes(2, byteorder='big', signed=False) )\n self._fserial.reset_input_buffer()", "def set_at(self,x,y,set=True):\n\t\tif ( not self._validate(x,y )):\n\t\t\treturn\n\n\t\t# set the bit in the grid\n\t\tif set:\n\t\t\tself.Grid[y] = self.Grid[y] | (1 << x)\n\t\telse:\n\t\t\tself.Grid[y] = self.Grid[y] & ~(1 << x)", "def setPosition(position):", "def setbit(num,bit):\n num=shiftright(num,bit)\n num=shiftleft(num,31)\n num=shiftright(num,31 - bit)\n return num" ]
[ "0.6843015", "0.6645116", "0.6645116", "0.6591307", "0.65042365", "0.6372688", "0.6217314", "0.62171125", "0.61885184", "0.61506695", "0.6108199", "0.6107682", "0.60736126", "0.6068757", "0.6064724", "0.60437346", "0.60085493", "0.6003586", "0.59972906", "0.5992333", "0.5991685", "0.59916294", "0.5970855", "0.5968806", "0.59643805", "0.59633946", "0.59084934", "0.5890957", "0.5859407", "0.58531797" ]
0.70632917
0
Read bit_length bits as an integer.
def read(self, bit_length): ret = self.peek(bit_length) self.bit_pos += bit_length return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_integer(self, number_of_bits):\n\n value = 0\n\n for _ in range(number_of_bits):\n value <<= 1\n value |= self.read_bit()\n\n return value", "def extract_bits(data, bit, length=1):\n bits = bitarray(data, endian='big')\n if length > 1:\n out = bits[bit:bit+length]\n try:\n out = struct.unpack('>B', out.tobytes())[0]\n except struct.error:\n out = 0\n else:\n try:\n out = bits[bit]\n except IndexError:\n out = 0\n return int(out)", "def read_int(self):\n return self.bits.read(32).intle", "def bit_length(self, ???):", "def read_bits_as_int(self, num_bits) -> int:\n if num_bits > 0:\n bits = self.read_bits(num_bits)\n log.info(f\"bits: {bits}\")\n log.info(f\"num_bits: {num_bits}\")\n try:\n int_bits = int(bits, 2)\n except ValueError:\n raise NoMoreBitsException(self.original_message)\n return int_bits", "def read_bits(fd, reg, bitStart, length):\n b = read_byte(fd, reg)\n mask = ((1 << length) - 1) << (bitStart - length + 1)\n b &= mask;\n b >>= (bitStart - length + 1);\n return b", "def peek(self, bit_length):\n\n if bit_length < 0:\n raise BitReaderError('bit_length(%s) should be greater than 0',\n bit_length)\n elif self.bit_pos + bit_length > len(self):\n raise BitReaderError('out of data boundary')\n\n ret = 0\n byte_ptr, bit_ptr = self.byte_ptr, self.bit_ptr\n\n while bit_length > 0:\n byte = ord(self.data[byte_ptr])\n remaining_bits = 8 - bit_ptr\n\n if bit_length > remaining_bits:\n bit_length -= remaining_bits\n ret |= ((byte & ((1 << remaining_bits) - 1)) << bit_length)\n byte_ptr += 1\n bit_ptr = 0\n else:\n ret |= ((byte >> (remaining_bits - bit_length)) & \\\n ((1 << bit_length) - 1))\n break\n\n return ret", "def read_random_int(nbits):\n\n randomdata = read_random_bits(nbits)\n value = transform.bytes2int(randomdata)\n\n # Ensure that the number is large enough to just fill out the required\n # number of bits.\n value |= 1 << (nbits - 1)\n\n return value", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def python_int_bitwidth():\r\n # 'l' denotes a C long int, and the size is expressed in bytes.\r\n return struct.calcsize('l') * 8", "def read_unsigned_integer(stream, size):\n\t\n\tvalue = 0\n\tfor i in xrange(0, size):\n\t\tbyte = ord(stream.read(1))\n\t\tvalue = (value << 8) | byte\n\treturn value", "def _unpack_varint_with_readlength(data: bytes) -> 
Tuple[int, int]:\n result, n = _unpack_uvarint(data)\n return _zigzag_decode(result), n", "def readInteger(self, address, signed, nbits, endian):\n value = self.readBits(address, nbits, endian)\n\n # Signe number. Example with nbits=8:\n # if 128 <= value: value -= 256\n if signed and (1 << (nbits - 1)) <= value:\n value -= (1 << nbits)\n return value", "def readInt(self) -> int:\n return self._unpack('!i', 4)", "def _read_int(f, already_read=''):\n len_str = already_read\n while True:\n c = f.read(1)\n if c == b'\\n':\n break\n elif len(c) == 0:\n raise ValueError(\"Unexpected EOF while parsing message length\")\n else:\n len_str = len_str + c\n try:\n return int(len_str)\n except ValueError:\n raise ValueError(\"Malformed message length\")" ]
[ "0.75628495", "0.7081306", "0.6991238", "0.69427687", "0.67833155", "0.677061", "0.6710745", "0.65669936", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65387785", "0.65248", "0.6465487", "0.6254515", "0.62272185", "0.6209447", "0.6166672" ]
0.72332305
1
Add image to a webfacet.
def upload_webfacet_image(request): if request.method == 'POST': imageform=ImageAssetForm(request.POST, request.FILES) if imageform.is_valid(): webimage = imageform.save(commit=False) # retrieve the webfacet the image should be associated with webfacet_id = request.POST.get('webfacet') webfacet = get_object_or_404(WebFacet, id=webfacet_id) # set request based attributes webimage.owner = request.user webimage.organization = request.user.organization webimage.save() # add image asset to webfacet image_assets webfacet.image_assets.add(webimage) webfacet.save() return redirect('story_detail', pk=webfacet.story.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_webfacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n webfacet_id = request.POST.get('webfacet')\r\n print \"WEBFACETid: \", webfacet_id\r\n webfacet = get_object_or_404(WebFacet, id=webfacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n webfacet.image_assets.add(img_ins)\r\n webfacet.save()\r\n return redirect('story_detail', pk=webfacet.story.id)", "def add_printfacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n printfacet_id = request.POST.get('printfacet')\r\n print \"printFACETid: \", printfacet_id\r\n printfacet = get_object_or_404(PrintFacet, id=printfacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n printfacet.image_assets.add(img_ins)\r\n printfacet.save()\r\n return redirect('story_detail', pk=printfacet.story.id)", "def add_image_face():\n\n try:\n img = decode_image(request.files[\"image\"].read())\n except Exception as e:\n log.error(e)\n data = {\"error\": \"Error while loading image\"}\n return jsonify(data), 500\n save_picture = False\n if request.args.get(\"save\") == \"true\":\n save_picture = True\n \n face_img, _ = processor.extract_faces()\n #TODO\n #1. get several images if possible\n #2. save face_img array as picture if save_picture == True\n #3. pipe face_img array to embedder --> embedder needs to be modified to not from a folder, but from array of face_img\n #4. 
get the embedder result, insert to a pickle object --> can be section ID, or whatever", "def add_image(self, file_name, content):\n self.face_fs.put(content, filename=file_name)", "def add_image(self, tag, img_tensor, global_step=None, caption=None):\n img_tensor = make_np(img_tensor)\n self.vis.image(img_tensor, opts={'title': tag, 'caption': caption})", "def upload_printfacet_image(request):\r\n\r\n if request.method == 'POST':\r\n imageform=ImageAssetForm(request.POST, request.FILES)\r\n if imageform.is_valid():\r\n printimage = imageform.save(commit=False)\r\n # retrieve the printfacet the image should be associated with\r\n printfacet_id = request.POST.get('printfacet')\r\n printfacet = get_object_or_404(PrintFacet, id=printfacet_id)\r\n # set request based attributes\r\n printimage.owner = request.user\r\n printimage.organization = request.user.organization\r\n printimage.save()\r\n # add image asset to printfacet image_assets\r\n printfacet.image_assets.add(printimage)\r\n printfacet.save()\r\n return redirect('story_detail', pk=printfacet.story.id)", "def add_image(self, image_name):\n if self.current_trip is None:\n print \"no trip to add image\"\n return\n self.current_trip.store_image(image_name)", "def _add_image(self, image):\n document = self._control.document()\n name = str(image.cacheKey())\n document.addResource(QtGui.QTextDocument.ImageResource,\n QtCore.QUrl(name), image)\n format = QtGui.QTextImageFormat()\n format.setName(name)\n return format", "def add_image(self, image_html, column_name, row_num):\n column_num = self.table[0].index(column_name)\n self.images[column_num, row_num + 1].append(image_html)", "def add_inline_image(self, element, body):\n # TODO defensive coding\n src_url = element.attrib['src']\n picid, picname, picdescription, width, height = self.download_image(src_url)\n picrelid = 'rId'+str(len(self.relationships)+1)\n self.relationships.append(['http://schemas.openxmlformats.org/officeDocument/2006/relationships/image', 'media/'+picname])\n graphic = self.create_graphic_tag(width, height, picrelid, picid, picname, picdescription)\n inline = docx.makeelement('inline', nsprefix='wp', attributes={'distT':'0', 'distR':'0', 'distL':'0', 'distB':'0'})\n inline.append(docx.makeelement('extent', nsprefix='wp', attributes={'cy':str(height), 'cx':str(width)}))\n inline.append(docx.makeelement('effectExtent', nsprefix='wp', attributes={'r':'0', 'b':'0', 'l':'25400', 't':'0'}))\n inline.append(docx.makeelement('docPr', nsprefix='wp', attributes={'id':picid, 'descr':picdescription, 'name':picname}))\n cNvGraphicFramePr = docx.makeelement('cNvGraphicFramePr', nsprefix='wp')\n cNvGraphicFramePr.append(docx.makeelement('graphicFrameLocks', nsprefix='a', attributes={'noChangeAspect':'1',}))\n inline.append(cNvGraphicFramePr)\n inline.append(graphic)\n drawing = docx.makeelement('drawing', nsprefix='w')\n drawing.append(inline)\n r = docx.makeelement('r', nsprefix='w')\n r.append(drawing)\n p = docx.makeelement('p', nsprefix='w')\n p.append(r)\n body.append(p)", "def add_image(self, in_image):\n image = in_image\n if not isinstance(image, Image):\n image = Image()\n image.parse_record(in_image)\n self.img_lst.append(image)", "def add_image_to_html(self, image_path):\r\n file_object = open(self.file_name, 'a+')\r\n html_content = f'<div><img src={image_path} width=\"500\" height=\"300\"></div>'\r\n file_object.write(html_content)", "def add_image(self, image):\n\n # we're only for new images, no i'ds allowed\n # if u want to set an id by hand use set_image\n if 
image.id:\n raise o.Exception('Can not add image with id')\n\n if not image.data:\n raise o.Exception('Image must have data')\n\n if not image.source_page_url:\n raise o.Exception('Image must have source page url')\n\n # update it's stats\n image = self.populate_image_stats(image)\n\n # only add the image if we haven't seen it beforeQ\n # if we've seen it before there will be an id which\n # the set of images w/ this data and from this page share\n ids = self.rc.sinter('images:datainstance:%s' % image.shahash,\n 'images:page_ids:%s' % image.source_page_url)\n\n\n # we don't need to continue\n # we'll return back their original msg, w/o the id set\n if ids:\n print 'image already exists [%s], not setting' % ids\n return image\n\n # so the image appears to be new, good for it\n return self.set_image(image)", "def add_image(context, person):\n args = context.message.content.split(\" \")\n\n parsed_url = urllib.parse.urlparse(args[1])\n host = parsed_url.hostname.split('.')[0]\n media_type = args[1].split('.')[-1]\n\n if not valid_host(host) and not valid_media_type(media_type): # Tests for imgur image URL\n return context.send('Invalid URL, try again.')\n\n collection = db['people']\n # Prevent duplicate inputs\n collection.update_one({'image_url': args[1]}, {'$set': {'person': person}}, upsert=True)\n return context.send(f'Added to the `{person}` collection')", "def add_image(self, image, mode='normal', state='on'):\n raise NotImplementedError", "def register_collection_additional_image(self, image):\n save_path = os.path.join(self.session_dir, 'additional.jpg')\n image.save(save_path)", "def set_image(self, image):\n self.data['image'] = image", "def add_image(jid, img):\n jrd.hset(_generate_job_key(jid), 'image_status', 'created')\n image_rd.hset(jid, 'image', img)", "def add_audiofacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n audiofacet_id = request.POST.get('audiofacet')\r\n print \"audioFACETid: \", audiofacet_id\r\n audiofacet = get_object_or_404(AudioFacet, id=audiofacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n audiofacet.image_assets.add(img_ins)\r\n audiofacet.save()\r\n return redirect('story_detail', pk=audiofacet.story.id)", "def write_image(self, tag, image, step):\n\n self.writer.add_figure(tag, image, global_step=step)", "def add_image(self, fname, image_str, sid=None):\n src = \"%s/%s\" % (self.IMAGES, fname)\n if not src in self.images:\n if sid is None:\n sid = sluggify(src)\n self.opf.add_manifest(sid, src, \"image/jpeg\")\n filename = os.path.join(\"OEBPS\", self.IMAGES, fname)\n self.zip.writestr(filename, image_str)\n self.images.append(src)\n\n return \"../%s\" % src", "def add_figure(self, tag, figure, global_step=None, close=True):\n self.add_image(tag, figure_to_image(figure, close), global_step)", "def add_image(self, in_image):\n image = in_image\n if not isinstance(image, Image):\n image = Image()\n image.parse_record(in_image)\n self.image = image\n \n fields = self.eod.eodms_rapi.get_collections()[self.image.get_collId()]\\\n ['fields']\n \n self.metadata['imageUrl'] = self.image.get_metadata('thisRecordUrl')\n self.metadata['imageMetadata'] = self.image.get_metadata(\\\n 'metadataUrl')\n self.metadata['imageStartDate'] = self.image.get_date()", "def add_image(self, image):\n if self.temp_dir is 
None:\n self.temp_dir = tempfile.mkdtemp()\n if self.img_shape is None:\n self.img_shape = image.shape\n assert self.img_shape == image.shape\n filename = self.get_filename(self.current_index)\n plt.imsave(fname=filename, arr=image)\n self.current_index += 1\n return filename", "def add_image_file(self, imagefilename):\n #import matplotlib.image as mpimg\n\n # set aspect to auto mode\n self.axes.set_aspect('auto')\n\n img = matplotlib.image.imread(str(imagefilename))\n # lum_img = img[:,:,0]\n # FUTURE : refactor for image size, interpolation and origin\n imgplot = self.axes.imshow(img, extent=[0, 1000, 800, 0], interpolation='none', origin='lower')\n\n # Set color bar. plt.colorbar() does not work!\n if self._colorBar is None:\n # set color map type\n imgplot.set_cmap('spectral')\n self._colorBar = self.fig.colorbar(imgplot)\n else:\n self._colorBar.update_bruteforce(imgplot)\n\n self._flush()\n\n return", "def upload_audiofacet_image(request):\r\n\r\n if request.method == 'POST':\r\n imageform=ImageAssetForm(request.POST, request.FILES)\r\n if imageform.is_valid():\r\n audioimage = imageform.save(commit=False)\r\n # retrieve the audiofacet the image should be associated with\r\n audiofacet_id = request.POST.get('audiofacet')\r\n audiofacet = get_object_or_404(AudioFacet, id=audiofacet_id)\r\n # set request based attributes\r\n audioimage.owner = request.user\r\n audioimage.organization = request.user.organization\r\n audioimage.save()\r\n # add image asset to audiofacet image_assets\r\n audiofacet.image_assets.add(audioimage)\r\n audiofacet.save()\r\n return redirect('story_detail', pk=audiofacet.story.id)", "def _addImage(self, image, preprocessor):\n if self.images == None:\n self.images = []\n\n processed_image = preprocessor(image,\n self.inputShape()[2],\n self.inputShape()[1])\n\n self.images.append(processed_image)", "def add_profile_photo():\n pass", "def add_image(self, image):\n if type(image) is not self.imageType:\n raise TypeError('`image` must be of type {0}'.format(self.imageType))\n\n listBinning = self.imageList[0].binning\n if image.binning != listBinning:\n raise ValueError('`image` must have binning ({0} x {1})'.format(*listBinning))\n\n self.__imageList = self.imageList + (image,)\n\n return None", "def newAvatarImage(self, imgPath, imgName): \n img = ocempgui.draw.Image.load_image(imgPath)\n if not self.images[imgName]: \n imgOcemp = guiobjects.OcempImageMapTransparent(img)\n imgOcemp.topleft = 528, 114\n self.window.add_child(imgOcemp)\n self.images[imgName] = imgOcemp\n else:\n self.images[imgName].picture = img" ]
[ "0.6959996", "0.6487224", "0.6455513", "0.62596583", "0.6204542", "0.6056155", "0.6029566", "0.59842545", "0.5923838", "0.5902165", "0.5883469", "0.5882552", "0.5859965", "0.5838259", "0.5744651", "0.57241696", "0.57239974", "0.5716125", "0.5703071", "0.56786585", "0.5642951", "0.5578748", "0.5547676", "0.5492443", "0.54923505", "0.54869264", "0.5479863", "0.5472922", "0.5472801", "0.5468107" ]
0.6796275
1
Add image to a audiofacet.
def upload_audiofacet_image(request): if request.method == 'POST': imageform=ImageAssetForm(request.POST, request.FILES) if imageform.is_valid(): audioimage = imageform.save(commit=False) # retrieve the audiofacet the image should be associated with audiofacet_id = request.POST.get('audiofacet') audiofacet = get_object_or_404(AudioFacet, id=audiofacet_id) # set request based attributes audioimage.owner = request.user audioimage.organization = request.user.organization audioimage.save() # add image asset to audiofacet image_assets audiofacet.image_assets.add(audioimage) audiofacet.save() return redirect('story_detail', pk=audiofacet.story.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_audiofacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n audiofacet_id = request.POST.get('audiofacet')\r\n print \"audioFACETid: \", audiofacet_id\r\n audiofacet = get_object_or_404(AudioFacet, id=audiofacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n audiofacet.image_assets.add(img_ins)\r\n audiofacet.save()\r\n return redirect('story_detail', pk=audiofacet.story.id)", "def add_image(self, tag, img_tensor, global_step=None, caption=None):\n img_tensor = make_np(img_tensor)\n self.vis.image(img_tensor, opts={'title': tag, 'caption': caption})", "def add_image_face():\n\n try:\n img = decode_image(request.files[\"image\"].read())\n except Exception as e:\n log.error(e)\n data = {\"error\": \"Error while loading image\"}\n return jsonify(data), 500\n save_picture = False\n if request.args.get(\"save\") == \"true\":\n save_picture = True\n \n face_img, _ = processor.extract_faces()\n #TODO\n #1. get several images if possible\n #2. save face_img array as picture if save_picture == True\n #3. pipe face_img array to embedder --> embedder needs to be modified to not from a folder, but from array of face_img\n #4. get the embedder result, insert to a pickle object --> can be section ID, or whatever", "def _add_image(self, image):\n document = self._control.document()\n name = str(image.cacheKey())\n document.addResource(QtGui.QTextDocument.ImageResource,\n QtCore.QUrl(name), image)\n format = QtGui.QTextImageFormat()\n format.setName(name)\n return format", "def add_image(self, file_name, content):\n self.face_fs.put(content, filename=file_name)", "def add_image(self, image, mode='normal', state='on'):\n raise NotImplementedError", "def add_image(self, image):\n if self.temp_dir is None:\n self.temp_dir = tempfile.mkdtemp()\n if self.img_shape is None:\n self.img_shape = image.shape\n assert self.img_shape == image.shape\n filename = self.get_filename(self.current_index)\n plt.imsave(fname=filename, arr=image)\n self.current_index += 1\n return filename", "def add_printfacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n printfacet_id = request.POST.get('printfacet')\r\n print \"printFACETid: \", printfacet_id\r\n printfacet = get_object_or_404(PrintFacet, id=printfacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n printfacet.image_assets.add(img_ins)\r\n printfacet.save()\r\n return redirect('story_detail', pk=printfacet.story.id)", "def add_image(self, in_image):\n image = in_image\n if not isinstance(image, Image):\n image = Image()\n image.parse_record(in_image)\n self.img_lst.append(image)", "def add_webfacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n webfacet_id = request.POST.get('webfacet')\r\n print \"WEBFACETid: \", webfacet_id\r\n webfacet = get_object_or_404(WebFacet, id=webfacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print 
\"IMGins: \", img_ins\r\n webfacet.image_assets.add(img_ins)\r\n webfacet.save()\r\n return redirect('story_detail', pk=webfacet.story.id)", "def add_image(self, image_name, alpha=1):\n self.image_name = image_name\n self.image_alpha = alpha", "def set_image(self, image):\r\n\r\n with image:\r\n pic = Picture()\r\n pic.data = image.read()\r\n pic.type = COVER_FRONT\r\n pic.mime = image.mime_type\r\n pic.width = image.width\r\n pic.height = image.height\r\n\r\n self.audio.pop(\"coverart\", None)\r\n self.audio.pop(\"coverartmime\", None)\r\n self.audio[\"metadata_block_picture\"] = base64.b64encode(\r\n pic.write()).decode(\"ascii\")", "def write_image(self, tag, image, step):\n\n self.writer.add_figure(tag, image, global_step=step)", "def add_image(self, image_name):\n if self.current_trip is None:\n print \"no trip to add image\"\n return\n self.current_trip.store_image(image_name)", "def add_image(self, image):\n if type(image) is not self.imageType:\n raise TypeError('`image` must be of type {0}'.format(self.imageType))\n\n listBinning = self.imageList[0].binning\n if image.binning != listBinning:\n raise ValueError('`image` must have binning ({0} x {1})'.format(*listBinning))\n\n self.__imageList = self.imageList + (image,)\n\n return None", "def add_image(self, image_file_name):\n # check\n if os.path.exists(image_file_name) is False:\n raise NotImplementedError(\"Image file %s does not exist.\" % image_file_name)\n\n self._myCanvas.add_image_file(image_file_name)\n\n return", "def upload_webfacet_image(request):\r\n\r\n if request.method == 'POST':\r\n imageform=ImageAssetForm(request.POST, request.FILES)\r\n if imageform.is_valid():\r\n webimage = imageform.save(commit=False)\r\n # retrieve the webfacet the image should be associated with\r\n webfacet_id = request.POST.get('webfacet')\r\n webfacet = get_object_or_404(WebFacet, id=webfacet_id)\r\n # set request based attributes\r\n webimage.owner = request.user\r\n webimage.organization = request.user.organization\r\n webimage.save()\r\n # add image asset to webfacet image_assets\r\n webfacet.image_assets.add(webimage)\r\n webfacet.save()\r\n return redirect('story_detail', pk=webfacet.story.id)", "def add_image_file(self, imagefilename):\n #import matplotlib.image as mpimg\n\n # set aspect to auto mode\n self.axes.set_aspect('auto')\n\n img = matplotlib.image.imread(str(imagefilename))\n # lum_img = img[:,:,0]\n # FUTURE : refactor for image size, interpolation and origin\n imgplot = self.axes.imshow(img, extent=[0, 1000, 800, 0], interpolation='none', origin='lower')\n\n # Set color bar. 
plt.colorbar() does not work!\n if self._colorBar is None:\n # set color map type\n imgplot.set_cmap('spectral')\n self._colorBar = self.fig.colorbar(imgplot)\n else:\n self._colorBar.update_bruteforce(imgplot)\n\n self._flush()\n\n return", "def add_figure(self, tag, figure, global_step=None, close=True):\n self.add_image(tag, figure_to_image(figure, close), global_step)", "def loaded_image(self, image):\r\n self.loaded_images.append(image)", "def upload_printfacet_image(request):\r\n\r\n if request.method == 'POST':\r\n imageform=ImageAssetForm(request.POST, request.FILES)\r\n if imageform.is_valid():\r\n printimage = imageform.save(commit=False)\r\n # retrieve the printfacet the image should be associated with\r\n printfacet_id = request.POST.get('printfacet')\r\n printfacet = get_object_or_404(PrintFacet, id=printfacet_id)\r\n # set request based attributes\r\n printimage.owner = request.user\r\n printimage.organization = request.user.organization\r\n printimage.save()\r\n # add image asset to printfacet image_assets\r\n printfacet.image_assets.add(printimage)\r\n printfacet.save()\r\n return redirect('story_detail', pk=printfacet.story.id)", "def augment(self, image):\n pass", "def add_stimulus(self, fig: matplotlib.figure.Figure, stim_path: Path):\n stim_path = Path(stim_path)\n if stim_path.suffix == \".mp4\":\n vidcap = cv2.VideoCapture(str(stim_path))\n success, image = vidcap.read()\n else:\n image = plt.imread(get_sample_data(stim_path))\n newax = fig.add_axes([0.38, 0, 0.2, 0.4], zorder=-1)\n newax.imshow(image)\n newax.axis(\"off\")\n return fig", "def on_image(self, image):", "def add_image(image, fig, left, bottom, width=None, height=None, **kwargs):\n if isinstance(image, Image.Image):\n image_width, image_height = image.size\n else:\n image_height, image_width = image.shape[:2]\n\n image_aspect = image_width / image_height\n\n figsize = fig.get_size_inches()\n fig_aspect = figsize[0] / figsize[1]\n\n if height is None:\n height = width / image_aspect * fig_aspect\n\n if width is None:\n width = height * image_aspect / fig_aspect\n\n # add image\n ax_image = fig.add_axes((left, bottom, width, height))\n ax_image.axis('off') # axis off so no labels/ ticks\n\n ax_image.imshow(image, **kwargs)\n\n return ax_image", "def set_image(self, image):\n self.data['image'] = image", "def add_image(self, image_list):\n self.image_filenames_list.append(image_list)", "def _addImage(self, image, preprocessor):\n if self.images == None:\n self.images = []\n\n processed_image = preprocessor(image,\n self.inputShape()[2],\n self.inputShape()[1])\n\n self.images.append(processed_image)", "def add_image(jid, img):\n jrd.hset(_generate_job_key(jid), 'image_status', 'created')\n image_rd.hset(jid, 'image', img)", "def add_audio(self, tag, snd_tensor, global_step=None, sample_rate=44100):\n snd_tensor = make_np(snd_tensor)\n self.vis.audio(tensor=snd_tensor, opts={\n 'sample_frequency': sample_rate})" ]
[ "0.66578627", "0.64728636", "0.61345947", "0.60722554", "0.60476905", "0.60405284", "0.59467155", "0.59415364", "0.5939142", "0.5883977", "0.5809388", "0.5729445", "0.56972593", "0.56908375", "0.5679853", "0.56495744", "0.5649321", "0.564413", "0.5638859", "0.56222016", "0.5610289", "0.5601687", "0.559505", "0.55673015", "0.5559034", "0.5554582", "0.55483156", "0.5546476", "0.5541203", "0.55384123" ]
0.66707206
0
Add image to a videofacet.
def upload_videofacet_image(request):

    if request.method == 'POST':
        imageform=ImageAssetForm(request.POST, request.FILES)
        if imageform.is_valid():
            videoimage = imageform.save(commit=False)
            # retrieve the videofacet the image should be associated with
            videofacet_id = request.POST.get('videofacet')
            videofacet = get_object_or_404(VideoFacet, id=videofacet_id)
            # set request based attributes
            videoimage.owner = request.user
            videoimage.organization = request.user.organization
            videoimage.save()
            # add image asset to videofacet image_assets
            videofacet.image_assets.add(videoimage)
            videofacet.save()
            return redirect('story_detail', pk=videofacet.story.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_videofacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n videofacet_id = request.POST.get('videofacet')\r\n print \"videoFACETid: \", videofacet_id\r\n videofacet = get_object_or_404(VideoFacet, id=videofacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n videofacet.image_assets.add(img_ins)\r\n videofacet.save()\r\n return redirect('story_detail', pk=videofacet.story.id)", "def add_image(self, tag, img_tensor, global_step=None, caption=None):\n img_tensor = make_np(img_tensor)\n self.vis.image(img_tensor, opts={'title': tag, 'caption': caption})", "def insert_vmedia(self, url):\n target_uri = self._get_action_element('insert').target_uri\n data = {'Image': url}\n self._conn.post(target_uri, data=data)", "def add_image(self, file_name, content):\n self.face_fs.put(content, filename=file_name)", "def add_image(self, image_name):\n if self.current_trip is None:\n print \"no trip to add image\"\n return\n self.current_trip.store_image(image_name)", "def add_image(self, in_image):\n image = in_image\n if not isinstance(image, Image):\n image = Image()\n image.parse_record(in_image)\n self.img_lst.append(image)", "def add_image(self, image, mode='normal', state='on'):\n raise NotImplementedError", "def add_image(project, img, id, snap, parent, public):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.add_image(project, img, id, snap, parent, public)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def add_video(self, tag, vid_tensor, global_step=None, fps=4):\n shape = vid_tensor.shape\n # A batch of videos (tensorboardX format) is a 5D tensor\n if len(shape) > 4:\n for i in range(shape[0]):\n # Reshape each video to Visdom's (T x H x W x C) and write each video\n # TODO: reverse the logic here, shoudl do the permutation in numpy\n if isinstance(vid_tensor, np.ndarray):\n import torch\n ind_vid = torch.from_numpy(\n vid_tensor[i, :, :, :, :]).permute(1, 2, 3, 0)\n else:\n ind_vid = vid_tensor[i, :, :, :, :].permute(1, 2, 3, 0)\n scale_factor = 255\n # Visdom looks for .ndim attr, this is something raw Tensors don't have\n # Cast to Numpy array to get .ndim attr\n ind_vid = ind_vid.numpy()\n ind_vid = (ind_vid * scale_factor).astype(np.uint8)\n assert ind_vid.shape[3] in [1, 3, 4], \\\n 'Visdom requires the last dimension to be color, which can be 1 (grayscale), 3 (RGB) or 4 (RGBA)'\n self.vis.video(tensor=ind_vid, opts={'fps': fps})\n else:\n self.vis.video(tensor=vid_tensor, opts={'fps': fps})", "def add_image(jid, img):\n jrd.hset(_generate_job_key(jid), 'image_status', 'created')\n image_rd.hset(jid, 'image', img)", "def add_picture(self, file, left, top, width=None, height=None):\n pkg = Package.containing(self.__slide)\n image = pkg._images.add_image(file)\n rel = self.__slide._add_relationship(RT_IMAGE, image)\n pic = self.__pic(rel._rId, file, left, top, width, height)\n self.__spTree.append(pic)\n picture = Picture(pic)\n self.__shapes.append(picture)\n return picture", "def add_image(context, person):\n args = context.message.content.split(\" \")\n\n parsed_url = urllib.parse.urlparse(args[1])\n host = parsed_url.hostname.split('.')[0]\n media_type = args[1].split('.')[-1]\n\n if not valid_host(host) and not 
valid_media_type(media_type): # Tests for imgur image URL\n return context.send('Invalid URL, try again.')\n\n collection = db['people']\n # Prevent duplicate inputs\n collection.update_one({'image_url': args[1]}, {'$set': {'person': person}}, upsert=True)\n return context.send(f'Added to the `{person}` collection')", "def loaded_image(self, image):\r\n self.loaded_images.append(image)", "def add_video(self, video: Video):\n\n self._videos[video.video_id] = video", "async def add_img(self, ctx: BBContext, url: str, artist: Optional[discord.User] = None):\n\n art = Art(url, artist.id, artist.name) if artist else Art(url)\n con = await ctx.get_connection()\n query = f'INSERT INTO {TABLE_ARTS}(url, artist_id, artist_name) VALUES($1, $2, $3)'\n\n await con.execute(query, art.url, art.artist_id, art.artist_name)\n await ctx.tick(True)", "def add_file(self, path):\n self.files.append(filetypes.WrapVideoFile(path))", "def add_image(self, image_name, version, image_hash):\n raise NotImplementedError()", "def add_image_to_frame_list(self,startFrame, endFrame, imageName): \n for i in range(startFrame-1, endFrame-1):\n try:\n # image = imageio.imread(imageName)\n im = Image.open(imageName)\n im = im.resize((720, 720))\n self.frame_list.append(im)\n # self.frame_list.append(im)\n\n except:\n print (imageName, \" not found.\")\n # BufferedImage bi= new BufferedImage(320,240,BufferedImage.TYPE_BYTE_GRAY);\n im=self.blank\n self.frame_list.append(im)", "def newAvatarImage(self, imgPath, imgName): \n img = ocempgui.draw.Image.load_image(imgPath)\n if not self.images[imgName]: \n imgOcemp = guiobjects.OcempImageMapTransparent(img)\n imgOcemp.topleft = 528, 114\n self.window.add_child(imgOcemp)\n self.images[imgName] = imgOcemp\n else:\n self.images[imgName].picture = img", "def augment(self, image):\n pass", "def _setMoviePicture(self, imageUrl):\n imageFilename = imageUrl.split(\"/\")[-1]\n imagePath = \"cache/\" + imageFilename\n\n # Create 'cache' folder if it does not exist.\n if not os.path.exists(\"./cache/\"):\n os.makedirs(\"./cache/\")\n\n try:\n if not os.path.exists(imagePath):\n # print \"Creating '%s'...\" % ( imagePath )\n urllib.urlretrieve(imageUrl, imagePath)\n urllib.urlcleanup()\n\n try:\n # Scaffold image loading. 
If any exception arises for image\n # parsing, the 'image' file won't be locked.\n with open(imagePath, 'rb') as imageFile:\n image = Image.open(imageFile)\n self.photoImage = ImageTk.PhotoImage(image)\n self.picture.create_image(0, 0, image = self.photoImage, anchor = NW)\n return\n except IOError:\n print \"Unable to load cache image '%s'.\" % ( imagePath )\n os.remove(imagePath)\n except IOError:\n print \"Unable to retrieve the movie image.\"\n\n self.clearMoviePicture()", "def _addImage(self, image, preprocessor):\n if self.images == None:\n self.images = []\n\n processed_image = preprocessor(image,\n self.inputShape()[2],\n self.inputShape()[1])\n\n self.images.append(processed_image)", "def add_image(\n self,\n url_or_path: str,\n width: Optional[int] = None,\n height: Optional[int] = None,\n ) -> None: # noqa: E501\n\n self._client.add_element(\n Container(content=Image(src=url_or_path, width=width, height=height))\n )", "def add_image(self, image):\n if type(image) is not self.imageType:\n raise TypeError('`image` must be of type {0}'.format(self.imageType))\n\n listBinning = self.imageList[0].binning\n if image.binning != listBinning:\n raise ValueError('`image` must have binning ({0} x {1})'.format(*listBinning))\n\n self.__imageList = self.imageList + (image,)\n\n return None", "def SetImage(self,img):\n\n # opencv reads images in BGR order\n # If we are in RGB order change it now\n # see Constants.py for RGB_R\n\n if RGB_R==0:\n im=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\n else:\n im=img\n\n # the on screen display will be a different size\n # it is expected to be bigger than the actual panel\n self.numpyEnlarge(img,self.options.scale)\n\n if self.video:\n self.video.write(self.frameBuffer)", "def insert_image(self, image_path: str) -> str:\n mm = self.anki.media.MediaManager(self.collection, None)\n name = mm.addFile(image_path)\n self.collection.save()\n return name", "def add_image(self, img, input_or_output, filename, title, description=\"\"):\n if input_or_output.lower() != \"input\" and input_or_output.lower() != \"output\":\n raise Exception(\"input_or_output parameter can only contain 'input' or 'output'\")\n\n image_object = Image(self._name, img, input_or_output, self._img_folder + \"/\" + filename, title, description=\"\")\n image_object.commit()\n s = image_object.to_series()\n self._commit(s)", "def Add(self, *args):\n return _BRepAlgo.BRepAlgo_Image_Add(self, *args)", "def update_cap_image(self):\n\n fn = self._get_cap_filename()\n try:\n im = PIL.Image.open(fn)\n except FileNotFoundError:\n return\n\n frame = np.array(im)\n\n \"\"\"\n frame = cv2.imread(fn, cv2.IMREAD_ANYDEPTH)\n if (frame is None):\n return\n \"\"\"\n\n frame = (frame >> (16 - self.camera.pixel_bits)).astype(np.uint16)\n\n ndx = self.dpar.cur_cap\n\n if self.dpar.cap_live_swap:\n pix, gray = self._get_pixmap(frame, self.dpar.iwindow[ndx])\n self.live_screen.live_title = self._cap_title(ndx)\n self.live_screen.setPixmap(pix)\n else:\n pix, gray = self._get_pixmap(frame[::4,::4], self.dpar.iwindow[ndx])\n self.cap_screen.cap_title = self._cap_title(ndx)\n self.cap_screen.setPixmap(pix)\n self.cap_screen.format_for_cap() # This is because first time, format is for \"no stills\".", "def add_image(self, image_list):\n self.image_filenames_list.append(image_list)" ]
[ "0.6674277", "0.62054956", "0.61609864", "0.6160968", "0.6141157", "0.60072285", "0.598328", "0.5955084", "0.591528", "0.57893074", "0.57588404", "0.5740034", "0.5733411", "0.57132757", "0.56607807", "0.5579022", "0.55734813", "0.5563795", "0.55606425", "0.5558055", "0.5555334", "0.5553261", "0.5549526", "0.5509872", "0.5505287", "0.549871", "0.5492379", "0.54859406", "0.5472679", "0.5465812" ]
0.65812075
1
Add existing image(s) in the library to another webfacet.
def add_webfacet_image(request):

    if request.method == "POST":
        add_image_form = AddImageForm(request.POST, request=request)
        if add_image_form.is_valid():
            webfacet_id = request.POST.get('webfacet')
            print "WEBFACETid: ", webfacet_id
            webfacet = get_object_or_404(WebFacet, id=webfacet_id)
            images = request.POST.getlist('images')
            print "IMAGES: ", images
            for image in images:
                img_ins = get_object_or_404(ImageAsset, id=image)
                print "IMGins: ", img_ins
                webfacet.image_assets.add(img_ins)
            webfacet.save()
            return redirect('story_detail', pk=webfacet.story.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_printfacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n printfacet_id = request.POST.get('printfacet')\r\n print \"printFACETid: \", printfacet_id\r\n printfacet = get_object_or_404(PrintFacet, id=printfacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n printfacet.image_assets.add(img_ins)\r\n printfacet.save()\r\n return redirect('story_detail', pk=printfacet.story.id)", "def upload_webfacet_image(request):\r\n\r\n if request.method == 'POST':\r\n imageform=ImageAssetForm(request.POST, request.FILES)\r\n if imageform.is_valid():\r\n webimage = imageform.save(commit=False)\r\n # retrieve the webfacet the image should be associated with\r\n webfacet_id = request.POST.get('webfacet')\r\n webfacet = get_object_or_404(WebFacet, id=webfacet_id)\r\n # set request based attributes\r\n webimage.owner = request.user\r\n webimage.organization = request.user.organization\r\n webimage.save()\r\n # add image asset to webfacet image_assets\r\n webfacet.image_assets.add(webimage)\r\n webfacet.save()\r\n return redirect('story_detail', pk=webfacet.story.id)", "def merge_assets(self, other):\n for asset in other.asset:\n if find_elements(root=self.asset, tags=asset.tag,\n attribs={\"name\": asset.get(\"name\")}, return_first=True) is None:\n self.asset.append(asset)", "def append_component_images(pldm_fw_up_pkg, image_files):\n for image in image_files:\n with open(image, \"rb\") as file:\n for line in file:\n pldm_fw_up_pkg.write(line)", "def add_audiofacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n audiofacet_id = request.POST.get('audiofacet')\r\n print \"audioFACETid: \", audiofacet_id\r\n audiofacet = get_object_or_404(AudioFacet, id=audiofacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n audiofacet.image_assets.add(img_ins)\r\n audiofacet.save()\r\n return redirect('story_detail', pk=audiofacet.story.id)", "def add_image_face():\n\n try:\n img = decode_image(request.files[\"image\"].read())\n except Exception as e:\n log.error(e)\n data = {\"error\": \"Error while loading image\"}\n return jsonify(data), 500\n save_picture = False\n if request.args.get(\"save\") == \"true\":\n save_picture = True\n \n face_img, _ = processor.extract_faces()\n #TODO\n #1. get several images if possible\n #2. save face_img array as picture if save_picture == True\n #3. pipe face_img array to embedder --> embedder needs to be modified to not from a folder, but from array of face_img\n #4. 
get the embedder result, insert to a pickle object --> can be section ID, or whatever", "def add_image(self, other):\n newcls = self.__class__(None)\n newcls.polygon = self.union(other)\n\n newcls._members = []\n for v in self.members:\n newcls._members.append(v)\n for v in other.members:\n if v not in newcls._members:\n newcls._members.append(v)\n\n if self.is_mf_mosaic or other.is_mf_mosaic:\n newcls._update_mosaic_flag_id(True)\n else:\n newcls._update_mosaic_flag_id(None)\n\n return newcls", "def merge_asset(self, other):\n for asset in other.asset:\n asset_name = asset.get(\"name\")\n asset_type = asset.tag\n # Avoids duplication\n pattern = \"./{}[@name='{}']\".format(asset_type, asset_name)\n if self.asset.find(pattern) is None:\n self.asset.append(asset)", "def add_image(self, image):\n\n # we're only for new images, no i'ds allowed\n # if u want to set an id by hand use set_image\n if image.id:\n raise o.Exception('Can not add image with id')\n\n if not image.data:\n raise o.Exception('Image must have data')\n\n if not image.source_page_url:\n raise o.Exception('Image must have source page url')\n\n # update it's stats\n image = self.populate_image_stats(image)\n\n # only add the image if we haven't seen it beforeQ\n # if we've seen it before there will be an id which\n # the set of images w/ this data and from this page share\n ids = self.rc.sinter('images:datainstance:%s' % image.shahash,\n 'images:page_ids:%s' % image.source_page_url)\n\n\n # we don't need to continue\n # we'll return back their original msg, w/o the id set\n if ids:\n print 'image already exists [%s], not setting' % ids\n return image\n\n # so the image appears to be new, good for it\n return self.set_image(image)", "def add_image(self, image_list):\n self.image_filenames_list.append(image_list)", "def add_videofacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n videofacet_id = request.POST.get('videofacet')\r\n print \"videoFACETid: \", videofacet_id\r\n videofacet = get_object_or_404(VideoFacet, id=videofacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n videofacet.image_assets.add(img_ins)\r\n videofacet.save()\r\n return redirect('story_detail', pk=videofacet.story.id)", "def content_library_images_some(self, content_library_images_some):\n\n self._content_library_images_some = content_library_images_some", "def AppendImages(im1, im2):\r\n im1cols, im1rows = im1.size\r\n im2cols, im2rows = im2.size\r\n im3 = Image.new('RGB', (im1cols+im2cols, max(im1rows,im2rows)))\r\n im3.paste(im1,(0,0))\r\n im3.paste(im2,(im1cols,0))\r\n return im3", "def copy_images(apps, schema_editor):\n\n FieldImage = apps.get_model('field_wagtail', 'FieldImage')\n Image = apps.get_model('wagtailimages', 'Image')\n django_content_type = apps.get_model('contenttypes', 'contenttype')\n tagged_item_model = apps.get_model('taggit', 'TaggedItem')\n\n images = Image.objects.all()\n new_images = []\n for image in images:\n new_images.append(FieldImage(\n id=image.id,\n title=image.title,\n file=image.file,\n width=image.width,\n height=image.height,\n created_at=image.created_at,\n focal_point_x=image.focal_point_x,\n focal_point_y=image.focal_point_y,\n focal_point_width=image.focal_point_width,\n focal_point_height=image.focal_point_height,\n file_size=image.file_size,\n 
collection=image.collection,\n uploaded_by_user=image.uploaded_by_user,\n alt_text=''\n ))\n\n FieldImage.objects.bulk_create(new_images)\n\n ct_extended_model, created = django_content_type.objects.get_or_create(\n app_label='field_wagtail',\n model='fieldimage'\n )\n ct_wagtail_model = django_content_type.objects.get(\n app_label='wagtailimages',\n model='image'\n )\n\n tagged_item_model.objects.filter(\n content_type_id=ct_wagtail_model.id).update(\n content_type_id=ct_extended_model.id\n )", "def loaded_image(self, image):\r\n self.loaded_images.append(image)", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def asset_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n act_obj = context.active_object\r\n obj_list = [obj for obj in context.scene.objects if obj.select]\r\n thumbnails_path = get_directory('icons')\r\n is_subsurf = False\r\n view = context.space_data\r\n fx_settings = view.fx_settings\r\n ssao_settings = fx_settings.ssao\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb_list = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n \r\n if len(obj_list) >= 2:\r\n asset_name = AM.group_name\r\n \r\n else:\r\n asset_name = act_obj.name\r\n if act_obj.modifiers:\r\n for mod in act_obj.modifiers:\r\n if mod.type == 'SUBSURF':\r\n is_subsurf = True\r\n \r\n if asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace':\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if multi_object:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\") \r\n \r\n else:\r\n if len(obj_list) >= 2:\r\n row = box.row()\r\n box.label(\"Choose the asset name\")\r\n box.prop(AM, \"group_name\", text = \"\")\r\n \r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"Name\")\r\n \r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n \r\n # ---------------------- # \r\n # RENNDER THUMBNAIL #\r\n # ---------------------- #\r\n \r\n if AM.render_type == 'render':\r\n if len(obj_list) == 1 and not is_subsurf:\r\n box.prop(AM, \"add_subsurf\", text = \"Subsurf\")\r\n box.prop(AM, \"add_smooth\", text = \"Smooth\") \r\n \r\n box.prop(AM, \"material_render\", text=\"Addon material\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n elif AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n row = box.row(align=True)\r\n row.prop(view, \"use_matcap\")\r\n if view.use_matcap 
:\r\n row.prop(AM, \"matcap_options\", text=\"\", icon='TRIA_UP' if AM.matcap_options else 'TRIA_DOWN') \r\n if AM.matcap_options:\r\n row = box.row(align=True)\r\n row.template_icon_view(view, \"matcap_icon\")\r\n row = box.row(align=True)\r\n row.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\r\n if fx_settings.use_ssao:\r\n row.prop(AM, \"ao_options\", text=\"\", icon='TRIA_UP' if AM.ao_options else 'TRIA_DOWN') \r\n if AM.ao_options:\r\n subcol = box.column(align=True)\r\n subcol.prop(ssao_settings, \"factor\")\r\n subcol.prop(ssao_settings, \"distance_max\")\r\n subcol.prop(ssao_settings, \"attenuation\")\r\n subcol.prop(ssao_settings, \"samples\")\r\n subcol.prop(ssao_settings, \"color\")\r\n \r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if len(obj_list) == 1:\r\n if (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n else:\r\n if AM.group_name and (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n \r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if len(obj_list) >= 2:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\")\r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def add_image_to_frame_list(self,startFrame, endFrame, imageName): \n for i in range(startFrame-1, endFrame-1):\n try:\n # image = imageio.imread(imageName)\n im = Image.open(imageName)\n im = im.resize((720, 720))\n self.frame_list.append(im)\n # self.frame_list.append(im)\n\n except:\n print (imageName, \" not found.\")\n # BufferedImage bi= new BufferedImage(320,240,BufferedImage.TYPE_BYTE_GRAY);\n im=self.blank\n self.frame_list.append(im)", "def addTextureToOcc(self):\n\t\tshas = self._getShapes()\n\t\tfname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file ',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'/home')\n\t\tif fname:\n\t\t\tfor sha in shas:\n\t\t\t\t#get texture Path\n\t\t\t\tif not sha.a.texture_Occ.exists:\n\t\t\t\t\toccText = sha.a.texture_Occ.add( dt='string' )\n\t\t\t\tsha.a.texture_Occ.v = fname", "def add_image(self, file_name, content):\n self.face_fs.put(content, filename=file_name)", "def add_image(self, file):\n # use Image constructor to validate and characterize image file\n image = Image(file)\n # return matching image if found\n for 
existing_image in self._values:\n if existing_image._sha1 == image._sha1:\n return existing_image\n # otherwise add it to collection and return new image\n self._values.append(image)\n self.__rename_images()\n return image", "def pictures(self, pictures):\n\n self.container['pictures'] = pictures", "def overlay_image(image_to_show, image_to_add, directory=None):\n \n direcx = \"/Documents/1.4.5 Images/\"#put a condition pls\n if directory == None:\n directory = os.getcwd()\n print directory\n new_image = PIL.Image.open(directory + direcx + image_to_show)\n new_imager = PIL.Image.open(directory + direcx + image_to_show)\n new_imager.show()\n new_image2 = PIL.Image.open(directory + direcx + image_to_add)\n new_image2.show()\n \n width, height = new_imager.size\n print str(width) + \" \" + str(height)\n \n new_image22 = new_image2.resize((width,height), PIL.Image.ANTIALIAS)\n new_image22.show()\n\n new_imager.paste(new_image22, (0,0), new_image22)\n new_imager.show()\n #cohrt = PIL.Image.blend(new_imager, new_image22, alpha=0.5)\n #cohrt.show() #broked", "def upload_printfacet_image(request):\r\n\r\n if request.method == 'POST':\r\n imageform=ImageAssetForm(request.POST, request.FILES)\r\n if imageform.is_valid():\r\n printimage = imageform.save(commit=False)\r\n # retrieve the printfacet the image should be associated with\r\n printfacet_id = request.POST.get('printfacet')\r\n printfacet = get_object_or_404(PrintFacet, id=printfacet_id)\r\n # set request based attributes\r\n printimage.owner = request.user\r\n printimage.organization = request.user.organization\r\n printimage.save()\r\n # add image asset to printfacet image_assets\r\n printfacet.image_assets.add(printimage)\r\n printfacet.save()\r\n return redirect('story_detail', pk=printfacet.story.id)", "def register_collection_additional_image(self, image):\n save_path = os.path.join(self.session_dir, 'additional.jpg')\n image.save(save_path)", "def setAddPreviewImages(self,value):\n self.PDFreactorConfiguration.in1[\"addPreviewImages\"] = value", "def additional_images(self, additional_images):\n\n self._additional_images = additional_images", "def add_pets(\n all_pets: typing.List[pet_record.PetRecord],\n new_pets: typing.List[int],\n url: str,\n img_url: str,\n img_path: str,\n returned: bool = False\n) -> None:\n pets = add_pet_images(new_pets, url, img_url, img_path, returned)\n all_pets += list(pets)", "def add_misc_images(id):\n event = Event.query.get_or_404(id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n image_form = MultipleImageForm()\n if image_form.validate_on_submit():\n services.save_misc_images(image_form.images.data, event, images, db.session)\n db.session.commit()\n flash(\"Your upload was successful.\", \"success\")\n return redirect(url_for(\"events.media\", id=id))\n else:\n session[\"image_form_errors\"] = image_form.images.errors\n return redirect(url_for(\"events.media\", id=event.id))", "def webAdd( self, web ):\n web.add( self )" ]
[ "0.62151027", "0.59925026", "0.58333486", "0.583099", "0.5817499", "0.57698077", "0.5705304", "0.56660897", "0.56204855", "0.56093407", "0.54203415", "0.54093826", "0.5408779", "0.5378336", "0.53766954", "0.5346963", "0.5326423", "0.53248286", "0.530735", "0.5260022", "0.5252853", "0.5242394", "0.5236748", "0.52344024", "0.5234291", "0.5228334", "0.5190236", "0.5168009", "0.51578295", "0.5152887" ]
0.676759
0
Add existing image(s) in the library to another printfacet.
def add_printfacet_image(request):

    if request.method == "POST":
        add_image_form = AddImageForm(request.POST, request=request)
        if add_image_form.is_valid():
            printfacet_id = request.POST.get('printfacet')
            print "printFACETid: ", printfacet_id
            printfacet = get_object_or_404(PrintFacet, id=printfacet_id)
            images = request.POST.getlist('images')
            print "IMAGES: ", images
            for image in images:
                img_ins = get_object_or_404(ImageAsset, id=image)
                print "IMGins: ", img_ins
                printfacet.image_assets.add(img_ins)
            printfacet.save()
            return redirect('story_detail', pk=printfacet.story.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def append_component_images(pldm_fw_up_pkg, image_files):\n for image in image_files:\n with open(image, \"rb\") as file:\n for line in file:\n pldm_fw_up_pkg.write(line)", "def addTextureToOcc(self):\n\t\tshas = self._getShapes()\n\t\tfname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file ',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'/home')\n\t\tif fname:\n\t\t\tfor sha in shas:\n\t\t\t\t#get texture Path\n\t\t\t\tif not sha.a.texture_Occ.exists:\n\t\t\t\t\toccText = sha.a.texture_Occ.add( dt='string' )\n\t\t\t\tsha.a.texture_Occ.v = fname", "def upload_printfacet_image(request):\r\n\r\n if request.method == 'POST':\r\n imageform=ImageAssetForm(request.POST, request.FILES)\r\n if imageform.is_valid():\r\n printimage = imageform.save(commit=False)\r\n # retrieve the printfacet the image should be associated with\r\n printfacet_id = request.POST.get('printfacet')\r\n printfacet = get_object_or_404(PrintFacet, id=printfacet_id)\r\n # set request based attributes\r\n printimage.owner = request.user\r\n printimage.organization = request.user.organization\r\n printimage.save()\r\n # add image asset to printfacet image_assets\r\n printfacet.image_assets.add(printimage)\r\n printfacet.save()\r\n return redirect('story_detail', pk=printfacet.story.id)", "def add_image(self, other):\n newcls = self.__class__(None)\n newcls.polygon = self.union(other)\n\n newcls._members = []\n for v in self.members:\n newcls._members.append(v)\n for v in other.members:\n if v not in newcls._members:\n newcls._members.append(v)\n\n if self.is_mf_mosaic or other.is_mf_mosaic:\n newcls._update_mosaic_flag_id(True)\n else:\n newcls._update_mosaic_flag_id(None)\n\n return newcls", "def add_to_pdf(self, pdf):\n pdf.savefig(self.fig)", "def overlay_image(image_to_show, image_to_add, directory=None):\n \n direcx = \"/Documents/1.4.5 Images/\"#put a condition pls\n if directory == None:\n directory = os.getcwd()\n print directory\n new_image = PIL.Image.open(directory + direcx + image_to_show)\n new_imager = PIL.Image.open(directory + direcx + image_to_show)\n new_imager.show()\n new_image2 = PIL.Image.open(directory + direcx + image_to_add)\n new_image2.show()\n \n width, height = new_imager.size\n print str(width) + \" \" + str(height)\n \n new_image22 = new_image2.resize((width,height), PIL.Image.ANTIALIAS)\n new_image22.show()\n\n new_imager.paste(new_image22, (0,0), new_image22)\n new_imager.show()\n #cohrt = PIL.Image.blend(new_imager, new_image22, alpha=0.5)\n #cohrt.show() #broked", "def cli(fig1, fig2, out):\n click.echo('\\n' + '.' * 50)\n\n # open first image\n image1 = Image.open(fig1)\n\n # open second image\n image2 = Image.open(fig2)\n\n # retrieve the image dimensions.\n width, height = image1.size\n width2, height2 = image2.size\n\n if [width, height] != [width2, height2]:\n print(\"Image dimensions do not match! 
The Two inputs must have equal dimensions\")\n exit(1)\n else:\n print(\"Fig1 dimensions: \", image1.size)\n print(\"Fig2 dimensions: \", image2.size)\n # Create a new image object.\n merged = Image.new('RGB', image1.size)\n\n for i in range(0, width):\n for j in range(0, height):\n ima1 = list(image1.getpixel((i, j)))\n ima2 = list(image2.getpixel((i, j)))\n if ima1 == ima2:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] == [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] != [0, 0, 0]:\n r, g, b, a = ima2\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] == [0, 0, 0]:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and ima2 == [255, 255, 255, 255]:\n r, g, b, a = ima1\n elif [ima2[0], ima2[1], ima2[2]] != [0, 0, 0] and ima1 == [255, 255, 255, 255]:\n r, g, b, a = ima2\n else:\n # print ima1,ima2\n r = (ima1[0] + ima2[0]) // 2\n g = (ima1[1] + ima2[1]) // 2\n b = (ima1[2] + ima2[2]) // 2\n a = 255\n # print [r,g,b,a]\n\n merged.putpixel((i, j), (r, g, b, a))\n merged.save(out)\n click.echo('\\n' + '.' * 50)", "def overlay_image(image_to_show, image_to_add, directory=None):\n \n direcx = \"/Documents/1.4.5 Images/\"#put a condition pls\n if directory == None:\n directory = os.getcwd()\n print directory\n new_image = PIL.Image.open(directory + direcx + image_to_show)\n new_imager = PIL.Image.open(directory + direcx + image_to_show)\n new_imager.show()\n new_image2 = PIL.Image.open(directory + direcx + image_to_add)\n new_image2.show()\n width, height = new_imager.size\n print str(width) + \" \" + str(height)\n #print new_image\n #print new_image2\n #if image_to_show == \"\":\n # print_directory_list2()\n # return \"Use one of these\"\n new_image22 = new_image2.resize((width,height), PIL.Image.ANTIALIAS)\n new_image22.show()\n\n new_imager.paste(new_image22, (width, height), new_image22)\n new_imager.show()\n #cohrt = PIL.Image.blend(new_imager, new_image22, alpha=0.5)\n #cohrt.show()", "def asset_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n act_obj = context.active_object\r\n obj_list = [obj for obj in context.scene.objects if obj.select]\r\n thumbnails_path = get_directory('icons')\r\n is_subsurf = False\r\n view = context.space_data\r\n fx_settings = view.fx_settings\r\n ssao_settings = fx_settings.ssao\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb_list = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n \r\n if len(obj_list) >= 2:\r\n asset_name = AM.group_name\r\n \r\n else:\r\n asset_name = act_obj.name\r\n if act_obj.modifiers:\r\n for mod in act_obj.modifiers:\r\n if mod.type == 'SUBSURF':\r\n is_subsurf = True\r\n \r\n if asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace':\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if multi_object:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\") \r\n \r\n else:\r\n if len(obj_list) >= 2:\r\n row = box.row()\r\n box.label(\"Choose the asset name\")\r\n box.prop(AM, \"group_name\", text = \"\")\r\n \r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"Name\")\r\n \r\n row = 
box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n \r\n # ---------------------- # \r\n # RENNDER THUMBNAIL #\r\n # ---------------------- #\r\n \r\n if AM.render_type == 'render':\r\n if len(obj_list) == 1 and not is_subsurf:\r\n box.prop(AM, \"add_subsurf\", text = \"Subsurf\")\r\n box.prop(AM, \"add_smooth\", text = \"Smooth\") \r\n \r\n box.prop(AM, \"material_render\", text=\"Addon material\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n elif AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n row = box.row(align=True)\r\n row.prop(view, \"use_matcap\")\r\n if view.use_matcap :\r\n row.prop(AM, \"matcap_options\", text=\"\", icon='TRIA_UP' if AM.matcap_options else 'TRIA_DOWN') \r\n if AM.matcap_options:\r\n row = box.row(align=True)\r\n row.template_icon_view(view, \"matcap_icon\")\r\n row = box.row(align=True)\r\n row.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\r\n if fx_settings.use_ssao:\r\n row.prop(AM, \"ao_options\", text=\"\", icon='TRIA_UP' if AM.ao_options else 'TRIA_DOWN') \r\n if AM.ao_options:\r\n subcol = box.column(align=True)\r\n subcol.prop(ssao_settings, \"factor\")\r\n subcol.prop(ssao_settings, \"distance_max\")\r\n subcol.prop(ssao_settings, \"attenuation\")\r\n subcol.prop(ssao_settings, \"samples\")\r\n subcol.prop(ssao_settings, \"color\")\r\n \r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if len(obj_list) == 1:\r\n if (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n else:\r\n if AM.group_name and (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n \r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if len(obj_list) 
>= 2:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\")\r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def AppendImages(im1, im2):\r\n im1cols, im1rows = im1.size\r\n im2cols, im2rows = im2.size\r\n im3 = Image.new('RGB', (im1cols+im2cols, max(im1rows,im2rows)))\r\n im3.paste(im1,(0,0))\r\n im3.paste(im2,(im1cols,0))\r\n return im3", "def add_webfacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n webfacet_id = request.POST.get('webfacet')\r\n print \"WEBFACETid: \", webfacet_id\r\n webfacet = get_object_or_404(WebFacet, id=webfacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n webfacet.image_assets.add(img_ins)\r\n webfacet.save()\r\n return redirect('story_detail', pk=webfacet.story.id)", "def paintTags(self):\n imagesTagOrder = [\"gender\", \"skin\", \"head\", \"body\", \"mask\", \"hair\", \"shirt\", \"trousers\", \"skirt\", \"shoes\"]\n pos = 0\n for img in imagesTagOrder:\n self.imagesTag[img].topleft = 296, pos * 76\n self.imagesTag[img].connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.paintCustomizeZone, img)\n self.window.add_child(self.imagesTag[img])\n pos += 1", "def merge_assets(self, other):\n for asset in other.asset:\n if find_elements(root=self.asset, tags=asset.tag,\n attribs={\"name\": asset.get(\"name\")}, return_first=True) is None:\n self.asset.append(asset)", "def _merge_face(merge, image, face):\n scaled = merge.image.resize(face.as_dimension()).convert(\"RGBA\")\n image.image = image.image.convert(\"RGBA\")\n image.image.paste(scaled, face.as_box(), mask=scaled)\n return image", "def displayImages(self):\n\n plt.figure(figsize=(8,6))\n plt.subplot(1,2,1)\n plt.imshow( self.original_image, cmap=\"gray\")\n plt.title(\"Original Image\")\n plt.subplot(1,2,2)\n plt.imshow( self.blurred_image, cmap=\"gray\")\n plt.title(\"Blurred Image\")", "def PImageAdd (in1Image, in2Image, outImage, err, \\\n chkPos=False, factor1=1.0, factor2=1.0):\n ################################################################\n # Checks\n if not Image.PIsA(in1Image):\n raise TypeError,\"in1Image MUST be a Python Obit Image\"\n if not Image.PIsA(in2Image):\n raise TypeError,\"in2Image MUST be a Python Obit Image\"\n if not Image.PIsA(outImage):\n raise TypeError,\"outImage MUST be a Python Obit Image\"\n if not OErr.OErrIsA(err):\n raise TypeError,\"err MUST be an OErr\"\n #\n # Clone output from input 1\n in1Image.Clone (outImage, err)\n # Open images\n Image.POpen (in1Image, Image.READONLY, err)\n Image.POpen (in2Image, Image.READONLY, err)\n Image.POpen (outImage, Image.WRITEONLY, err)\n # Get input descriptor to see how many planes\n in1Desc = in1Image.Desc\n in2Desc = in2Image.Desc\n # Check compatibility\n ImageDesc.PCheckCompat (in1Desc, in2Desc, chkPos=chkPos)\n inDescDict = in1Desc.Dict\n ndim = inDescDict[\"naxis\"]\n inNaxis = inDescDict[\"inaxes\"]\n # Work buffer\n inImageArray = Image.PGetFArray(in1Image)\n ImageBuffer1 = FArray.PCopy(inImageArray, err)\n ImageBuffer2 = FArray.PCopy(inImageArray, err)\n\n # list of planes to loop over (0-rel)\n if (ndim>0) and (inNaxis[2]>0): \n planes = range(inNaxis[2])\n else:\n planes = [0]\n \n # Loop over planes\n for iPlane in planes:\n doPlane = 
[iPlane+1,1,1,1,1]\n # Get image planes\n Image.PGetPlane (in1Image, ImageBuffer1, doPlane, err)\n Image.PGetPlane (in2Image, ImageBuffer2, doPlane, err)\n\n # Scale\n FArray.PSMul(ImageBuffer1, factor1)\n FArray.PSMul(ImageBuffer2, factor2)\n\n # Add\n FArray.PAdd(ImageBuffer1, ImageBuffer2, ImageBuffer2)\n\n # Write output\n Image.PPutPlane (outImage, ImageBuffer2, doPlane, err)\n\n # end loop over planes\n # Close\n in2Image.Close(err)\n in2Image.Close(err)\n outImage.Close(err)\n # Error?\n if err.isErr:\n OErr.printErrMsg(err, \"Error subtracting Images\")\n # Write history\n in1History = History.History(\"history\", in1Image.List, err)\n in2History = History.History(\"history\", in2Image.List, err)\n outHistory = History.History(\"history\", outImage.List, err)\n # Copy Histories\n outHistory.Open(History.READWRITE, err)\n outHistory.TimeStamp(\" Start Obit PImageAdd\",err)\n outHistory.WriteRec(-1, \"/ PImageAdd Input 1 History\",err)\n outHistory.Close(err)\n info = in1Image.List.Dict\n # FITS? - copy header\n if (\"FileType\" in info) and (info[\"FileType\"][2][0]==0):\n History.PCopyHeader(in1History, outHistory, err)\n #Not needed History.PCopy(in1History, outHistory, err)\n outHistory.Open(History.READWRITE, err)\n outHistory.WriteRec(-1, \"/ \",err)\n outHistory.WriteRec(-1, \"/ ****** PImageAdd Input 2 History\",err)\n outHistory.Close(err)\n info = in2Image.List.Dict\n # FITS? - copy header\n if (\"FileType\" in info) and (info[\"FileType\"][2][0]==0):\n History.PCopyHeader(in2History, outHistory, err)\n History.PCopy(in2History, outHistory, err)\n # Add this programs history\n outHistory.Open(History.READWRITE, err)\n outHistory.TimeStamp(\" Start Obit PImageAdd\",err)\n outHistory.WriteRec(-1,OSystem.PGetPgmName()+\" factor1 = \"+str(factor1),err)\n outHistory.WriteRec(-1,OSystem.PGetPgmName()+\" factor2 = \"+str(factor2),err)\n outHistory.Close(err)", "def updateImageGroups(self):\n self.img_grps = self.splitImages()\n grps = self.img_grps\n self.detail.clear()\n detail = \"Available Groups : \\n\"\n if len(grps) >= 1:\n for i in range(len(grps)):\n detail += \"Group \"+ str(i+1)+ \" : \" + str(grps[i][0]) + \" ... \" + str(grps[i][-1]) + '\\n'\n\n self.detail.insertPlainText(detail)\n self.detail.moveCursor(QTextCursor.End)", "def add_image_face():\n\n try:\n img = decode_image(request.files[\"image\"].read())\n except Exception as e:\n log.error(e)\n data = {\"error\": \"Error while loading image\"}\n return jsonify(data), 500\n save_picture = False\n if request.args.get(\"save\") == \"true\":\n save_picture = True\n \n face_img, _ = processor.extract_faces()\n #TODO\n #1. get several images if possible\n #2. save face_img array as picture if save_picture == True\n #3. pipe face_img array to embedder --> embedder needs to be modified to not from a folder, but from array of face_img\n #4. 
get the embedder result, insert to a pickle object --> can be section ID, or whatever", "def polyAppendFacetCtx(*args, append: bool=True, exists: bool=True, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n isRotateAvailable: bool=True, maximumNumberOfPoints: Union[int, bool]=0,\n planarConstraint: bool=True, rotate: Union[float, bool]=0.0,\n subdivision: Union[int, bool]=1, texture: Union[int, bool]=1, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def join_files():\n files = [ent_1.get(), ent_2.get()]\n out_writer = PyPDF2.PdfFileWriter()\n for file in files:\n pdf_file = open(file, 'rb')\n file_reader = PyPDF2.PdfFileReader(pdf_file)\n for page in range(file_reader.numPages):\n pageObj = file_reader.getPage(page)\n out_writer.addPage(pageObj)\n\n output_file_name = result_entry.get()\n output_file = open(output_file_name, 'wb')\n out_writer.write(output_file)\n output_file.close()\n pdf_file.close()\n opener = \"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, output_file_name])\n clear_labels()", "def viz2(img1, interest_points1, img2, interest_points2, matches, PATCH_SIZE, threshold, min_sigma, max_sigma, num_sigma):\n \n\n\tfig = plt.figure(figsize=(10,5))\n\tax1 = fig.add_subplot(121)\n\tax2 = fig.add_subplot(122)\n\n #adding the two images to axes \n\tax1.imshow(img1, cmap='gray')\n\tax2.imshow(img2, cmap='gray')\n\n\tpositionimg1 = ax1.get_position()\n\tnew_pos = [positionimg1.x0+0.09, positionimg1.y0+0.025, \\\n\t\tpositionimg1.width / 1.1, positionimg1.height / 1.1] \n\tax1.set_position(new_pos)\n\n\tx1 = [a[1] for a in interest_points1] #blob detection x axis\n\ty1 = [a[0] for a in interest_points1] #blob detection y axis\n\ts1 = [a[2] for a in interest_points1] #blob detected at sigma \n \n\tx2 = [a[1] for a in interest_points2] #blob detection x axis\n\ty2 = [a[0] for a in interest_points2] #blob detection y axis\n\ts2 = [a[2] for a in interest_points2] #blob detected at sigma \n \n\tdifferences = [a[2] for a in matches]\n\n\n\tweighted_differences = normalize(differences)\n\n #iterating through the input list of matches\n\tfor coordinates, difference in zip(matches, weighted_differences):\n\t\tcord_a = (coordinates[0][1], coordinates[0][0]) #extracting coordinates for interest point in img1\n\t\tcord_b = (coordinates[1][1], coordinates[1][0]) #extracting coordinates for interest point in img2\n\t\tif difference <=0.33:\n\t\t\tcolor = \"green\"\n\t\telif difference > 0.33 and difference <= 0.66:\n\t\t\tcolor = \"yellow\"\n\t\telse:\n\t\t\tcolor = \"red\"\n\n\t#defining the path from cord_a to cord_b\n\t\tcon = ConnectionPatch(xyA=cord_a, xyB=cord_b, coordsA=\"data\", coordsB=\"data\",\n\t\t\t\t\t\t\t axesA=ax2, axesB=ax1, color=color) #arrowstyle='->')\n\t#adding line to axes2 \n\t\tax2.add_artist(con)\n\n #showing the image // can be changed to saving the image locally \n\tfor x, y, s in zip(x1, y1, s1):\n\t\tax1.scatter(x, y, alpha=1, facecolors='none', edgecolors='r', s=s**2) #plotting the input interest points for img1\n\tfor x, y, s in zip(x2, y2, s2):\n\t\tax2.scatter(x, y, alpha=1, facecolors='none', edgecolors='r', s=s**2) #plotting the input interest points for img2\n\tax1.axis('off')\n\tax2.axis('off')\n\ttitle = 'Patch Size=' + str(PATCH_SIZE) + ', Threshold=' + str(threshold) + ', min sigma=' + \\\n\tstr(min_sigma) + ', max sigma=' + str(max_sigma) + ', num sigma=' + str(num_sigma)\n\tplt.title(title, 
x=+0.1)\n\t#plt.show()\n\tplt.savefig(title+'.png')\n\n\n\treturn", "def add_audiofacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n audiofacet_id = request.POST.get('audiofacet')\r\n print \"audioFACETid: \", audiofacet_id\r\n audiofacet = get_object_or_404(AudioFacet, id=audiofacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n audiofacet.image_assets.add(img_ins)\r\n audiofacet.save()\r\n return redirect('story_detail', pk=audiofacet.story.id)", "def loaded_image(self, image):\r\n self.loaded_images.append(image)", "def create_image_caption_pairs(self):", "def addFigure(self,fig,xl,yl,scale):\n img = py.image.load(fig)\n w,h = img.get_size()\n img = py.transform.scale(img,(int(w*scale),int(h*scale)))\n self.figures.append(img)\n self.locs.append((xl,yl))", "def hdri_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n \r\n box = layout.box()\r\n row = box.row()\r\n row.prop(AM, \"existing_thumb\", text = \"Use existing Thumbnails\")\r\n \r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n \r\n row = box.row(align = True)\r\n \r\n row.operator(\"wm.ibl_importer\", text=\"OK\", icon='FILE_TICK')\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def AddImageFrames(self):\n img_lst = self.CollectImageFilenames()\n for img in img_lst:\n img_exist_ptrn = re.compile(r'.*({}).*'.format(img))\n if not img_exist_ptrn.search(self._latex_str):\n self._latex_str += bmr_frame.format(img, img.replace('_', ' '))\n print(\"Added image {}.\".format(img))", "def register_collection_additional_image(self, image):\n save_path = os.path.join(self.session_dir, 'additional.jpg')\n image.save(save_path)", "def merge_asset(self, other):\n for asset in other.asset:\n asset_name = asset.get(\"name\")\n asset_type = asset.tag\n # Avoids duplication\n pattern = \"./{}[@name='{}']\".format(asset_type, asset_name)\n if self.asset.find(pattern) is None:\n self.asset.append(asset)", "def add_image(self, image_list):\n self.image_filenames_list.append(image_list)" ]
[ "0.57439345", "0.5572703", "0.55238974", "0.5361123", "0.53004485", "0.52716833", "0.52288663", "0.51478827", "0.514103", "0.51245344", "0.5079901", "0.5075902", "0.5065897", "0.50491333", "0.5048254", "0.50375414", "0.50358367", "0.50343746", "0.50187576", "0.5002619", "0.49928337", "0.4986341", "0.4980079", "0.49785075", "0.49694258", "0.49444336", "0.49396354", "0.49231818", "0.49152857", "0.49127796" ]
0.63303715
0
Add existing image(s) in the library to another audiofacet.
def add_audiofacet_image(request):

    if request.method == "POST":
        add_image_form = AddImageForm(request.POST, request=request)
        if add_image_form.is_valid():
            audiofacet_id = request.POST.get('audiofacet')
            print "audioFACETid: ", audiofacet_id
            audiofacet = get_object_or_404(AudioFacet, id=audiofacet_id)
            images = request.POST.getlist('images')
            print "IMAGES: ", images
            for image in images:
                img_ins = get_object_or_404(ImageAsset, id=image)
                print "IMGins: ", img_ins
                audiofacet.image_assets.add(img_ins)
            audiofacet.save()
            return redirect('story_detail', pk=audiofacet.story.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_image(self, other):\n newcls = self.__class__(None)\n newcls.polygon = self.union(other)\n\n newcls._members = []\n for v in self.members:\n newcls._members.append(v)\n for v in other.members:\n if v not in newcls._members:\n newcls._members.append(v)\n\n if self.is_mf_mosaic or other.is_mf_mosaic:\n newcls._update_mosaic_flag_id(True)\n else:\n newcls._update_mosaic_flag_id(None)\n\n return newcls", "def upload_audiofacet_image(request):\r\n\r\n if request.method == 'POST':\r\n imageform=ImageAssetForm(request.POST, request.FILES)\r\n if imageform.is_valid():\r\n audioimage = imageform.save(commit=False)\r\n # retrieve the audiofacet the image should be associated with\r\n audiofacet_id = request.POST.get('audiofacet')\r\n audiofacet = get_object_or_404(AudioFacet, id=audiofacet_id)\r\n # set request based attributes\r\n audioimage.owner = request.user\r\n audioimage.organization = request.user.organization\r\n audioimage.save()\r\n # add image asset to audiofacet image_assets\r\n audiofacet.image_assets.add(audioimage)\r\n audiofacet.save()\r\n return redirect('story_detail', pk=audiofacet.story.id)", "def merge_assets(self, other):\n for asset in other.asset:\n if find_elements(root=self.asset, tags=asset.tag,\n attribs={\"name\": asset.get(\"name\")}, return_first=True) is None:\n self.asset.append(asset)", "def add_webfacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n webfacet_id = request.POST.get('webfacet')\r\n print \"WEBFACETid: \", webfacet_id\r\n webfacet = get_object_or_404(WebFacet, id=webfacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n webfacet.image_assets.add(img_ins)\r\n webfacet.save()\r\n return redirect('story_detail', pk=webfacet.story.id)", "def merge_asset(self, other):\n for asset in other.asset:\n asset_name = asset.get(\"name\")\n asset_type = asset.tag\n # Avoids duplication\n pattern = \"./{}[@name='{}']\".format(asset_type, asset_name)\n if self.asset.find(pattern) is None:\n self.asset.append(asset)", "def addTextureToOcc(self):\n\t\tshas = self._getShapes()\n\t\tfname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file ',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'/home')\n\t\tif fname:\n\t\t\tfor sha in shas:\n\t\t\t\t#get texture Path\n\t\t\t\tif not sha.a.texture_Occ.exists:\n\t\t\t\t\toccText = sha.a.texture_Occ.add( dt='string' )\n\t\t\t\tsha.a.texture_Occ.v = fname", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def add_printfacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n printfacet_id = request.POST.get('printfacet')\r\n print \"printFACETid: \", printfacet_id\r\n printfacet = get_object_or_404(PrintFacet, id=printfacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n printfacet.image_assets.add(img_ins)\r\n printfacet.save()\r\n return redirect('story_detail', pk=printfacet.story.id)", "def append_component_images(pldm_fw_up_pkg, image_files):\n for image in image_files:\n with open(image, \"rb\") as file:\n for line in file:\n pldm_fw_up_pkg.write(line)", "def _augment_images(self, images, random_state, parents, hooks):\n 
nb_images = len(images)\n samples = self.p.draw_samples((nb_images,), random_state=random_state)\n for i in sm.xrange(nb_images):\n if samples[i] == 1:\n if self.axis == 1:\n images[i] = np.fliplr(images[i])\n elif self.axis == 0:\n images[i] = np.flipud(images[i])\n self.samples = samples\n return images", "def add_image_face():\n\n try:\n img = decode_image(request.files[\"image\"].read())\n except Exception as e:\n log.error(e)\n data = {\"error\": \"Error while loading image\"}\n return jsonify(data), 500\n save_picture = False\n if request.args.get(\"save\") == \"true\":\n save_picture = True\n \n face_img, _ = processor.extract_faces()\n #TODO\n #1. get several images if possible\n #2. save face_img array as picture if save_picture == True\n #3. pipe face_img array to embedder --> embedder needs to be modified to not from a folder, but from array of face_img\n #4. get the embedder result, insert to a pickle object --> can be section ID, or whatever", "def add_image(self, image_list):\n self.image_filenames_list.append(image_list)", "def overlay_image(image_to_show, image_to_add, directory=None):\n \n direcx = \"/Documents/1.4.5 Images/\"#put a condition pls\n if directory == None:\n directory = os.getcwd()\n print directory\n new_image = PIL.Image.open(directory + direcx + image_to_show)\n new_imager = PIL.Image.open(directory + direcx + image_to_show)\n new_imager.show()\n new_image2 = PIL.Image.open(directory + direcx + image_to_add)\n new_image2.show()\n \n width, height = new_imager.size\n print str(width) + \" \" + str(height)\n \n new_image22 = new_image2.resize((width,height), PIL.Image.ANTIALIAS)\n new_image22.show()\n\n new_imager.paste(new_image22, (0,0), new_image22)\n new_imager.show()\n #cohrt = PIL.Image.blend(new_imager, new_image22, alpha=0.5)\n #cohrt.show() #broked", "def asset_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n act_obj = context.active_object\r\n obj_list = [obj for obj in context.scene.objects if obj.select]\r\n thumbnails_path = get_directory('icons')\r\n is_subsurf = False\r\n view = context.space_data\r\n fx_settings = view.fx_settings\r\n ssao_settings = fx_settings.ssao\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb_list = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n \r\n if len(obj_list) >= 2:\r\n asset_name = AM.group_name\r\n \r\n else:\r\n asset_name = act_obj.name\r\n if act_obj.modifiers:\r\n for mod in act_obj.modifiers:\r\n if mod.type == 'SUBSURF':\r\n is_subsurf = True\r\n \r\n if asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace':\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if multi_object:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\") \r\n \r\n else:\r\n if len(obj_list) >= 2:\r\n row = box.row()\r\n box.label(\"Choose the asset name\")\r\n box.prop(AM, \"group_name\", text = \"\")\r\n \r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"Name\")\r\n \r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = 
box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n \r\n # ---------------------- # \r\n # RENNDER THUMBNAIL #\r\n # ---------------------- #\r\n \r\n if AM.render_type == 'render':\r\n if len(obj_list) == 1 and not is_subsurf:\r\n box.prop(AM, \"add_subsurf\", text = \"Subsurf\")\r\n box.prop(AM, \"add_smooth\", text = \"Smooth\") \r\n \r\n box.prop(AM, \"material_render\", text=\"Addon material\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n elif AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n row = box.row(align=True)\r\n row.prop(view, \"use_matcap\")\r\n if view.use_matcap :\r\n row.prop(AM, \"matcap_options\", text=\"\", icon='TRIA_UP' if AM.matcap_options else 'TRIA_DOWN') \r\n if AM.matcap_options:\r\n row = box.row(align=True)\r\n row.template_icon_view(view, \"matcap_icon\")\r\n row = box.row(align=True)\r\n row.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\r\n if fx_settings.use_ssao:\r\n row.prop(AM, \"ao_options\", text=\"\", icon='TRIA_UP' if AM.ao_options else 'TRIA_DOWN') \r\n if AM.ao_options:\r\n subcol = box.column(align=True)\r\n subcol.prop(ssao_settings, \"factor\")\r\n subcol.prop(ssao_settings, \"distance_max\")\r\n subcol.prop(ssao_settings, \"attenuation\")\r\n subcol.prop(ssao_settings, \"samples\")\r\n subcol.prop(ssao_settings, \"color\")\r\n \r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if len(obj_list) == 1:\r\n if (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n else:\r\n if AM.group_name and (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n \r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if len(obj_list) >= 2:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\")\r\n row = box.row()\r\n 
row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)", "def PImageAdd (in1Image, in2Image, outImage, err, \\\n chkPos=False, factor1=1.0, factor2=1.0):\n ################################################################\n # Checks\n if not Image.PIsA(in1Image):\n raise TypeError,\"in1Image MUST be a Python Obit Image\"\n if not Image.PIsA(in2Image):\n raise TypeError,\"in2Image MUST be a Python Obit Image\"\n if not Image.PIsA(outImage):\n raise TypeError,\"outImage MUST be a Python Obit Image\"\n if not OErr.OErrIsA(err):\n raise TypeError,\"err MUST be an OErr\"\n #\n # Clone output from input 1\n in1Image.Clone (outImage, err)\n # Open images\n Image.POpen (in1Image, Image.READONLY, err)\n Image.POpen (in2Image, Image.READONLY, err)\n Image.POpen (outImage, Image.WRITEONLY, err)\n # Get input descriptor to see how many planes\n in1Desc = in1Image.Desc\n in2Desc = in2Image.Desc\n # Check compatibility\n ImageDesc.PCheckCompat (in1Desc, in2Desc, chkPos=chkPos)\n inDescDict = in1Desc.Dict\n ndim = inDescDict[\"naxis\"]\n inNaxis = inDescDict[\"inaxes\"]\n # Work buffer\n inImageArray = Image.PGetFArray(in1Image)\n ImageBuffer1 = FArray.PCopy(inImageArray, err)\n ImageBuffer2 = FArray.PCopy(inImageArray, err)\n\n # list of planes to loop over (0-rel)\n if (ndim>0) and (inNaxis[2]>0): \n planes = range(inNaxis[2])\n else:\n planes = [0]\n \n # Loop over planes\n for iPlane in planes:\n doPlane = [iPlane+1,1,1,1,1]\n # Get image planes\n Image.PGetPlane (in1Image, ImageBuffer1, doPlane, err)\n Image.PGetPlane (in2Image, ImageBuffer2, doPlane, err)\n\n # Scale\n FArray.PSMul(ImageBuffer1, factor1)\n FArray.PSMul(ImageBuffer2, factor2)\n\n # Add\n FArray.PAdd(ImageBuffer1, ImageBuffer2, ImageBuffer2)\n\n # Write output\n Image.PPutPlane (outImage, ImageBuffer2, doPlane, err)\n\n # end loop over planes\n # Close\n in2Image.Close(err)\n in2Image.Close(err)\n outImage.Close(err)\n # Error?\n if err.isErr:\n OErr.printErrMsg(err, \"Error subtracting Images\")\n # Write history\n in1History = History.History(\"history\", in1Image.List, err)\n in2History = History.History(\"history\", in2Image.List, err)\n outHistory = History.History(\"history\", outImage.List, err)\n # Copy Histories\n outHistory.Open(History.READWRITE, err)\n outHistory.TimeStamp(\" Start Obit PImageAdd\",err)\n outHistory.WriteRec(-1, \"/ PImageAdd Input 1 History\",err)\n outHistory.Close(err)\n info = in1Image.List.Dict\n # FITS? - copy header\n if (\"FileType\" in info) and (info[\"FileType\"][2][0]==0):\n History.PCopyHeader(in1History, outHistory, err)\n #Not needed History.PCopy(in1History, outHistory, err)\n outHistory.Open(History.READWRITE, err)\n outHistory.WriteRec(-1, \"/ \",err)\n outHistory.WriteRec(-1, \"/ ****** PImageAdd Input 2 History\",err)\n outHistory.Close(err)\n info = in2Image.List.Dict\n # FITS? 
- copy header\n if (\"FileType\" in info) and (info[\"FileType\"][2][0]==0):\n History.PCopyHeader(in2History, outHistory, err)\n History.PCopy(in2History, outHistory, err)\n # Add this programs history\n outHistory.Open(History.READWRITE, err)\n outHistory.TimeStamp(\" Start Obit PImageAdd\",err)\n outHistory.WriteRec(-1,OSystem.PGetPgmName()+\" factor1 = \"+str(factor1),err)\n outHistory.WriteRec(-1,OSystem.PGetPgmName()+\" factor2 = \"+str(factor2),err)\n outHistory.Close(err)", "def addSample(self,\n img, \n anns, \n pointcloud=None,\n img_id=None,\n img_format='BGR', \n write_img=True,\n other=None):\n\n # Sanity check\n assert img_format in ['BGR','RGB'], \"Image format not supported.\"\n assert isinstance(anns, (list,)), \"Annotations must be provided in a list.\"\n assert isinstance(img, np.ndarray), \"Image must be a numpy array.\"\n\n if img_id is None:\n img_id = self._getNewImgId()\n else:\n assert isinstance(img_id, int), \"Image ID must be an integer.\"\n assert img_id not in self.imgs, \"Image ID {} already exists.\".format(img_id)\n\n # Create the image info\n heigth, width, _ = img.shape\n img_info = self._createImageInfo(height=heigth, \n width=width, \n img_id=img_id,\n other=other)\n # Update the dataset and index\n self.dataset['images'].append(img_info)\n self.imgs[img_id] = img_info\n \n ## Add the new annotations to dataset\n for ann in anns:\n assert ann['category_id'] in self.cats, \\\n \"Category '{}' does not exist in dataset.\".format(ann['category_id'])\n\n ann['image_id'] = img_id\n if ann['id'] is None:\n ann['id'] = self._getNewAnnId()\n\n # Update the dataset and index\n self.dataset['annotations'].append(ann)\n self.anns[ann['id']] = ann\n self.catToImgs[ann['category_id']].append(ann['image_id'])\n self.imgToAnns[img_id].append(ann)\n\n ## Add the pointcloud to the dataset if applicable\n if pointcloud is not None:\n assert isinstance(pointcloud,(list,)), \"Pointcloud must be a list of points.\"\n\n pc_id = self._getNewPclId()\n pc = {'id': pc_id,\n 'img_id': img_id,\n 'points': pointcloud}\n\n self.dataset['pointclouds'].append(pc)\n self.pointclouds[pc_id] = pc\n self.imgToPc[img_id] = pc\n \n if self.imgs[img_id]['id'] != pc['img_id']:\n raise Exception(\"Image ID not matching the corresponding pointcloud\")\n\n img_path = os.path.join(self.imgs_dir, img_info['file_name'])\n \n if write_img:\n ## Write the image to disk \n if img_format == 'RGB':\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.imwrite(img_path, img)\n \n return img_path", "def augment(self, image):\n pass", "def imageAdd(img, num):\n return myimg.imageAdd(img.tolist(), num)", "def overlay_image(image_to_show, image_to_add, directory=None):\n \n direcx = \"/Documents/1.4.5 Images/\"#put a condition pls\n if directory == None:\n directory = os.getcwd()\n print directory\n new_image = PIL.Image.open(directory + direcx + image_to_show)\n new_imager = PIL.Image.open(directory + direcx + image_to_show)\n new_imager.show()\n new_image2 = PIL.Image.open(directory + direcx + image_to_add)\n new_image2.show()\n width, height = new_imager.size\n print str(width) + \" \" + str(height)\n #print new_image\n #print new_image2\n #if image_to_show == \"\":\n # print_directory_list2()\n # return \"Use one of these\"\n new_image22 = new_image2.resize((width,height), PIL.Image.ANTIALIAS)\n new_image22.show()\n\n new_imager.paste(new_image22, (width, height), new_image22)\n new_imager.show()\n #cohrt = PIL.Image.blend(new_imager, new_image22, alpha=0.5)\n #cohrt.show()", "def 
add_image_to_frame_list(self,startFrame, endFrame, imageName): \n for i in range(startFrame-1, endFrame-1):\n try:\n # image = imageio.imread(imageName)\n im = Image.open(imageName)\n im = im.resize((720, 720))\n self.frame_list.append(im)\n # self.frame_list.append(im)\n\n except:\n print (imageName, \" not found.\")\n # BufferedImage bi= new BufferedImage(320,240,BufferedImage.TYPE_BYTE_GRAY);\n im=self.blank\n self.frame_list.append(im)", "def loaded_image(self, image):\r\n self.loaded_images.append(image)", "def AppendImages(im1, im2):\r\n im1cols, im1rows = im1.size\r\n im2cols, im2rows = im2.size\r\n im3 = Image.new('RGB', (im1cols+im2cols, max(im1rows,im2rows)))\r\n im3.paste(im1,(0,0))\r\n im3.paste(im2,(im1cols,0))\r\n return im3", "def __add__(self, other):\n train = copy.deepcopy(self.train)\n\n for img_path, pid, camid, dsetid in other.train:\n pid += self.num_train_pids\n camid += self.num_train_cams\n dsetid += self.num_datasets\n train.append((img_path, pid, camid, dsetid))\n\n ###################################\n # Note that\n # 1. set verbose=False to avoid unnecessary print\n # 2. set combineall=False because combineall would have been applied\n # if it was True for a specific dataset; setting it to True will\n # create new IDs that should have already been included\n ###################################\n if isinstance(train[0][0], str):\n return ImageDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False\n )\n else:\n return VideoDataset(\n train,\n self.query,\n self.gallery,\n transform=self.transform,\n mode=self.mode,\n combineall=False,\n verbose=False,\n seq_len=self.seq_len,\n sample_method=self.sample_method\n )", "def add_beam_images(self, beam_images, **kwargs):\n for beam_image in beam_images:\n self.add_beam_image(beam_image, **kwargs)", "def add_image(self, file_name, content):\n self.face_fs.put(content, filename=file_name)", "def add(self, other, offset=(0,0)):\n if (isinstance(other, Scene)):\n for item in other.items:\n newitem = item.clone()\n newitem.pan(offset)\n self.items.add(newitem)\n elif (isinstance(other, SvgObject)):\n newitem = other.clone()\n newitem.pan(offset)\n self.items.add(newitem)", "def update(self):\r\n\r\n # Update the vision frames in the system\r\n self._system.update()\r\n\r\n # Create blank PIL images to hold the video streams\r\n layered = PIL.Image.new('RGBA', (400, 400))\r\n stacked = PIL.Image.new('RGBA', (200, 800))\r\n control = PIL.Image.new('RGBA', (600, 800))\r\n\r\n focalpoint = self._system[self._appString[\"device\"].get()].focalpoint()\r\n # print(focalpoint)\r\n\r\n # Get each vision key and vision for the selected device\r\n visionList = [(visionKey, vision) for visionKey, vision in self._system[self._appString[\"device\"].get()]]\r\n\r\n # Loop through each vision in the vision list\r\n for i, (visionKey, vision) in enumerate(visionList):\r\n\r\n # Grab the frames from the vision when it is \"curr\"\r\n frameList = [frame for frameKey, frame in vision if frameKey==self._appString[\"frame\"].get()]\r\n\r\n # Loop through each frame in the frame list\r\n for frame in frameList:\r\n\r\n # Get the properties and turn the image into RGBA\r\n ratio, size = vision.properties()\r\n rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n\r\n # print(rgbFrame.shape)\r\n width, height, channels = rgbFrame.shape\r\n\r\n # Paste the images together in layered\r\n\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (int(400 * ratio), int(400 * 
ratio))))\r\n layered.paste(imgFrame, (int(200 * (1 - ratio)), int(200 * (1 - ratio))))\r\n\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 // width)), int(200 * (1 - ratio) - focalpoint[1] * (200 // height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1)), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200/width) / ratio), int(200 * (1 - ratio) - focalpoint[1] * (200/height) / ratio)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1) / 200), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1) / 200)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (400//width * (1- ratio))), int(200 * (1 - ratio) - focalpoint[1] * (400//height * (1 - ratio)))))\r\n\r\n # Paste the images together in stacked\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (200, 200)))\r\n stacked.paste(imgFrame, (0, 200 * i))\r\n\r\n # Add the stacked image to the canvas\r\n self._pilFrames[\"stacked\"] = PIL.ImageTk.PhotoImage(image=stacked)\r\n self._appCanvas[\"stacked\"].create_image(100, 0, image=self._pilFrames[\"stacked\"], anchor=tkinter.NW)\r\n\r\n # Add the layered image to the canvas\r\n self._pilFrames[\"layered\"] = PIL.ImageTk.PhotoImage(image=layered)\r\n self._appCanvas[\"layered\"].create_image(0, 0, image=self._pilFrames[\"layered\"], anchor=tkinter.NW)\r\n\r\n # Add the control image to the canvas\r\n imgFrame = cv2.cvtColor(self._system[self._appString[\"device\"].get()][self._appString[\"vision\"].get()][self._appString[\"frame\"].get()], cv2.COLOR_BGR2RGBA)\r\n control = PIL.Image.fromarray(cv2.resize(imgFrame, (600, 600)))\r\n self._pilFrames[\"control\"] = PIL.ImageTk.PhotoImage(image=control)\r\n self._appCanvas[\"control\"].create_image(100, 90, image=self._pilFrames[\"control\"], anchor=tkinter.NW)\r\n\r\n # Continue to update with a delay of 15\r\n self.after(15, self.update)", "def _addClicked(self):\n volume = self.volume()\n if volume is not None:\n dataRange = volume.getDataRange()\n if dataRange is None:\n dataRange = 0., 1.\n\n volume.addIsosurface(\n numpy.mean((dataRange[0], dataRange[-1])),\n '#0000FF')", "def add(input_a, input_b):\n add_comp = input_b.duplicate()\n\n ImageBufAlgo.add(add_comp, input_a, input_b)\n\n if add_comp.has_error:\n print \"Error merging adding:\", add_comp.geterror()\n\n return add_comp" ]
[ "0.60004723", "0.58277434", "0.5797199", "0.5697797", "0.5677714", "0.5672392", "0.56353205", "0.5634928", "0.5560553", "0.5501598", "0.5486434", "0.54514974", "0.53752357", "0.5367797", "0.53480774", "0.5342311", "0.53182214", "0.5294023", "0.5289791", "0.528354", "0.5255328", "0.5244117", "0.5241142", "0.5196688", "0.51882666", "0.5150562", "0.51102287", "0.510907", "0.51064694", "0.5105255" ]
0.63307816
0
Given list of ``Tag`` instances, creates a string representation of the list suitable for editing by the user, such that submitting the given string representation back without changing it will give the same list of tags. Tag names which contain DELIMITER will be double quoted. Adapted from Taggit's _edit_string_for_tags() Ported from Jonathan Buchanan's `djangotagging
def join_tags(tags): names = [] delimiter = settings.TAGGIT_SELECTIZE['DELIMITER'] for tag in tags: name = tag.name if delimiter in name or ' ' in name: names.append('"%s"' % name) else: names.append(name) return delimiter.join(sorted(names))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_string_for_tags(tags):\r\n names = []\r\n use_commas = False\r\n for tag in tags:\r\n name = tag.name\r\n if u',' in name:\r\n names.append('\"%s\"' % name)\r\n continue\r\n elif u' ' in name:\r\n if not use_commas:\r\n use_commas = True\r\n names.append(name)\r\n if use_commas:\r\n glue = u', '\r\n else:\r\n glue = u' '\r\n return glue.join(names)", "def tag_list(self, obj): # pylint: disable=no-self-use\n return u\", \".join(o.name for o in obj.tags.all())", "def tag_list(self, obj):\n logger.debug('Called Tag_list in admin: %s', self)\n return u\", \".join(o.name for o in obj.tags.all())", "def formatted_tag(tag_ids):\n if tag_ids is None:\n return ''\n else:\n ids = []\n for tag_id in tag_ids.split(','):\n ids.append(Tag.id == int(tag_id))\n tags = Tag.query.filter(or_(*ids)).all()\n if tags is None:\n return ''\n else:\n return 'with tags: ' + ', '.join([tag.name.title() for tag in tags])", "def create_list_string(list_):\n return f\"[{' '.join(list_)}]\"", "def format_list(list):\n return \" \".join(str(tok) for tok in list)", "def change_tags_format(page_tags):\n\treturn [tags.replace('\\n', ', ') if not tags == None else None for tags in page_tags]", "def taglist(self):\n tags = []\n for tag in self.tags:\n tags.append(tag.title)\n return ', '.join(map(str, tags))", "def create_tags(tags_list):\n\n Tags.create_multiple(tags_list)", "def generate_tag_string(post_id, tags=[], new=False):\n if new:\n tags.append('new')\n\n post = get_post(post_id)\n\n tags2 = map(lambda x: x.decode(\"utf8\", \"ignore\"), chain(post.get('tags'), tags))\n\n return '{}: {}'.format(post.get('author'), ', '.join(tags2))", "def _tuple_to_cpppo_tags(cls, tags, serializer=':'):\n\n tags_string = ''\n for tag in tags:\n tags_string += str(tag[0])\n for field in tag[1:-1]:\n tags_string += serializer\n # print 'DEBUG _tuple_to_cpppo_tags field: ', field\n tags_string += str(field)\n\n tags_string += '='\n tags_string += str(tag[-1])\n tags_string += ' '\n # print('DEBUG enip server tags_string: ', tags_string)\n\n return tags_string", "def listToStringFormat(self, list) ->str:\n string = ''\n for element in list:\n string = string + str(element) + \"\\n\"\n return string", "def tags_nice(self):\n ret = ''\n\n if not self.tags.all():\n return ret\n\n for t in self.tags.all():\n ret += str(t) + \", \"\n\n return ret[:-2]", "def save_tags(self, post_getlist_tags):\n cleaned_tags = []\n for name in post_getlist_tags:\n if Tag.objects.filter(name=name).exists():\n tag = Tag.objects.filter(name=name).first()\n cleaned_tags.append(tag)\n else:\n if bool(name.strip()):\n tag = Tag.objects.create(name=name)\n tag.save()\n cleaned_tags.append(tag)\n return cleaned_tags", "def _format_list_for_query(input_list):\n return (\n \", \".join(input_list).replace(\" \", \"\").replace(\"'\", \"\").replace(\",\", \"%2C\")\n )", "def __str__(self):\n out = str(self.tag_pairs)\n if self.comment:\n out += \"{\" + self.comment + \"} \"\n out += self.format_body()\n return out", "def special_tags_to_text(self):\n if (self.windtag is None and self.tornadotag is None and\n self.hailtag is None and self.tornadodamagetag is None and\n self.waterspouttag is None and not self.flood_tags):\n return \"\"\n\n parts = []\n if self.tornadotag is not None:\n parts.append(\"tornado: %s\" % (\n self.tornadotag))\n if self.waterspouttag is not None:\n parts.append(\"waterspout: %s\" % (\n self.waterspouttag))\n if self.tornadodamagetag is not None:\n parts.append(\"tornado damage threat: %s\" % (\n self.tornadodamagetag))\n if 
self.windtag is not None:\n parts.append(\"wind: %s%s %s\" % (\n self.winddirtag.replace(\">\", \"&gt;\").replace(\"<\", \"&lt;\"),\n self.windtag, self.windtagunits))\n if self.hailtag is not None:\n parts.append(\"hail: %s%s IN\" % (\n self.haildirtag.replace(\">\", \"&gt;\").replace(\"<\", \"&lt;\"),\n self.hailtag))\n for k, v in self.flood_tags.items():\n parts.append(\"%s: %s\" % (k.lower(), v.lower()))\n return \" [\" + \", \".join(parts) + \"] \"", "def _list2str(self, data, delimiter=\",\", classify=lambda x: x):\n res = \"\"\n for i in range(len(data)):\n res += classify(data[i])\n if i != len(data) - 1:\n res += delimiter + \" \"\n return res", "def _format_list(param_list: Iterable[Any]):\n fmt_list = []\n for item in param_list:\n if isinstance(item, str):\n fmt_list.append(f\"'{item}'\")\n else:\n fmt_list.append(f\"{item}\")\n return \",\".join(fmt_list)", "def set_tags(self, tags):\n self.tags = []\n for tag in [t.strip() for t in tags.split(', ')]:\n self.tags.append(Tag(title=tag))", "def list_to_sentence(self, list):\n sentence = \"\\n\"\n for i in range(0, len(list)):\n if i == len(list) - 1:\n sentence += \"'\" + list[i] + \"'\"\n else:\n sentence += \"'\" + list[i] + \"'\\n\"\n return sentence", "def tag_list(context, addon, dev_tags=None, user_tags=None):\n if not dev_tags and not user_tags:\n return ''\n if not dev_tags:\n dev_tags = []\n if not user_tags:\n user_tags = []\n\n c = {\n 'request': context['request'],\n 'addon': addon,\n 'dev_tags': dev_tags,\n 'user_tags': user_tags,\n }\n t = env.get_template('tags/tag_list.html').render(**c)\n return jinja2.Markup(t)", "def surround(inp):\r\n if inp is list:\r\n for i in range(len(inp)):\r\n inp[i] = \"'\"+str(inp[i])+\"'\"\r\n return inp\r\n return \"'\"+str(inp)+\"'\"", "def tag_strings(self):\n return [tag.tag_text for tag in self.tags.all()]", "def transform_tags(self, instance):\n return instance.tags.split(',')", "def generate_sql_update_set_formatted_string(keys_list: List[str]):\n\n return \", \".join([f\"{key} = :{key}\" for key in keys_list])", "def list_string(join_list):\n joined_list = '[{}]'.format(join_list, join_list)\n return joined_list", "def list_to_str(a_list):\n new_str = \"\"\n for item in a_list:\n item = str(item).replace(\"\\'\", \"\\'\\'\")\n if new_str:\n new_str += \", '\" + item + \"'\"\n else:\n new_str = \"'\" + item + \"'\"\n return new_str", "def _to_string(self, lst, indent=''):\n result = []\n for elem in lst:\n if isinstance(elem, list):\n if len(elem) > 0:\n result.append('\\n')\n result.append(self._to_string(elem, indent + ' '))\n elif isinstance(elem, float):\n result.append('%.6f' % elem)\n elif isinstance(elem, basestring):\n for char in ('(', ')', ' '):\n if char in elem:\n result.append('\"%s\"' % elem)\n break\n else:\n result.append(str(elem))\n elif elem is not None:\n result.append(str(elem))\n return indent + '(' + ' '.join(result) + ')\\n' + indent", "def _convertListToString(self, list_of_objects):\n return (';').join(list_of_objects)" ]
[ "0.74842143", "0.61348253", "0.60322964", "0.60187674", "0.5813906", "0.5634633", "0.5617626", "0.55532956", "0.55511045", "0.5493373", "0.54873353", "0.5473861", "0.5438646", "0.54261506", "0.54241925", "0.53745574", "0.53239423", "0.52918303", "0.52737874", "0.5259001", "0.5248354", "0.52404624", "0.52358544", "0.5226311", "0.5221614", "0.5208282", "0.51979834", "0.5193386", "0.5188725", "0.5182856" ]
0.64570826
1
Goes to form with AMOUNT_OF_COURSES text boxes to input courses to schedule, form action=/schedules, method=POST
def how_many_post(): default_courses = ['CS 442', 'CS 392', 'CS 519', 'MA 331'] resp = make_response(render_template( "sched_entry.html", quantity=AMOUNT_OF_COURSES, title='Scheduler', default_vals=default_courses)) resp.set_cookie('course_combos', '', expires=0) return resp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def my_form_post():\n text_list = []\n #make list of form inputs\n for i in range(1, AMOUNT_OF_COURSES + 1):\n form_num = 'text' + str(i)\n text_list.append(request.form[form_num])\n #remove items with no input, generate string of courses\n final_list = []\n for text in text_list:\n if not text == \"\":\n final_list.append(text)\n courses_str = \"\"\n for course in final_list[:-1]:\n courses_str += (str(course) + ',')\n courses_str += str(final_list[-1])\n courses_str = courses_str.upper()\n #turn string of courses entered into list\n c_list = courses_str.split(',')\n #get the schedules\n #print \"\\nCourse list:\"\n #print str(c_list) + \"\\n\"\n my_combos = scheduler.schedule(c_list)\n resp = make_response(redirect('/sched'))\n resp.set_cookie('course_combos', '', expires=0)\n resp.set_cookie('course_combos', json.dumps(my_combos))\n return resp", "def scheduleMe(page):\n querystring_combos = request.cookies.get('course_combos')\n if not querystring_combos:\n return render_template('404.html'), 404\n combos = json.loads(querystring_combos)\n #print querystring_combos\n\n count = len(combos)\n pagination_needed = count > PER_PAGE\n this_page_combos = combos\n if pagination_needed:\n this_page_combos = getCombosForPage(page, PER_PAGE, count, combos)\n last_page = isLastPage(page, count, PER_PAGE)\n if not this_page_combos and page != 1:\n return render_template('404.html'), 404\n return render_template(\"sched.html\",\n title=\"Scheduler\",\n combos=this_page_combos,\n combo_amount=str(count),\n page=page,\n last_page=last_page,\n pagination=pagination_needed)", "def take_monthly_agreements():\n\n #collect data from form template\n chore_id = request.form.get(\"chore_id\")\n date_monthly = request.form.get(\"date_monthly\")\n\n #add agreements to database\n dbwrangler.add_commitment(date_monthly, chore_id)\n\n #redirect to form for further agreements\n return redirect(\"/takeachore\")", "def cpt_calc():\n\n if request.method == \"POST\":\n testmin = float(request.form.get(\"techTestMin\"))\n scoremin = float(request.form.get(\"techScoreMin\"))\n computerTestCheckBox = request.form.get(\"computer-test-checkbox\")\n\n # If the \"Computer Testing\" prompt is selected, indicate as such\n if computerTestCheckBox:\n compCheckBox = \"✓\"\n else:\n compCheckBox = \"\"\n\n testhr = testmin / 60\n scorehr = scoremin / 60\n totalmin = testmin + scoremin\n totalhr = totalmin / 60\n\n # Calculate time for 96138 (\"eight\") and work towards calculating 96139 (\"nine\")\n eight_min = 30\n remaining = totalmin - 30\n\n # Calcuate the technician's remaining time divided by 30 to determine whether the person meets the cutoff for >50% of unit 96138\n remaining_30 = remaining / 30\n\n # Round the whole number down\n remaining_floor = math.floor(remaining_30)\n fractional, whole = math.modf(remaining_30)\n\n # Cutoff is set at 16 out of 30 minutes\n cutoff = 0.53\n\n # Add an extra unit to 96139 if user input meets the cutoff\n if fractional >= cutoff:\n extra = 1\n else:\n extra = 0\n\n if eight_min == 30:\n eight = 1\n\n nine = remaining_floor + extra\n\n return render_template('/index.html', techTestMin=testmin, techScoreMin=scoremin, techTestHr=round(testhr, 2),\n testScoreHr=round(scorehr, 2),techTotalHr=round(totalhr, 2), techTotalMin=round(totalmin, 2),\n eight=eight, nine=nine, neurCheckBox=compCheckBox)\n else:\n return render_template(\"index.html\")", "def Register(self,schedule):\n # oscar login page\n oscar = \"https://oscar.gatech.edu/pls/bprod/twbkwbis.P_GenMenu?name=bmenu.P_StuMainMnu\"\n \n 
#mechanize boilerplate\n br = mechanize.Browser()\n cj = cookielib.LWPCookieJar()\n br.set_cookiejar(cj)\n br.set_handle_equiv(True)\n br.set_handle_gzip(True)\n br.set_handle_redirect(True)\n br.set_handle_referer(True)\n br.set_handle_robots(False)\n br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)\n br.addheaders = [(\"User-agent\", \"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1\")]\n\n #open oscar sign-in page and grab login form\n r = br.open(oscar)\n br.form = list(br.forms())[0]\n br[\"sid\"] = self._id\n br[\"PIN\"] = self.pin\n res = br.submit()\n\n #initial landing page once signed into oscar\n br.open(\"https://oscar.gatech.edu/pls/bprod/twbkwbis.P_GenMenu?name=bmenu.P_RegMnu\")\n\n #jump to registration sub menu\n br.open(\"https://oscar.gatech.edu/pls/bprod/bwskfreg.P_AltPin\")\n\n #the year selection form is the second(hence 1st index)\n #defaults to the current year so we can just submit\n br.form = list(br.forms())[1]\n br.submit()\n\n #now we are at the registration page\n #the text fields are in the second form\n br.form = list(br.forms())[1]\n fields = []\n\n #the text fields all have the same name and type\n #so we'll just insert them into a list \n for control in br.form.controls:\n if control.type == \"text\" and control.name == \"CRN_IN\":\n fields.append(control)\n\n #set each text fields equal to a class in the schedule\n for field, course in zip(fields, schedule):\n field.value = str(course)\n \n response = br.submit()\n registered_classes = self.EnrolledClasses(response)\n return registered_classes", "def submit_headcount():\n db = get_db()\n if request.form.get(\"date\") is None or request.form.get(\"time\") is None:\n session[\"last_error\"] = (\n \"Submitted headcounts must have a time \"\n \"associated with them, and the request \"\n \"you just made didn't.\"\n )\n return redirect(url_for(\"error\"))\n provided_time = try_strptime(\n request.form[\"date\"] + \"T\" + request.form[\"time\"], \"%Y-%m-%dT%H:%M:%S\"\n )\n if provided_time is None:\n provided_time = try_strptime(\n request.form[\"date\"] + \"T\" + request.form[\"time\"], \"%Y-%m-%dT%H:%M\"\n )\n if provided_time is None:\n session[\"last_error\"] = \"The headcount time was formatted improperly.\"\n return redirect(url_for(\"error\"))\n current_time = datetime.datetime.now()\n if current_time - provided_time > datetime.timedelta(hours=2):\n session[\n \"last_error\"\n ] = \"You can't submit a headcount for times more than two hours in the past.\"\n return redirect(url_for(\"error\"))\n # Copy the request arguments\n counts = dict(request.form)\n # Delete the ones that I don't need\n del counts[\"date\"]\n del counts[\"time\"]\n del counts[\"submit\"]\n if \"reverse-inputs\" in counts.keys():\n del counts[\"reverse-inputs\"]\n provided_rooms = set(counts.keys())\n configured_rooms = {room.name for room in app.config[\"HC_CONFIG\"].values()}\n if provided_rooms != configured_rooms:\n extraneous = provided_rooms - configured_rooms\n missing = configured_rooms - provided_rooms\n session[\"last_error\"] = (\n \"You provided extraneous rooms %s and did \"\n \"not include required rooms %s.\" % (extraneous, missing)\n )\n return redirect(url_for(\"error\"))\n badkeys = []\n oversizekeys = []\n # Loop over all of the provided rooms\n for key, value in counts.items():\n # Interpret missing values as 0, as per [se.rit.edu #25]\n if value is \"\":\n value = [\"\"]\n # Value is actually a list, so just take the last item out of 
it\n value = value[-1:][0]\n # Interpret missing values as 0, as per [se.rit.edu #25]\n if value == \"\":\n value = \"0\"\n # Update the dictionary, fixes [se.rit.edu #95]\n counts[key] = [value]\n # If it's not numeric,\n if not value.isdigit():\n # Mark the key as bad\n badkeys.append(key)\n elif int(value) > app.config[\"HC_CONFIG\"][key].max_occupancy:\n # If the value is larger than the value configured in the\n # config file, mark the key as too big\n oversizekeys.append(key)\n # If the length of the badkeys list is non-zero, throw back an error\n if len(badkeys) > 0:\n session[\n \"last_error\"\n ] = \"Your request had non-numeric values for these rooms: \" + str(badkeys)\n return redirect(url_for(\"error\"))\n # If the length of the oversize keys list is non-zero, throw back an\n # error\n if len(oversizekeys) > 0:\n session[\"last_error\"] = (\n \"The application isn't configured to allow that many people in these rooms: %s\"\n % (str(oversizekeys),)\n )\n return redirect(url_for(\"error\"))\n # Get the requesting user from the database\n user = db.get_user_by_name(session[\"username\"])\n # Give those arguments to the database\n db.add_headcount(user[\"id\"], current_time, provided_time, counts)\n return redirect(url_for(\"show_main\"))", "def createCourse():\n\tif request.method == 'POST':\n\t\tcname = request.form['cname']\n\t\tcourseterm = request.form['courseterm']\n\t\tcoursepoint = request.form['coursepoint']\n\t\tcoursetype = request.form['coursetype']\t\n\t\tcourseyear = request.form['courseyear']\t\n\t\ttname = request.form['tname']\t\n\t\terror = None\n\n\t\tif not cname:\n\t\t\terror = 'Course name is required.'\n\t\telif not courseterm:\n\t\t\terror = 'Course term is required'\n\t\telif not courseterm:\n\t\t\terror = 'Course point is required'\n\n\t\tif error is not None:\n\t\t\tflash(error)\n\t\telse:\n\t\t\tdb = get_db()\n\t\t\tcur = db.cursor()\n\t\t\tcur.execute(\n\t\t\t\t'INSERT INTO course (cname, courseyear, coursetype, courseterm, coursepoint, tname)'\n\t\t\t\t' VALUES (%s, %s, %s, %s, %s, %s)',\n\t\t\t\t(cname, courseyear, coursetype, courseterm, coursepoint, tname)\n\t\t\t)\n\t\t\tdb.commit()\n\t\t\treturn redirect(url_for('info.index'))\n\n\treturn render_template('info/createCourse.html')", "def take_weekly_agreements():\n\n #collect data from form template\n chore_id = request.form.get(\"chore_id\")\n daysagreed = request.form.get(\"daysagreed\")\n daysagreed = daysagreed.split(\"|\")\n\n #no more unicode\n days_agreed = [str(i) for i in daysagreed]\n\n #recast agreements from T/F to days of the week (by name)\n days_agreed = [days_of_the_week[i] for i in range(7) if days_agreed[i] == 'true']\n\n #format list of daily agreements for addition to database (string)\n days_agreed = \"|\".join(days_agreed) \n\n #save to database\n dbwrangler.add_commitment(days_agreed, chore_id)\n\n #redirect to form for further agreements\n return redirect(\"/takeachore\")", "def add_schedule():\n if request.method == 'GET':\n groups = list(map(lambda x: x.json(), GroupModel.query.all()))\n if len(groups) == 0 :\n \n flash(\"It seems you have not added the @ConsumerSurveyorBot to any group of any channel. 
Please, add the the bot to any group to schedule the message for the same.\")\n return redirect(url_for('dashboard.index'))\n return render_template('dashboard/add_schedule.html', groups = groups)\n\n if request.method == 'POST':\n\n error = None\n schedule = parse(request.form['schedule']+'+05:30')\n if schedule < datetime.datetime.now(pytz.timezone('Asia/Kolkata')):\n error = 'You can not schedule a message in past'\n if error is not None:\n flash(error)\n else:\n print(request.form)\n job = schedule_msg(request.form['message'],\n schedule, request.form['group_id'])\n message = MessageModel( \n job.id, request.form['name'], request.form['message'], request.form['schedule']+'+05:30', request.form['group_id'] )\n message.save_to_db()\n return redirect(url_for('dashboard.index')) \n return render_template('dashboard/add_schedule.html')", "def scores_post_request():\n date = request.form[\"date\"]\n print(date)\n return render_score_page(\"scores.html\", date, date)", "def submit_main_edit():\n db = get_db()\n\n # are we updating the headcounts or deleting them?\n if \"delete\" in request.form.keys():\n for key in request.form.keys():\n key_split = key.split(\"-\")\n if len(key_split) < 2:\n continue\n if key_split[0] != \"delete\":\n continue\n if request.form.get(key) != \"on\":\n continue\n count_id = key_split[1]\n try:\n count_id = int(count_id)\n if not db.can_modify(session[\"username\"], count_id):\n session[\"last_error\"] = \"You cannot delete that headcount.\"\n return redirect(url_for(\"error\"))\n db.del_headcount(count_id)\n except ValueError:\n continue\n elif \"save\" in request.form.keys():\n updates = {}\n for key, val in request.form.items():\n key_split = key.split(\"-\")\n if len(key_split) <= 1:\n continue\n count_id = key_split[1]\n try:\n count_id = int(count_id)\n if count_id not in updates.keys():\n updates[count_id] = {\n \"submit_time\": datetime.datetime.now(),\n \"entered_date\": \"\",\n \"entered_time\": \"\",\n \"rooms\": {},\n }\n if key_split[0] == \"date\":\n updates[count_id][\"entered_date\"] = val\n elif key_split[0] == \"time\":\n updates[count_id][\"entered_time\"] = val\n else:\n rm_max = app.config[\"HC_CONFIG\"][key_split[0]].max_occupancy\n if int(val) > rm_max:\n session[\"last_error\"] = (\n \"Room %s can't have more than %d people in it.\"\n % (key_split[0], rm_max)\n )\n return redirect(url_for(\"error\"))\n updates[count_id][\"rooms\"][key_split[0]] = int(val)\n except ValueError:\n continue\n for key in updates.keys():\n if not db.can_modify(session[\"username\"], key):\n session[\"last_error\"] = \"You cannot edit that headcount.\"\n return redirect(url_for(\"error\"))\n t = try_strptime(\n updates[key][\"entered_date\"] + \"T\" + updates[key][\"entered_time\"],\n \"%Y-%m-%dT%H:%M:%S\",\n )\n if t is None:\n provided_time = try_strptime(\n updates[key][\"entered_date\"] + \"T\" + updates[key][\"entered_time\"],\n \"%Y-%m-%dT%H:%M\",\n )\n if t is None:\n session[\"last_error\"] = \"The headcount time was formatted improperly.\"\n return redirect(url_for(\"error\"))\n updates[key][\"entered_time\"] = t\n db.edit_headcount(updates[key], key)\n return redirect(url_for(\"show_main_edit\"))", "def new_job(request):\n sis_account_id = request.LTI[\"custom_canvas_account_sis_id\"]\n terms, _current_term_id = get_term_data_for_school(sis_account_id)\n school_id = sis_account_id.split(\":\")[1]\n canvas_site_templates = get_canvas_site_templates_for_school(school_id)\n potential_course_sites_query = None\n departments = []\n course_groups = []\n 
selected_term_id = None\n selected_course_group_id = None\n selected_department_id = None\n\n # Only display the Course Groups dropdown if the tool is launched in the COLGSAS sub-account\n if school_id == 'colgsas':\n try:\n course_groups = get_course_group_data_for_school(sis_account_id, include_ile_sb=False)\n except Exception:\n logger.exception(f\"Failed to get course groups with sis_account_id {sis_account_id}\")\n # For all other schools, display just the Departments dropdown\n else:\n try:\n departments = get_department_data_for_school(sis_account_id, include_ile_sb=False)\n except Exception:\n logger.exception(f\"Failed to get departments with sis_account_id {sis_account_id}\")\n\n logging_dept_cg_text = ' and no selected department or course group'\n if request.method == \"POST\":\n selected_term_id = request.POST.get(\"courseTerm\", None)\n selected_course_group_id = request.POST.get(\"courseCourseGroup\", None)\n selected_department_id = request.POST.get(\"courseDepartment\", None)\n\n logging_dept_cg_text = f' and course group ID {selected_course_group_id}' if selected_course_group_id \\\n else f' and department ID {selected_department_id}' if selected_department_id \\\n else ' and no selected department or course group.'\n logger.debug(f'Retrieving potential course sites for term ID '\n f'{selected_term_id}{logging_dept_cg_text}', extra={\"sis_account_id\": sis_account_id,\n \"school_id\": school_id,\n })\n\n # Retrieve all course instances for the given term_id and account that do not have Canvas course sites\n # nor are set to be fed into Canvas via the automated feed\n potential_course_sites_query = get_course_instance_query_set(\n selected_term_id, sis_account_id\n ).filter(canvas_course_id__isnull=True,\n sync_to_canvas=0,\n bulk_processing=0,\n term__term_id=selected_term_id)\n\n # Filter potential_course_sites_query by course group.\n if selected_course_group_id and selected_course_group_id != '0':\n potential_course_sites_query = potential_course_sites_query.filter(course__course_group=selected_course_group_id)\n # Filter potential_course_sites_query by department.\n elif selected_department_id and selected_department_id != '0':\n potential_course_sites_query = potential_course_sites_query.filter(course__department=selected_department_id)\n\n # TODO maybe better to use template tag unless used elsewhere?\n # TODO cont. 
this may be included in a summary generation to be displayed in page (see wireframe and Jira ticket)\n potential_course_site_count = (\n potential_course_sites_query.count() if potential_course_sites_query else 0\n )\n\n logger.debug(f'Retrieved {potential_course_site_count} potential course sites for term '\n f'{selected_term_id}{logging_dept_cg_text}', extra={\"sis_account_id\": sis_account_id,\n \"school_id\": school_id,\n })\n\n context = {\n \"terms\": terms,\n \"potential_course_sites\": potential_course_sites_query,\n \"potential_site_count\": potential_course_site_count,\n \"canvas_site_templates\": canvas_site_templates,\n \"departments\": departments,\n \"course_groups\": course_groups,\n 'selected_term_id': selected_term_id,\n 'selected_course_group_id': selected_course_group_id,\n 'selected_department_id': selected_department_id,\n 'canvas_url': settings.CANVAS_URL,\n }\n return render(request, \"bulk_site_creator/new_job.html\", context=context)", "def post(self, request, *args, **kwargs):\r\n\t\tself.object = get_object_or_404(Cruise, pk=self.kwargs.get('pk'))\r\n\t\tif not self.object.is_editable_by(request.user):\r\n\t\t\traise PermissionDenied\r\n\t\tform_class = self.get_form_class()\r\n\t\tform_class.user = request.user\r\n\t\tform = self.get_form(form_class)\r\n\t\tcruiseday_form = CruiseDayFormSet(self.request.POST, instance=self.object)\r\n\t\tparticipant_form = ParticipantFormSet(self.request.POST, instance=self.object)\r\n\t\tdocument_form = DocumentFormSet(data=request.POST, files=request.FILES, instance=self.object)\r\n\t\tequipment_form = EquipmentFormSet(self.request.POST, instance=self.object)\r\n\t\tinvoice_form = InvoiceFormSet(self.request.POST, instance=self.object)\r\n\r\n\t\t# check if all our forms are valid, handle outcome\r\n\t\tif (form.is_valid() and cruiseday_form.is_valid() and participant_form.is_valid() and document_form.is_valid() and equipment_form.is_valid() and invoice_form.is_valid()):\r\n\t\t\treturn self.form_valid(form, cruiseday_form, participant_form, document_form, equipment_form, invoice_form)\r\n\t\telse:\r\n\t\t\treturn self.form_invalid(form, cruiseday_form, participant_form, document_form, equipment_form, invoice_form)", "def edit_current_schedule(current_courses, full_courses):\n\n days_list = ['mon', 'tues', 'wed','thurs', 'fri', 'sat', 'sun']\n valid_grades= [\"A\", \"A-\",\"B+\",\"B\",\"B-\",\"C+\",\"C\",\"C-\",\"D\",\"F\"]\n\n clear_screen()\n while True:\n try:\n print(\"Here are your current classes\")\n for val in current_courses:\n print(val)\n choice = int(input(\"Please select which one you'd like to edit:\\n1.Days\\n2.Time\\n3.Grade\\n4.Save and Quit \"))\n if choice !=4:\n class_code = input(\"Which class? \")\n if choice == 1:\n days = input(\"Please input days using style: mon,tues,wed,thurs,fri,sat,sun. 
Separate by comma \").split(',')\n for val in days:\n if val not in days_list:\n print(\"Invalid option\")\n days = current_courses[class_code][0]\n current_courses[class_code][0] = days\n else:\n current_courses[class_code][0] = days\n elif choice == 2:\n start_time = int(input(\"Using format 2400, input start time: \"))\n end_time = int(input(\"Using format 2400, input end time: \"))\n current_courses[class_code][1] = start_time\n current_courses[class_code][2] = end_time\n continue\n elif choice == 3:\n grade = input(\"Update current letter grade: \")\n if grade not in valid_grades:\n print(\"Invalid input\")\n grade = current_courses[class_code][3]\n current_courses[class_code][3] = grade.upper()\n full_courses[class_code][1] = grade.upper()\n else:\n current_courses[class_code][3] = grade.upper()\n full_courses[class_code][1] = grade.upper()\n continue\n else:\n with open('current_courses.json', 'w') as fp:\n json.dump(current_courses, fp)\n with open('full_courses.json', 'w') as f_file:\n json.dump(full_courses, f_file)\n break\n except ValueError:\n print(\"Invalid input.\")\n continue\n return 0", "def test_rate_entry_courses(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def submit_app_form():\n\n firstname = request.form.get(\"fstname\")\n lastname = request.form.get(\"lstname\")\n salary = request.form.get(\"salaryreq\")\n position = request.form.get(\"job\")\n\n return render_template(\"application-response.html\",\n fstname=firstname,\n lstname=lastname,\n salaryreq=salary,\n job=position,\n )", "def post(self):\n if self.data.GET.get('cbox'):\n cbox = True\n else:\n cbox = False\n\n if self.validate():\n self.redirect.program()\n self.redirect.to('edit_gci_timeline', validated=True, cbox=cbox)\n else:\n self.get()", "def add():\n add_form = AddCourseForm(request.form)\n if request.method == 'POST':\n Course.new(name=add_form.name.data,\n owner_id=g.user.id,\n visibility=add_form.visibility.data,\n term=add_form.term.data)\n flash('New course added')\n return redirect(url_for('courses.index'))\n return render_template('courses/add.html', add_form=add_form)", "def requestSubmitted(request):", "def form_valid(self, form, cruiseday_form, participant_form, document_form, equipment_form, invoice_form):\r\n\r\n\t\tis_submitting = False\r\n\r\n\t\tCruise = form.save(commit=False)\r\n\t\t\r\n\t\tCruise.leader = self.request.user\r\n\t\ttry:\r\n\t\t\tCruise.organization = Cruise.leader.userdata.organization\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\tform.cleaned_data[\"leader\"] = self.request.user\r\n\r\n\t\tif hasattr(self, \"request\"):\r\n\t\t\t# check whether we're saving or submitting the form\r\n\t\t\tif self.request.POST.get(\"save_cruise\"):\r\n\t\t\t\tCruise.is_submitted = False\r\n\t\t\t\tmessages.add_message(self.request, messages.SUCCESS, mark_safe('Cruise successfully saved. 
You may edit and submit it on the \"<a href=\"/user/cruises/unsubmitted/\">Unsubmitted Cruises</a>\" page.'))\r\n\t\t\telif self.request.POST.get(\"submit_cruise\"):\r\n\t\t\t\tis_submitting = True\r\n\r\n\t\t\t\tcruiseday_form = CruiseDayFormSet(self.request.POST)\r\n\t\t\t\tparticipant_form = ParticipantFormSet(self.request.POST)\r\n\t\t\t\tcruise_days = cruiseday_form.cleaned_data\r\n\t\t\t\tcruise_participants = participant_form.cleaned_data\r\n\t\t\t\tcruise_invoice = invoice_form.cleaned_data\r\n\t\t\t\tif (Cruise.is_submittable(user=self.request.user, cleaned_data=form.cleaned_data, cruise_invoice=cruise_invoice, cruise_days=cruise_days, cruise_participants=cruise_participants)):\r\n\t\t\t\t\tCruise.is_submitted = True\r\n\t\t\t\t\tCruise.submit_date = timezone.now()\r\n\t\t\t\t\tmessages.add_message(self.request, messages.SUCCESS, mark_safe('Cruise successfully submitted. You may track its approval status on the \"<a href=\"/user/cruises/submitted/\">Submitted Cruises</a>\" page.'))\r\n\t\t\t\telse:\r\n\t\t\t\t\tCruise.is_submitted = False\r\n\t\t\t\t\tmessages.add_message(self.request, messages.ERROR, mark_safe('Cruise could not be submitted:' + str(Cruise.get_missing_information_string(cleaned_data=form.cleaned_data, cruise_invoice=cruise_invoice, cruise_days=cruise_days, cruise_participants=cruise_participants)) + '<br>If you decide to do this later, you can get back to this cruise to review and add any missing or invalid information on the \"<a href=\"/user/cruises/unsubmitted/\">Unsubmitted Cruises</a>\" page.'))\r\n\t\t\telse:\r\n\t\t\t\tCruise.is_submitted = False\r\n\t\t\t\tmessages.add_message(self.request, messages.ERROR, mark_safe('Cruise could not be submitted: We were unable to determine the action you wished to take on submit. 
Please try to submit again below.'))\r\n\r\n\t\tCruise.save()\r\n\t\tself.object = form.save()\r\n\t\tcruiseday_form.instance = self.object\r\n\t\tcruiseday_form.save()\r\n\t\tparticipant_form.instance = self.object\r\n\t\tparticipant_form.save()\r\n\t\tdocument_form.instance = self.object\r\n\t\tdocument_form.save()\r\n\t\tequipment_form.instance = self.object\r\n\t\tequipment_form.save()\r\n\t\tinvoice_form.instance = self.object\r\n\t\tinvoice_form.save()\r\n\r\n\t\treturn HttpResponseRedirect(self.get_success_url(is_submitting, Cruise))", "def newchore():\n #get info from form template\n name = request.form.get('chore_name')\n description = request.form.get('chore_description')\n duration_hours = request.form.get('duration_hours') or 0\n duration_minutes = request.form.get('duration_minutes') or 0\n #compse duration in minutes\n duration_minutes = (int(duration_hours) * 60 + int(duration_minutes))\n occurance = request.form.get('occurance')\n comment = request.form.get('comment')\n by_time = request.form.get('by-time')\n days_weekly = request.form.getlist('days_weekly')\n date_monthly = request.form.get('date_monthly')\n #send chore details to DB\n dbwrangler.newchore(name, description, duration_minutes, occurance, by_time, \n comment, days_weekly, date_monthly)\n\n return redirect(\"/\")", "def discipline():\n discipline = db.get_table('discipline')\n disciplines = discipline.get()\n form = DisciplineForm()\n if form.validate_on_submit():\n discipline.add(form.title.data)\n return redirect(url_for('discipline'))\n return render_template(\n 'discipline.html', disciplines=disciplines, form=form\n )", "def add_course(race_id):\n # use the CourseForm\n form = CourseForm()\n\n # check if this is a post request and all fields are valid\n if form.validate_on_submit():\n # get race object from database\n race = Race.query.get(race_id)\n host = race.host_school\n location = race.location\n\n # create a new course based on input on form from user\n course = Course(name=form.course_name.data,\n description = form.course_description.data,\n distance = form.distance.data,\n location_id=location.id)\n\n # add course to database and commit changes\n db.session.add(course)\n db.session.commit()\n\n # set course for race to the newly created course and commit changes\n race.course_id = course.id\n db.session.commit()\n\n # set flash to notify user that course setup was successfull\n flash(f\"Successfullly added course: '{course.name}' at location:\"\n f\"'{location.name}'\", 'success')\n\n # return to race_setup.html\n return redirect(url_for('setup.race_setup', race_id=race_id))\n\n # if this is get request, render add_course.html\n return render_template('add_course.html', form=form)", "def submit_data(self):\n\n database = Database()\n project_data = []\n\n project_entries = [\"\",\n \"\",\n \"\",\n self.proj_date.get(),\n self.proj_descrpt.get(),\n self.proj_estdatest.get(),\n self.proj_estdateend.get(),\n self.proj_estbudget.get(),\n self.proj_actdatest.get(),\n self.proj_actdateend.get(),\n self.proj_actcost.get()]\n\n index = 0\n num_filled = 0\n for item in project_entries:\n if item == \"\":\n project_entries[index] = None\n else:\n num_filled += 1\n index += 1\n\n cus_name = self.customer_name.get()\n\n if num_filled == 0 and cus_name == \"\":\n ErrorMessageWindow(\"You have to fill in at least one argument!\")\n else:\n # If a customer name is provided.\n if cus_name != \"\":\n customer_data = database.query_customer(cus_name=cus_name)\n if customer_data:\n project_entries[1] = 
customer_data[0][0]\n project_data = self.multi_project(database.query_project(\n project_query_options=project_entries))\n else:\n ErrorMessageWindow(\"No customer with this name found.\")\n else:\n project_data = self.multi_project(database.query_project(\n project_query_options=project_entries))\n\n if project_data:\n schedule_data = database.query_project_tasks(\n project_data=project_data)\n customer_data = database.query_customer(project_data[0][1])\n\n region_data = database.query_region(\n region_id=customer_data[0][1])\n\n # Project schedule window definition.\n ps_window = tkinter.Tk()\n ps_window.wm_title(\"Project Schedule Display\")\n tkinter.Label(\n ps_window, text=\"Project Information:\"\n ).grid()\n\n # Display project information.\n tkinter.Label(\n ps_window,\n text=\"Project ID: {}\".format(project_data[0][0]),\n ).grid(\n pady=5, column=0, row=1\n )\n tkinter.Label(\n ps_window,\n text=\"Description: {}\".format(project_data[0][4]),\n ).grid(\n pady=5, column=1, row=1\n )\n tkinter.Label(\n ps_window,\n text=\"Company: {}\".format(customer_data[0][2]),\n ).grid(\n pady=5, column=0, row=2\n )\n tkinter.Label(\n ps_window,\n text=\"Contract Date: {}\".format(project_data[0][3]),\n ).grid(\n pady=5, column=1, row=2\n )\n tkinter.Label(\n ps_window,\n text=\"Region: {}\".format(region_data[0][1]),\n ).grid(\n pady=5, column=2, row=2\n )\n tkinter.Label(\n ps_window,\n text=\"Start Date: {}\".format(project_data[0][5]),\n ).grid(\n pady=5, column=0, row=3\n )\n tkinter.Label(\n ps_window,\n text=\"End Date: {}\".format(project_data[0][6]),\n ).grid(\n pady=5, column=1, row=3\n )\n tkinter.Label(\n ps_window,\n text=\"Budget: ${}\".format(project_data[0][7]),\n ).grid(\n pady=5, column=2, row=3\n )\n\n # Schedule table definition.\n p_s_view = tkinter.ttk.Treeview(ps_window)\n p_s_view.grid(pady=10, column=1, row=5)\n\n p_s_view[\"show\"] = \"headings\"\n p_s_view[\"columns\"] = (\n \"Start Date\", \"End Date\", \"Task Description\",\n \"Skill(s) Required\", \"Quantity Required\"\n )\n\n # Table column headings.\n for heading in p_s_view[\"columns\"]:\n p_s_view.heading(heading, text=heading)\n p_s_view.column(heading, width=250)\n\n # Load data into table.\n for item in schedule_data:\n p_s_view.insert('', 'end', values=item)\n else:\n ErrorMessageWindow(\"No project found with given info.\")", "def schedule(request):\r\n\r\n return render(request, 'editorial/schedule.html', {})", "def test_post_entry_courses(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def add_objective(request, teacher_email, teacher_class_id, date):\n teacher = Teacher.objects.get(email=teacher_email)\n if teacher.user != request.user:\n # weird mistake or evil to manipulate another person's data? 
start over\n return redirect('top.index')\n\n teacher_class = TeacherClass.objects.get(id=teacher_class_id)\n\n if request.POST:\n form = EntryForm(request.POST)\n if form.is_valid():\n try:\n with transaction.atomic():\n entry = form.save(commit=False)\n entry.teacher = teacher\n entry.teacher_class = teacher_class\n entry.date = datetime.datetime.strptime(date, '%Y-%m-%d')\n entry.save()\n start_of_week_datetime = entry.date - datetime.timedelta(days=entry.date.weekday())\n start_of_week = datetime.date(start_of_week_datetime.year, start_of_week_datetime.month,\n start_of_week_datetime.day)\n return redirect('teachers.views.dashboard', teacher_email=teacher_email,\n teacher_class_id=teacher_class_id, start_of_week=start_of_week)\n except IntegrityError:\n # bad bad bad; I guess the EntryForm has to be initialized\n # with the date and teacher so that its clean() method can look at it.\n form._errors['objective'] = ['This objective is already on the calendar for this day.']\n pass\n else:\n objectives = objectives_for_course(teacher_class.course_id, teacher_class.repo_provider)\n if not objectives:\n # XXX fail with an error message\n pass\n # like EntryForm() above, but dynamically created to use a selection\n # of objectives specific to this course\n form = create_entry_form(objectives)\n\n args = {'teacher_email': teacher_email,\n 'teacher_class_id': teacher_class_id,\n 'dashboard_emails': get_dashboard_emails(request),\n 'date': date}\n args.update(csrf(request))\n args['form'] = form\n return render(request, 'teachers/add_objective.html', args)", "def reservs(request):\n a = request.GET\n print(a)\n if request.method == 'POST':\n # create a form\n form = NewReservationsOfficesForm(data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('coworkings:index')\n else:\n form = NewReservationsOfficesForm()\n\n context = {\"form\": form}\n return render(request, 'coworkings/reservs.html', context)", "def standings_post_request():\n date = request.form[\"date\"]\n datetime_object = datetime.datetime.strptime(date, \"%m-%d-%Y\")\n\n scoreboard = nba_py.Scoreboard(month=datetime_object.month,\n day=datetime_object.day,\n year=datetime_object.year)\n east_standings = scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return render_template(\"standings.html\",\n title=\"standings\",\n east_standings=enumerate(east_standings, 1),\n west_standings=enumerate(west_standings, 1),\n team=CITY_TO_TEAM)", "def create_new_schedule():\n\n # collect all relevant information from form\n user_id = int(session['user_id'])\n user = User.query.filter_by(user_id=int(session['user_id'])).one()\n contact_form_value = request.form.get('contact_id')\n start_date_unicode = request.form.get('start_date')\n period = int(request.form.get('period'))\n\n # extracts email from contact_form_value string using re library\n contact_email = contact_form_value.partition('<')[-1].rpartition('>')[0]\n\n # pull contact from database\n contact = Contact.query.filter_by(email=contact_email).one()\n contact_id = contact.contact_id\n\n # turns start_date into datetime object using dateutil library\n start_date = parser.parse(start_date_unicode)\n\n # calculates send_date from start_date and period\n send_date = start_date + datetime.timedelta(days=period)\n\n # write scheduled message to database\n new_scheduled_msg = ScheduledMessage(user_id=user_id, \n contact_id=contact_id,\n send_date=send_date,\n sent=False)\n\n # set new period on contact in database\n 
contact.contact_period = period\n\n db.session.add(new_scheduled_msg)\n db.session.commit()\n\n print 'user_id:', user_id\n print 'contact_form_value:', contact_form_value\n print 'start_date:', start_date, 'type:', type(start_date)\n print 'contact_email:', contact_email\n print 'contact:', contact\n print 'contact_id:', contact.contact_id\n print 'period:', period\n print 'send_date:', send_date\n return jsonify({})" ]
[ "0.7359131", "0.5738121", "0.56755453", "0.56118274", "0.5451969", "0.5445266", "0.5368269", "0.5310079", "0.5273463", "0.5239701", "0.52320564", "0.5225265", "0.52122545", "0.5199207", "0.5196593", "0.5195842", "0.5134032", "0.51298195", "0.5128839", "0.5125523", "0.5111229", "0.5104434", "0.50913984", "0.5076613", "0.5073321", "0.5048052", "0.5005494", "0.49793577", "0.4953738", "0.4945518" ]
0.6717341
1
Gets input from form, puts it in a list, gets the schedules, sends JSON of course combinations and sends them to /sched as a cookie
def my_form_post(): text_list = [] #make list of form inputs for i in range(1, AMOUNT_OF_COURSES + 1): form_num = 'text' + str(i) text_list.append(request.form[form_num]) #remove items with no input, generate string of courses final_list = [] for text in text_list: if not text == "": final_list.append(text) courses_str = "" for course in final_list[:-1]: courses_str += (str(course) + ',') courses_str += str(final_list[-1]) courses_str = courses_str.upper() #turn string of courses entered into list c_list = courses_str.split(',') #get the schedules #print "\nCourse list:" #print str(c_list) + "\n" my_combos = scheduler.schedule(c_list) resp = make_response(redirect('/sched')) resp.set_cookie('course_combos', '', expires=0) resp.set_cookie('course_combos', json.dumps(my_combos)) return resp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scheduleMe(page):\n querystring_combos = request.cookies.get('course_combos')\n if not querystring_combos:\n return render_template('404.html'), 404\n combos = json.loads(querystring_combos)\n #print querystring_combos\n\n count = len(combos)\n pagination_needed = count > PER_PAGE\n this_page_combos = combos\n if pagination_needed:\n this_page_combos = getCombosForPage(page, PER_PAGE, count, combos)\n last_page = isLastPage(page, count, PER_PAGE)\n if not this_page_combos and page != 1:\n return render_template('404.html'), 404\n return render_template(\"sched.html\",\n title=\"Scheduler\",\n combos=this_page_combos,\n combo_amount=str(count),\n page=page,\n last_page=last_page,\n pagination=pagination_needed)", "def how_many_post():\n default_courses = ['CS 442', 'CS 392', 'CS 519', 'MA 331']\n resp = make_response(render_template(\n \"sched_entry.html\",\n quantity=AMOUNT_OF_COURSES,\n title='Scheduler',\n default_vals=default_courses))\n resp.set_cookie('course_combos', '', expires=0)\n return resp", "def getCombosAPI():\n all_args = request.args.lists()\n course_list = all_args[0][1][0].split(\",\")\n u_COURSE_LIST = map((lambda x: x.upper()), course_list)#make all caps just in case\n COURSE_LIST = map( str, u_COURSE_LIST)#unicode list -> list of python strs\n combos = scheduler.schedule(COURSE_LIST)\n return jsonify(combos)", "def edit_current_schedule(current_courses, full_courses):\n\n days_list = ['mon', 'tues', 'wed','thurs', 'fri', 'sat', 'sun']\n valid_grades= [\"A\", \"A-\",\"B+\",\"B\",\"B-\",\"C+\",\"C\",\"C-\",\"D\",\"F\"]\n\n clear_screen()\n while True:\n try:\n print(\"Here are your current classes\")\n for val in current_courses:\n print(val)\n choice = int(input(\"Please select which one you'd like to edit:\\n1.Days\\n2.Time\\n3.Grade\\n4.Save and Quit \"))\n if choice !=4:\n class_code = input(\"Which class? \")\n if choice == 1:\n days = input(\"Please input days using style: mon,tues,wed,thurs,fri,sat,sun. 
Separate by comma \").split(',')\n for val in days:\n if val not in days_list:\n print(\"Invalid option\")\n days = current_courses[class_code][0]\n current_courses[class_code][0] = days\n else:\n current_courses[class_code][0] = days\n elif choice == 2:\n start_time = int(input(\"Using format 2400, input start time: \"))\n end_time = int(input(\"Using format 2400, input end time: \"))\n current_courses[class_code][1] = start_time\n current_courses[class_code][2] = end_time\n continue\n elif choice == 3:\n grade = input(\"Update current letter grade: \")\n if grade not in valid_grades:\n print(\"Invalid input\")\n grade = current_courses[class_code][3]\n current_courses[class_code][3] = grade.upper()\n full_courses[class_code][1] = grade.upper()\n else:\n current_courses[class_code][3] = grade.upper()\n full_courses[class_code][1] = grade.upper()\n continue\n else:\n with open('current_courses.json', 'w') as fp:\n json.dump(current_courses, fp)\n with open('full_courses.json', 'w') as f_file:\n json.dump(full_courses, f_file)\n break\n except ValueError:\n print(\"Invalid input.\")\n continue\n return 0", "def edit_schedule():\n days_list = ['mon', 'tues', 'wed','thurs', 'fri', 'sat', 'sun']\n valid_grades= [\"A\", \"A-\",\"B+\",\"B\",\"B-\",\"C+\",\"C\",\"C-\",\"D\",\"F\",\"0\"]\n clear_screen()\n with open('full_courses.json', 'r') as f_file:\n full_courses = json.load(f_file)\n with open('current_courses.json', 'r') as s_file:\n current_courses = json.load(s_file)\n while True:\n try:\n print(\"====Course Editing Menu====\")\n menu = int(input(\"1.Edit Class Schedule\\n2.Close out current_classes\\n3.Add Class to current schedule\\n4.Remove courses\\n5.Exit\"))\n if menu == 1:\n edit_current_schedule(current_courses, full_courses)\n elif menu ==2:\n choice = input(\"Are you sure you want to close out your schedule? This will wipe out your current_courses file (Y/N) \")\n if choice.upper() == \"Y\":\n for val,val2 in current_courses.items():\n grade = input(\"Enter final letter grade for class: \"+val)\n full_courses[val][1] = grade\n full_courses[val][2] = \"C\"\n with open('full_courses.json', 'w') as fp:\n json.dump(full_courses, fp) \n fp = open('current_courses.json', 'w')\n fp.close()\n print(\"Current_courses file wiped\")\n continue\n elif choice.upper() == 'N':\n continue\n elif menu == 3:\n class_code = input(\"Input class code, i.e IT106 \")\n if class_code not in full_courses.keys():\n print(\"Class does not exist \")\n continue\n else:\n days = input(\"Using format mon, tues, wed, thurs, fri, sat, sun, input class days. Separate by comma\").split(',')\n for val in days:\n if val not in days_list:\n clear_screen()\n print(\"WARNING: Invalid option\")\n days = \"0\"\n continue\n \n start_time = int(input(\"Using format 2400, input start time: \"))\n end_time = int(input(\"Using format 2400, input end time: \"))\n grade = input(\"Input letter grade for this class. If no grade, input 0: \")\n if grade not in valid_grades:\n grade = \"0\"\n print(\"Invalid option\")\n continue\n else:\n current_courses[class_code.upper()] = [days,start_time,end_time,grade.upper()]\n with open('current_courses.json', 'w') as fp:\n json.dump(current_courses, fp)\n continue\n elif menu == 4:\n print(\"Here are the courses of your semester: \")\n for val in current_courses:\n print(val)\n course_code = input(\"Which class do you want to delete? 
\")\n if course_code not in current_courses.keys():\n print(\"Invalid Entry\")\n continue\n else:\n choice = input(\"Are you sure you want to delete: \" +course_code+\"?(Y/N) \")\n if choice.upper() == \"Y\":\n del current_courses[course_code]\n with open('current_courses.json', 'w')as fp:\n json.dump(current_courses, fp)\n continue\n else:\n continue\n elif menu == 5:\n break\n except ValueError:\n print(\"Invalid input, try again\")\n continue\n return 0", "def current_load(full_list):\n days_list = ['mon', 'tues', 'wed', 'thurs', 'fri','sat','sun']\n valid_grades= [\"A\", \"A-\",\"B+\",\"B\",\"B-\",\"C+\",\"C\",\"C-\",\"D\",\"F\",'0']\n\n clear_screen()\n current_schedule = {}\n print(\"Here are all of the classes you have input thus far: \", full_list.keys())\n input(\"Now, we will begin to build you current course schedule. Press any key to continue\")\n \n class_code = input(\"Input class code, or type 'q' to quit: \")\n while class_code!= 'q':\n print(current_schedule)\n try:\n if class_code == 'q':\n break\n elif class_code.upper() not in full_list.keys():\n print(\"This class does not exist in your full list. Please try again:\")\n class_code = input(\"Input class code, or type 'q' to quit: \")\n continue\n elif class_code.upper() in current_schedule:\n print(\"You have already entered the information for this class. Please try again \")\n continue\n else:\n class_code = class_code.upper()\n day = input(\"What days does \"+class_code+\" take place on? Separate by comma and use format:\\nmon\\ntues\\nwed\\nthurs\\nfri\\nsat\\nsun \").split(',')\n for val in day:\n if val not in days_list:\n print(\"Invalid option\")\n continue\n start_time = int(input(\"Using format 2400, what time does \"+class_code+\" begin?\\n\"))\n end_time = int(input(\"Using format 2400, what time does \"+class_code+\" end?\\n\"))\n grade = input(\"What letter grade do you currently have? If no grade, input 0 \")\n if grade not in valid_grades:\n print(\"Invalid input\")\n continue\n current_schedule[class_code] = [day, start_time, end_time, grade]\n class_code = input(\"Input class code, or type 'q' to quit: \")\n except ValueError:\n input(\"Invalid input. 
Press any key to continue \")\n continue\n return current_schedule", "def Register(self,schedule):\n # oscar login page\n oscar = \"https://oscar.gatech.edu/pls/bprod/twbkwbis.P_GenMenu?name=bmenu.P_StuMainMnu\"\n \n #mechanize boilerplate\n br = mechanize.Browser()\n cj = cookielib.LWPCookieJar()\n br.set_cookiejar(cj)\n br.set_handle_equiv(True)\n br.set_handle_gzip(True)\n br.set_handle_redirect(True)\n br.set_handle_referer(True)\n br.set_handle_robots(False)\n br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)\n br.addheaders = [(\"User-agent\", \"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1\")]\n\n #open oscar sign-in page and grab login form\n r = br.open(oscar)\n br.form = list(br.forms())[0]\n br[\"sid\"] = self._id\n br[\"PIN\"] = self.pin\n res = br.submit()\n\n #initial landing page once signed into oscar\n br.open(\"https://oscar.gatech.edu/pls/bprod/twbkwbis.P_GenMenu?name=bmenu.P_RegMnu\")\n\n #jump to registration sub menu\n br.open(\"https://oscar.gatech.edu/pls/bprod/bwskfreg.P_AltPin\")\n\n #the year selection form is the second(hence 1st index)\n #defaults to the current year so we can just submit\n br.form = list(br.forms())[1]\n br.submit()\n\n #now we are at the registration page\n #the text fields are in the second form\n br.form = list(br.forms())[1]\n fields = []\n\n #the text fields all have the same name and type\n #so we'll just insert them into a list \n for control in br.form.controls:\n if control.type == \"text\" and control.name == \"CRN_IN\":\n fields.append(control)\n\n #set each text fields equal to a class in the schedule\n for field, course in zip(fields, schedule):\n field.value = str(course)\n \n response = br.submit()\n registered_classes = self.EnrolledClasses(response)\n return registered_classes", "def json_frapp(request):\n from pv.settings import MEDIA_URL\n\n if request.GET.get('date') == None:\n start = datetime.combine(date.today(), time(0, 0))\n else:\n start = datetime.combine( datetime.strptime(request.GET.get('date'), '%Y-%m-%d').date(), time(0, 0))\n\n end = datetime.combine(start, time(23, 59))\n\n timeslots = TimeSlot.objects.filter(start__gte=start,start__lte=end).select_related('show').order_by('start')\n\n\n '''Generate categories object for output'''\n\n categories = Category.objects.all()\n categories_output = []\n\n for c in categories:\n c_entry = {\n 'id': c.id,\n 'color': c.color.replace('#', '').upper(),\n 'namedisplay': c.category,\n 'description': c.description\n }\n\n categories_output.append(c_entry)\n\n # Get all series for timeslots\n series = set()\n for ts in timeslots:\n series.add(ts.show)\n\n\n '''Generate series object for output'''\n\n series_output = []\n\n for s in series:\n metainfos = []\n metainfos.append({ 'key': 'ProduzentIn', 'value': ', '.join(ts.show.hosts.values_list('name', flat=True)) })\n metainfos.append({ 'key': 'E-Mail', 'value': ', '.join(ts.show.hosts.values_list('email', flat=True)) })\n\n image = '' if s.image.name == None or s.image.name == '' else str(get_current_site(request)) + MEDIA_URL + s.image.name\n url = '' if s.website == None or s.website == '' else s.website\n\n # Get active schedules for the given date\n # But include upcoming single timeslots (with rrule_id=1)\n schedules = Schedule.objects.filter( Q(show=s.id,is_repetition=False) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n schedules_repetition = Schedule.objects.filter( 
Q(show=s.id,is_repetition=True) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n broadcastinfos = ''\n\n if not schedules.exists():\n continue\n\n for schedule in schedules:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n if schedules_repetition.exists():\n broadcastinfos = broadcastinfos + 'Wiederholung jeweils:'\n for schedule in schedules_repetition:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n s_entry = {\n 'id': s.id,\n 'categoryid': s.category.values_list('id', flat=True)[0],\n 'color': s.category.values_list('color', flat=True)[0].replace('#', '').upper(),\n 'namedisplay': s.name,\n 'description': s.description,\n 'url': url,\n 'image': image,\n 'broadcastinfos': broadcastinfos,\n 'metainfos': metainfos\n }\n\n series_output.append(s_entry)\n\n\n '''Generate shows object for output'''\n\n shows_output = []\n\n for ts in timeslots:\n\n is_repetition = ' ' + _('REP') if ts.schedule.is_repetition is 1 else ''\n namedisplay = ts.show.name + is_repetition\n description = ts.show.description\n url = str(get_current_site(request)) + '/shows/' + ts.show.slug\n urlmp3 = ''\n\n # If there's a note to the timeslot use its title, description and url\n try:\n note = Note.objects.get(timeslot=ts.id)\n namedisplay = note.title + is_repetition\n description = note.content\n url = str(get_current_site(request)) + '/notes/' + note.slug\n urlmp3 = note.audio_url\n except ObjectDoesNotExist:\n pass\n\n ts_entry = {\n 'id': ts.id,\n 'seriesid': ts.show.id,\n 'datetimestart': ts.start.strftime('%d.%m.%Y %H:%M:%S'),\n 'datetimeend': ts.end.strftime('%d.%m.%Y %H:%M:%S'),\n 'namedisplay': namedisplay,\n 'description': description,\n 'url': url,\n 'urlmp3': urlmp3,\n }\n\n shows_output.append(ts_entry)\n\n output = {}\n output['categories'] = categories_output\n output['series'] = series_output\n output['shows'] = shows_output\n\n return HttpResponse(json.dumps(output, ensure_ascii=False).encode('utf8'),\n content_type=\"application/json; charset=utf-8\")", "def receiveData():\r\n preference = request.get_json()\r\n program = preference.pop('program')\r\n enroll_yr = preference.pop('enroll_yr')\r\n enroll_sem = preference.pop('enroll_sem')\r\n spec = 0\r\n if 'spec' in preference:\r\n spec = int(preference['spec'])\r\n preference.pop('spec')\r\n\r\n program_link = 'https://programsandcourses.anu.edu.au/2019/program/'\r\n\r\n program_link = str(program_link) + str(program)\r\n # calculate which type of semester does the enrolled semester fall in\r\n # S1 in odd year, S2 in odd year, S1 in even year or S2 in even year \r\n if int(enroll_yr)%2 == 1:\r\n if int(enroll_sem)%2 == 1:\r\n sem = 1\r\n else:\r\n sem = 2\r\n else:\r\n if int(enroll_sem)%2 == 1:\r\n sem = 3\r\n else:\r\n sem = 4\r\n \r\n # call the pre-processing program which put the model in file test1.mzn & test1.dzn\r\n scraper = dp.DegreeRuleScraper(str(program_link))\r\n orders = scraper.build_program_order_struct()\r\n orders.buildAModel(preference, sem, spec)\r\n \r\n # call MiniZinc to solve for the model\r\n cmd = 'minizinc --solver OSICBC test1.mzn test1.dzn > plan.txt'\r\n os.system(cmd)\r\n jsondata = readmyJson('plan')\r\n \r\n return jsonify(jsondata)", "def standings_post_request():\n date = request.form[\"date\"]\n datetime_object = datetime.datetime.strptime(date, \"%m-%d-%Y\")\n\n scoreboard = nba_py.Scoreboard(month=datetime_object.month,\n day=datetime_object.day,\n year=datetime_object.year)\n 
east_standings = scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return render_template(\"standings.html\",\n title=\"standings\",\n east_standings=enumerate(east_standings, 1),\n west_standings=enumerate(west_standings, 1),\n team=CITY_TO_TEAM)", "def schedule_content(request):\r\n\r\n stories = Story.objects.filter(organization=request.user.organization).exclude(archived=True)\r\n\r\n # data = {}\r\n # data['success'] = 1\r\n # data['result'] = []\r\n data = []\r\n\r\n for story in stories:\r\n # Facet Schedules\r\n for facet in story.facetstory.all():\r\n credit = {}\r\n for user in facet.credit.all():\r\n credit['id'] = []\r\n credit['id'].append(user.credit_name)\r\n credit['id'].append(user.get_absolute_url())\r\n editor = {}\r\n for user in facet.editor.all():\r\n editor['id'] = []\r\n editor['id'].append(user.credit_name)\r\n editor['id'].append(user.get_absolute_url())\r\n print credit\r\n if facet.due_edit:\r\n edit_event_dict = {}\r\n edit_event_dict['id'] = facet.id\r\n edit_event_dict['title'] = facet.name.encode('utf-8')\r\n edit_event_dict['description'] = facet.description.encode('utf-8')\r\n edit_event_dict['due-edit'] = time.mktime(facet.due_edit.timetuple())\r\n edit_event_dict['editor'] = facet.editor.credit_name\r\n edit_event_dict['credit'] = credit\r\n edit_event_dict['url'] = facet.get_absolute_url()\r\n edit_event_dict['start'] = time.mktime(facet.due_edit.timetuple()) * 1000\r\n edit_event_dict['end'] = (time.mktime(facet.due_edit.timetuple()) * 1000) + 60\r\n edit_event_dict['overlap'] = True\r\n edit_event_dict['allDay'] = False\r\n edit_event_dict['backgroundColor'] = '#00aced'\r\n edit_event_dict['textColor'] = '#fff'\r\n data.append(edit_event_dict)\r\n if facet.run_date:\r\n run_event_dict = {}\r\n run_event_dict['id'] = facet.id\r\n run_event_dict['title'] = facet.name.encode('utf-8')\r\n run_event_dict['description'] = facet.description.encode('utf-8')\r\n run_event_dict['due-edit'] = time.mktime(facet.due_edit.timetuple())\r\n run_event_dict['editor'] = facet.editor.credit_name\r\n run_event_dict['credit'] = credit\r\n run_event_dict['url'] = facet.get_absolute_url()\r\n run_event_dict['class'] = 'event_run'\r\n run_event_dict['start'] = time.mktime(facet.run_date.timetuple()) * 1000\r\n run_event_dict['end'] = (time.mktime(facet.run_date.timetuple()) * 1000) + 60\r\n run_event_dict['overlap'] = True\r\n run_event_dict['backgroundColor'] = '#5cb85c'\r\n run_event_dict['textColor'] = '#fff'\r\n data.append(run_event_dict)\r\n\r\n # print \"DATA: \", data\r\n\r\n return HttpResponse(json.dumps(data), content_type='application/json')", "def _handleRequestSchedules(self, data):\r\n print(\"\\\"Request Schedules\\\" received\")\r\n message = self.whitebeet.v2gParseRequestSchedules(data)\r\n print(\"Max entries: {}\".format(message['max_entries']))\r\n success = True\r\n schedule_out = []\r\n time_now = time.time()\r\n index = 0\r\n for entry in self.schedule:\r\n # Check if schedule is still valid\r\n if entry[\"valid_until\"] - time_now < 0:\r\n success = False\r\n break\r\n interval = int(entry[\"valid_until\"] - time_now)\r\n max_power = entry[\"max_power\"]\r\n # Currently only uint16 supported for interval therefore we need to split\r\n if interval > 65535:\r\n loop = True\r\n while loop:\r\n schedule_out.append((index, 65535, max_power))\r\n interval -= 65535\r\n index += 1\r\n if interval < 65535:\r\n break\r\n schedule_out.append((index, interval, max_power))\r\n index += 1\r\n if success:\r\n # 
Limit to maximum number of entries sent by the EV\r\n if len(schedule_out) > message['max_entries']:\r\n del schedule_out[message['max_entries']:len(schedule_out)]\r\n print(\"Set the schedule: {}\".format(schedule_out))\r\n try:\r\n self.whitebeet.v2gSetSchedules(0, int(time_now), schedule_out)\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))\r\n else:\r\n try:\r\n self.whitebeet.v2gSetSchedules(1, None, None)\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))", "def get_schedules(dates, token):\n\n if True: # Safety check\n return\n\n conn = sqlite3.connect(db)\n cursor = conn.cursor()\n\n stmt_delete = 'DELETE FROM departures;'\n cursor.execute(stmt_delete) # Remove this in case of re-execution due to error or sudden termination\n\n headers = {'Authorization': 'Bearer ' + token}\n params = {'maxPlaces': 1, 'modes': 'bus', 'maxPerBoard': 50}\n url = 'https://transit.hereapi.com/v8/departures'\n\n stmt_stations = 'SELECT id, coordinates_overpass FROM stations ORDER BY id;'\n stmt_station_update = \"\"\"UPDATE stations SET id_here = ?, name_here = ?, coordinates_here = ?, \n no_data = 0, duplicate = 0 WHERE id = ?;\"\"\"\n stmt_departures = \"\"\"INSERT INTO departures (station_id, bus, headsign, day, time)\n VALUES (?, ?, ?, ?, ?);\"\"\"\n stmt_station_check_stream = 'SELECT id_here FROM stations WHERE id = ?;'\n stmt_station_check_duplicate = 'SELECT count(*) FROM stations WHERE id_here = ? OR coordinates_here = ?;'\n stmt_count_check = 'SELECT count(*) FROM departures WHERE station_id = ? AND day = ?;'\n stmt_station_no_data = 'UPDATE stations SET no_data = 1 WHERE id = ?;'\n stmt_station_set_duplicate = 'UPDATE stations SET duplicate = 1 WHERE id = ?;'\n stmt_buses = 'SELECT DISTINCT name FROM buses;'\n\n buses = [bus[0] for bus in cursor.execute(stmt_buses).fetchall()]\n\n cursor.execute(stmt_stations)\n stations = cursor.fetchall()\n\n for day, date in tqdm(dates.items()):\n min_time = datetime.datetime.strptime(date, '%Y-%m-%d')\n max_time = min_time + datetime.timedelta(days=1)\n \n for station in tqdm(stations):\n params['in'] = station[1]\n params['time'] = min_time\n \n while params['time'] < max_time:\n cursor.execute(stmt_count_check, (station[0], day))\n \n if cursor.fetchone()[0] > 1440:\n raise Exception('Something went wrong! 
Too many departures for station {}!'.format(station[0]))\n\n params['time'] = params['time'].isoformat()\n response = requests.get(url, headers=headers, params=params)\n \n try:\n data = response.json()['boards'][0]\n except:\n cursor.execute(stmt_station_no_data, (station[0],))\n break\n\n cursor.execute(stmt_station_check_stream, (station[0],))\n id_here = cursor.fetchone()[0]\n \n if id_here is None:\n coordinates_here = ','.join(map(str, [data['place']['location']['lat'], data['place']['location']['lng']]))\n cursor.execute(stmt_station_check_duplicate, (data['place']['id'], coordinates_here))\n \n if cursor.fetchone()[0]:\n cursor.execute(stmt_station_set_duplicate, (station[0],))\n break\n \n station_data = (data['place']['id'], data['place']['name'], coordinates_here, station[0])\n cursor.execute(stmt_station_update, station_data)\n \n elif id_here != data['place']['id']:\n raise Exception('Here ID mismatch for station {}!'.format(station[0]))\n \n for departure in data['departures']:\n if datetime.datetime.fromisoformat(departure['time']).replace(tzinfo=None) >= max_time:\n break\n if departure['transport']['name'] not in buses:\n continue\n departure_data = (station[0], departure['transport']['name'], departure['transport']['headsign'], day, departure['time'][11:16])\n cursor.execute(stmt_departures, departure_data)\n\n params['time'] = datetime.datetime.fromisoformat(data['departures'][-1]['time']).replace(tzinfo=None) + datetime.timedelta(minutes=1)\n conn.commit() # Commit during iterations so we do not lose progress in case of error or sudden termination\n\n cursor.close()\n conn.close()", "def send_fleet(self, ships, res, dest, mission, speed=10, planet=None):\n \"\"\"it seems we need to send a post to fleet3 with some info (acs, ships, speed, dest, type, union and mission)\n and then the needed token will be in the response of that post.\n then just send the form like we already have below to the movement page with everything.\n \"\"\"\n # get fleet1: needs planet=planet\n # post fleet2: just the hidden fields (from fleet1) and ships\n # post fleet3: hidden field but overwrite speed, coords and type\n # post movement: hidden fields and resources and mission\n fleet1 = self.get_soup(\"fleet1\", planet=planet)\n\n form = {\n \"galaxy\": dest[0], \"system\": dest[1], \"position\": dest[2],\n \"type\": \"1\", # planet/debris/moon\n \"mission\": codes.missions[mission],\n \"speed\": str(speed) # this one was easy\n }\n # now we add the ships\n for ship in ships: form[\"am{}\".format(codes.ships[ship])] = ships[ship]\n\n # second page\n fleet2 = self.session.post(self.page_url(\"fleet2\", planet=planet), data=form).content\n\n # third page\n fleet3 = self.session.post(self.page_url(\"fleet3\", planet=planet), data=form).content\n form.update({\n \"acsValues\": \"-\", # no clue\n })\n # maybe i need to do 3 separate requests for each of the pages\n form = {\"holdingtime\": \"1\", # dont know what this is yet\n \"expeditiontime\": \"1\", # also dont know what this is yet\n \"token\": self.get_token(\"fleet3\", in_post=False, planet=planet),\n \"galaxy\": dest[0], \"system\": dest[1], \"position\": dest[2],\n \"type\": \"1\", # planet/debris/moon\n \"mission\": codes.missions[mission],\n \"union2\": \"0\", # dont know this one either\n \"holdingOrExpTime\": \"0\", # nope\n \"speed\": str(speed), # this one was easy\n \"acsValues\": \"-\", # no clue\n \"prioMetal\": \"1\", # nope\n \"prioCrystal\": \"2\", # nope\n \"prioDeuterium\": \"3\"} # aaaaand nope\n # now we add the ships\n for 
ship in ships: form[\"am{}\".format(codes.ships[ship])] = ships[ship]\n # next we add the resources to take\n for r in res: form[r] = res[r]\n\n # now that the fleet cake is done we just give to the server\n url = self.page_url(\"movement\", planet)\n self.session.post(url, data=form)", "def add_schedule():\n if request.method == 'GET':\n groups = list(map(lambda x: x.json(), GroupModel.query.all()))\n if len(groups) == 0 :\n \n flash(\"It seems you have not added the @ConsumerSurveyorBot to any group of any channel. Please, add the the bot to any group to schedule the message for the same.\")\n return redirect(url_for('dashboard.index'))\n return render_template('dashboard/add_schedule.html', groups = groups)\n\n if request.method == 'POST':\n\n error = None\n schedule = parse(request.form['schedule']+'+05:30')\n if schedule < datetime.datetime.now(pytz.timezone('Asia/Kolkata')):\n error = 'You can not schedule a message in past'\n if error is not None:\n flash(error)\n else:\n print(request.form)\n job = schedule_msg(request.form['message'],\n schedule, request.form['group_id'])\n message = MessageModel( \n job.id, request.form['name'], request.form['message'], request.form['schedule']+'+05:30', request.form['group_id'] )\n message.save_to_db()\n return redirect(url_for('dashboard.index')) \n return render_template('dashboard/add_schedule.html')", "def respondToSubmit(formData):\n\tdata = header()\n\t# The command line expected\n\targs = [\"web\", formData[\"stationName\"], formData.get(\"day\", \"Now\"), formData[\"time\"]]\n\n\t# If no time was specified\n\tif not args[-1]:\n\t\t# Remove the last argument\n\t\targs = args[:-1]\n\t\t# If today is specified, then assume current time if no time is mentioned\n\t\tif args[-1] == \"Today\":\n\t\t\targs[-1] = \"Now\"\n\t# Process all the command line\n\tweather = stage2.process(args)\n\tif \"error\" not in weather:\n\t\t# Fill in the details from the forecast\n\t\tdata += '<p class=\"bg-success lead\">%s</p><div class=\"row\">&nbsp;</div>' % details(weather)\n\telse:\n\t\t# Fill in error message\n\t\tdata += '<p class=\"bg-danger lead\">%s</p>' % weather[\"error\"]\n\t# Complete the web page\n\tdata += footer()\n\n\treturn data", "def Scheduler():\n courses = \"cs108 cs112 cs214 stat343 cs336 cs300\".split()\n profs = \"norman adams schuurman pruim vanderlinden\".split()\n slots = \"mwf900 mwf1130 tth1030 tth130\".split()\n rooms = \"sb354 nh064\".split()\n \n variables = courses\n assignments = {}\n assignments['cs108'] = \"norman\"\n assignments['cs112'] = \"adams\"\n assignments['cs214'] = \"adams\"\n assignments['stat343'] = \"pruim\"\n assignments['cs336'] = \"vanderlinden\"\n assignments['cs300'] = \"schuurman\"\n neighbors = parse_neighbors(\"\"\"\n cs108: norman; cs112: adams; \n cs214: adams; stat343: pruim; \n cs336: vanderlinden; cs300: schuurman\n \"\"\", variables)\n domains = {}\n for course in courses:\n domains[course] = []\n for course in courses:\n for prof in profs:\n for room in rooms:\n for slot in slots:\n domains[course].append(prof + \" \" + room + \" \" + slot)\n \n for type in [courses]:\n for A in type:\n for B in type:\n if A != B:\n if B not in neighbors[A]:\n neighbors[A].append(B)\n if A not in neighbors[B]:\n neighbors[B].append(A)\n\n def scheduler_constraints(A, a, B, b, recurse=0):\n ADomain = a.split()\n BDomain = b.split()\n A_Prof = ADomain[0]\n B_Prof = BDomain[0]\n A_Room = ADomain[1]\n B_Room = BDomain[1]\n A_Slot = ADomain[2]\n B_Slot = BDomain[2]\n A_Course = A\n B_Course = B\n \n if(A_Prof == B_Prof 
and A_Slot == B_Slot):\n return False\n if(A_Room == B_Room and A_Slot == B_Slot):\n return False\n\n if('norman' in a and A == 'cs108'):\n return True\n if('adams' in a and A == 'cs112'):\n return True\n if('adams' in a and A == 'cs214'):\n return True\n if('pruim' in a and A == 'stat343'):\n return True\n if('vanderlinden' in a and A == 'cs336'):\n return True\n if('schuurman' in a and A == 'cs300'):\n return True\n if(A in courses and B in courses):\n return False\n if(recurse == 0):\n return scheduler_constraints(B, b, A, a, 1)\n return True\n \n return CSP(variables, domains, neighbors, scheduler_constraints)", "def schedules(self, term, include_units=False):\n params = {'termCode': term.code}\n r = self.get(self.HOME_ENDPOINT, params=params)\n soup = BeautifulSoup(r.text, 'html.parser')\n schedules = dict()\n # Finding schedule names\n name_matches = list(re.finditer('Schedules\\[Schedules\\.length\\] = \\{\"Name\":\"(.+?)\"',\n r.text))\n course_re = re.compile('Schedules\\[Schedules\\.length \\- 1\\]\\.SelectedList\\.t'\n '([0-9A-Z]+) =.+?\"UNITS\":\"([0-9])\"', flags=re.DOTALL)\n start = 0\n\n for idx, name_match in enumerate(name_matches):\n name = name_match.group(1)\n schedules[name] = list()\n\n try:\n end = name_matches[idx + 1].start()\n except IndexError:\n end = len(r.text)\n course_match = None\n for course_match in course_re.finditer(r.text, name_match.start(), end):\n crn = course_match.group(1)\n if include_units:\n units = int(course_match.group(2))\n schedules[name].append((crn, units))\n else:\n schedules[name].append(crn)\n\n return schedules", "def home():\n\n form = SubmissionForm(request.form)\n\n # Form has been submitted\n if request.method == 'POST' and form.validate():\n\n # Plug in the data into a dictionary object \n # - data from the input form\n # - text data must be converted to lowercase\n data = {\n \"Inputs\": {\n \"input1\": {\n \"ColumnNames\": [\n \"Open\",\n \"High\",\n \"Low\",\n \"Close\",\n \"Volume\",\n \"T3_Vol_Diff\",\n \"T3_Close_Diff\",\n \"T3_Open_Diff\",\n \"T2_Vol_Diff\",\n \"T2_Close_Diff\",\n \"T2_Open_Diff\",\n \"T1_Vol_Diff\",\n \"T1_Close_Diff\",\n \"T1_Open_Diff\",\n \"Prior_Day_Vert_Delta_Ratio\",\n \"Retracement_Signal\",\n \"Prior_Day_Derivative\",\n \"T+1_Close\",\n ],\n \"Values\": [\n [\n form.Open.data,\n form.High.data,\n form.Low.data,\n form.Close.data,\n form.Volume.data,\n form.T3_Vol_Diff.data,\n form.T3_Close_Diff.data,\n form.T3_Open_Diff.data,\n form.T2_Vol_Diff.data,\n form.T2_Close_Diff.data,\n form.T2_Open_Diff.data,\n form.T1_Vol_Diff.data,\n form.T1_Close_Diff.data,\n form.T1_Open_Diff.data,\n form.Prior_Day_Vert_Delta_Ratio.data,\n form.Retracement_Signal.data,\n form.Prior_Day_Derivative.data,\n \"\"\n ]\n ]\n }\n },\n \"GlobalParameters\": {}\n}\n\n # Serialize the input data into json string\n body = str.encode(json.dumps(data))\n# str.encode\n # Formulate the request\n #req = urllib.request.Request(URL, body, HEADERS)\n req = urllib.request.Request(Bayesian_URL, body, HEADERS)\n\n # Send this request to the AML service and render the results on page\n try:\n # response = requests.post(URL, headers=HEADERS, data=body)\n response = urllib.request.urlopen(req)\n #print(response)\n respdata = response.read()\n result = json.loads(str(respdata, 'utf-8'))\n result = do_something_pretty(result)\n # result = json.dumps(result, indent=4, sort_keys=True)\n return render_template(\n 'result.html',\n title=\"This is the result from AzureML running our example T+1 Prediction:\",\n result=result)\n\n # An HTTP error\n 
except urllib.error.HTTPError as err:\n result=\"The request failed with status code: \" + str(err.code)\n return render_template(\n 'result.html',\n title='There was an error',\n result=result)\n #print(err)\n\n # Just serve up the input form\n return render_template(\n 'form.html',\n form=form,\n title='Run App',\n year=datetime.now().year,\n message='Demonstrating a website using Azure ML Api')", "def collect_courses():\n clear_screen()\n full_courses = {}\n input(\"First, We need to build a list of every class required for your major, and their respective credit values.\")\n while True:\n clear_screen()\n print(full_courses)\n class_code = input(\"Please input course code. i.e: IT106\\n If you are finished, press q to quit\\n\")\n if class_code == 'q':\n break\n elif class_code.upper() in full_courses.keys():\n print(\"You have already input this class. Please try again\")\n continue\n class_code = class_code.upper()\n try:\n credit_hours = int(input(\"input the credit value for course: \"+class_code+\"\\n\"))\n grade = input(\"If you have already finished \" + class_code+\", please give your final letter grade. Otherwise type 0\\n\")\n status = input(\"Please give the status of this class: A-Actively Taking D-Dropped W-Withdrawn C-Completed\\n\")\n if status.upper() == 'A' or status.upper() == 'D' or status.upper() == 'W' or status.upper() == 'C': # changed this, OR can't be used after a single == like it was before\n full_courses[class_code] = [credit_hours, grade, status]\n else:\n input(\"Invalid selection\")\n continue\n except ValueError:\n input(\"Invalid entry. \")\n continue\n return full_courses", "def create_challenge(request):\n\tif request.method == \"POST\":\n\t\tselected_schedule_pk = request.POST[\"schedule-result-selected\"]\n\t\t\n\t\tselected_schedule = ReadingSchedule.objects.get(pk = selected_schedule_pk)\n\t\t\n\t\tnew_challenge = Challenge()\n\t\tnew_challenge.name = request.POST[\"challenge-name\"]\n\t\tnew_challenge.schedule = selected_schedule\n\t\tnew_challenge.schedule_name = selected_schedule.title\n\t\tif(\"challenge-is-private\" in request.POST):\n\t\t\tnew_challenge.invite_only = request.POST[\"challenge-is-private\"]\n\t\telse:\n\t\t\tnew_challenge.invite_only = False\n\t\tnew_challenge.save()\n\t\tnew_challenge.admin.add(request.user)\n\t\t\n\t\t\n\t\t\n\t\tmessages.success(request, \"Successfully created a challenge\")\n\t\treturn redirect(\"/challenge\")\n\t\t\n\telse:\n\t\tall_schedules = ReadingSchedule.objects.filter(start_date__gte = datetime.datetime.today())\n\t\t#turn into JSON for selector\n\t\tlist_of_sched = []\n\t\tfor schedule in all_schedules:\n\t\t\tlist_of_sched.append({ 'name' : schedule.title, 'date' : parse_date_to_string(schedule.start_date), 'pk' : schedule.pk })\n\t\t\n\t\tprint(json.dumps(list_of_sched))\n\t\t\n\t\tcontext = RequestContext(request, {\"all_schedule_json\" : json.dumps(list_of_sched)})\n\t\treturn render_to_response(\"encourage/create_challenge.html\", context)", "def fillSchedule(self, schedule):\n\n self.rooster = schedule\n\n # select courses from zaalrooster\n courses2 = []\n for key, value in self.rooster.items():\n if key == self.room:\n value = value\n for courses in value:\n for course in courses:\n course = str(course)\n courses2.append(course)\n\n # fill schedule with courses from zaalrooster\n for i in range(5):\n for j in range(5):\n self.w.create_text(100 + i, 150 + j, text = courses2[i], width = 80)\n self.w.create_text(100 + i, 250 + j, text = courses2[i+1], width = 80)\n self.w.create_text(100 + i, 
350 + j, text = courses2[i+2], width = 80)\n self.w.create_text(100 + i, 450 + j, text = courses2[i+3], width = 80)\n self.w.create_text(300 + i, 150 + j, text = courses2[i+4], width = 80)\n self.w.create_text(300 + i, 250 + j, text = courses2[i+5], width = 80)\n self.w.create_text(300 + i, 350 + j, text = courses2[i+6], width = 80)\n self.w.create_text(300 + i, 450 + j, text = courses2[i+7], width = 80)\n self.w.create_text(500 + i, 150 + j, text = courses2[i+8], width = 80)\n self.w.create_text(500 + i, 250 + j, text = courses2[i+9], width = 80)\n self.w.create_text(500 + i, 350 + j, text = courses2[i+10], width = 80)\n self.w.create_text(500 + i, 450 + j, text = courses2[i+11], width = 80)\n self.w.create_text(700 + i, 150 + j, text = courses2[i+12], width = 80)\n self.w.create_text(700 + i, 250 + j, text = courses2[i+13], width = 80)\n self.w.create_text(700 + i, 350 + j, text = courses2[i+14], width = 80)\n self.w.create_text(700 + i, 450 + j, text = courses2[i+15], width = 80)\n self.w.create_text(900 + i, 150 + j, text = courses2[i+16], width = 80)\n self.w.create_text(900 + i, 250 + j, text = courses2[i+17], width = 80)\n self.w.create_text(900 + i, 350 + j, text = courses2[i+18], width = 80)\n self.w.create_text(900 + i, 450 + j, text = courses2[i+19], width = 80)\n\n\n mainloop()", "def submit_app_form():\n\n firstname = request.form.get(\"fstname\")\n lastname = request.form.get(\"lstname\")\n salary = request.form.get(\"salaryreq\")\n position = request.form.get(\"job\")\n\n return render_template(\"application-response.html\",\n fstname=firstname,\n lstname=lastname,\n salaryreq=salary,\n job=position,\n )", "def RegisterPopupForm(request, program_key, activity_key, users=None,\n notify='1', force_status='0'):\n # Get the schedules.\n schedules_query = models.Activity.SchedulesQueryFromActivityKey(activity_key)\n schedules_query.order('start_time')\n\n # Get the access point to load and make a list of schedules.\n schedules_list = []\n access_point_keys = set()\n access_points_secondary_keys = set()\n\n common_access_points = set()\n same_access_points = True\n\n for schedule in schedules_query:\n all_access_points = schedule.GetAllAccessPoints()\n if same_access_points:\n if not common_access_points:\n # We populate the set for the first time\n common_access_points.update(all_access_points)\n elif common_access_points != all_access_points:\n # Access points are different\n same_access_points = False\n\n schedules_list.append(schedule)\n access_point_keys.update(schedule.access_points)\n access_points_secondary_keys.update(schedule.access_points_secondary)\n\n access_point_keys.update(access_points_secondary_keys)\n # Load all the access points that are of interest.\n access_points = db.get(list(access_point_keys))\n assert None not in access_points\n access_points = dict(zip(access_point_keys, access_points))\n\n user = request.user\n schedule_info_list = []\n for schedule in schedules_list:\n schedule_info = {}\n\n # Format session times to display.\n schedule_info['key'] = str(schedule.key())\n schedule_info['start_time_local'] = user.GetLocalTime(schedule.start_time)\n\n # Add the access points that are available for each schedule.\n access_point_list = []\n for access_point_key in schedule.GetAllAccessPoints():\n access_point_display = str(access_points[access_point_key])\n if access_point_key in access_points_secondary_keys:\n access_point_display += ' (P)'\n access_point_list.append({'key': str(access_point_key),\n 'display': access_point_display})\n\n # sort access 
points by name\n schedule_info['access_point_list'] = sorted(access_point_list,\n key=lambda x: x['display'])\n\n # Add the schedule info to the list\n schedule_info_list.append(schedule_info)\n\n data = {'schedule_list': schedule_info_list,\n 'activity_key': activity_key,\n 'program_key': program_key,\n 'notify': notify,\n 'force_status': force_status}\n\n if same_access_points:\n data['common_access_points'] = schedule_info_list[0]['access_point_list']\n\n if users:\n data['users_count'] = len(users.split(','))\n data['users'] = users\n return data", "def register_courses(self, term, schedule, items, allow_waitlisting=True, at=None):\n crns, units = zip(*items)\n query = {'Term': term.code,\n 'CourseCRNs': ','.join([str(x) for x in crns]),\n 'Schedule': schedule,\n 'WaitlistedFlags': 'Y' if allow_waitlisting else 'N',\n 'Units': ','.join([str(x) for x in units]),\n 'ShowDebug': 0,\n '_': int(float(time.time()) * 10**3) # timestamp in milliseconds\n }\n\n if at:\n seconds = (at - datetime.now()).total_seconds()\n if seconds > 0:\n time.sleep(seconds)\n\n r = self.get(self.REGISTER_ENDPOINT, params=query)\n # Error checking\n for e in self.REGISTRATION_ERRORS:\n if e in r.text:\n raise RegistrationError(e)", "def Scheduling():\n Faculty = 'Adams Schuurman VanderLinden Bailey'.split()\n Times = 'mwf900 mwf1030 tth900 tth1030'.split()\n Classrooms = 'nh253 sb382'.split()\n Courses = 'cs104 cs108 cs112 cs212 cs214 cs336 cs344'.split()\n variables = Courses\n domains = {}\n combo = list(itertools.product(Times, Faculty, Classrooms))\n for var in variables:\n domains[var] = combo\n\n # domains['Adams1'] = [1, 5]\n\n # neighbor parsing -- not implemented\n neighbors = parse_neighbors(\"\"\"cs104: cs108; cs344: cs336\"\"\", variables)\n for type in [Courses, Faculty, Times, Classrooms]:\n for A in type:\n for B in type:\n if A != B:\n if B not in neighbors[A]:\n neighbors[A].append(B)\n if A not in neighbors[B]:\n neighbors[B].append(A)\n\n def constraint(A, a, B, b, recurse=0):\n # a room can only have one class at each time\n same_timespace = (a[0] == b[0] and a[2] == b[2])\n # faculty member can only teach one thing at a time\n same_profslot = (a[0] == b[0] and a[1] == b[1])\n if recurse == 0:\n return constraint(B, b, A, a, 1)\n return not (same_timespace or same_profslot)\n\n return CSP(variables, domains, neighbors, constraint)", "def ajaxSubmit():\n\n postRequest = request.json or request.form # Short circuit the data fetch\n print postRequest\n print postRequest.getlist('answer')\n alist = eval(\"\".join(postRequest.getlist('answer')))\n statusid = postRequest.getlist('id')[0]\n if statusid == \"-2\" and dna.currentquestion == -1:\n SESSION_INFO.result = dna.currentList\n q = Question()\n q.qid = \"-1\"\n SESSION_INFO.question = q\n SESSION_INFO.answerlist = dna.answerList\n return json.dumps({\"session_info\": SESSION_INFO.toJson()})\n elif statusid != \"-2\":\n if alist == []:\n return json.dumps({\"session_info\": SESSION_INFO.toJson()})\n if dna.currentquestion != -1:\n dna.answer(alist)\n dna.newQ()\n\n if dna.currentquestion == -1 or dna.currentquestion == \"error\":\n print \"error got\"\n SESSION_INFO.result = dna.currentList\n q = Question()\n q.qid = \"-1\"\n SESSION_INFO.question = q\n SESSION_INFO.answerlist = dna.answerList\n return json.dumps({\"session_info\": SESSION_INFO.toJson()})\n SESSION_INFO.question = dna.currentquestion.toQestion()\n print SESSION_INFO.toJson()\n return json.dumps({\"session_info\": SESSION_INFO.toJson()})\n else:\n return 
json.dumps({\"session_info\": SESSION_INFO.toJson()})", "def schedule(request):\r\n\r\n return render(request, 'editorial/schedule.html', {})", "def new_job(request):\n sis_account_id = request.LTI[\"custom_canvas_account_sis_id\"]\n terms, _current_term_id = get_term_data_for_school(sis_account_id)\n school_id = sis_account_id.split(\":\")[1]\n canvas_site_templates = get_canvas_site_templates_for_school(school_id)\n potential_course_sites_query = None\n departments = []\n course_groups = []\n selected_term_id = None\n selected_course_group_id = None\n selected_department_id = None\n\n # Only display the Course Groups dropdown if the tool is launched in the COLGSAS sub-account\n if school_id == 'colgsas':\n try:\n course_groups = get_course_group_data_for_school(sis_account_id, include_ile_sb=False)\n except Exception:\n logger.exception(f\"Failed to get course groups with sis_account_id {sis_account_id}\")\n # For all other schools, display just the Departments dropdown\n else:\n try:\n departments = get_department_data_for_school(sis_account_id, include_ile_sb=False)\n except Exception:\n logger.exception(f\"Failed to get departments with sis_account_id {sis_account_id}\")\n\n logging_dept_cg_text = ' and no selected department or course group'\n if request.method == \"POST\":\n selected_term_id = request.POST.get(\"courseTerm\", None)\n selected_course_group_id = request.POST.get(\"courseCourseGroup\", None)\n selected_department_id = request.POST.get(\"courseDepartment\", None)\n\n logging_dept_cg_text = f' and course group ID {selected_course_group_id}' if selected_course_group_id \\\n else f' and department ID {selected_department_id}' if selected_department_id \\\n else ' and no selected department or course group.'\n logger.debug(f'Retrieving potential course sites for term ID '\n f'{selected_term_id}{logging_dept_cg_text}', extra={\"sis_account_id\": sis_account_id,\n \"school_id\": school_id,\n })\n\n # Retrieve all course instances for the given term_id and account that do not have Canvas course sites\n # nor are set to be fed into Canvas via the automated feed\n potential_course_sites_query = get_course_instance_query_set(\n selected_term_id, sis_account_id\n ).filter(canvas_course_id__isnull=True,\n sync_to_canvas=0,\n bulk_processing=0,\n term__term_id=selected_term_id)\n\n # Filter potential_course_sites_query by course group.\n if selected_course_group_id and selected_course_group_id != '0':\n potential_course_sites_query = potential_course_sites_query.filter(course__course_group=selected_course_group_id)\n # Filter potential_course_sites_query by department.\n elif selected_department_id and selected_department_id != '0':\n potential_course_sites_query = potential_course_sites_query.filter(course__department=selected_department_id)\n\n # TODO maybe better to use template tag unless used elsewhere?\n # TODO cont. 
this may be included in a summary generation to be displayed in page (see wireframe and Jira ticket)\n potential_course_site_count = (\n potential_course_sites_query.count() if potential_course_sites_query else 0\n )\n\n logger.debug(f'Retrieved {potential_course_site_count} potential course sites for term '\n f'{selected_term_id}{logging_dept_cg_text}', extra={\"sis_account_id\": sis_account_id,\n \"school_id\": school_id,\n })\n\n context = {\n \"terms\": terms,\n \"potential_course_sites\": potential_course_sites_query,\n \"potential_site_count\": potential_course_site_count,\n \"canvas_site_templates\": canvas_site_templates,\n \"departments\": departments,\n \"course_groups\": course_groups,\n 'selected_term_id': selected_term_id,\n 'selected_course_group_id': selected_course_group_id,\n 'selected_department_id': selected_department_id,\n 'canvas_url': settings.CANVAS_URL,\n }\n return render(request, \"bulk_site_creator/new_job.html\", context=context)", "def missions(server, missions_to_complete=\"ALL\", action=\"ALL\", session=\"\"):\r\n URL = f\"https://{server}.e-sim.org/\"\r\n if action.lower() not in (\"start\", \"complete\", \"skip\", \"all\"):\r\n print(\"action must be `start`/`complete`/`skip`/`ALL`\")\r\n return\r\n if not session:\r\n session = login(server)\r\n if missions_to_complete.lower() != \"all\":\r\n if action.lower() != \"all\":\r\n if action.lower() == \"start\":\r\n c = session.post(URL + \"betaMissions.html?action=START\", data={\"submit\": \"Mission start\"})\r\n if \"MISSION_START_OK\" not in str(c.url) and \"?action=START\" not in str(c.url):\r\n print(c.url)\r\n return\r\n if action.lower() == \"complete\":\r\n c = session.post(URL + \"betaMissions.html?action=COMPLETE\", data={\"submit\": \"Receive\"})\r\n if \"MISSION_REWARD_OK\" not in str(c.url) and \"?action=COMPLETE\" not in str(c.url):\r\n print(c.url)\r\n return\r\n if action.lower() == \"skip\":\r\n c = session.post(URL + \"betaMissions.html\",\r\n data={\"action\": \"SKIP\", \"submit\": \"Skip this mission\"})\r\n if \"MISSION_SKIPPED\" not in str(c.url):\r\n print(c.url)\r\n return\r\n print(\"Done\")\r\n return\r\n if missions_to_complete.lower() == \"all\":\r\n RANGE = 20\r\n else:\r\n RANGE = int(missions_to_complete)\r\n for _ in range(1, RANGE+1):\r\n try:\r\n home_page = session.get(URL)\r\n tree = fromstring(home_page.content)\r\n check = tree.xpath('//*[@id=\"taskButtonWork\"]//@href')\r\n if check:\r\n double_click(server, session=session)\r\n my_id = str(tree.xpath('//*[@id=\"userName\"]/@href')[0]).split(\"=\")[1]\r\n try:\r\n num = int(str(tree.xpath('//*[@id=\"inProgressPanel\"]/div[1]/strong')[0].text).split(\"#\")[1].split(\":\")[0])\r\n except:\r\n # need to collect reward / no more missions\r\n c = session.post(URL + \"betaMissions.html?action=COMPLETE\", data={\"submit\": \"Receive\"})\r\n if \"MISSION_REWARD_OK\" not in str(c.url) and \"?action=COMPLETE\" not in str(c.url):\r\n print(f\"No more missions today. 
Come back tommorrow!\")\r\n return\r\n print(c.url)\r\n continue\r\n\r\n if not num:\r\n print(\"You have completed all your missions for today, come back tomorrow!\")\r\n return\r\n print(f\"Mission number {num}\") \r\n c = session.post(URL + \"betaMissions.html?action=START\", data={\"submit\": \"Mission start\"})\r\n if \"MISSION_START_OK\" not in str(c.url):\r\n c = session.post(URL + \"betaMissions.html?action=COMPLETE\", data={\"submit\": \"Receive\"})\r\n if \"MISSION_REWARD_OK\" not in str(c.url) and \"?action=COMPLETE\" not in str(c.url):\r\n if num == 1:\r\n session.get(URL + \"inboxMessages.html\")\r\n session.get(f\"{URL}profile.html?id={my_id}\")\r\n \r\n elif num in (2, 4, 16, 27, 28, 36, 43, 59):\r\n double_click(server, session=session)\r\n elif num in (3, 7):\r\n job(server, session)\r\n elif num in (5, 26, 32, 35, 38, 40, 47, 51, 53, 64):\r\n if num == 31:\r\n restores = \"3\"\r\n print(f\"Hitting {restores} restores, it might take a while\")\r\n elif num == 46:\r\n restores = \"2\"\r\n print(f\"Hitting {restores} restores, it might take a while\")\r\n auto_fight(server, restores=\"1\")\r\n elif num == 6:\r\n session.post(f\"{URL}food.html?quality=1\")\r\n elif num == 8:\r\n session.get(URL + \"editCitizen.html\")\r\n elif num == 9:\r\n session.get(URL + \"notifications.html\")\r\n elif num == 10:\r\n session.get(URL + \"newMap.html\")\r\n elif num == 11:\r\n product_market = session.get(f\"{URL}productMarket.html\")\r\n tree = fromstring(product_market.content)\r\n productId = tree.xpath('//*[@id=\"command\"]/input[1]')[0].value\r\n payload = {'action': \"buy\", 'id': productId, 'quantity': 1, \"submit\": \"Buy\"}\r\n session.post(URL + \"productMarket.html\", data=payload)\r\n elif num in (12, 54):\r\n Citizen = requests.get(f'{URL}apiCitizenById.html?id={my_id}').json()\r\n apiRegions = requests.get(URL + \"apiRegions.html\").json()\r\n capital = [row['id'] if row['homeCountry'] == Citizen['citizenshipId'] and\r\n row['capital'] else 1 for row in apiRegions][0]\r\n fly(server, capital, 3, session=session)\r\n elif num in (13, 66):\r\n session.get(URL + 'friends.html?action=PROPOSE&id=8')\r\n citizenAchievements = session.get(URL + \"citizenAchievements.html\")\r\n tree = fromstring(citizenAchievements.content)\r\n ID = str(tree.xpath('//*[@id=\"userName\"]/@href')[0]).split(\"=\")[1]\r\n session.post(URL + \"citizenAchievements.html\",\r\n data={\"id\": ID, \"submit\": \"Recalculate achievements\"})\r\n elif num == 14:\r\n i = session.get(URL + 'storage.html?storageType=EQUIPMENT')\r\n tree = fromstring(i.content)\r\n ID = tree.xpath(f'//*[starts-with(@id, \"cell\")]/a/text()')[0]\r\n payload = {'action': \"EQUIP\", 'itemId': ID.replace(\"#\", \"\")}\r\n session.post(URL + \"equipmentAction.html\", data=payload)\r\n elif num == 15:\r\n session.post(f\"{URL}vote.html?id=1\")\r\n # day 2\r\n elif num == 18:\r\n shout_body = choice([\"Mission: Say hello\", \"Hi\", \"Hello\", \"Hi guys :)\", \"Mission\"])\r\n payload = {'action': \"POST_SHOUT\", 'body': shout_body, 'sendToCountry': \"on\",\r\n \"sendToMilitaryUnit\": \"on\", \"sendToParty\": \"on\", \"sendToFriends\": \"on\"}\r\n session.post(f\"{URL}shoutActions.html\", data=payload)\r\n elif num == 19:\r\n Citizen = requests.get(f'{URL}apiCitizenById.html?id={my_id}').json()\r\n monetaryMarket = session.get(\r\n URL + 'monetaryMarket.html?buyerCurrencyId=0&sellerCurrencyId=' + str(\r\n int(Citizen['currentLocationRegionId'] / 6)))\r\n tree = fromstring(monetaryMarket.content)\r\n ID = 
tree.xpath(\"//tr[2]//td[4]//form[1]//input[@value][2]\")[0].value\r\n payload = {'action': \"buy\", 'id': ID, 'ammount': 0.5, \"submit\": \"OK\"}\r\n session.post(URL + \"monetaryMarket.html\", data=payload)\r\n elif num == 21:\r\n i = session.get(URL + 'storage.html?storageType=EQUIPMENT')\r\n tree = fromstring(i.content)\r\n ID = tree.xpath(f'//*[starts-with(@id, \"cell\")]/a/text()')[0]\r\n sell_eqs(server, ID, 0.01, 48, session)\r\n elif num == 22:\r\n Citizen = requests.get(f'{URL}apiCitizenById.html?id={my_id}').json()\r\n payload = {'product': \"GRAIN\", 'countryId': Citizen['citizenshipId'], 'storageType': \"PRODUCT\",\r\n \"action\": \"POST_OFFER\", \"price\": 0.1, \"quantity\": 100}\r\n sell_grain = session.post(URL + \"storage.html\", data=payload)\r\n print(sell_grain.url)\r\n elif num == 25:\r\n payload = {'setBg': \"LIGHT_I\", 'action': \"CHANGE_BACKGROUND\"}\r\n session.post(URL + \"editCitizen.html\", data=payload) \r\n # day 3\r\n elif num == 29:\r\n for article_id in range(2, 7):\r\n session.post(f\"{URL}vote.html?id={article_id}\")\r\n elif num == 30:\r\n session.post(f\"{URL}sub.html?id=1\")\r\n elif num == 31:\r\n citizenship_or_mu_application(server, randint(1, 21), \"mu\", session)\r\n # day 4\r\n elif num == 37:\r\n shout_body = choice([\"Mission: Get to know the community better\", \"Hi\",\r\n \"Hello\", \"Hi guys :)\", \"Mission\", \"IRC / Skype / TeamSpeak\"])\r\n payload = {'action': \"POST_SHOUT\", 'body': shout_body, 'sendToCountry': \"on\",\r\n \"sendToMilitaryUnit\": \"on\", \"sendToParty\": \"on\", \"sendToFriends\": \"on\"}\r\n session.post(f\"{URL}shoutActions.html\", data=payload)\r\n elif num == 39:\r\n session.get(URL + 'friends.html?action=PROPOSE&id=1')\r\n elif num == 41:\r\n for _ in range(10):\r\n ID = randint(1, 100)\r\n payload = {\"action\": \"NEW\", \"key\": f\"Article {ID}\", \"submit\": \"Publish\",\r\n \"body\": choice([\"Mission\", \"Hi\", \"Hello there\", \"hello\", \"Discord?\"])}\r\n comment = session.post(URL + \"comment.html\", data=payload)\r\n if \"MESSAGE_POST_OK\" in str(comment.url):\r\n break\r\n elif num == 42:\r\n try:\r\n b = session.get(URL + \"partyStatistics.html?statisticType=MEMBERS\")\r\n tree = fromstring(b.content)\r\n ID = str(tree.xpath('//*[@id=\"esim-layout\"]//table//tr[2]//td[3]//@href')[0]).split(\"=\")[1]\r\n payload1 = {\"action\": \"JOIN\", \"id\": ID, \"submit\": \"Join\"}\r\n b = session.post(URL + \"partyStatistics.html\", data=payload1)\r\n if str(b.url) != URL + \"?actionStatus=PARTY_JOIN_ALREADY_IN_PARTY\":\r\n print(b.url)\r\n except:\r\n pass\r\n # day 5\r\n elif num == 45:\r\n session.post(URL + \"replyToShout.html?id=1\",\r\n data={\"body\": choice([\"OK\", \"Whatever\", \"Thanks\", \"Discord?\"]),\r\n \"submit\": \"Shout!\"})\r\n elif num == 46:\r\n payload = {'itemType': \"STEROIDS\", 'storageType': \"SPECIAL_ITEM\", 'action': \"BUY\", \"quantity\": 1}\r\n session.post(URL + \"storage.html\", data=payload)\r\n elif num == 49:\r\n i = session.get(URL + 'storage.html?storageType=EQUIPMENT')\r\n tree = fromstring(i.content)\r\n ID = tree.xpath(f'//*[starts-with(@id, \"cell\")]/a/text()')[0]\r\n payload = {'action': \"EQUIP\", 'itemId': ID.replace(\"#\", \"\")}\r\n session.post(URL + \"equipmentAction.html\", data=payload)\r\n elif num == 50:\r\n session.post(f\"{URL}shoutVote.html?id=1&vote=1\")\r\n elif num == 52:\r\n fly(server, 1, 3, session)\r\n elif num == 55:\r\n requests.get(URL + f\"lan.{my_id}/\")\r\n elif num in (61, 55):\r\n send_motivates(server, \"ALL\", session)\r\n elif num == 
57:\r\n Citizen = requests.get(f'{URL}apiCitizenById.html?id={my_id}').json()\r\n payload = {'receiverName': f\"{Citizen['citizenship']} Org\", \"title\": \"Hi\",\r\n \"body\": choice([\"Hi\", \"Can you send me some gold?\", \"Hello there!\", \"Discord?\"]), \"action\": \"REPLY\", \"submit\": \"Send\"}\r\n session.post(URL + \"composeMessage.html\", data=payload)\r\n\r\n elif num == 58:\r\n session.post(f\"{URL}sub.html?id=2\")\r\n\r\n elif num == 60:\r\n friends(server, \"online\", session)\r\n elif num == 63:\r\n session.post(f\"{URL}medkit.html\")\r\n # if food & gift limits > 10 it won't work.\r\n else:\r\n print(\"I don't know how to finish this mission. you have few seconds to stop me before i skip it\")\r\n time.sleep(randint(1, 7))\r\n c = session.post(URL + \"betaMissions.html?action=COMPLETE\", data={\"submit\": \"Receive\"})\r\n if \"MISSION_REWARD_OK\" not in str(c.url) and \"?action=COMPLETE\" not in str(c.url):\r\n c = session.post(URL + \"betaMissions.html?action=COMPLETE\", data={\"submit\": \"Receive\"})\r\n if \"MISSION_REWARD_OK\" not in str(c.url) and \"?action=COMPLETE\" not in str(c.url):\r\n c = session.post(URL + \"betaMissions.html\",\r\n data={\"action\": \"SKIP\", \"submit\": \"Skip this mission\"})\r\n if \"MISSION_SKIPPED\" not in str(c.url) and \"?action=SKIP\" not in str(c.url):\r\n return\r\n else:\r\n print(f\"Skipped mission {num}\")\r\n print(c.url)\r\n except Exception as error:\r\n print(error)\r\n time.sleep(5)" ]
[ "0.65634745", "0.64576983", "0.62224543", "0.6023303", "0.5766378", "0.5760955", "0.5739446", "0.5460604", "0.5388673", "0.5352139", "0.5332513", "0.53309214", "0.5291213", "0.5275477", "0.524384", "0.524187", "0.52391493", "0.52390355", "0.5194757", "0.51929027", "0.5184178", "0.51784945", "0.5166454", "0.51588446", "0.5144451", "0.51289636", "0.5102464", "0.5090221", "0.5085754", "0.50812197" ]
0.81814945
0
Upon a GET request containing csv course names in a query string... Find the combos and send them as JSON
def getCombosAPI():
    all_args = request.args.lists()
    course_list = all_args[0][1][0].split(",")
    u_COURSE_LIST = map((lambda x: x.upper()), course_list)  # make all caps just in case
    COURSE_LIST = map(str, u_COURSE_LIST)  # unicode list -> list of python strs
    combos = scheduler.schedule(COURSE_LIST)
    return jsonify(combos)
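A minimal client-side sketch of how the handler above might be exercised, assuming it is mounted at /combos on a local Flask dev server; the route path, host/port, and the query-parameter name are assumptions (the handler only reads the first query parameter it receives).

    # Hypothetical call to the endpoint above; URL and parameter name are assumed.
    import requests

    resp = requests.get("http://localhost:5000/combos",
                        params={"courses": "cs101,math200"})  # comma-separated course codes
    combos = resp.json()  # JSON-decoded schedule combinations returned by jsonify()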
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_courses(self):\n\n self.search([]).unlink()\n token = self.env['odoo.moodle'].search([('create_uid', '=', self.env.user.id)]).token\n domain = \"http://localhost:8888\"\n webservice_url = \"/webservice/rest/server.php?\"\n parameters = {\n \"wstoken\":token,\n 'wsfunction': 'core_course_get_courses',\n 'moodlewsrestformat': 'json'\n }\n request = requests.get(url=domain+webservice_url, params=parameters)\n request = request.json()\n print(request)\n\n for req in request:\n try:\n if req['id']==1:\n pass\n else:\n self.create({\n 'course_id': req['id'], \n 'category':req['categoryid'],\n 'fullname':req['fullname'], \n 'shortname':req['shortname'],\n 'summary': req['summary']\n }\n )\n except Exception:\n print('Course not created')", "def extract_courses():\n if settings.XPRO_COURSES_API_URL:\n return requests.get(settings.XPRO_COURSES_API_URL, timeout=20).json()\n return []", "def list_all_courses(request):\r\n courses = Course.objects.all()\r\n courses = [dict(course_name = c.course_name, course_code = c.course_code, course_year = c.year,\r\n course_url = '/course/%s/' % c.course_code.lower()) for c in courses]\r\n\r\n response = {'courses': courses}\r\n\r\n return render_to_response('all_courses.json', response, mimetype = 'application/json',\r\n context_instance = RequestContext(request))", "def autocomplete(request):\n courses = (\n Course.objects.filter(course_filters_pcr_allow_xlist)\n .order_by(\"semester\")\n .values(\"full_code\", \"title\")\n .distinct()\n )\n course_set = sorted(\n [\n {\n \"title\": course[\"full_code\"],\n \"desc\": [course[\"title\"]],\n \"url\": f\"/course/{course['full_code']}\",\n }\n for course in courses\n ],\n key=lambda x: x[\"title\"],\n )\n departments = Department.objects.all().values(\"code\", \"name\")\n department_set = sorted(\n [\n {\n \"title\": dept[\"code\"],\n \"desc\": dept[\"name\"],\n \"url\": f\"/department/{dept['code']}\",\n }\n for dept in departments\n ],\n key=lambda d: d[\"title\"],\n )\n\n instructors = (\n Instructor.objects.filter(\n id__in=Subquery(Section.objects.filter(section_filters_pcr).values(\"instructors__id\"))\n )\n .distinct()\n .values(\"name\", \"id\", \"section__course__department__code\")\n )\n instructor_set = {}\n for inst in instructors:\n if inst[\"id\"] not in instructor_set:\n instructor_set[inst[\"id\"]] = {\n \"title\": inst[\"name\"],\n \"desc\": set([inst[\"section__course__department__code\"]]),\n \"url\": f\"/instructor/{inst['id']}\",\n }\n instructor_set[inst[\"id\"]][\"desc\"].add(inst[\"section__course__department__code\"])\n\n def join_depts(depts):\n try:\n return \",\".join(sorted(list(depts)))\n except TypeError:\n return \"\"\n\n instructor_set = sorted(\n [\n {\n \"title\": v[\"title\"],\n \"desc\": join_depts(v[\"desc\"]),\n \"url\": v[\"url\"],\n }\n for v in instructor_set.values()\n ],\n key=lambda x: x[\"title\"],\n )\n\n return Response(\n {\"courses\": course_set, \"departments\": department_set, \"instructors\": instructor_set}\n )", "def get_course(data):\n\n return {item['course'] for item in data}", "def search_courses():\n current_user = view_helpers.get_current_user()\n courses, has_more = m.Course.search(flask.request.values, current_user)\n\n course_dicts, user_course_dicts, _ = (\n m.Course.get_course_and_user_course_dicts(courses, current_user))\n\n return api_util.jsonify({\n 'courses': course_dicts,\n 'user_courses': user_course_dicts,\n 'has_more': has_more,\n })", "def view_all_courses(request, username):\n if request.method == 'GET':\n\n # if user log in \n 
try:\n user = User.objects.get(username=username)\n if ensure_login(user) == False:\n return JsonResponse({'login': 'User must login'}, status=403) \n except:\n return JsonResponse({'login': 'User must login'}, status=403)\n\n if user.is_staff:\n courses = courseQuerySetSerializer(user.created_courses.all())\n else:\n courses = courseQuerySetSerializer(user.enrolled_courses.all())\n\n if courses is None:\n return JsonResponse({'error': 'No courses to view'}, status=404)\n \n return JsonResponse({'success': True, 'courses': courses}, status=200) # each course_code should be stored in data-course attribte inorder to grap it when perfoming actions on a speific course\n else:\n return JsonResponse({'error': 'Method not allowed'}, status=405)", "def parseCourses(self, response):\n sel = Selector(response)\n courses = sel.xpath('//div[@class=\"course-info expandable\"]')\n for c in courses:\n item = CourseItem(response.request.meta[\"item\"])\n item['code'] += '-' + c.xpath('@id').get().strip()\n item['name'] = c.xpath('//a[@class=\"courselink\"]/text()').get().strip()\n # everything works up to here #\n href = c.xpath('div/h3/a/@href').get()\n url = urljoin('https://web-app.usc.edu', href)\n yield Request(url=url,callback=self.parseSection,meta={'item':item})", "def get_courts(self, name=\"\", abbreviation=\"\", jurisdiction=\"\", slugs_only=False):\n url_base = self._get_api_url() + \"courts/\"\n url_queries = []\n\n if name:\n url_queries.append(\"name=%s\" % name)\n\n if abbreviation:\n url_queries.append(\"name_abbreviation=%s\" % abbreviations)\n\n if jurisdiction:\n jurisdiction = jurisdiction.lower()\n valid_jurisdictions = [elem['slug'] for elem in self.get_jurisdictions()[\"results\"]]\n if jurisdiction not in valid_jurisdictions:\n raise Exception(\"Jurisdiction not recognized. 
Check spelling?\")\n url_queries.append(\"jurisdiction=%s\" % jurisdiction)\n\n uri = self._build_uri(url_base, url_queries)\n courts = self._request(uri)\n\n if slugs_only:\n names = self._extract_from_paginated(courts.json(), \"slug\")\n return names\n\n print(uri)\n return courts.json()", "def get_keys_and_uses_from_csv(request):\n\n labels = []\n pubmed = []\n reports = []\n concepts = []\n json_resp = {}\n type_selected = ''\n for filename, file in request.FILES.items():\n if filename.startswith('reports'):\n type_selected = 'reports'\n reports.append(file)\n if filename.startswith('pubmed'):\n type_selected = 'pubmed'\n reports.append(file)\n if filename.startswith('labels'):\n type_selected = 'labels'\n reports.append(file)\n if filename.startswith('concepts'):\n type_selected = 'concepts'\n reports.append(file)\n\n keys,uses,final_uses = get_keys_and_uses_csv(reports)\n json_resp['keys'] = keys\n # print(uses)\n # print(type(uses))\n #\n uses = list(map(lambda x: x.lower(), uses))\n final_uses = list(map(lambda x: x.lower(), final_uses))\n json_resp['uses'] = list(uses)\n # print(json_resp['uses'])\n return JsonResponse(json_resp)", "def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)", "def scrape(url, filename):\n courselist = []\n headers = ['title', 'description', 'course number', 'duration', 'difficulty', 'instructors', 'course url']\n with open(filename, 'wb') as outfile:\n wr = csv.writer(outfile)\n wr.writerow(headers)\n courses = json.load(urllib2.urlopen(url))\n for course in courses['courses']:\n c = Course()\n c.title = course['title']\n c.desc = course['summary']\n c.course_number = course['key']\n c.duration = str(course['expected_duration']) + ' ' + str(course['expected_duration_unit'])\n c.difficulty = course['level']\n c.url = 'https://www.udacity.com/course/' + course['slug']\n l = len(course['instructors'])\n for i in xrange(l):\n if(i == 0):\n c.instructors += course['instructors'][i]['name']\n else:\n c.instructors += ';' + course['instructors'][i]['name']\n with open(filename, 'ab') as outfile:\n wr = csv.writer(outfile)\n wr.writerow(c.getaslist())", "def export_courses():\n courses = Course.query().fetch()\n dictionary = {}\n\n for course in courses:\n dictionary[course.department + \"\" + course.number] = course.to_dict()\n\n return dictionary", "def course_query(self, term, **kwargs):\n data = {\n 'course_number': kwargs.get('course_number', ''),\n 'subject': kwargs.get('subject', ''),\n 'instructor': kwargs.get('instructor', ''),\n 'course_start_eval': 'After', # todo verify vs 'at'\n 'course_start_time': kwargs.get('start', '-'), # todo parse arg into correct time\n 'course_end_eval': 'Before', # todo verify vs 'at'\n 'course_end_time': kwargs.get('end', '-'), # todo parse arg into correct time,\n 'course_level': kwargs.get('level', '-'),\n 'course_units': kwargs.get('units', '-'),\n 'course_status': 'ALL',\n 'sortBy': '',\n 'showMe': '',\n 'runMe': '1',\n 'clearMe': '1',\n 'termCode': term.code,\n 'expandFilters': ''\n }\n try:\n r = self.post(self.COURSE_SEARCH_ENDPOINT, data=data)\n results = json.loads(r.text)['Results'] # {'COLUMNS': [...], 'DATA': [[col1_data, ...], ...}\n except KeyError:\n r = 
self.post(self.COURSE_SEARCH_ENDPOINT, data=data)\n results = json.loads(r.text)['Results']\n\n nrml_course_responses = self._normalize_course_query_response(results)\n\n courses = [self._course_from_query_response(term, resp) for resp in nrml_course_responses]\n return courses", "def getCandidates(request):\n if request.is_ajax():\n candidates = getCand(request.POST.get(\"input\", None), request.POST.get(\"all_tags\", None))\n return JsonResponse(candidates)", "def validate_and_get_data(self, request):\n data = {'format': None, 'course': None}\n aux_resumen = request.GET.get('format', '')\n if aux_resumen == 'resumen':\n data['format'] = True\n elif aux_resumen == 'all':\n data['format'] = False\n # valida curso\n if request.GET.get(\"course\", \"\") != \"\":\n # valida si existe el curso\n if self.validate_course(request.GET.get(\"course\", \"\")):\n data['course'] = request.GET.get(\"course\", \"\")\n \n return data", "def my_form_post():\n text_list = []\n #make list of form inputs\n for i in range(1, AMOUNT_OF_COURSES + 1):\n form_num = 'text' + str(i)\n text_list.append(request.form[form_num])\n #remove items with no input, generate string of courses\n final_list = []\n for text in text_list:\n if not text == \"\":\n final_list.append(text)\n courses_str = \"\"\n for course in final_list[:-1]:\n courses_str += (str(course) + ',')\n courses_str += str(final_list[-1])\n courses_str = courses_str.upper()\n #turn string of courses entered into list\n c_list = courses_str.split(',')\n #get the schedules\n #print \"\\nCourse list:\"\n #print str(c_list) + \"\\n\"\n my_combos = scheduler.schedule(c_list)\n resp = make_response(redirect('/sched'))\n resp.set_cookie('course_combos', '', expires=0)\n resp.set_cookie('course_combos', json.dumps(my_combos))\n return resp", "def species_autocomplete(request, format='csv'):\n \n if request.GET.get('q'):\n q = request.GET.get('q')\n \n species = Species.objects.all().order_by('taxon_code')\n \n # split tokens by period or white space\n q_tokens = split(r'[.\\s]+', q)\n \n # prefix match for each token in the search string against genus name or species name\n for token in q_tokens:\n species = species.filter(Q(species_name__istartswith=token) | Q(genus_name__genus_name__istartswith=token))\n \n \n \n \n # empty species list if no query provided by the user\n else:\n species = []\n \n\n \n \n if format == 'csv':\n # serialize results as CSV\n return CSVResponse(\n [{'species': s.taxon_code} for s in species], \n fields=('species',) )\n \n \n else:\n # serialize results as JSON\n JSON_objects = [{'label': (s.genus_name_id + ' ' + s.species_name), 'value': s.taxon_code} for s in species]\n return JSONResponse({'species': JSON_objects})", "def _course_from_query_response(self, term, response):\n units_low, units_hi = float(response['UNITS_LOW']), float(response['UNITS_HIGH'])\n if units_low > units_hi:\n # Yes, this is an actual response case...\n # Occurs when a course has a constant # of units.\n # I think units_hi should equal units_low when actual units is constant.\n units_hi = units_low\n\n instructor_name = instructor_email = None\n try:\n instructor_meta = next(instr for instr in response['INSTRUCTORS'] if instr['PRIMARY_IND'] == 'Y')\n instructor_name = '{} {}'.format(instructor_meta['FIRST_NAME'], instructor_meta['LAST_NAME'])\n instructor_name = instructor_name.strip()\n instructor_email = instructor_meta['EMAIL']\n except StopIteration:\n # No instructor specified\n pass\n\n ge_areas = list()\n try:\n area_codes = filter(None, 
response['GE3CREDIT'].split(','))\n ge_areas = [GE_AREA_NAMES_BY_SB_CODE[area_code] for area_code in area_codes]\n except KeyError as e:\n logging.exception('Unrecognized GE code')\n\n meetings = list()\n for meeting in response['COURSEMEETINGDATA']:\n days = meeting['WEEKDAYS'].replace(',', '')\n times = None\n try:\n begin_hour, begin_minutes = meeting['BEGIN_TIME'][:2], meeting['BEGIN_TIME'][2:]\n end_hour, end_minutes = meeting['END_TIME'][:2], meeting['END_TIME'][2:]\n begin = timedelta(hours=int(begin_hour), minutes=int(begin_minutes))\n end = timedelta(hours=int(end_hour), minutes=int(end_minutes))\n times = (begin, end)\n except TypeError:\n # times are None, indicating TBA\n pass\n\n location = meeting['BLDG_DESC']\n if meeting['ROOM']:\n location += ' ' + meeting['ROOM']\n\n meeting = {\n 'days': days,\n 'times': times,\n 'location': location,\n 'type': meeting['MEET_TYPE_DESC_SHORT']\n }\n meetings.append(meeting)\n\n final_exam = None\n try:\n final_exam = datetime.strptime(response['FINALEXAMSTARTDATE'], '%B, %d %Y %H:%M:%S')\n except TypeError:\n # No final exam\n pass\n\n drop_time = response['ALLOWEDDROPDESC']\n drop_days_match = re.match(r'^([0-9]+)', drop_time)\n if drop_days_match:\n drop_time = int(drop_days_match.group(1))\n if response['DESCRIPTION']:\n response['DESCRIPTION'] = response['DESCRIPTION'].replace('\\n', ' ').replace('\\r', '').strip()\n response['TITLE'] = response['TITLE'].strip()\n\n return Course(\n term=term,\n crn=response['PASSEDCRN'],\n subject_code=response['SUBJECT_CODE'],\n name='{} {}'.format(response['SUBJECT_CODE'], response['COURSE_NUMBER']),\n number=response['COURSE_NUMBER'],\n section=response['SEC'],\n title=response['TITLE'].strip(),\n description=response['DESCRIPTION'],\n instructor_consent_required=bool(int(response['CONSENTOFINSRUCTORREQUIRED'])),\n units=(units_low, units_hi),\n instructor=instructor_name,\n instructor_email=instructor_email,\n ge_areas=ge_areas,\n available_seats=response['BLEND_SEATS_AVAIL'],\n wl_length=response['BLEND_WAIT_COUNT'],\n meetings=meetings,\n final_exam=final_exam,\n drop_time=drop_time,\n prerequisites=re.sub(r'\\s+', ' ', response['PREREQUISITES']) if response['PREREQUISITES'] else None)", "def parse_catalogs(campuses=['Seattle', 'Bothell', 'Tacoma'], struct='df', \n show_progress=False):\n assert type(campuses) == list or type(campuses) == dict, 'Type of \"campuses\" must be list or dict'\n if type(campuses) == dict:\n for key, value in campuses.items():\n if type(key) != str or type(value) != list:\n raise ValueError('''\"campuses\" dict must have keys of type str and\n values of type list''')\n # Check if all campuses in 'campuses' are valid\n assert all([c in ['Seattle', 'Bothell', 'Tacoma'] for c in list(map(str.title, campuses))])\n assert type(struct) == str, 'Type of \"struct\" must be str'\n assert type(show_progress) == bool, 'Type of \"show_progress\" must be bool'\n assert struct in ['df', 'dict'], f'{struct} is an invalid argument for \"struct\"'\n\n # Progress bar for Course Schedule Parsing\n if show_progress:\n progress_bar = tqdm()\n\n def parse_campus(department_data, campus):\n \"\"\"\n Parses all courses from a UW Campus\n\n @params\n\n 'department_data': BeautifulSoup object with the department list website source\n for the given 'campus'\n\n 'campus': The campus to get courses from\n\n Returns\n\n A pandas DataFrame with all courses in the given campus\n \"\"\"\n\n def extract_data(department_link):\n \"\"\"\n Extracts all course information from a UW Department\n\n 
@params:\n\n 'department_link': The url to the UW Department to get course\n information from\n\n Returns\n\n A list of lists. Each nested list represents one course section with the\n following values (in this order):\n\n 'Campus', 'Department Name', 'Course Number', 'Course Name', 'Credits',\n 'Areas of Knowledge', 'Quarters Offered', 'Offered with', \n 'Prerequisites', 'Co-Requisites', 'Description'\n \"\"\"\n # Update the progress bar\n if show_progress:\n progress_bar.update()\n\n # Regular expressions for searching course descriptions stored in local variables\n # for better peformance\n local_course_re = course_re\n local_course_name_re = course_name_re\n local_credits_re = credits_re\n local_credits_num_re = credits_num_re\n local_offered_jointly_re = offered_jointly_re\n local_CAMPUSES = CAMPUSES\n\n # Method used in extracting data from course descriptions found in the local scope\n # are stored in local variables for better performance\n local_complete_description = complete_description\n local_get_offered = get_offered\n local_get_requisites = get_requisites\n\n # All the courses in the department\n courses = []\n dep_file = department_link.get('href')\n\n # If the user entered a dict as the 'campuse' parameter, departments\n # are checked here\n try:\n # The String in the conditional is the abbreviated Department Name i.e EE\n # for Electrical Engineering\n if normalize('NFKD', department_link.text).rsplit('(', 1) \\\n [-1].replace(' ', '')[:-1] not in campuses[campus]:\n return None\n except TypeError:\n pass\n \n # The only links that are used for finding departments are those\n # of the format [a-z]+.html\n if '/' not in dep_file and dep_file.endswith('.html') \\\n and dep_file not in parsed_departments:\n parsed_departments.add(dep_file)\n department = BeautifulSoup(requests.get( \\\n f'{local_CAMPUSES[campus]}{dep_file}').text, features='lxml')\n for course in department.find_all('a'):\n course_ID = course.get('name') \n if course_ID:\n course_ID = course_ID.upper()\n course_title = course.find('b').text\n # The Course Description\n description = course.get_text().replace(course_title, '', 1) \n instructors = course.find('i')\n if instructors:\n description = description.replace(str(instructors.get_text()), '', 1)\n del instructors\n course_text = local_complete_description( \\\n description.rsplit('View course details in MyPlan', 1)[0])\n # Course Number i.e 351\n course_number = re.sub(local_course_re, '', course_ID)\n match_name = re.search(local_course_name_re, course_title)\n match_credit_num = re.search(local_credits_num_re, course_title)\n match_credit_types = re.findall(local_credits_re, course_title)\n # Jointly offered course with the given course\n if 'jointly with' in course_text: \n offered_jointly = course_text.rsplit('jointly with ', 1)[-1].rsplit(';', 1)[0] \n offered_jointly = ','.join(re.findall( \\\n local_offered_jointly_re, offered_jointly)).replace(' ', '') \n else:\n offered_jointly = ''\n courses.append(\n # Campus, Department Name and Course Number\n [campus, course_ID[:-3], course_number, \n # Course Name\n match_name.group(0).split(course_number, 1)[-1].strip() \\\n if match_name else '',\n # Number of credits for the course\n match_credit_num.group(0)[1:-1] \\\n if match_credit_num else '', \n # Course Credit Types (I&S, DIV, NW, VLPA, QSR, C)\n ','.join([list(filter(('').__ne__, x))[0] for x in match_credit_types]) \\\n if match_credit_types else '', \n local_get_offered(course_text),\n offered_jointly, local_get_requisites(course_text, 
'Prerequisite:'), \n local_get_requisites(course_text, 'Co-requisite'), course_text]\n )\n return courses\n\n # In the course catalog website, several department links appear multiple times\n # To prevent parsing the same department more than once, parsed departments\n # are tracked in 'parsed_departments'\n parsed_departments = set()\n local_extract_data = extract_data\n department_data = BeautifulSoup(requests.get(department_data).text, features='lxml')\n\n campus_catalog = []\n # Extract data from department websites in parallel to reduce idle time\n with cf.ThreadPoolExecutor() as executor:\n results = [executor.submit(local_extract_data, department_link) \n for department_link in department_data.find_all('a')]\n for result in cf.as_completed(results):\n dptmnt = result.result()\n if dptmnt:\n campus_catalog.append(dptmnt)\n\n # DataFrame with all courses in the campus\n return pd.DataFrame(\n [course for department in campus_catalog for course in department], \n columns=COLUMN_NAMES\n )\n\n # The pandas DataFrame to store the entire course catalog for each UW Campus entered\n # by the user\n course_catalog = pd.DataFrame()\n\n # Parse all three campuses in parallel for faster run time as well\n # as get the departments dictionary from the 'get_departments' method\n # to add a 'Colleges' column to categorize all courses in their College.\n with cf.ThreadPoolExecutor() as executor:\n results = []\n campuses_for_dict = campuses\n if type(campuses) == dict:\n campuses_for_dict = list(campuses.keys())\n results.append(executor.submit(get_departments, campuses=campuses_for_dict, struct='dict'))\n for campus, link in CAMPUSES.items():\n if campus.title() in campuses: \n results.append(executor.submit(parse_campus, link, campus.title()))\n for result in cf.as_completed(results):\n returned = result.result()\n if type(returned) == dict:\n # Departments dict used to create the 'College' column in the main DataFrame\n departments = returned\n else:\n course_catalog = pd.concat([course_catalog, returned])\n\n # Add Course ID as the index of the DataFrame to allow for easy course searching\n # Course ID = Department Name + Course Number\n # Example: EE235 = EE + 235\n course_catalog['Course ID'] = course_catalog['Department Name'] + course_catalog['Course Number']\n course_catalog['College'] = course_catalog['Department Name'].apply(check_campus, args=(departments, 'College'))\n course_catalog.set_index('Course ID', inplace=True)\n # Re-order indices to place 'College' right after the 'Department Name'\n course_catalog = course_catalog[['Campus', 'Department Name', 'College', 'Course Number', 'Course Name', 'Credits',\n 'Areas of Knowledge', 'Quarters Offered', 'Offered with', \n 'Prerequisites', 'Co-Requisites', 'Description']]\n \n if struct == 'df':\n return course_catalog\n elif struct == 'dict':\n return course_catalog.to_dict(orient='index')", "def citations(request, format='csv'):\n \n filtered = False # make sure we're filtering by something\n records = Record.objects.distinct() #.order_by('gabi_acc_number')\n \n \n # accession number\n if request.GET.get('gabi_acc_number'):\n filtered = True\n records = records.filter(gabi_acc_number=request.GET.get('gabi_acc_number').upper())\n \n # species AND bentity\n if request.GET.get('species'):\n filtered = True\n if request.GET.get('species'): \n records = records.filter(valid_species_name_id=request.GET.get('species').capitalize())\n if request.GET.get('bentity_id'):\n records = records.filter(bentity_id=request.GET.get('bentity_id').upper())\n 
\n # lat and lon\n if request.GET.get('lat') and request.GET.get('lon'):\n filtered = True\n if request.GET.get('lat'):\n records = records.filter(lat=request.GET.get('lat'))\n if request.GET.get('lon'):\n records = records.filter(lon=request.GET.get('lon'))\n \n # status\n if request.GET.get('status'):\n records = records.filter(status=request.GET.get('status')[0].upper())\n \n \n # error message if the user didn't supply an argument to filter the records\n if not filtered: \n return errorResponse(\"Please supply at least one these argument-combinations: 'gabi_acc_number', ('species' and 'bentity_id'), or ('lat' and 'lon').\", format, {'records': []})\n \n \n # fetch all the bentitites at once, so we don't have to hit the database once for each record\n records = records.prefetch_related('bentity') \n \n output_objects = [{\n 'gabi_acc_number': r.gabi_acc_number,\n 'species': r.valid_species_name_id,\n 'bentity_id': r.bentity_id,\n 'bentity_name': r.bentity.bentity,\n 'status': r.status,\n 'type_of_data': r.type_of_data,\n 'lat': r.lat,\n 'lon': r.lon, \n 'citation': r.citation,\n } for r in records]\n \n \n \n if format == 'csv':\n return CSVResponse(output_objects, ('gabi_acc_number', 'species', 'bentity_id', 'bentity_name', 'lat', 'lon', 'status', 'type_of_data', 'citation'))\n \n else:\n return JSONResponse({'records': output_objects})", "def get_courses(self, selected_domain_url: str) -> Tuple[List[str], List[str]]:\n\n courses, courses_url = [], []\n print(\"\\nDownloading Courses...\\n\")\n try:\n selected_domain_page = BeautifulSoup(\n requests.get(selected_domain_url).text, \"lxml\"\n )\n except requests.ConnectionError as e:\n print(\"Error Connecting!\\n\", e)\n sys.exit(1)\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n sys.exit(1)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n sys.exit(1)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n sys.exit(1)\n except requests.exceptions.RequestException as err:\n print(\"OOps: Something Else\", err)\n sys.exit(1)\n\n for course_header in selected_domain_page.find_all(\n COURSE_HEAD[\"tag\"], class_=COURSE_HEAD[\"class\"]\n ):\n course = course_header.find(\n COURSE_URL[\"tag\"], class_=COURSE_URL[\"class\"]\n ).text\n courses.append(course)\n\n course_link = course_header.find(\n COURSE_URL[\"tag\"], class_=COURSE_URL[\"class\"]\n )\n course_slug = course_link[\"href\"]\n courses_url.append(ROOT_URL + course_slug)\n return courses, courses_url", "def get_course_table(self, table):\n json_result = {}\n row_list = table.xpath('.//table[@id = \"s_course\"]/tr[position() > 1]')\n for row in row_list:\n session = row.xpath('./td[1]/text()')\n course_full_code_list = row.xpath('.//a[starts-with(@href, \"javascript:course_popup\")]/text()')\n course_name_list = row.xpath('.//font[@style = \"font-size:7pt;\"]/text()')\n course_list = []\n if len(course_full_code_list) != len(course_name_list):\n # year course design project would be count twice\n if (\"Design Project\" == course_name_list[0]) & \\\n (len(course_full_code_list) + 1 == len(course_name_list)):\n course_name_list = course_name_list[1:]\n else:\n raise ProfileException(\n \"Error: unmatched lists. 
course code list:\",\n course_full_code_list, \"\\n course name list:\", course_name_list)\n for i, full_code in enumerate(course_full_code_list):\n if re.match(re.compile('\\w{3}\\d{3}[YH]1\\s+[SFY]'), full_code) is None:\n raise ProfileException(\"Illegal course code!:\" + full_code)\n course_list.append({\n \"courseName\": course_name_list[i],\n \"courseCode\": full_code[0:6],\n \"courseTime\": full_code[-1],\n \"courseLength\": full_code[6:8]\n })\n # there is a empty session\n if session:\n json_result.update({session[0]: course_list})\n if json_result:\n return json_result\n else:\n raise ProfileException(\"Failed to get course_table table(row list is empty)\")", "def bulk_cavs_search():\n\n data = flask.request.json\n if not data or not data.get(\"ids\"):\n return exceptions.BadRequest()\n response = _get_bulk_cad_assessment_data(data)\n return flask.Response(json.dumps(response), mimetype='application/json')", "def get_batch_list(request):\n\n\n json_resp = {}\n json_resp['batch_list'] = []\n\n usecase = request.GET.get('usecase',None)\n # print(usecase)\n if usecase is None:\n batch = Report.objects.all().exclude(institute='PUBMED').values('batch')\n else:\n use_obj = UseCase.objects.get(name=usecase)\n batch = Report.objects.filter(name=use_obj).exclude(institute = 'PUBMED').values('batch')\n\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_raw_requisites_data(url):\n response = requests.get(url)\n if response.status_code != 200:\n raise Exception('Error while making GET request: %s' % response.status_code)\n\n soup = BeautifulSoup(response.text)\n\n course_name = soup.find('span', course_name_class).text\n course_name = course_name.split('.')[0]\n course_name = ' '.join(course_name.split())\n course_data = course_name.split(' ')\n course_num = course_data[:-1]\n course_dept = ' '.join(course_data[0:len(course_data)-1])\n course_dept = dept_map.get(course_dept, course_dept)\n\n course_requisites = soup.find('span', {'id': enforced_requisites_id}).text\n return course_requisites, course_dept", "def get_requisites(url):\n course_requisites, course_dept = get_raw_requisites_data(url)\n if course_requisites == 'None':\n return []\n\n # Order matters!\n bad_strings = ['(', ')', 'C- or better', 'corequisite', 'courses', 'course']\n for bad_string in bad_strings:\n course_requisites = course_requisites.replace(bad_string, '').strip()\n\n course_requisites = course_requisites.split(' and ')\n fixed_requisites = [requisite.split(' or ') for requisite in course_requisites]\n\n depts = []\n for req_list in fixed_requisites:\n course_name = req_list[0]\n if string_is_course_id(course_name):\n depts.append(course_dept)\n else:\n data = course_name.split(' ')\n dept = ' '.join(data[0:len(data)-1])\n depts.append(dept)\n\n for i, req_list in enumerate(fixed_requisites):\n for j, course in enumerate(req_list):\n if string_is_course_id(course):\n fixed_name = depts[i] + ' ' + course\n req_list[j] = fixed_name\n fixed_requisites[i] = req_list\n\n return fixed_requisites", "def _request(self, method, path, *args, **kwargs):\n path = '/courses/{course_id}/' + path\n path = path.format(**{n: getattr(self, n) for n in ('course_id', 'course_num', 'course_run', 'course_org')})\n logging.debug(path)\n return getattr(self.client, method)(path, *args, **kwargs)", "def 
get_courses_info(url, headers):\n dash = get_page_contents(url, headers)\n soup = BeautifulSoup(dash)\n courses_soup = soup.find_all('article', 'course')\n courses = []\n for course_soup in courses_soup:\n course_id = None\n course_name = course_soup.h3.text.strip()\n course_url = None\n course_state = 'Not yet'\n try:\n # started courses include the course link in the href attribute\n course_url = BASE_URL + course_soup.a['href']\n if course_url.endswith('info') or course_url.endswith('info/'):\n course_state = 'Started'\n # The id of a course in edX is composed by the path\n # {organization}/{course_number}/{course_run]\n course_id = course_soup.a['href'][9:-5]\n except KeyError:\n pass\n courses.append(Course(id=course_id,\n name=course_name,\n url=course_url,\n state=course_state))\n return courses", "def get_course_results(self,subj,cnum=\"\"):\n\t\tinfo = self.info # take info from self\n\n\t\turl = info['url_base'] + info['url_action']\n\t\tvals = PostData()\n\t\tvals.add_item('TRM'\t\t\t\t, \"U\")\n\t\tvals.add_item('term_in'\t\t\t, \"201501\")\n\t\tvals.add_item('sel_subj'\t\t, \"dummy\")\n\t\tvals.add_item('sel_day'\t\t\t, \"dummy\")\n\t\tvals.add_item('sel_schd'\t\t, \"dummy\")\n\t\tvals.add_item('sel_insm'\t\t, \"dummy\")\n\t\tvals.add_item('sel_camp'\t\t, \"dummy\")\n\t\tvals.add_item('sel_levl'\t\t, \"dummy\")\n\t\tvals.add_item('sel_sess'\t\t, \"dummy\")\n\t\tvals.add_item('sel_instr'\t\t, \"dummy\")\n\t\tvals.add_item('sel_ptrm'\t\t, \"dummy\")\n\t\tvals.add_item('sel_attr'\t\t, \"dummy\")\n\t\tvals.add_item('sel_subj'\t\t, subj)\n\t\tvals.add_item('sel_crse'\t\t, cnum)\n\t\tvals.add_item('sel_title'\t\t, \"\")\n\t\tvals.add_item('sel_schd'\t\t, \"%\")\n\t\tvals.add_item('sel_insm'\t\t, \"%\")\n\t\tvals.add_item('sel_from_cred'\t, \"\")\n\t\tvals.add_item('sel_to_cred'\t\t, \"\")\n\t\tvals.add_item('sel_camp'\t\t, \"%\")\n\t\tvals.add_item('begin_hh'\t\t, \"0\")\n\t\tvals.add_item('begin_mi'\t\t, \"0\")\n\t\tvals.add_item('begin_ap'\t\t, \"a\")\n\t\tvals.add_item('end_hh'\t\t\t, \"0\")\n\t\tvals.add_item('end_mi'\t\t\t, \"0\")\n\t\tvals.add_item('end_ap'\t\t\t, \"a\")\n\n\t\t#data = urllib.urlencode(vals.get_string())\n\t\tdata = vals.get_string()\n\t\treq = urllib2.Request(url, data=data)\n\t\tprint(\"=== Headers ===\")\n\t\tprint(req.headers)\n\t\tprint(\"=== Data ===\")\n\t\tprint(req.data)\n\t\tresponse = urllib2.urlopen(req)\n\t\treturn response.read()" ]
[ "0.6156919", "0.6040665", "0.60034776", "0.5928247", "0.5916178", "0.5860666", "0.58110094", "0.57956994", "0.5633802", "0.5619331", "0.5583209", "0.5545663", "0.5522311", "0.5501062", "0.54804116", "0.5449549", "0.54390436", "0.5390255", "0.53620845", "0.53605145", "0.5345557", "0.53399056", "0.5319056", "0.5298876", "0.5261197", "0.52338564", "0.5225055", "0.5223008", "0.5220107", "0.52134585" ]
0.68711597
0
Returns the set of combos for the current page
def getCombosForPage(page_num, per_page, count_of_combos, combos):
    combos_start = (per_page * (page_num - 1)) + 1
    combos_end = combos_start + per_page
    these_combos = {}
    for key in range(combos_start, combos_end):
        try:
            # if new dict is not an int schedules are not sorted on the page
            these_combos[key] = combos[str(key)]
        except KeyError:
            pass
    return these_combos
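A small usage sketch for the pager above; the sample combos dict is invented for illustration and assumes the string keys "1", "2", ... that the function looks up.

    # Invented sample data: 25 combos keyed by string indices, as the function expects.
    combos = {str(i): "schedule %d" % i for i in range(1, 26)}
    page_2 = getCombosForPage(page_num=2, per_page=10,
                              count_of_combos=len(combos), combos=combos)
    # page_2 maps integer keys 11..20 to their combos; missing keys are skipped
    # by the KeyError guard, so a short final page simply comes back smaller.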
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combos():\n print 'Loading combo info page'\n\n test_data_folder = os.path.join('data', 'testdata')\n base_file_name = 'CU-PENN.dvw'\n base_file_key = os.path.join(test_data_folder, base_file_name)\n\n parser = Parser(base_file_key)\n combo_list = parser.read_combos()\n\n combo_dicts = [{'combo': combo, 'name': name} for combo, (name, _) in sorted(combo_list.iteritems())]\n\n return render_template('combos.html', rows=combo_dicts)", "def getAttributeCombos(self):\r\n return self.attributeCombos", "def get_all_menu():", "def generate_option_combos(self):\n available_options = list()\n for option in self.options:\n # generate a list of dicts for every value of the option\n tmp = list()\n for value in option.values:\n tmp.append({option.name: value})\n\n available_options.append(tmp)\n\n # generate a list of tuples for each product option combination\n option_combos = list(itertools.product(*available_options))\n\n return option_combos", "def getSets():", "def combinations(self):\n return self._combinations", "def get_building_choices(call_type=None):\n dataservices = DataService.objects()\n buildings_list = []\n for dataservice in dataservices:\n for building in dataservice.buildings:\n print building\n if building not in buildings_list:\n buildings_list.append(building)\n if not call_type:\n return zip(buildings_list, buildings_list)\n else:\n return buildings_list", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def _identify_combos(model, combo_tags):\n \n # Identify which load combinations to evaluate\n if combo_tags is None:\n combo_list = model.LoadCombos.values()\n else:\n combo_list = []\n for combo in model.LoadCombos.values():\n if any(tag in combo.combo_tags for tag in combo_tags):\n combo_list.append(combo)\n \n return combo_list", "def iter_combos(include_unknown=False):\n if include_unknown:\n return _combos\n else:\n return _combos[:-7]", "def get_page_mode(self):\n return 'combination'", "def Allcombos():\n\n global allcombos\n\n allcombos = []\n\n results = product(\"ABCDEF\", repeat=4)\n\n allcombos = resulttolist(results)\n\n return AIguessing(allcombos)", "def getPossibilities(self):\n \n return sorted(self._possibilities)", "def generate_grid(self):\n # Get a list of parameters values\n list_params = list(self.params_grid_data.values())\n\n # Generate a list of all combos for params\n params_combos = list(product(*list_params))\n names = self._generate_column_names()\n\n # Loop over the array of dictionary for the compounds and amounts:\n for experiment in self.compounds_data:\n for params in params_combos:\n compounds = experiment['compounds']\n amounts = experiment['amounts']\n self.all_combos += [chain(compounds, amounts,\n params)]\n self.all_combos = pd.DataFrame.from_records(self.all_combos)\n self.all_combos.columns = names\n return self.all_combos", "def get_all_combinations(self):\n stuffs = map(lambda row: row.split(\" \"), self.expanded['GS'] )\n\n combs = self.all_combinations(stuffs)\n\n cls_repeated = self.expanded['CLS'].reset_index(drop=True)[np.array(combs[0])]\n\n A = cls_repeated.reset_index(drop=True)\n B = pd.Series(combs[1])\n\n combo_table = pd.DataFrame([A, B]).T\n\n combo_table.columns = ['CLS','GSCMB']\n\n df = combo_table\n\n df['srt'] = [ ' '.join(map(str, g)) 
for g in df[\"GSCMB\"] ]\n keep_idx = df[[0,2]].drop_duplicates().index\n gewd = df.iloc[keep_idx,:].reset_index(drop=True)[[\"CLS\",\"GSCMB\"]]\n\n combo_table = gewd\n\n combo_dict = combo_table.groupby('CLS')['GSCMB'].apply(lambda x: x.tolist())\n return combo_dict", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def combinations_from_options(self):\n combs = []\n Nmetals = self.options.metal_sbu_per_structure\n for combo in self.options.sbu_combinations:\n # first sbus have to be metals.\n met = []\n for i in range(Nmetals):\n met.append(self.sbus.get(combo[i], _METAL=True))\n combs.append(tuple(met + [self.sbus.get(i) for i in combo[Nmetals:]]))\n return combs", "def generarCombinaciones(self):\n combi = [list(x) for x in itertools.combinations(self.ResultConsultaLibre, 2)]\n self.CombiConsultaLibre=combi\n #print(self.CombiConsultaLibre)", "def collection(self):\n questions = []\n choice_list = []\n answers = []\n\n if self.form=='The correct German word':\n for i in range(self.num_ques):\n question, options, answer = self.generate_eng2ger()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n else:\n for i in range(self.num_ques):\n question, options, answer = self.generate_ger2eng()\n questions.append(question)\n choice_list.append(options)\n answers.append(answer)\n\n return questions, choice_list, answers", "def getCombosAPI():\n all_args = request.args.lists()\n course_list = all_args[0][1][0].split(\",\")\n u_COURSE_LIST = map((lambda x: x.upper()), course_list)#make all caps just in case\n COURSE_LIST = map( str, u_COURSE_LIST)#unicode list -> list of python strs\n combos = scheduler.schedule(COURSE_LIST)\n return jsonify(combos)", "def combos_char_grp(char_grp_code):\n select_args = (Survey.code, Indicator.code)\n joined = DatalabData.all_joined(*select_args)\n filtered = joined.filter(DatalabData.char_grp1.code == char_grp_code)\n results = filtered.distinct().all()\n survey_codes = set()\n indicator_codes = set()\n for item in results:\n survey_code = item[0]\n survey_codes.add(survey_code)\n indicator_code = item[1]\n indicator_codes.add(indicator_code)\n to_return = {\n 'survey.id': sorted(list(survey_codes)),\n 'indicator.id': sorted(list(indicator_codes))\n }\n return to_return", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def combos_indicator(indicator):\n select_args = (Survey.code, DatalabData.char_grp1.code)\n joined = DatalabData.all_joined(*select_args)\n filtered = joined.filter(Indicator.code == indicator)\n results = filtered.distinct().all()\n survey_codes = set()\n char_grp_codes = set()\n for item in results:\n survey_code = item[0]\n survey_codes.add(survey_code)\n char_grp_code = item[1]\n char_grp_codes.add(char_grp_code)\n to_return = {\n 'survey.id': sorted(list(survey_codes)),\n 'characteristicGroup.id': sorted(list(char_grp_codes))\n }\n return to_return", "def build_subsets(self):\n self.all = nrn.SectionList()\n self.all.wholetree(sec=self.soma)", "def get_combinations(self):\n all_steps = self.do_steps()\n self.option = [k for k, v in all_steps.items()]\n result = itertools.product(*(v for k, v in all_steps.items()))\n return result", "def components(self):\r\n return list(self._components)" ]
[ "0.66509014", "0.61322004", "0.5766209", "0.56483555", "0.5634054", "0.56314075", "0.5607587", "0.55948377", "0.55948377", "0.55948377", "0.55948377", "0.55781025", "0.55628926", "0.5496233", "0.54629517", "0.54608715", "0.5457087", "0.54336566", "0.54232746", "0.54089713", "0.53461367", "0.5335933", "0.53161335", "0.5310634", "0.53068", "0.53068", "0.5296456", "0.52914083", "0.52826047", "0.5280826" ]
0.6515991
1